@@ -93,6 +93,9 @@ typedef unsigned int ext4_group_t;
/* may allocate from restricted blocks whose rule is only advisory */
#define EXT4_MB_ALLOC_ADVISORY 4096
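+/*
+ * Translate an allocation rule's alloc_flag into mballoc hint flags:
+ * when the flag is set the goal block is only a hint (try it first),
+ * when it is clear the goal is mandatory (GOAL_ONLY).
+ */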
+#define EXT4_MB_HINT_FLAGS(flag) ((flag) ? EXT4_MB_HINT_TRY_GOAL : \
+			(EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))
+
struct ext4_allocation_request {
/* target inode for block we're allocating */
struct inode *inode;
@@ -1400,6 +1403,7 @@ extern void ext4_mb_release_arule_list(struct ext4_sb_info *);
extern int ext4_mb_add_inode_arule(struct inode *inode,
struct ext4_alloc_rule *arule);
extern void ext4_mb_del_inode_arule(struct inode *inode);
+extern void ext4_mb_dec_inode_arule(struct inode *inode, unsigned int len);
/* inode.c */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
@@ -2783,6 +2783,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
int err = 0, depth, ret, cache_type;
unsigned int allocated = 0;
struct ext4_allocation_request ar;
+ struct ext4_inode_info *ei = EXT4_I(inode);
__clear_bit(BH_New, &bh_result->b_state);
ext_debug("blocks %u/%u requested for inode %u\n",
@@ -2938,15 +2939,23 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* allocate new block */
ar.inode = inode;
- ar.goal = ext4_ext_find_goal(inode, path, iblock);
ar.logical = iblock;
- ar.len = allocated;
- if (S_ISREG(inode->i_mode))
- ar.flags = EXT4_MB_HINT_DATA;
- else
- /* disable in-core preallocation for non-regular files */
- ar.flags = 0;
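+	/*
+	 * If the current task has installed an allocation rule on this
+	 * inode, take the allocation goal and hint flags from that rule
+	 * instead of the usual goal heuristic.
+	 */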
+ if (ei->i_alloc_rule && ei->i_alloc_rule->alloc_pid == current->pid) {
+ ar.goal = ei->i_alloc_rule->alloc_rule.start;
+ ar.len = allocated;
+ ar.flags = EXT4_MB_HINT_FLAGS(
+ ei->i_alloc_rule->alloc_rule.alloc_flag);
+ } else {
+ ar.goal = ext4_ext_find_goal(inode, path, iblock);
+ ar.len = allocated;
+ if (S_ISREG(inode->i_mode))
+ ar.flags = EXT4_MB_HINT_DATA;
+ else
+			/* disable in-core preallocation for non-regular files */
+ ar.flags = 0;
+ }
newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
+	/* shrink the allocation rule only after a successful allocation */
+	ext4_mb_dec_inode_arule(inode, ar.len);
ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
@@ -4579,6 +4579,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
struct super_block *sb = ar->inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ struct ext4_inode_info *ei = EXT4_I(ar->inode);
ext4_group_t group;
unsigned int len;
ext4_fsblk_t goal;
@@ -4587,10 +4588,6 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
/* we can't allocate > group size */
len = ar->len;
- /* just a dirty hack to filter too big requests */
- if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
- len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
-
/* start searching from the goal */
goal = ar->goal;
if (goal < le32_to_cpu(es->s_first_data_block) ||
@@ -4598,6 +4595,16 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
goal = le32_to_cpu(es->s_first_data_block);
ext4_get_group_no_and_offset(sb, goal, &group, &block);
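+	/*
+	 * Requests issued under the caller's allocation rule are sized by
+	 * the rule itself; only trim them at the block group boundary
+	 * instead of applying the generic big-request filter.
+	 */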
+ if (ei->i_alloc_rule && ei->i_alloc_rule->alloc_pid == current->pid) {
+		/* clamp len so the request does not cross a block group boundary */
+ if (len + block > EXT4_BLOCKS_PER_GROUP(sb))
+ len = EXT4_BLOCKS_PER_GROUP(sb) - block;
+ } else {
+ /* just a dirty hack to filter too big requests */
+ if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
+ len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
+ }
+
/* set up allocation goals */
ac->ac_b_ex.fe_logical = ar->logical;
ac->ac_b_ex.fe_group = 0;
@@ -5795,3 +5802,15 @@ void ext4_mb_del_inode_arule(struct inode *inode)
kfree(ei->i_alloc_rule);
ei->i_alloc_rule = NULL;
}
+
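+/*
+ * Shrink the inode's allocation rule by @len blocks after an
+ * allocation, dropping the rule once it is fully consumed.  Only the
+ * task that installed the rule (alloc_pid) may consume it.
+ */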
+void ext4_mb_dec_inode_arule(struct inode *inode, unsigned int len)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	if (!ei->i_alloc_rule || ei->i_alloc_rule->alloc_pid != current->pid)
+		return;
+
+	/* remove the rule once it is fully consumed (avoids unsigned underflow) */
+	if (len >= ei->i_alloc_rule->alloc_rule.len) {
+		ext4_mb_del_inode_arule(inode);
+		return;
+	}
+	ei->i_alloc_rule->alloc_rule.start += len;
+	ei->i_alloc_rule->alloc_rule.len -= len;
+}