Commit 68bf6bfd authored by Linus Torvalds

Merge tag 'ext4_for_linus-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Ext4 bug fixes and cleanups, plus some additional kunit tests"

* tag 'ext4_for_linus-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (23 commits)
  ext4: initialize sbi->s_freeclusters_counter and sbi->s_dirtyclusters_counter before use in kunit test
  ext4: hold group lock in ext4 kunit test
  ext4: alloc test super block from sget
  ext4: kunit: use dynamic inode allocation
  ext4: enable meta_bg only when new desc blocks are needed
  ext4: remove unused parameter biop in ext4_issue_discard()
  ext4: remove SLAB_MEM_SPREAD flag usage
  ext4: verify s_clusters_per_group even without bigalloc
  ext4: fix corruption during on-line resize
  ext4: don't report EOPNOTSUPP errors from discard
  ext4: drop duplicate ea_inode handling in ext4_xattr_block_set()
  ext4: fold quota accounting into ext4_xattr_inode_lookup_create()
  ext4: correct best extent lstart adjustment logic
  ext4: forbid commit inconsistent quota data when errors=remount-ro
  ext4: add a hint for block bitmap corrupt state in mb_groups
  ext4: fix the comment of ext4_map_blocks()/ext4_ext_map_blocks()
  ext4: improve error msg for ext4_mb_seq_groups_show
  ext4: remove unused buddy_loaded in ext4_mb_seq_groups_show
  ext4: Add unit test for ext4_mb_mark_diskspace_used
  ext4: Add unit test for mb_free_blocks
  ...
parents 32a50540 0ecae541
@@ -4111,10 +4111,10 @@ static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
  *
  * Need to be called with
  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
- * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ * (ie, flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  *
  * return > 0, number of blocks already mapped/allocated
- *          if create == 0 and these are pre-allocated blocks
+ *          if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are pre-allocated blocks
  *          	buffer head is unmapped
  *          otherwise blocks are mapped
  *

@@ -4218,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	/*
 	 * requested block isn't allocated yet;
-	 * we couldn't try to create block if create flag is zero
+	 * we couldn't try to create block if flags doesn't contain EXT4_GET_BLOCKS_CREATE
 	 */
 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 		ext4_lblk_t len;
@@ -465,9 +465,10 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
  * based files
  *
- * On success, it returns the number of blocks being mapped or allocated. if
- * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
- * is marked as unwritten. If the create == 1, it will mark @map as mapped.
+ * On success, it returns the number of blocks being mapped or allocated.
+ * If flags doesn't contain EXT4_GET_BLOCKS_CREATE the blocks are
+ * pre-allocated and unwritten, the resulting @map is marked as unwritten.
+ * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
  *
  * It returns 0 if plain look up failed (blocks have not been allocated), in
  * that case, @map is returned as unmapped but we still do fill map->m_len to

@@ -589,8 +590,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 		 * Returns if the blocks have already allocated
 		 *
 		 * Note that if blocks have been preallocated
-		 * ext4_ext_get_block() returns the create = 0
-		 * with buffer head unmapped.
+		 * ext4_ext_map_blocks() returns with buffer head unmapped
 		 */
 		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
 			/*
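The comment fixes above all document the same convention: ext4_map_blocks() and ext4_ext_map_blocks() take a flags bitmask rather than the old create boolean. A minimal sketch of a hypothetical in-kernel caller, using only the APIs named in the diff (the helper itself is illustrative and not part of this merge):

/*
 * Hypothetical caller illustrating the flags convention documented above:
 * flags == 0 is a pure lookup (takes down_read(&EXT4_I(inode)->i_data_sem)
 * internally); EXT4_GET_BLOCKS_CREATE asks for real allocation.
 */
static int example_map_one_extent(handle_t *handle, struct inode *inode,
				  ext4_lblk_t lblk, unsigned int len)
{
	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
	int ret;

	/* Lookup only: preallocated unwritten blocks come back unmapped. */
	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
		return ret;	/* already mapped and written */

	/* Nothing usable mapped yet: allocate under the journal handle. */
	return ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
}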
@@ -3015,8 +3015,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 {
 	struct super_block *sb = pde_data(file_inode(seq->file));
 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
-	int i;
-	int err, buddy_loaded = 0;
+	int i, err;
+	char nbuf[16];
 	struct ext4_buddy e4b;
 	struct ext4_group_info *grinfo;
 	unsigned char blocksize_bits = min_t(unsigned char,

@@ -3043,23 +3043,26 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
 		err = ext4_mb_load_buddy(sb, group, &e4b);
 		if (err) {
-			seq_printf(seq, "#%-5u: I/O error\n", group);
+			seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf));
 			return 0;
 		}
-		buddy_loaded = 1;
+		ext4_mb_unload_buddy(&e4b);
 	}

+	/*
+	 * We care only about free space counters in the group info and
+	 * these are safe to access even after the buddy has been unloaded
+	 */
 	memcpy(&sg, grinfo, i);
-	if (buddy_loaded)
-		ext4_mb_unload_buddy(&e4b);
 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
 			sg.info.bb_fragments, sg.info.bb_first_free);
 	for (i = 0; i <= 13; i++)
 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
 				sg.info.bb_counters[i] : 0);
-	seq_puts(seq, " ]\n");
+	seq_puts(seq, " ]");
+	if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+		seq_puts(seq, " Block bitmap corrupted!");
+	seq_puts(seq, "\n");

 	return 0;
 }
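After this change, a corrupted group's line in /proc/fs/ext4/<dev>/mb_groups ends with "Block bitmap corrupted!". A small userspace sketch that scans for the hint; the device path is an assumption for illustration:

#include <stdio.h>
#include <string.h>

/* Scan an mb_groups file for the corruption hint added above. */
int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/proc/fs/ext4/sda1/mb_groups";
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "Block bitmap corrupted!"))
			fputs(line, stdout);	/* print the affected group's line */
	fclose(f);
	return 0;
}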
@@ -3829,8 +3832,7 @@ void ext4_mb_release(struct super_block *sb)
 }

 static inline int ext4_issue_discard(struct super_block *sb,
-		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
-		struct bio **biop)
+		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
 {
 	ext4_fsblk_t discard_block;

@@ -3839,13 +3841,8 @@ static inline int ext4_issue_discard(struct super_block *sb,
 	count = EXT4_C2B(EXT4_SB(sb), count);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	if (biop) {
-		return __blkdev_issue_discard(sb->s_bdev,
-			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
-			(sector_t)count << (sb->s_blocksize_bits - 9),
-			GFP_NOFS, biop);
-	} else
-		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }

 static void ext4_free_data_in_buddy(struct super_block *sb,
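With the biop branch gone, every caller funnels into sb_issue_discard(). For context, that helper is roughly the following wrapper (paraphrased from include/linux/blkdev.h in this kernel series; treat the exact body as an approximation, and note the flags argument is unused here):

/* Roughly what sb_issue_discard() does: convert filesystem blocks to
 * 512-byte sectors and hand the range to the block layer. */
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits - SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits - SECTOR_SHIFT),
				    gfp_mask);
}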
@@ -5169,10 +5166,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 			.fe_len = ac->ac_orig_goal_len,
 		};
 		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+		loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);

-		/* we can't allocate as much as normalizer wants.
-		 * so, found space must get proper lstart
-		 * to cover original request */
+		/*
+		 * We can't allocate as much as normalizer wants, so we try
+		 * to get proper lstart to cover the original request, except
+		 * when the goal doesn't cover the original request as below:
+		 *
+		 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
+		 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
+		 */
 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

@@ -5184,7 +5187,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 		 * 1. Check if best ex can be kept at end of goal (before
 		 *    cr_best_avail trimmed it) and still cover original start
 		 * 2. Else, check if best ex can be kept at start of goal and
-		 *    still cover original start
+		 *    still cover original end
 		 * 3. Else, keep the best ex at start of original request.
 		 */
 		ex.fe_len = ac->ac_b_ex.fe_len;

@@ -5194,7 +5197,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 			goto adjust_bex;

 		ex.fe_logical = ac->ac_g_ex.fe_logical;
-		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+		if (o_ex_end <= extent_logical_end(sbi, &ex))
 			goto adjust_bex;

 		ex.fe_logical = ac->ac_o_ex.fe_logical;

@@ -5202,7 +5205,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 		ac->ac_b_ex.fe_logical = ex.fe_logical;

 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
-		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
 		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
 	}
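The example in the rewritten comment can be checked with plain arithmetic. Below is a standalone sketch of the three adjustment steps, with fe_logical/fe_len flattened to plain integers and the cluster-to-block conversion omitted; the numbers are the ones from the comment:

#include <stdio.h>

int main(void)
{
	/* Values from the comment in the diff above (all in blocks). */
	unsigned long o_start = 2045, o_end = 2055;	/* original request   */
	unsigned long g_start = 0, g_end = 2048;	/* normalized goal    */
	unsigned long b_len = 200;			/* best extent length */
	unsigned long start;

	start = g_end - b_len;		/* 1: keep best at end of goal */
	if (o_start >= start)
		goto done;
	start = g_start;		/* 2: keep best at start of goal */
	if (o_end <= start + b_len)	/*    (now checks original *end*) */
		goto done;
	start = o_start;		/* 3: fall back to original start */
done:
	printf("adjusted: %lu/%lu(%lu)\n", start, start + b_len, b_len);
	/* prints: adjusted: 1848/2048(200) */
	return 0;
}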
@@ -6487,8 +6489,14 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
 	} else {
 		if (test_opt(sb, DISCARD)) {
 			err = ext4_issue_discard(sb, block_group, bit,
-						 count_clusters, NULL);
-			if (err && err != -EOPNOTSUPP)
+						 count_clusters);
+			/*
+			 * Ignore EOPNOTSUPP error. This is consistent with
+			 * what happens when using journal.
+			 */
+			if (err == -EOPNOTSUPP)
+				err = 0;
+			if (err)
 				ext4_msg(sb, KERN_WARNING, "discard request in"
 					 " group:%u block:%d count:%lu failed"
 					 " with %d", block_group, bit, count,

@@ -6738,7 +6746,7 @@ __acquires(bitlock)
 	 */
 	mb_mark_used(e4b, &ex);
 	ext4_unlock_group(sb, group);
-	ret = ext4_issue_discard(sb, group, start, count, NULL);
+	ret = ext4_issue_discard(sb, group, start, count);
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
 	return ret;
@@ -1602,7 +1602,8 @@ static int ext4_flex_group_add(struct super_block *sb,
 		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
 		int gdb_num_end = ((group + flex_gd->count - 1) /
 				   EXT4_DESC_PER_BLOCK(sb));
-		int meta_bg = ext4_has_feature_meta_bg(sb);
+		int meta_bg = ext4_has_feature_meta_bg(sb) &&
+			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
 		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
 					  ext4_group_first_block_no(sb, 0);

@@ -2084,7 +2085,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
 		}
 	}

-	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
+	if ((!resize_inode && !meta_bg && n_desc_blocks > o_desc_blocks) || n_blocks_count == o_blocks_count) {
 		err = ext4_convert_meta_bg(sb, resize_inode);
 		if (err)
 			goto out;
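The new condition means a flex-group add only switches to the meta_bg layout once the group's descriptor block index reaches s_first_meta_bg. A standalone sketch with assumed geometry (4 KiB blocks and 32-byte descriptors, hence 128 descriptors per block; all numbers are illustrative, not from this diff):

#include <stdio.h>

int main(void)
{
	unsigned long desc_per_block = 4096 / 32;	/* EXT4_DESC_PER_BLOCK */
	unsigned long first_meta_bg = 256;		/* es->s_first_meta_bg */
	unsigned long group = 20000;			/* group being added   */

	unsigned long gdb_num = group / desc_per_block;	/* 156 */
	int meta_bg = gdb_num >= first_meta_bg;

	printf("gdb_num=%lu -> %s layout for this descriptor block\n",
	       gdb_num, meta_bg ? "meta_bg" : "traditional");
	return 0;
}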
@@ -4421,22 +4421,6 @@ static int ext4_handle_clustersize(struct super_block *sb)
 		}
 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
 			le32_to_cpu(es->s_log_block_size);
-		sbi->s_clusters_per_group =
-			le32_to_cpu(es->s_clusters_per_group);
-		if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
-			ext4_msg(sb, KERN_ERR,
-				 "#clusters per group too big: %lu",
-				 sbi->s_clusters_per_group);
-			return -EINVAL;
-		}
-		if (sbi->s_blocks_per_group !=
-		    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
-			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
-				 "clusters per group (%lu) inconsistent",
-				 sbi->s_blocks_per_group,
-				 sbi->s_clusters_per_group);
-			return -EINVAL;
-		}
 	} else {
 		if (clustersize != sb->s_blocksize) {
 			ext4_msg(sb, KERN_ERR,

@@ -4450,9 +4434,21 @@ static int ext4_handle_clustersize(struct super_block *sb)
 				 sbi->s_blocks_per_group);
 			return -EINVAL;
 		}
-		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
 		sbi->s_cluster_bits = 0;
 	}
+	sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group);
+	if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
+		ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu",
+			 sbi->s_clusters_per_group);
+		return -EINVAL;
+	}
+	if (sbi->s_blocks_per_group !=
+	    (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) {
+		ext4_msg(sb, KERN_ERR,
+			 "blocks per group (%lu) and clusters per group (%lu) inconsistent",
+			 sbi->s_blocks_per_group, sbi->s_clusters_per_group);
+		return -EINVAL;
+	}
 	sbi->s_cluster_ratio = clustersize / sb->s_blocksize;

 	/* Do we have standard group size of clustersize * 8 blocks ? */
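Because the validation now sits after the if/else, the same checks cover both the bigalloc and non-bigalloc paths. A standalone sketch of the invariants being enforced, with an assumed bigalloc geometry (4 KiB blocks, 64 KiB clusters; the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096, clustersize = 65536;
	unsigned long clusters_per_group = 2048;
	unsigned long blocks_per_group =
		clusters_per_group * (clustersize / blocksize);	/* 32768 */

	/* Check 1: the cluster bitmap must fit in one block. */
	if (clusters_per_group > blocksize * 8)
		printf("#clusters per group too big: %lu\n", clusters_per_group);
	/* Check 2: blocks/group must agree with clusters/group * ratio. */
	else if (blocks_per_group !=
		 clusters_per_group * (clustersize / blocksize))
		printf("blocks per group and clusters per group inconsistent\n");
	else
		printf("geometry OK: %lu blocks/group, %lu clusters/group\n",
		       blocks_per_group, clusters_per_group);
	return 0;
}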
@@ -6864,6 +6860,10 @@ static int ext4_write_dquot(struct dquot *dquot)
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 	ret = dquot_commit(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to commit dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;

@@ -6880,6 +6880,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 	ret = dquot_acquire(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to acquire dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;

@@ -6899,6 +6903,10 @@ static int ext4_release_dquot(struct dquot *dquot)
 		return PTR_ERR(handle);
 	}
 	ret = dquot_release(dquot);
+	if (ret < 0)
+		ext4_error_err(dquot->dq_sb, -ret,
+			       "Failed to release dquot type %d",
+			       dquot->dq_id.type);
 	err = ext4_journal_stop(handle);
 	if (!ret)
 		ret = err;
@@ -1565,46 +1565,49 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 /*
  * Add value of the EA in an inode.
  */
-static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
-					  const void *value, size_t value_len,
-					  struct inode **ret_inode)
+static struct inode *ext4_xattr_inode_lookup_create(handle_t *handle,
+		struct inode *inode, const void *value, size_t value_len)
 {
 	struct inode *ea_inode;
 	u32 hash;
 	int err;

+	/* Account inode & space to quota even if sharing... */
+	err = ext4_xattr_inode_alloc_quota(inode, value_len);
+	if (err)
+		return ERR_PTR(err);
+
 	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
 	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
 	if (ea_inode) {
 		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
-		if (err) {
-			iput(ea_inode);
-			return err;
-		}
-		*ret_inode = ea_inode;
-		return 0;
+		if (err)
+			goto out_err;
+		return ea_inode;
 	}

 	/* Create an inode for the EA value */
 	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
-	if (IS_ERR(ea_inode))
-		return PTR_ERR(ea_inode);
+	if (IS_ERR(ea_inode)) {
+		ext4_xattr_inode_free_quota(inode, NULL, value_len);
+		return ea_inode;
+	}

 	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
 	if (err) {
 		if (ext4_xattr_inode_dec_ref(handle, ea_inode))
 			ext4_warning_inode(ea_inode, "cleanup dec ref error %d", err);
-		iput(ea_inode);
-		return err;
+		goto out_err;
 	}

 	if (EA_INODE_CACHE(inode))
 		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
 				      ea_inode->i_ino, true /* reusable */);
-
-	*ret_inode = ea_inode;
-	return 0;
+	return ea_inode;
+out_err:
+	iput(ea_inode);
+	ext4_xattr_inode_free_quota(inode, NULL, value_len);
+	return ERR_PTR(err);
 }

 /*

@@ -1712,16 +1715,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 	if (i->value && in_inode) {
 		WARN_ON_ONCE(!i->value_len);

-		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
-		if (ret)
-			goto out;
-
-		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
-						     i->value_len,
-						     &new_ea_inode);
-		if (ret) {
+		new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+					i->value, i->value_len);
+		if (IS_ERR(new_ea_inode)) {
+			ret = PTR_ERR(new_ea_inode);
 			new_ea_inode = NULL;
-			ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
 			goto out;
 		}
 	}

@@ -2160,17 +2158,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 					      ENTRY(header(s->base)+1));
 			if (error)
 				goto getblk_failed;
-			if (ea_inode) {
-				/* Drop the extra ref on ea_inode. */
-				error = ext4_xattr_inode_dec_ref(handle,
-								 ea_inode);
-				if (error)
-					ext4_warning_inode(ea_inode,
-							   "dec ref error=%d",
-							   error);
-				iput(ea_inode);
-				ea_inode = NULL;
-			}

 			lock_buffer(new_bh);
 			error = ext4_journal_get_create_access(handle, sb,