Commit 65f8b800 authored by Jan Kara, committed by Theodore Ts'o

ext4: fix race when reusing xattr blocks

When ext4_xattr_block_set() decides to remove an xattr block, the following
race can happen:

CPU1                                    CPU2
ext4_xattr_block_set()                  ext4_xattr_release_block()
  new_bh = ext4_xattr_block_cache_find()

                                          lock_buffer(bh);
                                          ref = le32_to_cpu(BHDR(bh)->h_refcount);
                                          if (ref == 1) {
                                            ...
                                            mb_cache_entry_delete();
                                            unlock_buffer(bh);
                                            ext4_free_blocks();
                                              ...
                                              ext4_forget(..., bh, ...);
                                                jbd2_journal_revoke(..., bh);

  ext4_journal_get_write_access(..., new_bh, ...)
    do_get_write_access()
      jbd2_journal_cancel_revoke(..., new_bh);

Later the code in ext4_xattr_block_set() finds out the block got freed
and cancels reuse of the block, but the revoke stays canceled, so if the
block is reused and the journal is replayed, the filesystem can get
corrupted. If the race works out slightly differently, we can also hit
assertions in the jbd2 code.

Fix the problem by making sure that once a matching mbcache entry is
found, the code dropping the last xattr block reference (or trying to
modify the xattr block in place) waits until the mbcache entry reference
is dropped. This way the code trying to reuse the xattr block is
protected from someone trying to drop the last reference to the xattr
block (a short sketch of this wait-and-retry pattern is included below,
just before the diff).
Reported-and-tested-by: Ritesh Harjani <ritesh.list@gmail.com>
CC: stable@vger.kernel.org
Fixes: 82939d79 ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220712105436.32204-5-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent fd48e9ac
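For context, the fix builds on the mb_cache helpers used in this series:
mb_cache_entry_delete_or_get() removes an entry if it is unused and otherwise
returns it with an extra reference, while mb_cache_entry_wait_unused() sleeps
until the remaining users drop the entry. A minimal sketch of the resulting
wait-and-retry pattern, mirroring the ext4_evict_ea_inode() hunk in the diff
below (the function name and the cache/key/value parameters here are
placeholders for illustration only):

	#include <linux/mbcache.h>

	/*
	 * Sketch only: remove the mbcache entry identified by (key, value),
	 * waiting out any concurrent user that still holds a reference.
	 */
	static void drop_entry_waiting(struct mb_cache *cache, u32 key, u64 value)
	{
		struct mb_cache_entry *oe;

		while ((oe = mb_cache_entry_delete_or_get(cache, key, value))) {
			/* Entry is still in use; sleep until it becomes unused. */
			mb_cache_entry_wait_unused(oe);
			/* Drop the reference delete_or_get() returned, then retry. */
			mb_cache_entry_put(cache, oe);
		}
	}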
fs/ext4/xattr.c
@@ -439,9 +439,16 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
 /* Remove entry from mbcache when EA inode is getting evicted */
 void ext4_evict_ea_inode(struct inode *inode)
 {
-	if (EA_INODE_CACHE(inode))
-		mb_cache_entry_delete(EA_INODE_CACHE(inode),
-			ext4_xattr_inode_get_hash(inode), inode->i_ino);
+	struct mb_cache_entry *oe;
+
+	if (!EA_INODE_CACHE(inode))
+		return;
+	/* Wait for entry to get unused so that we can remove it */
+	while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
+			ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
+		mb_cache_entry_wait_unused(oe);
+		mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
+	}
 }
 
 static int
@@ -1229,6 +1236,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 	if (error)
 		goto out;
 
+retry_ref:
 	lock_buffer(bh);
 	hash = le32_to_cpu(BHDR(bh)->h_hash);
 	ref = le32_to_cpu(BHDR(bh)->h_refcount);
@@ -1238,9 +1246,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		 * This must happen under buffer lock for
 		 * ext4_xattr_block_set() to reliably detect freed block
 		 */
-		if (ea_block_cache)
-			mb_cache_entry_delete(ea_block_cache, hash,
-					      bh->b_blocknr);
+		if (ea_block_cache) {
+			struct mb_cache_entry *oe;
+
+			oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
+							  bh->b_blocknr);
+			if (oe) {
+				unlock_buffer(bh);
+				mb_cache_entry_wait_unused(oe);
+				mb_cache_entry_put(ea_block_cache, oe);
+				goto retry_ref;
+			}
+		}
 		get_bh(bh);
 		unlock_buffer(bh);
 
@@ -1867,9 +1884,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			 * ext4_xattr_block_set() to reliably detect modified
 			 * block
 			 */
-			if (ea_block_cache)
-				mb_cache_entry_delete(ea_block_cache, hash,
-						      bs->bh->b_blocknr);
+			if (ea_block_cache) {
+				struct mb_cache_entry *oe;
+
+				oe = mb_cache_entry_delete_or_get(ea_block_cache,
+					hash, bs->bh->b_blocknr);
+				if (oe) {
+					/*
+					 * Xattr block is getting reused. Leave
+					 * it alone.
+					 */
+					mb_cache_entry_put(ea_block_cache, oe);
+					goto clone_block;
+				}
+			}
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s, handle, inode,
 						     true /* is_block */);
@@ -1885,6 +1913,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 				goto cleanup;
 			goto inserted;
 		}
+clone_block:
 		unlock_buffer(bs->bh);
 		ea_bdebug(bs->bh, "cloning");
 		s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
@@ -1990,18 +2019,13 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			lock_buffer(new_bh);
 			/*
 			 * We have to be careful about races with
-			 * freeing, rehashing or adding references to
-			 * xattr block. Once we hold buffer lock xattr
-			 * block's state is stable so we can check
-			 * whether the block got freed / rehashed or
-			 * not. Since we unhash mbcache entry under
-			 * buffer lock when freeing / rehashing xattr
-			 * block, checking whether entry is still
-			 * hashed is reliable. Same rules hold for
-			 * e_reusable handling.
+			 * adding references to xattr block. Once we
+			 * hold buffer lock xattr block's state is
+			 * stable so we can check the additional
+			 * reference fits.
 			 */
-			if (hlist_bl_unhashed(&ce->e_hash_list) ||
-			    !ce->e_reusable) {
+			ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+			if (ref > EXT4_XATTR_REFCOUNT_MAX) {
 				/*
 				 * Undo everything and check mbcache
 				 * again.
@@ -2016,9 +2040,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 				new_bh = NULL;
 				goto inserted;
 			}
-			ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
 			BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
-			if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+			if (ref == EXT4_XATTR_REFCOUNT_MAX)
 				ce->e_reusable = 0;
 			ea_bdebug(new_bh, "reusing; refcount now=%d",
 				  ref);