Commit 9a9dc3ec authored by Theodore Ts'o, committed by Greg Kroah-Hartman

ext4: lock the xattr block before checksuming it

commit dac7a4b4 upstream.

We must lock the xattr block before calculating or verifying the
checksum in order to avoid spurious checksum failures.

https://bugzilla.kernel.org/show_bug.cgi?id=193661
Reported-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f572ba9a
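
The race the patch closes is easiest to see outside the kernel. Below is a minimal userspace sketch (plain C with pthreads, not ext4 code; names such as shared_block, block_update and toy_csum are illustrative, not ext4 symbols) of the discipline the diff enforces: the checksum of a shared block is computed, stored and verified only while the block lock is held, so a concurrent writer cannot change the payload between the checksum update and the read and trigger a spurious mismatch.

/*
 * Minimal userspace analogy of the patch's locking rule.  The mutex stands
 * in for lock_buffer()/unlock_buffer(); csum stands in for h_checksum.
 * Build with:  cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct shared_block {
	pthread_mutex_t lock;	/* protects data and csum together */
	uint32_t csum;		/* checksum of data */
	unsigned char data[64];	/* the shared payload */
};

/* Toy checksum; the real code uses crc32c over the xattr block. */
static uint32_t toy_csum(const unsigned char *p, size_t len)
{
	uint32_t c = 0;

	while (len--)
		c = c * 31 + *p++;
	return c;
}

/* Writer: update payload and checksum atomically w.r.t. verifiers. */
static void block_update(struct shared_block *b, const char *s)
{
	pthread_mutex_lock(&b->lock);
	strncpy((char *)b->data, s, sizeof(b->data) - 1);
	b->csum = toy_csum(b->data, sizeof(b->data));
	pthread_mutex_unlock(&b->lock);
}

/*
 * Verifier: take the same lock around the recomputation, mirroring the
 * patched ext4_xattr_block_csum_verify(), so the payload cannot change
 * between the stored checksum read and the recomputation.
 */
static int block_csum_verify(struct shared_block *b)
{
	int ok;

	pthread_mutex_lock(&b->lock);
	ok = (b->csum == toy_csum(b->data, sizeof(b->data)));
	pthread_mutex_unlock(&b->lock);
	return ok;
}

int main(void)
{
	struct shared_block b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	block_update(&b, "user.test=value");
	printf("checksum %s\n", block_csum_verify(&b) ? "ok" : "BAD");
	return 0;
}

In the patch itself the same idea appears as lock_buffer()/unlock_buffer() around the checksum computation in ext4_xattr_block_csum_verify(), and as calling ext4_xattr_block_csum_set() just before unlock_buffer() in the writers, replacing ext4_handle_dirty_xattr_block(), which computed the checksum without the buffer lock held in several call paths.
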
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-					sector_t block_nr,
-					struct ext4_xattr_header *hdr)
+					struct buffer_head *bh)
 {
-	if (ext4_has_metadata_csum(inode->i_sb) &&
-	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-		return 0;
-	return 1;
-}
-
-static void ext4_xattr_block_csum_set(struct inode *inode,
-				      sector_t block_nr,
-				      struct ext4_xattr_header *hdr)
-{
-	if (!ext4_has_metadata_csum(inode->i_sb))
-		return;
+	struct ext4_xattr_header *hdr = BHDR(bh);
+	int ret = 1;
 
-	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+	if (ext4_has_metadata_csum(inode->i_sb)) {
+		lock_buffer(bh);
+		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+							bh->b_blocknr, hdr));
+		unlock_buffer(bh);
+	}
+	return ret;
 }
 
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-						struct inode *inode,
-						struct buffer_head *bh)
+static void ext4_xattr_block_csum_set(struct inode *inode,
+				      struct buffer_head *bh)
 {
-	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-	return ext4_handle_dirty_metadata(handle, inode, bh);
+	if (ext4_has_metadata_csum(inode->i_sb))
+		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+						bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -218,7 +213,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
-	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 				       bh->b_data);
@@ -601,23 +596,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		}
 	}
 
+	ext4_xattr_block_csum_set(inode, bh);
 	/*
 	 * Beware of this ugliness: Releasing of xattr block references
 	 * from different inodes can race and so we have to protect
 	 * from a race where someone else frees the block (and releases
 	 * its journal_head) before we are done dirtying the buffer. In
 	 * nojournal mode this race is harmless and we actually cannot
-	 * call ext4_handle_dirty_xattr_block() with locked buffer as
+	 * call ext4_handle_dirty_metadata() with locked buffer as
 	 * that function can call sync_dirty_buffer() so for that case
 	 * we handle the dirtying after unlocking the buffer.
 	 */
 	if (ext4_handle_valid(handle))
-		error = ext4_handle_dirty_xattr_block(handle, inode,
-						      bh);
+		error = ext4_handle_dirty_metadata(handle, inode, bh);
 	unlock_buffer(bh);
 	if (!ext4_handle_valid(handle))
-		error = ext4_handle_dirty_xattr_block(handle, inode,
-						      bh);
+		error = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
 	dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -846,11 +840,12 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 					ext4_xattr_cache_insert(ext4_mb_cache,
 						bs->bh);
 			}
+			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EFSCORRUPTED)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      bs->bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   bs->bh);
 			if (error)
@@ -950,8 +945,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 				ce->e_reusable = 0;
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					  ref);
+				ext4_xattr_block_csum_set(inode, new_bh);
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      new_bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   new_bh);
 				if (error)
@@ -1003,11 +999,12 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			goto getblk_failed;
 		}
 		memcpy(new_bh->b_data, s->base, new_bh->b_size);
+		ext4_xattr_block_csum_set(inode, new_bh);
 		set_buffer_uptodate(new_bh);
 		unlock_buffer(new_bh);
 		ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-		error = ext4_handle_dirty_xattr_block(handle,
-						      inode, new_bh);
+		error = ext4_handle_dirty_metadata(handle, inode,
+						   new_bh);
 		if (error)
 			goto cleanup;
 	}