Commit 70a2dc6a authored by Linus Torvalds

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 bugfixes from Ted Ts'o:
 "Bug fixes for ext4, most of which relate to vulnerabilities where a
  maliciously crafted file system image can result in a kernel OOPS or
  hang.

  At least one fix addresses an inline data bug that could be triggered
  by userspace without the need for a crafted file system (although it
  does require that the inline data feature be enabled)"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: check superblock mapped prior to committing
  ext4: add more mount time checks of the superblock
  ext4: add more inode number paranoia checks
  ext4: avoid running out of journal credits when appending to an inline file
  jbd2: don't mark block as modified if the handle is out of credits
  ext4: never move the system.data xattr out of the inode body
  ext4: clear i_data in ext4_inode_info when removing inline data
  ext4: include the illegal physical block in the bad map ext4_error msg
  ext4: verify the depth of extent tree in ext4_find_extent()
  ext4: only look at the bg_flags field if it is valid
  ext4: make sure bitmaps and the inode table don't overlap with bg descriptors
  ext4: always check block group bounds in ext4_init_block_bitmap()
  ext4: always verify the magic number in xattr blocks
  ext4: add corruption check in ext4_xattr_set_entry()
  ext4: add warn_on_error mount option
parents 8979319f a17712c8
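
Several of the mount-time fixes below boil down to plain arithmetic consistency checks on superblock fields. As a rough illustration of the inode-count check added to ext4_fill_super() by "ext4: add more mount time checks of the superblock", here is a userspace-style sketch; the struct, field, and helper names are simplified stand-ins for this example only, not the kernel's on-disk definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the handful of superblock fields involved;
 * the real ext4 superblock is little-endian on disk and far larger. */
struct sb_geometry {
	uint64_t blocks_count;
	uint32_t first_data_block;
	uint32_t blocks_per_group;
	uint32_t inodes_per_group;
	uint32_t inodes_count;
};

/* Mirrors the arithmetic of the new check: the group count derived from
 * the block geometry, times inodes_per_group, must equal the advertised
 * total inode count, or the image is internally inconsistent. */
static int check_inode_count(const struct sb_geometry *g)
{
	uint64_t groups = (g->blocks_count - g->first_data_block +
			   g->blocks_per_group - 1) / g->blocks_per_group;

	if (groups * g->inodes_per_group != g->inodes_count) {
		fprintf(stderr, "inodes count not valid: %u vs %llu\n",
			g->inodes_count,
			(unsigned long long)(groups * g->inodes_per_group));
		return -1;
	}
	return 0;
}

int main(void)
{
	/* one 8192-block group with 2048 inodes; the second image lies
	 * about its total inode count */
	struct sb_geometry good = { 8192, 1, 8192, 2048, 2048 };
	struct sb_geometry bad  = { 8192, 1, 8192, 2048, 4096 };

	printf("good image: %d\n", check_inode_count(&good));
	printf("bad image:  %d\n", check_inode_count(&bad));
	return 0;
}

With these values the second, inconsistent geometry is rejected with the same "inodes count not valid" complaint that the kernel now logs at mount time.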
fs/ext4/balloc.c
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 	unsigned int bit, bit_max;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t start, tmp;
-	int flex_bg = 0;
 	J_ASSERT_BH(bh, buffer_locked(bh));
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 	start = ext4_group_first_block_no(sb, block_group);
-	if (ext4_has_feature_flex_bg(sb))
-		flex_bg = 1;
 	/* Set bits for block and inode bitmaps, and inode table */
 	tmp = ext4_block_bitmap(sb, gdp);
-	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+	if (ext4_block_in_group(sb, tmp, block_group))
 		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 	tmp = ext4_inode_bitmap(sb, gdp);
-	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+	if (ext4_block_in_group(sb, tmp, block_group))
 		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 	tmp = ext4_inode_table(sb, gdp);
 	for (; tmp < ext4_inode_table(sb, gdp) +
 		     sbi->s_itb_per_group; tmp++) {
-		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+		if (ext4_block_in_group(sb, tmp, block_group))
 			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 	}
@@ -442,7 +438,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
 		goto verify;
 	}
 	ext4_lock_group(sb, block_group);
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+		if (block_group == 0) {
+			ext4_unlock_group(sb, block_group);
+			unlock_buffer(bh);
+			ext4_error(sb, "Block bitmap for bg 0 marked "
+				   "uninitialized");
+			err = -EFSCORRUPTED;
+			goto out;
+		}
 		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
...
fs/ext4/ext4.h
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DIOREAD_NOLOCK	0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
+#define EXT4_MOUNT_WARN_ON_ERROR	0x2000000 /* Trigger WARN_ON on error */
 #define EXT4_MOUNT_DELALLOC		0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT	0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY	0x20000000 /* Block validity checking */
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
 	return ino == EXT4_ROOT_INO ||
-		ino == EXT4_USR_QUOTA_INO ||
-		ino == EXT4_GRP_QUOTA_INO ||
-		ino == EXT4_BOOT_LOADER_INO ||
-		ino == EXT4_JOURNAL_INO ||
-		ino == EXT4_RESIZE_INO ||
 		(ino >= EXT4_FIRST_INO(sb) &&
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode,
 struct iomap;
 extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
-					 struct inode *inode,
-					 int needed);
 extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 extern int ext4_convert_inline_data(struct inode *inode);
...
fs/ext4/ext4_extents.h
@@ -91,6 +91,7 @@ struct ext4_extent_header {
 };
 #define EXT4_EXT_MAGIC		cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
 	(sizeof(struct ext4_extent_header) + \
...
fs/ext4/extents.c
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 	eh = ext_inode_hdr(inode);
 	depth = ext_depth(inode);
+	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+				 depth);
+		ret = -EFSCORRUPTED;
+		goto err;
+	}
 	if (path) {
 		ext4_ext_drop_refs(path);
...
fs/ext4/ialloc.c
@@ -150,7 +150,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	}
 	ext4_lock_group(sb, block_group);
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+		if (block_group == 0) {
+			ext4_unlock_group(sb, block_group);
+			unlock_buffer(bh);
+			ext4_error(sb, "Inode bitmap for bg 0 marked "
+				   "uninitialized");
+			err = -EFSCORRUPTED;
+			goto out;
+		}
 		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
 				     sb->s_blocksize * 8, bh->b_data);
@@ -994,7 +1003,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 		/* recheck and clear flag under lock if we still need to */
 		ext4_lock_group(sb, group);
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+		if (ext4_has_group_desc_csum(sb) &&
+		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 			ext4_free_group_clusters_set(sb, gdp,
 				ext4_free_clusters_after_init(sb, group, gdp));
...
fs/ext4/inline.c
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
 	memset((void *)ext4_raw_inode(&is.iloc)->i_block,
 		0, EXT4_MIN_INLINE_DATA_SIZE);
+	memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
 	if (ext4_has_feature_extents(inode->i_sb)) {
 		if (S_ISDIR(inode->i_mode) ||
@@ -886,11 +887,11 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
 	flags |= AOP_FLAG_NOFS;
 	if (ret == -ENOSPC) {
+		ext4_journal_stop(handle);
 		ret = ext4_da_convert_inline_data_to_extent(mapping,
 							    inode,
 							    flags,
 							    fsdata);
-		ext4_journal_stop(handle);
 		if (ret == -ENOSPC &&
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
 			goto retry_journal;
@@ -1890,42 +1891,6 @@ int ext4_inline_data_fiemap(struct inode *inode,
 	return (error < 0 ? error : 0);
 }
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
-				  struct inode *inode,
-				  int needed)
-{
-	int error;
-	struct ext4_xattr_entry *entry;
-	struct ext4_inode *raw_inode;
-	struct ext4_iloc iloc;
-	error = ext4_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext4_raw_inode(&iloc);
-	entry = (struct ext4_xattr_entry *)((void *)raw_inode +
-					    EXT4_I(inode)->i_inline_off);
-	if (EXT4_XATTR_LEN(entry->e_name_len) +
-	    EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
-		error = -ENOSPC;
-		goto out;
-	}
-	error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
-	brelse(iloc.bh);
-	return error;
-}
 int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 {
 	handle_t *handle;
...
fs/ext4/inode.c
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
 				   map->m_len)) {
 		ext4_error_inode(inode, func, line, map->m_pblk,
-				 "lblock %lu mapped to illegal pblock "
-				 "(length %d)", (unsigned long) map->m_lblk,
-				 map->m_len);
+				 "lblock %lu mapped to illegal pblock %llu "
+				 "(length %d)", (unsigned long) map->m_lblk,
+				 map->m_pblk, map->m_len);
 		return -EFSCORRUPTED;
 	}
 	return 0;
@@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	int			inodes_per_block, inode_offset;
 	iloc->bh = NULL;
-	if (!ext4_valid_inum(sb, inode->i_ino))
+	if (inode->i_ino < EXT4_ROOT_INO ||
+	    inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
 		return -EFSCORRUPTED;
 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
...
fs/ext4/mballoc.c
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 		 * initialize bb_free to be able to skip
 		 * empty groups without initialization
 		 */
-		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+		if (ext4_has_group_desc_csum(sb) &&
+		    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 			meta_group_info[i]->bb_free =
 				ext4_free_clusters_after_init(sb, group, desc);
 		} else {
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 #endif
 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
 		      ac->ac_b_ex.fe_len);
-	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+	if (ext4_has_group_desc_csum(sb) &&
+	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 		ext4_free_group_clusters_set(sb, gdp,
 					     ext4_free_clusters_after_init(sb,
...
fs/ext4/super.c
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 static void ext4_handle_error(struct super_block *sb)
 {
+	if (test_opt(sb, WARN_ON_ERROR))
+		WARN_ON_ONCE(1);
 	if (sb_rdonly(sb))
 		return;
@@ -740,6 +743,9 @@ __acquires(bitlock)
 		va_end(args);
 	}
+	if (test_opt(sb, WARN_ON_ERROR))
+		WARN_ON_ONCE(1);
 	if (test_opt(sb, ERRORS_CONT)) {
 		ext4_commit_super(sb, 0);
 		return;
@@ -1371,7 +1377,8 @@ enum {
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
 	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
 	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
-	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+	Opt_nowarn_on_error, Opt_mblk_io_submit,
 	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
 	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
 	Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = {
 	{Opt_dax, "dax"},
 	{Opt_stripe, "stripe=%u"},
 	{Opt_delalloc, "delalloc"},
+	{Opt_warn_on_error, "warn_on_error"},
+	{Opt_nowarn_on_error, "nowarn_on_error"},
 	{Opt_lazytime, "lazytime"},
 	{Opt_nolazytime, "nolazytime"},
 	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
@@ -1602,6 +1611,8 @@ static const struct mount_opts {
 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
 	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
+	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
+	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
 	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
 	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb,
 			if (!sb_rdonly(sb))
 				return 0;
 		}
+		if (block_bitmap >= sb_block + 1 &&
+		    block_bitmap <= last_bg_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Block bitmap for group %u overlaps "
+				 "block group descriptors", i);
+			if (!sb_rdonly(sb))
+				return 0;
+		}
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 			       "Block bitmap for group %u not in group "
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb,
 			if (!sb_rdonly(sb))
 				return 0;
 		}
+		if (inode_bitmap >= sb_block + 1 &&
+		    inode_bitmap <= last_bg_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Inode bitmap for group %u overlaps "
+				 "block group descriptors", i);
+			if (!sb_rdonly(sb))
+				return 0;
+		}
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 			       "Inode bitmap for group %u not in group "
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb,
 			if (!sb_rdonly(sb))
 				return 0;
 		}
+		if (inode_table >= sb_block + 1 &&
+		    inode_table <= last_bg_block) {
+			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+				 "Inode table for group %u overlaps "
+				 "block group descriptors", i);
+			if (!sb_rdonly(sb))
+				return 0;
+		}
 		if (inode_table < first_block ||
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3097,13 +3133,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
 	struct ext4_group_desc *gdp = NULL;
+	if (!ext4_has_group_desc_csum(sb))
+		return ngroups;
 	for (group = 0; group < ngroups; group++) {
 		gdp = ext4_get_group_desc(sb, group, NULL);
 		if (!gdp)
 			continue;
-		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+			continue;
+		if (group != 0)
 			break;
+		ext4_error(sb, "Inode table for bg 0 marked as "
+			   "needing zeroing");
+		if (sb_rdonly(sb))
+			return ngroups;
 	}
 	return group;
@@ -3742,6 +3787,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			 le32_to_cpu(es->s_log_block_size));
 		goto failed_mount;
 	}
+	if (le32_to_cpu(es->s_log_cluster_size) >
+	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+		ext4_msg(sb, KERN_ERR,
+			 "Invalid log cluster size: %u",
+			 le32_to_cpu(es->s_log_cluster_size));
+		goto failed_mount;
+	}
 	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
 		ext4_msg(sb, KERN_ERR,
@@ -3806,6 +3858,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	} else {
 		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
 		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+		if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+			ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+				 sbi->s_first_ino);
+			goto failed_mount;
+		}
 		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
 		    (!is_power_of_2(sbi->s_inode_size)) ||
 		    (sbi->s_inode_size > blocksize)) {
@@ -3882,13 +3939,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			 "block size (%d)", clustersize, blocksize);
 		goto failed_mount;
 	}
-	if (le32_to_cpu(es->s_log_cluster_size) >
-	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-		ext4_msg(sb, KERN_ERR,
-			 "Invalid log cluster size: %u",
-			 le32_to_cpu(es->s_log_cluster_size));
-		goto failed_mount;
-	}
 	sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
 		le32_to_cpu(es->s_log_block_size);
 	sbi->s_clusters_per_group =
@@ -3909,10 +3959,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		}
 	} else {
 		if (clustersize != blocksize) {
-			ext4_warning(sb, "fragment/cluster size (%d) != "
-				     "block size (%d)", clustersize,
-				     blocksize);
-			clustersize = blocksize;
+			ext4_msg(sb, KERN_ERR,
+				 "fragment/cluster size (%d) != "
+				 "block size (%d)", clustersize, blocksize);
+			goto failed_mount;
 		}
 		if (sbi->s_blocks_per_group > blocksize * 8) {
 			ext4_msg(sb, KERN_ERR,
@@ -3966,6 +4016,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			 ext4_blocks_count(es));
 		goto failed_mount;
 	}
+	if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+	    (sbi->s_cluster_ratio == 1)) {
+		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+			 "block is 0 with a 1k block and cluster size");
+		goto failed_mount;
+	}
 	blocks_count = (ext4_blocks_count(es) -
 			le32_to_cpu(es->s_first_data_block) +
 			EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -4001,6 +4058,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		ret = -ENOMEM;
 		goto failed_mount;
 	}
+	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+	    le32_to_cpu(es->s_inodes_count)) {
+		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+			 le32_to_cpu(es->s_inodes_count),
+			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+		ret = -EINVAL;
+		goto failed_mount;
+	}
 	bgl_lock_init(sbi->s_blockgroup_lock);
@@ -4736,6 +4801,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	if (!sbh || block_device_ejected(sb))
 		return error;
+	/*
+	 * The superblock bh should be mapped, but it might not be if the
+	 * device was hot-removed. Not much we can do but fail the I/O.
+	 */
+	if (!buffer_mapped(sbh))
+		return error;
 	/*
 	 * If the file system is mounted read-only, don't update the
 	 * superblock write time. This avoids updating the superblock
...
fs/ext4/xattr.c
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
 {
 	int error = -EFSCORRUPTED;
-	if (buffer_verified(bh))
-		return 0;
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		goto errout;
+	if (buffer_verified(bh))
+		return 0;
 	error = -EFSBADCRC;
 	if (!ext4_xattr_block_csum_verify(inode, bh))
 		goto errout;
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 			 handle_t *handle, struct inode *inode,
 			 bool is_block)
 {
-	struct ext4_xattr_entry *last;
+	struct ext4_xattr_entry *last, *next;
 	struct ext4_xattr_entry *here = s->here;
 	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
 	int in_inode = i->in_inode;
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 	/* Compute min_offs and last. */
 	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+	for (; !IS_LAST_ENTRY(last); last = next) {
+		next = EXT4_XATTR_NEXT(last);
+		if ((void *)next >= s->end) {
+			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+			ret = -EFSCORRUPTED;
+			goto out;
+		}
 		if (!last->e_value_inum && last->e_value_size) {
 			size_t offs = le16_to_cpu(last->e_value_offs);
 			if (offs < min_offs)
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
 	if (EXT4_I(inode)->i_extra_isize == 0)
 		return -ENOSPC;
 	error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
-	if (error) {
-		if (error == -ENOSPC &&
-		    ext4_has_inline_data(inode)) {
-			error = ext4_try_to_evict_inline_data(handle, inode,
-					EXT4_XATTR_LEN(strlen(i->name) +
-					EXT4_XATTR_SIZE(i->value_len)));
-			if (error)
-				return error;
-			error = ext4_xattr_ibody_find(inode, i, is);
-			if (error)
-				return error;
-			error = ext4_xattr_set_entry(i, s, handle, inode,
-						     false /* is_block */);
-		}
-		if (error)
-			return error;
-	}
+	if (error)
+		return error;
 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
 	if (!IS_LAST_ENTRY(s->first)) {
 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
 	last = IFIRST(header);
 	/* Find the entry best suited to be pushed into EA block */
 	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+		/* never move system.data out of the inode */
+		if ((last->e_name_len == 4) &&
+		    (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+		    !memcmp(last->e_name, "data", 4))
+			continue;
 		total_size = EXT4_XATTR_LEN(last->e_name_len);
 		if (!last->e_value_inum)
 			total_size += EXT4_XATTR_SIZE(
...
fs/jbd2/transaction.c
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 		if (jh->b_transaction == transaction &&
 		    jh->b_jlist != BJ_Metadata) {
 			jbd_lock_bh_state(bh);
+			if (jh->b_transaction == transaction &&
+			    jh->b_jlist != BJ_Metadata)
+				pr_err("JBD2: assertion failure: h_type=%u "
+				       "h_line_no=%u block_no=%llu jlist=%u\n",
+				       handle->h_type, handle->h_line_no,
+				       (unsigned long long) bh->b_blocknr,
+				       jh->b_jlist);
 			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
 					jh->b_jlist == BJ_Metadata);
 			jbd_unlock_bh_state(bh);
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 		 * of the transaction. This needs to be done
 		 * once a transaction -bzzz
 		 */
-		jh->b_modified = 1;
 		if (handle->h_buffer_credits <= 0) {
 			ret = -ENOSPC;
 			goto out_unlock_bh;
 		}
+		jh->b_modified = 1;
 		handle->h_buffer_credits--;
 	}
...