Commit 1cf29683 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  jbd2: fix race between write_metadata_buffer and get_write_access
  ext4: Fix ext4_mb_initialize_context() to initialize all fields
  ext4: fix null handler of ioctls in no journal mode
  ext4: Fix buffer head reference leak in no-journal mode
  ext4: Move __ext4_journalled_writepage() to avoid forward declaration
  ext4: Fix mmap/truncate race when blocksize < pagesize && !nodelalloc
  ext4: Fix mmap/truncate race when blocksize < pagesize && delayed allocation
  ext4: Don't look at buffer_heads outside i_size.
  ext4: Fix goal inum check in the inode allocator
  ext4: fix no journal corruption with locale-gen
  ext4: Calculate required journal credits for inserting an extent properly
  ext4: Fix truncation of symlinks after failed write
  jbd2: Fix a race between checkpointing code and journal_get_write_access()
  ext4: Use rcu_barrier() on module unload.
  ext4: naturally align struct ext4_allocation_request
  ext4: mark several more functions in mballoc.c as noinline
  ext4: Fix potential reclaim deadlock when truncating partial block
  jbd2: Remove GFP_ATOMIC kmalloc from inside spinlock critical region
  ext4: Fix type warning on 64-bit platforms in tracing events header
parents 4a390e07 96577c43
@@ -93,20 +93,20 @@ typedef unsigned int ext4_group_t;
 struct ext4_allocation_request {
 	/* target inode for block we're allocating */
 	struct inode *inode;
-	/* how many blocks we want to allocate */
-	unsigned int len;
 	/* logical block in target inode */
 	ext4_lblk_t logical;
-	/* phys. target (a hint) */
-	ext4_fsblk_t goal;
 	/* the closest logical allocated block to the left */
 	ext4_lblk_t lleft;
-	/* phys. block for ^^^ */
-	ext4_fsblk_t pleft;
 	/* the closest logical allocated block to the right */
 	ext4_lblk_t lright;
-	/* phys. block for ^^^ */
+	/* phys. target (a hint) */
+	ext4_fsblk_t goal;
+	/* phys. block for the closest logical allocated block to the left */
+	ext4_fsblk_t pleft;
+	/* phys. block for the closest logical allocated block to the right */
 	ext4_fsblk_t pright;
+	/* how many blocks we want to allocate */
+	unsigned int len;
 	/* flags. see above EXT4_MB_HINT_* */
 	unsigned int flags;
 };
...
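The reordering above is the whole of the "naturally align struct ext4_allocation_request" patch: grouping the 64-bit ext4_fsblk_t members together keeps the compiler from inserting padding between alternating 4- and 8-byte fields. A minimal userspace sketch (illustrative field names echoing the struct, not the real definition) shows the effect on a typical 64-bit ABI:

#include <stdio.h>
#include <stdint.h>

/* Interleaved 4-byte and 8-byte members: the compiler pads before
 * each 8-byte member so it starts on an 8-byte boundary. */
struct interleaved {
	uint32_t len;	/* 4 bytes + 4 bytes padding */
	uint64_t goal;	/* 8 bytes */
	uint32_t lleft;	/* 4 bytes + 4 bytes padding */
	uint64_t pleft;	/* 8 bytes */
};

/* Same members grouped by size: no internal padding needed. */
struct grouped {
	uint64_t goal;
	uint64_t pleft;
	uint32_t len;
	uint32_t lleft;
};

int main(void)
{
	printf("interleaved: %zu bytes\n", sizeof(struct interleaved)); /* 32 */
	printf("grouped:     %zu bytes\n", sizeof(struct grouped));     /* 24 */
	return 0;
}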
@@ -43,6 +43,8 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
 			ext4_journal_abort_handle(where, __func__, bh,
 						  handle, err);
 	}
+	else
+		brelse(bh);
 	return err;
 }
@@ -57,6 +59,8 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
 			ext4_journal_abort_handle(where, __func__, bh,
 						  handle, err);
 	}
+	else
+		brelse(bh);
 	return err;
 }
...
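Both hunks implement the same contract for no-journal mode, which the new header comments below spell out: the function must consume the caller's buffer_head reference on every path, and before this fix the invalid-handle path leaked it. A rough userspace analog of that contract (buf, buf_put, and forget are made-up stand-ins, not kernel API):

#include <stdlib.h>

struct buf { int refs; };

static void buf_put(struct buf *b)	/* stand-in for brelse() */
{
	if (--b->refs == 0)
		free(b);
}

static int journal_forget(struct buf *b)	/* stand-in for jbd2_journal_forget() */
{
	buf_put(b);	/* the journal layer drops the reference */
	return 0;
}

/* Consumes the caller's reference on every return path. */
static int forget(struct buf *b, int handle_valid)
{
	if (handle_valid)
		return journal_forget(b);
	/* no-journal path: without this put, the reference leaked */
	buf_put(b);
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return 1;
	b->refs = 1;
	return forget(b, 0);
}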
@@ -131,9 +131,11 @@ int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
 int __ext4_journal_get_write_access(const char *where, handle_t *handle,
 				struct buffer_head *bh);
 
+/* When called with an invalid handle, this will still do a put on the BH */
 int __ext4_journal_forget(const char *where, handle_t *handle,
 				struct buffer_head *bh);
 
+/* When called with an invalid handle, this will still do a put on the BH */
 int __ext4_journal_revoke(const char *where, handle_t *handle,
 				ext4_fsblk_t blocknr, struct buffer_head *bh);
@@ -281,10 +283,10 @@ static inline int ext4_should_order_data(struct inode *inode)
 
 static inline int ext4_should_writeback_data(struct inode *inode)
 {
-	if (EXT4_JOURNAL(inode) == NULL)
-		return 0;
 	if (!S_ISREG(inode->i_mode))
 		return 0;
+	if (EXT4_JOURNAL(inode) == NULL)
+		return 1;
 	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
 		return 0;
 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
...
@@ -1977,6 +1977,7 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
 		 */
 		/* 1 bitmap, 1 block group descriptor */
 		ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
+		return ret;
 	}
 }
...
@@ -833,7 +833,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
 	if (!goal)
 		goal = sbi->s_inode_goal;
 
-	if (goal && goal < le32_to_cpu(sbi->s_es->s_inodes_count)) {
+	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
 		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
 		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
 		ret2 = 0;
...
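The `<` to `<=` change matters because inode numbers are 1-based: the largest valid goal is exactly s_inodes_count, and the old check wrongly rejected it. A small standalone illustration of the same arithmetic, with made-up geometry rather than real ext4 values:

#include <stdio.h>

#define INODES_PER_GROUP 8192
#define INODES_COUNT     32768	/* valid inode numbers: 1..32768 */

int main(void)
{
	unsigned int goal = INODES_COUNT;	/* the last valid inode */

	/* 1-based numbering: goal == s_inodes_count is still in range,
	 * so the bound check must be <=, not <. */
	if (goal && goal <= INODES_COUNT) {
		unsigned int group = (goal - 1) / INODES_PER_GROUP;
		unsigned int ino   = (goal - 1) % INODES_PER_GROUP;
		printf("goal %u -> group %u, index %u\n", goal, group, ino);
	}
	return 0;
}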
This diff is collapsed.
@@ -191,7 +191,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case EXT4_IOC_GROUP_EXTEND: {
 		ext4_fsblk_t n_blocks_count;
 		struct super_block *sb = inode->i_sb;
-		int err, err2;
+		int err, err2=0;
 
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
@@ -204,9 +204,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			return err;
 
 		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
-		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+		if (EXT4_SB(sb)->s_journal) {
+			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+		}
 		if (err == 0)
 			err = err2;
 		mnt_drop_write(filp->f_path.mnt);
@@ -251,7 +253,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case EXT4_IOC_GROUP_ADD: {
 		struct ext4_new_group_data input;
 		struct super_block *sb = inode->i_sb;
-		int err, err2;
+		int err, err2=0;
 
 		if (!capable(CAP_SYS_RESOURCE))
 			return -EPERM;
@@ -265,9 +267,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			return err;
 
 		err = ext4_group_add(sb, &input);
-		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
-		err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
-		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+		if (EXT4_SB(sb)->s_journal) {
+			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+		}
 		if (err == 0)
 			err = err2;
 		mnt_drop_write(filp->f_path.mnt);
...
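Both ioctl hunks apply the same no-journal fix: the jbd2 calls are skipped entirely when s_journal is NULL, so err2 must start at 0 or the final "err = err2" would read an uninitialized value. A stripped-down sketch of that control flow (plain C; flush() is a stand-in for the jbd2_journal_flush() call):

#include <stdio.h>

static int flush(void)	/* simulated jbd2_journal_flush() result */
{
	return 0;
}

static int extend_and_flush(int have_journal)
{
	int err = 0, err2 = 0;	/* err2 = 0 is the fix */

	/* ... the resize operation would run here and set err ... */
	if (have_journal)
		err2 = flush();	/* only assigned on this branch */
	if (err == 0)
		err = err2;	/* reads garbage without the initializer */
	return err;
}

int main(void)
{
	printf("no journal: %d\n", extend_and_flush(0));
	return 0;
}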
@@ -657,7 +657,8 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
 	}
 }
 
-static void ext4_mb_generate_buddy(struct super_block *sb,
+static noinline_for_stack
+void ext4_mb_generate_buddy(struct super_block *sb,
 				void *buddy, void *bitmap, ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
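This and the following mballoc.c hunks only split the declarations so each helper can be marked noinline_for_stack, which keeps the compiler from inlining several large stack frames into one oversized caller frame. In the kernel the macro expands to the noinline attribute; a compilable illustration (scan_group is a made-up example, not the ext4 function):

#include <stdio.h>

/* Userspace stand-in for the kernel's noinline_for_stack macro. */
#define noinline_for_stack __attribute__((noinline))

static noinline_for_stack
int scan_group(int group)
{
	char scratch[512];	/* large local buffer stays in this frame */

	scratch[0] = (char)group;
	return scratch[0];	/* without noinline, scratch could be
				 * merged into the caller's frame */
}

int main(void)
{
	printf("%d\n", scan_group(1));
	return 0;
}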
@@ -1480,7 +1481,8 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
 	ext4_mb_check_limits(ac, e4b, 0);
 }
 
-static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+static noinline_for_stack
+int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
 					struct ext4_buddy *e4b)
 {
 	struct ext4_free_extent ex = ac->ac_b_ex;
@@ -1507,7 +1509,8 @@ static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
 	return 0;
 }
 
-static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+static noinline_for_stack
+int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
 				struct ext4_buddy *e4b)
 {
 	ext4_group_t group = ac->ac_g_ex.fe_group;
@@ -1566,7 +1569,8 @@ static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
  * The routine scans buddy structures (not bitmap!) from given order
  * to max order and tries to find big enough chunk to satisfy the req
  */
-static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
 					struct ext4_buddy *e4b)
 {
 	struct super_block *sb = ac->ac_sb;
@@ -1609,7 +1613,8 @@ static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
  * In order to optimize scanning, caller must pass number of
  * free blocks in the group, so the routine can know upper limit.
  */
-static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
 					struct ext4_buddy *e4b)
 {
 	struct super_block *sb = ac->ac_sb;
@@ -1668,7 +1673,8 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
  * we try to find stripe-aligned chunks for stripe-size requests
  * XXX should do so at least for multiples of stripe size as well
  */
-static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
+static noinline_for_stack
+void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 					struct ext4_buddy *e4b)
 {
 	struct super_block *sb = ac->ac_sb;
@@ -1831,7 +1837,8 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
 	}
 }
 
-static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+static noinline_for_stack
+int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
 {
 	int ret;
@@ -2902,7 +2909,11 @@ int __init init_ext4_mballoc(void)
 
 void exit_ext4_mballoc(void)
 {
-	/* XXX: synchronize_rcu(); */
+	/*
+	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
+	 * before destroying the slab cache.
+	 */
+	rcu_barrier();
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
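The new comment states the rule this hunk enforces: objects freed with call_rcu() back into ext4_pspace_cachep may still have callbacks pending at module unload, and kmem_cache_destroy() must not run until they have finished. A userspace analog, with a thread standing in for the deferred call_rcu() callback and pthread_join() standing in for rcu_barrier():

#include <pthread.h>
#include <stdlib.h>

static pthread_t deferred;

static void *deferred_free(void *obj)	/* analog of the call_rcu() callback */
{
	free(obj);
	return NULL;
}

int main(void)
{
	void *obj = malloc(64);

	if (!obj)
		return 1;
	pthread_create(&deferred, NULL, deferred_free, obj);

	/* Analog of rcu_barrier(): wait for every pending callback... */
	pthread_join(deferred, NULL);

	/* ...and only then tear down the allocator the callbacks free
	 * into (kmem_cache_destroy() in the kernel). */
	return 0;
}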
@@ -3457,7 +3468,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
  * used in in-core bitmap. buddy must be generated from this bitmap
  * Need to be called with ext4 group lock held
  */
-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+static noinline_for_stack
+void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
@@ -4215,14 +4227,9 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
 
 	/* set up allocation goals */
+	memset(ac, 0, sizeof(struct ext4_allocation_context));
 	ac->ac_b_ex.fe_logical = ar->logical;
-	ac->ac_b_ex.fe_group = 0;
-	ac->ac_b_ex.fe_start = 0;
-	ac->ac_b_ex.fe_len = 0;
 	ac->ac_status = AC_STATUS_CONTINUE;
-	ac->ac_groups_scanned = 0;
-	ac->ac_ex_scanned = 0;
-	ac->ac_found = 0;
 	ac->ac_sb = sb;
 	ac->ac_inode = ar->inode;
 	ac->ac_o_ex.fe_logical = ar->logical;
@@ -4233,15 +4240,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 	ac->ac_g_ex.fe_group = group;
 	ac->ac_g_ex.fe_start = block;
 	ac->ac_g_ex.fe_len = len;
-	ac->ac_f_ex.fe_len = 0;
 	ac->ac_flags = ar->flags;
-	ac->ac_2order = 0;
-	ac->ac_criteria = 0;
-	ac->ac_pa = NULL;
-	ac->ac_bitmap_page = NULL;
-	ac->ac_buddy_page = NULL;
-	ac->alloc_semp = NULL;
-	ac->ac_lg = NULL;
 
 	/* we have to define context: we'll we work with a file or
 	 * locality group. this is a policy, actually */
@@ -4509,10 +4508,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	}
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac) {
-		ac->ac_sb = sb;
-		ac->ac_inode = ar->inode;
-	} else {
+	if (!ac) {
 		ar->len = 0;
 		*errp = -ENOMEM;
 		goto out1;
...
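The ext4_mb_initialize_context() hunks replace piecemeal zeroing with a single memset, which is what "initialize all fields" in the merge summary refers to: every field not explicitly listed, including any added later, now starts at zero, and the redundant re-initialization in ext4_mb_new_blocks() can go. The pattern in miniature (ctx is a toy struct, not the real allocation context):

#include <stdio.h>
#include <string.h>

struct ctx {
	int status;
	int found;
	void *pa;	/* easy to miss when zeroing field by field */
};

static void init_ctx(struct ctx *c, int status)
{
	/* Zero everything first so no field is ever left stale,
	 * then set the few fields that need real values. */
	memset(c, 0, sizeof(*c));
	c->status = status;
}

int main(void)
{
	struct ctx c;

	init_ctx(&c, 1);
	printf("status=%d found=%d pa=%p\n", c.status, c.found, c.pa);
	return 0;
}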
@@ -297,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	unsigned int new_offset;
 	struct buffer_head *bh_in = jh2bh(jh_in);
 	struct jbd2_buffer_trigger_type *triggers;
+	journal_t *journal = transaction->t_journal;
 
 	/*
 	 * The buffer really shouldn't be locked: only the current committing
@@ -310,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
 	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+	/* keep subsequent assertions sane */
+	new_bh->b_state = 0;
+	init_buffer(new_bh, NULL, NULL);
+	atomic_set(&new_bh->b_count, 1);
+	new_jh = jbd2_journal_add_journal_head(new_bh);	/* This sleeps */
 
 	/*
 	 * If a new transaction has already done a buffer copy-out, then
@@ -388,14 +394,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 		kunmap_atomic(mapped_data, KM_USER0);
 	}
 
-	/* keep subsequent assertions sane */
-	new_bh->b_state = 0;
-	init_buffer(new_bh, NULL, NULL);
-	atomic_set(&new_bh->b_count, 1);
-	jbd_unlock_bh_state(bh_in);
-
-	new_jh = jbd2_journal_add_journal_head(new_bh);	/* This sleeps */
-
 	set_bh_page(new_bh, new_page, new_offset);
 	new_jh->b_transaction = NULL;
 	new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -412,7 +410,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	 * copying is moved to the transaction's shadow queue.
 	 */
 	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-	jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+	spin_lock(&journal->j_list_lock);
+	__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+	spin_unlock(&journal->j_list_lock);
+	jbd_unlock_bh_state(bh_in);
 	JBUFFER_TRACE(new_jh, "file as BJ_IO");
 	jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
@@ -2410,6 +2412,7 @@ const char *jbd2_dev_to_name(dev_t device)
 	int	i = hash_32(device, CACHE_SIZE_BITS);
 	char	*ret;
 	struct block_device *bd;
+	static struct devname_cache *new_dev;
 
 	rcu_read_lock();
 	if (devcache[i] && devcache[i]->device == device) {
@@ -2419,20 +2422,20 @@ const char *jbd2_dev_to_name(dev_t device)
 	}
 	rcu_read_unlock();
 
+	new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
+	if (!new_dev)
+		return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
 	spin_lock(&devname_cache_lock);
 	if (devcache[i]) {
 		if (devcache[i]->device == device) {
+			kfree(new_dev);
 			ret = devcache[i]->devname;
 			spin_unlock(&devname_cache_lock);
 			return ret;
 		}
 		call_rcu(&devcache[i]->rcu, free_devcache);
 	}
-	devcache[i] = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
-	if (!devcache[i]) {
-		spin_unlock(&devname_cache_lock);
-		return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
-	}
+	devcache[i] = new_dev;
 	devcache[i]->device = device;
 	bd = bdget(device);
 	if (bd) {
...
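The jbd2_dev_to_name() change follows the standard recipe for removing an allocation from a spinlock critical region: allocate with a sleeping allocator before taking the lock, re-check the cache under the lock, and free the spare copy if another CPU won the race. A userspace analog with a mutex in place of the spinlock (cache_name and the other names are invented for the sketch):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static char *cached;	/* stands in for devcache[i] */

static const char *cache_name(const char *name)
{
	char *new_entry = strdup(name);	/* may "sleep": GFP_KERNEL analog */
	const char *ret;

	if (!new_entry)
		return "NODEV-ALLOCFAILURE";	/* something non-NULL */

	pthread_mutex_lock(&cache_lock);
	if (cached)
		free(new_entry);	/* lost the race: drop our copy */
	else
		cached = new_entry;	/* won the race: install it */
	ret = cached;
	pthread_mutex_unlock(&cache_lock);
	return ret;
}

int main(void)
{
	printf("%s\n", cache_name("sda1"));
	printf("%s\n", cache_name("sda1"));
	return 0;
}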
@@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal)
 	wake_up(&journal->j_wait_transaction_locked);
 }
 
-/*
- * Report any unexpected dirty buffers which turn up.  Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible.  #
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+static void warn_dirty_buffer(struct buffer_head *bh)
 {
-	int jlist;
-
-	/* If this buffer is one which might reasonably be dirty
-	 * --- ie. data, or not part of this journal --- then
-	 * we're OK to leave it alone, but otherwise we need to
-	 * move the dirty bit to the journal's own internal
-	 * JBDDirty bit. */
-	jlist = jh->b_jlist;
+	char b[BDEVNAME_SIZE];
 
-	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-	    jlist == BJ_Shadow || jlist == BJ_Forget) {
-		struct buffer_head *bh = jh2bh(jh);
-
-		if (test_clear_buffer_dirty(bh))
-			set_buffer_jbddirty(bh);
-	}
+	printk(KERN_WARNING
+	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+	       "There's a risk of filesystem corruption in case of system "
+	       "crash.\n",
+	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
 /*
@@ -593,14 +574,16 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 			if (jh->b_next_transaction)
 				J_ASSERT_JH(jh, jh->b_next_transaction ==
 							transaction);
+			warn_dirty_buffer(bh);
 		}
 		/*
 		 * In any case we need to clean the dirty flag and we must
 		 * do it under the buffer lock to be sure we don't race
 		 * with running write-out.
 		 */
-		JBUFFER_TRACE(jh, "Unexpected dirty buffer");
-		jbd_unexpected_dirty_buffer(jh);
+		JBUFFER_TRACE(jh, "Journalling dirty buffer");
+		clear_buffer_dirty(bh);
+		set_buffer_jbddirty(bh);
 	}
 
 	unlock_buffer(bh);
@@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
 
 	if (jh->b_transaction == NULL) {
+		/*
+		 * Previous jbd2_journal_forget() could have left the buffer
+		 * with jbddirty bit set because it was being committed. When
+		 * the commit finished, we've filed the buffer for
+		 * checkpointing and marked it dirty. Now we are reallocating
+		 * the buffer so the transaction freeing it must have
+		 * committed and so it's safe to clear the dirty bit.
+		 */
+		clear_buffer_dirty(jh2bh(jh));
 		jh->b_transaction = transaction;
 
 		/* first access by this transaction */
@@ -1644,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 
 	if (jh->b_cp_transaction) {
 		JBUFFER_TRACE(jh, "on running+cp transaction");
+		/*
+		 * We don't want to write the buffer anymore, clear the
+		 * bit so that we don't confuse checks in
+		 * __journal_file_buffer
+		 */
+		clear_buffer_dirty(bh);
 		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
-		clear_buffer_jbddirty(bh);
 		may_free = 0;
 	} else {
 		JBUFFER_TRACE(jh, "on running transaction");
@@ -1896,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
 	if (jh->b_transaction && jh->b_jlist == jlist)
 		return;
 
-	/* The following list of buffer states needs to be consistent
-	 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
-	 * state. */
-
 	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
 	    jlist == BJ_Shadow || jlist == BJ_Forget) {
+		/*
+		 * For metadata buffers, we track dirty bit in buffer_jbddirty
+		 * instead of buffer_dirty. We should not see a dirty bit set
+		 * here because we clear it in do_get_write_access but e.g.
+		 * tune2fs can modify the sb and set the dirty bit at any time
+		 * so we try to gracefully handle that.
+		 */
+		if (buffer_dirty(bh))
+			warn_dirty_buffer(bh);
 		if (test_clear_buffer_dirty(bh) ||
 		    test_clear_buffer_jbddirty(bh))
 			was_dirty = 1;
...
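The transaction.c changes all revolve around the split between the VM's dirty bit and the journal's private jbddirty bit: do_get_write_access() now moves the former to the latter explicitly, and __jbd2_journal_file_buffer() merely warns when a metadata buffer unexpectedly shows up dirty (e.g. after tune2fs touches the live superblock). A toy model of the test-and-clear handoff (plain C, not kernel code; the real bits live in bh->b_state):

#include <stdio.h>

enum { DIRTY = 1 << 0, JBDDIRTY = 1 << 1 };

/* Returns the old value of the bit while clearing it, mirroring
 * test_clear_buffer_dirty(). */
static int test_clear(unsigned *state, unsigned bit)
{
	int was_set = (*state & bit) != 0;

	*state &= ~bit;
	return was_set;
}

int main(void)
{
	unsigned state = DIRTY;

	/* do_get_write_access() after the fix: move DIRTY -> JBDDIRTY */
	if (test_clear(&state, DIRTY))
		state |= JBDDIRTY;
	printf("dirty=%d jbddirty=%d\n",
	       !!(state & DIRTY), !!(state & JBDDIRTY));
	return 0;
}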
@@ -34,7 +34,8 @@ TRACE_EVENT(ext4_free_inode,
 
 	TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
 		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
-		  __entry->uid, __entry->gid, __entry->blocks)
+		  __entry->uid, __entry->gid,
+		  (unsigned long long) __entry->blocks)
 );
 
 TRACE_EVENT(ext4_request_inode,
@@ -189,7 +190,7 @@ TRACE_EVENT(ext4_journalled_write_end,
 		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_da_writepage,
+TRACE_EVENT(ext4_writepage,
 	TP_PROTO(struct inode *inode, struct page *page),
 
 	TP_ARGS(inode, page),
@@ -341,49 +342,6 @@ TRACE_EVENT(ext4_da_write_end,
 		  __entry->copied)
 );
 
-TRACE_EVENT(ext4_normal_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev	)
-		__field(	ino_t,	ino	)
-		__field(	pgoff_t, index	)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
-TRACE_EVENT(ext4_journalled_writepage,
-	TP_PROTO(struct inode *inode, struct page *page),
-
-	TP_ARGS(inode, page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev	)
-		__field(	ino_t,	ino	)
-		__field(	pgoff_t, index	)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->index	= page->index;
-	),
-
-	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
-);
-
 TRACE_EVENT(ext4_discard_blocks,
 	TP_PROTO(struct super_block *sb, unsigned long long blk,
 		 unsigned long long count),
...
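The trace-header change is the usual printf portability idiom: __entry->blocks has a type whose width differs across architectures, so it is cast to unsigned long long to match the %llu specifier and silence the 64-bit warning. The same idiom in plain C:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	off_t blocks = 8;	/* width varies across platforms/ABIs */

	/* Cast to the exact type %llu expects; passing blocks
	 * directly warns (or misbehaves) where off_t != long long. */
	printf("blocks %llu\n", (unsigned long long) blocks);
	return 0;
}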