Commit 3e1e5f50 authored by Eric Sandeen, committed by Theodore Ts'o

ext4: don't use ext4_allocation_contexts for tracing

Many tracepoints were populating an ext4_allocation_context
to pass in, but this requires a slab allocation even when
tracepoints are off.  In fact, 4 of 5 of these allocations
were only for tracing.  In addition, we were only using a
small fraction of the 144 bytes of this structure for this
purpose.

We can do away with all these alloc/frees of the ac and
simply pass in the bits we care about, instead.
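
As a rough illustration of the pattern, a standalone userspace sketch follows;
the types and function names are simplified stand-ins for
ext4_allocation_context and the mballoc trace hooks, not actual kernel code.

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for the kernel types; illustrative only */
struct sbk  { int dev; };
struct inok { unsigned long i_ino; };

/* old style: a heap-allocated context carried a few fields into the hook */
struct alloc_ctx {
	struct sbk  *ac_sb;
	struct inok *ac_inode;
	unsigned     fe_group, fe_start, fe_len;
};

static void trace_discard_old(const struct alloc_ctx *ac)
{
	if (ac)
		printf("discard dev %d ino %lu %u/%u/%u\n",
		       ac->ac_sb->dev,
		       ac->ac_inode ? ac->ac_inode->i_ino : 0UL,
		       ac->fe_group, ac->fe_start, ac->fe_len);
}

/* new style: pass only the bits the trace hook actually needs */
static void trace_discard_new(const struct sbk *sb, const struct inok *inode,
			      unsigned group, unsigned start, unsigned len)
{
	printf("discard dev %d ino %lu %u/%u/%u\n",
	       sb->dev, inode ? inode->i_ino : 0UL, group, start, len);
}

int main(void)
{
	struct sbk sb = { .dev = 8 };
	struct inok inode = { .i_ino = 42 };

	/* before: the allocation happens whether or not tracing is enabled */
	struct alloc_ctx *ac = malloc(sizeof(*ac));
	if (ac) {
		ac->ac_sb = &sb;
		ac->ac_inode = &inode;
		ac->fe_group = 1;
		ac->fe_start = 100;
		ac->fe_len = 8;
	}
	trace_discard_old(ac);
	free(ac);

	/* after: no allocation, the arguments are passed directly */
	trace_discard_new(&sb, &inode, 1, 100, 8);
	return 0;
}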

I tested this by turning on tracing and running through
xfstests on x86_64.  I did not actually do anything with
the trace output, however.
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 4d547616
@@ -3591,8 +3591,7 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
  */
 static noinline_for_stack int
 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
-			struct ext4_prealloc_space *pa,
-			struct ext4_allocation_context *ac)
+			struct ext4_prealloc_space *pa)
 {
 	struct super_block *sb = e4b->bd_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -3610,11 +3609,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
 	end = bit + pa->pa_len;
 
-	if (ac) {
-		ac->ac_sb = sb;
-		ac->ac_inode = pa->pa_inode;
-	}
-
 	while (bit < end) {
 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
 		if (bit >= end)
@@ -3625,16 +3619,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 			 (unsigned) next - bit, (unsigned) group);
 		free += next - bit;
 
-		if (ac) {
-			ac->ac_b_ex.fe_group = group;
-			ac->ac_b_ex.fe_start = bit;
-			ac->ac_b_ex.fe_len = next - bit;
-			ac->ac_b_ex.fe_logical = 0;
-			trace_ext4_mballoc_discard(ac);
-		}
-
-		trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit,
-					       next - bit);
+		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
+		trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa,
+					       grp_blk_start + bit, next - bit);
 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
 		bit = next + 1;
 	}
@@ -3657,29 +3644,19 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 static noinline_for_stack int
 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
-				struct ext4_prealloc_space *pa,
-				struct ext4_allocation_context *ac)
+				struct ext4_prealloc_space *pa)
 {
 	struct super_block *sb = e4b->bd_sb;
 	ext4_group_t group;
 	ext4_grpblk_t bit;
 
-	trace_ext4_mb_release_group_pa(sb, ac, pa);
+	trace_ext4_mb_release_group_pa(sb, pa);
 	BUG_ON(pa->pa_deleted == 0);
 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
 
-	if (ac) {
-		ac->ac_sb = sb;
-		ac->ac_inode = NULL;
-		ac->ac_b_ex.fe_group = group;
-		ac->ac_b_ex.fe_start = bit;
-		ac->ac_b_ex.fe_len = pa->pa_len;
-		ac->ac_b_ex.fe_logical = 0;
-		trace_ext4_mballoc_discard(ac);
-	}
-
 	return 0;
 }
@@ -3700,7 +3677,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
 	struct buffer_head *bitmap_bh = NULL;
 	struct ext4_prealloc_space *pa, *tmp;
-	struct ext4_allocation_context *ac;
 	struct list_head list;
 	struct ext4_buddy e4b;
 	int err;
@@ -3729,9 +3705,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
 
 	INIT_LIST_HEAD(&list);
-	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac)
-		ac->ac_sb = sb;
 repeat:
 	ext4_lock_group(sb, group);
 	list_for_each_entry_safe(pa, tmp,
@@ -3786,9 +3759,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 		spin_unlock(pa->pa_obj_lock);
 
 		if (pa->pa_type == MB_GROUP_PA)
-			ext4_mb_release_group_pa(&e4b, pa, ac);
+			ext4_mb_release_group_pa(&e4b, pa);
 		else
-			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
 
 		list_del(&pa->u.pa_tmp_list);
 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
@@ -3796,8 +3769,6 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 out:
 	ext4_unlock_group(sb, group);
-	if (ac)
-		kmem_cache_free(ext4_ac_cachep, ac);
 	ext4_mb_unload_buddy(&e4b);
 	put_bh(bitmap_bh);
 	return free;
@@ -3818,7 +3789,6 @@ void ext4_discard_preallocations(struct inode *inode)
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *bitmap_bh = NULL;
 	struct ext4_prealloc_space *pa, *tmp;
-	struct ext4_allocation_context *ac;
 	ext4_group_t group = 0;
 	struct list_head list;
 	struct ext4_buddy e4b;
@@ -3834,11 +3804,6 @@ void ext4_discard_preallocations(struct inode *inode)
 	INIT_LIST_HEAD(&list);
 
-	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac) {
-		ac->ac_sb = sb;
-		ac->ac_inode = inode;
-	}
 repeat:
 	/* first, collect all pa's in the inode */
 	spin_lock(&ei->i_prealloc_lock);
@@ -3908,7 +3873,7 @@ void ext4_discard_preallocations(struct inode *inode)
 		ext4_lock_group(sb, group);
 		list_del(&pa->pa_group_list);
-		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
+		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
 		ext4_unlock_group(sb, group);
 
 		ext4_mb_unload_buddy(&e4b);
@@ -3917,8 +3882,6 @@ void ext4_discard_preallocations(struct inode *inode)
 		list_del(&pa->u.pa_tmp_list);
 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
 	}
-	if (ac)
-		kmem_cache_free(ext4_ac_cachep, ac);
 }
 
 /*
@@ -4116,14 +4079,10 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 	struct ext4_buddy e4b;
 	struct list_head discard_list;
 	struct ext4_prealloc_space *pa, *tmp;
-	struct ext4_allocation_context *ac;
 
 	mb_debug(1, "discard locality group preallocation\n");
 
 	INIT_LIST_HEAD(&discard_list);
-	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac)
-		ac->ac_sb = sb;
 
 	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
@@ -4175,15 +4134,13 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 		}
 		ext4_lock_group(sb, group);
 		list_del(&pa->pa_group_list);
-		ext4_mb_release_group_pa(&e4b, pa, ac);
+		ext4_mb_release_group_pa(&e4b, pa);
 		ext4_unlock_group(sb, group);
 		ext4_mb_unload_buddy(&e4b);
 
 		list_del(&pa->u.pa_tmp_list);
 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
 	}
-	if (ac)
-		kmem_cache_free(ext4_ac_cachep, ac);
 }
 
 /*
@@ -4547,7 +4504,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct super_block *sb = inode->i_sb;
-	struct ext4_allocation_context *ac = NULL;
 	struct ext4_group_desc *gdp;
 	unsigned long freed = 0;
 	unsigned int overflow;
@@ -4602,12 +4558,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	if (!ext4_should_writeback_data(inode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
 
-	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-	if (ac) {
-		ac->ac_inode = inode;
-		ac->ac_sb = sb;
-	}
-
 do_more:
 	overflow = 0;
 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -4665,12 +4615,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 		BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
 	}
 #endif
-	if (ac) {
-		ac->ac_b_ex.fe_group = block_group;
-		ac->ac_b_ex.fe_start = bit;
-		ac->ac_b_ex.fe_len = count;
-		trace_ext4_mballoc_free(ac);
-	}
+	trace_ext4_mballoc_free(sb, inode, block_group, bit, count);
 
 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
 	if (err)
@@ -4741,7 +4686,5 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	dquot_free_block(inode, freed);
 	brelse(bitmap_bh);
 	ext4_std_error(sb, err);
-	if (ac)
-		kmem_cache_free(ext4_ac_cachep, ac);
 	return;
 }
@@ -396,11 +396,11 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
 TRACE_EVENT(ext4_mb_release_inode_pa,
 	TP_PROTO(struct super_block *sb,
-		 struct ext4_allocation_context *ac,
+		 struct inode *inode,
 		 struct ext4_prealloc_space *pa,
 		 unsigned long long block, unsigned int count),
 
-	TP_ARGS(sb, ac, pa, block, count),
+	TP_ARGS(sb, inode, pa, block, count),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -412,8 +412,7 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->ino = (ac && ac->ac_inode) ?
-				ac->ac_inode->i_ino : 0;
+		__entry->ino = inode->i_ino;
 		__entry->block = block;
 		__entry->count = count;
 	),
@@ -425,10 +424,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 TRACE_EVENT(ext4_mb_release_group_pa,
 	TP_PROTO(struct super_block *sb,
-		 struct ext4_allocation_context *ac,
 		 struct ext4_prealloc_space *pa),
 
-	TP_ARGS(sb, ac, pa),
+	TP_ARGS(sb, pa),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -779,47 +777,56 @@ TRACE_EVENT(ext4_mballoc_prealloc,
 );
 
 DECLARE_EVENT_CLASS(ext4__mballoc,
-	TP_PROTO(struct ext4_allocation_context *ac),
+	TP_PROTO(struct super_block *sb,
+		 struct inode *inode,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
 
-	TP_ARGS(ac),
+	TP_ARGS(sb, inode, group, start, len),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
 		__field( ino_t, ino )
-		__field( __u32, result_logical )
 		__field( int, result_start )
 		__field( __u32, result_group )
 		__field( int, result_len )
 	),
 
 	TP_fast_assign(
-		__entry->dev = ac->ac_sb->s_dev;
-		__entry->ino = ac->ac_inode ?
-				ac->ac_inode->i_ino : 0;
-		__entry->result_logical = ac->ac_b_ex.fe_logical;
-		__entry->result_start = ac->ac_b_ex.fe_start;
-		__entry->result_group = ac->ac_b_ex.fe_group;
-		__entry->result_len = ac->ac_b_ex.fe_len;
+		__entry->dev = sb->s_dev;
+		__entry->ino = inode ? inode->i_ino : 0;
+		__entry->result_start = start;
+		__entry->result_group = group;
+		__entry->result_len = len;
 	),
 
-	TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
+	TP_printk("dev %s inode %lu extent %u/%d/%u ",
 		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
 		  __entry->result_group, __entry->result_start,
-		  __entry->result_len, __entry->result_logical)
+		  __entry->result_len)
 );
 
 DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
-	TP_PROTO(struct ext4_allocation_context *ac),
+	TP_PROTO(struct super_block *sb,
+		 struct inode *inode,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
 
-	TP_ARGS(ac)
+	TP_ARGS(sb, inode, group, start, len)
 );
 
 DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
-	TP_PROTO(struct ext4_allocation_context *ac),
+	TP_PROTO(struct super_block *sb,
+		 struct inode *inode,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
 
-	TP_ARGS(ac)
+	TP_ARGS(sb, inode, group, start, len)
 );
 
 TRACE_EVENT(ext4_forget,