Commit cb851149 authored by John Sheu, committed by Kent Overstreet

bcache: remove nested function usage

Uninlined nested functions can cause crashes when using ftrace, as they don't
follow the normal calling convention and confuse the ftrace function graph
tracer as it examines the stack.

Also, nested functions are supported as a gcc extension, but may fail on other
compilers (e.g. llvm).
Signed-off-by: John Sheu <john.sheu@gmail.com>
parent 3a2fd9d5
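
Below, for context, is a small standalone sketch of the pattern this patch removes. It is illustrative only: nothing here is taken from the bcache sources, and the function names are invented. A GCC nested function reaches its parent's locals through a hidden static-chain argument, so an uninlined copy of it does not follow the normal C calling convention that the ftrace function graph tracer expects, and clang/LLVM rejects the extension outright. Hoisting the helper to file scope and passing the context it needs as an explicit parameter, as this patch does for subtract_dirty() and the sysfs helpers, avoids both problems.

/*
 * Illustrative sketch only: not code from this patch.
 */
#include <stdio.h>

/* Before: helper nested inside its caller (GCC-only extension). */
static int sum_biased_nested(const int *v, int n, int base)
{
	int total = 0;

	/* Reads "base" from the enclosing frame via GCC's static chain. */
	int add_base(int x)
	{
		return x + base;
	}

	for (int i = 0; i < n; i++)
		total += add_base(v[i]);
	return total;
}

/* After: ordinary static helper; the captured value becomes a parameter. */
static int add_base_hoisted(int x, int base)
{
	return x + base;
}

static int sum_biased_hoisted(const int *v, int n, int base)
{
	int total = 0;

	for (int i = 0; i < n; i++)
		total += add_base_hoisted(v[i], base);
	return total;
}

int main(void)
{
	int v[] = { 1, 2, 3 };

	/* Both print 36: (1+10) + (2+10) + (3+10). */
	printf("%d %d\n", sum_biased_nested(v, 3, 10),
	       sum_biased_hoisted(v, 3, 10));
	return 0;
}

Compiling the sketch with clang fails on the nested definition, while the hoisted version builds with either compiler.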
drivers/md/bcache/extents.c

@@ -308,6 +308,16 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 	return NULL;
 }
 
+static void bch_subtract_dirty(struct bkey *k,
+			   struct cache_set *c,
+			   uint64_t offset,
+			   int sectors)
+{
+	if (KEY_DIRTY(k))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+					     offset, -sectors);
+}
+
 static bool bch_extent_insert_fixup(struct btree_keys *b,
 				    struct bkey *insert,
 				    struct btree_iter *iter,
@@ -315,13 +325,6 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 {
 	struct cache_set *c = container_of(b, struct btree, keys)->c;
 
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
 	uint64_t old_offset;
 	unsigned old_size, sectors_found = 0;
 
@@ -398,7 +401,8 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 
 			struct bkey *top;
 
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+			bch_subtract_dirty(k, c, KEY_START(insert),
+				       KEY_SIZE(insert));
 
 			if (bkey_written(b, k)) {
 				/*
@@ -448,7 +452,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
 			}
 		}
 
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
 	}
 
 check_failed:
drivers/md/bcache/sysfs.c

@@ -405,7 +405,7 @@ struct bset_stats_op {
 	struct bset_stats stats;
 };
 
-static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
+static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
 {
 	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);
 
@@ -423,7 +423,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
 	memset(&op, 0, sizeof(op));
 	bch_btree_op_init(&op.op, -1);
 
-	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats);
+	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
 	if (ret < 0)
 		return ret;
 
@@ -441,81 +441,81 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
 			op.stats.floats, op.stats.failed);
 }
 
-SHOW(__bch_cache_set)
+static unsigned bch_root_usage(struct cache_set *c)
 {
-	unsigned root_usage(struct cache_set *c)
-	{
-		unsigned bytes = 0;
-		struct bkey *k;
-		struct btree *b;
-		struct btree_iter iter;
+	unsigned bytes = 0;
+	struct bkey *k;
+	struct btree *b;
+	struct btree_iter iter;
 
-		goto lock_root;
+	goto lock_root;
 
-		do {
-			rw_unlock(false, b);
+	do {
+		rw_unlock(false, b);
 lock_root:
-			b = c->root;
-			rw_lock(false, b, b->level);
-		} while (b != c->root);
+		b = c->root;
+		rw_lock(false, b, b->level);
+	} while (b != c->root);
 
-		for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
-			bytes += bkey_bytes(k);
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+		bytes += bkey_bytes(k);
 
-		rw_unlock(false, b);
+	rw_unlock(false, b);
 
-		return (bytes * 100) / btree_bytes(c);
-	}
+	return (bytes * 100) / btree_bytes(c);
+}
 
-	size_t cache_size(struct cache_set *c)
-	{
-		size_t ret = 0;
-		struct btree *b;
+static size_t bch_cache_size(struct cache_set *c)
+{
+	size_t ret = 0;
+	struct btree *b;
 
-		mutex_lock(&c->bucket_lock);
-		list_for_each_entry(b, &c->btree_cache, list)
-			ret += 1 << (b->keys.page_order + PAGE_SHIFT);
+	mutex_lock(&c->bucket_lock);
+	list_for_each_entry(b, &c->btree_cache, list)
+		ret += 1 << (b->keys.page_order + PAGE_SHIFT);
 
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
 
-	unsigned cache_max_chain(struct cache_set *c)
-	{
-		unsigned ret = 0;
-		struct hlist_head *h;
+static unsigned bch_cache_max_chain(struct cache_set *c)
+{
+	unsigned ret = 0;
+	struct hlist_head *h;
 
-		mutex_lock(&c->bucket_lock);
+	mutex_lock(&c->bucket_lock);
 
-		for (h = c->bucket_hash;
-		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
-		     h++) {
-			unsigned i = 0;
-			struct hlist_node *p;
+	for (h = c->bucket_hash;
+	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+	     h++) {
+		unsigned i = 0;
+		struct hlist_node *p;
 
-			hlist_for_each(p, h)
-				i++;
+		hlist_for_each(p, h)
+			i++;
 
-			ret = max(ret, i);
-		}
+		ret = max(ret, i);
+	}
 
-		mutex_unlock(&c->bucket_lock);
-		return ret;
-	}
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
 
-	unsigned btree_used(struct cache_set *c)
-	{
-		return div64_u64(c->gc_stats.key_bytes * 100,
-				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
-	}
+static unsigned bch_btree_used(struct cache_set *c)
+{
+	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+}
 
-	unsigned average_key_size(struct cache_set *c)
-	{
-		return c->gc_stats.nkeys
-			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
-			: 0;
-	}
+static unsigned bch_average_key_size(struct cache_set *c)
+{
+	return c->gc_stats.nkeys
+		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+		: 0;
+}
 
+SHOW(__bch_cache_set)
+{
 	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
 
 	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
@@ -523,10 +523,10 @@ SHOW(__bch_cache_set)
 	sysfs_hprint(bucket_size, bucket_bytes(c));
 	sysfs_hprint(block_size, block_bytes(c));
 	sysfs_print(tree_depth, c->root->level);
-	sysfs_print(root_usage_percent, root_usage(c));
+	sysfs_print(root_usage_percent, bch_root_usage(c));
 
-	sysfs_hprint(btree_cache_size, cache_size(c));
-	sysfs_print(btree_cache_max_chain, cache_max_chain(c));
+	sysfs_hprint(btree_cache_size, bch_cache_size(c));
+	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
 	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);
 
 	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
@@ -534,9 +534,9 @@ SHOW(__bch_cache_set)
 	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
 	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
 
-	sysfs_print(btree_used_percent, btree_used(c));
+	sysfs_print(btree_used_percent, bch_btree_used(c));
 	sysfs_print(btree_nodes, c->gc_stats.nodes);
-	sysfs_hprint(average_key_size, average_key_size(c));
+	sysfs_hprint(average_key_size, bch_average_key_size(c));
 
 	sysfs_print(cache_read_races,
 		    atomic_long_read(&c->cache_read_races));