Commit 4e1ebae3 authored by Coly Li, committed by Jens Axboe

bcache: only use block_bytes() on struct cache

Because struct cache_set and struct cache both contain a struct cache_sb,
the macro block_bytes() can be used on either of them. Once the embedded
struct cache_sb is removed from struct cache_set, this macro can no longer
be used on struct cache_set.

This patch unifies all block_bytes() usage to struct cache only; it is one
of the preparations for removing the embedded struct cache_sb from
struct cache_set.
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1132e56e
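
For reference, a minimal sketch of the layout the commit message describes. The struct member lists and types are deliberately trimmed for illustration (the real bcache headers carry many more fields); only block_bytes(), the sb members, the block_size field, and the cache pointer are taken from the message and the diff below.

	/* Sketch only -- simplified from the real bcache definitions. */
	struct cache_sb {
		unsigned short	block_size;	/* block size, in 512-byte sectors */
		/* ... many more superblock fields ... */
	};

	struct cache {
		struct cache_sb	sb;		/* per cache device superblock */
		/* ... */
	};

	struct cache_set {
		struct cache_sb	sb;		/* embedded copy; later patches remove it */
		struct cache	*cache;		/* cache device; call sites below use c->cache */
		/* ... */
	};

	/* Old form (before this patch): compiled against either struct, since both have ->sb */
	/* #define block_bytes(c)	((c)->sb.block_size << 9) */

	/* New form: only a struct cache is accepted; cache_set users pass c->cache */
	#define block_bytes(ca)		((ca)->sb.block_size << 9)
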
@@ -759,7 +759,7 @@ struct bbio {
 #define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
 #define bucket_bytes(c)		((c)->sb.bucket_size << 9)
-#define block_bytes(c)		((c)->sb.block_size << 9)
+#define block_bytes(ca)		((ca)->sb.block_size << 9)

 static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
 {
...
@@ -104,7 +104,7 @@
 static inline struct bset *write_block(struct btree *b)
 {
-	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
 }

 static void bch_btree_init_next(struct btree *b)
@@ -173,7 +173,7 @@ void bch_btree_node_read_done(struct btree *b)
 			goto err;

 		err = "bad btree header";
-		if (b->written + set_blocks(i, block_bytes(b->c)) >
+		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
 		    btree_blocks(b))
 			goto err;
@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

-		b->written += set_blocks(i, block_bytes(b->c));
+		b->written += set_blocks(i, block_bytes(b->c->cache));
 	}

 	err = "corrupted btree";
 	for (i = write_block(b);
 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
-	     i = ((void *) i) + block_bytes(b->c))
+	     i = ((void *) i) + block_bytes(b->c->cache))
 		if (i->seq == b->keys.set[0].data->seq)
 			goto err;
@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io	= btree_node_write_endio;
 	b->bio->bi_private	= cl;
-	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
+	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
 	do_btree_node_write(b);

-	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
+	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

-	b->written += set_blocks(i, block_bytes(b->c));
+	b->written += set_blocks(i, block_bytes(b->c->cache));
 }

 void bch_btree_node_write(struct btree *b, struct closure *parent)
@@ -1344,7 +1344,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	if (nodes < 2 ||
 	    __set_blocks(b->keys.set[0].data, keys,
-			 block_bytes(b->c)) > blocks * (nodes - 1))
+			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
 		return 0;

 	for (i = 0; i < nodes; i++) {
@@ -1378,7 +1378,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 		     k = bkey_next(k)) {
 			if (__set_blocks(n1, n1->keys + keys +
 					 bkey_u64s(k),
-					 block_bytes(b->c)) > blocks)
+					 block_bytes(b->c->cache)) > blocks)
 				break;

 			last = k;
@@ -1394,7 +1394,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 			 * though)
 			 */
 			if (__set_blocks(n1, n1->keys + n2->keys,
-					 block_bytes(b->c)) >
+					 block_bytes(b->c->cache)) >
 			    btree_blocks(new_nodes[i]))
 				goto out_unlock_nocoalesce;
@@ -1403,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 			last = &r->b->key;
 		}

-		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
 		       btree_blocks(new_nodes[i]));

 		if (last)
@@ -2210,7 +2210,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		goto err;

 	split = set_blocks(btree_bset_first(n1),
-			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
+			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

 	if (split) {
 		unsigned int keys = 0;
...
@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
 	for (i = (start);						\
 	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
 	     i->seq == (start)->seq;					\
-	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
-		 block_bytes(b->c))
+	     i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) *	\
+		 block_bytes(b->c->cache))

 void bch_btree_verify(struct btree *b)
 {
@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
 		for_each_written_bset(b, ondisk, i) {
 			unsigned int block = ((void *) i - (void *) ondisk) /
-				block_bytes(b->c);
+				block_bytes(b->c->cache);

 			pr_err("*** on disk block %u:\n", block);
 			bch_dump_bset(&b->keys, i, block);
 		}

 		pr_err("*** block %zu not written\n",
-		       ((void *) i - (void *) ondisk) / block_bytes(b->c));
+		       ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));

 		for (j = 0; j < inmemory->keys; j++)
 			if (inmemory->d[j] != sorted->d[j])
...
@@ -98,7 +98,7 @@ reread:		left = ca->sb.bucket_size - offset;
 			return ret;
 		}

-		blocks = set_blocks(j, block_bytes(ca->set));
+		blocks = set_blocks(j, block_bytes(ca));

 		/*
 		 * Nodes in 'list' are in linear increasing order of
@@ -734,7 +734,7 @@ static void journal_write_unlocked(struct closure *cl)
 	struct cache *ca = c->cache;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
-	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
+	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
 		c->sb.block_size;

 	struct bio *bio;
@@ -754,7 +754,7 @@ static void journal_write_unlocked(struct closure *cl)
 		return;
 	}

-	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
+	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

 	w->data->btree_level = c->root->level;
@@ -847,7 +847,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
 		struct journal_write *w = c->journal.cur;

 		sectors = __set_blocks(w->data, w->data->keys + nkeys,
-				       block_bytes(c)) * c->sb.block_size;
+				       block_bytes(c->cache)) * c->sb.block_size;

 		if (sectors <= min_t(size_t,
 				     c->journal.blocks_free * c->sb.block_size,
...
@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
 	 * bch_data_insert_keys() will insert the keys created so far
 	 * and finish the rest when the keylist is empty.
 	 */
-	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
 		return -ENOMEM;

 	return __bch_keylist_realloc(l, u64s);
...
@@ -1527,7 +1527,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 	kobject_init(&d->kobj, &bch_flash_dev_ktype);

-	if (bcache_device_init(d, block_bytes(c), u->sectors,
+	if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
 			       NULL, &bcache_flash_ops))
 		goto err;
...
@@ -714,7 +714,7 @@ SHOW(__bch_cache_set)
 	sysfs_print(synchronous,	CACHE_SYNC(&c->sb));
 	sysfs_print(journal_delay_ms,	c->journal_delay_ms);
 	sysfs_hprint(bucket_size,	bucket_bytes(c));
-	sysfs_hprint(block_size,	block_bytes(c));
+	sysfs_hprint(block_size,	block_bytes(c->cache));
 	sysfs_print(tree_depth,		c->root->level);
 	sysfs_print(root_usage_percent,	bch_root_usage(c));
...