Commit 4a784266 authored by Coly Li, committed by Jens Axboe

bcache: remove embedded struct cache_sb from struct cache_set

Since the bcache code was merged into the mainline kernel, each cache
set has only ever had a single cache in it. The framework for multiple
caches is there, but the code is far from complete. Considering that
multiple copies of cached data can also be stored on e.g. md raid1
devices, there is no real need to support multiple caches in one cache
set.

The previous preparation patches fixed up the dependencies so that a
cache set explicitly has only a single cache. Now we no longer have to
maintain an embedded partial super block in struct cache_set; the
in-memory super block can be referenced directly from struct cache.

This patch removes the embedded struct cache_sb from struct cache_set
and fixes all locations that referenced the removed super block to
reference the in-memory super block of struct cache instead.
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f9414e0
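
For context, here is a minimal sketch of the access-pattern change this commit makes. It is simplified and not the actual kernel definitions; the field types are illustrative, and set_bucket_size() is a hypothetical helper added only for this example. The point it shows: struct cache_set no longer carries its own partial copy of the super block, so every user follows c->cache->sb instead of c->sb.

/*
 * Simplified sketch only, not the real kernel structs, which carry
 * many more fields.
 */
struct cache_sb {
        unsigned int    block_size;     /* in sectors */
        unsigned int    bucket_size;    /* in sectors */
        /* ... */
};

struct cache {
        struct cache_sb sb;             /* authoritative in-memory super block */
        /* ... */
};

struct cache_set {
        /* struct cache_sb sb;             removed by this commit */
        struct cache    *cache;         /* the single cache in this set */
        /* ... */
};

/* Old pattern: c->sb.bucket_size.  New pattern (hypothetical helper): */
static inline unsigned int set_bucket_size(struct cache_set *c)
{
        return c->cache->sb.bucket_size;
}

With only one cache per set, the per-set copy bought nothing except the burden of keeping two copies of the same fields in sync, which is what the bcache_write_super() hunk below stops doing.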
@@ -87,7 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 {
         struct cache *ca;
         struct bucket *b;
-        unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
+        unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
         int r;
         atomic_sub(sectors, &c->rescale);
@@ -583,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                 struct open_bucket, list);
 found:
         if (!ret->sectors_free && KEY_PTRS(alloc)) {
-                ret->sectors_free = c->sb.bucket_size;
+                ret->sectors_free = c->cache->sb.bucket_size;
                 bkey_copy(&ret->key, alloc);
                 bkey_init(alloc);
         }
@@ -677,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
                                 &PTR_CACHE(c, &b->key, i)->sectors_written);
         }
-        if (b->sectors_free < c->sb.block_size)
+        if (b->sectors_free < c->cache->sb.block_size)
                 b->sectors_free = 0;
         /*
...
@@ -517,8 +517,6 @@ struct cache_set {
         atomic_t                idle_counter;
         atomic_t                at_max_writeback_rate;
-        struct cache_sb         sb;
         struct cache            *cache;
         struct bcache_device    **devices;
@@ -799,7 +797,7 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
 static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 {
-        return s & (c->sb.bucket_size - 1);
+        return s & (c->cache->sb.bucket_size - 1);
 }
 static inline struct cache *PTR_CACHE(struct cache_set *c,
...
@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
         if (b->written < btree_blocks(b))
                 bch_bset_init_next(&b->keys, write_block(b),
-                                   bset_magic(&b->c->sb));
+                                   bset_magic(&b->c->cache->sb));
 }
@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
          * See the comment arount cache_set->fill_iter.
          */
         iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
-        iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+        iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
         iter->used = 0;
 #ifdef CONFIG_BCACHE_DEBUG
@@ -178,7 +178,7 @@ void bch_btree_node_read_done(struct btree *b)
                 goto err;
         err = "bad magic";
-        if (i->magic != bset_magic(&b->c->sb))
+        if (i->magic != bset_magic(&b->c->cache->sb))
                 goto err;
         err = "bad checksum";
@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
         if (b->written < btree_blocks(b))
                 bch_bset_init_next(&b->keys, write_block(b),
-                                   bset_magic(&b->c->sb));
+                                   bset_magic(&b->c->cache->sb));
 out:
         mempool_free(iter, &b->c->fill_iter);
         return;
@@ -423,7 +423,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
         do_btree_node_write(b);
-        atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->sb.block_size,
+        atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
                         &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
         b->written += set_blocks(i, block_bytes(b->c->cache));
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
         if (c->verify_data)
                 list_move(&c->verify_data->list, &c->btree_cache);
-        free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
+        free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
 #endif
         list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
         mutex_init(&c->verify_lock);
         c->verify_ondisk = (void *)
-                __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
+                __get_free_pages(GFP_KERNEL|__GFP_COMP,
+                                 ilog2(meta_bucket_pages(&c->cache->sb)));
         if (!c->verify_ondisk) {
                 /*
                  * Don't worry about the mca_rereserve buckets
@@ -1108,7 +1109,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
         }
         b->parent = parent;
-        bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
+        bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
         mutex_unlock(&c->bucket_lock);
...
@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
 static inline void set_gc_sectors(struct cache_set *c)
 {
-        atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+        atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
 }
 void bkey_put(struct cache_set *c, struct bkey *k);
...
@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
                         size_t bucket = PTR_BUCKET_NR(c, k, i);
                         size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-                        if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+                        if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
                             bucket < ca->sb.first_bucket ||
                             bucket >= ca->sb.nbuckets)
                                 return true;
@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
                         size_t bucket = PTR_BUCKET_NR(c, k, i);
                         size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-                        if (KEY_SIZE(k) + r > c->sb.bucket_size)
+                        if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
                                 return "bad, length too big";
                         if (bucket < ca->sb.first_bucket)
                                 return "bad, short offset";
@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
                 size_t n = PTR_BUCKET_NR(b->c, k, j);
                 pr_cont(" bucket %zu", n);
-                if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+                if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
                         pr_cont(" prio %i",
                                 PTR_BUCKET(b->c, k, j)->prio);
         }
...
@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
         for (f = &feature_list[0]; f->compat != 0; f++) {               \
                 if (f->compat != BCH_FEATURE_ ## type)                  \
                         continue;                                       \
-                if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) {    \
+                if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
                         if (first) {                                    \
                                 out += snprintf(out, buf + size - out,  \
                                                 "[");                   \
@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
                                                                         \
                 out += snprintf(out, buf + size - out, "%s", f->string);\
                                                                         \
-                if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask))      \
+                if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
                         out += snprintf(out, buf + size - out, "]");    \
                                                                         \
                 first = false;                                          \
...
@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
         struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
         struct bio *bio = &b->bio;
-        bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
+        bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
         return bio;
 }
...
@@ -666,7 +666,7 @@ static void journal_reclaim(struct cache_set *c)
         bkey_init(k);
         SET_KEY_PTRS(k, 1);
-        c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+        c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
 out:
         if (!journal_full(&c->journal))
@@ -735,7 +735,7 @@ static void journal_write_unlocked(struct closure *cl)
         struct journal_write *w = c->journal.cur;
         struct bkey *k = &c->journal.key;
         unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
-                c->sb.block_size;
+                ca->sb.block_size;
         struct bio *bio;
         struct bio_list list;
@@ -762,7 +762,7 @@ static void journal_write_unlocked(struct closure *cl)
         bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
         w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-        w->data->magic = jset_magic(&c->sb);
+        w->data->magic = jset_magic(&ca->sb);
         w->data->version = BCACHE_JSET_VERSION;
         w->data->last_seq = last_seq(&c->journal);
         w->data->csum = csum_set(w->data);
@@ -838,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
         size_t sectors;
         struct closure cl;
         bool wait = false;
+        struct cache *ca = c->cache;
         closure_init_stack(&cl);
@@ -847,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
                 struct journal_write *w = c->journal.cur;
                 sectors = __set_blocks(w->data, w->data->keys + nkeys,
-                                       block_bytes(c->cache)) * c->sb.block_size;
+                                       block_bytes(ca)) * ca->sb.block_size;
                 if (sectors <= min_t(size_t,
-                                     c->journal.blocks_free * c->sb.block_size,
+                                     c->journal.blocks_free * ca->sb.block_size,
                                      PAGE_SECTORS << JSET_BITS))
                         return w;
...
@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
                 goto skip;
         }
-        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
-            bio_sectors(bio) & (c->sb.block_size - 1)) {
+        if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
+            bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
                 pr_debug("skipping unaligned io\n");
                 goto skip;
         }
...
@@ -350,16 +350,10 @@ void bcache_write_super(struct cache_set *c)
         down(&c->sb_write_mutex);
         closure_init(cl, &c->cl);
-        c->sb.seq++;
-        if (c->sb.version > version)
-                version = c->sb.version;
-        ca->sb.version = version;
-        ca->sb.seq = c->sb.seq;
-        ca->sb.last_mount = c->sb.last_mount;
-        SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+        ca->sb.seq++;
+        if (ca->sb.version < version)
+                ca->sb.version = version;
         bio_init(bio, ca->sb_bv, 1);
         bio_set_dev(bio, ca->bdev);
@@ -477,7 +471,7 @@ static int __uuid_write(struct cache_set *c)
 {
         BKEY_PADDED(key) k;
         struct closure cl;
-        struct cache *ca;
+        struct cache *ca = c->cache;
         unsigned int size;
         closure_init_stack(&cl);
@@ -486,13 +480,12 @@ static int __uuid_write(struct cache_set *c)
         if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
                 return 1;
-        size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+        size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
         SET_KEY_SIZE(&k.key, size);
         uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
         closure_sync(&cl);
         /* Only one bucket used for uuid write */
-        ca = PTR_CACHE(c, &k.key, 0);
         atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
         bkey_copy(&c->uuid_bucket, &k.key);
@@ -1205,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
                 return -EINVAL;
         }
-        if (dc->sb.block_size < c->sb.block_size) {
+        if (dc->sb.block_size < c->cache->sb.block_size) {
                 /* Will die */
                 pr_err("Couldn't attach %s: block size less than set's block size\n",
                        dc->backing_dev_name);
@@ -1663,6 +1656,9 @@ static void cache_set_free(struct closure *cl)
         bch_journal_free(c);
         mutex_lock(&bch_register_lock);
+        bch_bset_sort_state_free(&c->sort);
+        free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
         ca = c->cache;
         if (ca) {
                 ca->set = NULL;
@@ -1670,8 +1666,6 @@ static void cache_set_free(struct closure *cl)
                 kobject_put(&ca->kobj);
         }
-        bch_bset_sort_state_free(&c->sort);
-        free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
         if (c->moving_gc_wq)
                 destroy_workqueue(c->moving_gc_wq);
@@ -1837,6 +1831,7 @@ void bch_cache_set_unregister(struct cache_set *c)
 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 {
         int iter_size;
+        struct cache *ca = container_of(sb, struct cache, sb);
         struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
         if (!c)
@@ -1859,23 +1854,15 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
         bch_cache_accounting_init(&c->accounting, &c->cl);
         memcpy(c->set_uuid, sb->set_uuid, 16);
-        c->sb.block_size = sb->block_size;
-        c->sb.bucket_size = sb->bucket_size;
-        c->sb.nr_in_set = sb->nr_in_set;
-        c->sb.last_mount = sb->last_mount;
-        c->sb.version = sb->version;
-        if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
-                c->sb.feature_compat = sb->feature_compat;
-                c->sb.feature_ro_compat = sb->feature_ro_compat;
-                c->sb.feature_incompat = sb->feature_incompat;
-        }
+        c->cache = ca;
+        c->cache->set = c;
         c->bucket_bits = ilog2(sb->bucket_size);
         c->block_bits = ilog2(sb->block_size);
-        c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
+        c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
         c->devices_max_used = 0;
         atomic_set(&c->attached_dev_nr, 0);
-        c->btree_pages = meta_bucket_pages(&c->sb);
+        c->btree_pages = meta_bucket_pages(sb);
         if (c->btree_pages > BTREE_MAX_PAGES)
                 c->btree_pages = max_t(int, c->btree_pages / 4,
                                        BTREE_MAX_PAGES);
@@ -1913,7 +1900,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
         if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
                         sizeof(struct bbio) +
-                        sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
+                        sizeof(struct bio_vec) * meta_bucket_pages(sb)))
                 goto err;
         if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
@@ -1923,7 +1910,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                         BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
                 goto err;
-        c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
+        c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
         if (!c->uuids)
                 goto err;
@@ -2103,7 +2090,7 @@ static int run_cache_set(struct cache_set *c)
                 goto err;
         closure_sync(&cl);
-        c->sb.last_mount = (u32)ktime_get_real_seconds();
+        c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
         bcache_write_super(c);
         list_for_each_entry_safe(dc, t, &uncached_devices, list)
...
@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
          * This is the size of the cache, minus the amount used for
          * flash-only devices
          */
-        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+        uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
                                 atomic_long_read(&c->flash_dev_dirty_sectors);
         /*