Commit 21e478dd authored by Coly Li, committed by Jens Axboe

bcache: handle c->uuids properly for bucket size > 8MB

Bcache allocates a whole bucket to store c->uuids on cache device, and
allocates continuous pages to store it in-memory. When the bucket size
exceeds maximum allocable continuous pages, bch_cache_set_alloc() will
fail and cache device registration will fail.

This patch allocates c->uuids by alloc_meta_bucket_pages(), and uses
ilog2(meta_bucket_pages(c)) to indicate the order of the c->uuids pages
when freeing them. When writing c->uuids to the cache device, its size is
decided by meta_bucket_pages(c) * PAGE_SECTORS. Now c->uuids is properly
handled for bucket size > 8MB.
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent de1fafab
...@@ -466,6 +466,7 @@ static int __uuid_write(struct cache_set *c) ...@@ -466,6 +466,7 @@ static int __uuid_write(struct cache_set *c)
BKEY_PADDED(key) k; BKEY_PADDED(key) k;
struct closure cl; struct closure cl;
struct cache *ca; struct cache *ca;
unsigned int size;
closure_init_stack(&cl); closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock); lockdep_assert_held(&bch_register_lock);
...@@ -473,7 +474,8 @@ static int __uuid_write(struct cache_set *c) ...@@ -473,7 +474,8 @@ static int __uuid_write(struct cache_set *c)
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
return 1; return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size); size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl); closure_sync(&cl);
...@@ -1664,7 +1666,7 @@ static void cache_set_free(struct closure *cl) ...@@ -1664,7 +1666,7 @@ static void cache_set_free(struct closure *cl)
} }
bch_bset_sort_state_free(&c->sort); bch_bset_sort_state_free(&c->sort);
free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
if (c->moving_gc_wq) if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq); destroy_workqueue(c->moving_gc_wq);
...@@ -1870,7 +1872,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) ...@@ -1870,7 +1872,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size); c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size); c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
c->devices_max_used = 0; c->devices_max_used = 0;
atomic_set(&c->attached_dev_nr, 0); atomic_set(&c->attached_dev_nr, 0);
c->btree_pages = bucket_pages(c); c->btree_pages = bucket_pages(c);
...@@ -1921,7 +1923,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) ...@@ -1921,7 +1923,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err; goto err;
c->uuids = alloc_bucket_pages(GFP_KERNEL, c); c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
if (!c->uuids) if (!c->uuids)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment