Commit b54d6934 authored by Kent Overstreet

bcache: Kill op->cl

This isn't used for waiting asynchronously anymore - so this is a fairly
trivial refactoring.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent c18536a7
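
The change below drops the closure that was embedded in struct btree_op; the handful of callers that still need to wait for a btree node write now declare a closure on the stack instead. A minimal sketch of that calling pattern (kernel/bcache context assumed; example_write_and_wait is a hypothetical name, the helpers are the ones used throughout the diff):

static void example_write_and_wait(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);	/* set up an on-stack closure to wait on */
	bch_btree_node_write(b, &cl);	/* the write's completion will release cl */
	closure_sync(&cl);		/* block here until the write has finished */
}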
@@ -1196,7 +1196,7 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 	int ret;
 
 	memset(&t, 0, sizeof(struct bset_stats));
-	bch_btree_op_init_stack(&t.op);
+	bch_btree_op_init(&t.op, -1);
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
...
@@ -115,13 +115,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-void bch_btree_op_init_stack(struct btree_op *op)
-{
-	memset(op, 0, sizeof(struct btree_op));
-	closure_init_stack(&op->cl);
-	op->lock = -1;
-}
-
 static inline bool should_split(struct btree *b)
 {
 	struct bset *i = write_block(b);
@@ -965,8 +958,7 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
@@ -1260,6 +1252,9 @@ static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
 {
 	unsigned nodes = 0, keys = 0, blocks;
 	int i;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	while (nodes < GC_MERGE_NODES && r[nodes].b)
 		keys += r[nodes++].keys;
@@ -1353,9 +1348,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 {
 	void write(struct btree *r)
 	{
-		if (!r->written)
-			bch_btree_node_write(r, &op->cl);
-		else if (btree_node_dirty(r))
+		if (!r->written || btree_node_dirty(r))
 			bch_btree_node_write(r, writes);
 
 		up_write(&r->lock);
@@ -1431,6 +1424,9 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	struct btree *n = NULL;
 	unsigned keys = 0;
 	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	if (b->level || stale > 10)
 		n = btree_node_alloc_replacement(b);
@@ -1442,11 +1438,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 		ret = btree_gc_recurse(b, op, writes, gc);
 
 	if (!b->written || btree_node_dirty(b)) {
-		bch_btree_node_write(b, n ? &op->cl : NULL);
+		bch_btree_node_write(b, n ? &cl : NULL);
 	}
 
 	if (!IS_ERR_OR_NULL(n)) {
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(b);
 		btree_node_free(n);
 		rw_unlock(true, b);
@@ -1545,15 +1541,13 @@ static void bch_btree_gc(struct cache_set *c)
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	btree_gc_start(c);
 
 	atomic_inc(&c->prio_blocked);
 
 	ret = btree_root(gc_root, c, &op, &writes, &stats);
-	closure_sync(&op.cl);
 	closure_sync(&writes);
 
 	if (ret) {
@@ -1562,8 +1556,8 @@ static void bch_btree_gc(struct cache_set *c)
 	}
 
 	/* Possibly wait for new UUIDs or whatever to hit disk */
-	bch_journal_meta(c, &op.cl);
-	closure_sync(&op.cl);
+	bch_journal_meta(c, &writes);
+	closure_sync(&writes);
 
 	available = bch_btree_gc_finish(c);
@@ -1671,8 +1665,7 @@ int bch_btree_check(struct cache_set *c)
 	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1980,6 +1973,9 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
 	uint64_t start_time = local_clock();
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	n1 = btree_node_alloc_replacement(b);
 	if (IS_ERR(n1))
@@ -2025,7 +2021,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		bkey_copy_key(&n2->key, &b->key);
 
 		bch_keylist_add(parent_keys, &n2->key);
-		bch_btree_node_write(n2, &op->cl);
+		bch_btree_node_write(n2, &cl);
 		rw_unlock(true, n2);
 	} else {
 		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
@@ -2034,23 +2030,23 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	}
 
 	bch_keylist_add(parent_keys, &n1->key);
-	bch_btree_node_write(n1, &op->cl);
+	bch_btree_node_write(n1, &cl);
 
 	if (n3) {
 		/* Depth increases, make a new root */
 
 		bkey_copy_key(&n3->key, &MAX_KEY);
 		bch_btree_insert_keys(n3, op, parent_keys);
-		bch_btree_node_write(n3, &op->cl);
+		bch_btree_node_write(n3, &cl);
 
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n3);
 		rw_unlock(true, n3);
 	} else if (!b->parent) {
 		/* Root filled up but didn't need to be split */
 
 		bch_keylist_reset(parent_keys);
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n1);
 	} else {
 		unsigned i;
@@ -2065,7 +2061,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		}
 
 		bch_keylist_push(parent_keys);
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		atomic_inc(&b->c->prio_blocked);
 	}
 
@@ -2126,10 +2122,15 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 			BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
 			if (bch_btree_insert_keys(b, op, insert_keys)) {
-				if (!b->level)
+				if (!b->level) {
 					bch_btree_leaf_dirty(b, journal_ref);
-				else
-					bch_btree_node_write(b, &op->cl);
+				} else {
+					struct closure cl;
+
+					closure_init_stack(&cl);
+					bch_btree_node_write(b, &cl);
+					closure_sync(&cl);
+				}
 			}
 		}
 	} while (!bch_keylist_empty(&split_keys));
@@ -2204,12 +2205,6 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 {
 	int ret = 0;
 
-	/*
-	 * Don't want to block with the btree locked unless we have to,
-	 * otherwise we get deadlocks with try_harder and between split/gc
-	 */
-	clear_closure_blocking(&op->cl);
-
 	BUG_ON(bch_keylist_empty(keys));
 
 	while (!bch_keylist_empty(keys)) {
@@ -2217,8 +2212,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
+			BUG();
 			ret = 0;
-			closure_sync(&op->cl);
 		} else if (ret) {
 			struct bkey *k;
 
@@ -2292,10 +2287,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
 {
-	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
@@ -2328,10 +2320,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
 {
-	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_keys_recurse, c, op, from, fn, flags);
 }
 
 /* Keybuf code */
@@ -2409,7 +2398,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 	cond_resched();
 
-	bch_btree_op_init_stack(&refill.op);
+	bch_btree_op_init(&refill.op, -1);
 	refill.buf = buf;
 	refill.end = end;
 	refill.pred = pred;
...
@@ -237,8 +237,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure cl;
-
 	/* Btree level at which we start taking write locks */
 	short lock;
 
@@ -253,7 +251,11 @@ struct btree_op {
 	BKEY_PADDED(replace);
 };
 
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
...
@@ -305,8 +305,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	struct btree_op op;
 
 	bch_keylist_init(&keylist);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -341,14 +340,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	pr_info("journal replay done, %i keys in %i entries, seq %llu",
 		keys, entries, end);
 
+err:
 	while (!list_empty(list)) {
 		i = list_first_entry(list, struct journal_replay, list);
 		list_del(&i->list);
 		kfree(i);
 	}
 
-err:
-	closure_sync(&op.cl);
 	return ret;
 }
...
@@ -108,8 +108,8 @@ static void write_moving(struct closure *cl)
 		s->op.type = BTREE_REPLACE;
 		bkey_copy(&s->op.replace, &io->w->key);
 
-		closure_init(&s->op.cl, cl);
-		bch_data_insert(&s->op.cl);
+		closure_init(&s->btree, cl);
+		bch_data_insert(&s->btree);
 	}
 
 	continue_at(cl, write_moving_finish, system_wq);
...
@@ -215,8 +215,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	atomic_t *journal_ref = NULL;
 
 	/*
@@ -236,7 +235,7 @@ static void bch_data_insert_keys(struct closure *cl)
 					  s->flush_journal
 					  ? &s->cl : NULL);
 
-	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+	if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
 		s->error = -ENOMEM;
 		s->insert_data_done = true;
 	}
@@ -433,8 +432,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 static void bch_data_invalidate(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
@@ -461,8 +459,7 @@ static void bch_data_invalidate(struct closure *cl)
 
 static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -493,8 +490,7 @@ static void bch_data_insert_error(struct closure *cl)
 static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
@@ -511,8 +507,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 
 static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio, *n;
 
 	if (s->bypass)
@@ -630,8 +625,7 @@ static void bch_data_insert_start(struct closure *cl)
  */
 void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	bch_keylist_init(&s->insert_keys);
 	bio_get(s->cache_bio);
@@ -731,11 +725,10 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 static void cache_lookup(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, s->c,
+	int ret = bch_btree_map_keys(&s->op, s->c,
 				     &KEY(s->inode, bio->bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
@@ -1064,7 +1057,7 @@ static void cached_dev_read_done(struct closure *cl)
 	if (s->cache_bio &&
 	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -1156,7 +1149,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, cache_lookup, NULL, cl);
+	closure_call(&s->btree, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1239,7 +1232,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		closure_bio_submit(bio, cl, s->d);
 	}
 
-	closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+	closure_call(&s->btree, bch_data_insert, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
@@ -1418,9 +1411,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		s->writeback = true;
 		s->cache_bio = bio;
 
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, cache_lookup, NULL, cl);
+		closure_call(&s->btree, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
...
@@ -6,6 +6,7 @@
 struct search {
 	/* Stack frame for bio_complete */
 	struct closure		cl;
+	struct closure		btree;
 
 	struct bcache_device	*d;
 	struct cache_set	*c;
...
@@ -143,7 +143,7 @@ static void write_dirty_finish(struct closure *cl)
 		struct btree_op op;
 		struct keylist keys;
 
-		bch_btree_op_init_stack(&op);
+		bch_btree_op_init(&op, -1);
 		bch_keylist_init(&keys);
 
 		op.type = BTREE_REPLACE;
@@ -156,7 +156,6 @@ static void write_dirty_finish(struct closure *cl)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
 		bch_btree_insert(&op, dc->disk.c, &keys, NULL);
-		closure_sync(&op.cl);
 
 		if (op.insert_collision)
 			trace_bcache_writeback_collision(&w->key);
@@ -457,7 +456,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 {
 	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op.op);
+	bch_btree_op_init(&op.op, -1);
 	op.inode = dc->disk.id;
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
...
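
Note on initialization: the old two-step setup at call sites (bch_btree_op_init_stack() followed by an explicit op.lock assignment) collapses into the new bch_btree_op_init() inline from the btree.h hunk. A sketch of how a call site changes, using the values seen above (per the struct btree_op comment, lock is the level at which write locks start being taken; SHRT_MAX write-locks every level, while -1 leaves every level read-locked):

	struct btree_op op;

	/* old: bch_btree_op_init_stack(&op); op.lock = SHRT_MAX; */
	bch_btree_op_init(&op, SHRT_MAX);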