Commit 48dad8ba authored by Kent Overstreet

bcache: Add btree_map() functions

Lots of stuff has been open coding its own btree traversal - which is
generally pretty simple code, but there are a few subtleties.

This adds two new functions, bch_btree_map_nodes() and
bch_btree_map_keys(), which do the traversal for you. Everything that's
open coding btree traversal now (with the exception of garbage
collection) is slowly going to be converted to these two functions;
being able to write other code at a higher level of abstraction is a
big improvement w.r.t. overall code quality.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 5e6926da
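
Usage sketch (editor's illustration, not part of the commit): the struct and functions below are hypothetical and simply mirror the writeback.c conversion further down in the diff; bch_btree_map_keys(), bch_btree_op_init_stack(), KEY() and the MAP_* return codes are the real interface this patch adds or already relies on.

/*
 * Hypothetical example: count the dirty sectors belonging to one inode
 * by walking leaf keys with the new bch_btree_map_keys() interface.
 */
struct dirty_count {
        struct btree_op op;     /* embedded so the callback can recover it with container_of() */
        uint64_t        inode;
        uint64_t        sectors;
};

static int count_dirty_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct dirty_count *d = container_of(op, struct dirty_count, op);

        if (KEY_INODE(k) > d->inode)
                return MAP_DONE;        /* walked past this inode - stop */

        if (KEY_DIRTY(k))
                d->sectors += KEY_SIZE(k);

        return MAP_CONTINUE;            /* keep going to the next key */
}

static uint64_t count_dirty_sectors(struct cache_set *c, uint64_t inode)
{
        struct dirty_count d = { .inode = inode, .sectors = 0 };

        bch_btree_op_init_stack(&d.op);

        /* Walk leaf keys starting at inode:0:0; flags == 0, so no MAP_END_KEY. */
        bch_btree_map_keys(&d.op, c, &KEY(inode, 0, 0), count_dirty_fn, 0);

        return d.sectors;
}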
@@ -384,8 +384,6 @@ struct keybuf_key {
         void            *private;
 };
 
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
 struct keybuf {
         struct bkey     last_scanned;
         spinlock_t      lock;
@@ -842,6 +842,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 /* Btree iterator */
 
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
 static inline bool btree_iter_cmp(struct btree_iter_set l,
                                   struct btree_iter_set r)
 {
@@ -1146,16 +1153,16 @@ void bch_btree_sort_lazy(struct btree *b)
 /* Sysfs stuff */
 
 struct bset_stats {
+        struct btree_op op;
         size_t nodes;
         size_t sets_written, sets_unwritten;
         size_t bytes_written, bytes_unwritten;
         size_t floats, failed;
 };
 
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
-                                struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
 {
-        struct bkey *k;
+        struct bset_stats *stats = container_of(op, struct bset_stats, op);
         unsigned i;
 
         stats->nodes++;
@@ -1180,30 +1187,20 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
                 }
         }
 
-        if (b->level) {
-                struct btree_iter iter;
-
-                for_each_key_filter(b, k, &iter, bch_ptr_bad) {
-                        int ret = btree(bset_stats, k, b, op, stats);
-                        if (ret)
-                                return ret;
-                }
-        }
-
-        return 0;
+        return MAP_CONTINUE;
 }
 
 int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
-        struct btree_op op;
         struct bset_stats t;
         int ret;
 
-        bch_btree_op_init_stack(&op);
         memset(&t, 0, sizeof(struct bset_stats));
+        bch_btree_op_init_stack(&t.op);
+        t.op.c = c;
 
-        ret = btree_root(bset_stats, c, &op, &t);
-        if (ret)
+        ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+        if (ret < 0)
                 return ret;
 
         return snprintf(buf, PAGE_SIZE,
@@ -2296,6 +2296,82 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
         return ret;
 }
 
+/* Map across nodes or keys */
+
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+                                       struct bkey *from,
+                                       btree_map_nodes_fn *fn, int flags)
+{
+        int ret = MAP_CONTINUE;
+
+        if (b->level) {
+                struct bkey *k;
+                struct btree_iter iter;
+
+                bch_btree_iter_init(b, &iter, from);
+
+                while ((k = bch_btree_iter_next_filter(&iter, b,
+                                                       bch_ptr_bad))) {
+                        ret = btree(map_nodes_recurse, k, b,
+                                    op, from, fn, flags);
+                        from = NULL;
+
+                        if (ret != MAP_CONTINUE)
+                                return ret;
+                }
+        }
+
+        if (!b->level || flags == MAP_ALL_NODES)
+                ret = fn(op, b);
+
+        return ret;
+}
+
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                          struct bkey *from, btree_map_nodes_fn *fn, int flags)
+{
+        int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
+        if (closure_blocking(&op->cl))
+                closure_sync(&op->cl);
+        return ret;
+}
+
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+                                      struct bkey *from, btree_map_keys_fn *fn,
+                                      int flags)
+{
+        int ret = MAP_CONTINUE;
+        struct bkey *k;
+        struct btree_iter iter;
+
+        bch_btree_iter_init(b, &iter, from);
+
+        while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+                ret = !b->level
+                        ? fn(op, b, k)
+                        : btree(map_keys_recurse, k, b, op, from, fn, flags);
+                from = NULL;
+
+                if (ret != MAP_CONTINUE)
+                        return ret;
+        }
+
+        if (!b->level && (flags & MAP_END_KEY))
+                ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+                                     KEY_OFFSET(&b->key), 0));
+
+        return ret;
+}
+
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+                       struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+        int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
+        if (closure_blocking(&op->cl))
+                closure_sync(&op->cl);
+        return ret;
+}
+
 /* Keybuf code */
 
 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2314,74 +2390,70 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
 }
 
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-                                   struct keybuf *buf, struct bkey *end,
-                                   keybuf_pred_fn *pred)
-{
-        struct btree_iter iter;
-        bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
-        while (!array_freelist_empty(&buf->freelist)) {
-                struct bkey *k = bch_btree_iter_next_filter(&iter, b,
-                                                            bch_ptr_bad);
-
-                if (!b->level) {
-                        if (!k) {
-                                buf->last_scanned = b->key;
-                                break;
-                        }
-
-                        buf->last_scanned = *k;
-                        if (bkey_cmp(&buf->last_scanned, end) >= 0)
-                                break;
-
-                        if (pred(buf, k)) {
-                                struct keybuf_key *w;
-
-                                spin_lock(&buf->lock);
-
-                                w = array_alloc(&buf->freelist);
-
-                                w->private = NULL;
-                                bkey_copy(&w->key, k);
-
-                                if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
-                                        array_free(&buf->freelist, w);
-
-                                spin_unlock(&buf->lock);
-                        }
-                } else {
-                        if (!k)
-                                break;
-
-                        btree(refill_keybuf, k, b, op, buf, end, pred);
-                        /*
-                         * Might get an error here, but can't really do anything
-                         * and it'll get logged elsewhere. Just read what we
-                         * can.
-                         */
-
-                        if (bkey_cmp(&buf->last_scanned, end) >= 0)
-                                break;
-
-                        cond_resched();
-                }
-        }
-
-        return 0;
-}
+struct refill {
+        struct btree_op op;
+        struct keybuf   *buf;
+        struct bkey     *end;
+        keybuf_pred_fn  *pred;
+};
+
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+                            struct bkey *k)
+{
+        struct refill *refill = container_of(op, struct refill, op);
+        struct keybuf *buf = refill->buf;
+        int ret = MAP_CONTINUE;
+
+        if (bkey_cmp(k, refill->end) >= 0) {
+                ret = MAP_DONE;
+                goto out;
+        }
+
+        if (!KEY_SIZE(k)) /* end key */
+                goto out;
+
+        if (refill->pred(buf, k)) {
+                struct keybuf_key *w;
+
+                spin_lock(&buf->lock);
+
+                w = array_alloc(&buf->freelist);
+                if (!w) {
+                        spin_unlock(&buf->lock);
+                        return MAP_DONE;
+                }
+
+                w->private = NULL;
+                bkey_copy(&w->key, k);
+
+                if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+                        array_free(&buf->freelist, w);
+
+                if (array_freelist_empty(&buf->freelist))
+                        ret = MAP_DONE;
+
+                spin_unlock(&buf->lock);
+        }
+out:
+        buf->last_scanned = *k;
+        return ret;
+}
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
                        struct bkey *end, keybuf_pred_fn *pred)
 {
         struct bkey start = buf->last_scanned;
-        struct btree_op op;
-        bch_btree_op_init_stack(&op);
+        struct refill refill;
 
         cond_resched();
 
-        btree_root(refill_keybuf, c, &op, buf, end, pred);
-        closure_sync(&op.cl);
+        bch_btree_op_init_stack(&refill.op);
+        refill.buf = buf;
+        refill.end = end;
+        refill.pred = pred;
+
+        bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+                           refill_keybuf_fn, MAP_END_KEY);
 
         pr_debug("found %s keys from %llu:%llu to %llu:%llu",
                  RB_EMPTY_ROOT(&buf->keys) ? "no" :
@@ -2465,9 +2537,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 }
 
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
                                           struct keybuf *buf,
                                           struct bkey *end,
                                           keybuf_pred_fn *pred)
 {
         struct keybuf_key *ret;
@@ -400,9 +400,42 @@ static inline void wake_up_gc(struct cache_set *c)
                 wake_up_process(c->gc_thread);
 }
 
+#define MAP_DONE        0
+#define MAP_CONTINUE    1
+
+#define MAP_ALL_NODES   0
+#define MAP_LEAF_NODES  1
+
+#define MAP_END_KEY     1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+                          struct bkey *, btree_map_nodes_fn *, int);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                                      struct bkey *from, btree_map_nodes_fn *fn)
+{
+        return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+                                           struct cache_set *c,
+                                           struct bkey *from,
+                                           btree_map_nodes_fn *fn)
+{
+        return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+                                struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+                       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-                       keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+                       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
                                   struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
@@ -433,31 +433,17 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-                                        struct cached_dev *dc)
+static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+                                 struct bkey *k)
 {
-        struct bkey *k;
-        struct btree_iter iter;
-
-        bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-        while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-                if (!b->level) {
-                        if (KEY_INODE(k) > dc->disk.id)
-                                break;
-
-                        if (KEY_DIRTY(k))
-                                bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-                                                             KEY_START(k),
-                                                             KEY_SIZE(k));
-                } else {
-                        btree(sectors_dirty_init, k, b, op, dc);
-                        if (KEY_INODE(k) > dc->disk.id)
-                                break;
-
-                        cond_resched();
-                }
-
-        return 0;
+        if (KEY_INODE(k) > op->inode)
+                return MAP_DONE;
+
+        if (KEY_DIRTY(k))
+                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+                                             KEY_START(k), KEY_SIZE(k));
+
+        return MAP_CONTINUE;
 }
 
 void bch_sectors_dirty_init(struct cached_dev *dc)
@@ -465,7 +451,10 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 {
         struct btree_op op;
         bch_btree_op_init_stack(&op);
-        btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+        op.inode = dc->disk.id;
+
+        bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+                           sectors_dirty_init_fn, 0);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)
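
The node-level variant works the same way. Below is a second hypothetical sketch (again not part of the commit) mirroring the bset_stats conversion above: the callback runs once per btree node and returns MAP_CONTINUE to keep walking; bch_btree_map_nodes() visits interior nodes as well as leaves (MAP_ALL_NODES), while bch_btree_map_leaf_nodes() restricts the walk to leaves (MAP_LEAF_NODES).

/*
 * Hypothetical example: count btree nodes.  The struct and callback are
 * invented; the mapping helpers and MAP_* values are the ones declared
 * in the btree.h hunk above.
 */
struct node_count {
        struct btree_op op;
        size_t          nodes;
};

static int count_nodes_fn(struct btree_op *op, struct btree *b)
{
        struct node_count *count = container_of(op, struct node_count, op);

        count->nodes++;
        return MAP_CONTINUE;    /* never stop early - walk the whole tree */
}

static size_t count_btree_nodes(struct cache_set *c)
{
        struct node_count count = { .nodes = 0 };

        bch_btree_op_init_stack(&count.op);
        count.op.c = c;         /* as bch_bset_print_stats() does above */

        /* bch_btree_map_nodes() == __bch_btree_map_nodes(..., MAP_ALL_NODES) */
        bch_btree_map_nodes(&count.op, c, &ZERO_KEY, count_nodes_fn);

        return count.nodes;
}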