Commit c2e8dcf7 authored by Coly Li, committed by Jens Axboe

bcache: avoid unnecessary cache prefetch in bch_btree_node_get()

In bch_btree_node_get() the read-in btree node will be partially
prefetched into L1 cache for the following bset iteration (if there
is one). But if the btree node read fails, the prefetch operations
will waste L1 cache space. This patch checks the read operation
status first, and only does the cache prefetch when the read I/O
succeeded.
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b4cb6efc
@@ -1011,6 +1011,13 @@ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
 		BUG_ON(b->level != level);
 	}
 
+	if (btree_node_io_error(b)) {
+		rw_unlock(write, b);
+		return ERR_PTR(-EIO);
+	}
+
+	BUG_ON(!b->written);
+
 	b->parent = parent;
 	b->accessed = 1;
 
@@ -1022,13 +1029,6 @@ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
 	for (; i <= b->keys.nsets; i++)
 		prefetch(b->keys.set[i].data);
 
-	if (btree_node_io_error(b)) {
-		rw_unlock(write, b);
-		return ERR_PTR(-EIO);
-	}
-
-	BUG_ON(!b->written);
-
 	return b;
 }
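The prefetch() call in this function is the kernel's cache-prefetch hint, which on most architectures reduces to the compiler's __builtin_prefetch(). As a minimal userspace sketch of the pattern this patch enforces (check the read result before issuing any prefetches), consider the following; struct node, read_node() and NSETS are illustrative stand-ins for this sketch, not bcache code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSETS 4
#define SET_BYTES 4096

struct node {
	int io_error;              /* set when the backing read failed */
	char *set_data[NSETS];     /* per-bset payload, like b->keys.set[i].data */
};

/* Illustrative stand-in for reading a node from disk; may fail. */
static int read_node(struct node *n)
{
	for (int i = 0; i < NSETS; i++) {
		n->set_data[i] = malloc(SET_BYTES);
		if (n->set_data[i] == NULL) {
			n->io_error = 1;
			return -1;
		}
		memset(n->set_data[i], 0, SET_BYTES);
	}
	return 0;
}

int main(void)
{
	struct node n = { 0 };

	/*
	 * Check the read status first, as the patch does with
	 * btree_node_io_error(): prefetching data that will never
	 * be iterated only evicts useful lines from L1.
	 */
	if (read_node(&n) < 0 || n.io_error) {
		fprintf(stderr, "node read failed, skipping prefetch\n");
		return 1;
	}

	/* Only after a successful read: warm the cache for the
	 * iteration that follows. */
	for (int i = 0; i < NSETS; i++)
		__builtin_prefetch(n.set_data[i]);

	/* ... the bset iteration would walk n.set_data[] here ... */

	for (int i = 0; i < NSETS; i++)
		free(n.set_data[i]);
	return 0;
}

The point of the reordering is simply that a prefetch is a side effect on the CPU cache: issuing it for a node that is about to be discarded with -EIO evicts lines that useful work could have kept.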