Commit f412f2c6 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull second round of block driver updates from Jens Axboe:
 "As mentioned in the original pull request, the bcache bits were pulled
  because of their dependency on the immutable bio vecs.  Kent re-did
  this part and resubmitted it, so here's the 2nd round of (mostly)
  driver updates for 3.13.  It contains:

 - The bcache work from Kent.

 - Conversion of virtio-blk to blk-mq.  This removes the bio and request
   path, and substitutes with the blk-mq path instead.  The end result
   almost 200 deleted lines.  Patch is acked by Asias and Christoph, who
   both did a bunch of testing.

 - A removal of bootmem.h include from Grygorii Strashko, part of a
   larger series of his killing the dependency on that header file.

 - Removal of __cpuinit from blk-mq from Paul Gortmaker"

* 'for-linus' of git://git.kernel.dk/linux-block: (56 commits)
  virtio_blk: blk-mq support
  blk-mq: remove newly added instances of __cpuinit
  bcache: defensively handle format strings
  bcache: Bypass torture test
  bcache: Delete some slower inline asm
  bcache: Use ida for bcache block dev minor
  bcache: Fix sysfs splat on shutdown with flash only devs
  bcache: Better full stripe scanning
  bcache: Have btree_split() insert into parent directly
  bcache: Move spinlock into struct time_stats
  bcache: Kill sequential_merge option
  bcache: Kill bch_next_recurse_key()
  bcache: Avoid deadlocking in garbage collection
  bcache: Incremental gc
  bcache: Add make_btree_freeing_key()
  bcache: Add btree_node_write_sync()
  bcache: PRECEDING_KEY()
  bcache: bch_(btree|extent)_ptr_invalid()
  bcache: Don't bother with bucket refcount for btree node allocations
  bcache: Debug code improvements
  ...
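The virtio-blk item above replaces the driver's two submission paths (its own bio handling and the legacy request_fn path) with a single blk-mq ->queue_rq() callback that the block layer invokes per request. Purely as an illustration of that model: the sketch below uses today's blk_mq_tag_set registration interface, which postdates this merge (the 3.13 patch used the older blk_mq_reg-based registration), and every demo_* identifier is invented.

/*
 * Illustrative sketch of the blk-mq driver shape, not the virtio_blk
 * code from this merge. Uses the current blk_mq_tag_set interface;
 * all demo_* identifiers are hypothetical.
 */
#include <linux/blk-mq.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* A real driver would map the request's segments and issue them
	 * to hardware; this stub simply completes the request. */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
	.queue_rq	= demo_queue_rq,
};

static struct blk_mq_tag_set demo_tag_set = {
	.ops		= &demo_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
};

static struct request_queue *demo_create_queue(void)
{
	if (blk_mq_alloc_tag_set(&demo_tag_set))
		return NULL;
	return blk_mq_init_queue(&demo_tag_set);	/* may return ERR_PTR */
}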
parents cd1177f2 1cf7e9c6
@@ -6,7 +6,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 #include <linux/slab.h>
 
 #include "blk.h"
@@ -13,8 +13,8 @@
 static LIST_HEAD(blk_mq_cpu_notify_list);
 static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
 
-static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
+static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
@@ -28,8 +28,8 @@ static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action,
+static void blk_mq_cpu_notify(void *data, unsigned long action,
 			      unsigned int cpu)
 {
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		/*
@@ -1444,7 +1444,7 @@ void blk_mq_free_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_mq_free_queue);
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q)
 {
 	blk_mq_freeze_queue(q);
 
@@ -1461,8 +1461,8 @@ static void __cpuinit blk_mq_queue_reinit(struct request_queue *q)
 	blk_mq_unfreeze_queue(q);
 }
 
-static int __cpuinit blk_mq_queue_reinit_notify(struct notifier_block *nb,
+static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 				      unsigned long action, void *hcpu)
 {
 	struct request_queue *q;
This diff is collapsed.
@@ -13,15 +13,8 @@
 	---help---
 	Don't select this option unless you're a developer
 
-	Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
-	bool "Extended runtime checks"
-	depends on BCACHE
-	---help---
-	Don't select this option unless you're a developer
-
-	Enables extra runtime checks which significantly affect performance
+	Enables extra debugging tools, allows expensive runtime checks to be
+	turned on.
 
 config BCACHE_CLOSURES_DEBUG
 	bool "Debug closures"
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -148,6 +148,9 @@
 struct btree_iter {
 	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree *b;
+#endif
 	struct btree_iter_set {
 		struct bkey *k, *end;
 	} data[MAX_BSETS];
@@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
 		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
 }
 
-static inline size_t bkey_u64s(const struct bkey *k)
-{
-	BUG_ON(KEY_CSUM(k) > 1);
-	return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
-}
-
-static inline size_t bkey_bytes(const struct bkey *k)
-{
-	return bkey_u64s(k) * sizeof(uint64_t);
-}
-
-static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
-{
-	memcpy(dest, src, bkey_bytes(src));
-}
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
-	if (!src)
-		src = &KEY(0, 0, 0);
-
-	SET_KEY_INODE(dest, KEY_INODE(src));
-	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
-	uint64_t *d = (void *) k;
-
-	return (struct bkey *) (d + bkey_u64s(k));
-}
-
 /* Keylists */
 
 struct keylist {
-	struct bkey		*top;
 	union {
-		uint64_t		*list;
-		struct bkey		*bottom;
+		struct bkey		*keys;
+		uint64_t		*keys_p;
+	};
+	union {
+		struct bkey		*top;
+		uint64_t		*top_p;
 	};
 
 	/* Enough room for btree_split's keys without realloc */
 #define KEYLIST_INLINE		16
-	uint64_t		d[KEYLIST_INLINE];
+	uint64_t		inline_keys[KEYLIST_INLINE];
 };
 
 static inline void bch_keylist_init(struct keylist *l)
 {
-	l->top = (void *) (l->list = l->d);
+	l->top_p = l->keys_p = l->inline_keys;
 }
 
 static inline void bch_keylist_push(struct keylist *l)
@@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
 static inline bool bch_keylist_empty(struct keylist *l)
 {
-	return l->top == (void *) l->list;
+	return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+	l->top = l->keys;
 }
 
 static inline void bch_keylist_free(struct keylist *l)
 {
-	if (l->list != l->d)
-		kfree(l->list);
+	if (l->keys_p != l->inline_keys)
+		kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+	return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+	return bch_keylist_nkeys(l) * sizeof(uint64_t);
 }
 
-void bch_keylist_copy(struct keylist *, struct keylist *);
 struct bkey *bch_keylist_pop(struct keylist *);
+void bch_keylist_pop_front(struct keylist *);
 int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
 
 void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@@ -287,7 +277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
 }
 
 const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+
 bool bch_ptr_bad(struct btree *, const struct bkey *);
 
 static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 
 typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
 
-struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
 struct bkey *bch_btree_iter_next(struct btree_iter *);
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
 					struct btree *, ptr_filter_fn);
@@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
 struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
 			       const struct bkey *);
 
+/*
+ * Returns the first key that is strictly greater than search
+ */
 static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
 					   const struct bkey *search)
 {
 	return search ? __bch_bset_search(b, t, search) : t->data->start;
 }
 
+#define PRECEDING_KEY(_k)					\
+({								\
+	struct bkey *_ret = NULL;				\
+								\
+	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
+		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
+								\
+		if (!_ret->low)					\
+			_ret->high--;				\
+		_ret->low--;					\
+	}							\
+								\
+	_ret;							\
+})
+
 bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
 void bch_btree_sort_lazy(struct btree *);
 void bch_btree_sort_into(struct btree *, struct btree *);
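One note on the PRECEDING_KEY() macro added above: given a key with a non-zero (inode, offset), it builds KEY(inode, offset, 0) and then subtracts one from the packed value, borrowing from the bkey's high word when the low word (the offset) is zero; for the all-zero key it yields NULL, since nothing can precede it. A self-contained sketch of the same borrow arithmetic, using made-up stand-in fields rather than the real struct bkey layout:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two 64-bit words of a bkey; the field names are
 * illustrative, not the real struct bkey from bset.h. */
struct demo_key {
	uint64_t high;	/* packs the inode (among other fields) */
	uint64_t low;	/* the offset */
};

/* Same borrow logic as PRECEDING_KEY(): decrement the wide value
 * (high:low) by one, borrowing from high when low is zero. */
static void demo_preceding(struct demo_key *k)
{
	if (!k->low)
		k->high--;
	k->low--;
}

int main(void)
{
	struct demo_key k = { .high = 5, .low = 0 };

	demo_preceding(&k);
	/* Prints high=4 low=18446744073709551615: the largest value
	 * that still sorts strictly before the original key. */
	printf("high=%llu low=%llu\n",
	       (unsigned long long)k.high, (unsigned long long)k.low);
	return 0;
}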
This diff is collapsed.
@@ -125,6 +125,7 @@ struct btree {
 	unsigned long		seq;
 	struct rw_semaphore	lock;
 	struct cache_set	*c;
+	struct btree		*parent;
 
 	unsigned long		flags;
 	uint16_t		written;	/* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
 
 static inline void set_gc_sectors(struct cache_set *c)
 {
-	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-	return __bch_ptr_invalid(b->c, b->level, k);
+	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
 static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 	return __bch_btree_iter_init(b, iter, search, b->sets);
 }
 
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+	if (b->level)
+		return bch_btree_ptr_invalid(b->c, k);
+	else
+		return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
 /* Looping macros */
 
 #define for_each_cached_btree(b, c, iter)				\
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure		cl;
-	struct cache_set	*c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t		*journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio		*cache_bio;
-
-	unsigned		inode;
-	uint16_t		write_prio;
-
 	/* Btree level at which we start taking write locks */
 	short			lock;
 
-	/* Btree insertion type */
-	enum {
-		BTREE_INSERT,
-		BTREE_REPLACE
-	} type:8;
-
-	unsigned		csum:1;
-	unsigned		skip:1;
-	unsigned		flush_journal:1;
-
-	unsigned		insert_data_done:1;
-	unsigned		lookup_done:1;
 	unsigned		insert_collision:1;
-
-	/* Anything after this point won't get zeroed in do_bio_hook() */
-
-	/* Keys to be inserted */
-	struct keylist		keys;
-	BKEY_PADDED(replace);
 };
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
 
 static inline void rw_unlock(bool w, struct btree *b)
 {
-#ifdef CONFIG_BCACHE_EDEBUG
-	unsigned i;
-
-	if (w && b->key.ptr[0])
-		for (i = 0; i <= b->nsets; i++)
-			bch_check_key_order(b, b->sets[i].data);
-#endif
-
 	if (w)
 		b->seq++;
 	(w ? up_write : up_read)(&b->lock);
 }
 
-#define insert_lock(s, b)	((b)->level <= (s)->lock)
-
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
-
-/**
- * btree - recurse down the btree on a specified key
- * @fn:		function to call, which will be passed the child node
- * @key:	key to recurse on
- * @b:		parent btree node
- * @op:		pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...)					\
-({									\
-	int _r, l = (b)->level - 1;					\
-	bool _w = l <= (op)->lock;					\
-	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\
-	if (!IS_ERR(_b)) {						\
-		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\
-		rw_unlock(_w, _b);					\
-	} else								\
-		_r = PTR_ERR(_b);					\
-	_r;								\
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn:		function to call, which will be passed the child node
- * @c:		cache set
- * @op:		pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...)					\
-({									\
-	int _r = -EINTR;						\
-	do {								\
-		struct btree *_b = (c)->root;				\
-		bool _w = insert_lock(op, _b);				\
-		rw_lock(_w, _b, _b->level);				\
-		if (_b == (c)->root &&					\
-		    _w == insert_lock(op, _b))				\
-			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
-		rw_unlock(_w, _b);					\
-		bch_cannibalize_unlock(c, &(op)->cl);			\
-	} while (_r == -EINTR);						\
-									\
-	_r;								\
-})
-
-static inline bool should_split(struct btree *b)
-{
-	struct bset *i = write_block(b);
-	return b->written >= btree_blocks(b) ||
-		(i->seq == b->sets[0].data->seq &&
-		 b->written + __set_blocks(i, i->keys + 15, b->c)
-		 > btree_blocks(b));
-}
-
 void bch_btree_node_read(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
 void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
-				 int, struct btree_op *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
-				struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+			       struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+		     atomic_t *, struct bkey *);
 
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
-
-void bch_queue_gc(struct cache_set *);
+int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
+static inline void wake_up_gc(struct cache_set *c)
+{
+	if (c->gc_thread)
+		wake_up_process(c->gc_thread);
+}
+
+#define MAP_DONE	0
+#define MAP_CONTINUE	1
+
+#define MAP_ALL_NODES	0
+#define MAP_LEAF_NODES	1
+
+#define MAP_END_KEY	1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+			  struct bkey *, btree_map_nodes_fn *, int);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+				      struct bkey *from, btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
				struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
		       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-		       keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+		       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
 				  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
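The btree.h changes above retire the btree()/btree_root() recursion macros and the heavyweight btree_op in favour of bch_btree_op_init() plus the bch_btree_map_nodes()/bch_btree_map_keys() walkers, whose callbacks return MAP_CONTINUE to keep going or MAP_DONE to stop. A hedged sketch of how a caller might use the declarations shown above; the callback body and all demo_* names are invented for illustration.

/*
 * Illustrative only: bch_btree_op_init(), bch_btree_map_nodes(),
 * MAP_CONTINUE and MAP_DONE come from the btree.h diff above;
 * everything named demo_* is hypothetical.
 */
static int demo_visit_node(struct btree_op *op, struct btree *b)
{
	/* Inspect the node here; a real callback might mark keys,
	 * count sectors, or refill a keybuf. */
	return MAP_CONTINUE;	/* keep walking; MAP_DONE would stop */
}

static int demo_walk_btree(struct cache_set *c)
{
	struct btree_op op;

	/* op->lock is the btree level at which write locks start being
	 * taken; 0 write-locks only the leaves. */
	bch_btree_op_init(&op, 0);

	/* A NULL starting key means "from the beginning" (per
	 * bch_bset_search() above, a NULL search starts at the first key). */
	return bch_btree_map_nodes(&op, c, NULL, demo_visit_node);
}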
@@ -11,17 +11,6 @@
 
 #include "closure.h"
 
-void closure_queue(struct closure *cl)
-{
-	struct workqueue_struct *wq = cl->wq;
-	if (wq) {
-		INIT_WORK(&cl->work, cl->work.func);
-		BUG_ON(!queue_work(wq, &cl->work));
-	} else
-		cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
 #define CL_FIELD(type, field)					\
 	case TYPE_ ## type:					\
 	return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
 {
 	switch (cl->type) {
 		CL_FIELD(closure_with_waitlist, wait);
-		CL_FIELD(closure_with_waitlist_and_timer, wait);
-	default:
-		return NULL;
-	}
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
-	switch (cl->type) {
-		CL_FIELD(closure_with_timer, timer);
-		CL_FIELD(closure_with_waitlist_and_timer, timer);
 	default:
 		return NULL;
 	}
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 	int r = flags & CLOSURE_REMAINING_MASK;
 
 	BUG_ON(flags & CLOSURE_GUARD_MASK);
-	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
 
 	/* Must deliver precisely one wakeup */
 	if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 
 	if (!r) {
 		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
-			/* CLOSURE_BLOCKING might be set - clear it */
 			atomic_set(&cl->remaining,
 				   CLOSURE_REMAINING_INITIALIZER);
 			closure_queue(cl);
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
 {
 	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
 }
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
 
 void closure_put(struct closure *cl)
 {
 	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
 }
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
 
 static void set_waiting(struct closure *cl, unsigned long f)
 {
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
 }
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
 
 bool closure_wait(struct closure_waitlist *list, struct closure *cl)
 {
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
 
 /**
  * closure_sync() - sleep until a closure a closure has nothing left to wait on
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
 
 	__closure_end_sleep(cl);
 }
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
 
 /**
  * closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
 			   CLOSURE_REMAINING_INITIALIZER) != -1)
 		return false;
 
-	closure_set_ret_ip(cl);
-
 	smp_mb();
 	cl->parent = parent;
 	if (parent)
 		closure_get(parent);
 
+	closure_set_ret_ip(cl);
 	closure_debug_create(cl);
 	return true;
 }
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
 
 void __closure_lock(struct closure *cl, struct closure *parent,
 		    struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
 		if (closure_trylock(cl, parent))
 			return;
 
-		closure_wait_event_sync(wait_list, &wait,
+		closure_wait_event(wait_list, &wait,
 				   atomic_read(&cl->remaining) == -1);
 	}
 }
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
-	struct closure *cl = (struct closure *) data;
-
-	closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
-	struct timer_list *timer = closure_timer(cl);
-
-	init_timer(timer);
-	timer->data	= (unsigned long) cl;
-	timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
-		     struct timer_list *timer)
-{
-	if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
-		return false;
-
-	BUG_ON(timer_pending(timer));
-
-	timer->expires = jiffies + delay;
-
-	atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
-	add_timer(timer);
-	return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
-	if (del_timer(timer))
-		closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
-	if (del_timer_sync(timer))
-		closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
 
 #ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
 	list_add(&cl->all, &closure_list);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
 
 void closure_debug_destroy(struct closure *cl)
 {
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
 	list_del(&cl->all);
 	spin_unlock_irqrestore(&closure_list_lock, flags);
 }
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
 
 static struct dentry *debug;
 
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
 		   cl, (void *) cl->ip, cl->fn, cl->parent,
 		   r & CLOSURE_REMAINING_MASK);
 
-	seq_printf(f, "%s%s%s%s%s%s\n",
+	seq_printf(f, "%s%s%s%s\n",
 		   test_bit(WORK_STRUCT_PENDING,
 			    work_data_bits(&cl->work)) ? "Q" : "",
 		   r & CLOSURE_RUNNING	? "R" : "",
-		   r & CLOSURE_BLOCKING	? "B" : "",
 		   r & CLOSURE_STACK	? "S" : "",
-		   r & CLOSURE_SLEEPING	? "Sl" : "",
-		   r & CLOSURE_TIMER	? "T" : "");
+		   r & CLOSURE_SLEEPING	? "Sl" : "");
 
 	if (r & CLOSURE_WAITING)
 		seq_printf(f, " W %pF\n",
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#include "bcache.h" #include "bcache.h"
#include "btree.h" #include "btree.h"
#include "request.h"
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include <linux/module.h> #include <linux/module.h>
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.