Commit d4e3b928 authored by Kent Overstreet

closures: CLOSURE_CALLBACK() to fix type punning

Control flow integrity is now checking that type signatures match on
indirect function calls. That breaks closures, which embed a work_struct
in a closure in such a way that a closure_fn may also be used as a
workqueue fn by the underlying closure code.

So we have to change closure fns to take a work_struct as their
argument - but that results in a loss of clarity, as closure fns have
different semantics from normal workqueue functions (they run owning a
ref on the closure, which must be released with continue_at() or
closure_return()).

Thus, this patch introduces CLOSURE_CALLBACK() and closure_type() macros
as suggested by Kees, to smooth things over a bit.
Suggested-by: Kees Cook <keescook@chromium.org>
Cc: Coly Li <colyli@suse.de>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 98b1cc82
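
The conversion is mechanical at each call site. A minimal sketch of the
before/after pattern, using the CLOSURE_CALLBACK() and closure_type()
macros defined near the end of this diff; struct example and its cl
member are illustrative placeholders, not names from this patch:

	/* Before: the closure fn takes the closure itself; using it as a
	 * workqueue fn relies on type punning, which trips CFI. */
	static void example_free(struct closure *cl)
	{
		struct example *e = container_of(cl, struct example, cl);

		kfree(e);
	}

	/* After: the fn's actual argument is the embedded work_struct;
	 * closure_type() recovers the closure and its container. */
	static CLOSURE_CALLBACK(example_free)
	{
		closure_type(e, struct example, cl);

		kfree(e);
	}

Callers that invoke a closure fn directly now pass &cl->work, as the
converted call sites below show.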
@@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
 	w->journal = NULL;
 }
 
-static void btree_node_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_unlock)
 {
-	struct btree *b = container_of(cl, struct btree, io);
+	closure_type(b, struct btree, io);
 
 	up(&b->io_mutex);
 }
 
-static void __btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(__btree_node_write_done)
 {
-	struct btree *b = container_of(cl, struct btree, io);
+	closure_type(b, struct btree, io);
 	struct btree_write *w = btree_prev_write(b);
 
 	bch_bbio_free(b->bio, b->c);
@@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
 	closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
-static void btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_done)
 {
-	struct btree *b = container_of(cl, struct btree, io);
+	closure_type(b, struct btree, io);
 
 	bio_free_pages(b->bio);
-	__btree_node_write_done(cl);
+	__btree_node_write_done(&cl->work);
 }
 
 static void btree_node_write_endio(struct bio *bio)
...
@@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
 	closure_put(&w->c->journal.io);
 }
 
-static void journal_write(struct closure *cl);
+static CLOSURE_CALLBACK(journal_write);
 
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
 {
-	struct journal *j = container_of(cl, struct journal, io);
+	closure_type(j, struct journal, io);
 	struct journal_write *w = (j->cur == j->w)
 		? &j->w[1]
 		: &j->w[0];
@@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
 	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
-static void journal_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlock)
 	__releases(&c->journal.lock)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+	closure_type(c, struct cache_set, journal.io);
 
 	c->journal.io_in_flight = 0;
 	spin_unlock(&c->journal.lock);
 }
 
-static void journal_write_unlocked(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlocked)
 	__releases(c->journal.lock)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+	closure_type(c, struct cache_set, journal.io);
 	struct cache *ca = c->cache;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
@@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
 	continue_at(cl, journal_write_done, NULL);
 }
 
-static void journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+	closure_type(c, struct cache_set, journal.io);
 
 	spin_lock(&c->journal.lock);
-	journal_write_unlocked(cl);
+	journal_write_unlocked(&cl->work);
 }
 
 static void journal_try_write(struct cache_set *c)
...
@@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 
 /* Moving GC - IO loop */
 
-static void moving_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(moving_io_destructor)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	closure_type(io, struct moving_io, cl);
 
 	kfree(io);
 }
 
-static void write_moving_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving_finish)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	closure_type(io, struct moving_io, cl);
 	struct bio *bio = &io->bio.bio;
 
 	bio_free_pages(bio);
@@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
 	bch_bio_map(bio, NULL);
 }
 
-static void write_moving(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	closure_type(io, struct moving_io, cl);
 	struct data_insert_op *op = &io->op;
 
 	if (!op->status) {
@@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
 	continue_at(cl, write_moving_finish, op->wq);
 }
 
-static void read_moving_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_moving_submit)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	closure_type(io, struct moving_io, cl);
 	struct bio *bio = &io->bio.bio;
 
 	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
...
@@ -25,7 +25,7 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void bch_data_insert_start(struct closure *cl);
+static CLOSURE_CALLBACK(bch_data_insert_start);
 
 static unsigned int cache_mode(struct cached_dev *dc)
 {
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 /* Insert data into cache */
 
-static void bch_data_insert_keys(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_keys)
 {
-	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	closure_type(op, struct data_insert_op, cl);
 	atomic_t *journal_ref = NULL;
 	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
 	int ret;
@@ -136,9 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
 	continue_at(cl, bch_data_insert_keys, op->wq);
 }
 
-static void bch_data_insert_error(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_error)
 {
-	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	closure_type(op, struct data_insert_op, cl);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
 
 	op->insert_keys.top = dst;
 
-	bch_data_insert_keys(cl);
+	bch_data_insert_keys(&cl->work);
 }
 
 static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
 	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
-static void bch_data_insert_start(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_start)
 {
-	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	closure_type(op, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
 	if (op->bypass)
@@ -305,16 +305,16 @@ static void bch_data_insert_start(struct closure *cl)
  * If op->bypass is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch_data_insert(struct closure *cl)
+CLOSURE_CALLBACK(bch_data_insert)
 {
-	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+	closure_type(op, struct data_insert_op, cl);
 
 	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);
 
 	bch_keylist_init(&op->insert_keys);
 	bio_get(op->bio);
-	bch_data_insert_start(cl);
+	bch_data_insert_start(&cl->work);
 }
 
 /*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	return n == bio ? MAP_DONE : MAP_CONTINUE;
 }
 
-static void cache_lookup(struct closure *cl)
+static CLOSURE_CALLBACK(cache_lookup)
 {
-	struct search *s = container_of(cl, struct search, iop.cl);
+	closure_type(s, struct search, iop.cl);
 	struct bio *bio = &s->bio.bio;
 	struct cached_dev *dc;
 	int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
 	bio_cnt_set(bio, 3);
 }
 
-static void search_free(struct closure *cl)
+static CLOSURE_CALLBACK(search_free)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 
 	atomic_dec(&s->iop.c->search_inflight);
@@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
 
 /* Cached devices */
 
-static void cached_dev_bio_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_bio_complete)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	cached_dev_put(dc);
-	search_free(cl);
+	search_free(&cl->work);
 }
 
 /* Process reads */
 
-static void cached_dev_read_error_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error_done)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 
 	if (s->iop.replace_collision)
 		bch_mark_cache_miss_collision(s->iop.c, s->d);
@@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
 	if (s->iop.bio)
 		bio_free_pages(s->iop.bio);
 
-	cached_dev_bio_complete(cl);
+	cached_dev_bio_complete(&cl->work);
 }
 
-static void cached_dev_read_error(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
 	/*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
 	continue_at(cl, cached_dev_read_error_done, NULL);
 }
 
-static void cached_dev_cache_miss_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct bcache_device *d = s->d;
 
 	if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
 	if (s->iop.bio)
 		bio_free_pages(s->iop.bio);
 
-	cached_dev_bio_complete(cl);
+	cached_dev_bio_complete(&cl->work);
 	closure_put(&d->cl);
 }
 
-static void cached_dev_read_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void cached_dev_read_done_bh(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done_bh)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 
 /* Process writes */
 
-static void cached_dev_write_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_write_complete)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	up_read_non_owner(&dc->writeback_lock);
-	cached_dev_bio_complete(cl);
+	cached_dev_bio_complete(&cl->work);
 }
 
 static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void cached_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_nodata)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
 	if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 	return MAP_CONTINUE;
 }
 
-static void flash_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_nodata)
 {
-	struct search *s = container_of(cl, struct search, cl);
+	closure_type(s, struct search, cl);
 
 	if (s->iop.flush_journal)
 		bch_journal_meta(s->iop.c, cl);
...
@@ -34,7 +34,7 @@ struct data_insert_op {
 };
 
 unsigned int bch_get_congested(const struct cache_set *c);
-void bch_data_insert(struct closure *cl);
+CLOSURE_CALLBACK(bch_data_insert);
 
 void bch_cached_dev_request_init(struct cached_dev *dc);
 void cached_dev_submit_bio(struct bio *bio);
...
@@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
 	submit_bio(bio);
 }
 
-static void bch_write_bdev_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+	closure_type(dc, struct cached_dev, sb_write);
 
 	up(&dc->sb_write_mutex);
 }
@@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
 	closure_put(&ca->set->sb_write);
 }
 
-static void bcache_write_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bcache_write_super_unlock)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+	closure_type(c, struct cache_set, sb_write);
 
 	up(&c->sb_write_mutex);
 }
@@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
 	closure_put(cl);
 }
 
-static void uuid_io_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(uuid_io_unlock)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+	closure_type(c, struct cache_set, uuid_write);
 
 	up(&c->uuid_write_mutex);
 }
@@ -1342,9 +1342,9 @@ void bch_cached_dev_release(struct kobject *kobj)
 	module_put(THIS_MODULE);
 }
 
-static void cached_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_free)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+	closure_type(dc, struct cached_dev, disk.cl);
 
 	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
 		cancel_writeback_rate_update_dwork(dc);
@@ -1376,9 +1376,9 @@ static void cached_dev_free(struct closure *cl)
 	kobject_put(&dc->disk.kobj);
 }
 
-static void cached_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_flush)
 {
-	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+	closure_type(dc, struct cached_dev, disk.cl);
 	struct bcache_device *d = &dc->disk;
 
 	mutex_lock(&bch_register_lock);
@@ -1497,9 +1497,9 @@ void bch_flash_dev_release(struct kobject *kobj)
 	kfree(d);
 }
 
-static void flash_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_free)
 {
-	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+	closure_type(d, struct bcache_device, cl);
 
 	mutex_lock(&bch_register_lock);
 	atomic_long_sub(bcache_dev_sectors_dirty(d),
@@ -1510,9 +1510,9 @@ static void flash_dev_free(struct closure *cl)
 	kobject_put(&d->kobj);
 }
 
-static void flash_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_flush)
 {
-	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+	closure_type(d, struct bcache_device, cl);
 
 	mutex_lock(&bch_register_lock);
 	bcache_device_unlink(d);
@@ -1668,9 +1668,9 @@ void bch_cache_set_release(struct kobject *kobj)
 	module_put(THIS_MODULE);
 }
 
-static void cache_set_free(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_free)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, cl);
+	closure_type(c, struct cache_set, cl);
 	struct cache *ca;
 
 	debugfs_remove(c->debug);
@@ -1709,9 +1709,9 @@ static void cache_set_free(struct closure *cl)
 	kobject_put(&c->kobj);
 }
 
-static void cache_set_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_flush)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	closure_type(c, struct cache_set, caching);
 	struct cache *ca = c->cache;
 	struct btree *b;
 
@@ -1806,9 +1806,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
 	}
 }
 
-static void __cache_set_unregister(struct closure *cl)
+static CLOSURE_CALLBACK(__cache_set_unregister)
 {
-	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	closure_type(c, struct cache_set, caching);
 	struct cached_dev *dc;
 	struct bcache_device *d;
 	size_t i;
...
@@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
 	bch_bio_map(bio, NULL);
 }
 
-static void dirty_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(dirty_io_destructor)
 {
-	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	closure_type(io, struct dirty_io, cl);
 
 	kfree(io);
 }
 
-static void write_dirty_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty_finish)
 {
-	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	closure_type(io, struct dirty_io, cl);
 	struct keybuf_key *w = io->bio.bi_private;
 	struct cached_dev *dc = io->dc;
 
@@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
 	closure_put(&io->cl);
 }
 
-static void write_dirty(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty)
 {
-	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	closure_type(io, struct dirty_io, cl);
 	struct keybuf_key *w = io->bio.bi_private;
 	struct cached_dev *dc = io->dc;
 
@@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
 	dirty_endio(bio);
 }
 
-static void read_dirty_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_dirty_submit)
 {
-	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	closure_type(io, struct dirty_io, cl);
 
 	closure_bio_submit(io->dc->disk.c, &io->bio, cl);
...
@@ -1358,10 +1358,9 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
 	return offset;
 }
 
-static void btree_node_read_all_replicas_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
 {
-	struct btree_node_read_all *ra =
-		container_of(cl, struct btree_node_read_all, cl);
+	closure_type(ra, struct btree_node_read_all, cl);
 	struct bch_fs *c = ra->c;
 	struct btree *b = ra->b;
 	struct printbuf buf = PRINTBUF;
@@ -1567,7 +1566,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	if (sync) {
 		closure_sync(&ra->cl);
-		btree_node_read_all_replicas_done(&ra->cl);
+		btree_node_read_all_replicas_done(&ra->cl.work);
 	} else {
 		continue_at(&ra->cl, btree_node_read_all_replicas_done,
 			    c->io_complete_wq);
...
@@ -778,9 +778,9 @@ static void btree_interior_update_work(struct work_struct *work)
 	}
 }
 
-static void btree_update_set_nodes_written(struct closure *cl)
+static CLOSURE_CALLBACK(btree_update_set_nodes_written)
 {
-	struct btree_update *as = container_of(cl, struct btree_update, cl);
+	closure_type(as, struct btree_update, cl);
 	struct bch_fs *c = as->c;
 
 	mutex_lock(&c->btree_interior_update_lock);
...
@@ -35,9 +35,9 @@ static void bio_check_or_release(struct bio *bio, bool check_dirty)
 	}
 }
 
-static void bch2_dio_read_complete(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_read_complete)
 {
-	struct dio_read *dio = container_of(cl, struct dio_read, cl);
+	closure_type(dio, struct dio_read, cl);
 
 	dio->req->ki_complete(dio->req, dio->ret);
 	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
@@ -325,9 +325,9 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
 	return 0;
 }
 
-static void bch2_dio_write_flush_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
 {
-	struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+	closure_type(dio, struct dio_write, op.cl);
 	struct bch_fs *c = dio->op.c;
 
 	closure_debug_destroy(cl);
...
@@ -580,9 +580,9 @@ static inline void wp_update_state(struct write_point *wp, bool running)
 	__wp_update_state(wp, state);
 }
 
-static void bch2_write_index(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_write_index)
 {
-	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+	closure_type(op, struct bch_write_op, cl);
 	struct write_point *wp = op->wp;
 	struct workqueue_struct *wq = index_update_wq(op);
 	unsigned long flags;
@@ -1208,9 +1208,9 @@ static void __bch2_nocow_write_done(struct bch_write_op *op)
 	bch2_nocow_write_convert_unwritten(op);
 }
 
-static void bch2_nocow_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_nocow_write_done)
 {
-	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+	closure_type(op, struct bch_write_op, cl);
 
 	__bch2_nocow_write_done(op);
 	bch2_write_done(cl);
@@ -1363,7 +1363,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
 		op->insert_keys.top = op->insert_keys.keys;
 	} else if (op->flags & BCH_WRITE_SYNC) {
 		closure_sync(&op->cl);
-		bch2_nocow_write_done(&op->cl);
+		bch2_nocow_write_done(&op->cl.work);
 	} else {
 		/*
 		 * XXX
@@ -1566,9 +1566,9 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
  * If op->discard is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch2_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_write)
 {
-	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+	closure_type(op, struct bch_write_op, cl);
 	struct bio *bio = &op->wbio.bio;
 	struct bch_fs *c = op->c;
 	unsigned data_len;
...
@@ -90,8 +90,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->devs_need_flush	= NULL;
 }
 
-void bch2_write(struct closure *);
-
+CLOSURE_CALLBACK(bch2_write);
 void bch2_write_point_do_index_updates(struct work_struct *);
 
 static inline struct bch_write_bio *wbio_init(struct bio *bio)
...
@@ -1025,10 +1025,9 @@ static int journal_read_bucket(struct bch_dev *ca,
 	return 0;
 }
 
-static void bch2_journal_read_device(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_journal_read_device)
 {
-	struct journal_device *ja =
-		container_of(cl, struct journal_device, read);
+	closure_type(ja, struct journal_device, read);
 	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
 	struct bch_fs *c = ca->fs;
 	struct journal_list *jlist =
@@ -1520,9 +1519,9 @@ static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
 	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
 }
 
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
 {
-	struct journal *j = container_of(cl, struct journal, io);
+	closure_type(j, struct journal, io);
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	struct journal_buf *w = journal_last_unwritten_buf(j);
 	struct bch_replicas_padded replicas;
@@ -1638,9 +1637,9 @@ static void journal_write_endio(struct bio *bio)
 	percpu_ref_put(&ca->io_ref);
 }
 
-static void do_journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(do_journal_write)
 {
-	struct journal *j = container_of(cl, struct journal, io);
+	closure_type(j, struct journal, io);
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	struct bch_dev *ca;
 	struct journal_buf *w = journal_last_unwritten_buf(j);
@@ -1850,9 +1849,9 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
 	return 0;
 }
 
-void bch2_journal_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_journal_write)
 {
-	struct journal *j = container_of(cl, struct journal, io);
+	closure_type(j, struct journal, io);
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	struct bch_dev *ca;
 	struct journal_buf *w = journal_last_unwritten_buf(j);
...
@@ -60,6 +60,6 @@ void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
 
 int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);
 
-void bch2_journal_write(struct closure *);
+CLOSURE_CALLBACK(bch2_journal_write);
 
 #endif /* _BCACHEFS_JOURNAL_IO_H */
...
@@ -104,7 +104,7 @@
 
 struct closure;
 struct closure_syncer;
-typedef void (closure_fn) (struct closure *);
+typedef void (closure_fn) (struct work_struct *);
 extern struct dentry *bcache_debug;
 
 struct closure_waitlist {
@@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl)
 		INIT_WORK(&cl->work, cl->work.func);
 		BUG_ON(!queue_work(wq, &cl->work));
 	} else
-		cl->fn(cl);
+		cl->fn(&cl->work);
 }
 
 /**
@@ -309,6 +309,11 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 	__closure_wake_up(list);
 }
 
+#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
+#define closure_type(name, type, member)				\
+	struct closure *cl = container_of(ws, struct closure, work);	\
+	type *name = container_of(cl, type, member)
+
 /**
  * continue_at - jump to another function with barrier
  *
...
@@ -36,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 			closure_debug_destroy(cl);
 
 			if (destructor)
-				destructor(cl);
+				destructor(&cl->work);
 
 			if (parent)
 				closure_put(parent);
@@ -108,8 +108,9 @@ struct closure_syncer {
 	int		done;
 };
 
-static void closure_sync_fn(struct closure *cl)
+static CLOSURE_CALLBACK(closure_sync_fn)
 {
+	struct closure *cl = container_of(ws, struct closure, work);
 	struct closure_syncer *s = cl->s;
 	struct task_struct *p;
...
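
Hand-expanding the macros defined above shows why direct callers now pass
&cl->work: a definition such as journal_write_done becomes, roughly,

	static void journal_write_done(struct work_struct *ws)
	{
		struct closure *cl = container_of(ws, struct closure, work);
		struct journal *j = container_of(cl, struct journal, io);
		...
	}

closure_sync_fn is the one case that open-codes the first container_of():
it operates on the bare closure, so there is no containing type for
closure_type() to recover.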