closures: CLOSURE_CALLBACK() to fix type punning
author Kent Overstreet <kent.overstreet@linux.dev>
Sat, 18 Nov 2023 00:13:27 +0000 (19:13 -0500)
committer Kent Overstreet <kent.overstreet@linux.dev>
Fri, 24 Nov 2023 05:29:58 +0000 (00:29 -0500)
Control flow integrity is now checking that type signatures match on
indirect function calls. That breaks closures, which embed a work_struct
in the closure in such a way that a closure_fn may also be used as a
workqueue fn by the underlying closure code.
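
The pun works because the closure's fn pointer shares storage with the
embedded work_struct's func field, which is why closure_queue() can do
INIT_WORK(&cl->work, cl->work.func) and hand &cl->work straight to the
workqueue (see the include/linux/closure.h hunk below). A simplified
sketch (not the exact field list) of the pre-patch arrangement:

  typedef void (closure_fn) (struct closure *);

  struct closure {
          union {
                  struct {
                          struct workqueue_struct *wq;
                          struct closure_syncer   *s;
                          struct llist_node       list;
                          closure_fn              *fn;  /* overlays work.func */
                  };
                  struct work_struct      work;
          };
          /* ... */
  };

  /*
   * With kCFI, the workqueue's indirect call work->func(work) checks
   * for a work_func_t signature; a closure_fn stored in the aliased
   * slot fails that check.
   */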

So we have to change closure fns to take a work_struct as their
argument - but that results in a loss of clarity, as closure fns have
different semantics from normal workqueue functions (they run owning a
ref on the closure, which must be released with continue_at() or
closure_return()).
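
To illustrate that contract with a made-up example (the type and
helpers below are hypothetical, not from this patch): a closure fn
never just returns; it must pass its ref along or drop it:

  static CLOSURE_CALLBACK(example_io_done)
  {
          closure_type(op, struct example_op, cl);   /* hypothetical type */

          example_finish_io(op);                     /* hypothetical helper */

          /*
           * The ref this fn ran with passes to the next stage; an
           * ordinary workqueue function has no such obligation.
           */
          continue_at(cl, example_index_update, op->wq);
  }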

Thus, this patch introduces CLOSURE_CALLBACK() and closure_type() macros,
as suggested by Kees, to smooth things over a bit.
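
For reference, given the macro definitions in the include/linux/closure.h
hunk below, the annotated form of one of the bcache callbacks converted
here expands like so:

  static CLOSURE_CALLBACK(btree_node_write_unlock)
  {
          closure_type(b, struct btree, io);
          up(&b->io_mutex);
  }

  /* ...is equivalent to: */

  static void btree_node_write_unlock(struct work_struct *ws)
  {
          /* recover the closure from its embedded work_struct... */
          struct closure *cl = container_of(ws, struct closure, work);
          /* ...then the object the closure is embedded in */
          struct btree *b = container_of(cl, struct btree, io);

          up(&b->io_mutex);
  }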

Suggested-by: Kees Cook <keescook@chromium.org>
Cc: Coly Li <colyli@suse.de>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
16 files changed:
drivers/md/bcache/btree.c
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/fs-io-direct.c
fs/bcachefs/io_write.c
fs/bcachefs/io_write.h
fs/bcachefs/journal_io.c
fs/bcachefs/journal_io.h
include/linux/closure.h
lib/closure.c

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ae5cbb55861fdb7967ad9ad686161b377de345b4..9441eac3d5468d01ce3a273addf9fd2878d83e6c 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
        w->journal      = NULL;
 }
 
-static void btree_node_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_unlock)
 {
-       struct btree *b = container_of(cl, struct btree, io);
+       closure_type(b, struct btree, io);
 
        up(&b->io_mutex);
 }
 
-static void __btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(__btree_node_write_done)
 {
-       struct btree *b = container_of(cl, struct btree, io);
+       closure_type(b, struct btree, io);
        struct btree_write *w = btree_prev_write(b);
 
        bch_bbio_free(b->bio, b->c);
@@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
        closure_return_with_destructor(cl, btree_node_write_unlock);
 }
 
-static void btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_done)
 {
-       struct btree *b = container_of(cl, struct btree, io);
+       closure_type(b, struct btree, io);
 
        bio_free_pages(b->bio);
-       __btree_node_write_done(cl);
+       __btree_node_write_done(&cl->work);
 }
 
 static void btree_node_write_endio(struct bio *bio)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c182c21de2e8199d1d57817e27e149e0d772d2b7..7ff14bd2feb8bba0215b4075f34e8f97940c7bc9 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
        closure_put(&w->c->journal.io);
 }
 
-static void journal_write(struct closure *cl);
+static CLOSURE_CALLBACK(journal_write);
 
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
 {
-       struct journal *j = container_of(cl, struct journal, io);
+       closure_type(j, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];
@@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
        continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
-static void journal_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlock)
        __releases(&c->journal.lock)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       closure_type(c, struct cache_set, journal.io);
 
        c->journal.io_in_flight = 0;
        spin_unlock(&c->journal.lock);
 }
 
-static void journal_write_unlocked(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlocked)
        __releases(c->journal.lock)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       closure_type(c, struct cache_set, journal.io);
        struct cache *ca = c->cache;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
@@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
        continue_at(cl, journal_write_done, NULL);
 }
 
-static void journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+       closure_type(c, struct cache_set, journal.io);
 
        spin_lock(&c->journal.lock);
-       journal_write_unlocked(cl);
+       journal_write_unlocked(&cl->work);
 }
 
 static void journal_try_write(struct cache_set *c)
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 9f32901fdad10243857f7301a560723a7f54802c..ebd500bdf0b2fb2b3562bd491b2a8b61ea992e2a 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 
 /* Moving GC - IO loop */
 
-static void moving_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(moving_io_destructor)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       closure_type(io, struct moving_io, cl);
 
        kfree(io);
 }
 
-static void write_moving_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving_finish)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       closure_type(io, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;
 
        bio_free_pages(bio);
@@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
        bch_bio_map(bio, NULL);
 }
 
-static void write_moving(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       closure_type(io, struct moving_io, cl);
        struct data_insert_op *op = &io->op;
 
        if (!op->status) {
@@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
        continue_at(cl, write_moving_finish, op->wq);
 }
 
-static void read_moving_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_moving_submit)
 {
-       struct moving_io *io = container_of(cl, struct moving_io, cl);
+       closure_type(io, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;
 
        bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a9b1f3896249b3da67587076cbf816306709a99f..83d112bd2b1c0e336d21cfbd8f50eb716a2a4c4b 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
 
 struct kmem_cache *bch_search_cache;
 
-static void bch_data_insert_start(struct closure *cl);
+static CLOSURE_CALLBACK(bch_data_insert_start);
 
 static unsigned int cache_mode(struct cached_dev *dc)
 {
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 /* Insert data into cache */
 
-static void bch_data_insert_keys(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_keys)
 {
-       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       closure_type(op, struct data_insert_op, cl);
        atomic_t *journal_ref = NULL;
        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
        int ret;
@@ -136,9 +136,9 @@ out:
        continue_at(cl, bch_data_insert_keys, op->wq);
 }
 
-static void bch_data_insert_error(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_error)
 {
-       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       closure_type(op, struct data_insert_op, cl);
 
        /*
         * Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
 
        op->insert_keys.top = dst;
 
-       bch_data_insert_keys(cl);
+       bch_data_insert_keys(&cl->work);
 }
 
 static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
-static void bch_data_insert_start(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_start)
 {
-       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       closure_type(op, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;
 
        if (op->bypass)
@@ -305,16 +305,16 @@ err:
  * If op->bypass is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch_data_insert(struct closure *cl)
+CLOSURE_CALLBACK(bch_data_insert)
 {
-       struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+       closure_type(op, struct data_insert_op, cl);
 
        trace_bcache_write(op->c, op->inode, op->bio,
                           op->writeback, op->bypass);
 
        bch_keylist_init(&op->insert_keys);
        bio_get(op->bio);
-       bch_data_insert_start(cl);
+       bch_data_insert_start(&cl->work);
 }
 
 /*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
        return n == bio ? MAP_DONE : MAP_CONTINUE;
 }
 
-static void cache_lookup(struct closure *cl)
+static CLOSURE_CALLBACK(cache_lookup)
 {
-       struct search *s = container_of(cl, struct search, iop.cl);
+       closure_type(s, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
        struct cached_dev *dc;
        int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
        bio_cnt_set(bio, 3);
 }
 
-static void search_free(struct closure *cl)
+static CLOSURE_CALLBACK(search_free)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
 
        atomic_dec(&s->iop.c->search_inflight);
 
@@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
 
 /* Cached devices */
 
-static void cached_dev_bio_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_bio_complete)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        cached_dev_put(dc);
-       search_free(cl);
+       search_free(&cl->work);
 }
 
 /* Process reads */
 
-static void cached_dev_read_error_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error_done)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
 
        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);
@@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
        if (s->iop.bio)
                bio_free_pages(s->iop.bio);
 
-       cached_dev_bio_complete(cl);
+       cached_dev_bio_complete(&cl->work);
 }
 
-static void cached_dev_read_error(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct bio *bio = &s->bio.bio;
 
        /*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
        continue_at(cl, cached_dev_read_error_done, NULL);
 }
 
-static void cached_dev_cache_miss_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct bcache_device *d = s->d;
 
        if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
        if (s->iop.bio)
                bio_free_pages(s->iop.bio);
 
-       cached_dev_bio_complete(cl);
+       cached_dev_bio_complete(&cl->work);
        closure_put(&d->cl);
 }
 
-static void cached_dev_read_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        /*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
        continue_at(cl, cached_dev_cache_miss_done, NULL);
 }
 
-static void cached_dev_read_done_bh(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done_bh)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 
 /* Process writes */
 
-static void cached_dev_write_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_write_complete)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
        up_read_non_owner(&dc->writeback_lock);
-       cached_dev_bio_complete(cl);
+       cached_dev_bio_complete(&cl->work);
 }
 
 static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ insert_data:
        continue_at(cl, cached_dev_write_complete, NULL);
 }
 
-static void cached_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_nodata)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
        struct bio *bio = &s->bio.bio;
 
        if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
        return MAP_CONTINUE;
 }
 
-static void flash_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_nodata)
 {
-       struct search *s = container_of(cl, struct search, cl);
+       closure_type(s, struct search, cl);
 
        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 38ab4856eaab0dd9f73698db5dd30e012070f0d4..46bbef00aebb9d5e1cb0965b44df266a4908c582 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -34,7 +34,7 @@ struct data_insert_op {
 };
 
 unsigned int bch_get_congested(const struct cache_set *c);
-void bch_data_insert(struct closure *cl);
+CLOSURE_CALLBACK(bch_data_insert);
 
 void bch_cached_dev_request_init(struct cached_dev *dc);
 void cached_dev_submit_bio(struct bio *bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8bd899766372afe253eae981870c20dea9dfedc7..e0db905c1ca0989ce791e52565eb5251f0c2cbb9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
        submit_bio(bio);
 }
 
-static void bch_write_bdev_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
 {
-       struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+       closure_type(dc, struct cached_dev, sb_write);
 
        up(&dc->sb_write_mutex);
 }
@@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
        closure_put(&ca->set->sb_write);
 }
 
-static void bcache_write_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bcache_write_super_unlock)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+       closure_type(c, struct cache_set, sb_write);
 
        up(&c->sb_write_mutex);
 }
@@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
        closure_put(cl);
 }
 
-static void uuid_io_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(uuid_io_unlock)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+       closure_type(c, struct cache_set, uuid_write);
 
        up(&c->uuid_write_mutex);
 }
@@ -1342,9 +1342,9 @@ void bch_cached_dev_release(struct kobject *kobj)
        module_put(THIS_MODULE);
 }
 
-static void cached_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_free)
 {
-       struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+       closure_type(dc, struct cached_dev, disk.cl);
 
        if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                cancel_writeback_rate_update_dwork(dc);
@@ -1376,9 +1376,9 @@ static void cached_dev_free(struct closure *cl)
        kobject_put(&dc->disk.kobj);
 }
 
-static void cached_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_flush)
 {
-       struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+       closure_type(dc, struct cached_dev, disk.cl);
        struct bcache_device *d = &dc->disk;
 
        mutex_lock(&bch_register_lock);
@@ -1497,9 +1497,9 @@ void bch_flash_dev_release(struct kobject *kobj)
        kfree(d);
 }
 
-static void flash_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_free)
 {
-       struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+       closure_type(d, struct bcache_device, cl);
 
        mutex_lock(&bch_register_lock);
        atomic_long_sub(bcache_dev_sectors_dirty(d),
@@ -1510,9 +1510,9 @@ static void flash_dev_free(struct closure *cl)
        kobject_put(&d->kobj);
 }
 
-static void flash_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_flush)
 {
-       struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+       closure_type(d, struct bcache_device, cl);
 
        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
@@ -1668,9 +1668,9 @@ void bch_cache_set_release(struct kobject *kobj)
        module_put(THIS_MODULE);
 }
 
-static void cache_set_free(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_free)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, cl);
+       closure_type(c, struct cache_set, cl);
        struct cache *ca;
 
        debugfs_remove(c->debug);
@@ -1709,9 +1709,9 @@ static void cache_set_free(struct closure *cl)
        kobject_put(&c->kobj);
 }
 
-static void cache_set_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_flush)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, caching);
+       closure_type(c, struct cache_set, caching);
        struct cache *ca = c->cache;
        struct btree *b;
 
@@ -1806,9 +1806,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
        }
 }
 
-static void __cache_set_unregister(struct closure *cl)
+static CLOSURE_CALLBACK(__cache_set_unregister)
 {
-       struct cache_set *c = container_of(cl, struct cache_set, caching);
+       closure_type(c, struct cache_set, caching);
        struct cached_dev *dc;
        struct bcache_device *d;
        size_t i;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 24c049067f61ae6ec9e9c0280e948becb2943f88..77427e355613024419ec1c44261ac31b805c2bbd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
        bch_bio_map(bio, NULL);
 }
 
-static void dirty_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(dirty_io_destructor)
 {
-       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       closure_type(io, struct dirty_io, cl);
 
        kfree(io);
 }
 
-static void write_dirty_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty_finish)
 {
-       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       closure_type(io, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;
 
@@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
        closure_put(&io->cl);
 }
 
-static void write_dirty(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty)
 {
-       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       closure_type(io, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;
 
@@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
        dirty_endio(bio);
 }
 
-static void read_dirty_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_dirty_submit)
 {
-       struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+       closure_type(io, struct dirty_io, cl);
 
        closure_bio_submit(io->dc->disk.c, &io->bio, cl);
 
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 37d896edb06e0475cc7146e31a2790321f842394..57c20390e10e3fe05394415d8ccabb43201c871b 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1358,10 +1358,9 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
        return offset;
 }
 
-static void btree_node_read_all_replicas_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
 {
-       struct btree_node_read_all *ra =
-               container_of(cl, struct btree_node_read_all, cl);
+       closure_type(ra, struct btree_node_read_all, cl);
        struct bch_fs *c = ra->c;
        struct btree *b = ra->b;
        struct printbuf buf = PRINTBUF;
@@ -1567,7 +1566,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
        if (sync) {
                closure_sync(&ra->cl);
-               btree_node_read_all_replicas_done(&ra->cl);
+               btree_node_read_all_replicas_done(&ra->cl.work);
        } else {
                continue_at(&ra->cl, btree_node_read_all_replicas_done,
                            c->io_complete_wq);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 76f27bc9fa24e0a57ebbf1f70b5bfbb1f8a79ee6..d08efd6d958e87f8ed0ec3d8b90d9f978d89fb91 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -778,9 +778,9 @@ static void btree_interior_update_work(struct work_struct *work)
        }
 }
 
-static void btree_update_set_nodes_written(struct closure *cl)
+static CLOSURE_CALLBACK(btree_update_set_nodes_written)
 {
-       struct btree_update *as = container_of(cl, struct btree_update, cl);
+       closure_type(as, struct btree_update, cl);
        struct bch_fs *c = as->c;
 
        mutex_lock(&c->btree_interior_update_lock);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 5b42a76c4796f90062bb86e2914d0301e52cf7d0..9a479e4de6b36a71d1bc4b3c1ef62d8787098179 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -35,9 +35,9 @@ static void bio_check_or_release(struct bio *bio, bool check_dirty)
        }
 }
 
-static void bch2_dio_read_complete(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_read_complete)
 {
-       struct dio_read *dio = container_of(cl, struct dio_read, cl);
+       closure_type(dio, struct dio_read, cl);
 
        dio->req->ki_complete(dio->req, dio->ret);
        bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
@@ -325,9 +325,9 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
        return 0;
 }
 
-static void bch2_dio_write_flush_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
 {
-       struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+       closure_type(dio, struct dio_write, op.cl);
        struct bch_fs *c = dio->op.c;
 
        closure_debug_destroy(cl);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d704a8f829c8a7fa3db1cc481857dd431b67190b..8ede46b1e354634763ff743a2d8aadf88b2931c6 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -580,9 +580,9 @@ static inline void wp_update_state(struct write_point *wp, bool running)
        __wp_update_state(wp, state);
 }
 
-static void bch2_write_index(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_write_index)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
        struct write_point *wp = op->wp;
        struct workqueue_struct *wq = index_update_wq(op);
        unsigned long flags;
@@ -1208,9 +1208,9 @@ static void __bch2_nocow_write_done(struct bch_write_op *op)
                bch2_nocow_write_convert_unwritten(op);
 }
 
-static void bch2_nocow_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_nocow_write_done)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
 
        __bch2_nocow_write_done(op);
        bch2_write_done(cl);
@@ -1363,7 +1363,7 @@ err:
                op->insert_keys.top = op->insert_keys.keys;
        } else if (op->flags & BCH_WRITE_SYNC) {
                closure_sync(&op->cl);
-               bch2_nocow_write_done(&op->cl);
+               bch2_nocow_write_done(&op->cl.work);
        } else {
                /*
                 * XXX
@@ -1566,9 +1566,9 @@ err:
  * If op->discard is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch2_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_write)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
        struct bio *bio = &op->wbio.bio;
        struct bch_fs *c = op->c;
        unsigned data_len;
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
index 9323167229eeae8900b65733f022dae6047448b5..6c276a48f95dc2051f22dbfe00e4181319f1ee76 100644
--- a/fs/bcachefs/io_write.h
+++ b/fs/bcachefs/io_write.h
@@ -90,8 +90,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
        op->devs_need_flush     = NULL;
 }
 
-void bch2_write(struct closure *);
-
+CLOSURE_CALLBACK(bch2_write);
 void bch2_write_point_do_index_updates(struct work_struct *);
 
 static inline struct bch_write_bio *wbio_init(struct bio *bio)
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 786a0928550920a906197fff61d51eccaa6126bb..02e6484f9953b07a10227333b2d564882534e109 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1025,10 +1025,9 @@ next_block:
        return 0;
 }
 
-static void bch2_journal_read_device(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_journal_read_device)
 {
-       struct journal_device *ja =
-               container_of(cl, struct journal_device, read);
+       closure_type(ja, struct journal_device, read);
        struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
        struct bch_fs *c = ca->fs;
        struct journal_list *jlist =
@@ -1520,9 +1519,9 @@ static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
        return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
 }
 
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
 {
-       struct journal *j = container_of(cl, struct journal, io);
+       closure_type(j, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *w = journal_last_unwritten_buf(j);
        struct bch_replicas_padded replicas;
@@ -1638,9 +1637,9 @@ static void journal_write_endio(struct bio *bio)
        percpu_ref_put(&ca->io_ref);
 }
 
-static void do_journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(do_journal_write)
 {
-       struct journal *j = container_of(cl, struct journal, io);
+       closure_type(j, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_buf *w = journal_last_unwritten_buf(j);
@@ -1850,9 +1849,9 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
        return 0;
 }
 
-void bch2_journal_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_journal_write)
 {
-       struct journal *j = container_of(cl, struct journal, io);
+       closure_type(j, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_buf *w = journal_last_unwritten_buf(j);
diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h
index a88d097b13f1294a5ca1f3c30ebba5282ef56da3..c035e7c108e19012e6e4e1f708136dec27b5387c 100644
--- a/fs/bcachefs/journal_io.h
+++ b/fs/bcachefs/journal_io.h
@@ -60,6 +60,6 @@ void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
 
 int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);
 
-void bch2_journal_write(struct closure *);
+CLOSURE_CALLBACK(bch2_journal_write);
 
 #endif /* _BCACHEFS_JOURNAL_IO_H */
diff --git a/include/linux/closure.h b/include/linux/closure.h
index de7bb47d8a46ace38d95a81ed6df231d91ac725b..c554c6a08768ad60cdf529a65cf962095363a4a9 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
 
 struct closure;
 struct closure_syncer;
-typedef void (closure_fn) (struct closure *);
+typedef void (closure_fn) (struct work_struct *);
 extern struct dentry *bcache_debug;
 
 struct closure_waitlist {
@@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl)
                INIT_WORK(&cl->work, cl->work.func);
                BUG_ON(!queue_work(wq, &cl->work));
        } else
-               cl->fn(cl);
+               cl->fn(&cl->work);
 }
 
 /**
@@ -309,6 +309,11 @@ static inline void closure_wake_up(struct closure_waitlist *list)
        __closure_wake_up(list);
 }
 
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member)                               \
+       struct closure *cl = container_of(ws, struct closure, work);    \
+       type *name = container_of(cl, type, member)
+
 /**
  * continue_at - jump to another function with barrier
  *
diff --git a/lib/closure.c b/lib/closure.c
index f86c9eeafb35ad9da21ebddda8a182ea27970ff8..c16540552d61bc14121b034a9d6e302045ff0dc5 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -36,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
                        closure_debug_destroy(cl);
 
                        if (destructor)
-                               destructor(cl);
+                               destructor(&cl->work);
 
                        if (parent)
                                closure_put(parent);
@@ -108,8 +108,9 @@ struct closure_syncer {
        int                     done;
 };
 
-static void closure_sync_fn(struct closure *cl)
+static CLOSURE_CALLBACK(closure_sync_fn)
 {
+       struct closure *cl = container_of(ws, struct closure, work);
        struct closure_syncer *s = cl->s;
        struct task_struct *p;