};
struct bch_writepage_io {
- struct closure cl;
struct bch_inode_info *inode;
/* must be last: */
struct bch_write_op op;
};
}
-static void bch2_writepage_io_free(struct closure *cl)
-{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
-
- bio_put(&io->op.wbio.bio);
-}
-
-static void bch2_writepage_io_done(struct closure *cl)
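+/*
+ * Write completion: called directly as the write op's end_io callback,
+ * replacing the old closure continuation.
+ */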
+static void bch2_writepage_io_done(struct bch_write_op *op)
{
- struct bch_writepage_io *io = container_of(cl,
- struct bch_writepage_io, cl);
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
struct bvec_iter_all iter;
struct bio_vec *bvec;

bio_for_each_segment_all(bvec, bio, iter) {
end_page_writeback(bvec->bv_page);
}
- closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
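+ /* no destructor closure needed anymore; drop the bio ref directly */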
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
struct bch_writepage_io *io = w->io;

down(&io->op.c->io_in_flight);
w->io = NULL;
- closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
- continue_at(&io->cl, bch2_writepage_io_done, NULL);
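+ /* no parent closure: completion is reported via op->end_io */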
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
}
/*
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
- closure_init(&w->io->cl, NULL);
w->io->inode = inode;
-
op = &w->io->op;
bch2_write_op_init(op, c, w->opts);
op->target = w->opts.foreground_target;
op->res.nr_replicas = nr_replicas;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
op->pos = POS(inode->v.i_ino, sector);
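+ /* run bch2_writepage_io_done() when the write completes */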
+ op->end_io = bch2_writepage_io_done;
op->wbio.bio.bi_iter.bi_sector = sector;
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- if (op->end_io) {
- EBUG_ON(cl->parent);
- closure_debug_destroy(cl);
- op->end_io(op);
- } else {
- closure_return(cl);
- }
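+ /* every write op must now set end_io; op->cl never has a parent */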
+ EBUG_ON(cl->parent);
+ closure_debug_destroy(cl);
+ op->end_io(op);
}
/**
/* Cache promotion on read */
struct promote_op {
- struct closure cl;
struct rcu_head rcu;
u64 start_time;
kfree_rcu(op, rcu);
}
-static void promote_done(struct closure *cl)
+static void promote_done(struct bch_write_op *wop)
{
struct promote_op *op =
- container_of(cl, struct promote_op, cl);
+ container_of(wop, struct promote_op, write.op);
struct bch_fs *c = op->write.op.c;
bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
struct bch_fs *c = rbio->c;
- struct closure *cl = &op->cl;
struct bio *bio = &op->write.op.wbio.bio;
trace_promote(&rbio->bio);
bch2_migrate_read_done(&op->write, rbio);
- closure_init(cl, NULL);
- closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
- closure_return_with_destructor(cl, promote_done);
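+ /* promote_done() runs via op->end_io, set in __promote_alloc() */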
+ closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, NULL);
}
static struct promote_op *__promote_alloc(struct bch_fs *c,
},
btree_id, k);
BUG_ON(ret);
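+ /* completion callback replaces the old closure destructor */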
+ op->write.op.end_io = promote_done;
return op;
err:
return NULL;
}
-static void move_free(struct closure *cl)
+static void move_free(struct moving_io *io)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->write.ctxt;
struct bvec_iter_all iter;
struct bio_vec *bv;
kfree(io);
}
-static void move_write_done(struct closure *cl)
+static void move_write_done(struct bch_write_op *op)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct moving_io *io = container_of(op, struct moving_io, write.op);
+ struct moving_context *ctxt = io->write.ctxt;
atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
- closure_return_with_destructor(cl, move_free);
+ move_free(io);
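+ /* move_free() freed io, so use the saved ctxt to drop the ref from move_write() */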
+ closure_put(&ctxt->cl);
}
-static void move_write(struct closure *cl)
+static void move_write(struct moving_io *io)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
-
if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
- closure_return_with_destructor(cl, move_free);
+ move_free(io);
return;
}
- bch2_migrate_read_done(&io->write, &io->rbio);
-
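+ /* ref is dropped in move_write_done() */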
+ closure_get(&io->write.ctxt->cl);
atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
- closure_call(&io->write.op.cl, bch2_write, NULL, cl);
- continue_at(cl, move_write_done, NULL);
+
+ bch2_migrate_read_done(&io->write, &io->rbio);
+ closure_call(&io->write.op.cl, bch2_write, NULL, NULL);
}
static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
- closure_call(&io->cl, move_write, NULL, &ctxt->cl);
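+ /* runs inline now; move_write_done() is the write op's end_io */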
+ move_write(io);
}
}
if (ret)
goto err_free_pages;
+ io->write.op.end_io = move_write_done;
+
atomic64_inc(&ctxt->stats->keys_moved);
atomic64_add(k.k->size, &ctxt->stats->sectors_moved);