{
AIORequestData *reqdata = (AIORequestData *) opaque;
ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
- AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
- aio_context_acquire(ctx);
aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
- aio_context_release(ctx);
aio_cb->status = 0;
qemu_aio_unref(aio_cb);
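The pattern running through these hunks: completion callbacks that belong to
the generic block layer stop taking the AioContext lock themselves, because
they are now dispatched with the correct context already held, while device-
and job-level callbacks (virtio-blk, SCSI, mirror, further down) start taking
it explicitly, since they can fire outside the context that owns the disk.
A minimal sketch of the two sides, with hypothetical names (MyAIOCB, MyReq,
my_req_complete are stand-ins, not symbols from this patch):

/* Generic block-layer bottom half: after this series it may assume the
 * BDS's home AioContext is already held by whoever dispatched it. */
static void my_generic_bh(void *opaque)
{
    MyAIOCB *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->ret);   /* no locking here */
    qemu_aio_unref(acb);
}

/* Device-level completion callback: brackets its work with the lock of
 * the BlockBackend's current context. */
static void my_device_complete(void *opaque, int ret)
{
    MyReq *req = opaque;
    AioContext *ctx = blk_get_aio_context(req->blk);

    aio_context_acquire(ctx);
    my_req_complete(req, ret);
    aio_context_release(ctx);
}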
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
bdrv_dec_in_flight(acb->common.bs);
- aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->ret);
- aio_context_release(ctx);
qemu_aio_unref(acb);
}
static void blk_aio_complete_bh(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
assert(acb->has_returned);
- aio_context_acquire(ctx);
blk_aio_complete(acb);
- aio_context_release(ctx);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
out:
+ aio_context_release(ctx);
if (ret != -EINPROGRESS) {
acb->common.cb(acb->common.opaque, ret);
qemu_aio_unref(acb);
}
- aio_context_release(ctx);
}
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
CoroutineIOCompletion *co = opaque;
co->ret = ret;
- qemu_coroutine_enter(co->coroutine);
+ aio_co_wake(co->coroutine);
}
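The qemu_coroutine_enter -> aio_co_wake conversions are the coroutine half of
the same story. qemu_coroutine_enter resumes the coroutine immediately, in
whatever context the caller happens to be running; aio_co_wake restarts it in
the coroutine's home AioContext, taking that context's lock or bouncing the
wakeup across threads when the caller is elsewhere. A rough paraphrase of the
helper's logic, not a verbatim copy of util/async.c:

void aio_co_wake(Coroutine *co)
{
    AioContext *ctx = co->ctx;   /* home context; read atomically in the
                                  * real code */

    if (ctx != qemu_get_current_aio_context()) {
        /* wrong thread: hand the coroutine over to its home context */
        aio_co_schedule(ctx, co);
    } else if (qemu_in_coroutine()) {
        /* already inside a coroutine: queue co on the current
         * coroutine's wakeup list instead of entering recursively */
    } else {
        aio_context_acquire(ctx);
        qemu_coroutine_enter(co);
        aio_context_release(ctx);
    }
}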
static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
static void bdrv_co_em_bh(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
- BlockDriverState *bs = acb->common.bs;
- AioContext *ctx = bdrv_get_aio_context(bs);
assert(!acb->need_bh);
- aio_context_acquire(ctx);
bdrv_co_complete(acb);
- aio_context_release(ctx);
}
static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
iscsi_bh_cb(void *p)
{
IscsiAIOCB *acb = p;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
qemu_bh_delete(acb->bh);
g_free(acb->buf);
acb->buf = NULL;
- aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, acb->status);
- aio_context_release(ctx);
if (acb->task != NULL) {
scsi_free_scsi_task(acb->task);
*/
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
- LinuxAioState *s = laiocb->ctx;
int ret;
ret = laiocb->ret;
}
laiocb->ret = ret;
- aio_context_acquire(s->aio_context);
if (laiocb->co) {
/* If the coroutine is already entered it must be in ioq_submit() and
 * will notice laio->ret has been filled in when it eventually runs
 * later.  Coroutines cannot be entered recursively so avoid doing
 * that!
 */
if (!qemu_coroutine_entered(laiocb->co)) {
- qemu_coroutine_enter(laiocb->co);
+ aio_co_wake(laiocb->co);
}
} else {
laiocb->common.cb(laiocb->common.opaque, ret);
qemu_aio_unref(laiocb);
}
- aio_context_release(s->aio_context);
}
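The qemu_coroutine_entered() check above deserves a note: if io_submit()
completes a request synchronously, this function can run while the submitting
coroutine is still the currently executing one, and entering it again would be
a forbidden recursive entry. The guard generalizes; a minimal usage sketch
(MyOp and my_completion are hypothetical):

static void my_completion(MyOp *op, int ret)
{
    op->ret = ret;
    if (op->co && !qemu_coroutine_entered(op->co)) {
        /* op->co is parked in qemu_coroutine_yield(); safe to wake */
        aio_co_wake(op->co);
    }
    /* otherwise op->co invoked us synchronously and will pick up
     * op->ret when control returns to it */
}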
/**
{
MirrorOp *op = opaque;
MirrorBlockJob *s = op->s;
+
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
if (ret < 0) {
BlockErrorAction action;
}
}
mirror_iteration_done(op, ret);
+ aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
MirrorOp *op = opaque;
MirrorBlockJob *s = op->s;
+
+ aio_context_acquire(blk_get_aio_context(s->common.blk));
if (ret < 0) {
BlockErrorAction action;
}
mirror_iteration_done(op, ret);
- return;
+ } else {
+ blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
+ 0, mirror_write_complete, op);
}
- blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
- 0, mirror_write_complete, op);
+ aio_context_release(blk_get_aio_context(s->common.blk));
}
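Note the shape of the mirror_read_complete rewrite: the early return becomes
an else branch so that every path out of the callback funnels through the
single aio_context_release() at the bottom. The same discipline appears as
"goto out" in the virtio-blk and SCSI hunks below. Either way the invariant
is one acquire and exactly one release per invocation; schematically (names
hypothetical):

static void my_cb(void *opaque, int ret)
{
    AioContext *ctx = my_get_ctx(opaque);   /* hypothetical accessor */

    aio_context_acquire(ctx);
    if (ret < 0) {
        handle_error(opaque, ret);
        goto out;                           /* never a bare return */
    }
    handle_success(opaque);
out:
    aio_context_release(ctx);
}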
static inline void mirror_clip_sectors(MirrorBlockJob *s,
static void null_bh_cb(void *opaque)
{
NullAIOCB *acb = opaque;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
- aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
- aio_context_release(ctx);
qemu_aio_unref(acb);
}
static void null_timer_cb(void *opaque)
{
NullAIOCB *acb = opaque;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
-
- aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
- aio_context_release(ctx);
timer_deinit(&acb->timer);
qemu_aio_unref(acb);
}
unsigned int index;
unsigned int n;
+ qed_acquire(s);
if (ret) {
goto out;
}
out:
find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
+ qed_release(s);
g_free(find_cluster_cb);
}
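qed_acquire()/qed_release() are thin wrappers introduced alongside these
hunks. Their definitions are not shown here; presumably (a sketch, assuming
they live in block/qed.c) they simply lock the context of the QED state's
BlockDriverState:

void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}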
{
QEDReadTableCB *read_table_cb = opaque;
QEDTable *table = read_table_cb->table;
+ BDRVQEDState *s = read_table_cb->s;
int noffsets = read_table_cb->qiov.size / sizeof(uint64_t);
int i;
}
/* Byteswap offsets */
+ qed_acquire(s);
for (i = 0; i < noffsets; i++) {
table->offsets[i] = le64_to_cpu(table->offsets[i]);
}
+ qed_release(s);
out:
/* Completion */
- trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
+ trace_qed_read_table_cb(s, read_table_cb->table, ret);
gencb_complete(&read_table_cb->gencb, ret);
}
static void qed_write_table_cb(void *opaque, int ret)
{
QEDWriteTableCB *write_table_cb = opaque;
+ BDRVQEDState *s = write_table_cb->s;
- trace_qed_write_table_cb(write_table_cb->s,
+ trace_qed_write_table_cb(s,
write_table_cb->orig_table,
write_table_cb->flush,
ret);
if (write_table_cb->flush) {
/* We still need to flush first */
write_table_cb->flush = false;
+ qed_acquire(s);
bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
write_table_cb);
+ qed_release(s);
return;
}
CachedL2Table *l2_table = request->l2_table;
uint64_t l2_offset = read_l2_table_cb->l2_offset;
+ qed_acquire(s);
if (ret) {
/* can't trust loaded L2 table anymore */
qed_unref_l2_cache_entry(l2_table);
request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
assert(request->l2_table != NULL);
}
+ qed_release(s);
gencb_complete(&read_l2_table_cb->gencb, ret);
}
}
if (cb->co) {
- qemu_coroutine_enter(cb->co);
+ aio_co_wake(cb->co);
}
}
cb->done = true;
cb->ret = ret;
if (cb->co) {
- qemu_coroutine_enter(cb->co);
+ aio_co_wake(cb->co);
}
}
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
RBDAIOCB *acb = rcb->acb;
- AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
int64_t r;
r = rcb->ret;
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
}
qemu_vfree(acb->bounce);
-
- aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
- aio_context_release(ctx);
qemu_aio_unref(acb);
}
qemu_vfree(waiocb->buf);
}
-
- aio_context_acquire(s->aio_ctx);
waiocb->common.cb(waiocb->common.opaque, ret);
- aio_context_release(s->aio_ctx);
qemu_aio_unref(waiocb);
}
static void virtio_blk_rw_complete(void *opaque, int ret)
{
VirtIOBlockReq *next = opaque;
+ VirtIOBlock *s = next->dev;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
while (next) {
VirtIOBlockReq *req = next;
next = req->mr_next;
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
}
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
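The double ".conf" in blk_get_aio_context(s->conf.conf.blk) is easy to
misread: the device state embeds the virtio-blk configuration struct, which in
turn embeds the generic block configuration that holds the BlockBackend.
Abridged view of the nesting (most fields elided):

struct VirtIOBlock {
    VirtIODevice parent_obj;
    BlockBackend *blk;
    VirtIOBlkConf conf;    /* virtio-blk properties */
    /* ... */
};

struct VirtIOBlkConf {
    BlockConf conf;        /* generic block config; conf.blk is the backend */
    /* ... */
};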
static void virtio_blk_flush_complete(void *opaque, int ret)
{
VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev;
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
if (ret) {
if (virtio_blk_handle_rw_error(req, -ret, 0)) {
- return;
+ goto out;
}
}
virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
virtio_blk_free_request(req);
+
+out:
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
#ifdef __linux__
virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
virtio_blk_req_complete(req, status);
virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
g_free(ioctl_req);
}
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
scsi_req_complete(&r->req, GOOD);
done:
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
scsi_req_unref(&r->req);
}
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_dma_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_read_complete(void * opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
done:
scsi_req_unref(&r->req);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Actually issue a read to the block device. */
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_do_read(opaque, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
/* Read more data from scsi device into buffer. */
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
} else {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_write_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_write_data(SCSIRequest *req)
{
UnmapCBData *data = opaque;
SCSIDiskReq *r = data->r;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
scsi_unmap_complete_noio(data, ret);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
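The scsi-disk hunks lean on a *_noio convention that the unmap wrapper above
shows in full: the AIO completion entry point takes the AioContext lock and
delegates to a _noio core that assumes the lock is already held, so internal
callers that already own the context can call the core directly without
re-acquiring. The shape, sketched with hypothetical names around the real
helpers:

static void my_complete_noio(SCSIDiskReq *r, int ret)
{
    /* caller holds the context of s->qdev.conf.blk */
    /* ... actual completion logic ... */
}

static void my_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    my_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}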
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
if (scsi_disk_req_check_error(r, ret, true)) {
goto done;
}
scsi_req_unref(&r->req);
qemu_vfree(data->iov.iov_base);
g_free(data);
+ aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
static void scsi_command_complete(void *opaque, int ret)
{
SCSIGenericReq *r = (SCSIGenericReq *)opaque;
+ SCSIDevice *s = r->req.dev;
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
scsi_command_complete_noio(r, ret);
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
static int execute_command(BlockBackend *blk,
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- return;
+ goto done;
}
len = r->io_header.dxfer_len - r->io_header.resid;
r->len = -1;
if (len == 0) {
scsi_command_complete_noio(r, 0);
- return;
+ goto done;
}
/* Snoop READ CAPACITY output to set the blocksize. */
}
scsi_req_data(&r->req, len);
scsi_req_unref(&r->req);
+
+done:
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Read more data from scsi device into buffer. */
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
+
if (ret || r->req.io_canceled) {
scsi_command_complete_noio(r, ret);
- return;
+ goto done;
}
if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
}
scsi_command_complete_noio(r, ret);
+
+done:
+ aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Write data to a scsi device. Returns nonzero on failure.
*/
qemu_bh_schedule(pool->completion_bh);
+ aio_context_release(pool->ctx);
elem->common.cb(elem->common.opaque, elem->ret);
+ aio_context_acquire(pool->ctx);
qemu_aio_unref(elem);
goto restart;
} else {
ThreadPoolCo *co = opaque;
co->ret = ret;
- qemu_coroutine_enter(co->co);
+ aio_co_wake(co->co);
}
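The two thread-pool hunks invert the pattern: thread_pool_completion_bh
already runs with pool->ctx held, so the lock is dropped, not taken, around
the element's callback; that callback may be one of the device-level
completions above, which acquires an AioContext on its own. Re-scheduling the
completion BH before the unlocked call presumably keeps completions draining
if the callback nests an aio_poll(). Annotated excerpt of the changed lines:

qemu_bh_schedule(pool->completion_bh);  /* re-arm before running user code */

aio_context_release(pool->ctx);         /* user callback runs unlocked */
elem->common.cb(elem->common.opaque, elem->ret);
aio_context_acquire(pool->ctx);         /* re-take before touching the pool */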
int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,