     struct glfs *glfs;
     int fds[2];
     struct glfs_fd *fd;
-    int qemu_aio_count;
     int event_reader_pos;
     GlusterAIOCB *event_acb;
 } BDRVGlusterState;
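
The field removed above was pure bookkeeping for the .io_flush() mechanism: every submission incremented qemu_aio_count, every completion decremented it, and the only reader was the driver's io_flush callback. A minimal standalone sketch of that retired pattern (the names DriverState and io_flush_cb are invented for illustration):

    /* Hypothetical sketch of the pattern this patch retires. */
    typedef struct DriverState {
        int in_flight;              /* the role qemu_aio_count played */
    } DriverState;

    /* Old .io_flush() contract: return non-zero while requests are
     * pending so the event loop keeps waiting on this handler. */
    static int io_flush_cb(void *opaque)
    {
        DriverState *s = opaque;
        return s->in_flight > 0;
    }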
         ret = -EIO; /* Partial read/write - fail it */
     }
 
-    s->qemu_aio_count--;
     qemu_aio_release(acb);
     cb(opaque, ret);
     if (finished) {
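
For orientation, not part of the patch: completions reach qemu_gluster_complete_aio() through a pipe, and the `} while (ret < 0 && errno == EINTR);` context opening the next hunk is the tail of the reader on that pipe. Reconstructed from the surrounding file of this era, it looks roughly like this:

    /* Sketch: drain GlusterAIOCB pointers from the O_NONBLOCK pipe;
     * event_reader_pos tracks a partially read pointer across calls. */
    static void qemu_gluster_aio_event_reader(void *opaque)
    {
        BDRVGlusterState *s = opaque;
        ssize_t ret;

        do {
            char *p = (char *)&s->event_acb;

            ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
                       sizeof(s->event_acb) - s->event_reader_pos);
            if (ret > 0) {
                s->event_reader_pos += ret;
                if (s->event_reader_pos == sizeof(s->event_acb)) {
                    s->event_reader_pos = 0;
                    qemu_gluster_complete_aio(s->event_acb, s);
                }
            }
        } while (ret < 0 && errno == EINTR);
    }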
     } while (ret < 0 && errno == EINTR);
 }
 
-static int qemu_gluster_aio_flush_cb(void *opaque)
-{
-    BDRVGlusterState *s = opaque;
-
-    return (s->qemu_aio_count > 0);
-}
-
 /* TODO Convert to fine grained options */
 static QemuOptsList runtime_opts = {
     .name = "gluster",
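
qemu_gluster_aio_flush_cb() was the .io_flush() hook registered below via qemu_aio_set_fd_handler(); the event loop polled it to decide whether blocking in qemu_aio_wait() could still make progress. Once the AIO core stopped consulting io_flush (draining moved to the block layer's request tracking, e.g. bdrv_drain_all()), the callback became dead code. A simplified, hypothetical sketch of how the old loop consumed such callbacks (the AioHandler list here is invented for the sketch):

    /* Sketch: the old loop asked every handler's io_flush whether work
     * was pending before deciding to block. */
    typedef struct AioHandler {
        int (*io_flush)(void *opaque);
        void *opaque;
        struct AioHandler *next;
    } AioHandler;

    static bool any_pending(AioHandler *handlers)
    {
        bool busy = false;
        for (AioHandler *h = handlers; h; h = h->next) {
            if (h->io_flush && h->io_flush(h->opaque)) {
                busy = true;        /* keep polling this handler */
            }
        }
        return busy;                /* false: nothing to wait for */
    }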
     }
     fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
     qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
-        qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);
+        qemu_gluster_aio_event_reader, NULL, NULL, s);
 
 out:
     qemu_opts_del(opts);
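
The five-argument call above matches the qemu_aio_set_fd_handler() of this period; the patch only passes NULL in the io_flush slot, it does not change the signature (dropping the parameter itself happens elsewhere in the series). For reference, the declaration of the time, give or take the header it lived in:

    void qemu_aio_set_fd_handler(int fd,
                                 IOHandler *io_read,
                                 IOHandler *io_write,
                                 AioFlushHandler *io_flush,
                                 void *opaque);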
         qemu_mutex_lock_iothread(); /* We are in gluster thread context */
         acb->common.cb(acb->common.opaque, -EIO);
         qemu_aio_release(acb);
-        s->qemu_aio_count--;
         close(s->fds[GLUSTER_FD_READ]);
         close(s->fds[GLUSTER_FD_WRITE]);
         qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
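
This error path sits in gluster_finish_aiocb(), the completion callback handed to the glfs_*_async() calls. libgfapi invokes it from a gluster thread, which is why it takes the iothread mutex before completing the request, and why the normal path merely writes the acb pointer into the pipe. A sketch of the surrounding function (reconstructed, not part of the patch):

    /* Sketch: runs in gluster thread context; must not touch the block
     * layer directly except under the iothread mutex. */
    static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
    {
        GlusterAIOCB *acb = arg;
        BDRVGlusterState *s = acb->common.bs->opaque;

        acb->ret = ret;
        if (qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb))
                != sizeof(acb)) {
            /* Pipe write failed: the error path shown above completes
             * the request with -EIO and tears the handlers down. */
        }
    }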
     offset = sector_num * BDRV_SECTOR_SIZE;
     size = nb_sectors * BDRV_SECTOR_SIZE;
-    s->qemu_aio_count++;
 
     acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = size;
     return &acb->common;
 
 out:
-    s->qemu_aio_count--;
     qemu_aio_release(acb);
     return NULL;
 }
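
offset and size above convert the request from 512-byte sectors (BDRV_SECTOR_SIZE) into the byte units libgfapi expects. Between the two hunks, the elided middle of qemu_gluster_aio_rw() submits the request; from the file of this era it reads roughly:

    /* Sketch of the elided submission: pass the guest iovec straight
     * through to libgfapi; the flags argument is 0. */
    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                 &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
                                &gluster_finish_aiocb, acb);
    }
    if (ret < 0) {
        goto out;                   /* nothing in flight; release acb */
    }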
     acb->size = 0;
     acb->ret = 0;
     acb->finished = NULL;
-    s->qemu_aio_count++;
 
     ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
     if (ret < 0) {
     return &acb->common;
 
 out:
-    s->qemu_aio_count--;
     qemu_aio_release(acb);
     return NULL;
 }
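
With the counter gone, the flush path reduces to allocate, submit, or release on failure. Applying the hunks above, the whole function would read (sketch of the post-patch result):

    static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
            BlockDriverCompletionFunc *cb, void *opaque)
    {
        int ret;
        GlusterAIOCB *acb;
        BDRVGlusterState *s = bs->opaque;

        acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
        acb->size = 0;
        acb->ret = 0;
        acb->finished = NULL;

        ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
        if (ret < 0) {
            goto out;
        }
        return &acb->common;

    out:
        qemu_aio_release(acb);
        return NULL;
    }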
     acb->size = 0;
     acb->ret = 0;
     acb->finished = NULL;
-    s->qemu_aio_count++;
 
     ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
     if (ret < 0) {
     return &acb->common;
 
 out:
-    s->qemu_aio_count--;
     qemu_aio_release(acb);
     return NULL;
 }
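
The discard hunk is the same shape. For reference, the libgfapi entry points used by these last two hunks have, cited from memory and worth double-checking against the installed glfs.h, these prototypes (QEMU compiles the discard call sites only when configure detects support, under CONFIG_GLUSTERFS_DISCARD):

    int glfs_fsync_async(glfs_fd_t *fd, glfs_io_cbk fn, void *data);
    int glfs_discard_async(glfs_fd_t *fd, off_t offset, size_t len,
                           glfs_io_cbk fn, void *data);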