*
* On failure NULL is returned, errp is set and the reference to
* child_bs is also dropped.
+ *
+ * The caller must hold the AioContext lock of @child_bs, but not that of @ctx
+ * (unless @child_bs is already in @ctx).
*/
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
const char *child_name,
const BdrvChildRole *child_role,
+ AioContext *ctx,
uint64_t perm, uint64_t shared_perm,
void *opaque, Error **errp)
{
BdrvChild *child;
+ Error *local_err = NULL;
int ret;
ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp);
.opaque = opaque,
};
+ /* If the AioContexts don't match, first try to move the subtree of
+ * child_bs into the AioContext of the new parent. If this doesn't work,
+ * try moving the parent into the AioContext of child_bs instead. */
+ if (bdrv_get_aio_context(child_bs) != ctx) {
+ ret = bdrv_try_set_aio_context(child_bs, ctx, &local_err);
+ if (ret < 0 && child_role->can_set_aio_ctx) {
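+ /* The new child edge goes on the @ignore list so that the
+ * can_set_aio_ctx()/set_aio_ctx() walk does not recurse back through
+ * the edge that is just being created. */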
+ GSList *ignore = g_slist_prepend(NULL, child);
+ ctx = bdrv_get_aio_context(child_bs);
+ if (child_role->can_set_aio_ctx(child, ctx, &ignore, NULL)) {
+ error_free(local_err);
+ ret = 0;
+ g_slist_free(ignore);
+ ignore = g_slist_prepend(NULL, child);
+ child_role->set_aio_ctx(child, ctx, &ignore);
+ }
+ g_slist_free(ignore);
+ }
+ if (ret < 0) {
+ error_propagate(errp, local_err);
+ g_free(child);
+ bdrv_abort_perm_update(child_bs);
+ return NULL;
+ }
+ }
+
/* This performs the matching bdrv_set_perm() for the above check. */
bdrv_replace_child(child, child_bs);
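
As a reading aid only (not part of the patch), the following sketch shows the contract from the comment above from a caller's point of view: hold the AioContext lock of the child node being attached, but not the lock of the target @ctx unless the two already match. Everything except the existing QEMU APIs (bdrv_ref(), bdrv_get_aio_context(), aio_context_acquire()/aio_context_release()) and the new bdrv_root_attach_child() signature is an illustrative placeholder.

/* Sketch only: "my_role", "my_parent", "perm" and "shared" stand in for
 * whatever a real parent implementation would pass. */
static BdrvChild *attach_root_sketch(BlockDriverState *bs, AioContext *ctx,
                                     const BdrvChildRole *my_role,
                                     uint64_t perm, uint64_t shared,
                                     void *my_parent, Error **errp)
{
    AioContext *child_ctx = bdrv_get_aio_context(bs);
    BdrvChild *c;

    bdrv_ref(bs);                    /* reference for the new parent       */
    aio_context_acquire(child_ctx);  /* hold the child's lock ...          */
    c = bdrv_root_attach_child(bs, "sketch", my_role, ctx,
                               perm, shared, my_parent, errp);
    aio_context_release(child_ctx);  /* ... but not @ctx's unless equal    */
    return c;
}
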
*
* On failure NULL is returned, errp is set and the reference to
* child_bs is also dropped.
+ *
+ * If @parent_bs and @child_bs are in different AioContexts, the caller must
+ * hold the AioContext lock for @child_bs, but not for @parent_bs.
*/
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
BlockDriverState *child_bs,
bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm);
assert(parent_bs->drv);
- assert(bdrv_get_aio_context(parent_bs) == bdrv_get_aio_context(child_bs));
bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL,
perm, shared_perm, &perm, &shared_perm);
child = bdrv_root_attach_child(child_bs, child_name, child_role,
+ bdrv_get_aio_context(parent_bs),
perm, shared_perm, parent_bs, errp);
if (child == NULL) {
return NULL;
return NULL;
}
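
Likewise purely illustrative (assuming, as documented above, that the caller holds @child_bs's AioContext lock but not @parent_bs's when they differ): on success the two nodes should have ended up in the same AioContext, because either the child's subtree was moved to the parent's context or the parent was moved to the child's. The helper name and the final assert are not part of the patch.

/* Sketch: attach child_bs under parent_bs and check that their contexts
 * were equalised; only the QEMU APIs used here are real. */
static void attach_child_sketch(BlockDriverState *parent_bs,
                                BlockDriverState *child_bs,
                                const BdrvChildRole *role, Error **errp)
{
    AioContext *child_ctx = bdrv_get_aio_context(child_bs);

    bdrv_ref(child_bs);              /* reference for the new parent */
    aio_context_acquire(child_ctx);
    if (bdrv_attach_child(parent_bs, child_bs, "sketch", role, errp)) {
        /* On success both nodes should now share one AioContext. */
        assert(bdrv_get_aio_context(parent_bs) ==
               bdrv_get_aio_context(child_bs));
    }
    aio_context_release(child_ctx);
}
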
- blk->root = bdrv_root_attach_child(bs, "root", &child_root,
+ blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
perm, BLK_PERM_ALL, blk, errp);
if (!blk->root) {
blk_unref(blk);
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
bdrv_ref(bs);
- blk->root = bdrv_root_attach_child(bs, "root", &child_root,
+ blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
blk->perm, blk->shared_perm, blk, errp);
if (blk->root == NULL) {
return -EPERM;
{
BlockDriverState *bs = blk_bs(blk);
- /* FIXME The AioContext of bs and blk can be inconsistent. For the moment,
- * we prefer the one of bs for compatibility. */
if (bs) {
- return bdrv_get_aio_context(blk_bs(blk));
+ AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
+ assert(ctx == blk->ctx);
}
return blk->ctx;
BdrvChild *c;
bdrv_ref(bs);
- c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
- job, errp);
+ if (job->job.aio_context != qemu_get_aio_context()) {
+ aio_context_release(job->job.aio_context);
+ }
+ c = bdrv_root_attach_child(bs, name, &child_job, job->job.aio_context,
+ perm, shared_perm, job, errp);
+ if (job->job.aio_context != qemu_get_aio_context()) {
+ aio_context_acquire(job->job.aio_context);
+ }
if (c == NULL) {
return -EPERM;
}
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
const char *child_name,
const BdrvChildRole *child_role,
+ AioContext *ctx,
uint64_t perm, uint64_t shared_perm,
void *opaque, Error **errp);
void bdrv_root_unref_child(BdrvChild *child);
&error_abort);
blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
blk_insert_bs(blk_target, target, &error_abort);
+ blk_set_allow_aio_context_change(blk_target, true);
aio_context_acquire(ctx);
tjob = block_job_create("job0", &test_job_driver, NULL, src,
g_assert_false(job->job.paused);
g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
- do_drain_begin(drain_type, target);
+ do_drain_begin_unlocked(drain_type, target);
if (drain_type == BDRV_DRAIN_ALL) {
/* bdrv_drain_all() drains both src and target */
g_assert_true(job->job.paused);
g_assert_false(job->job.busy); /* The job is paused */
- do_drain_end(drain_type, target);
+ do_drain_end_unlocked(drain_type, target);
if (use_iothread) {
/* paused is reset in the I/O thread, wait for it */
if (use_iothread) {
blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
+ blk_set_aio_context(blk_target, qemu_get_aio_context(), &error_abort);
}
aio_context_release(ctx);