block: Use block_job_add_bdrv() in backup_start()
author     Alberto Garcia <berto@igalia.com>
Fri, 28 Oct 2016 07:08:06 +0000 (10:08 +0300)
committer  Kevin Wolf <kwolf@redhat.com>
Mon, 31 Oct 2016 15:52:38 +0000 (16:52 +0100)
Use block_job_add_bdrv() instead of manually blocking all operations on
the target in backup_start() and unblocking them again in backup_run().

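For reference, block_job_add_bdrv() (added earlier in this series) records
the node on the job and installs the same op blocker, so the blockers are
dropped automatically when the job goes away. A rough sketch of the helper,
assuming QEMU's internal blockjob API of this series; the exact upstream
code may differ:

    /* blockjob.c (sketch): remember the node and block operations on it.
     * Relies on QEMU-internal types; not standalone code. */
    void block_job_add_bdrv(BlockJob *job, BlockDriverState *bs)
    {
        job->nodes = g_slist_prepend(job->nodes, bs); /* tracked for cleanup */
        bdrv_ref(bs);                                 /* keep the node alive */
        bdrv_op_block_all(bs, job->blocker);          /* same blocker as before */
    }
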
Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
block/backup.c

index 81d4042ae8d9a1851319bb8e88c737cb82f3645e..44c7ff3d16946ef4f500efbefaf48ba9e7ab6433 100644 (file)
@@ -446,7 +446,6 @@ static void coroutine_fn backup_run(void *opaque)
     BackupBlockJob *job = opaque;
     BackupCompleteData *data;
     BlockDriverState *bs = blk_bs(job->common.blk);
-    BlockBackend *target = job->target;
     int64_t start, end;
     int64_t sectors_per_cluster = cluster_size_sectors(job);
     int ret = 0;
@@ -533,8 +532,6 @@ static void coroutine_fn backup_run(void *opaque)
     qemu_co_rwlock_unlock(&job->flush_rwlock);
     g_free(job->done_bitmap);
 
-    bdrv_op_unblock_all(blk_bs(target), job->common.blocker);
-
     data = g_malloc(sizeof(*data));
     data->ret = ret;
     block_job_defer_to_main_loop(&job->common, backup_complete, data);
@@ -648,7 +645,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
         job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
     }
 
-    bdrv_op_block_all(target, job->common.blocker);
+    block_job_add_bdrv(&job->common, target);
     job->common.len = len;
     job->common.co = qemu_coroutine_create(backup_run, job);
     block_job_txn_add_job(txn, &job->common);
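
The bdrv_op_unblock_all() call removed from backup_run() is no longer
needed because the generic job teardown unblocks every node that was
registered with block_job_add_bdrv(). Roughly, in block_job_unref()
(again a sketch under the same assumptions, not the exact upstream code):

    /* blockjob.c (sketch): drop the blockers for all registered nodes
     * when the last reference to the job is released. */
    GSList *l;
    for (l = job->nodes; l; l = l->next) {
        BlockDriverState *bs = l->data;
        bdrv_op_unblock_all(bs, job->blocker);  /* undo block_job_add_bdrv() */
        bdrv_unref(bs);
    }
    g_slist_free(job->nodes);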