@@ ... @@ typedef struct MirrorBlockJob {
int64_t active_write_bytes_in_flight;
bool prepared;
bool in_drain;
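+ /* Whether the base was read-only before the job made it writable;
+  * if so, it is reopened read-only again if the job aborts. */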
+ bool base_ro;
} MirrorBlockJob;
typedef struct MirrorBDSOpaque {
@@ ... @@ static void mirror_exit_common(Job *job)
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
bdrv_graph_wrunlock();
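+ /* On abort, restore the base (the mirror target of an active commit)
+  * to read-only if that is how the job found it. Errors are ignored
+  * (errp is NULL); the job is already failing. */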
+ if (abort && s->base_ro && !bdrv_is_read_only(target_bs)) {
+ bdrv_reopen_set_read_only(target_bs, true, NULL);
+ }
+
bdrv_drained_end(target_bs);
bdrv_unref(target_bs);
@@ ... @@ static BlockJob *mirror_start_job(
bool is_none_mode, BlockDriverState *base,
bool auto_complete, const char *filter_node_name,
bool is_mirror, MirrorCopyMode copy_mode,
+ bool base_ro,
Error **errp)
{
MirrorBlockJob *s;
@@ ... @@ static BlockJob *mirror_start_job(
bdrv_unref(mirror_top_bs);
s->mirror_top_bs = mirror_top_bs;
+ s->base_ro = base_ro;
/* No resize for the target either; while the mirror is still running, a
* consistent read isn't necessarily possible. We could possibly allow
@@ ... @@ void mirror_start(
speed, granularity, buf_size, backing_mode, zero_target,
on_source_error, on_target_error, unmap, NULL, NULL,
&mirror_job_driver, is_none_mode, base, false,
- filter_node_name, true, copy_mode, errp);
+ filter_node_name, true, copy_mode, false, errp);
}
@@ ... @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
on_error, on_error, true, cb, opaque,
&commit_active_job_driver, false, base, auto_complete,
filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
- errp);
+ base_read_only, errp);
if (!job) {
goto error_restore_flags;
}
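
For context, the base_read_only variable and the error_restore_flags label
used above come from commit_active_start() itself, which temporarily reopens
a read-only base writable for the duration of the job. A minimal sketch of
that surrounding code, reconstructed here only to illustrate the pattern and
not part of this diff:

    bool base_read_only = bdrv_is_read_only(base);

    /* The commit job writes into the base, so make it writable for now. */
    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    /* ... job creation and the success path ... */

error_restore_flags:
    /* Ignore any error from bdrv_reopen_set_read_only() here so that the
     * original error is the one propagated to the caller. */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;

With this change, an active commit job that starts successfully and is later
cancelled or fails also restores the read-only state itself, in
mirror_exit_common(); the error_restore_flags path only covers failures
before the job exists.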