@@ -93,6 +93,7 @@ typedef struct MirrorBlockJob {
     int64_t active_write_bytes_in_flight;
     bool prepared;
     bool in_drain;
+    bool base_ro;
 } MirrorBlockJob;
typedef struct MirrorBDSOpaque {
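The new field is only meaningful for active commit. A short gloss, with a comment that is mine rather than part of the patch:

    bool base_ro;   /* base node was read-only before commit_active_start()
                     * reopened it writable; restore that state on abort */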
@@ -794,6 +795,10 @@ static int mirror_exit_common(Job *job)
     bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
     bdrv_graph_wrunlock();
+    if (abort && s->base_ro && !bdrv_is_read_only(target_bs)) {
+        bdrv_reopen_set_read_only(target_bs, true, NULL);
+    }
+
     bdrv_drained_end(target_bs);
     bdrv_unref(target_bs);
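In the active-commit configuration, target_bs here is the base node that commit_active_start() reopened read-write, so the abort path undoes that reopen. An annotated sketch of the same logic (comments are mine):

    /* Only the abort path restores the flag: on successful completion the
     * base becomes the node the guest keeps writing to, so it must stay
     * writable. */
    if (abort && s->base_ro && !bdrv_is_read_only(target_bs)) {
        /* Best effort; the job has already failed, so any reopen error is
         * deliberately dropped (NULL errp). */
        bdrv_reopen_set_read_only(target_bs, true, NULL);
    }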
@@ -1717,6 +1722,7 @@ static BlockJob *mirror_start_job(
                              bool is_none_mode, BlockDriverState *base,
                              bool auto_complete, const char *filter_node_name,
                              bool is_mirror, MirrorCopyMode copy_mode,
+                             bool base_ro,
                              Error **errp)
 {
     MirrorBlockJob *s;
@@ -1800,6 +1806,7 @@ static BlockJob *mirror_start_job(
     bdrv_unref(mirror_top_bs);
     s->mirror_top_bs = mirror_top_bs;
+    s->base_ro = base_ro;
     /* No resize for the target either; while the mirror is still running, a
      * consistent read isn't necessarily possible. We could possibly allow
@@ -2029,7 +2036,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
                      speed, granularity, buf_size, backing_mode, zero_target,
                      on_source_error, on_target_error, unmap, NULL, NULL,
                      &mirror_job_driver, is_none_mode, base, false,
-                     filter_node_name, true, copy_mode, errp);
+                     filter_node_name, true, copy_mode, false, errp);
}
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
@@ -2058,7 +2065,7 @@ BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                            on_error, on_error, true, cb, opaque,
                            &commit_active_job_driver, false, base, auto_complete,
                            filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
-                           errp);
+                           base_read_only, errp);
     if (!job) {
         goto error_restore_flags;
     }
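For context, base_read_only is the flag commit_active_start() already computes before it reopens a read-only base writable; so far it was only consulted on the error_restore_flags path, i.e. when mirror_start_job() fails to create the job in the first place. Roughly, the surrounding function looks like this (paraphrased sketch, not part of the diff; '...' elides the unchanged arguments):

    bool base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        /* The active-commit job writes guest data into the base. */
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(..., base_read_only, errp);
    if (!job) {
        goto error_restore_flags;
    }
    return job;

error_restore_flags:
    /* Reached only when starting the job fails; an abort of an already
     * running job goes through mirror_exit_common() instead, which is why
     * the flag is now threaded into the job as s->base_ro. */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;

The plain mirror_start() caller passes false because drive-mirror never reopens a base node, so there is nothing to restore on that path.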