@@ -170,9 +170,10 @@ BlockDriverState * GRAPH_RDLOCK
check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
Error **errp);
-int no_coroutine_fn bdrv_activate(BlockDriverState *bs, Error **errp);
+int no_coroutine_fn GRAPH_RDLOCK
+bdrv_activate(BlockDriverState *bs, Error **errp);
-int coroutine_fn no_co_wrapper
+int coroutine_fn no_co_wrapper_bdrv_rdlock
bdrv_co_activate(BlockDriverState *bs, Error **errp);
void bdrv_activate_all(Error **errp);
@@ -208,8 +209,8 @@ typedef struct BdrvNextIterator {
BlockDriverState *bs;
} BdrvNextIterator;
-BlockDriverState *bdrv_first(BdrvNextIterator *it);
-BlockDriverState *bdrv_next(BdrvNextIterator *it);
+BlockDriverState * GRAPH_RDLOCK bdrv_first(BdrvNextIterator *it);
+BlockDriverState * GRAPH_RDLOCK bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
@@ -59,8 +59,8 @@ BlockBackend *blk_by_public(BlockBackendPublic *public);
void blk_remove_bs(BlockBackend *blk);
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
-bool bdrv_has_blk(BlockDriverState *bs);
-bool bdrv_is_root_node(BlockDriverState *bs);
+bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
+bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
uint64_t shared_perm, Error **errp);
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
@@ -6960,6 +6960,7 @@ void bdrv_activate_all(Error **errp)
BdrvNextIterator it;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
@@ -780,11 +780,12 @@ BlockDriverState *blk_bs(BlockBackend *blk)
return blk->root ? blk->root->bs : NULL;
}
-static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
+static BlockBackend * GRAPH_RDLOCK bdrv_first_blk(BlockDriverState *bs)
{
BdrvChild *child;
GLOBAL_STATE_CODE();
+ assert_bdrv_graph_readable();
QLIST_FOREACH(child, &bs->parents, next_parent) {
if (child->klass == &child_root) {
@@ -812,6 +813,8 @@ bool bdrv_is_root_node(BlockDriverState *bs)
BdrvChild *c;
GLOBAL_STATE_CODE();
+ assert_bdrv_graph_readable();
+
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->klass != &child_root) {
return false;
@@ -2259,6 +2262,7 @@ void blk_activate(BlockBackend *blk, Error **errp)
if (qemu_in_coroutine()) {
bdrv_co_activate(bs, errp);
} else {
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
bdrv_activate(bs, errp);
}
}
@@ -83,6 +83,8 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
uint64_t perm;
int ret;
+ GLOBAL_STATE_CODE();
+
if (!id_wellformed(export->id)) {
error_setg(errp, "Invalid block export id");
return NULL;
@@ -145,7 +147,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
* access since the export could be available before migration handover.
* ctx was acquired in the caller.
*/
+ bdrv_graph_rdlock_main_loop();
bdrv_activate(bs, NULL);
+ bdrv_graph_rdunlock_main_loop();
perm = BLK_PERM_CONSISTENT_READ;
if (export->writable) {
@@ -2329,6 +2329,7 @@ int bdrv_flush_all(void)
int result = 0;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
/*
* bdrv queue is managed by record/replay,
@@ -896,6 +896,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
SnapshotEntry *snapshot_entry;
Error *err = NULL;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = bdrv_all_find_vmstate_bs(NULL, false, NULL, &err);
if (!bs) {
error_report_err(err);
@@ -279,6 +279,8 @@ static void blockdev_insert_medium(const char *device, const char *id,
BlockBackend *blk;
BlockDriverState *bs;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
blk = qmp_get_blk(device, id, errp);
if (!blk) {
return;
@@ -458,6 +458,8 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
Error *local_err = NULL;
BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
+ GLOBAL_STATE_CODE();
+
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
@@ -504,12 +506,15 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
+ bdrv_graph_rdlock_main_loop();
secondary_disk = hidden_disk->bs->backing;
if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
error_setg(errp, "The secondary disk doesn't have block backend");
+ bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
+ bdrv_graph_rdunlock_main_loop();
/* verify the length */
active_length = bdrv_getlength(active_disk->bs);
@@ -566,8 +571,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
- bdrv_graph_wrunlock();
-
/* start backup job now */
error_setg(&s->blocker,
"Block device is in use by internal backup job");
@@ -576,6 +579,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!top_bs || !bdrv_is_root_node(top_bs) ||
!check_top_bs(top_bs, bs)) {
error_setg(errp, "No top_bs or it is invalid");
+ bdrv_graph_wrunlock();
reopen_backing_file(bs, false, NULL);
aio_context_release(aio_context);
return;
@@ -583,6 +587,8 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
bdrv_op_block_all(top_bs, s->blocker);
bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
+ bdrv_graph_wrunlock();
+
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL,
@@ -462,9 +462,9 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
}
-static int bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
- GList **all_bdrvs,
- Error **errp)
+static int GRAPH_RDLOCK
+bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
+ GList **all_bdrvs, Error **errp)
{
g_autoptr(GList) bdrvs = NULL;
@@ -496,7 +496,7 @@ static int bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
}
-static bool bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
+static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
{
if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
return false;
@@ -518,6 +518,7 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return false;
@@ -554,6 +555,7 @@ int bdrv_all_delete_snapshot(const char *name,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -595,6 +597,7 @@ int bdrv_all_goto_snapshot(const char *name,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -631,6 +634,7 @@ int bdrv_all_has_snapshot(const char *name,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -673,7 +677,9 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
+
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@@ -715,6 +721,7 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return NULL;
@@ -1041,6 +1041,8 @@ static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
BlockDriverState *bs;
AioContext *aio_context;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = bdrv_lookup_bs(name, name, errp);
if (bs == NULL) {
return NULL;
@@ -3509,6 +3511,7 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
BlockDriverState *bs;
GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_node(node_name);
if (!bs) {
@@ -3636,6 +3639,8 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
AioContext *new_context;
BlockDriverState *bs;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);
@@ -388,6 +388,8 @@ static int init_blk_migration(QEMUFile *f)
Error *local_err = NULL;
int ret;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
block_mig_state.submitted = 0;
block_mig_state.read_done = 0;
block_mig_state.transferred = 0;
@@ -794,6 +794,8 @@ static void vm_completion(ReadLineState *rs, const char *str)
BlockDriverState *bs;
BdrvNextIterator it;
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
len = strlen(str);
readline_set_completion_index(rs, len);
@@ -383,6 +383,9 @@ static void test_sync_op_check(BdrvChild *c)
static void test_sync_op_activate(BdrvChild *c)
{
+ GLOBAL_STATE_CODE();
+ GRAPH_RDLOCK_GUARD_MAINLOOP();
+
/* Early success: Image is not inactive */
bdrv_activate(c->bs, NULL);
}
This adds GRAPH_RDLOCK annotations to declare that callers of
bdrv_first_blk() and bdrv_is_root_node() need to hold a reader lock
for the graph. These functions are the only functions in
block-backend.c that access the parent list of a node.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/block-global-state.h          |  9 +++++----
 include/sysemu/block-backend-global-state.h |  4 ++--
 block.c                                     |  1 +
 block/block-backend.c                       |  6 +++++-
 block/export/export.c                       |  4 ++++
 block/io.c                                  |  1 +
 block/monitor/block-hmp-cmds.c              |  2 ++
 block/qapi-sysemu.c                         |  2 ++
 block/replication.c                         | 10 ++++++++--
 block/snapshot.c                            | 15 +++++++++++----
 blockdev.c                                  |  5 +++++
 migration/block.c                           |  2 ++
 migration/migration-hmp-cmds.c              |  2 ++
 tests/unit/test-block-iothread.c            |  3 +++
 14 files changed, 53 insertions(+), 13 deletions(-)
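For context, the calling convention these annotations enforce is sketched
below. This is illustrative only and not part of the patch: the function
name example_is_root() is made up and the include paths are approximate.
In main-loop code, a GRAPH_RDLOCK function such as bdrv_is_root_node() may
only be called while the graph reader lock is held, which is what
GRAPH_RDLOCK_GUARD_MAINLOOP() provides for the rest of the enclosing scope.

/*
 * Illustrative sketch, not part of this patch: how a main-loop caller
 * is expected to use a GRAPH_RDLOCK function after this change. The
 * name example_is_root() is invented; headers are approximate and
 * only resolve inside the QEMU tree.
 */
#include "qemu/osdep.h"
#include "block/graph-lock.h"       /* GRAPH_RDLOCK_GUARD_MAINLOOP() */
#include "sysemu/block-backend.h"   /* bdrv_is_root_node() */

static bool example_is_root(BlockDriverState *bs)
{
    /* Graph accesses below must run under the BQL in the main loop. */
    GLOBAL_STATE_CODE();

    /*
     * Take the graph reader lock for the rest of this scope. Without
     * it, clang's Thread Safety Analysis (when enabled) rejects the
     * call below at compile time, and assert_bdrv_graph_readable()
     * can fire at runtime when graph lock debugging is enabled.
     */
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    return bdrv_is_root_node(bs);
}

The explicit bdrv_graph_rdlock_main_loop()/bdrv_graph_rdunlock_main_loop()
pair used in the blk_exp_add() and replication_start() hunks above is the
equivalent form for cases where the critical section should end before the
function returns.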