@@ -308,7 +308,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
*/
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \
- QEMU_CLOCK_REALTIME)
+ QEMU_CLOCK_MAX)
/**
* aio_bh_new_full: Allocate a new bottom half structure.
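
The call sites converted below use aio_bh_schedule_oneshot_event(ctx, cb, opaque, clock), whose definition is not part of this excerpt. A minimal sketch of its expected shape, assuming it simply mirrors the legacy macro above and forwards the caller's clock type to aio_bh_schedule_oneshot_full():

    #define aio_bh_schedule_oneshot_event(ctx, cb, opaque, clock_type) \
        aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \
                                     (clock_type))
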
@@ -7144,7 +7144,9 @@ void bdrv_schedule_unref(BlockDriverState *bs)
if (!bs) {
return;
}
- aio_bh_schedule_oneshot(qemu_get_aio_context(), bdrv_schedule_unref_bh, bs);
+ aio_bh_schedule_oneshot_event(qemu_get_aio_context(),
+ bdrv_schedule_unref_bh, bs,
+ QEMU_CLOCK_REALTIME);
}
struct BdrvOpBlocker {
@@ -2780,7 +2780,7 @@ static void ide_restart_cb(void *opaque, bool running, RunState state)
if (!bus->bh) {
bus->bh = qemu_bh_new(ide_restart_bh, bus);
- qemu_bh_schedule(bus->bh);
+ qemu_bh_schedule_event(bus->bh, QEMU_CLOCK_VIRTUAL);
}
}
@@ -166,9 +166,9 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
/* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
blk_inc_in_flight(s->conf.blk);
- aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
- scsi_device_for_each_req_async_bh,
- data);
+ aio_bh_schedule_oneshot_event(blk_get_aio_context(s->conf.blk),
+ scsi_device_for_each_req_async_bh,
+ data, QEMU_CLOCK_REALTIME);
}
static void scsi_device_realize(SCSIDevice *s, Error **errp)
@@ -580,7 +580,8 @@ void monitor_resume(Monitor *mon)
ctx = qemu_get_aio_context();
}
- aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
+        aio_bh_schedule_oneshot_event(ctx, monitor_accept_input, mon,
+                                      QEMU_CLOCK_REALTIME);
}
trace_monitor_suspend(mon, -1);
@@ -541,8 +541,9 @@ void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp)
* since chardev might be running in the monitor I/O
* thread. Schedule a bottom half.
*/
- aio_bh_schedule_oneshot(iothread_get_aio_context(mon_iothread),
- monitor_qmp_setup_handlers_bh, mon);
+ aio_bh_schedule_oneshot_event(iothread_get_aio_context(mon_iothread),
+ monitor_qmp_setup_handlers_bh, mon,
+ QEMU_CLOCK_REALTIME);
/* The bottom half will add @mon to @mon_list */
} else {
qemu_chr_fe_set_handlers(&mon->common.chr, monitor_can_read,
@@ -254,8 +254,8 @@ QDict *coroutine_mixed_fn qmp_dispatch(const QmpCommandList *cmds, QObject *requ
.errp = &err,
.co = qemu_coroutine_self(),
};
- aio_bh_schedule_oneshot(iohandler_get_aio_context(), do_qmp_dispatch_bh,
- &data);
+ aio_bh_schedule_oneshot_event(iohandler_get_aio_context(), do_qmp_dispatch_bh,
+ &data, QEMU_CLOCK_REALTIME);
qemu_coroutine_yield();
}
qobject_unref(args);
@@ -132,23 +132,17 @@ void replay_add_event(ReplayAsyncEventKind event_kind,
void replay_bh_schedule_event(QEMUBH *bh)
{
- if (events_enabled) {
- uint64_t id = replay_get_current_icount();
- replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
- } else {
- qemu_bh_schedule(bh);
- }
+ uint64_t id = replay_get_current_icount();
+ assert(events_enabled);
+ replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
}
-void replay_bh_schedule_oneshot_event(AioContext *ctx,
-                                      QEMUBHFunc *cb, void *opaque)
+void replay_bh_oneshot_event(AioContext *ctx,
+                             QEMUBHFunc *cb, void *opaque)
{
- if (events_enabled) {
- uint64_t id = replay_get_current_icount();
- replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
- } else {
- aio_bh_schedule_oneshot(ctx, cb, opaque);
- }
+ uint64_t id = replay_get_current_icount();
+ assert(events_enabled);
+ replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
}
void replay_add_input_event(struct InputEvent *event)
@@ -166,7 +160,10 @@ void replay_block_event(QEMUBH *bh, uint64_t id)
if (events_enabled) {
replay_add_event(REPLAY_ASYNC_EVENT_BLOCK, bh, NULL, id);
} else {
- qemu_bh_schedule(bh);
+        /*
+         * The block layer can schedule BHs before replay events are enabled.
+         */
+        qemu_bh_schedule_event(bh, QEMU_CLOCK_REALTIME);
}
}
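
With the else fallbacks removed above, callers of these replay helpers are presumably expected to check replay_events_enabled() themselves before calling in. A hypothetical call site (not from the patch) would look roughly like:

    if (replay_events_enabled()) {
        replay_bh_oneshot_event(ctx, cb, opaque);
    } else {
        aio_bh_schedule_oneshot_event(ctx, cb, opaque, QEMU_CLOCK_REALTIME);
    }
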
@@ -51,7 +51,8 @@ void aio_wait_kick(void)
smp_mb();
if (qatomic_read(&global_aio_wait.num_waiters)) {
- aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
+        aio_bh_schedule_oneshot_event(qemu_get_aio_context(), dummy_bh_cb,
+                                      NULL, QEMU_CLOCK_REALTIME);
}
}
@@ -57,6 +57,9 @@ enum {
/* Schedule periodically when the event loop is idle */
BH_IDLE = (1 << 4),
+
+ /* BH being handled by replay machinery */
+    BH_REPLAY    = (1 << 5),
};
struct QEMUBH {
@@ -144,6 +147,10 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
void *opaque, const char *name,
QEMUClockType clock_type)
{
+ if (clock_type == QEMU_CLOCK_MAX) {
+ assert(replay_mode == REPLAY_MODE_NONE);
+ clock_type = QEMU_CLOCK_REALTIME;
+ }
switch (clock_type) {
case QEMU_CLOCK_VIRTUAL:
case QEMU_CLOCK_VIRTUAL_RT:
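
For illustration, under the assumptions above the two entry points behave as follows for a hypothetical callback my_cb (not part of the patch):

    /* Legacy macro: expands to the QEMU_CLOCK_MAX sentinel, which asserts
     * that record/replay is not active and then falls back to REALTIME. */
    aio_bh_schedule_oneshot(ctx, my_cb, NULL);

    /* Converted call: the caller names the clock domain its work belongs to,
     * so record/replay can order the event deterministically. */
    aio_bh_schedule_oneshot_event(ctx, my_cb, NULL, QEMU_CLOCK_VIRTUAL);
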
@@ -177,6 +184,12 @@ void aio_bh_call(QEMUBH *bh)
{
bool last_engaged_in_io = false;
+ if (bh->flags & BH_REPLAY) {
+ assert(!(bh->flags & BH_SCHEDULED));
+ assert(!(bh->flags & BH_DELETED));
+ assert(!(bh->flags & BH_PENDING));
+ bh->flags &= ~BH_REPLAY;
+ }
/* Make a copy of the guard-pointer as cb may free the bh */
MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
if (reentrancy_guard) {
@@ -263,11 +276,15 @@ void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type)
void qemu_bh_schedule_idle(QEMUBH *bh)
{
+    /* No replay mechanism for idle-scheduled BHs at the moment */
+ assert(replay_mode == REPLAY_MODE_NONE);
aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}
void qemu_bh_schedule(QEMUBH *bh)
{
+ /* Callers should be converted to use qemu_bh_schedule_event */
+ assert(replay_mode == REPLAY_MODE_NONE);
aio_bh_enqueue(bh, BH_SCHEDULED);
}
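
The body of qemu_bh_schedule_event() itself is not shown in this excerpt. A sketch of its likely shape, assuming it follows the same pattern as aio_bh_schedule_oneshot_full() above: virtual-clock BHs go through the replay event queue when replay events are enabled, everything else is enqueued directly.

    void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type)
    {
        switch (clock_type) {
        case QEMU_CLOCK_VIRTUAL:
        case QEMU_CLOCK_VIRTUAL_RT:
            if (replay_mode != REPLAY_MODE_NONE && replay_events_enabled()) {
                /* Recorded and replayed as an asynchronous event */
                replay_bh_schedule_event(bh);
                return;
            }
            break;
        default:
            break;
        }
        aio_bh_enqueue(bh, BH_SCHEDULED);
    }
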
@@ -275,6 +292,8 @@ void qemu_bh_schedule(QEMUBH *bh)
*/
void qemu_bh_cancel(QEMUBH *bh)
{
+ /* No mechanism for canceling replay-scheduled bh at the moment */
+ assert(!(bh->flags & BH_REPLAY));
qatomic_and(&bh->flags, ~BH_SCHEDULED);
}
@@ -283,6 +302,8 @@ void qemu_bh_cancel(QEMUBH *bh)
*/
void qemu_bh_delete(QEMUBH *bh)
{
+ /* No mechanism for deleting replay-scheduled bh at the moment */
+ assert(!(bh->flags & BH_REPLAY));
aio_bh_enqueue(bh, BH_DELETED);
}
@@ -683,7 +704,7 @@ void aio_co_schedule(AioContext *ctx, Coroutine *co)
QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
co, co_scheduled_next);
- qemu_bh_schedule(ctx->co_schedule_bh);
+ qemu_bh_schedule_event(ctx->co_schedule_bh, QEMU_CLOCK_REALTIME);
aio_context_unref(ctx);
}
@@ -148,7 +148,7 @@ void qemu_notify_event(void)
if (!qemu_aio_context) {
return;
}
- qemu_bh_schedule(qemu_notify_bh);
+ qemu_bh_schedule_event(qemu_notify_bh, QEMU_CLOCK_REALTIME);
}
static GArray *gpollfds;
@@ -115,7 +115,7 @@ static void *worker_thread(void *opaque)
smp_wmb();
req->state = THREAD_DONE;
- qemu_bh_schedule(pool->completion_bh);
+ qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
qemu_mutex_lock(&pool->lock);
}
@@ -167,7 +167,7 @@ static void spawn_thread(ThreadPool *pool)
* inherit the correct affinity instead of the vcpu affinity.
*/
if (!pool->pending_threads) {
- qemu_bh_schedule(pool->new_thread_bh);
+ qemu_bh_schedule_event(pool->new_thread_bh, QEMU_CLOCK_REALTIME);
}
}
@@ -195,7 +195,7 @@ restart:
/* Schedule ourselves in case elem->common.cb() calls aio_poll() to
* wait for another request that completed at the same time.
*/
- qemu_bh_schedule(pool->completion_bh);
+ qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
elem->common.cb(elem->common.opaque, elem->ret);
@@ -225,7 +225,7 @@ static void thread_pool_cancel(BlockAIOCB *acb)
QEMU_LOCK_GUARD(&pool->lock);
if (elem->state == THREAD_QUEUED) {
QTAILQ_REMOVE(&pool->request_list, elem, reqs);
- qemu_bh_schedule(pool->completion_bh);
+ qemu_bh_schedule_event(pool->completion_bh, QEMU_CLOCK_REALTIME);
elem->state = THREAD_DONE;
elem->ret = -ECANCELED;
@@ -292,7 +292,8 @@ def gen_no_co_wrapper(func: FuncDecl) -> str:
}};
assert(qemu_in_coroutine());
- aio_bh_schedule_oneshot(qemu_get_aio_context(), {name}_bh, &s);
+    aio_bh_schedule_oneshot_event(qemu_get_aio_context(), {name}_bh, &s,
+                                  QEMU_CLOCK_REALTIME);
qemu_coroutine_yield();
{func.ret}