--- a/aio-posix.c
+++ b/aio-posix.c
@@ -23,7 +23,6 @@ struct AioHandler
     GPollFD pfd;
     IOHandler *io_read;
     IOHandler *io_write;
-    AioFlushHandler *io_flush;
     int deleted;
     int pollfds_idx;
     void *opaque;
@@ -84,7 +83,6 @@ void aio_set_fd_handler(AioContext *ctx,
         /* Update handler with latest information */
         node->io_read = io_read;
         node->io_write = io_write;
-        node->io_flush = io_flush;
         node->opaque = opaque;
         node->pollfds_idx = -1;
 
@@ -173,7 +171,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     int ret;
-    bool busy, progress;
+    bool progress;
 
     progress = false;
 
@@ -200,20 +198,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     g_array_set_size(ctx->pollfds, 0);
 
     /* fill pollfds */
-    busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         node->pollfds_idx = -1;
-
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->opaque) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->pfd.events) {
             GPollFD pfd = {
                 .fd = node->pfd.fd,
@@ -226,11 +212,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers--;
 
-    /* No AIO operations? Get us out of here */
-    if (!busy) {
-        return progress;
-    }
-
     /* wait until next event */
     ret = g_poll((GPollFD *)ctx->pollfds->data,
                  ctx->pollfds->len,
@@ -250,6 +231,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
         }
     }
 
-    assert(progress || busy);
-    return true;
+    return progress;
 }
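
Before the aio-win32.c counterpart below, a note on what the deleted branch
used to call: an .io_flush() handler simply reported whether the driver had
requests in flight.  The following is only an illustrative sketch with the
AioFlushHandler signature used above; MyDriverState and
my_driver_requests_in_flight() are made-up names, not code from this series.

/* Illustrative old-style .io_flush() handler: a non-zero return meant
 * "requests in flight, keep polling this fd"; returning 0 made the old
 * aio_poll() skip the fd entirely.  MyDriverState and
 * my_driver_requests_in_flight() are hypothetical.
 */
static int my_driver_io_flush(void *opaque)
{
    MyDriverState *s = opaque;

    return my_driver_requests_in_flight(s) > 0;
}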
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -23,7 +23,6 @@
 struct AioHandler {
     EventNotifier *e;
     EventNotifierHandler *io_notify;
-    AioFlushEventNotifierHandler *io_flush;
     GPollFD pfd;
     int deleted;
     QLIST_ENTRY(AioHandler) node;
@@ -73,7 +72,6 @@ void aio_set_event_notifier(AioContext *ctx,
         }
         /* Update handler with latest information */
         node->io_notify = io_notify;
-        node->io_flush = io_flush;
     }
 
     aio_notify(ctx);
@@ -96,7 +94,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool busy, progress;
+    bool progress;
     int count;
 
     progress = false;
@@ -147,19 +145,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers++;
 
     /* fill fd sets */
-    busy = false;
     count = 0;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->e) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->io_notify) {
             events[count++] = event_notifier_get_handle(node->e);
         }
@@ -167,11 +154,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     ctx->walking_handlers--;
 
-    /* No AIO operations? Get us out of here */
-    if (!busy) {
-        return progress;
-    }
-
     /* wait until next event */
     while (count > 0) {
         int timeout = blocking ? INFINITE : 0;
@@ -214,6 +196,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }
 
-    assert(progress || busy);
-    return true;
+    return progress;
 }
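
Both implementations now report whether progress was made instead of
unconditionally returning true.  A minimal sketch of what that enables,
assuming ctx is a valid AioContext (the loop is illustrative, not part of
this patch):

/* Non-blocking drain: keep dispatching ready handlers until an
 * iteration makes no progress, which aio_poll() now reports via its
 * return value.
 */
while (aio_poll(ctx, false)) {
    /* handlers ran inside aio_poll(); nothing more to do here */
}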
Now that bdrv_drain_all() checks that requests are pending before
calling qemu_aio_wait(), it is no longer necessary to call .io_flush()
handlers.

The behavior of aio_poll() changes as follows: .io_flush() is no longer
invoked and file descriptors are *always* monitored.  Previously,
returning 0 from .io_flush() would cause the file descriptor to be
skipped.

Due to this change it is essential to check that requests are pending
before calling qemu_aio_wait().  Failure to do so means we block, for
example, waiting for an idle iSCSI socket to become readable when there
are no requests.  Currently all qemu_aio_wait()/aio_poll() callers check
before calling.

The next patches will remove .io_flush() handler code until we can
finally drop the io_flush arguments to aio_set_fd_handler() and
friends.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 aio-posix.c | 24 ++----------------------
 aio-win32.c | 23 ++---------------------
 2 files changed, 4 insertions(+), 43 deletions(-)
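
As a sketch of the caller-side discipline described above: a drain loop must
now test its own termination condition before blocking.
bdrv_requests_pending_all() below is a hypothetical stand-in for whatever
pending-request check the caller performs; it is not introduced by this
patch.

/* Block in qemu_aio_wait() only while requests are in flight; with the
 * new aio_poll() semantics an idle fd (e.g. an iSCSI socket) would
 * otherwise make us wait forever.  bdrv_requests_pending_all() is
 * hypothetical.
 */
while (bdrv_requests_pending_all()) {
    qemu_aio_wait();
}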