Message ID | 20200611171143.21589-3-den@openvz.org |
---|---|
State | New |
Headers | show |
Series | block: seriously improve savevm performance | expand |
11.06.2020 20:11, Denis V. Lunev wrote: > From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> > > Currently, aio task pool assumes that there is a main coroutine, which > creates tasks and wait for them. Let's remove the restriction by using > CoQueue. Code becomes clearer, interface more obvious. > > Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> > Signed-off-by: Denis V. Lunev <den@openvz.org> > CC: Kevin Wolf <kwolf@redhat.com> > CC: Max Reitz <mreitz@redhat.com> > CC: Stefan Hajnoczi <stefanha@redhat.com> > CC: Fam Zheng <fam@euphon.net> > CC: Juan Quintela <quintela@redhat.com> > CC: "Dr. David Alan Gilbert" <dgilbert@redhat.com> > CC: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> > CC: Denis Plotnikov <dplotnikov@virtuozzo.com> > --- > block/aio_task.c | 21 ++++++--------------- > 1 file changed, 6 insertions(+), 15 deletions(-) > > diff --git a/block/aio_task.c b/block/aio_task.c > index 88989fa248..cf62e5c58b 100644 > --- a/block/aio_task.c > +++ b/block/aio_task.c > @@ -27,11 +27,10 @@ > #include "block/aio_task.h" > > struct AioTaskPool { > - Coroutine *main_co; > int status; > int max_busy_tasks; > int busy_tasks; > - bool waiting; > + CoQueue waiters; > }; > > static void coroutine_fn aio_task_co(void *opaque) > @@ -52,31 +51,23 @@ static void coroutine_fn aio_task_co(void *opaque) > > g_free(task); > > - if (pool->waiting) { > - pool->waiting = false; > - aio_co_wake(pool->main_co); > - } > + qemu_co_queue_restart_all(&pool->waiters); > } > > void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) > { > assert(pool->busy_tasks > 0); > - assert(qemu_coroutine_self() == pool->main_co); > > - pool->waiting = true; > - qemu_coroutine_yield(); > + qemu_co_queue_wait(&pool->waiters, NULL); > > - assert(!pool->waiting); > assert(pool->busy_tasks < pool->max_busy_tasks); As we wake up several coroutines now, I'm afraid this assertion may start to fire. 
And aio_task_pool_wait_one() becomes useless as a public API (still, it's used only locally, so we can make it static). I'll send updated patch after reviewing the rest of the series. > } > > void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool) > { > - if (pool->busy_tasks < pool->max_busy_tasks) { > - return; > + while (pool->busy_tasks >= pool->max_busy_tasks) { > + aio_task_pool_wait_one(pool); > } > - > - aio_task_pool_wait_one(pool); > } > > void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool) > @@ -98,8 +89,8 @@ AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks) > { > AioTaskPool *pool = g_new0(AioTaskPool, 1); > > - pool->main_co = qemu_coroutine_self(); > pool->max_busy_tasks = max_busy_tasks; > + qemu_co_queue_init(&pool->waiters); > > return pool; > } >
15.06.2020 10:47, Vladimir Sementsov-Ogievskiy wrote: > 11.06.2020 20:11, Denis V. Lunev wrote: >> From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >> >> Currently, aio task pool assumes that there is a main coroutine, which >> creates tasks and wait for them. Let's remove the restriction by using >> CoQueue. Code becomes clearer, interface more obvious. >> >> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >> Signed-off-by: Denis V. Lunev <den@openvz.org> >> CC: Kevin Wolf <kwolf@redhat.com> >> CC: Max Reitz <mreitz@redhat.com> >> CC: Stefan Hajnoczi <stefanha@redhat.com> >> CC: Fam Zheng <fam@euphon.net> >> CC: Juan Quintela <quintela@redhat.com> >> CC: "Dr. David Alan Gilbert" <dgilbert@redhat.com> >> CC: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >> CC: Denis Plotnikov <dplotnikov@virtuozzo.com> >> --- >> block/aio_task.c | 21 ++++++--------------- >> 1 file changed, 6 insertions(+), 15 deletions(-) >> >> diff --git a/block/aio_task.c b/block/aio_task.c >> index 88989fa248..cf62e5c58b 100644 >> --- a/block/aio_task.c >> +++ b/block/aio_task.c >> @@ -27,11 +27,10 @@ >> #include "block/aio_task.h" >> struct AioTaskPool { >> - Coroutine *main_co; >> int status; >> int max_busy_tasks; >> int busy_tasks; >> - bool waiting; >> + CoQueue waiters; >> }; >> static void coroutine_fn aio_task_co(void *opaque) >> @@ -52,31 +51,23 @@ static void coroutine_fn aio_task_co(void *opaque) >> g_free(task); >> - if (pool->waiting) { >> - pool->waiting = false; >> - aio_co_wake(pool->main_co); >> - } >> + qemu_co_queue_restart_all(&pool->waiters); >> } >> void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) >> { >> assert(pool->busy_tasks > 0); >> - assert(qemu_coroutine_self() == pool->main_co); >> - pool->waiting = true; >> - qemu_coroutine_yield(); >> + qemu_co_queue_wait(&pool->waiters, NULL); >> - assert(!pool->waiting); >> assert(pool->busy_tasks < pool->max_busy_tasks); > > As we wake up several coroutines now, I'm 
afraid this assertion may start to fire. > And aio_task_pool_wait_one() becomes useless as a public API (still, it's used only locally, so we can make it static). > > I'll send updated patch after reviewing the rest of the series. > Hm, OK, we have two kinds of waiters: waiting for a slot and for all tasks to finish. So, either we need two queues, or do like this patch (one queue, but wake-up all waiters, for them to check does their condition satisfied or not). I'm OK with this patch with the following squashed-in: diff --git a/include/block/aio_task.h b/include/block/aio_task.h index 50bc1e1817..50b1c036c5 100644 --- a/include/block/aio_task.h +++ b/include/block/aio_task.h @@ -48,7 +48,6 @@ bool aio_task_pool_empty(AioTaskPool *pool); void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task); void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool); -void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool); void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool); #endif /* BLOCK_AIO_TASK_H */ diff --git a/block/aio_task.c b/block/aio_task.c index cf62e5c58b..7ba15ff41f 100644 --- a/block/aio_task.c +++ b/block/aio_task.c @@ -54,26 +54,17 @@ static void coroutine_fn aio_task_co(void *opaque) qemu_co_queue_restart_all(&pool->waiters); } -void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) -{ - assert(pool->busy_tasks > 0); - - qemu_co_queue_wait(&pool->waiters, NULL); - - assert(pool->busy_tasks < pool->max_busy_tasks); -} - void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool) { while (pool->busy_tasks >= pool->max_busy_tasks) { - aio_task_pool_wait_one(pool); + qemu_co_queue_wait(&pool->waiters, NULL); } } void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool) { while (pool->busy_tasks > 0) { - aio_task_pool_wait_one(pool); + qemu_co_queue_wait(&pool->waiters, NULL); } }
On 6/15/20 12:34 PM, Vladimir Sementsov-Ogievskiy wrote: > 15.06.2020 10:47, Vladimir Sementsov-Ogievskiy wrote: >> 11.06.2020 20:11, Denis V. Lunev wrote: >>> From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >>> >>> Currently, aio task pool assumes that there is a main coroutine, which >>> creates tasks and wait for them. Let's remove the restriction by using >>> CoQueue. Code becomes clearer, interface more obvious. >>> >>> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >>> Signed-off-by: Denis V. Lunev <den@openvz.org> >>> CC: Kevin Wolf <kwolf@redhat.com> >>> CC: Max Reitz <mreitz@redhat.com> >>> CC: Stefan Hajnoczi <stefanha@redhat.com> >>> CC: Fam Zheng <fam@euphon.net> >>> CC: Juan Quintela <quintela@redhat.com> >>> CC: "Dr. David Alan Gilbert" <dgilbert@redhat.com> >>> CC: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> >>> CC: Denis Plotnikov <dplotnikov@virtuozzo.com> >>> --- >>> block/aio_task.c | 21 ++++++--------------- >>> 1 file changed, 6 insertions(+), 15 deletions(-) >>> >>> diff --git a/block/aio_task.c b/block/aio_task.c >>> index 88989fa248..cf62e5c58b 100644 >>> --- a/block/aio_task.c >>> +++ b/block/aio_task.c >>> @@ -27,11 +27,10 @@ >>> #include "block/aio_task.h" >>> struct AioTaskPool { >>> - Coroutine *main_co; >>> int status; >>> int max_busy_tasks; >>> int busy_tasks; >>> - bool waiting; >>> + CoQueue waiters; >>> }; >>> static void coroutine_fn aio_task_co(void *opaque) >>> @@ -52,31 +51,23 @@ static void coroutine_fn aio_task_co(void *opaque) >>> g_free(task); >>> - if (pool->waiting) { >>> - pool->waiting = false; >>> - aio_co_wake(pool->main_co); >>> - } >>> + qemu_co_queue_restart_all(&pool->waiters); >>> } >>> void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) >>> { >>> assert(pool->busy_tasks > 0); >>> - assert(qemu_coroutine_self() == pool->main_co); >>> - pool->waiting = true; >>> - qemu_coroutine_yield(); >>> + qemu_co_queue_wait(&pool->waiters, NULL); >>> - 
assert(!pool->waiting); >>> assert(pool->busy_tasks < pool->max_busy_tasks); >> >> As we wake up several coroutines now, I'm afraid this assertion may >> start to fire. >> And aio_task_pool_wait_one() becomes useless as a public API (still, >> it's used only locally, so we can make it static). >> >> I'll send updated patch after reviewing the rest of the series. >> > > Hm, OK, we have two kinds of waiters: waiting for a slot and for all > tasks to finish. So, either we need two queues, or do like this patch > (one queue, but wake-up all waiters, for them to check does their > condition satisfied or not). > > I'm OK with this patch with the following squashed-in: > > diff --git a/include/block/aio_task.h b/include/block/aio_task.h > index 50bc1e1817..50b1c036c5 100644 > --- a/include/block/aio_task.h > +++ b/include/block/aio_task.h > @@ -48,7 +48,6 @@ bool aio_task_pool_empty(AioTaskPool *pool); > void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask > *task); > > void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool); > -void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool); > void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool); > > #endif /* BLOCK_AIO_TASK_H */ > diff --git a/block/aio_task.c b/block/aio_task.c > index cf62e5c58b..7ba15ff41f 100644 > --- a/block/aio_task.c > +++ b/block/aio_task.c > @@ -54,26 +54,17 @@ static void coroutine_fn aio_task_co(void *opaque) > qemu_co_queue_restart_all(&pool->waiters); > } > > -void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) > -{ > - assert(pool->busy_tasks > 0); > - > - qemu_co_queue_wait(&pool->waiters, NULL); > - > - assert(pool->busy_tasks < pool->max_busy_tasks); > -} > - > void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool) > { > while (pool->busy_tasks >= pool->max_busy_tasks) { > - aio_task_pool_wait_one(pool); > + qemu_co_queue_wait(&pool->waiters, NULL); > } > } > > void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool) > { > while 
(pool->busy_tasks > 0) { > - aio_task_pool_wait_one(pool); > + qemu_co_queue_wait(&pool->waiters, NULL); > } > } > > > > I'd better make this separate. Den
diff --git a/block/aio_task.c b/block/aio_task.c index 88989fa248..cf62e5c58b 100644 --- a/block/aio_task.c +++ b/block/aio_task.c @@ -27,11 +27,10 @@ #include "block/aio_task.h" struct AioTaskPool { - Coroutine *main_co; int status; int max_busy_tasks; int busy_tasks; - bool waiting; + CoQueue waiters; }; static void coroutine_fn aio_task_co(void *opaque) @@ -52,31 +51,23 @@ static void coroutine_fn aio_task_co(void *opaque) g_free(task); - if (pool->waiting) { - pool->waiting = false; - aio_co_wake(pool->main_co); - } + qemu_co_queue_restart_all(&pool->waiters); } void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool) { assert(pool->busy_tasks > 0); - assert(qemu_coroutine_self() == pool->main_co); - pool->waiting = true; - qemu_coroutine_yield(); + qemu_co_queue_wait(&pool->waiters, NULL); - assert(!pool->waiting); assert(pool->busy_tasks < pool->max_busy_tasks); } void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool) { - if (pool->busy_tasks < pool->max_busy_tasks) { - return; + while (pool->busy_tasks >= pool->max_busy_tasks) { + aio_task_pool_wait_one(pool); } - - aio_task_pool_wait_one(pool); } void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool) @@ -98,8 +89,8 @@ AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks) { AioTaskPool *pool = g_new0(AioTaskPool, 1); - pool->main_co = qemu_coroutine_self(); pool->max_busy_tasks = max_busy_tasks; + qemu_co_queue_init(&pool->waiters); return pool; }