@@ -1459,41 +1459,6 @@ static void nvme_rw_cb(void *opaque, int ret)
nvme_enqueue_req_completion(nvme_cq(req), req);
}
-struct nvme_aio_flush_ctx {
- NvmeRequest *req;
- NvmeNamespace *ns;
- BlockAcctCookie acct;
-};
-
-static void nvme_aio_flush_cb(void *opaque, int ret)
-{
- struct nvme_aio_flush_ctx *ctx = opaque;
- NvmeRequest *req = ctx->req;
- uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
-
- BlockBackend *blk = ctx->ns->blkconf.blk;
- BlockAcctCookie *acct = &ctx->acct;
- BlockAcctStats *stats = blk_get_stats(blk);
-
- trace_pci_nvme_aio_flush_cb(nvme_cid(req), blk_name(blk));
-
- if (!ret) {
- block_acct_done(stats, acct);
- } else {
- block_acct_failed(stats, acct);
- nvme_aio_err(req, ret);
- }
-
- (*num_flushes)--;
- g_free(ctx);
-
- if (*num_flushes) {
- return;
- }
-
- nvme_enqueue_req_completion(nvme_cq(req), req);
-}
-
static void nvme_misc_cb(void *opaque, int ret)
{
NvmeRequest *req = opaque;
@@ -2055,13 +2020,74 @@ static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
return NVME_NO_COMPLETE;
}
/*
 * Per-command state for a broadcast (all-namespaces) Flush.
 *
 * Embeds a BlockAIOCB so the whole operation can be cancelled through the
 * generic AIO cancel machinery (see nvme_flush_cancel / nvme_flush_aiocb_info).
 */
typedef struct NvmeFlushAIOCB {
    BlockAIOCB common;  /* must be first: container_of() in nvme_flush_cancel */
    BlockAIOCB *aiocb;  /* flush in flight for the current namespace, or NULL */
    NvmeRequest *req;   /* originating NVMe request */
    QEMUBH *bh;         /* bottom half that delivers the final completion */
    int ret;            /* first error observed; 0 while all flushes succeed */

    int nsid;           /* next namespace id to flush (1-based cursor) */
} NvmeFlushAIOCB;
+
/*
 * Cancellation hook for a broadcast flush.
 *
 * Stops the namespace iteration and cancels the flush currently in flight
 * (if any); the in-flight aiocb's completion callback then finalizes the
 * request with -ECANCELED via the bottom half.
 */
static void nvme_flush_cancel(BlockAIOCB *acb)
{
    NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common);
    NvmeCtrl *n = nvme_ctrl(iocb->req);

    /*
     * Move the cursor past the last namespace so nvme_flush_aio_cb will not
     * issue any further flushes when the pending one completes.
     */
    iocb->nsid = n->num_namespaces + 1;
    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
+
/* AIOCB ops for the broadcast flush; enables asynchronous cancellation. */
static const AIOCBInfo nvme_flush_aiocb_info = {
    .aiocb_size = sizeof(NvmeFlushAIOCB),
    .cancel_async = nvme_flush_cancel,
};
+
/*
 * Bottom half that completes the broadcast flush.  Runs only after the last
 * per-namespace flush (or a cancellation) has finished: invoke the user
 * callback with the accumulated status, then tear down the BH and drop the
 * AIOCB reference.
 */
static void nvme_flush_bh(void *opaque)
{
    NvmeFlushAIOCB *iocb = opaque;

    /* deliver completion before releasing our own resources */
    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);
}
+
/*
 * Completion callback driving the per-namespace flush chain.
 *
 * Called once directly with ret == 0 to kick off the first flush, then
 * re-entered as each blk_aio_flush() completes.  On the first error, or once
 * the namespace cursor has passed n->num_namespaces, it schedules the
 * completion bottom half (nvme_flush_bh).
 *
 * NOTE(review): unlike the removed nvme_aio_flush_cb, this path performs no
 * block accounting (block_acct_start/block_acct_done) for the per-namespace
 * flushes -- confirm that dropping the accounting is intentional.
 */
static void nvme_flush_aio_cb(void *opaque, int ret)
{
    NvmeFlushAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeCtrl *n = nvme_ctrl(req);

    if (ret < 0) {
        /* record the first error and stop iterating */
        iocb->ret = ret;
        goto done;
    }

    /* advance to the next attached namespace; ids may be sparse */
    while (iocb->nsid <= n->num_namespaces) {
        NvmeNamespace *ns = nvme_ns(n, iocb->nsid++);
        if (ns) {
            iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_aio_cb,
                                        iocb);
            return;
        }
    }

done:
    iocb->aiocb = NULL;
    qemu_bh_schedule(iocb->bh);
}
+
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
+ NvmeFlushAIOCB *iocb;
uint32_t nsid = le32_to_cpu(req->cmd.nsid);
- uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
- uint16_t status;
- struct nvme_aio_flush_ctx *ctx;
- NvmeNamespace *ns;
trace_pci_nvme_flush(nvme_cid(req), nsid);
@@ -2071,42 +2097,21 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
- block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
- BLOCK_ACCT_FLUSH);
- req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
+ req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_misc_cb, req);
return NVME_NO_COMPLETE;
}
- /* 1-initialize; see comment in nvme_dsm */
- *num_flushes = 1;
+ iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req);
- for (int i = 1; i <= n->num_namespaces; i++) {
- ns = nvme_ns(n, i);
- if (!ns) {
- continue;
- }
+ iocb->req = req;
+ iocb->bh = qemu_bh_new(nvme_flush_bh, iocb);
+ iocb->ret = 0;
+ iocb->nsid = 1;
- ctx = g_new(struct nvme_aio_flush_ctx, 1);
- ctx->req = req;
- ctx->ns = ns;
+ nvme_flush_aio_cb(iocb, 0);
+ req->aiocb = &iocb->common;
- (*num_flushes)++;
-
- block_acct_start(blk_get_stats(ns->blkconf.blk), &ctx->acct, 0,
- BLOCK_ACCT_FLUSH);
- blk_aio_flush(ns->blkconf.blk, nvme_aio_flush_cb, ctx);
- }
-
- /* account for the 1-initialization */
- (*num_flushes)--;
-
- if (*num_flushes) {
- status = NVME_NO_COMPLETE;
- } else {
- status = req->status;
- }
-
- return status;
+ return NVME_NO_COMPLETE;
}
static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)