@@ -1554,136 +1554,6 @@ static void nvme_aio_zone_reset_cb(void *opaque, int ret)
nvme_enqueue_req_completion(nvme_cq(req), req);
}
-struct nvme_copy_ctx {
- int copies;
- uint8_t *bounce;
- uint32_t nlb;
-};
-
-struct nvme_copy_in_ctx {
- NvmeRequest *req;
- QEMUIOVector iov;
-};
-
-static void nvme_copy_cb(void *opaque, int ret)
-{
- NvmeRequest *req = opaque;
- NvmeNamespace *ns = req->ns;
- struct nvme_copy_ctx *ctx = req->opaque;
-
- trace_pci_nvme_copy_cb(nvme_cid(req));
-
- if (ns->params.zoned) {
- NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
- uint64_t sdlba = le64_to_cpu(copy->sdlba);
- NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
-
- __nvme_advance_zone_wp(ns, zone, ctx->nlb);
- }
-
- if (!ret) {
- block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
- } else {
- block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
- nvme_aio_err(req, ret);
- }
-
- g_free(ctx->bounce);
- g_free(ctx);
-
- nvme_enqueue_req_completion(nvme_cq(req), req);
-}
-
-static void nvme_copy_in_complete(NvmeRequest *req)
-{
- NvmeNamespace *ns = req->ns;
- NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
- struct nvme_copy_ctx *ctx = req->opaque;
- uint64_t sdlba = le64_to_cpu(copy->sdlba);
- uint16_t status;
-
- trace_pci_nvme_copy_in_complete(nvme_cid(req));
-
- block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
-
- status = nvme_check_bounds(ns, sdlba, ctx->nlb);
- if (status) {
- trace_pci_nvme_err_invalid_lba_range(sdlba, ctx->nlb, ns->id_ns.nsze);
- goto invalid;
- }
-
- if (ns->params.zoned) {
- NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
-
- status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb);
- if (status) {
- goto invalid;
- }
-
- status = nvme_zrm_auto(ns, zone);
- if (status) {
- goto invalid;
- }
-
- zone->w_ptr += ctx->nlb;
- }
-
- qemu_iovec_init(&req->iov, 1);
- qemu_iovec_add(&req->iov, ctx->bounce, nvme_l2b(ns, ctx->nlb));
-
- block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
- BLOCK_ACCT_WRITE);
-
- req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
- &req->iov, 0, nvme_copy_cb, req);
-
- return;
-
-invalid:
- req->status = status;
-
- g_free(ctx->bounce);
- g_free(ctx);
-
- nvme_enqueue_req_completion(nvme_cq(req), req);
-}
-
-static void nvme_aio_copy_in_cb(void *opaque, int ret)
-{
- struct nvme_copy_in_ctx *in_ctx = opaque;
- NvmeRequest *req = in_ctx->req;
- NvmeNamespace *ns = req->ns;
- struct nvme_copy_ctx *ctx = req->opaque;
-
- qemu_iovec_destroy(&in_ctx->iov);
- g_free(in_ctx);
-
- trace_pci_nvme_aio_copy_in_cb(nvme_cid(req));
-
- if (ret) {
- nvme_aio_err(req, ret);
- }
-
- ctx->copies--;
-
- if (ctx->copies) {
- return;
- }
-
- if (req->status) {
- block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
-
- g_free(ctx->bounce);
- g_free(ctx);
-
- nvme_enqueue_req_completion(nvme_cq(req), req);
-
- return;
- }
-
- nvme_copy_in_complete(req);
-}
-
struct nvme_compare_ctx {
QEMUIOVector iov;
uint8_t *bounce;
@@ -1874,18 +1744,184 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
return status;
}
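+/*
+ * State for an in-flight Copy command, driven as a state machine from
+ * nvme_copy_aio_cb(): each source range is read into the bounce buffer
+ * in turn, and when all ranges have been gathered a single write to the
+ * destination LBA is submitted. Final completion is deferred to a bottom
+ * half (nvme_copy_bh) so the operation can be cancelled through the
+ * regular BlockAIOCB machinery at any intermediate step.
+ */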
+typedef struct NvmeCopyAIOCB {
+ BlockAIOCB common;
+ BlockAIOCB *aiocb;
+ NvmeRequest *req;
+ QEMUBH *bh;
+ int ret;
+
+ NvmeCopySourceRange *range;
+ int nr;
+ uint32_t nlb;
+ uint8_t *bounce;
+ bool done;
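+ /* read-in (gather) progress: next range index and bounce position */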
+ struct {
+ int idx;
+ uint8_t *p;
+ } copy_in;
+} NvmeCopyAIOCB;
+
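+/*
+ * Cancellation: record -ECANCELED, short-circuit the state machine so no
+ * further reads are submitted, and cancel any request currently in flight
+ * with the block layer. The completion callback is then invoked from the
+ * bottom half once the cancelled request returns.
+ */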
+static void nvme_copy_cancel(BlockAIOCB *aiocb)
+{
+ NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common);
+
+ iocb->ret = -ECANCELED;
+ iocb->copy_in.idx = iocb->nr;
+ iocb->done = true;
+
+ if (iocb->aiocb) {
+ blk_aio_cancel_async(iocb->aiocb);
+ iocb->aiocb = NULL;
+ }
+}
+
+static const AIOCBInfo nvme_copy_aiocb_info = {
+ .aiocb_size = sizeof(NvmeCopyAIOCB),
+ .cancel_async = nvme_copy_cancel,
+};
+
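+/*
+ * Bottom half that finalizes the command: advance the zone write pointer
+ * if the write phase was entered, settle accounting, invoke the original
+ * completion callback and release the AIOCB.
+ */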
+static void nvme_copy_bh(void *opaque)
+{
+ NvmeCopyAIOCB *iocb = opaque;
+ NvmeRequest *req = iocb->req;
+ NvmeNamespace *ns = req->ns;
+
+ /*
+ * Only advance the zone write pointer if the write phase was
+ * entered; nvme_copy_in_complete() bumped zone->w_ptr at that
+ * point, so zone->d.wp must follow suit.
+ */
+ if (ns->params.zoned && iocb->done) {
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ uint64_t sdlba = le64_to_cpu(copy->sdlba);
+ NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
+
+ __nvme_advance_zone_wp(ns, zone, iocb->nlb);
+ }
+
+ if (iocb->ret < 0) {
+ block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
+ } else {
+ block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
+ }
+
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+
+ qemu_bh_delete(iocb->bh);
+ iocb->bh = NULL;
+
+ g_free(iocb->bounce);
+ g_free(iocb->range);
+
+ qemu_aio_unref(iocb);
+}
+
+static void nvme_copy_aio_cb(void *opaque, int ret);
+
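+/*
+ * All source ranges have been read into the bounce buffer; validate the
+ * destination range and submit the write that copies the data out.
+ */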
+static uint16_t nvme_copy_in_complete(NvmeCopyAIOCB *iocb)
+{
+ NvmeRequest *req = iocb->req;
+ NvmeNamespace *ns = req->ns;
+ NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
+ uint64_t sdlba = le64_to_cpu(copy->sdlba);
+ uint16_t status;
+
+ block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
+
+ status = nvme_check_bounds(ns, sdlba, iocb->nlb);
+ if (status) {
+ trace_pci_nvme_err_invalid_lba_range(sdlba, iocb->nlb, ns->id_ns.nsze);
+ return status;
+ }
+
+ if (ns->params.zoned) {
+ NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
+
+ status = nvme_check_zone_write(ns, zone, sdlba, iocb->nlb);
+ if (status) {
+ return status;
+ }
+
+ status = nvme_zrm_auto(ns, zone);
+ if (status) {
+ return status;
+ }
+
+ zone->w_ptr += iocb->nlb;
+ }
+
+ iocb->done = true;
+
+ qemu_iovec_reset(&req->iov);
+ qemu_iovec_add(&req->iov, iocb->bounce, nvme_l2b(ns, iocb->nlb));
+
+ block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_WRITE);
+
+ iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
+ &req->iov, 0, nvme_copy_aio_cb, iocb);
+
+ return NVME_SUCCESS;
+}
+
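+/*
+ * Completion callback shared by the per-range reads and the final write:
+ * while source ranges remain, submit a read for the next one; once all
+ * reads have finished, hand off to nvme_copy_in_complete(); after the
+ * write completes (or on any error), defer to the bottom half.
+ */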
+static void nvme_copy_aio_cb(void *opaque, int ret)
+{
+ NvmeCopyAIOCB *iocb = opaque;
+ NvmeRequest *req = iocb->req;
+ NvmeNamespace *ns = req->ns;
+ NvmeCopySourceRange *range;
+ uint64_t slba;
+ uint32_t nlb;
+ size_t bytes;
+ uint16_t status;
+
+ if (ret < 0) {
+ iocb->ret = ret;
+ goto done;
+ }
+
+ if (iocb->copy_in.idx == iocb->nr) {
+ if (iocb->done) {
+ goto done;
+ }
+
+ status = nvme_copy_in_complete(iocb);
+ if (status) {
+ req->status = status;
+ goto done;
+ }
+
+ return;
+ }
+
+ range = &iocb->range[iocb->copy_in.idx++];
+ slba = le64_to_cpu(range->slba);
+ nlb = le32_to_cpu(range->nlb);
+ bytes = nvme_l2b(ns, nlb);
+
+ trace_pci_nvme_copy_source_range(slba, nlb);
+
+ qemu_iovec_reset(&req->iov);
+ qemu_iovec_add(&req->iov, iocb->copy_in.p, bytes);
+ iocb->copy_in.p += bytes;
+
+ iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
+ &req->iov, 0, nvme_copy_aio_cb, iocb);
+ return;
+
+done:
+ iocb->aiocb = NULL;
+ if (iocb->bh) {
+ qemu_bh_schedule(iocb->bh);
+ }
+}
+
static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
{
NvmeNamespace *ns = req->ns;
NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
- g_autofree NvmeCopySourceRange *range = NULL;
-
+ NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk,
+ nvme_misc_cb, req);
uint16_t nr = copy->nr + 1;
uint8_t format = copy->control[0] & 0xf;
uint32_t nlb = 0;
-
- uint8_t *bounce = NULL, *bouncep = NULL;
- struct nvme_copy_ctx *ctx;
uint16_t status;
int i;
@@ -1900,39 +1936,46 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
- return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ qemu_aio_unref(iocb);
+ return NVME_CMD_SIZE_LIMIT | NVME_DNR;
}
- range = g_new(NvmeCopySourceRange, nr);
+ iocb->req = req;
+ iocb->bh = qemu_bh_new(nvme_copy_bh, iocb);
+ iocb->ret = 0;
+ iocb->done = false;
+ iocb->range = g_new(NvmeCopySourceRange, nr);
+ iocb->nr = nr;
+ iocb->copy_in.idx = 0;
- status = nvme_dma(n, (uint8_t *)range, nr * sizeof(NvmeCopySourceRange),
+ status = nvme_dma(n, (uint8_t *)iocb->range,
+ sizeof(NvmeCopySourceRange) * nr,
DMA_DIRECTION_TO_DEVICE, req);
if (status) {
- return status;
+ goto invalid;
}
for (i = 0; i < nr; i++) {
- uint64_t slba = le64_to_cpu(range[i].slba);
- uint32_t _nlb = le16_to_cpu(range[i].nlb) + 1;
+ uint64_t slba = le64_to_cpu(iocb->range[i].slba);
+ uint32_t _nlb = le16_to_cpu(iocb->range[i].nlb) + 1;
if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
- return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ goto invalid;
}
status = nvme_check_bounds(ns, slba, _nlb);
if (status) {
trace_pci_nvme_err_invalid_lba_range(slba, _nlb, ns->id_ns.nsze);
- return status;
+ goto invalid;
}
if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
status = nvme_check_dulbe(ns, slba, _nlb);
if (status) {
- return status;
+ goto invalid;
}
}
if (ns->params.zoned) {
status = nvme_check_zone_read(ns, slba, _nlb);
if (status) {
- return status;
+ goto invalid;
}
}
@@ -1940,53 +1983,26 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
}
if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
- return NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
+ goto invalid;
}
- bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
+ iocb->nlb = nlb;
+ iocb->bounce = g_malloc(nvme_l2b(ns, nlb));
+ iocb->copy_in.p = iocb->bounce;
+
+ qemu_iovec_init(&req->iov, 1);
block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
BLOCK_ACCT_READ);
- ctx = g_new(struct nvme_copy_ctx, 1);
-
- ctx->bounce = bounce;
- ctx->nlb = nlb;
- ctx->copies = 1;
-
- req->opaque = ctx;
-
- for (i = 0; i < nr; i++) {
- uint64_t slba = le64_to_cpu(range[i].slba);
- uint32_t nlb = le16_to_cpu(range[i].nlb) + 1;
-
- size_t len = nvme_l2b(ns, nlb);
- int64_t offset = nvme_l2b(ns, slba);
-
- trace_pci_nvme_copy_source_range(slba, nlb);
-
- struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1);
- in_ctx->req = req;
-
- qemu_iovec_init(&in_ctx->iov, 1);
- qemu_iovec_add(&in_ctx->iov, bouncep, len);
-
- ctx->copies++;
-
- blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
- nvme_aio_copy_in_cb, in_ctx);
-
- bouncep += len;
- }
-
- /* account for the 1-initialization */
- ctx->copies--;
-
- if (!ctx->copies) {
- nvme_copy_in_complete(req);
- }
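+ /* kick off the state machine; everything else runs from the callback */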
+ req->aiocb = &iocb->common;
+ nvme_copy_aio_cb(iocb, 0);
return NVME_NO_COMPLETE;
+
+invalid:
+ g_free(iocb->range);
+ qemu_bh_delete(iocb->bh);
+ qemu_aio_unref(iocb);
+ return status;
}
static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)