@@ -21,6 +21,7 @@
typedef struct NvmeNamespaceParams {
uint32_t nsid;
+ uint16_t ms;
} NvmeNamespaceParams;
typedef struct NvmeNamespace {
@@ -57,18 +58,30 @@ static inline uint8_t nvme_ns_lbads(NvmeNamespace *ns)
return nvme_ns_lbaf(ns)->ds;
}
-/* calculate the number of LBAs that the namespace can accomodate */
-static inline uint64_t nvme_ns_nlbas(NvmeNamespace *ns)
+static inline uint16_t nvme_ns_ms(NvmeNamespace *ns)
{
- return ns->size >> nvme_ns_lbads(ns);
+ return nvme_ns_lbaf(ns)->ms;
}
/* convert an LBA to the equivalent in bytes */
static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
+ if (NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas)) {
+ return (lba << nvme_ns_lbads(ns)) + (lba * nvme_ns_ms(ns));
+ }
+
return lba << nvme_ns_lbads(ns);
}
+/* calculate the number of LBAs that the namespace can accommodate */
+static inline uint64_t nvme_ns_nlbas(NvmeNamespace *ns)
+{
+ if (NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas)) {
+ return ns->size / nvme_l2b(ns, 1);
+ }
+ return ns->size >> nvme_ns_lbads(ns);
+}
+
typedef struct NvmeCtrl NvmeCtrl;
int nvme_ns_setup(NvmeCtrl *n, NvmeNamespace *ns, Error **errp);
@@ -37,9 +37,24 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
int lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
int npdg;
- ns->id_ns.dlfeat = 0x9;
+ id_ns->dlfeat = 0x10;
id_ns->lbaf[lba_index].ds = 31 - clz32(ns->blkconf.logical_block_size);
+ id_ns->lbaf[lba_index].ms = ns->params.ms;
+
+    /* support I/O optimization fields */
+ id_ns->nsfeat |= 0x10;
+
+ if (!ns->params.ms) {
+ /* zeroes are guaranteed to be read from deallocated blocks */
+ id_ns->dlfeat |= 0x1 | 0x8;
+
+ /* support DULBE */
+ id_ns->nsfeat |= 0x4;
+ } else {
+ id_ns->mc = 0x1;
+ id_ns->flbas |= 0x10;
+ }
id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(ns));
@@ -47,9 +62,6 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
id_ns->ncap = id_ns->nsze;
id_ns->nuse = id_ns->ncap;
- /* support DULBE and I/O optimization fields */
- id_ns->nsfeat |= (0x4 | 0x10);
-
npdg = ns->blkconf.discard_granularity / ns->blkconf.logical_block_size;
if (bdrv_get_info(blk_bs(ns->blkconf.blk), &bdi) >= 0 &&
@@ -150,6 +162,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
static Property nvme_ns_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf),
DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0),
+ DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1214,6 +1214,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
+ uint32_t sector_size;
trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
nvme_nsid(ns), nlb, data_size, slba);
@@ -1246,12 +1247,13 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
if (req->qsg.sg) {
+ sector_size = nvme_l2b(ns, 1);
if (acct == BLOCK_ACCT_WRITE) {
req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ sector_size, nvme_rw_cb, req);
} else {
req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ sector_size, nvme_rw_cb, req);
}
} else {
if (acct == BLOCK_ACCT_WRITE) {