@@ -22,7 +22,8 @@
* [pmrdev=<mem_backend_file_id>,] \
* max_ioqpairs=<N[optional]>, \
* aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
- * mdts=<N[optional]>,zoned.append_size_limit=<N[optional]> \
+ * mdts=<N[optional]>,zoned.append_size_limit=<N[optional]>, \
+ * subsys=<subsys_id> \
* -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
* zoned=<true|false[optional]>
* -device nvme-subsys,id=<subsys_id>
@@ -44,6 +45,13 @@
*
* nvme device parameters
* ~~~~~~~~~~~~~~~~~~~~~~
+ * - `subsys`
+ * NVM Subsystem device. If given, the subsystem NQN will be initialized
+ * with the given <subsys_id>; otherwise, <serial> will be used for the
+ * subsystem NQN. If given, this also enables the multi-controller
+ * capability in the CMIC (Controller Multi-path I/O and Namespace Sharing
+ * Capabilities) field of the Identify Controller data structure.
+ *
* - `aerl`
* The Asynchronous Event Request Limit (AERL). Indicates the maximum number
* of concurrently outstanding Asynchronous Event Request commands support
@@ -4404,11 +4412,23 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
return 0;
}
+static void nvme_init_subnqn(NvmeCtrl *n)
+{
+ NvmeSubsystem *subsys = n->subsys;
+ NvmeIdCtrl *id = &n->id_ctrl;
+
+ if (!subsys) {
+ snprintf((char *)id->subnqn, sizeof(id->subnqn),
+ "nqn.2019-08.org.qemu:%s", n->params.serial);
+ } else {
+ pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char *)subsys->subnqn);
+ }
+}
+
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
NvmeIdCtrl *id = &n->id_ctrl;
uint8_t *pci_conf = pci_dev->config;
- char *subnqn;
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -4455,9 +4475,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
NVME_CTRL_SGLS_BITBUCKET);
- subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
- strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
- g_free(subnqn);
+ nvme_init_subnqn(n);
id->psd[0].mp = cpu_to_le16(0x9c4);
id->psd[0].enlat = cpu_to_le32(0x10);
@@ -4545,6 +4563,8 @@ static Property nvme_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
+ DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
+ NvmeSubsystem *),
DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
@@ -2,6 +2,7 @@
#define HW_NVME_H
#include "block/nvme.h"
+#include "nvme-subsys.h"
#include "nvme-ns.h"
#define NVME_MAX_NAMESPACES 256
@@ -170,6 +171,8 @@ typedef struct NvmeCtrl {
uint8_t zasl;
+ NvmeSubsystem *subsys;
+
NvmeNamespace namespace;
NvmeNamespace *namespaces[NVME_MAX_NAMESPACES];
NvmeSQueue **sq;
The nvme controller (nvme) can be mapped to an NVMe subsystem (nvme-subsys).
This patch maps a controller to a subsystem by adding a 'subsys' parameter
to the nvme device.

To map a controller to a subsystem, put the nvme-subsys device first and
then map the subsystem to the controller:

  -device nvme-subsys,id=subsys0
  -device nvme,serial=foo,id=nvme0,subsys=subsys0

If the 'subsys' property is not given to the nvme controller, the subsystem
NQN will be created from the serial (e.g., 'foo' in the above example).
Otherwise, it will be based on the subsys id (e.g., 'subsys0' in the above
example).

Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
---
 hw/block/nvme.c | 30 +++++++++++++++++++++++++-----
 hw/block/nvme.h |  3 +++
 2 files changed, 28 insertions(+), 5 deletions(-)
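
As an illustration of the intended behavior: with the example above, the
serial-based fallback in nvme_init_subnqn() produces a subsystem NQN of the
form

  nqn.2019-08.org.qemu:foo

When 'subsys=subsys0' is given, the controller instead copies the NQN from
the linked nvme-subsys device; the exact string depends on how nvme-subsys
initializes its subnqn (not shown in this patch), but it is based on
'subsys0'. A Linux guest could check the result by running, for example,
'nvme id-ctrl /dev/nvme0' and inspecting the reported subnqn field.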