/*
 * QEMU NVM Express Virtual Namespace
 *
 * Copyright (c) 2019 CNEX Labs
 * Copyright (c) 2020 Samsung Electronics
 *
 * Authors:
 *   Klaus Jensen      <k.jensen@samsung.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/bitops.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"

#include "nvme.h"
#include "trace.h"

#define MIN_DISCARD_GRANULARITY (4 * KiB)
#define NVME_DEFAULT_ZONE_SIZE  (128 * MiB)

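/*
 * Derive the geometry that depends on the active LBA format (logical block
 * size, number of logical blocks, metadata offset) and fill in the
 * corresponding Identify Namespace fields. As a rough example (hypothetical
 * values): with a 512 byte logical block size and the 4 KiB
 * MIN_DISCARD_GRANULARITY default, npdg = 4096 / 512 = 8, so the zeroes-based
 * NPDG/NPDA fields end up as 7 (unless the image format reports a larger
 * cluster size).
 */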
void nvme_ns_init_format(NvmeNamespace *ns)
{
    NvmeIdNs *id_ns = &ns->id_ns;
    BlockDriverInfo bdi;
    int npdg, ret;
    int64_t nlbas;

    ns->lbaf = id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)];
    ns->lbasz = 1 << ns->lbaf.ds;

    nlbas = ns->size / (ns->lbasz + ns->lbaf.ms);

    id_ns->nsze = cpu_to_le64(nlbas);

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;

    ns->moff = nlbas << ns->lbaf.ds;

    npdg = ns->blkconf.discard_granularity / ns->lbasz;

    ret = bdrv_get_info(blk_bs(ns->blkconf.blk), &bdi);
    if (ret >= 0 && bdi.cluster_size > ns->blkconf.discard_granularity) {
        npdg = bdi.cluster_size / ns->lbasz;
    }

    id_ns->npda = id_ns->npdg = npdg - 1;
}
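/*
 * Populate the Identify Namespace data structure from the device parameters.
 * The eight standard LBA formats (512 and 4096 byte data sizes, each with 0,
 * 8, 16 or 64 bytes of metadata) are always advertised; if the configured
 * logical_block_size/ms combination matches none of them, a non-standard
 * format is appended and selected via FLBAS.
 */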
static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
{
    static uint64_t ns_count;
    NvmeIdNs *id_ns = &ns->id_ns;
    NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm;
    uint8_t ds;
    uint16_t ms;
    int i;

    ns->csi = NVME_CSI_NVM;
    ns->status = 0x0;

    ns->id_ns.dlfeat = 0x1;

    /* support DULBE and I/O optimization fields */
    id_ns->nsfeat |= (0x4 | 0x10);

    if (ns->params.shared) {
        id_ns->nmic |= NVME_NMIC_NS_SHARED;
    }

    /* Substitute a missing EUI-64 by an autogenerated one */
    ++ns_count;
    if (!ns->params.eui64 && ns->params.eui64_default) {
        ns->params.eui64 = ns_count + NVME_EUI64_DEFAULT;
    }

    /* simple copy */
    id_ns->mssrl = cpu_to_le16(ns->params.mssrl);
    id_ns->mcl = cpu_to_le32(ns->params.mcl);
    id_ns->msrc = ns->params.msrc;
    id_ns->eui64 = cpu_to_be64(ns->params.eui64);
    memcpy(&id_ns->nguid, &ns->params.nguid.data, sizeof(id_ns->nguid));

    ds = 31 - clz32(ns->blkconf.logical_block_size);
    ms = ns->params.ms;

    id_ns->mc = NVME_ID_NS_MC_EXTENDED | NVME_ID_NS_MC_SEPARATE;

    if (ms && ns->params.mset) {
        id_ns->flbas |= NVME_ID_NS_FLBAS_EXTENDED;
    }

    id_ns->dpc = 0x1f;
    id_ns->dps = ns->params.pi;
    if (ns->params.pi && ns->params.pil) {
        id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT;
    }

    ns->pif = ns->params.pif;

    static const NvmeLBAF defaults[16] = {
        [0] = { .ds =  9           },
        [1] = { .ds =  9, .ms =  8 },
        [2] = { .ds =  9, .ms = 16 },
        [3] = { .ds =  9, .ms = 64 },
        [4] = { .ds = 12           },
        [5] = { .ds = 12, .ms =  8 },
        [6] = { .ds = 12, .ms = 16 },
        [7] = { .ds = 12, .ms = 64 },
    };

    ns->nlbaf = 8;

    memcpy(&id_ns->lbaf, &defaults, sizeof(defaults));

    for (i = 0; i < ns->nlbaf; i++) {
        NvmeLBAF *lbaf = &id_ns->lbaf[i];
        if (lbaf->ds == ds) {
            if (lbaf->ms == ms) {
                id_ns->flbas |= i;
                goto lbaf_found;
            }
        }
    }

    /* add non-standard lba format */
    id_ns->lbaf[ns->nlbaf].ds = ds;
    id_ns->lbaf[ns->nlbaf].ms = ms;
    ns->nlbaf++;

    id_ns->flbas |= i;

lbaf_found:
    id_ns_nvm->elbaf[i] = (ns->pif & 0x3) << 7;
    id_ns->nlbaf = ns->nlbaf - 1;
    nvme_ns_init_format(ns);

    return 0;
}
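/*
 * Validate and apply the block backend configuration and record the size of
 * the backing device. If no discard granularity was given, it defaults to
 * the logical block size, but no less than MIN_DISCARD_GRANULARITY.
 */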
static int nvme_ns_init_blk(NvmeNamespace *ns, Error **errp)
{
    bool read_only;

    if (!blkconf_blocksizes(&ns->blkconf, errp)) {
        return -1;
    }

    read_only = !blk_supports_write_perm(ns->blkconf.blk);
    if (!blkconf_apply_backend_options(&ns->blkconf, read_only, false, errp)) {
        return -1;
    }

    if (ns->blkconf.discard_granularity == -1) {
        ns->blkconf.discard_granularity =
            MAX(ns->blkconf.logical_block_size, MIN_DISCARD_GRANULARITY);
    }

    ns->size = blk_getlength(ns->blkconf.blk);
    if (ns->size < 0) {
        error_setg_errno(errp, -ns->size, "could not get blockdev size");
        return -1;
    }

    return 0;
}
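/*
 * Sanity check the zoned parameters and convert the byte-sized properties
 * into logical block units. As an illustration (hypothetical values): with
 * the 128 MiB NVME_DEFAULT_ZONE_SIZE and a 4096 byte logical block size,
 * each zone spans 32768 LBAs, and a 1 GiB backing image yields eight zones.
 */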
static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp)
{
    uint64_t zone_size, zone_cap;

    /* Make sure that the values of ZNS properties are sane */
    if (ns->params.zone_size_bs) {
        zone_size = ns->params.zone_size_bs;
    } else {
        zone_size = NVME_DEFAULT_ZONE_SIZE;
    }

    if (ns->params.zone_cap_bs) {
        zone_cap = ns->params.zone_cap_bs;
    } else {
        zone_cap = zone_size;
    }

    if (zone_cap > zone_size) {
        error_setg(errp, "zone capacity %"PRIu64"B exceeds "
                   "zone size %"PRIu64"B", zone_cap, zone_size);
        return -1;
    }
    if (zone_size < ns->lbasz) {
        error_setg(errp, "zone size %"PRIu64"B too small, "
                   "must be at least %zuB", zone_size, ns->lbasz);
        return -1;
    }
    if (zone_cap < ns->lbasz) {
        error_setg(errp, "zone capacity %"PRIu64"B too small, "
                   "must be at least %zuB", zone_cap, ns->lbasz);
        return -1;
    }

    /*
     * Save the main zone geometry values to avoid
     * calculating them later again.
     */
    ns->zone_size = zone_size / ns->lbasz;
    ns->zone_capacity = zone_cap / ns->lbasz;
    ns->num_zones = le64_to_cpu(ns->id_ns.nsze) / ns->zone_size;

    /* Do a few more sanity checks of ZNS properties */
    if (!ns->num_zones) {
        error_setg(errp,
                   "insufficient drive capacity, must be at least the size "
                   "of one zone (%"PRIu64"B)", zone_size);
        return -1;
    }

    return 0;
}
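/*
 * Allocate the zone array (and the zone descriptor extensions, if
 * configured) and initialize every zone to the Empty state with its write
 * pointer at the zone start LBA. log2(zone_size) is cached for the
 * power-of-two case so that zone index calculations can use a shift.
 */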
static void nvme_ns_zoned_init_state(NvmeNamespace *ns)
{
    uint64_t start = 0, zone_size = ns->zone_size;
    uint64_t capacity = ns->num_zones * zone_size;
    NvmeZone *zone;
    int i;

    ns->zone_array = g_new0(NvmeZone, ns->num_zones);
    if (ns->params.zd_extension_size) {
        ns->zd_extensions = g_malloc0(ns->params.zd_extension_size *
                                      ns->num_zones);
    }

    QTAILQ_INIT(&ns->exp_open_zones);
    QTAILQ_INIT(&ns->imp_open_zones);
    QTAILQ_INIT(&ns->closed_zones);
    QTAILQ_INIT(&ns->full_zones);

    zone = ns->zone_array;
    for (i = 0; i < ns->num_zones; i++, zone++) {
        if (start + zone_size > capacity) {
            zone_size = capacity - start;
        }
        zone->d.zt = NVME_ZONE_TYPE_SEQ_WRITE;
        nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY);
        zone->d.za = 0;
        zone->d.zcap = ns->zone_capacity;
        zone->d.zslba = start;
        zone->d.wp = start;
        zone->w_ptr = start;
        start += zone_size;
    }

    ns->zone_size_log2 = 0;
    if (is_power_of_2(ns->zone_size)) {
        ns->zone_size_log2 = 63 - clz64(ns->zone_size);
    }
}
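/*
 * Build the Zoned Namespace Command Set specific Identify Namespace data
 * structure. Note that MAR/MOR are zeroes-based, so a max_active_zones or
 * max_open_zones parameter of 0 wraps to FFFFFFFFh (no limit).
 */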
static void nvme_ns_init_zoned(NvmeNamespace *ns)
{
    NvmeIdNsZoned *id_ns_z;
    int i;

    nvme_ns_zoned_init_state(ns);

    id_ns_z = g_new0(NvmeIdNsZoned, 1);

    /* MAR/MOR are zeroes-based, FFFFFFFFFh means no limit */
    id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1);
    id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1);
    id_ns_z->zoc = 0;
    id_ns_z->ozcs = ns->params.cross_zone_read ?
        NVME_ID_NS_ZONED_OZCS_RAZB : 0x00;

    for (i = 0; i <= ns->id_ns.nlbaf; i++) {
        id_ns_z->lbafe[i].zsze = cpu_to_le64(ns->zone_size);
        id_ns_z->lbafe[i].zdes =
            ns->params.zd_extension_size >> 6; /* Units of 64B */
    }

    if (ns->params.zrwas) {
        ns->zns.numzrwa = ns->params.numzrwa ?
            ns->params.numzrwa : ns->num_zones;

        ns->zns.zrwas = ns->params.zrwas >> ns->lbaf.ds;
        ns->zns.zrwafg = ns->params.zrwafg >> ns->lbaf.ds;

        id_ns_z->ozcs |= NVME_ID_NS_ZONED_OZCS_ZRWASUP;
        id_ns_z->zrwacap = NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP;

        id_ns_z->numzrwa = cpu_to_le32(ns->params.numzrwa);
        id_ns_z->zrwas = cpu_to_le16(ns->zns.zrwas);
        id_ns_z->zrwafg = cpu_to_le16(ns->zns.zrwafg);
    }

    id_ns_z->ozcs = cpu_to_le16(id_ns_z->ozcs);

    ns->csi = NVME_CSI_ZONED;
    ns->id_ns.nsze = cpu_to_le64(ns->num_zones * ns->zone_size);
    ns->id_ns.ncap = ns->id_ns.nsze;
    ns->id_ns.nuse = ns->id_ns.ncap;

    /*
     * The device uses the BDRV_BLOCK_ZERO flag to determine the "deallocated"
     * status of logical blocks. Since the spec defines that logical blocks
     * SHALL be deallocated when the zone is in the Empty or Offline states,
     * we can only support DULBE if the zone size is a multiple of the
     * calculated NPDG.
     */
    if (ns->zone_size % (ns->id_ns.npdg + 1)) {
        warn_report("the zone size (%"PRIu64" blocks) is not a multiple of "
                    "the calculated deallocation granularity (%d blocks); "
                    "DULBE support disabled",
                    ns->zone_size, ns->id_ns.npdg + 1);

        ns->id_ns.nsfeat &= ~0x4;
    }

    ns->id_ns_zoned = id_ns_z;
}
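/*
 * Bring a single zone back to a consistent state: a zone that has been
 * partially written (or carries a valid zone descriptor extension) is moved
 * to Closed; anything else is reset to Empty, releasing any ZRWA resource
 * it held.
 */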
static void nvme_clear_zone(NvmeNamespace *ns, NvmeZone *zone)
{
    uint8_t state;

    zone->w_ptr = zone->d.wp;
    state = nvme_get_zone_state(zone);
    if (zone->d.wp != zone->d.zslba ||
        (zone->d.za & NVME_ZA_ZD_EXT_VALID)) {
        if (state != NVME_ZONE_STATE_CLOSED) {
            trace_pci_nvme_clear_ns_close(state, zone->d.zslba);
            nvme_set_zone_state(zone, NVME_ZONE_STATE_CLOSED);
        }
        nvme_aor_inc_active(ns);
        QTAILQ_INSERT_HEAD(&ns->closed_zones, zone, entry);
    } else {
        trace_pci_nvme_clear_ns_reset(state, zone->d.zslba);
        if (zone->d.za & NVME_ZA_ZRWA_VALID) {
            zone->d.za &= ~NVME_ZA_ZRWA_VALID;
            ns->zns.numzrwa++;
        }
        nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY);
    }
}

/*
 * Close all the zones that are currently open.
 */
static void nvme_zoned_ns_shutdown(NvmeNamespace *ns)
{
    NvmeZone *zone, *next;

    QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
        QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }
    QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
        QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
        nvme_aor_dec_open(ns);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }
    QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
        QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
        nvme_aor_dec_open(ns);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }

    assert(ns->nr_open_zones == 0);
}
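/*
 * Return the first reclaim unit handle in the endurance group carrying the
 * given attribute, storing its index in *ruhid, or NULL if there is none.
 */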
static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp,
                                           uint8_t ruha, uint16_t *ruhid)
{
    for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) {
        NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i];

        if (ruh->ruha == ruha) {
            *ruhid = i;
            return ruh;
        }
    }

    return NULL;
}
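/*
 * Set up Flexible Data Placement for the namespace. The fdp.ruhs parameter
 * is a semicolon-separated list of reclaim unit handle identifiers, where
 * each element is either a single identifier or an inclusive range; for
 * example (an illustrative configuration), "fdp.ruhs=0;2-4" would select
 * handles 0, 2, 3 and 4. Without the parameter, a single
 * controller-assigned handle is used.
 */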
static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
{
    NvmeEnduranceGroup *endgrp = ns->endgrp;
    NvmeRuHandle *ruh;
    uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    g_autofree unsigned int *ruhids = NULL;
    unsigned int n, m, *ruhid;
    const char *endptr, *token;
    char *r, *p;
    uint16_t *ph;

    if (!ns->params.fdp.ruhs) {
        ns->fdp.nphs = 1;
        ph = ns->fdp.phs = g_new(uint16_t, 1);

        ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph);
        if (!ruh) {
            ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph);
            if (!ruh) {
                error_setg(errp, "no unused reclaim unit handles left");
                return false;
            }

            ruh->ruha = NVME_RUHA_CTRL;
            ruh->lbafi = lbafi;
            ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

            for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
                ruh->rus[rg].ruamw = ruh->ruamw;
            }
        } else if (ruh->lbafi != lbafi) {
            error_setg(errp, "lba format index of controller assigned "
                       "reclaim unit handle does not match namespace lba "
                       "format index");
            return false;
        }

        return true;
    }

    ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh);
    r = p = strdup(ns->params.fdp.ruhs);

    /* parse the placement handle identifiers */
    while ((token = qemu_strsep(&p, ";")) != NULL) {
        if (qemu_strtoui(token, &endptr, 0, &n) < 0) {
            error_setg(errp, "cannot parse reclaim unit handle identifier");
            free(r);
            return false;
        }

        m = n;

        /* parse range */
        if (*endptr == '-') {
            token = endptr + 1;

            if (qemu_strtoui(token, NULL, 0, &m) < 0) {
                error_setg(errp, "cannot parse reclaim unit handle identifier");
                free(r);
                return false;
            }

            if (m < n) {
                error_setg(errp, "invalid reclaim unit handle identifier range");
                free(r);
                return false;
            }
        }

        for (; n <= m; n++) {
            if (ns->fdp.nphs++ == endgrp->fdp.nruh) {
                error_setg(errp, "too many placement handles");
                free(r);
                return false;
            }

            *ruhid++ = n;
        }
    }

    free(r);

    /* verify that the ruhids are unique */
    for (unsigned int i = 0; i < ns->fdp.nphs; i++) {
        for (unsigned int j = i + 1; j < ns->fdp.nphs; j++) {
            if (ruhids[i] == ruhids[j]) {
                error_setg(errp, "duplicate reclaim unit handle identifier: %u",
                           ruhids[i]);
                return false;
            }
        }
    }

    ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs);

    ruhid = ruhids;

    /* verify the identifiers */
    for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) {
        if (*ruhid >= endgrp->fdp.nruh) {
            error_setg(errp, "invalid reclaim unit handle identifier");
            return false;
        }

        ruh = &endgrp->fdp.ruhs[*ruhid];

        switch (ruh->ruha) {
        case NVME_RUHA_UNUSED:
            ruh->ruha = NVME_RUHA_HOST;
            ruh->lbafi = lbafi;
            ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

            for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
                ruh->rus[rg].ruamw = ruh->ruamw;
            }

            break;

        case NVME_RUHA_HOST:
            if (ruh->lbafi != lbafi) {
                error_setg(errp, "lba format index of host assigned "
                           "reclaim unit handle does not match namespace "
                           "lba format index");
                return false;
            }

            break;

        case NVME_RUHA_CTRL:
            error_setg(errp, "reclaim unit handle is controller assigned");
            return false;

        default:
            abort();
        }

        *ph = *ruhid;
    }

    return true;
}
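/*
 * Validate the device parameters before any setup is done: a block backend
 * must be configured, the protection information settings must leave enough
 * metadata for the selected guard type (8 bytes for the 16-bit guard, 16
 * bytes for the 64-bit guard), the namespace id must be in range, and the
 * zoned parameters must be mutually consistent.
 */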
static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
    unsigned int pi_size;

    if (!ns->blkconf.blk) {
        error_setg(errp, "block backend not configured");
        return -1;
    }

    if (ns->params.pi) {
        if (ns->params.pi > NVME_ID_NS_DPS_TYPE_3) {
            error_setg(errp, "invalid 'pi' value");
            return -1;
        }

        switch (ns->params.pif) {
        case NVME_PI_GUARD_16:
            pi_size = 8;
            break;
        case NVME_PI_GUARD_64:
            pi_size = 16;
            break;
        default:
            error_setg(errp, "invalid 'pif'");
            return -1;
        }

        if (ns->params.ms < pi_size) {
            error_setg(errp, "at least %u bytes of metadata required to "
                       "enable protection information", pi_size);
            return -1;
        }
    }

    if (ns->params.nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) {
        error_setg(errp, "cannot be a zoned namespace in an FDP "
                   "configuration");
        return -1;
    }

    if (ns->params.zoned) {
        if (ns->params.max_active_zones) {
            if (ns->params.max_open_zones > ns->params.max_active_zones) {
                error_setg(errp, "max_open_zones (%u) exceeds "
                           "max_active_zones (%u)", ns->params.max_open_zones,
                           ns->params.max_active_zones);
                return -1;
            }

            if (!ns->params.max_open_zones) {
                ns->params.max_open_zones = ns->params.max_active_zones;
            }
        }

        if (ns->params.zd_extension_size) {
            if (ns->params.zd_extension_size & 0x3f) {
                error_setg(errp, "zone descriptor extension size must be a "
                           "multiple of 64B");
                return -1;
            }
            if ((ns->params.zd_extension_size >> 6) > 0xff) {
                error_setg(errp,
                           "zone descriptor extension size is too large");
                return -1;
            }
        }

        if (ns->params.zrwas) {
            if (ns->params.zrwas % ns->blkconf.logical_block_size) {
                error_setg(errp, "zone random write area size (zoned.zrwas "
                           "%"PRIu64") must be a multiple of the logical "
                           "block size (logical_block_size %"PRIu32")",
                           ns->params.zrwas, ns->blkconf.logical_block_size);
                return -1;
            }

            if (ns->params.zrwafg == -1) {
                ns->params.zrwafg = ns->blkconf.logical_block_size;
            }

            if (ns->params.zrwas % ns->params.zrwafg) {
                error_setg(errp, "zone random write area size (zoned.zrwas "
                           "%"PRIu64") must be a multiple of the zone random "
                           "write area flush granularity (zoned.zrwafg, "
                           "%"PRIu64")", ns->params.zrwas, ns->params.zrwafg);
                return -1;
            }

            if (ns->params.max_active_zones) {
                if (ns->params.numzrwa > ns->params.max_active_zones) {
                    error_setg(errp, "number of zone random write area "
                               "resources (zoned.numzrwa, %d) must be less "
                               "than or equal to maximum active resources "
                               "(zoned.max_active_zones, %d)",
                               ns->params.numzrwa,
                               ns->params.max_active_zones);
                    return -1;
                }
            }
        }
    }

    return 0;
}
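/*
 * Top-level namespace setup, called from realize. The order matters:
 * constraints are checked first, the block backend is configured next
 * (establishing ns->size), then the identify structures are initialized,
 * and only then the zoned and FDP extensions, which depend on those
 * results, are set up.
 */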
int nvme_ns_setup(NvmeNamespace *ns, Error **errp)
{
    if (nvme_ns_check_constraints(ns, errp)) {
        return -1;
    }

    if (nvme_ns_init_blk(ns, errp)) {
        return -1;
    }

    if (nvme_ns_init(ns, errp)) {
        return -1;
    }

    if (ns->params.zoned) {
        if (nvme_ns_zoned_check_calc_geometry(ns, errp) != 0) {
            return -1;
        }
        nvme_ns_init_zoned(ns);
    }

    if (ns->endgrp && ns->endgrp->fdp.enabled) {
        if (!nvme_ns_init_fdp(ns, errp)) {
            return -1;
        }
    }

    return 0;
}
void nvme_ns_drain(NvmeNamespace *ns)
{
    blk_drain(ns->blkconf.blk);
}
void nvme_ns_shutdown(NvmeNamespace *ns)
{
    blk_flush(ns->blkconf.blk);
    if (ns->params.zoned) {
        nvme_zoned_ns_shutdown(ns);
    }
}
void nvme_ns_cleanup(NvmeNamespace *ns)
{
    if (ns->params.zoned) {
        g_free(ns->id_ns_zoned);
        g_free(ns->zone_array);
        g_free(ns->zd_extensions);
    }

    if (ns->endgrp && ns->endgrp->fdp.enabled) {
        g_free(ns->fdp.phs);
    }
}
static void nvme_ns_unrealize(DeviceState *dev)
{
    NvmeNamespace *ns = NVME_NS(dev);

    nvme_ns_drain(ns);
    nvme_ns_shutdown(ns);
    nvme_ns_cleanup(ns);
}
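/*
 * Device realization. An nvme-ns device always sits on the NvmeBus created
 * by its parent nvme controller; if that controller is linked to an
 * nvme-subsys device, the namespace is reparented onto the subsystem bus.
 * When no 'nsid' parameter is given, the first namespace id unused by both
 * the controller and the subsystem is allocated (so, for example, a second
 * nvme-ns device without an explicit nsid would typically get nsid 2).
 */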
static void nvme_ns_realize(DeviceState *dev, Error **errp)
|
|
|
|
{
|
|
|
|
NvmeNamespace *ns = NVME_NS(dev);
|
|
|
|
BusState *s = qdev_get_parent_bus(dev);
|
|
|
|
NvmeCtrl *n = NVME(s->parent);
|
hw/block/nvme: fix handling of private namespaces
Prior to this patch, if a private nvme-ns device (that is, a namespace
that is not linked to a subsystem) is wired up to an nvme-subsys linked
nvme controller device, the device fails to verify that the namespace id
is unique within the subsystem. NVM Express v1.4b, Section 6.1.6 ("NSID
and Namespace Usage") states that because the device supports Namespace
Management, "NSIDs *shall* be unique within the NVM subsystem".
Additionally, prior to this patch, private namespaces are not known to
the subsystem and the namespace is considered exclusive to the
controller with which it is initially wired up to. However, this is not
the definition of a private namespace; per Section 1.6.33 ("private
namespace"), a private namespace is just a namespace that does not
support multipath I/O or namespace sharing, which means "that it is only
able to be attached to one controller at a time".
Fix this by always allocating namespaces in the subsystem (if one is
linked to the controller), regardless of the shared/private status of
the namespace. Whether or not the namespace is shareable is controlled
by a new `shared` nvme-ns parameter.
Finally, this fix allows the nvme-ns `subsys` parameter to be removed,
since the `shared` parameter now serves the purpose of attaching the
namespace to all controllers in the subsystem upon device realization.
It is invalid to have an nvme-ns namespace device with a linked
subsystem without the parent nvme controller device also being linked to
one and since the nvme-ns devices will unconditionally be "attached" (in
QEMU terms that is) to an nvme controller device through an NvmeBus, the
nvme-ns namespace device can always get a reference to the subsystem of
the controller it is explicitly (using 'bus=' parameter) or implicitly
attaching to.
Fixes: e570768566b3 ("hw/block/nvme: support for shared namespace in subsystem")
Cc: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Gollu Appalanaidu <anaidu.gollu@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
2021-03-23 14:43:24 +03:00
|
|
|
NvmeSubsystem *subsys = n->subsys;
|
|
|
|
uint32_t nsid = ns->params.nsid;
|
|
|
|
int i;
|
hw/block/nvme: support multiple namespaces
This adds support for multiple namespaces by introducing a new 'nvme-ns'
device model. The nvme device creates a bus named from the device name
('id'). The nvme-ns devices then connect to this and registers
themselves with the nvme device.
This changes how an nvme device is created. Example with two namespaces:
-drive file=nvme0n1.img,if=none,id=disk1
-drive file=nvme0n2.img,if=none,id=disk2
-device nvme,serial=deadbeef,id=nvme0
-device nvme-ns,drive=disk1,bus=nvme0,nsid=1
-device nvme-ns,drive=disk2,bus=nvme0,nsid=2
The drive property is kept on the nvme device to keep the change
backward compatible, but the property is now optional. Specifying a
drive for the nvme device will always create the namespace with nsid 1.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
2019-06-26 09:51:06 +03:00
|
|
|
|
2021-07-06 10:10:56 +03:00
|
|
|
if (!n->subsys) {
|
2022-06-28 15:22:09 +03:00
|
|
|
/* If no subsys, the ns cannot be attached to more than one ctrl. */
|
|
|
|
ns->params.shared = false;
|
2021-07-06 10:10:56 +03:00
|
|
|
if (ns->params.detached) {
|
|
|
|
error_setg(errp, "detached requires that the nvme device is "
|
|
|
|
"linked to an nvme-subsys device");
|
|
|
|
return;
|
|
|
|
}
|
2021-04-23 19:55:11 +03:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* If this namespace belongs to a subsystem (through a link on the
|
|
|
|
* controller device), reparent the device.
|
|
|
|
*/
|
|
|
|
if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
|
|
|
|
return;
|
|
|
|
}
|
2023-02-20 14:59:23 +03:00
|
|
|
ns->subsys = subsys;
|
2023-02-20 14:59:26 +03:00
|
|
|
ns->endgrp = &subsys->endgrp;
|
2021-07-06 10:10:56 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nvme_ns_setup(ns, errp)) {
|
hw/block/nvme: support multiple namespaces
This adds support for multiple namespaces by introducing a new 'nvme-ns'
device model. The nvme device creates a bus named from the device name
('id'). The nvme-ns devices then connect to this and registers
themselves with the nvme device.
This changes how an nvme device is created. Example with two namespaces:
-drive file=nvme0n1.img,if=none,id=disk1
-drive file=nvme0n2.img,if=none,id=disk2
-device nvme,serial=deadbeef,id=nvme0
-device nvme-ns,drive=disk1,bus=nvme0,nsid=1
-device nvme-ns,drive=disk2,bus=nvme0,nsid=2
The drive property is kept on the nvme device to keep the change
backward compatible, but the property is now optional. Specifying a
drive for the nvme device will always create the namespace with nsid 1.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
2019-06-26 09:51:06 +03:00
|
|
|
        return;
    }
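
    /*
     * NSIDs must be unique within the subsystem (NVM Express v1.4b, Section
     * 6.1.6). When no nsid is given, the first id that is free on both the
     * controller and the subsystem is picked; e.g. with namespaces already
     * at nsid 1 and 3, the next unspecified namespace gets nsid 2.
     */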
    if (!nsid) {
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) {
                continue;
            }

            nsid = ns->params.nsid = i;
            break;
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return;
        }
    } else {
        if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) {
            error_setg(errp, "namespace id '%d' already allocated", nsid);
            return;
        }
    }

hw/block/nvme: support for shared namespace in subsystem
An nvme-ns device is registered to an nvme controller device during
initialization in nvme_register_namespace() when the 'bus' property is
given, which means it is mapped to a single controller.
This patch introduces a new property, 'subsys', just like the one the
controller device instance has, to map a namespace to an NVMe subsystem.
If the 'subsys' property is given to the nvme-ns device, it will belong
to the specified subsystem and will be attached to all controllers in
that subsystem by enabling the shared namespace capability in NMIC
(Namespace Multi-path I/O and Namespace Capabilities) in Identify
Namespace.
Usage:
-device nvme-subsys,id=subsys0
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme,serial=bar,id=nvme1,subsys=subsys0
-device nvme,serial=baz,id=nvme2,subsys=subsys0
-device nvme-ns,id=ns1,drive=<drv>,nsid=1,subsys=subsys0 # Shared
-device nvme-ns,id=ns2,drive=<drv>,nsid=2,bus=nvme2      # Non-shared
In the above example, 'ns1' will be shared by 'nvme0' and 'nvme1' in
the same subsystem. On the other hand, 'ns2' will be attached to
'nvme2' only, as a private namespace in that subsystem.
Every namespace with the 'subsys' parameter will, by default, be
attached to all controllers in the subsystem.
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
Tested-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
2021-01-24 05:54:50 +03:00
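
    /*
     * With a linked subsystem, the namespace is always recorded in the
     * subsystem, whether shared or private. A detached namespace stays
     * allocated but unattached; a shared one is attached to every
     * controller currently registered in the subsystem.
     */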
    if (subsys) {
        subsys->namespaces[nsid] = ns;

        ns->id_ns.endgid = cpu_to_le16(0x1);
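
        /*
         * Namespaces registered in a subsystem are all placed in the single
         * endurance group with id 1 (ns->endgrp was pointed at the
         * subsystem's endurance group during reparenting above).
         */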
        if (ns->params.detached) {
            return;
        }

        if (ns->params.shared) {
            for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) {
                NvmeCtrl *ctrl = subsys->ctrls[i];

                if (ctrl && ctrl != SUBSYS_SLOT_RSVD) {
                    nvme_attach_ns(ctrl, ns);
                }
            }

            return;
        }
    }
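
    /* Private namespace: attach it to the parent controller only. */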
    nvme_attach_ns(n, ns);
}

hw/block/nvme: support namespace detach
Given that we now have nvme-subsys device support, we can manage
namespaces that are allocated but not attached: detached. This patch
introduces a parameter for the nvme-ns device named 'detached'. This
parameter indicates whether the given namespace device is detached from
the entire NVMe subsystem ('subsys' given case, shared namespace) or
from a controller ('bus' given case, private namespace).
- Allocated namespace
1) Shared ns in the subsystem 'subsys0':
-device nvme-ns,id=ns1,drive=blknvme0,nsid=1,subsys=subsys0,detached=true
2) Private ns for the controller 'nvme0' of the subsystem 'subsys0':
-device nvme-subsys,id=subsys0
-device nvme,serial=foo,id=nvme0,subsys=subsys0
-device nvme-ns,id=ns1,drive=blknvme0,nsid=1,bus=nvme0,detached=true
3) (Invalid case) Controller 'nvme0' has no subsystem to manage ns:
-device nvme,serial=foo,id=nvme0
-device nvme-ns,id=ns1,drive=blknvme0,nsid=1,bus=nvme0,detached=true
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
2021-02-05 18:30:10 +03:00

static Property nvme_ns_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf),
    DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false),
    DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true),
DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0),
|
2022-04-29 11:33:34 +03:00
|
|
|
DEFINE_PROP_UUID_NODEFAULT("uuid", NvmeNamespace, params.uuid),
|
2024-02-22 20:50:16 +03:00
|
|
|
DEFINE_PROP_NGUID_NODEFAULT("nguid", NvmeNamespace, params.nguid),
|
2021-06-14 23:19:00 +03:00
|
|
|
DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0),
|
2020-11-23 13:24:55 +03:00
|
|
|
DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0),
|
|
|
|
DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0),
|
2021-02-04 11:55:48 +03:00
|
|
|
DEFINE_PROP_UINT8("pi", NvmeNamespace, params.pi, 0),
|
|
|
|
DEFINE_PROP_UINT8("pil", NvmeNamespace, params.pil, 0),
|
2021-11-16 16:26:52 +03:00
|
|
|
DEFINE_PROP_UINT8("pif", NvmeNamespace, params.pif, 0),
|
2020-11-06 12:46:01 +03:00
|
|
|
DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128),
|
|
|
|
DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128),
|
|
|
|
DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127),
|
2020-12-08 23:04:06 +03:00
|
|
|
DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false),
|
|
|
|
DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs,
|
|
|
|
NVME_DEFAULT_ZONE_SIZE),
|
|
|
|
DEFINE_PROP_SIZE("zoned.zone_capacity", NvmeNamespace, params.zone_cap_bs,
|
|
|
|
0),
|
|
|
|
DEFINE_PROP_BOOL("zoned.cross_read", NvmeNamespace,
|
|
|
|
params.cross_zone_read, false),
|
2020-12-08 23:04:07 +03:00
|
|
|
DEFINE_PROP_UINT32("zoned.max_active", NvmeNamespace,
|
|
|
|
params.max_active_zones, 0),
|
|
|
|
DEFINE_PROP_UINT32("zoned.max_open", NvmeNamespace,
|
|
|
|
params.max_open_zones, 0),
|
2020-12-08 23:04:08 +03:00
|
|
|
DEFINE_PROP_UINT32("zoned.descr_ext_size", NvmeNamespace,
|
|
|
|
params.zd_extension_size, 0),
|
2021-03-04 10:40:11 +03:00
|
|
|
DEFINE_PROP_UINT32("zoned.numzrwa", NvmeNamespace, params.numzrwa, 0),
|
|
|
|
DEFINE_PROP_SIZE("zoned.zrwas", NvmeNamespace, params.zrwas, 0),
|
|
|
|
DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1),
|
2021-06-14 23:19:01 +03:00
|
|
|
DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
|
2022-04-29 11:33:33 +03:00
|
|
|
false),
|
2023-02-20 14:59:26 +03:00
|
|
|
DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
|
    DEFINE_PROP_END_OF_LIST(),
};
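
/*
 * Illustrative example (an assumption for documentation purposes, not part
 * of the original file): a zoned namespace with 64 MiB zones and type 1
 * end-to-end protection could be created with a single -device argument
 * along the lines of (wrapped here for readability):
 *
 *   -drive file=zns.img,if=none,id=zns1
 *   -device nvme,serial=deadbeef,id=nvme0
 *   -device nvme-ns,drive=zns1,bus=nvme0,nsid=1,zoned=true,
 *           zoned.zone_size=64M,zoned.max_open=16,ms=8,pi=1
 */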

static void nvme_ns_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

    dc->bus_type = TYPE_NVME_BUS;
    dc->realize = nvme_ns_realize;
    dc->unrealize = nvme_ns_unrealize;
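
    /*
     * Restricting bus_type to TYPE_NVME_BUS (above) ensures that nvme-ns
     * devices can only be plugged onto a bus exposed by an nvme controller
     * device, explicitly via 'bus=' or implicitly onto a suitable free bus.
     */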
    device_class_set_props(dc, nvme_ns_props);
    dc->desc = "Virtual NVMe namespace";
}
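
/*
 * Note: instance_init runs when the object is created, before user-supplied
 * properties are applied, so the bootindex path below is built from the
 * initial (default) nsid value.
 */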
static void nvme_ns_instance_init(Object *obj)
{
    NvmeNamespace *ns = NVME_NS(obj);
    char *bootindex = g_strdup_printf("/namespace@%d,0", ns->params.nsid);

    device_add_bootindex_property(obj, &ns->bootindex, "bootindex",
                                  bootindex, DEVICE(obj));

    g_free(bootindex);
}

static const TypeInfo nvme_ns_info = {
    .name = TYPE_NVME_NS,
    .parent = TYPE_DEVICE,
    .class_init = nvme_ns_class_init,
    .instance_size = sizeof(NvmeNamespace),
    .instance_init = nvme_ns_instance_init,
};

static void nvme_ns_register_types(void)
{
    type_register_static(&nvme_ns_info);
}

type_init(nvme_ns_register_types)
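
/*
 * Once registered, the namespace device is instantiated with
 * '-device nvme-ns,...'; the full property list above can be inspected at
 * the command line with '-device nvme-ns,help' (standard qdev behavior).
 */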