hw/nvme: Add SPDM over DOE support

Set up Data Object Exchange (DOE) as an extended capability for the NVMe
controller and connect SPDM (CMA) to it.

Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Acked-by: Klaus Jensen <k.jensen@samsung.com>
Message-Id: <20240703092027.644758-4-alistair.francis@wdc.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author: Wilfred Mallawa, 2024-07-03 19:20:27 +10:00; committed by Michael S. Tsirkin
commit 4f947b10d5 (parent bc419a1cc5)
5 changed files with 207 additions and 0 deletions

docs/specs/index.rst

@@ -29,6 +29,7 @@ guest hardware that is specific to QEMU.
edu
ivshmem-spec
pvpanic
spdm
standard-vga
virt-ctlr
vmcoreinfo

docs/specs/spdm.rst (new file, 134 lines)

@@ -0,0 +1,134 @@
======================================================
QEMU Security Protocols and Data Models (SPDM) Support
======================================================

SPDM enables authentication, attestation and key exchange to assist in
providing infrastructure security enablement. It's a standard published
by the `DMTF`_.

QEMU supports connecting to an SPDM responder implementation. This allows an
external application to emulate the SPDM responder logic for an SPDM device.

Setting up an SPDM server
=========================

When using QEMU with SPDM devices, QEMU will connect to a server which
implements the SPDM functionality.

SPDM-Utils
----------

You can use `SPDM Utils`_ to emulate a responder. This is the simplest method.

SPDM-Utils is a Linux application to manage, test and develop devices
supporting the DMTF Security Protocol and Data Model (SPDM). It is written in
Rust and utilises libspdm.

To use SPDM-Utils you will need to do the following steps. Details are
included in the SPDM-Utils README.

1. `Build libspdm`_
2. `Build SPDM Utils`_
3. `Run it as a server`_

spdm-emu
--------

You can use `spdm emu`_ to model the SPDM responder.
.. code-block:: shell

   $ cd spdm-emu
   $ git submodule init; git submodule update --recursive
   $ mkdir build; cd build
   $ cmake -DARCH=x64 -DTOOLCHAIN=GCC -DTARGET=Debug -DCRYPTO=openssl ..
   $ make -j32
   $ make copy_sample_key # Build certificates, required for SPDM authentication.

It is worth noting that the certificates should be in compliance with
PCIe r6.1 sec 6.31.3. This means you will need to add the following to
openssl.cnf

.. code-block::

   subjectAltName = otherName:2.23.147;UTF8:Vendor=1b36:Device=0010:CC=010802:REV=02:SSVID=1af4:SSID=1100
   2.23.147 = ASN1:OID:2.23.147

and then manually regenerate some certificates with:

.. code-block:: shell

   $ openssl req -nodes -newkey ec:param.pem -keyout end_responder.key \
       -out end_responder.req -sha384 -batch \
       -subj "/CN=DMTF libspdm ECP384 responder cert"

   $ openssl x509 -req -in end_responder.req -out end_responder.cert \
       -CA inter.cert -CAkey inter.key -sha384 -days 3650 -set_serial 3 \
       -extensions v3_end -extfile ../openssl.cnf

   $ openssl asn1parse -in end_responder.cert -out end_responder.cert.der

   $ cat ca.cert.der inter.cert.der end_responder.cert.der > bundle_responder.certchain.der
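
To double-check that the regenerated leaf certificate carries the otherName
entry required by PCIe r6.1 sec 6.31.3, you can inspect its extensions (an
optional sanity check; the exact output formatting depends on your OpenSSL
version):

.. code-block:: shell

   # Dump the X.509 extensions and look for the subjectAltName/otherName
   # entry that was added via openssl.cnf above.
   $ openssl x509 -in end_responder.cert -noout -text | grep -A1 "Subject Alternative Name"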

You can use SPDM-Utils instead as it will generate the correct certificates
automatically.

The responder can then be launched with

.. code-block:: shell

   $ cd bin
   $ ./spdm_responder_emu --trans PCI_DOE

Connecting an SPDM NVMe device
==============================

Once an SPDM server is running we can start QEMU and connect to the server.

For an NVMe device, first let's set up a block device we can use:

.. code-block:: shell

   $ cd qemu-spdm/linux/image
   $ dd if=/dev/zero of=blknvme bs=1M count=2096 # 2GB NVMe drive

Then you can add this to your QEMU command line:

.. code-block:: shell

   -drive file=blknvme,if=none,id=mynvme,format=raw \
   -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323

At which point QEMU will try to connect to the SPDM server.

Note that if using x86-64 you will want to use the q35 machine instead
of the default. So the entire QEMU command might look like this:

.. code-block:: shell

   qemu-system-x86_64 -M q35 \
       --kernel bzImage \
       -drive file=rootfs.ext2,if=virtio,format=raw \
       -append "root=/dev/vda console=ttyS0" \
       -net none -nographic \
       -drive file=blknvme,if=none,id=mynvme,format=raw \
       -device nvme,drive=mynvme,serial=deadbeef,spdm_port=2323
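
Once the guest has booted, a quick way to sanity-check the setup is to confirm
that the DOE extended capability shows up on the NVMe controller. This assumes
pciutils is available in the guest; newer versions decode the capability by
name, while older ones may only report extended capability ID 0x2e:

.. code-block:: shell

   # 1b36:0010 is the QEMU NVMe controller's vendor/device ID.
   $ lspci -vv -d 1b36:0010 | grep -i -A2 "Data Object Exchange"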

.. _DMTF:
   https://www.dmtf.org/standards/SPDM

.. _SPDM Utils:
   https://github.com/westerndigitalcorporation/spdm-utils

.. _spdm emu:
   https://github.com/dmtf/spdm-emu

.. _Build libspdm:
   https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-libspdm

.. _Build SPDM Utils:
   https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-the-binary

.. _Run it as a server:
   https://github.com/westerndigitalcorporation/spdm-utils#qemu-spdm-device-emulation

hw/nvme/ctrl.c

@@ -203,6 +203,7 @@
#include "sysemu/hostmem.h"
#include "hw/pci/msix.h"
#include "hw/pci/pcie_sriov.h"
#include "sysemu/spdm-socket.h"
#include "migration/vmstate.h"
#include "nvme.h"

@@ -8113,6 +8114,27 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
    return 0;
}
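
/*
 * CMA/SPDM response callback: a request written to the DOE write mailbox is
 * forwarded verbatim to the external SPDM responder over the socket, and the
 * response is placed in the DOE read mailbox. DOE object lengths are counted
 * in dwords, hence the conversions to and from byte counts.
 */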
static bool pcie_doe_spdm_rsp(DOECap *doe_cap)
{
    void *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    uint32_t req_len = pcie_doe_get_obj_len(req) * 4;
    void *rsp = doe_cap->read_mbox;
    uint32_t rsp_len = SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE;

    uint32_t recvd = spdm_socket_rsp(doe_cap->spdm_socket,
                                     SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
                                     req, req_len, rsp, rsp_len);
    doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4);

    return recvd != 0;
}
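
/* The CMA/SPDM and Secured CMA/SPDM DOE protocols share the same handler. */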
static DOEProtocol doe_spdm_prot[] = {
    { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_CMA, pcie_doe_spdm_rsp },
    { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_SECURED_CMA, pcie_doe_spdm_rsp },
    { }
};

static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    ERRP_GUARD();

@@ -8200,6 +8222,25 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
    nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
    pcie_cap_deverr_init(pci_dev);

    /* DOE Initialisation */
    if (pci_dev->spdm_port) {
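        /*
         * When SR-IOV is enabled the ARI capability already occupies the
         * start of extended config space, so place the DOE capability right
         * after it; otherwise it can live at PCI_CONFIG_SPACE_SIZE itself.
         */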
        uint16_t doe_offset = n->params.sriov_max_vfs ?
                                  PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
                                  : PCI_CONFIG_SPACE_SIZE;

        pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
                      doe_spdm_prot, true, 0);

        pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port,
                                                            errp);

        if (pci_dev->doe_spdm.spdm_socket < 0) {
            return false;
        }
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

@@ -8446,6 +8487,11 @@ static void nvme_exit(PCIDevice *pci_dev)
        g_free(n->cmb.buf);
    }

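    /* Tear down the connection to the external SPDM responder, if any. */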
    if (pci_dev->doe_spdm.spdm_socket > 0) {
        spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
                          SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }

@@ -8491,6 +8537,7 @@ static Property nvme_props[] = {
    DEFINE_PROP_BOOL("msix-exclusive-bar", NvmeCtrl, params.msix_exclusive_bar,
                     false),
    DEFINE_PROP_UINT16("mqes", NvmeCtrl, params.mqes, 0x7ff),
    DEFINE_PROP_UINT16("spdm_port", PCIDevice, spdm_port, 0),
    DEFINE_PROP_END_OF_LIST(),
};

@@ -8562,11 +8609,25 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
{
    uint16_t old_num_vfs = pcie_sriov_num_vfs(dev);

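    /*
     * The DOE mailbox registers live in extended config space; give the DOE
     * emulation a chance to act on writes that target them before the default
     * config-write handling runs.
     */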
    if (pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
        pcie_doe_write_config(&dev->doe_spdm, address, val, len);
    }
    pci_default_write_config(dev, address, val, len);
    pcie_cap_flr_write_config(dev, address, val, len);
    nvme_sriov_post_write_config(dev, old_num_vfs);
}

static uint32_t nvme_pci_read_config(PCIDevice *dev, uint32_t address, int len)
{
    uint32_t val;

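    /*
     * Reads that land in the DOE capability are answered by the DOE
     * emulation; everything else falls through to the default config read.
     */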
    if (dev->spdm_port && pcie_find_capability(dev, PCI_EXT_CAP_ID_DOE)) {
        if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) {
            return val;
        }
    }

    return pci_default_read_config(dev, address, len);
}

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,

@@ -8579,6 +8640,7 @@ static void nvme_class_init(ObjectClass *oc, void *data)
    pc->realize = nvme_realize;
    pc->config_write = nvme_pci_write_config;
    pc->config_read = nvme_pci_read_config;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

include/hw/pci/pci_device.h

@@ -3,6 +3,7 @@
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_doe.h"
#define TYPE_PCI_DEVICE "pci-device"
typedef struct PCIDeviceClass PCIDeviceClass;

@@ -159,6 +160,12 @@ struct PCIDevice {
    MSIVectorReleaseNotifier msix_vector_release_notifier;
    MSIVectorPollNotifier msix_vector_poll_notifier;

    /* SPDM */
    uint16_t spdm_port;

    /* DOE */
    DOECap doe_spdm;

    /* ID of standby device in net_failover pair */
    char *failover_pair_id;
    uint32_t acpi_index;

include/hw/pci/pcie_doe.h

@@ -108,6 +108,9 @@ struct DOECap {
    /* Protocols and its callback response */
    DOEProtocol *protocols;
    uint16_t protocol_num;

    /* Used for spdm-socket */
    int spdm_socket;
};
void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,