spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
/*
|
|
|
|
* QEMU PAPR Storage Class Memory Interfaces
|
|
|
|
*
|
|
|
|
* Copyright (c) 2019-2020, IBM Corporation.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
2022-02-18 10:34:14 +03:00
|
|
|
#include "qemu/cutils.h"
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "hw/ppc/spapr_drc.h"
|
|
|
|
#include "hw/ppc/spapr_nvdimm.h"
|
|
|
|
#include "hw/mem/nvdimm.h"
|
|
|
|
#include "qemu/nvdimm-utils.h"
|
|
|
|
#include "hw/ppc/fdt.h"
|
2020-02-10 07:56:42 +03:00
|
|
|
#include "qemu/range.h"
|
spapr: introduce SpaprMachineState::numa_assoc_array
The next step to centralize all NUMA/associativity handling in
the spapr machine is to create a 'one stop place' for all
things ibm,associativity.
This patch introduces numa_assoc_array, a 2 dimensional array
that will store all ibm,associativity arrays of all NUMA nodes.
This array is initialized in a new spapr_numa_associativity_init()
function, called in spapr_machine_init(). It is being initialized
with the same values used in other ibm,associativity properties
around spapr files (i.e. all zeros, last value is node_id).
The idea is to remove all hardcoded definitions and FDT writes
of ibm,associativity arrays, doing instead a call to the new
helper spapr_numa_write_associativity_dt() helper, that will
be able to write the DT with the correct values.
We'll start small, handling the trivial cases first. The
remaining instances of ibm,associativity will be handled
next.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200903220639.563090-2-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-09-04 01:06:33 +03:00
|
|
|
#include "hw/ppc/spapr_numa.h"
|
2022-02-18 10:34:14 +03:00
|
|
|
#include "block/thread-pool.h"
|
|
|
|
#include "migration/vmstate.h"
|
|
|
|
#include "qemu/pmem.h"
|
2022-02-18 10:34:14 +03:00
|
|
|
#include "hw/qdev-properties.h"
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
|
2021-04-02 13:21:28 +03:00
|
|
|
/* DIMM health bitmap indicators. Taken from kernel's papr_scm.c */
|
|
|
|
/* SCM device is unable to persist memory contents */
|
|
|
|
#define PAPR_PMEM_UNARMED PPC_BIT(0)
|
|
|
|
|
2021-05-18 15:03:17 +03:00
|
|
|
/*
|
|
|
|
* The nvdimm size should be aligned to SCM block size.
|
|
|
|
* The SCM block size should be aligned to SPAPR_MEMORY_BLOCK_SIZE
|
|
|
|
* in order to have SCM regions not to overlap with dimm memory regions.
|
|
|
|
* The SCM devices can have variable block sizes. For now, fixing the
|
|
|
|
* block size to the minimum value.
|
|
|
|
*/
|
|
|
|
#define SPAPR_MINIMUM_SCM_BLOCK_SIZE SPAPR_MEMORY_BLOCK_SIZE
|
|
|
|
|
|
|
|
/* Have an explicit check for alignment */
|
|
|
|
QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE);
|
|
|
|
|
2022-02-18 10:34:14 +03:00
|
|
|
#define TYPE_SPAPR_NVDIMM "spapr-nvdimm"
|
|
|
|
OBJECT_DECLARE_TYPE(SpaprNVDIMMDevice, SPAPRNVDIMMClass, SPAPR_NVDIMM)
|
|
|
|
|
|
|
|
struct SPAPRNVDIMMClass {
|
|
|
|
/* private */
|
|
|
|
NVDIMMClass parent_class;
|
2022-02-18 10:34:14 +03:00
|
|
|
|
|
|
|
/* public */
|
|
|
|
void (*realize)(NVDIMMDevice *dimm, Error **errp);
|
|
|
|
void (*unrealize)(NVDIMMDevice *dimm, Error **errp);
|
2022-02-18 10:34:14 +03:00
|
|
|
};
|
|
|
|
|
2020-09-14 15:35:02 +03:00
|
|
|
bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm,
|
2020-08-26 00:57:48 +03:00
|
|
|
uint64_t size, Error **errp)
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
{
|
2020-08-26 00:57:48 +03:00
|
|
|
const MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
|
ppc/spapr_nvdimm: do not enable support with 'nvdimm=off'
The NVDIMM support for pSeries was introduced in 5.1, but it
didn't contemplate the 'nvdimm' machine option that other
archs uses. For every other arch, if no '-machine nvdimm(=on)'
is present, it is assumed that the NVDIMM support is disabled.
The user must explictly inform that the machine supports
NVDIMM. For pseries-5.1 the 'nvdimm' option is completely
ignored, and support is always assumed to exist. This
leads to situations where the user is able to set 'nvdimm=off'
but the guest boots up with the NVDIMMs anyway.
Fixing this now, after 5.1 launch, can put the overall NVDIMM
support for pseries in a strange place regarding this 'nvdimm'
machine option. If we force everything to be like other archs,
existing pseries-5.1 guests that didn't use 'nvdimm' to use NVDIMM
devices will break. If we attempt to make the newer pseries
machines (5.2+) behave like everyone else, but keep pseries-5.1
untouched, we'll have consistency problems on machine upgrade
(5.1 will have different default values for NVDIMM support than
5.2).
The common ground here is, if the user sets 'nvdimm=off', we
must comply regardless of being 5.1 or 5.2+. This patch
changes spapr_nvdimm_validate() to verify if the user set
NVDIMM support off in the machine options and, in that
case, error out if we have a NVDIMM device. The default
value for 5.2+ pseries machines will still be 'nvdimm=on'
when there is no 'nvdimm' option declared, just like it is today
with pseries-5.1. In the end we'll have different default
semantics from everyone else in the absence of the 'nvdimm'
machine option, but this boat has sailed.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1848887
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200825215749.213536-4-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-08-26 00:57:49 +03:00
|
|
|
const MachineState *ms = MACHINE(hotplug_dev);
|
2022-02-18 10:34:14 +03:00
|
|
|
PCDIMMDevice *dimm = PC_DIMM(nvdimm);
|
|
|
|
MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem);
|
2020-08-26 00:57:47 +03:00
|
|
|
g_autofree char *uuidstr = NULL;
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
QemuUUID uuid;
|
2020-02-27 16:42:49 +03:00
|
|
|
int ret;
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
|
2020-08-26 00:57:48 +03:00
|
|
|
if (!mc->nvdimm_supported) {
|
|
|
|
error_setg(errp, "NVDIMM hotplug not supported for this machine");
|
2020-09-14 15:35:02 +03:00
|
|
|
return false;
|
2020-08-26 00:57:48 +03:00
|
|
|
}
|
|
|
|
|
2020-12-08 19:46:06 +03:00
|
|
|
if (!ms->nvdimms_state->is_enabled) {
|
ppc/spapr_nvdimm: do not enable support with 'nvdimm=off'
The NVDIMM support for pSeries was introduced in 5.1, but it
didn't contemplate the 'nvdimm' machine option that other
archs uses. For every other arch, if no '-machine nvdimm(=on)'
is present, it is assumed that the NVDIMM support is disabled.
The user must explictly inform that the machine supports
NVDIMM. For pseries-5.1 the 'nvdimm' option is completely
ignored, and support is always assumed to exist. This
leads to situations where the user is able to set 'nvdimm=off'
but the guest boots up with the NVDIMMs anyway.
Fixing this now, after 5.1 launch, can put the overall NVDIMM
support for pseries in a strange place regarding this 'nvdimm'
machine option. If we force everything to be like other archs,
existing pseries-5.1 guests that didn't use 'nvdimm' to use NVDIMM
devices will break. If we attempt to make the newer pseries
machines (5.2+) behave like everyone else, but keep pseries-5.1
untouched, we'll have consistency problems on machine upgrade
(5.1 will have different default values for NVDIMM support than
5.2).
The common ground here is, if the user sets 'nvdimm=off', we
must comply regardless of being 5.1 or 5.2+. This patch
changes spapr_nvdimm_validate() to verify if the user set
NVDIMM support off in the machine options and, in that
case, error out if we have a NVDIMM device. The default
value for 5.2+ pseries machines will still be 'nvdimm=on'
when there is no 'nvdimm' option declared, just like it is today
with pseries-5.1. In the end we'll have different default
semantics from everyone else in the absence of the 'nvdimm'
machine option, but this boat has sailed.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1848887
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200825215749.213536-4-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-08-26 00:57:49 +03:00
|
|
|
error_setg(errp, "nvdimm device found but 'nvdimm=off' was set");
|
2020-09-14 15:35:02 +03:00
|
|
|
return false;
|
ppc/spapr_nvdimm: do not enable support with 'nvdimm=off'
The NVDIMM support for pSeries was introduced in 5.1, but it
didn't contemplate the 'nvdimm' machine option that other
archs uses. For every other arch, if no '-machine nvdimm(=on)'
is present, it is assumed that the NVDIMM support is disabled.
The user must explictly inform that the machine supports
NVDIMM. For pseries-5.1 the 'nvdimm' option is completely
ignored, and support is always assumed to exist. This
leads to situations where the user is able to set 'nvdimm=off'
but the guest boots up with the NVDIMMs anyway.
Fixing this now, after 5.1 launch, can put the overall NVDIMM
support for pseries in a strange place regarding this 'nvdimm'
machine option. If we force everything to be like other archs,
existing pseries-5.1 guests that didn't use 'nvdimm' to use NVDIMM
devices will break. If we attempt to make the newer pseries
machines (5.2+) behave like everyone else, but keep pseries-5.1
untouched, we'll have consistency problems on machine upgrade
(5.1 will have different default values for NVDIMM support than
5.2).
The common ground here is, if the user sets 'nvdimm=off', we
must comply regardless of being 5.1 or 5.2+. This patch
changes spapr_nvdimm_validate() to verify if the user set
NVDIMM support off in the machine options and, in that
case, error out if we have a NVDIMM device. The default
value for 5.2+ pseries machines will still be 'nvdimm=on'
when there is no 'nvdimm' option declared, just like it is today
with pseries-5.1. In the end we'll have different default
semantics from everyone else in the absence of the 'nvdimm'
machine option, but this boat has sailed.
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1848887
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200825215749.213536-4-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-08-26 00:57:49 +03:00
|
|
|
}
|
|
|
|
|
2020-04-13 23:36:28 +03:00
|
|
|
if (object_property_get_int(OBJECT(nvdimm), NVDIMM_LABEL_SIZE_PROP,
|
|
|
|
&error_abort) == 0) {
|
2020-04-24 04:56:17 +03:00
|
|
|
error_setg(errp, "PAPR requires NVDIMM devices to have label-size set");
|
2020-09-14 15:35:02 +03:00
|
|
|
return false;
|
2020-04-13 23:36:28 +03:00
|
|
|
}
|
|
|
|
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
if (size % SPAPR_MINIMUM_SCM_BLOCK_SIZE) {
|
2020-04-24 04:56:17 +03:00
|
|
|
error_setg(errp, "PAPR requires NVDIMM memory size (excluding label)"
|
|
|
|
" to be a multiple of %" PRIu64 "MB",
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
SPAPR_MINIMUM_SCM_BLOCK_SIZE / MiB);
|
2020-09-14 15:35:02 +03:00
|
|
|
return false;
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
}
|
|
|
|
|
2020-02-27 16:42:49 +03:00
|
|
|
uuidstr = object_property_get_str(OBJECT(nvdimm), NVDIMM_UUID_PROP,
|
|
|
|
&error_abort);
|
|
|
|
ret = qemu_uuid_parse(uuidstr, &uuid);
|
|
|
|
g_assert(!ret);
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
|
|
|
|
if (qemu_uuid_is_null(&uuid)) {
|
|
|
|
error_setg(errp, "NVDIMM device requires the uuid to be set");
|
2020-09-14 15:35:02 +03:00
|
|
|
return false;
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
}
|
2020-09-14 15:35:02 +03:00
|
|
|
|
2022-02-18 10:34:14 +03:00
|
|
|
if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM) &&
|
|
|
|
(memory_region_get_fd(mr) < 0)) {
|
|
|
|
error_setg(errp, "spapr-nvdimm device requires the "
|
|
|
|
"memdev %s to be of memory-backend-file type",
|
|
|
|
object_get_canonical_path_component(OBJECT(dimm->hostmem)));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-09-14 15:35:02 +03:00
|
|
|
return true;
|
spapr: Add NVDIMM device support
Add support for NVDIMM devices for sPAPR. Piggyback on existing nvdimm
device interface in QEMU to support virtual NVDIMM devices for Power.
Create the required DT entries for the device (some entries have
dummy values right now).
The patch creates the required DT node and sends a hotplug
interrupt to the guest. Guest is expected to undertake the normal
DR resource add path in response and start issuing PAPR SCM hcalls.
The device support is verified based on the machine version unlike x86.
This is how it can be used ..
Ex :
For coldplug, the device to be added in qemu command line as shown below
-object memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
-device nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
For hotplug, the device to be added from monitor as below
object_add memory-backend-file,id=memnvdimm0,prealloc=yes,mem-path=/tmp/nvdimm0,share=yes,size=1073872896
device_add nvdimm,label-size=128k,uuid=75a3cdd7-6a2f-4791-8d15-fe0a920e8e9e,memdev=memnvdimm0,id=nvdimm0,slot=0
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
[Early implementation]
Message-Id: <158131058078.2897.12767731856697459923.stgit@lep8c.aus.stglabs.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-02-10 07:56:31 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-11-21 02:42:01 +03:00
|
|
|
/*
 * Plug an NVDIMM device into the sPAPR machine.
 *
 * Looks up the PMEM DRC for @slot, attaches the device to it, and —
 * when the device is being hotplugged (as opposed to coldplugged at
 * machine start) — raises a hotplug interrupt so the guest starts the
 * DR resource-add sequence.
 *
 * @dev: the NVDIMM device being plugged
 * @slot: the DIMM slot number, which indexes the PMEM DRC
 */
void spapr_add_nvdimm(DeviceState *dev, uint64_t slot)
{
    SpaprDrc *drc;
    bool hotplugged = spapr_drc_hotplugged(dev);

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot);
    g_assert(drc);

    /*
     * pc_dimm_get_free_slot() provided a free slot at pre-plug. The
     * corresponding DRC is thus assumed to be attachable.
     */
    spapr_drc_attach(drc, dev);

    if (hotplugged) {
        spapr_hotplug_req_add_by_index(drc);
    }
}
|
|
|
|
|
spapr: introduce SpaprMachineState::numa_assoc_array
The next step to centralize all NUMA/associativity handling in
the spapr machine is to create a 'one stop place' for all
things ibm,associativity.
This patch introduces numa_assoc_array, a 2 dimensional array
that will store all ibm,associativity arrays of all NUMA nodes.
This array is initialized in a new spapr_numa_associativity_init()
function, called in spapr_machine_init(). It is being initialized
with the same values used in other ibm,associativity properties
around spapr files (i.e. all zeros, last value is node_id).
The idea is to remove all hardcoded definitions and FDT writes
of ibm,associativity arrays, doing instead a call to the new
helper spapr_numa_write_associativity_dt() helper, that will
be able to write the DT with the correct values.
We'll start small, handling the trivial cases first. The
remaining instances of ibm,associativity will be handled
next.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200903220639.563090-2-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-09-04 01:06:33 +03:00
|
|
|
/*
 * Create the "ibm,pmemory@<drc-index>" device-tree node for one NVDIMM
 * under @parent_offset and fill in its PAPR SCM properties.
 *
 * Returns the offset of the newly created child node (via _FDT() the
 * function aborts on any libfdt error).
 */
static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt,
                           int parent_offset, NVDIMMDevice *nvdimm)
{
    int child_offset;
    char *buf;
    SpaprDrc *drc;
    uint32_t drc_idx;
    /* NUMA node and DRC slot are read from the DIMM's QOM properties. */
    uint32_t node = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_NODE_PROP,
                                             &error_abort);
    uint64_t slot = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_SLOT_PROP,
                                             &error_abort);
    uint64_t lsize = nvdimm->label_size;
    uint64_t size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
                                            NULL);

    /* The DRC for this slot was created at machine init; it must exist. */
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot);
    g_assert(drc);

    drc_idx = spapr_drc_index(drc);

    /* Node name encodes the DRC index as the unit address. */
    buf = g_strdup_printf("ibm,pmemory@%x", drc_idx);
    child_offset = fdt_add_subnode(fdt, parent_offset, buf);
    g_free(buf);

    _FDT(child_offset);

    _FDT((fdt_setprop_cell(fdt, child_offset, "reg", drc_idx)));
    _FDT((fdt_setprop_string(fdt, child_offset, "compatible", "ibm,pmemory")));
    _FDT((fdt_setprop_string(fdt, child_offset, "device_type", "ibm,pmemory")));

    /* NUMA associativity for the node comes from the machine-wide tables. */
    spapr_numa_write_associativity_dt(spapr, fdt, child_offset, node);

    buf = qemu_uuid_unparse_strdup(&nvdimm->uuid);
    _FDT((fdt_setprop_string(fdt, child_offset, "ibm,unit-guid", buf)));
    g_free(buf);

    _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,my-drc-index", drc_idx)));

    /* Device size is exposed as a count of fixed-size SCM blocks. */
    _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,block-size",
                          SPAPR_MINIMUM_SCM_BLOCK_SIZE)));
    _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,number-of-blocks",
                          size / SPAPR_MINIMUM_SCM_BLOCK_SIZE)));
    _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,metadata-size", lsize)));

    _FDT((fdt_setprop_string(fdt, child_offset, "ibm,pmem-application",
                             "operating-system")));
    _FDT(fdt_setprop(fdt, child_offset, "ibm,cache-flush-required", NULL, 0));

    if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) {
        bool is_pmem = false, pmem_override = false;
        PCDIMMDevice *dimm = PC_DIMM(nvdimm);
        HostMemoryBackend *hostmem = dimm->hostmem;

        is_pmem = object_property_get_bool(OBJECT(hostmem), "pmem", NULL);
        pmem_override = object_property_get_bool(OBJECT(nvdimm),
                                                 "pmem-override", NULL);
        /*
         * Advertise hcall-based flushing whenever the backend is not real
         * pmem, or when the user forced it with pmem-override=on.
         */
        if (!is_pmem || pmem_override) {
            _FDT(fdt_setprop(fdt, child_offset, "ibm,hcall-flush-required",
                             NULL, 0));
        }
    }

    return child_offset;
}
|
|
|
|
|
2020-09-01 15:56:40 +03:00
|
|
|
/*
 * DRC callback: build the device-tree fragment for the NVDIMM attached
 * to @drc at the FDT root and report where it starts. Always succeeds.
 */
int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                           void *fdt, int *fdt_start_offset, Error **errp)
{
    *fdt_start_offset = spapr_dt_nvdimm(spapr, fdt, 0, NVDIMM(drc->dev));

    return 0;
}
|
|
|
|
|
spapr: introduce SpaprMachineState::numa_assoc_array
The next step to centralize all NUMA/associativity handling in
the spapr machine is to create a 'one stop place' for all
things ibm,associativity.
This patch introduces numa_assoc_array, a 2 dimensional array
that will store all ibm,associativity arrays of all NUMA nodes.
This array is initialized in a new spapr_numa_associativity_init()
function, called in spapr_machine_init(). It is being initialized
with the same values used in other ibm,associativity properties
around spapr files (i.e. all zeros, last value is node_id).
The idea is to remove all hardcoded definitions and FDT writes
of ibm,associativity arrays, doing instead a call to the new
helper spapr_numa_write_associativity_dt() helper, that will
be able to write the DT with the correct values.
We'll start small, handling the trivial cases first. The
remaining instances of ibm,associativity will be handled
next.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200903220639.563090-2-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-09-04 01:06:33 +03:00
|
|
|
/*
 * Create the top-level "ibm,persistent-memory" container node (if it
 * does not exist yet) and populate it with one child node per cold
 * plugged NVDIMM device.
 */
void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt)
{
    int offset = fdt_subnode_offset(fdt, 0, "ibm,persistent-memory");
    GSList *iter, *nvdimms = nvdimm_get_device_list();

    if (offset < 0) {
        /* Container node does not exist yet: create and describe it. */
        offset = fdt_add_subnode(fdt, 0, "ibm,persistent-memory");
        _FDT(offset);
        /* Children are addressed by a single cell (the DRC index). */
        _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0x1)));
        _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0x0)));
        _FDT((fdt_setprop_string(fdt, offset, "device_type",
                                 "ibm,persistent-memory")));
    }

    /* Create DT entries for cold plugged NVDIMM devices */
    for (iter = nvdimms; iter; iter = iter->next) {
        NVDIMMDevice *nvdimm = iter->data;

        spapr_dt_nvdimm(spapr, fdt, offset, nvdimm);
    }
    g_slist_free(nvdimms);
}
|
2020-02-10 07:56:42 +03:00
|
|
|
|
|
|
|
/*
 * H_SCM_READ_METADATA hcall: read @len bytes (1, 2, 4 or 8) of the
 * NVDIMM label area at @offset and return the value, big-endian
 * interpreted, in args[0].
 *
 * Returns H_PARAMETER for a missing/non-PMEM DRC, H_P3 for an invalid
 * length, H_P2 when offset+len overflows or exceeds the label area.
 */
static target_ulong h_scm_read_metadata(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    uint32_t drc_index = args[0];
    uint64_t offset = args[1];
    uint64_t len = args[2];
    SpaprDrc *drc = spapr_drc_by_index(drc_index);
    NVDIMMDevice *nvdimm;
    NVDIMMClass *ddc;
    uint64_t data = 0;
    uint8_t buf[8] = { 0 };

    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    /* Only naturally sized accesses are permitted. */
    if (len != 1 && len != 2 &&
        len != 4 && len != 8) {
        return H_P3;
    }

    nvdimm = NVDIMM(drc->dev);
    /* First clause catches unsigned wrap-around of offset + len. */
    if ((offset + len < offset) ||
        (nvdimm->label_size < len + offset)) {
        return H_P2;
    }

    ddc = NVDIMM_GET_CLASS(nvdimm);
    ddc->read_label_data(nvdimm, buf, len, offset);

    /* Label data is interpreted big-endian at the requested width. */
    switch (len) {
    case 1:
        data = ldub_p(buf);
        break;
    case 2:
        data = lduw_be_p(buf);
        break;
    case 4:
        data = ldl_be_p(buf);
        break;
    case 8:
        data = ldq_be_p(buf);
        break;
    default:
        g_assert_not_reached();
    }

    args[0] = data;

    return H_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * H_SCM_WRITE_METADATA hcall: write @len bytes (1, 2, 4 or 8) of @data,
 * big-endian encoded, into the NVDIMM label area at @offset.
 *
 * Returns H_PARAMETER for a missing/non-PMEM DRC, H_P4 for an invalid
 * length, H_P2 for an out-of-range/overflowing offset, a read-only
 * backend, or data wider than the requested length.
 */
static target_ulong h_scm_write_metadata(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    uint32_t drc_index = args[0];
    uint64_t offset = args[1];
    uint64_t data = args[2];
    uint64_t len = args[3];
    SpaprDrc *drc = spapr_drc_by_index(drc_index);
    NVDIMMDevice *nvdimm;
    NVDIMMClass *ddc;
    uint8_t buf[8] = { 0 };

    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    /* Only naturally sized accesses are permitted. */
    if (len != 1 && len != 2 &&
        len != 4 && len != 8) {
        return H_P4;
    }

    nvdimm = NVDIMM(drc->dev);
    /*
     * Reject wrap-around of offset + len, out-of-bounds writes, and
     * writes to a read-only (ROM-backed) label area.
     */
    if ((offset + len < offset) ||
        (nvdimm->label_size < len + offset) ||
        nvdimm->readonly) {
        return H_P2;
    }

    /* Encode @data big-endian; bits beyond @len must be zero. */
    switch (len) {
    case 1:
        if (data & 0xffffffffffffff00) {
            return H_P2;
        }
        stb_p(buf, data);
        break;
    case 2:
        if (data & 0xffffffffffff0000) {
            return H_P2;
        }
        stw_be_p(buf, data);
        break;
    case 4:
        if (data & 0xffffffff00000000) {
            return H_P2;
        }
        stl_be_p(buf, data);
        break;
    case 8:
        stq_be_p(buf, data);
        break;
    default:
        g_assert_not_reached();
    }

    ddc = NVDIMM_GET_CLASS(nvdimm);
    ddc->write_label_data(nvdimm, buf, len, offset);

    return H_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * H_SCM_BIND_MEM hcall: report the guest-physical address at which a
 * range of SCM blocks of an NVDIMM is mapped. QEMU binds the whole
 * device up front, so this only validates arguments and returns the
 * precomputed address in args[1] (R5) and the block count in args[2].
 */
static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    uint32_t drc_index = args[0];
    uint64_t starting_idx = args[1];
    uint64_t no_of_scm_blocks_to_bind = args[2];
    uint64_t target_logical_mem_addr = args[3];
    uint64_t continue_token = args[4];
    uint64_t size;
    uint64_t total_no_of_scm_blocks;
    SpaprDrc *drc = spapr_drc_by_index(drc_index);
    hwaddr addr;
    NVDIMMDevice *nvdimm;

    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    /*
     * The continue token must currently be zero: QEMU has already bound
     * everything and this hcall doesn't return H_BUSY.
     */
    if (continue_token > 0) {
        return H_P5;
    }

    /* Currently qemu assigns the address. */
    if (target_logical_mem_addr != 0xffffffffffffffff) {
        return H_OVERLAP;
    }

    nvdimm = NVDIMM(drc->dev);

    size = object_property_get_uint(OBJECT(nvdimm),
                                    PC_DIMM_SIZE_PROP, &error_abort);

    total_no_of_scm_blocks = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;

    if (starting_idx > total_no_of_scm_blocks) {
        return H_P2;
    }

    /* Reject overflow of the index sum and ranges past the device end. */
    if (((starting_idx + no_of_scm_blocks_to_bind) < starting_idx) ||
        ((starting_idx + no_of_scm_blocks_to_bind) > total_no_of_scm_blocks)) {
        return H_P3;
    }

    addr = object_property_get_uint(OBJECT(nvdimm),
                                    PC_DIMM_ADDR_PROP, &error_abort);

    addr += starting_idx * SPAPR_MINIMUM_SCM_BLOCK_SIZE;

    /* Already bound, Return target logical address in R5 */
    args[1] = addr;
    args[2] = no_of_scm_blocks_to_bind;

    return H_SUCCESS;
}
|
|
|
|
|
2022-02-18 10:34:14 +03:00
|
|
|
/* State tracked for one in-flight or completed NVDIMM flush request. */
typedef struct SpaprNVDIMMDeviceFlushState {
    uint64_t continue_token;    /* token the guest polls with */
    int64_t hcall_ret;          /* result recorded at completion */
    uint32_t drcidx;            /* DRC index of the device being flushed */

    /* Linkage in the device's pending/completed flush lists. */
    QLIST_ENTRY(SpaprNVDIMMDeviceFlushState) node;
} SpaprNVDIMMDeviceFlushState;
|
|
|
|
|
|
|
|
typedef struct SpaprNVDIMMDevice SpaprNVDIMMDevice;
/* spapr-nvdimm: NVDIMM subclass carrying hcall-flush bookkeeping. */
struct SpaprNVDIMMDevice {
    /* private */
    NVDIMMDevice parent_obj;

    /* Whether the guest was told flushes need the flush hcall. */
    bool hcall_flush_required;
    /* Monotonic source for flush continue tokens. */
    uint64_t nvdimm_flush_token;
    QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) pending_nvdimm_flush_states;
    QLIST_HEAD(, SpaprNVDIMMDeviceFlushState) completed_nvdimm_flush_states;

    /* public */

    /*
     * The 'on' value for this property forces QEMU to enable the hcall
     * flush for the nvdimm device even if the backend is a pmem.
     */
    bool pmem_override;
};
|
|
|
|
|
|
|
|
/*
 * Flush worker for one SpaprNVDIMMDeviceFlushState (submitted to the
 * thread pool; see the thread_pool_submit_aio() caller). Persists the
 * device's backing store: pmem_persist() for real pmem backends,
 * otherwise fdatasync() on the backend file descriptor.
 *
 * Returns H_SUCCESS, or H_HARDWARE if the sync fails.
 */
static int flush_worker_cb(void *opaque)
{
    SpaprNVDIMMDeviceFlushState *state = opaque;
    SpaprDrc *drc = spapr_drc_by_index(state->drcidx);
    PCDIMMDevice *dimm;
    HostMemoryBackend *backend;
    int backend_fd;

    g_assert(drc != NULL);

    dimm = PC_DIMM(drc->dev);
    backend = MEMORY_BACKEND(dimm->hostmem);
    backend_fd = memory_region_get_fd(&backend->mr);

    if (object_property_get_bool(OBJECT(backend), "pmem", NULL)) {
        MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem);
        void *ptr = memory_region_get_ram_ptr(mr);
        size_t size = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP,
                                               NULL);

        /* flush pmem backend */
        pmem_persist(ptr, size);
    } else {
        /* flush raw backing image */
        if (qemu_fdatasync(backend_fd) < 0) {
            error_report("papr_scm: Could not sync nvdimm to backend file: %s",
                         strerror(errno));
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}
|
|
|
|
|
|
|
|
/*
 * Completion callback for flush_worker_cb(): record the hcall result
 * and move the state from the device's pending list to its completed
 * list, where the guest can collect it.
 */
static void spapr_nvdimm_flush_completion_cb(void *opaque, int hcall_ret)
{
    SpaprNVDIMMDeviceFlushState *state = opaque;
    SpaprDrc *drc = spapr_drc_by_index(state->drcidx);
    SpaprNVDIMMDevice *s_nvdimm;

    g_assert(drc != NULL);

    s_nvdimm = SPAPR_NVDIMM(drc->dev);

    state->hcall_ret = hcall_ret;
    QLIST_REMOVE(state, node);
    QLIST_INSERT_HEAD(&s_nvdimm->completed_nvdimm_flush_states, state, node);
}
|
|
|
|
|
|
|
|
/*
 * Migration post_load hook: verify that the destination backend's
 * hcall-flush requirement matches what the guest was told on the
 * source, then resubmit any flush requests that were still pending
 * when the device state was saved.
 *
 * Returns 0 on success, -EINVAL on a source/destination mismatch.
 */
static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
{
    SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque;
    SpaprNVDIMMDeviceFlushState *state;
    HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem);
    bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
    bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm),
                                                  "pmem-override", NULL);
    /* Destination needs hcall flushes unless backed by plain pmem. */
    bool dest_hcall_flush_required = pmem_override || !is_pmem;

    if (!s_nvdimm->hcall_flush_required && dest_hcall_flush_required) {
        error_report("The file backend for the spapr-nvdimm device %s at "
                     "source is a pmem, use pmem=on and pmem-override=off to "
                     "continue.", DEVICE(s_nvdimm)->id);
        return -EINVAL;
    }
    if (s_nvdimm->hcall_flush_required && !dest_hcall_flush_required) {
        error_report("The guest expects hcall-flush support for the "
                     "spapr-nvdimm device %s, use pmem_override=on to "
                     "continue.", DEVICE(s_nvdimm)->id);
        return -EINVAL;
    }

    /* Restart flushes that were in flight on the source. */
    QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
        thread_pool_submit_aio(flush_worker_cb, state,
                               spapr_nvdimm_flush_completion_cb, state);
    }

    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Migration descriptor for a single flush request (pending or
 * completed): the continue-token handed to the guest, the worker's
 * result, and the DRC index identifying the target NVDIMM.
 */
static const VMStateDescription vmstate_spapr_nvdimm_flush_state = {
    .name = "spapr_nvdimm_flush_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(continue_token, SpaprNVDIMMDeviceFlushState),
        VMSTATE_INT64(hcall_ret, SpaprNVDIMMDeviceFlushState),
        VMSTATE_UINT32(drcidx, SpaprNVDIMMDeviceFlushState),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
/*
 * Per spapr-nvdimm device migration state: whether the guest was told to
 * use H_SCM_FLUSH, the last token issued, and both flush-state lists.
 * Registered per device from spapr_nvdimm_realize() via
 * vmstate_register_any(); pending flushes are resubmitted in post_load.
 */
const VMStateDescription vmstate_spapr_nvdimm_states = {
    .name = "spapr_nvdimm_states",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = spapr_nvdimm_flush_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BOOL(hcall_flush_required, SpaprNVDIMMDevice),
        VMSTATE_UINT64(nvdimm_flush_token, SpaprNVDIMMDevice),
        VMSTATE_QLIST_V(completed_nvdimm_flush_states, SpaprNVDIMMDevice, 1,
                        vmstate_spapr_nvdimm_flush_state,
                        SpaprNVDIMMDeviceFlushState, node),
        VMSTATE_QLIST_V(pending_nvdimm_flush_states, SpaprNVDIMMDevice, 1,
                        vmstate_spapr_nvdimm_flush_state,
                        SpaprNVDIMMDeviceFlushState, node),
        VMSTATE_END_OF_LIST()
    },
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Assign a token and reserve it for the new flush state.
|
|
|
|
*/
|
|
|
|
static SpaprNVDIMMDeviceFlushState *spapr_nvdimm_init_new_flush_state(
|
|
|
|
SpaprNVDIMMDevice *spapr_nvdimm)
|
|
|
|
{
|
|
|
|
SpaprNVDIMMDeviceFlushState *state;
|
|
|
|
|
|
|
|
state = g_malloc0(sizeof(*state));
|
|
|
|
|
|
|
|
spapr_nvdimm->nvdimm_flush_token++;
|
|
|
|
/* Token zero is presumed as no job pending. Assert on overflow to zero */
|
|
|
|
g_assert(spapr_nvdimm->nvdimm_flush_token != 0);
|
|
|
|
|
|
|
|
state->continue_token = spapr_nvdimm->nvdimm_flush_token;
|
|
|
|
|
|
|
|
QLIST_INSERT_HEAD(&spapr_nvdimm->pending_nvdimm_flush_states, state, node);
|
|
|
|
|
|
|
|
return state;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * spapr_nvdimm_finish_flushes
 * Waits for all pending flush requests to complete
 * their execution and free the states
 */
void spapr_nvdimm_finish_flushes(void)
{
    SpaprNVDIMMDeviceFlushState *state, *next;
    GSList *list, *nvdimms;

    /*
     * Called on reset path, the main loop thread which calls
     * the pending BHs has gotten out running in the reset path,
     * finally reaching here. Other code path being guest
     * h_client_architecture_support, that's early boot up.
     */
    nvdimms = nvdimm_get_device_list();
    for (list = nvdimms; list; list = list->next) {
        NVDIMMDevice *nvdimm = list->data;
        /* Only spapr-nvdimm devices keep flush state; skip plain nvdimms. */
        if (object_dynamic_cast(OBJECT(nvdimm), TYPE_SPAPR_NVDIMM)) {
            SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(nvdimm);
            /* Drive the event loop until every pending flush has completed. */
            while (!QLIST_EMPTY(&s_nvdimm->pending_nvdimm_flush_states)) {
                aio_poll(qemu_get_aio_context(), true);
            }

            /* Nobody will ever query these results; free them now. */
            QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states,
                               node, next) {
                QLIST_REMOVE(state, node);
                g_free(state);
            }
        }
    }
    g_slist_free(nvdimms);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* spapr_nvdimm_get_flush_status
|
|
|
|
* Fetches the status of the hcall worker and returns
|
|
|
|
* H_LONG_BUSY_ORDER_10_MSEC if the worker is still running.
|
|
|
|
*/
|
|
|
|
static int spapr_nvdimm_get_flush_status(SpaprNVDIMMDevice *s_nvdimm,
|
|
|
|
uint64_t token)
|
|
|
|
{
|
|
|
|
SpaprNVDIMMDeviceFlushState *state, *node;
|
|
|
|
|
|
|
|
QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
|
|
|
|
if (state->continue_token == token) {
|
|
|
|
return H_LONG_BUSY_ORDER_10_MSEC;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
QLIST_FOREACH_SAFE(state, &s_nvdimm->completed_nvdimm_flush_states,
|
|
|
|
node, node) {
|
|
|
|
if (state->continue_token == token) {
|
|
|
|
int ret = state->hcall_ret;
|
|
|
|
QLIST_REMOVE(state, node);
|
|
|
|
g_free(state);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If not found in complete list too, invalid token */
|
|
|
|
return H_P2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * H_SCM_FLUSH
 * Input: drc_index, continue-token
 * Out: continue-token
 * Return Value: H_SUCCESS, H_Parameter, H_P2, H_LONG_BUSY_ORDER_10_MSEC,
 *               H_UNSUPPORTED
 *
 * Given a DRC Index Flush the data to backend NVDIMM device. The hcall returns
 * H_LONG_BUSY_ORDER_10_MSEC when the flush takes longer time and the hcall
 * needs to be issued multiple times in order to be completely serviced. The
 * continue-token from the output to be passed in the argument list of
 * subsequent hcalls until the hcall is completely serviced at which point
 * H_SUCCESS or other error is returned.
 */
static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int ret;
    uint32_t drc_index = args[0];
    uint64_t continue_token = args[1];
    SpaprDrc *drc = spapr_drc_by_index(drc_index);
    PCDIMMDevice *dimm;
    HostMemoryBackend *backend = NULL;
    SpaprNVDIMMDeviceFlushState *state;
    int fd;

    /* The DRC must exist, be plugged, and be a PMEM connector. */
    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    dimm = PC_DIMM(drc->dev);
    /* Flush state lives on SpaprNVDIMMDevice; plain nvdimms don't have it. */
    if (!object_dynamic_cast(OBJECT(dimm), TYPE_SPAPR_NVDIMM)) {
        return H_PARAMETER;
    }
    /* Token zero starts a new flush; non-zero polls an existing one. */
    if (continue_token == 0) {
        bool is_pmem = false, pmem_override = false;
        backend = MEMORY_BACKEND(dimm->hostmem);
        fd = memory_region_get_fd(&backend->mr);

        /* No backing fd to fdatasync: the flush cannot be serviced. */
        if (fd < 0) {
            return H_UNSUPPORTED;
        }

        /*
         * A real pmem backend doesn't need the hcall unless the user
         * explicitly asked for it with pmem-override.
         */
        is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
        pmem_override = object_property_get_bool(OBJECT(dimm),
                                                "pmem-override", NULL);
        if (is_pmem && !pmem_override) {
            return H_UNSUPPORTED;
        }

        state = spapr_nvdimm_init_new_flush_state(SPAPR_NVDIMM(dimm));
        if (!state) {
            return H_HARDWARE;
        }

        state->drcidx = drc_index;

        /* Kick off the flush in the thread pool; completion cb moves the
         * state to the completed list. */
        thread_pool_submit_aio(flush_worker_cb, state,
                               spapr_nvdimm_flush_completion_cb, state);

        continue_token = state->continue_token;
    }

    ret = spapr_nvdimm_get_flush_status(SPAPR_NVDIMM(dimm), continue_token);
    /* Still busy: return the token so the guest can poll again. */
    if (H_IS_LONG_BUSY(ret)) {
        args[0] = continue_token;
    }

    return ret;
}
|
|
|
|
|
2020-02-10 07:56:42 +03:00
|
|
|
/*
 * H_SCM_UNBIND_MEM
 * Input: drc_index, starting SCM logical address, number of blocks,
 *        continue-token (must be zero)
 * Out: args[1] = number of blocks unbound
 *
 * Validates that the requested block range is aligned and fully
 * contained in the NVDIMM; the actual unbind is deferred to unplug.
 */
static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    uint32_t drc_index = args[0];
    uint64_t starting_scm_logical_addr = args[1];
    uint64_t no_of_scm_blocks_to_unbind = args[2];
    uint64_t continue_token = args[3];
    uint64_t size_to_unbind;
    Range blockrange = range_empty;
    Range nvdimmrange = range_empty;
    SpaprDrc *drc = spapr_drc_by_index(drc_index);
    NVDIMMDevice *nvdimm;
    uint64_t size, addr;

    /* The DRC must exist, be plugged, and be a PMEM connector. */
    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    /* continue_token should be zero as this hcall doesn't return H_BUSY. */
    if (continue_token > 0) {
        return H_P4;
    }

    /* Check if starting_scm_logical_addr is block aligned */
    if (!QEMU_IS_ALIGNED(starting_scm_logical_addr,
                         SPAPR_MINIMUM_SCM_BLOCK_SIZE)) {
        return H_P2;
    }

    size_to_unbind = no_of_scm_blocks_to_unbind * SPAPR_MINIMUM_SCM_BLOCK_SIZE;
    /* The divide-back comparison catches multiplication overflow above. */
    if (no_of_scm_blocks_to_unbind == 0 || no_of_scm_blocks_to_unbind !=
                               size_to_unbind / SPAPR_MINIMUM_SCM_BLOCK_SIZE) {
        return H_P3;
    }

    nvdimm = NVDIMM(drc->dev);
    size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
                                   &error_abort);
    addr = object_property_get_int(OBJECT(nvdimm), PC_DIMM_ADDR_PROP,
                                   &error_abort);

    range_init_nofail(&nvdimmrange, addr, size);
    range_init_nofail(&blockrange, starting_scm_logical_addr, size_to_unbind);

    /* The range to unbind must lie entirely within the NVDIMM. */
    if (!range_contains_range(&nvdimmrange, &blockrange)) {
        return H_P3;
    }

    args[1] = no_of_scm_blocks_to_unbind;

    /* let unplug take care of actual unbind */
    return H_SUCCESS;
}
|
|
|
|
|
|
|
|
/* Target-scope values accepted by H_SCM_UNBIND_ALL (args[0]). */
#define H_UNBIND_SCOPE_ALL 0x1
#define H_UNBIND_SCOPE_DRC 0x2
|
|
|
|
|
|
|
|
/*
 * H_SCM_UNBIND_ALL
 * Input: target scope (H_UNBIND_SCOPE_DRC or H_UNBIND_SCOPE_ALL),
 *        drc_index (for the DRC scope), continue-token (must be zero)
 * Out: args[1] = total number of blocks unbound
 *
 * Counts the blocks covered by the requested scope; the actual unbind
 * is deferred to unplug.
 */
static target_ulong h_scm_unbind_all(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    uint64_t target_scope = args[0];
    uint32_t drc_index = args[1];
    uint64_t continue_token = args[2];
    NVDIMMDevice *nvdimm;
    uint64_t size;
    uint64_t no_of_scm_blocks_unbound = 0;

    /* continue_token should be zero as this hcall doesn't return H_BUSY. */
    if (continue_token > 0) {
        return H_P4;
    }

    if (target_scope == H_UNBIND_SCOPE_DRC) {
        /* Single device: the DRC must be a plugged PMEM connector. */
        SpaprDrc *drc = spapr_drc_by_index(drc_index);

        if (!drc || !drc->dev ||
            spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
            return H_P2;
        }

        nvdimm = NVDIMM(drc->dev);
        size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
                                       &error_abort);

        no_of_scm_blocks_unbound = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;
    } else if (target_scope == H_UNBIND_SCOPE_ALL) {
        /* All devices: sum block counts over every plugged nvdimm. */
        GSList *list, *nvdimms;

        nvdimms = nvdimm_get_device_list();
        for (list = nvdimms; list; list = list->next) {
            nvdimm = list->data;
            size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP,
                                           &error_abort);

            no_of_scm_blocks_unbound += size / SPAPR_MINIMUM_SCM_BLOCK_SIZE;
        }
        g_slist_free(nvdimms);
    } else {
        return H_PARAMETER;
    }

    args[1] = no_of_scm_blocks_unbound;

    /* let unplug take care of actual unbind */
    return H_SUCCESS;
}
|
|
|
|
|
2021-04-02 13:21:28 +03:00
|
|
|
/*
 * H_SCM_HEALTH
 * Input: drc_index
 * Out: args[0] = health bitmap, args[1] = bitmap of reported bits
 *
 * Reports the NVDIMM's health to the guest; currently only the
 * "unarmed" state is reported, via PAPR_PMEM_UNARMED.
 */
static target_ulong h_scm_health(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                 target_ulong opcode, target_ulong *args)
{
    const uint64_t reported_bits = PAPR_PMEM_UNARMED;
    uint64_t health = 0;
    SpaprDrc *drc = spapr_drc_by_index(args[0]);
    NVDIMMDevice *nvdimm;

    /* Ensure that the drc is valid & is valid PMEM dimm and is plugged in */
    if (!drc || !drc->dev ||
        spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) {
        return H_PARAMETER;
    }

    nvdimm = NVDIMM(drc->dev);

    /* Update if the nvdimm is unarmed and send its status via health bitmaps */
    if (object_property_get_bool(OBJECT(nvdimm), NVDIMM_UNARMED_PROP, NULL)) {
        health |= PAPR_PMEM_UNARMED;
    }

    /* Update the out args with health bitmap/mask */
    args[0] = health;
    args[1] = reported_bits;

    return H_SUCCESS;
}
|
|
|
|
|
2020-02-10 07:56:42 +03:00
|
|
|
/* Register the PAPR SCM hcalls implemented in this file. */
static void spapr_scm_register_types(void)
{
    /* qemu/scm specific hcalls */
    spapr_register_hypercall(H_SCM_READ_METADATA, h_scm_read_metadata);
    spapr_register_hypercall(H_SCM_WRITE_METADATA, h_scm_write_metadata);
    spapr_register_hypercall(H_SCM_BIND_MEM, h_scm_bind_mem);
    spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem);
    spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all);
    spapr_register_hypercall(H_SCM_HEALTH, h_scm_health);
    spapr_register_hypercall(H_SCM_FLUSH, h_scm_flush);
}

type_init(spapr_scm_register_types)
|
2022-02-18 10:34:14 +03:00
|
|
|
|
|
|
|
/*
 * Realize hook for the spapr-nvdimm device.
 *
 * Decides whether the guest must use the H_SCM_FLUSH hcall (needed when
 * the backend is not pmem, or when pmem-override forces it) and
 * registers the per-device migration state.
 */
static void spapr_nvdimm_realize(NVDIMMDevice *dimm, Error **errp)
{
    SpaprNVDIMMDevice *s_nvdimm = SPAPR_NVDIMM(dimm);
    HostMemoryBackend *hostmem = MEMORY_BACKEND(PC_DIMM(dimm)->hostmem);
    bool backend_is_pmem = object_property_get_bool(OBJECT(hostmem), "pmem",
                                                    NULL);
    bool override = object_property_get_bool(OBJECT(dimm), "pmem-override",
                                             NULL);

    if (override || !backend_is_pmem) {
        s_nvdimm->hcall_flush_required = true;
    }

    vmstate_register_any(NULL, &vmstate_spapr_nvdimm_states, dimm);
}
|
|
|
|
|
|
|
|
/* Undo the vmstate registration performed in spapr_nvdimm_realize(). */
static void spapr_nvdimm_unrealize(NVDIMMDevice *dimm)
{
    vmstate_unregister(NULL, &vmstate_spapr_nvdimm_states, dimm);
}
|
|
|
|
|
|
|
|
static Property spapr_nvdimm_properties[] = {
#ifdef CONFIG_LIBPMEM
    /*
     * pmem-override lets the guest use the H_SCM_FLUSH hcall even when
     * the backend is a real pmem device; only available when QEMU is
     * built with libpmem support.
     */
    DEFINE_PROP_BOOL("pmem-override", SpaprNVDIMMDevice, pmem_override, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
|
|
|
|
|
|
|
|
/* Wire up the spapr-nvdimm realize/unrealize hooks and device properties. */
static void spapr_nvdimm_class_init(ObjectClass *oc, void *data)
{
    NVDIMMClass *nvdimm_class = NVDIMM_CLASS(oc);
    DeviceClass *device_class = DEVICE_CLASS(oc);

    device_class_set_props(device_class, spapr_nvdimm_properties);

    nvdimm_class->realize = spapr_nvdimm_realize;
    nvdimm_class->unrealize = spapr_nvdimm_unrealize;
}
|
|
|
|
|
|
|
|
/* Instance init: no hcall-flush requirement yet, both flush lists empty. */
static void spapr_nvdimm_init(Object *obj)
{
    SpaprNVDIMMDevice *nvdimm = SPAPR_NVDIMM(obj);

    nvdimm->hcall_flush_required = false;
    QLIST_INIT(&nvdimm->completed_nvdimm_flush_states);
    QLIST_INIT(&nvdimm->pending_nvdimm_flush_states);
}
|
|
|
|
|
|
|
|
static TypeInfo spapr_nvdimm_info = {
|
|
|
|
.name = TYPE_SPAPR_NVDIMM,
|
|
|
|
.parent = TYPE_NVDIMM,
|
|
|
|
.class_init = spapr_nvdimm_class_init,
|
|
|
|
.class_size = sizeof(SPAPRNVDIMMClass),
|
|
|
|
.instance_size = sizeof(SpaprNVDIMMDevice),
|
|
|
|
.instance_init = spapr_nvdimm_init,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Register the spapr-nvdimm QOM type at QEMU startup. */
static void spapr_nvdimm_register_types(void)
{
    type_register_static(&spapr_nvdimm_info);
}

type_init(spapr_nvdimm_register_types)
|