spapr_numa: move NVLink2 associativity handling to spapr_numa.c

NVLink2 GPUs work like regular NUMA nodes, each with its own
associativity values, regardless of user input.

This can be handled inside spapr_numa_associativity_init() by
initializing NVGPU_MAX_NUM extra associativity arrays, one for
each GPU the machine might have.

Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20200903220639.563090-5-danielhb413@gmail.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Author: Daniel Henrique Barboza, 2020-09-03 19:06:36 -03:00
Committed by: David Gibson
commit dd7e1d7ae4 (parent 0ee520126a)
2 changed files with 30 additions and 18 deletions
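
For readers skimming the change: each associativity array the patch
initializes is a leading size cell followed by MAX_DISTANCE_REF_POINTS
cells, with the NUMA node id in the last cell. Below is a minimal
standalone sketch of the layout a GPU node ends up with. It is
illustrative only, not part of the patch: it uses host-endian values
where the real code stores cpu_to_be32() cells, and it assumes
MAX_DISTANCE_REF_POINTS == 4, as defined in hw/ppc/spapr.h of this
period.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DISTANCE_REF_POINTS 4
    #define NUMA_ASSOC_SIZE         (MAX_DISTANCE_REF_POINTS + 1)
    #define SPAPR_GPU_NUMA_ID       1

    /* Fill one GPU associativity array the way the patched
     * spapr_numa_associativity_init() does (endianness omitted). */
    static void fill_gpu_assoc(uint32_t *assoc, uint32_t node_id, int pre_5_1)
    {
        int j;

        assoc[0] = MAX_DISTANCE_REF_POINTS;          /* size cell */
        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
            /* pre-5.1 machines pin every refpoint to the legacy GPU id */
            assoc[j] = pre_5_1 ? SPAPR_GPU_NUMA_ID : node_id;
        }
        assoc[MAX_DISTANCE_REF_POINTS] = node_id;    /* always the numa_id */
    }

    int main(void)
    {
        uint32_t assoc[NUMA_ASSOC_SIZE];
        int nb_numa_nodes = 2;        /* regular NUMA nodes on the machine */
        int j;

        /* the first GPU takes the first NUMA id after the regular nodes */
        fill_gpu_assoc(assoc, nb_numa_nodes, 0);
        for (j = 0; j < NUMA_ASSOC_SIZE; j++) {
            printf("%u ", assoc[j]); /* prints: 4 2 2 2 2 */
        }
        printf("\n");
        return 0;
    }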

diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c

@@ -13,14 +13,18 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "hw/ppc/spapr_numa.h"
+#include "hw/pci-host/spapr.h"
 #include "hw/ppc/fdt.h"
 
+/* Moved from hw/ppc/spapr_pci_nvlink2.c */
+#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
+
 void spapr_numa_associativity_init(SpaprMachineState *spapr,
                                    MachineState *machine)
 {
+    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
     int nb_numa_nodes = machine->numa_state->num_nodes;
-    int i;
+    int i, j, max_nodes_with_gpus;
 
     /*
      * For all associativity arrays: first position is the size,
@@ -35,6 +39,28 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr,
         spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
         spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
     }
+
+    /*
+     * Initialize NVLink GPU associativity arrays. We know that
+     * the first GPU will take the first available NUMA id, and
+     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
+     * At this point we're not sure if there are GPUs or not, but
+     * let's initialize the associativity arrays and allow NVLink
+     * GPUs to be handled like regular NUMA nodes later on.
+     */
+    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;
+
+    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
+        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
+
+        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
+            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
+                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
+            spapr->numa_assoc_array[i][j] = gpu_assoc;
+        }
+
+        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
+    }
 }
 
 void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
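
The spapr_numa_write_associativity_dt() helper that the nvlink2 code
switches to below is not part of this diff; it was introduced earlier
in the same series. For context, and assuming the implementation from
that earlier patch, it amounts to a single fdt_setprop() of the
pre-computed array:

    /* Context sketch, not part of this diff: serialize the array
     * computed at init time as the "ibm,associativity" property. */
    void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                           int offset, int nodeid)
    {
        _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                          spapr->numa_assoc_array[nodeid],
                          sizeof(spapr->numa_assoc_array[nodeid]))));
    }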

diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
--- a/hw/ppc/spapr_pci_nvlink2.c
+++ b/hw/ppc/spapr_pci_nvlink2.c

@@ -26,6 +26,7 @@
 #include "qemu-common.h"
 #include "hw/pci/pci.h"
 #include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_numa.h"
 #include "qemu/error-report.h"
 #include "hw/ppc/fdt.h"
 #include "hw/pci/pci_bridge.h"
@@ -37,8 +38,6 @@
 #define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
                                      ((gn) << 4) | (nn))
 
-#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
-
 typedef struct SpaprPhbPciNvGpuSlot {
         uint64_t tgt;
         uint64_t gpa;
@@ -360,13 +359,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
         Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
                                                     "nvlink2-mr[0]",
                                                     &error_abort);
-        uint32_t associativity[] = {
-            cpu_to_be32(0x4),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id),
-            cpu_to_be32(nvslot->numa_id)
-        };
         uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
         uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
         char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
@@ -376,14 +368,8 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
         _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
         _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
 
-        if (sphb->pre_5_1_assoc) {
-            associativity[1] = SPAPR_GPU_NUMA_ID;
-            associativity[2] = SPAPR_GPU_NUMA_ID;
-            associativity[3] = SPAPR_GPU_NUMA_ID;
-        }
-
-        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
-                          sizeof(associativity))));
+        spapr_numa_write_associativity_dt(SPAPR_MACHINE(qdev_get_machine()),
+                                          fdt, off, nvslot->numa_id);
 
         _FDT((fdt_setprop_string(fdt, off, "compatible",
                                  "ibm,coherent-device-memory")));