spapr/pci: Convert types to QEMU coding style

The QEMU coding style requires:
- structured types to be typedef'ed (HACKING)
- CamelCase for type and structure names (CODING_STYLE)

Do that for the PCI and NVLink2 code.

Signed-off-by: Greg Kurz <groug@kaod.org>
Message-Id: <156701644465.505236.2850655823182656869.stgit@bahia.lan>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Author: Greg Kurz, 2019-08-28 20:20:44 +02:00
Committer: David Gibson
Parent: 6c3829a265
Commit: 572ebd08b3
3 changed files with 49 additions and 43 deletions
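
As a quick illustration of the two rules, here is a minimal before/after sketch of the conversion this commit applies, using the SpaprPciMsi type from the diff below:

/* Before: lower-case struct tag, no typedef */
struct spapr_pci_msi {
    uint32_t first_irq;
    uint32_t num;
};
struct spapr_pci_msi *msi;   /* every use repeats the 'struct' keyword */

/* After: typedef'ed, CamelCase type name */
typedef struct SpaprPciMsi {
    uint32_t first_irq;
    uint32_t num;
} SpaprPciMsi;
SpaprPciMsi *msi;            /* uses read as a plain type name */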

hw/ppc/spapr_pci.c

@@ -280,7 +280,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
     unsigned int irq, max_irqs = 0;
     SpaprPhbState *phb = NULL;
     PCIDevice *pdev = NULL;
-    spapr_pci_msi *msi;
+    SpaprPciMsi *msi;
     int *config_addr_key;
     Error *err = NULL;
     int i;
@@ -328,7 +328,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
         return;
     }
 
-    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
+    msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr);
 
     /* Releasing MSIs */
     if (!req_num) {
@@ -415,7 +415,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                         irq, req_num);
 
     /* Add MSI device to cache */
-    msi = g_new(spapr_pci_msi, 1);
+    msi = g_new(SpaprPciMsi, 1);
     msi->first_irq = irq;
     msi->num = req_num;
     config_addr_key = g_new(int, 1);
@@ -446,7 +446,7 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
     unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
     SpaprPhbState *phb = NULL;
     PCIDevice *pdev = NULL;
-    spapr_pci_msi *msi;
+    SpaprPciMsi *msi;
 
     /* Find SpaprPhbState */
     phb = spapr_pci_find_phb(spapr, buid);
@@ -459,7 +459,7 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
     }
 
     /* Find device descriptor and start IRQ */
-    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
+    msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr);
     if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
         trace_spapr_pci_msi("Failed to return vector", config_addr);
         rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
@@ -1806,7 +1806,7 @@ static void spapr_phb_destroy_msi(gpointer opaque)
 {
     SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
-    spapr_pci_msi *msi = opaque;
+    SpaprPciMsi *msi = opaque;
 
     if (!smc->legacy_irq_allocation) {
         spapr_irq_msi_free(spapr, msi->first_irq, msi->num);
@@ -2120,7 +2120,7 @@ static const VMStateDescription vmstate_spapr_pci_lsi = {
     .version_id = 1,
     .minimum_version_id = 1,
     .fields = (VMStateField[]) {
-        VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi, NULL),
+        VMSTATE_UINT32_EQUAL(irq, SpaprPciLsi, NULL),
 
         VMSTATE_END_OF_LIST()
     },
@@ -2131,9 +2131,9 @@ static const VMStateDescription vmstate_spapr_pci_msi = {
     .version_id = 1,
     .minimum_version_id = 1,
     .fields = (VMStateField []) {
-        VMSTATE_UINT32(key, spapr_pci_msi_mig),
-        VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
-        VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
+        VMSTATE_UINT32(key, SpaprPciMsiMig),
+        VMSTATE_UINT32(value.first_irq, SpaprPciMsiMig),
+        VMSTATE_UINT32(value.num, SpaprPciMsiMig),
         VMSTATE_END_OF_LIST()
     },
 };
@@ -2165,12 +2165,12 @@ static int spapr_pci_pre_save(void *opaque)
     if (!sphb->msi_devs_num) {
         return 0;
     }
 
-    sphb->msi_devs = g_new(spapr_pci_msi_mig, sphb->msi_devs_num);
+    sphb->msi_devs = g_new(SpaprPciMsiMig, sphb->msi_devs_num);
     g_hash_table_iter_init(&iter, sphb->msi);
     for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
         sphb->msi_devs[i].key = *(uint32_t *) key;
-        sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
+        sphb->msi_devs[i].value = *(SpaprPciMsi *) value;
     }
 
     return 0;
@@ -2217,10 +2217,10 @@ static const VMStateDescription vmstate_spapr_pci = {
         VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration),
         VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration),
         VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0,
-                             vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
+                             vmstate_spapr_pci_lsi, SpaprPciLsi),
         VMSTATE_INT32(msi_devs_num, SpaprPhbState),
         VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, SpaprPhbState, msi_devs_num, 0,
-                                    vmstate_spapr_pci_msi, spapr_pci_msi_mig),
+                                    vmstate_spapr_pci_msi, SpaprPciMsiMig),
         VMSTATE_END_OF_LIST()
     },
 };
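
A note on the vmstate hunks above: these renames are migration-safe. The VMSTATE_* macros use the C struct type only for compile-time work (offsetof() and field type checks); what goes into the migration stream is keyed by the VMStateDescription .name strings and the field layout, neither of which changes here. A simplified sketch of the mechanism, assumed shape only (the real macros live in include/migration/vmstate.h and carry more fields):

/* Hypothetical, stripped-down stand-in for VMSTATE_UINT32(_field, _state). */
#define SKETCH_VMSTATE_UINT32(_field, _state) {                               \
    .name   = #_field,                  /* stream key: the field name string */\
    .size   = sizeof(uint32_t),                                               \
    .offset = offsetof(_state, _field), /* the only use of the C type name  */\
}

Renaming spapr_pci_msi_mig to SpaprPciMsiMig therefore changes the offsetof() argument but not a single byte on the wire.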

hw/ppc/spapr_pci_nvlink2.c

@@ -39,11 +39,7 @@
 
 #define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
 
-struct spapr_phb_pci_nvgpu_config {
-    uint64_t nv2_ram_current;
-    uint64_t nv2_atsd_current;
-    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
-    struct spapr_phb_pci_nvgpu_slot {
+typedef struct SpaprPhbPciNvGpuSlot {
         uint64_t tgt;
         uint64_t gpa;
         unsigned numa_id;
@@ -54,12 +50,18 @@
             PCIDevice *npdev;
             uint32_t link_speed;
         } links[NVGPU_MAX_LINKS];
-    } slots[NVGPU_MAX_NUM];
+} SpaprPhbPciNvGpuSlot;
+
+struct SpaprPhbPciNvGpuConfig {
+    uint64_t nv2_ram_current;
+    uint64_t nv2_atsd_current;
+    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
+    SpaprPhbPciNvGpuSlot slots[NVGPU_MAX_NUM];
     Error *errp;
 };
 
-static struct spapr_phb_pci_nvgpu_slot *
-spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt)
+static SpaprPhbPciNvGpuSlot *
+spapr_nvgpu_get_slot(SpaprPhbPciNvGpuConfig *nvgpus, uint64_t tgt)
 {
     int i;
 
@@ -81,13 +83,13 @@ spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt)
     return &nvgpus->slots[i];
 }
 
-static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+static void spapr_pci_collect_nvgpu(SpaprPhbPciNvGpuConfig *nvgpus,
                                     PCIDevice *pdev, uint64_t tgt,
                                     MemoryRegion *mr, Error **errp)
 {
     MachineState *machine = MACHINE(qdev_get_machine());
     SpaprMachineState *spapr = SPAPR_MACHINE(machine);
-    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+    SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
 
     if (!nvslot) {
         error_setg(errp, "Found too many GPUs per vPHB");
@@ -102,11 +104,11 @@ static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
     ++spapr->gpu_numa_id;
 }
 
-static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+static void spapr_pci_collect_nvnpu(SpaprPhbPciNvGpuConfig *nvgpus,
                                     PCIDevice *pdev, uint64_t tgt,
                                     MemoryRegion *mr, Error **errp)
 {
-    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+    SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
     int j;
 
     if (!nvslot) {
@@ -138,7 +140,7 @@ static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
 
     if (tgt) {
         Error *local_err = NULL;
-        struct spapr_phb_pci_nvgpu_config *nvgpus = opaque;
+        SpaprPhbPciNvGpuConfig *nvgpus = opaque;
         Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL);
         Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]",
                                                   NULL);
@@ -177,7 +179,7 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
         return;
     }
 
-    sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1);
+    sphb->nvgpus = g_new0(SpaprPhbPciNvGpuConfig, 1);
     sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr;
     sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
 
@@ -194,7 +196,7 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp)
     /* Add found GPU RAM and ATSD MRs if found */
     for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) {
         Object *nvmrobj;
-        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
 
         if (!nvslot->gpdev) {
             continue;
@@ -242,7 +244,7 @@ void spapr_phb_nvgpu_free(SpaprPhbState *sphb)
     }
 
     for (i = 0; i < sphb->nvgpus->num; ++i) {
-        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
         Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
                                                     "nvlink2-mr[0]", NULL);
 
@@ -276,7 +278,7 @@ void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
     }
 
     for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) {
-        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
 
         if (!nvslot->gpdev) {
             continue;
@@ -354,7 +356,7 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
 
     /* Add memory nodes for GPU RAM and mark them unusable */
     for (i = 0; i < sphb->nvgpus->num; ++i) {
-        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
         Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
                                                     "nvlink2-mr[0]", NULL);
         uint32_t associativity[] = {
@@ -398,7 +400,7 @@ void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
     }
 
     for (i = 0; i < sphb->nvgpus->num; ++i) {
-        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i];
 
         /* Skip "slot" without attached GPU */
         if (!nvslot->gpdev) {
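
For readability, here are the two resulting definitions pieced together from the first two hunks above. The per-GPU fields falling between the hunks (gpdev, linknum, atsd_gpa) are not visible in this excerpt and are reproduced from the QEMU tree at this commit; the commit keeps the slot members at their old deeper indentation to minimize the diff:

typedef struct SpaprPhbPciNvGpuSlot {
        uint64_t tgt;
        uint64_t gpa;
        unsigned numa_id;
        PCIDevice *gpdev;      /* not shown in the hunks above */
        int linknum;           /* not shown in the hunks above */
        struct {
            uint64_t atsd_gpa; /* not shown in the hunks above */
            PCIDevice *npdev;
            uint32_t link_speed;
        } links[NVGPU_MAX_LINKS];
} SpaprPhbPciNvGpuSlot;

struct SpaprPhbPciNvGpuConfig {
    uint64_t nv2_ram_current;
    uint64_t nv2_atsd_current;
    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
    SpaprPhbPciNvGpuSlot slots[NVGPU_MAX_NUM];
    Error *errp;
};

The slot struct had to be hoisted out of the config struct: there is no clean way to attach a file-scope typedef to a struct tag declared inside another struct.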

include/hw/pci-host/spapr.h

@@ -34,15 +34,21 @@
 
 typedef struct SpaprPhbState SpaprPhbState;
 
-typedef struct spapr_pci_msi {
+typedef struct SpaprPciMsi {
     uint32_t first_irq;
     uint32_t num;
-} spapr_pci_msi;
+} SpaprPciMsi;
 
-typedef struct spapr_pci_msi_mig {
+typedef struct SpaprPciMsiMig {
     uint32_t key;
-    spapr_pci_msi value;
-} spapr_pci_msi_mig;
+    SpaprPciMsi value;
+} SpaprPciMsiMig;
+
+typedef struct SpaprPciLsi {
+    uint32_t irq;
+} SpaprPciLsi;
+
+typedef struct SpaprPhbPciNvGpuConfig SpaprPhbPciNvGpuConfig;
 
 struct SpaprPhbState {
     PCIHostState parent_obj;
@@ -63,14 +69,12 @@ struct SpaprPhbState {
     AddressSpace iommu_as;
     MemoryRegion iommu_root;
 
-    struct spapr_pci_lsi {
-        uint32_t irq;
-    } lsi_table[PCI_NUM_PINS];
+    SpaprPciLsi lsi_table[PCI_NUM_PINS];
 
     GHashTable *msi;
     /* Temporary cache for migration purposes */
     int32_t msi_devs_num;
-    spapr_pci_msi_mig *msi_devs;
+    SpaprPciMsiMig *msi_devs;
 
     QLIST_ENTRY(SpaprPhbState) list;
 
@@ -89,7 +93,7 @@ struct SpaprPhbState {
     hwaddr mig_io_win_addr, mig_io_win_size;
     hwaddr nv2_gpa_win_addr;
     hwaddr nv2_atsd_win_addr;
-    struct spapr_phb_pci_nvgpu_config *nvgpus;
+    SpaprPhbPciNvGpuConfig *nvgpus;
 };
 
 #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
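
The typedef struct SpaprPhbPciNvGpuConfig SpaprPhbPciNvGpuConfig; line added to the header is the classic opaque-type idiom: spapr.h can declare the nvgpus pointer without exposing the NVLink2 internals, which stay private to spapr_pci_nvlink2.c. A generic sketch of the pattern, with hypothetical names:

/* widget.h -- public interface; the layout of struct Widget stays hidden */
typedef struct Widget Widget;      /* opaque forward typedef */
Widget *widget_new(void);
void widget_free(Widget *w);

/* widget.c -- the only file that needs the full definition */
struct Widget {
    int refcount;
    char *name;
};

Callers can hold and pass Widget pointers, but only widget.c can dereference them, so the struct layout can change without touching any code that includes the header.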