Merge tag 'pull-ppc-for-9.2-1-20241104' of https://gitlab.com/npiggin/qemu into staging

* Various bug fixes
* Big cleanup of deprecated machines
* Power11 support for spapr
* XIVE improvements
* Goodbye to Cedric and David as ppc reviewers, thank you both o7

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEETkN92lZhb0MpsKeVZ7MCdqhiHK4FAmcoEicACgkQZ7MCdqhi
# HK5M8Q//fz+ZkJndXkBjb1Oinx+q+eVtNm2JrvcWIsXyhG3K+6VxYPp69H+SRv/Z
# TWuUqMQPxq8mhQvBJlDAttp/oaUEiOcCRvs/iUoBN12L4mVxXfdoT88TZ4frN3eP
# 8bePq+DW2N/7gpmsJm5CyEZPpcf9AjVHgLRp3KYFkOJ/14uzvuwnocU39gl+2IUh
# MXHTedQgMNXaKorJXk1NVdM6NxMuVhOvwxAs6ya2gwhxyA5tteo5PiQOnDJWkejf
# xg3RRsNzGYcs1Qg/3kFIf3RfEB0aYbPxROM8IfPaJWKN5KnMggj/JAkHyK1x/V3J
# wml7+cB0doMt/yRiuYJhXpyrtOqpvjRWPA6RhxECWW2kwrovv8NAF8IrFnw9NvOQ
# QC66ZaaFcbAcFrVT1e/iggU76d01II6m4OAgKcXw+FRHgps4VU9y83j7ApNnNUWN
# IXp9hkzoHi5VwX0FrG4ELUr2iEf1HASMvM8EZ/0AxzWj5iNtQB8lFsrEdaGVXyIS
# M5JaJeNjCn4koCyYaFSctH5eKtbzIwnGWnDcdTwaOuQ+9itBvY8O+HZalE6sAc5S
# kLFZ7i/Ut/qxbY5pMumt8LKD4pR1SsOxFB8dJCmn/f/tvRGtIVsoY6btNe4M0+24
# 42MxZbWO6W379C32bwbtsPiGA+aLSgShjP4cWm9cgRjz4RJFnwg=
# =vmIG
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 04 Nov 2024 00:15:35 GMT
# gpg:                using RSA key 4E437DDA56616F4329B0A79567B30276A8621CAE
# gpg: Good signature from "Nicholas Piggin <npiggin@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 4E43 7DDA 5661 6F43 29B0  A795 67B3 0276 A862 1CAE

* tag 'pull-ppc-for-9.2-1-20241104' of https://gitlab.com/npiggin/qemu: (67 commits)
  MAINTAINERS: Remove myself as reviewer
  MAINTAINERS: Remove myself from XIVE
  MAINTAINERS: Remove myself from the PowerNV machines
  hw/ppc: Consolidate ppc440 initial mapping creation functions
  hw/ppc: Consolidate e500 initial mapping creation functions
  tests/qtest: Add XIVE tests for the powernv10 machine
  pnv/xive2: TIMA CI ops using alternative offsets or byte lengths
  pnv/xive2: TIMA support for 8-byte OS context push for PHYP
  pnv/xive: Update PIPR when updating CPPR
  pnv/xive: Add special handling for pool targets
  ppc/xive2: Support "Pull Thread Context to Odd Thread Reporting Line"
  ppc/xive2: Change context/ring specific functions to be generic
  ppc/xive2: Support "Pull Thread Context to Register" operation
  ppc/xive2: Allow 1-byte write of Target field in TIMA
  ppc/xive2: Dump the VP-group and crowd tables with 'info pic'
  ppc/xive2: Dump more NVP state with 'info pic'
  pnv/xive2: Support for "OS LGS Push" TIMA operation
  ppc/xive2: Support TIMA "Pull OS Context to Odd Thread Reporting Line"
  pnv/xive2: Define OGEN field in the TIMA
  pnv/xive: TIMA patch sets pre-req alignment and formatting changes
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-11-05 10:05:59 +00:00
commit 6b829602e2
54 changed files with 1949 additions and 1228 deletions


@ -1473,7 +1473,6 @@ F: tests/functional/test_ppc_40p.py
sPAPR (pseries)
M: Nicholas Piggin <npiggin@gmail.com>
R: Daniel Henrique Barboza <danielhb413@gmail.com>
R: David Gibson <david@gibson.dropbear.id.au>
R: Harsh Prateek Bora <harshpb@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
@ -1497,7 +1496,6 @@ F: tests/functional/test_ppc64_hv.py
F: tests/functional/test_ppc64_tuxrun.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
M: Nicholas Piggin <npiggin@gmail.com>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
@ -1507,8 +1505,10 @@ F: hw/ppc/pnv*
F: hw/intc/pnv*
F: hw/intc/xics_pnv.c
F: hw/pci-host/pnv*
F: hw/ssi/pnv_spi.c
F: include/hw/ppc/pnv*
F: include/hw/pci-host/pnv*
F: include/hw/ssi/pnv_spi*
F: pc-bios/skiboot.lid
F: tests/qtest/pnv*
F: tests/functional/test_ppc64_powernv.py
@ -1563,7 +1563,6 @@ F: tests/functional/test_ppc_amiga.py
Virtual Open Firmware (VOF)
M: Alexey Kardashevskiy <aik@ozlabs.ru>
R: David Gibson <david@gibson.dropbear.id.au>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/spapr_vof*
@ -2639,12 +2638,12 @@ F: tests/qtest/fw_cfg-test.c
T: git https://github.com/philmd/qemu.git fw_cfg-next
XIVE
M: Cédric Le Goater <clg@kaod.org>
R: Frédéric Barrat <fbarrat@linux.ibm.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/*/*xive*
F: include/hw/*/*xive*
F: tests/qtest/*xive*
F: docs/*/*xive*
Renesas peripherals


@ -255,14 +255,6 @@ These old machine types are quite neglected nowadays and thus might have
various pitfalls with regards to live migration. Use a newer machine type
instead.
``pseries-2.1`` up to ``pseries-2.12`` (since 9.0)
''''''''''''''''''''''''''''''''''''''''''''''''''
Older pseries machines before version 3.0 have undergone many changes
to correct issues, mostly regarding migration compatibility. These are
no longer maintained and removing them will make the code easier to
read and maintain. Use versions 3.0 and above as a replacement.
PPC 405 ``ref405ep`` machine (since 9.1)
''''''''''''''''''''''''''''''''''''''''


@ -14,10 +14,19 @@ virtualization capabilities.
Supported devices
=================
* Multi processor support for many Power processors generations: POWER7,
POWER7+, POWER8, POWER8NVL, POWER9, and Power10. Support for POWER5+ exists,
but its state is unknown.
* Interrupt Controller, XICS (POWER8) and XIVE (POWER9 and Power10)
* Multi processor support for many Power processors generations:
- POWER7, POWER7+
- POWER8, POWER8NVL
- POWER9
- Power10
- Power11
- Support for POWER5+ also exists, works with correct kernel/userspace
* Interrupt Controller
- XICS (POWER8)
- XIVE (Supported by below:)
- POWER9
- Power10
- Power11
* vPHB PCIe Host bridge.
* vscsi and vnet devices, compatible with the same devices available on a
PowerVM hypervisor with VIOS managing LPARs.


@ -281,33 +281,6 @@ GlobalProperty hw_compat_2_4[] = {
};
const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4);
GlobalProperty hw_compat_2_3[] = {
{ "virtio-blk-pci", "any_layout", "off" },
{ "virtio-balloon-pci", "any_layout", "off" },
{ "virtio-serial-pci", "any_layout", "off" },
{ "virtio-9p-pci", "any_layout", "off" },
{ "virtio-rng-pci", "any_layout", "off" },
{ TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" },
{ "migration", "send-configuration", "off" },
{ "migration", "send-section-footer", "off" },
{ "migration", "store-global-state", "off" },
};
const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3);
GlobalProperty hw_compat_2_2[] = {};
const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2);
GlobalProperty hw_compat_2_1[] = {
{ "intel-hda", "old_msi_addr", "on" },
{ "VGA", "qemu-extended-regs", "off" },
{ "secondary-vga", "qemu-extended-regs", "off" },
{ "virtio-scsi-pci", "any_layout", "off" },
{ "usb-mouse", "usb_version", "1" },
{ "usb-kbd", "usb_version", "1" },
{ "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" },
};
const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1);
MachineState *current_machine;
static char *machine_get_kernel(Object *obj, Error **errp)


@ -490,6 +490,23 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
word_number);
}
static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t blk, uint32_t idx,
Xive2Nvgc *nvgc)
{
return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
blk, idx, nvgc);
}
static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t blk, uint32_t idx,
Xive2Nvgc *nvgc)
{
return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG,
blk, idx, nvgc,
XIVE_VST_WORD_ALL);
}
static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
switch (nxc_type) {
@ -2407,6 +2424,8 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data)
xrc->write_end = pnv_xive2_write_end;
xrc->get_nvp = pnv_xive2_get_nvp;
xrc->write_nvp = pnv_xive2_write_nvp;
xrc->get_nvgc = pnv_xive2_get_nvgc;
xrc->write_nvgc = pnv_xive2_write_nvgc;
xrc->get_config = pnv_xive2_get_config;
xrc->get_block_id = pnv_xive2_get_block_id;
@ -2497,8 +2516,9 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
Xive2Eas eas;
Xive2End end;
Xive2Nvp nvp;
Xive2Nvgc nvgc;
int i;
uint64_t xive_nvp_per_subpage;
uint64_t entries_per_subpage;
g_string_append_printf(buf, "XIVE[%x] Source %08x .. %08x\n",
blk, srcno0, srcno0 + nr_esbs - 1);
@ -2530,10 +2550,28 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf)
g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n",
chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
xive2_nvp_pic_print_info(&nvp, i++, buf);
}
}
g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n",
chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG);
for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) {
xive2_nvgc_pic_print_info(&nvgc, i++, buf);
}
}
g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n",
chip_id, blk, 0, XIVE2_NVP_COUNT - 1);
entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC);
for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) {
while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) {
xive2_nvgc_pic_print_info(&nvgc, i++, buf);
}
}
}


@ -720,7 +720,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc = &xive->source;
size_t esb_len = xive_source_esb_len(xsrc);
uint64_t esb_len = xive_source_esb_len(xsrc);
size_t tima_len = 4ull << TM_SHIFT;
CPUState *cs;
int fd;
@ -824,7 +824,7 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
SpaprXive *xive = SPAPR_XIVE(intc);
XiveSource *xsrc;
size_t esb_len;
uint64_t esb_len;
assert(xive->fd != -1);


@ -335,22 +335,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
return;
}
}
/*
* The way that pre_2_10_icp is handling is really, really hacky.
* We used to have here this call:
*
* vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
*
* But we were doing:
* pre_2_10_vmstate_register_dummy_icp()
* this vmstate_register()
* pre_2_10_vmstate_unregister_dummy_icp()
*
* So for a short amount of time we had to vmstate entries with
* the same name. This fixes it.
*/
vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index,
&vmstate_icp_server, icp);
}
static void icp_unrealize(DeviceState *dev)


@ -74,33 +74,48 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
if (regs[TM_NSR] & mask) {
uint8_t cppr = regs[TM_PIPR];
uint8_t alt_ring;
uint8_t *alt_regs;
/* POOL interrupt uses IPB in QW2, POOL ring */
if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) {
alt_ring = TM_QW2_HV_POOL;
} else {
alt_ring = ring;
}
alt_regs = &tctx->regs[alt_ring];
regs[TM_CPPR] = cppr;
/* Reset the pending buffer bit */
regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
/* Drop Exception bit */
regs[TM_NSR] &= ~mask;
trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
regs[TM_IPB], regs[TM_PIPR],
trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
alt_regs[TM_IPB], regs[TM_PIPR],
regs[TM_CPPR], regs[TM_NSR]);
}
return (nsr << 8) | regs[TM_CPPR];
return ((uint64_t)nsr << 8) | regs[TM_CPPR];
}
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
/* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
uint8_t *alt_regs = &tctx->regs[alt_ring];
uint8_t *regs = &tctx->regs[ring];
if (regs[TM_PIPR] < regs[TM_CPPR]) {
if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
switch (ring) {
case TM_QW1_OS:
regs[TM_NSR] |= TM_QW1_NSR_EO;
break;
case TM_QW2_HV_POOL:
alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6);
break;
case TM_QW3_HV_PHYS:
regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
break;
@ -108,26 +123,27 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
g_assert_not_reached();
}
trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
regs[TM_IPB], regs[TM_PIPR],
regs[TM_CPPR], regs[TM_NSR]);
regs[TM_IPB], alt_regs[TM_PIPR],
alt_regs[TM_CPPR], alt_regs[TM_NSR]);
qemu_irq_raise(xive_tctx_output(tctx, ring));
}
}
void xive_tctx_reset_os_signal(XiveTCTX *tctx)
void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
{
/*
* Lower the External interrupt. Used when pulling an OS
* context. It is necessary to avoid catching it in the hypervisor
* context. It should be raised again when re-pushing the OS
* context.
* Lower the External interrupt. Used when pulling a context. It is
* necessary to avoid catching it in the higher privilege context. It
* should be raised again when re-pushing the lower privilege context.
*/
qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS));
qemu_irq_lower(xive_tctx_output(tctx, ring));
}
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
uint8_t *regs = &tctx->regs[ring];
uint8_t pipr_min;
uint8_t ring_min;
trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
regs[TM_IPB], regs[TM_PIPR],
@ -139,8 +155,37 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
tctx->regs[ring + TM_CPPR] = cppr;
/*
* Recompute the PIPR based on local pending interrupts. The PHYS
* ring must take the minimum of both the PHYS and POOL PIPR values.
*/
pipr_min = ipb_to_pipr(regs[TM_IPB]);
ring_min = ring;
/* PHYS updates also depend on POOL values */
if (ring == TM_QW3_HV_PHYS) {
uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL];
/* POOL values only matter if POOL ctx is valid */
if (pool_regs[TM_WORD2] & 0x80) {
uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]);
/*
* Determine highest priority interrupt and
* remember which ring has it.
*/
if (pool_pipr < pipr_min) {
pipr_min = pool_pipr;
ring_min = TM_QW2_HV_POOL;
}
}
}
regs[TM_PIPR] = pipr_min;
/* CPPR has changed, check if we need to raise a pending exception */
xive_tctx_notify(tctx, ring);
xive_tctx_notify(tctx, ring_min);
}
void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
@ -179,6 +224,17 @@ static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
return qw2w2;
}
static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size)
{
uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
uint8_t qw3b8;
qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
return qw3b8;
}
static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
@ -207,14 +263,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
static const uint8_t xive_tm_hw_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};
static const uint8_t xive_tm_hv_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};
@ -341,6 +397,19 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}
static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs)
{
uint8_t *regs = &tctx->regs[ring];
regs[TM_LGS] = lgs;
}
static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff);
}
/*
* Adjust the IPB to allow a CPU to process event queues of other
* priorities during one physical interrupt cycle.
@ -400,7 +469,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
xive_tctx_set_os_cam(tctx, qw1w2_new);
xive_tctx_reset_os_signal(tctx);
xive_tctx_reset_signal(tctx, TM_QW1_OS);
return qw1w2;
}
@ -488,20 +557,34 @@ static const XiveTmOp xive_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
xive_tm_vt_poll },
/* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
xive_tm_pull_phys_ctx },
};
static const XiveTmOp xive2_tm_operations[] = {
@ -509,20 +592,50 @@ static const XiveTmOp xive2_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
NULL },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
xive_tm_vt_poll },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
NULL },
/* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
xive_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
xive_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
NULL },
};
static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
@ -718,6 +831,10 @@ void xive_tctx_reset(XiveTCTX *tctx)
tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
if (!(xive_presenter_get_config(tctx->xptr) &
XIVE_PRESENTER_GEN1_TIMA_OS)) {
tctx->regs[TM_QW1_OS + TM_OGEN] = 2;
}
/*
* Initialize PIPR to 0xFF to avoid phantom interrupts when the
@ -1242,7 +1359,7 @@ static void xive_source_reset(void *dev)
static void xive_source_realize(DeviceState *dev, Error **errp)
{
XiveSource *xsrc = XIVE_SOURCE(dev);
size_t esb_len = xive_source_esb_len(xsrc);
uint64_t esb_len = xive_source_esb_len(xsrc);
assert(xsrc->xive);


@ -26,6 +26,43 @@ uint32_t xive2_router_get_config(Xive2Router *xrtr)
return xrc->get_config(xrtr);
}
static int xive2_router_get_block_id(Xive2Router *xrtr)
{
Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
return xrc->get_block_id(xrtr);
}
static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
{
uint64_t cache_addr;
cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
cache_addr <<= 8; /* aligned on a cache line pair */
return cache_addr;
}
static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
{
uint32_t val = 0;
uint8_t *ptr, i;
if (priority > 7) {
return 0;
}
/*
* The per-priority backlog counters are 24-bit and the structure
* is stored in big endian
*/
ptr = (uint8_t *)&nvgc->w2 + priority * 3;
for (i = 0; i < 3; i++, ptr++) {
val = (val << 8) + *ptr;
}
return val;
}
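
A quick worked illustration of the byte layout this helper assumes (an editorial sketch, not part of the change):

    /*
     * Example: for priority 2, ptr starts at byte offset 2 * 3 = 6 from
     * &nvgc->w2.  With w2, w3 and w4 stored as consecutive big-endian
     * words, that counter straddles the last two bytes of w3 and the
     * first byte of w4, and the loop above assembles the 24-bit value
     * most-significant byte first.
     */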
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
if (!xive2_eas_is_valid(eas)) {
@ -144,14 +181,20 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
uint64_t cache_line = xive2_nvp_reporting_addr(nvp);
if (!xive2_nvp_is_valid(nvp)) {
return;
}
g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x",
g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
nvp_idx, eq_blk, eq_idx,
xive_get_field32(NVP2_W2_IPB, nvp->w2));
xive_get_field32(NVP2_W2_IPB, nvp->w2),
xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
if (cache_line) {
g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line);
}
/*
* When the NVP is HW controlled, more fields are updated
*/
@ -166,6 +209,23 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
g_string_append_c(buf, '\n');
}
void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
uint8_t i;
if (!xive2_nvgc_is_valid(nvgc)) {
return;
}
g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx,
xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
g_string_append_printf(buf, "[%d]=0x%x ",
i, xive2_nvgc_get_backlog(nvgc, i));
}
g_string_append_printf(buf, "\n");
}
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
uint64_t qaddr_base = xive2_end_qaddr(end);
@ -210,13 +270,14 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
* the NVP by changing the H bit while the context is enabled
*/
static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
uint8_t nvp_blk, uint32_t nvp_idx)
static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
uint8_t nvp_blk, uint32_t nvp_idx,
uint8_t ring)
{
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
Xive2Nvp nvp;
uint8_t *regs = &tctx->regs[TM_QW1_OS];
uint8_t *regs = &tctx->regs[ring];
if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
@ -261,44 +322,190 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}
static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
uint32_t *nvp_idx, bool *vo, bool *ho)
static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
uint32_t *nvp_idx, bool *valid, bool *hw)
{
*nvp_blk = xive2_nvp_blk(cam);
*nvp_idx = xive2_nvp_idx(cam);
*vo = !!(cam & TM2_QW1W2_VO);
*ho = !!(cam & TM2_QW1W2_HO);
*valid = !!(cam & TM2_W2_VALID);
*hw = !!(cam & TM2_W2_HW);
}
/*
* Encode the HW CAM line with 7bit or 8bit thread id. The thread id
* width and block id width is configurable at the IC level.
*
* chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
* chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
*/
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
Xive2Router *xrtr = XIVE2_ROUTER(xptr);
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
uint8_t blk = xive2_router_get_block_id(xrtr);
uint8_t tid_shift =
xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
uint8_t tid_mask = (1 << tid_shift) - 1;
return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}
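
To make the encoding comment above concrete, a worked example (hypothetical values, assuming the IC is configured for 8-bit thread IDs):

    /*
     * Example: blk = 2, PIR = 0x23, so tid_shift = 8 and tid_mask = 0xff.
     * The index handed to xive2_nvp_cam_line() is then
     * 1 << 8 | (0x23 & 0xff) = 0x123: the single bit set just above the
     * thread ID marks this as a HW CAM line, as sketched above.
     */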
static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size, uint8_t ring)
{
Xive2Router *xrtr = XIVE2_ROUTER(xptr);
uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
uint32_t cam = be32_to_cpu(target_ringw2);
uint8_t nvp_blk;
uint32_t nvp_idx;
uint8_t cur_ring;
bool valid;
bool do_save;
xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);
if (!valid) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
nvp_blk, nvp_idx);
}
/* Invalidate CAM line of requested ring and all lower rings */
for (cur_ring = TM_QW0_USER; cur_ring <= ring;
cur_ring += XIVE_TM_RING_SIZE) {
uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
}
if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
}
/*
* Lower external interrupt line of requested ring and below except for
* USER, which doesn't exist.
*/
for (cur_ring = TM_QW1_OS; cur_ring <= ring;
cur_ring += XIVE_TM_RING_SIZE) {
xive_tctx_reset_signal(tctx, cur_ring);
}
return target_ringw2;
}
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size)
{
return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
}
#define REPORT_LINE_GEN1_SIZE 16
static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
uint8_t size)
{
uint8_t *regs = tctx->regs;
g_assert(size == REPORT_LINE_GEN1_SIZE);
memset(data, 0, size);
/*
* See xive architecture for description of what is saved. It is
* hand-picked information to fit in 16 bytes.
*/
data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
data[0x6] = 0xFF;
data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);
data[0x8] = regs[TM_QW1_OS + TM_NSR];
data[0x9] = regs[TM_QW1_OS + TM_CPPR];
data[0xA] = regs[TM_QW1_OS + TM_IPB];
data[0xB] = regs[TM_QW1_OS + TM_LGS];
if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
/*
* Logical server extension, except VU bit replaced by EB bit
* from NSR
*/
data[0xC] = regs[TM_QW0_USER + TM_WORD2];
data[0xC] &= ~0x80;
data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
}
}
static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value,
unsigned size, uint8_t ring)
{
Xive2Router *xrtr = XIVE2_ROUTER(xptr);
uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
uint32_t qw1w2_new;
uint32_t cam = be32_to_cpu(qw1w2);
uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
uint8_t nvp_blk;
uint32_t nvp_idx;
bool vo;
bool do_save;
Xive2Nvp nvp;
uint64_t phys_addr;
MemTxResult result;
xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);
hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
nvp_blk = xive2_nvp_blk(hw_cam);
nvp_idx = xive2_nvp_idx(hw_cam);
if (!vo) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
nvp_blk, nvp_idx);
return;
}
/* Invalidate CAM line */
qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);
if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
if (!xive2_nvp_is_valid(&nvp)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
nvp_blk, nvp_idx);
return;
}
xive_tctx_reset_os_signal(tctx);
return qw1w2;
xive2_cfg = xive2_router_get_config(xrtr);
phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];
xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
result = dma_memory_write(&address_space_memory, phys_addr,
pull_ctxt, REPORT_LINE_GEN1_SIZE,
MEMTXATTRS_UNSPECIFIED);
assert(result == MEMTX_OK);
} else {
result = dma_memory_write(&address_space_memory, phys_addr,
&tctx->regs, sizeof(tctx->regs),
MEMTXATTRS_UNSPECIFIED);
assert(result == MEMTX_OK);
reserved = 0xFFFFFFFF;
result = dma_memory_write(&address_space_memory, phys_addr + 12,
&reserved, sizeof(reserved),
MEMTXATTRS_UNSPECIFIED);
assert(result == MEMTX_OK);
}
/* the rest is similar to pull context to registers */
xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
}
void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
}
void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}
static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
@ -390,17 +597,31 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
uint32_t cam = value;
uint32_t qw1w2 = cpu_to_be32(cam);
uint32_t cam;
uint32_t qw1w2;
uint64_t qw1dw1;
uint8_t nvp_blk;
uint32_t nvp_idx;
bool vo;
bool do_restore;
xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
/* First update the thread context */
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
switch (size) {
case 4:
cam = value;
qw1w2 = cpu_to_be32(cam);
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
break;
case 8:
cam = value >> 32;
qw1dw1 = cpu_to_be64(value);
memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
break;
default:
g_assert_not_reached();
}
xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
/* Check the interrupt pending bits */
if (vo) {
@ -409,6 +630,19 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
}
}
static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
{
uint8_t *regs = &tctx->regs[ring];
regs[TM_T] = target;
}
void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
}
/*
* XIVE Router (aka. Virtualization Controller or IVRE)
*/
@ -471,31 +705,22 @@ int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}
static int xive2_router_get_block_id(Xive2Router *xrtr)
int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc)
{
Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
return xrc->get_block_id(xrtr);
return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}
/*
* Encode the HW CAM line with 7bit or 8bit thread id. The thread id
* width and block id width is configurable at the IC level.
*
* chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
* chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
*/
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc)
{
Xive2Router *xrtr = XIVE2_ROUTER(xptr);
CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
uint32_t pir = env->spr_cb[SPR_PIR].default_value;
uint8_t blk = xive2_router_get_block_id(xrtr);
uint8_t tid_shift =
xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
uint8_t tid_mask = (1 << tid_shift) - 1;
Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}
/*


@ -721,11 +721,21 @@ static int ppce500_prep_device_tree(PPCE500MachineState *machine,
kernel_base, kernel_size, true);
}
hwaddr booke206_page_size_to_tlb(uint64_t size)
static hwaddr booke206_page_size_to_tlb(uint64_t size)
{
return 63 - clz64(size / KiB);
}
void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa,
hwaddr len)
{
tlb->mas1 = booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT;
tlb->mas1 |= MAS1_VALID;
tlb->mas2 = va & TARGET_PAGE_MASK;
tlb->mas7_3 = pa & TARGET_PAGE_MASK;
tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
}
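
For illustration, a minimal sketch of how the consolidated helpers compose (a hypothetical caller, assuming a CPUPPCState *env in scope and MiB from qemu/units.h; it mirrors what ppce500_cpu_reset() does further down):

    /*
     * Map 64 MiB at address 0 through TLB1 entry 0.  64 MiB is 2^16 KiB,
     * so booke206_page_size_to_tlb() returns 63 - clz64(65536) = 16 for
     * the MAS1 TSIZE field.
     */
    ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);

    booke206_set_tlb(tlb, 0, 0, 64 * MiB);
#ifdef CONFIG_KVM
    env->tlb_dirty = true;   /* the QEMU-side TLB changed, as in ppce500_cpu_reset() */
#endif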
static int booke206_initial_map_tsize(CPUPPCState *env)
{
struct boot_info *bi = env->load_info;
@ -751,25 +761,6 @@ static uint64_t mmubooke_initial_mapsize(CPUPPCState *env)
return (1ULL << 10 << tsize);
}
/* Create -kernel TLB entries for BookE. */
static void mmubooke_create_initial_mapping(CPUPPCState *env)
{
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);
hwaddr size;
int ps;
ps = booke206_initial_map_tsize(env);
size = (ps << MAS1_TSIZE_SHIFT);
tlb->mas1 = MAS1_VALID | size;
tlb->mas2 = 0;
tlb->mas7_3 = 0;
tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
#ifdef CONFIG_KVM
env->tlb_dirty = true;
#endif
}
static void ppce500_cpu_reset_sec(void *opaque)
{
PowerPCCPU *cpu = opaque;
@ -786,6 +777,8 @@ static void ppce500_cpu_reset(void *opaque)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
struct boot_info *bi = env->load_info;
uint64_t map_size = mmubooke_initial_mapsize(env);
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);
cpu_reset(cs);
@ -796,11 +789,15 @@ static void ppce500_cpu_reset(void *opaque)
env->gpr[4] = 0;
env->gpr[5] = 0;
env->gpr[6] = EPAPR_MAGIC;
env->gpr[7] = mmubooke_initial_mapsize(env);
env->gpr[7] = map_size;
env->gpr[8] = 0;
env->gpr[9] = 0;
env->nip = bi->entry;
mmubooke_create_initial_mapping(env);
/* create initial mapping */
booke206_set_tlb(tlb, 0, 0, map_size);
#ifdef CONFIG_KVM
env->tlb_dirty = true;
#endif
}
static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms,


@ -41,8 +41,6 @@ struct PPCE500MachineClass {
void ppce500_init(MachineState *machine);
hwaddr booke206_page_size_to_tlb(uint64_t size);
#define TYPE_PPCE500_MACHINE "ppce500-base-machine"
OBJECT_DECLARE_TYPE(PPCE500MachineState, PPCE500MachineClass, PPCE500_MACHINE)


@ -736,21 +736,27 @@ static void pnv_reset(MachineState *machine, ResetType type)
}
}
fdt = pnv_dt_create(machine);
/* Pack resulting tree */
_FDT((fdt_pack(fdt)));
if (machine->fdt) {
fdt = machine->fdt;
} else {
fdt = pnv_dt_create(machine);
/* Pack resulting tree */
_FDT((fdt_pack(fdt)));
}
qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt));
/*
* Set machine->fdt for 'dumpdtb' QMP/HMP command. Free
* the existing machine->fdt to avoid leaking it during
* a reset.
*/
g_free(machine->fdt);
machine->fdt = fdt;
/* Update machine->fdt with latest fdt */
if (machine->fdt != fdt) {
/*
* Set machine->fdt for 'dumpdtb' QMP/HMP command. Free
* the existing machine->fdt to avoid leaking it during
* a reset.
*/
g_free(machine->fdt);
machine->fdt = fdt;
}
}
static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp)
@ -952,6 +958,14 @@ static void pnv_init(MachineState *machine)
g_free(sz);
exit(EXIT_FAILURE);
}
/* checks for invalid option combinations */
if (machine->dtb && (strlen(machine->kernel_cmdline) != 0)) {
error_report("-append and -dtb cannot be used together, as passed"
" command line is ignored in case of custom dtb");
exit(EXIT_FAILURE);
}
memory_region_add_subregion(get_system_memory(), 0, machine->ram);
/*
@ -1003,6 +1017,21 @@ static void pnv_init(MachineState *machine)
}
}
/* load dtb if passed */
if (machine->dtb) {
int fdt_size;
warn_report("with manually passed dtb, some options like '-append'"
" will get ignored and the dtb passed will be used as-is");
/* read the file 'machine->dtb', and load it into 'fdt' buffer */
machine->fdt = load_device_tree(machine->dtb, &fdt_size);
if (!machine->fdt) {
error_report("Could not load dtb '%s'", machine->dtb);
exit(1);
}
}
/* MSIs are supported on this platform */
msi_nonbroken = true;


@ -116,6 +116,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val,
uint32_t lpc_size = lpc_cmd_size(adu);
uint64_t data = 0;
if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) {
qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access "
"size:%" PRId32 "\n", lpc_size);
break;
}
pnv_lpc_opb_read(adu->lpc, lpc_addr, (void *)&data, lpc_size);
/*
@ -135,6 +141,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val,
uint32_t lpc_size = lpc_cmd_size(adu);
uint64_t data;
if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) {
qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access "
"size:%" PRId32 "\n", lpc_size);
break;
}
data = cpu_to_be64(val) >> ((lpc_addr & 7) * 8); /* See above */
pnv_lpc_opb_write(adu->lpc, lpc_addr, (void *)&data, lpc_size);
}


@ -427,21 +427,27 @@ static void pnv_lpc_eval_serirq_routes(PnvLpcController *lpc)
int irq;
if (!lpc->psi_has_serirq) {
if ((lpc->opb_irq_route0 & PPC_BITMASK(8, 13)) ||
(lpc->opb_irq_route1 & PPC_BITMASK(4, 31))) {
if ((lpc->opb_irq_route0 & PPC_BITMASK32(8, 13)) ||
(lpc->opb_irq_route1 & PPC_BITMASK32(4, 31))) {
qemu_log_mask(LOG_GUEST_ERROR,
"OPB: setting serirq routing on POWER8 system, ignoring.\n");
}
return;
}
/*
* Each of the ISA irqs is routed to one of the 4 SERIRQ irqs with 2
* bits, split across 2 OPB registers.
*/
for (irq = 0; irq <= 13; irq++) {
int serirq = (lpc->opb_irq_route1 >> (31 - 5 - (irq * 2))) & 0x3;
int serirq = extract32(lpc->opb_irq_route1,
PPC_BIT32_NR(5 + irq * 2), 2);
lpc->irq_to_serirq_route[irq] = serirq;
}
for (irq = 14; irq < ISA_NUM_IRQS; irq++) {
int serirq = (lpc->opb_irq_route0 >> (31 - 9 - (irq * 2))) & 0x3;
int serirq = extract32(lpc->opb_irq_route0,
PPC_BIT32_NR(9 + (irq - 14) * 2), 2);
lpc->irq_to_serirq_route[irq] = serirq;
}
}


@ -728,7 +728,9 @@ static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now,
int64_t decr;
n = ns_to_tb(tb_env->decr_freq, now);
if (next > n && tb_env->flags & PPC_TIMER_BOOKE) {
/* BookE timers stop when reaching 0. */
if (next < n && tb_env->flags & PPC_TIMER_BOOKE) {
decr = 0;
} else {
decr = next - n;


@ -110,29 +110,6 @@ static int bamboo_load_device_tree(MachineState *machine,
return 0;
}
/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
static void mmubooke_create_initial_mapping(CPUPPCState *env,
target_ulong va,
hwaddr pa)
{
ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 1U << 31; /* up to 0x80000000 */
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
tlb = &env->tlb.tlbe[1];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 1U << 31; /* up to 0xffffffff */
tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->PID = 0;
}
static void main_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
@ -143,8 +120,9 @@ static void main_cpu_reset(void *opaque)
env->gpr[3] = FDT_ADDR;
env->nip = entry;
/* Create a mapping for the kernel. */
mmubooke_create_initial_mapping(env, 0, 0);
/* Create a mapping spanning the 32bit addr space. */
booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
}
static void bamboo_init(MachineState *machine)


@ -31,6 +31,16 @@
#include "hw/loader.h"
#include "kvm_ppc.h"
void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa,
target_ulong size)
{
tlb->attr = 0;
tlb->prot = PAGE_RWX << 4 | PAGE_VALID;
tlb->size = size;
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
}
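
A minimal usage sketch (a hypothetical reset hook, assuming a CPUPPCState *env in scope; it mirrors the bamboo and sam460ex reset paths elsewhere in this series). Unlike the booke206 MAS encoding, the embedded ppcemb_tlb_t stores the mapping size in bytes directly:

    /* Identity-map the low 2 GiB of the address space through TLB entry 0 */
    booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);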
/* Timer Control Register */


@ -33,6 +33,7 @@
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "sysemu/hw_accel.h"
#include "hw/ppc/ppc.h"
#include "e500.h"
#include "qom/object.h"
@ -70,30 +71,12 @@ static void spin_reset(DeviceState *dev)
}
}
static void mmubooke_create_initial_mapping(CPUPPCState *env,
target_ulong va,
hwaddr pa,
hwaddr len)
{
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);
hwaddr size;
size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT);
tlb->mas1 = MAS1_VALID | size;
tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M;
tlb->mas7_3 = pa & TARGET_PAGE_MASK;
tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
#ifdef CONFIG_KVM
env->tlb_dirty = true;
#endif
}
static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
CPUPPCState *env = cpu_env(cs);
SpinInfo *curspin = data.host_ptr;
hwaddr map_size = 64 * MiB;
hwaddr map_start;
hwaddr map_start, map_size = 64 * MiB;
ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);
cpu_synchronize_state(cs);
stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
@ -107,7 +90,12 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data)
env->gpr[9] = 0;
map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
mmubooke_create_initial_mapping(env, 0, map_start, map_size);
/* create initial mapping */
booke206_set_tlb(tlb, 0, map_start, map_size);
tlb->mas2 |= MAS2_M;
#ifdef CONFIG_KVM
env->tlb_dirty = true;
#endif
cs->halted = 0;
cs->exception_index = -1;


@ -213,38 +213,6 @@ static int sam460ex_load_device_tree(MachineState *machine,
return fdt_size;
}
/* Create reset TLB entries for BookE, mapping only the flash memory. */
static void mmubooke_create_initial_mapping_uboot(CPUPPCState *env)
{
ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
/* on reset the flash is mapped by a shadow TLB,
* but since we don't implement them we need to use
* the same values U-Boot will use to avoid a fault.
*/
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 0x10000000; /* up to 0xffffffff */
tlb->EPN = 0xf0000000 & TARGET_PAGE_MASK;
tlb->RPN = (0xf0000000 & TARGET_PAGE_MASK) | 0x4;
tlb->PID = 0;
}
/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
static void mmubooke_create_initial_mapping(CPUPPCState *env,
target_ulong va,
hwaddr pa)
{
ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 1 << 31; /* up to 0x80000000 */
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
}
static void main_cpu_reset(void *opaque)
{
PowerPCCPU *cpu = opaque;
@ -253,20 +221,27 @@ static void main_cpu_reset(void *opaque)
cpu_reset(CPU(cpu));
/* either we have a kernel to boot or we jump to U-Boot */
/*
* On reset the flash is mapped by a shadow TLB, but since we
* don't implement them we need to use the same values U-Boot
* will use to avoid a fault.
* either we have a kernel to boot or we jump to U-Boot
*/
if (bi->entry != UBOOT_ENTRY) {
env->gpr[1] = (16 * MiB) - 8;
env->gpr[3] = FDT_ADDR;
env->nip = bi->entry;
/* Create a mapping for the kernel. */
mmubooke_create_initial_mapping(env, 0, 0);
booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1 << 31);
env->gpr[6] = tswap32(EPAPR_MAGIC);
env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */
} else {
env->nip = UBOOT_ENTRY;
mmubooke_create_initial_mapping_uboot(env);
/* Create a mapping for U-Boot. */
booke_set_tlb(&env->tlb.tlbe[0], 0xf0000000, 0xf0000000, 0x10000000);
env->tlb.tlbe[0].RPN |= 4;
}
}


@ -132,61 +132,6 @@ static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}
static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
/* Dummy entries correspond to unused ICPState objects in older QEMUs,
* and newer QEMUs don't even have them. In both cases, we don't want
* to send anything on the wire.
*/
return false;
}
static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
/*
* Hack ahead. We can't have two devices with the same name and
* instance id. So I rename this to pass make check.
* Real help from people who knows the hardware is needed.
*/
.name = "icp/server",
.version_id = 1,
.minimum_version_id = 1,
.needed = pre_2_10_vmstate_dummy_icp_needed,
.fields = (const VMStateField[]) {
VMSTATE_UNUSED(4), /* uint32_t xirr */
VMSTATE_UNUSED(1), /* uint8_t pending_priority */
VMSTATE_UNUSED(1), /* uint8_t mfrr */
VMSTATE_END_OF_LIST()
},
};
/*
* See comment in hw/intc/xics.c:icp_realize()
*
* You have to remove vmstate_replace_hack_for_ppc() when you remove
* the machine types that need the following function.
*/
static void pre_2_10_vmstate_register_dummy_icp(int i)
{
vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
(void *)(uintptr_t) i);
}
/*
* See comment in hw/intc/xics.c:icp_realize()
*
* You have to remove vmstate_replace_hack_for_ppc() when you remove
* the machine types that need the following function.
*/
static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
/*
* This used to be:
*
* vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
* (void *)(uintptr_t) i);
*/
}
int spapr_max_server_number(SpaprMachineState *spapr)
{
MachineState *ms = MACHINE(spapr);
@ -682,7 +627,6 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr,
static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
{
MachineState *machine = MACHINE(spapr);
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
hwaddr mem_start, node_size;
int i, nb_nodes = machine->numa_state->num_nodes;
NodeInfo *nodes = machine->numa_state->nodes;
@ -724,7 +668,6 @@ static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt)
if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) {
int ret;
g_assert(smc->dr_lmb_enabled);
ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt);
if (ret) {
return ret;
@ -1307,9 +1250,7 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space)
spapr_dt_cpus(fdt, spapr);
/* ibm,drc-indexes and friends */
if (smc->dr_lmb_enabled) {
root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
}
root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB;
if (smc->dr_phb_enabled) {
root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB;
}
@ -2715,7 +2656,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
MachineClass *mc = MACHINE_GET_CLASS(machine);
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
const char *type = spapr_get_cpu_core_type(machine->cpu_type);
const CPUArchIdList *possible_cpus;
unsigned int smp_cpus = machine->smp.cpus;
@ -2744,15 +2684,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr)
boot_cores_nr = possible_cpus->len;
}
if (smc->pre_2_10_has_unused_icps) {
for (i = 0; i < spapr_max_server_number(spapr); i++) {
/* Dummy entries get deregistered when real ICPState objects
* are registered during CPU core hotplug.
*/
pre_2_10_vmstate_register_dummy_icp(i);
}
}
for (i = 0; i < possible_cpus->len; i++) {
int core_id = i * smp_threads;
@ -2929,10 +2860,8 @@ static void spapr_machine_init(MachineState *machine)
spapr->ov5 = spapr_ovec_new();
spapr->ov5_cas = spapr_ovec_new();
if (smc->dr_lmb_enabled) {
spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
spapr_validate_node_memory(machine, &error_fatal);
}
spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
spapr_validate_node_memory(machine, &error_fatal);
spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
@ -3016,9 +2945,7 @@ static void spapr_machine_init(MachineState *machine)
machine_memory_devices_init(machine, device_mem_base, device_mem_size);
}
if (smc->dr_lmb_enabled) {
spapr_create_lmb_dr_connectors(spapr);
}
spapr_create_lmb_dr_connectors(spapr);
if (mc->nvdimm_supported) {
spapr_create_nvdimm_dr_connectors(spapr);
@ -3078,11 +3005,7 @@ static void spapr_machine_init(MachineState *machine)
}
if (machine->usb) {
if (smc->use_ohci_by_default) {
pci_create_simple(phb->bus, -1, "pci-ohci");
} else {
pci_create_simple(phb->bus, -1, "nec-usb-xhci");
}
pci_create_simple(phb->bus, -1, "nec-usb-xhci");
if (has_vga) {
USBBus *usb_bus;
@ -3662,7 +3585,6 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
PCDIMMDevice *dimm = PC_DIMM(dev);
@ -3671,11 +3593,6 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Object *memdev;
hwaddr pagesize;
if (!smc->dr_lmb_enabled) {
error_setg(errp, "Memory hotplug not supported for this machine");
return;
}
size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
if (local_err) {
error_propagate(errp, local_err);
@ -3932,21 +3849,9 @@ void spapr_core_release(DeviceState *dev)
static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
MachineState *ms = MACHINE(hotplug_dev);
SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
CPUCore *cc = CPU_CORE(dev);
CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
if (smc->pre_2_10_has_unused_icps) {
SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
int i;
for (i = 0; i < cc->nr_threads; i++) {
CPUState *cs = CPU(sc->threads[i]);
pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
}
}
assert(core_slot);
core_slot->cpu = NULL;
qdev_unrealize(dev);
@ -4027,7 +3932,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
MachineClass *mc = MACHINE_GET_CLASS(spapr);
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
SpaprDrc *drc;
@ -4077,12 +3981,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
}
}
if (smc->pre_2_10_has_unused_icps) {
for (i = 0; i < cc->nr_threads; i++) {
CPUState *cs = CPU(core->threads[i]);
pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
}
}
}
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@ -4713,7 +4611,6 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
hc->unplug_request = spapr_machine_device_unplug_request;
hc->unplug = spapr_machine_device_unplug;
smc->dr_lmb_enabled = true;
smc->update_dt_enabled = true;
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
mc->has_hotpluggable_cpus = true;
@ -4834,8 +4731,6 @@ static void spapr_machine_latest_class_options(MachineClass *mc)
DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
#define DEFINE_SPAPR_MACHINE(major, minor) \
DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)
#define DEFINE_SPAPR_MACHINE_TAGGED(major, minor, tag) \
DEFINE_SPAPR_MACHINE_IMPL(false, major, minor, _, tag)
/*
* pseries-9.2
@ -5120,278 +5015,6 @@ static void spapr_machine_3_0_class_options(MachineClass *mc)
DEFINE_SPAPR_MACHINE(3, 0);
/*
* pseries-2.12
*/
static void spapr_machine_2_12_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
static GlobalProperty compat[] = {
{ TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
{ TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
};
spapr_machine_3_0_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
/* We depend on kvm_enabled() to choose a default value for the
* hpt-max-page-size capability. Of course we can't do it here
* because this is too early and the HW accelerator isn't initialized
* yet. Postpone this to machine init (see default_caps_with_cpu()).
*/
smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
}
DEFINE_SPAPR_MACHINE(2, 12);
static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
spapr_machine_2_12_class_options(mc);
smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
}
DEFINE_SPAPR_MACHINE_TAGGED(2, 12, sxxm);
/*
* pseries-2.11
*/
static void spapr_machine_2_11_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
spapr_machine_2_12_class_options(mc);
smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
}
DEFINE_SPAPR_MACHINE(2, 11);
/*
* pseries-2.10
*/
static void spapr_machine_2_10_class_options(MachineClass *mc)
{
spapr_machine_2_11_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
}
DEFINE_SPAPR_MACHINE(2, 10);
/*
* pseries-2.9
*/
static void spapr_machine_2_9_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
static GlobalProperty compat[] = {
{ TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
};
spapr_machine_2_10_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
smc->pre_2_10_has_unused_icps = true;
smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}
DEFINE_SPAPR_MACHINE(2, 9);
/*
* pseries-2.8
*/
static void spapr_machine_2_8_class_options(MachineClass *mc)
{
static GlobalProperty compat[] = {
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
};
spapr_machine_2_9_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
mc->numa_mem_align_shift = 23;
}
DEFINE_SPAPR_MACHINE(2, 8);
/*
* pseries-2.7
*/
static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
unsigned n_dma, uint32_t *liobns, Error **errp)
{
/* Legacy PHB placement for pseries-2.7 and earlier machine types */
const uint64_t base_buid = 0x800000020000000ULL;
const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
const hwaddr pio_offset = 0x80000000; /* 2 GiB */
const uint32_t max_index = 255;
const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
uint64_t ram_top = MACHINE(spapr)->ram_size;
hwaddr phb0_base, phb_base;
int i;
/* Do we have device memory? */
if (MACHINE(spapr)->device_memory) {
/* Can't just use maxram_size, because there may be an
* alignment gap between normal and device memory regions
*/
ram_top = MACHINE(spapr)->device_memory->base +
memory_region_size(&MACHINE(spapr)->device_memory->mr);
}
phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
if (index > max_index) {
error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
max_index);
return false;
}
*buid = base_buid + index;
for (i = 0; i < n_dma; ++i) {
liobns[i] = SPAPR_PCI_LIOBN(index, i);
}
phb_base = phb0_base + index * phb_spacing;
*pio = phb_base + pio_offset;
*mmio32 = phb_base + mmio_offset;
/*
* We don't set the 64-bit MMIO window, relying on the PHB's
* fallback behaviour of automatically splitting a large "32-bit"
* window into contiguous 32-bit and 64-bit windows
*/
return true;
}
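
For orientation, here is a minimal standalone sketch of the address arithmetic in phb_placement_2_7() above, assuming 4 GiB of base RAM, no device memory and index 1; the constants are copied from the function and the program only prints the derived windows.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t base_buid      = 0x800000020000000ULL;
    const uint64_t phb_spacing    = 0x1000000000ULL;   /* 64 GiB per PHB slot */
    const uint64_t mmio_offset    = 0xa0000000;        /* 2 GiB + 512 MiB */
    const uint64_t pio_offset     = 0x80000000;        /* 2 GiB */
    const uint64_t phb0_alignment = 0x10000000000ULL;  /* PHB#0 aligned to 1 TiB */

    uint64_t ram_top = 4ULL << 30;  /* assumed: 4 GiB, no device memory region */
    uint32_t index   = 1;           /* assumed: second PHB */

    /* open-coded QEMU_ALIGN_UP(ram_top, phb0_alignment) */
    uint64_t phb0_base = (ram_top + phb0_alignment - 1) & ~(phb0_alignment - 1);
    uint64_t phb_base  = phb0_base + index * phb_spacing;

    printf("buid   = 0x%llx\n", (unsigned long long)(base_buid + index));
    printf("pio    = 0x%llx\n", (unsigned long long)(phb_base + pio_offset));
    printf("mmio32 = 0x%llx\n", (unsigned long long)(phb_base + mmio_offset));
    return 0;
}

With those inputs PHB#1 lands at 0x11000000000 (1 TiB plus one 64 GiB slot), giving pio = 0x11080000000 and mmio32 = 0x110a0000000.
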
static void spapr_machine_2_7_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
static GlobalProperty compat[] = {
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
{ TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
};
spapr_machine_2_8_class_options(mc);
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
mc->default_machine_opts = "modern-hotplug-events=off";
compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
smc->phb_placement = phb_placement_2_7;
}
DEFINE_SPAPR_MACHINE(2, 7);
/*
* pseries-2.6
*/
static void spapr_machine_2_6_class_options(MachineClass *mc)
{
static GlobalProperty compat[] = {
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
};
spapr_machine_2_7_class_options(mc);
mc->has_hotpluggable_cpus = false;
compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}
DEFINE_SPAPR_MACHINE(2, 6);
/*
* pseries-2.5
*/
static void spapr_machine_2_5_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
static GlobalProperty compat[] = {
{ "spapr-vlan", "use-rx-buffer-pools", "off" },
};
spapr_machine_2_6_class_options(mc);
smc->use_ohci_by_default = true;
compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}
DEFINE_SPAPR_MACHINE(2, 5);
/*
* pseries-2.4
*/
static void spapr_machine_2_4_class_options(MachineClass *mc)
{
SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
spapr_machine_2_5_class_options(mc);
smc->dr_lmb_enabled = false;
compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
}
DEFINE_SPAPR_MACHINE(2, 4);
/*
* pseries-2.3
*/
static void spapr_machine_2_3_class_options(MachineClass *mc)
{
static GlobalProperty compat[] = {
{ "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
};
spapr_machine_2_4_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}
DEFINE_SPAPR_MACHINE(2, 3);
/*
* pseries-2.2
*/
static void spapr_machine_2_2_class_options(MachineClass *mc)
{
static GlobalProperty compat[] = {
{ TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
};
spapr_machine_2_3_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
}
DEFINE_SPAPR_MACHINE(2, 2);
/*
* pseries-2.1
*/
static void spapr_machine_2_1_class_options(MachineClass *mc)
{
spapr_machine_2_2_class_options(mc);
compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
}
DEFINE_SPAPR_MACHINE(2, 1);
static void spapr_machine_register_types(void)
{
type_register_static(&spapr_machine_info);


@ -197,9 +197,7 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc)
{
CPUPPCState *env = &cpu->env;
if (!sc->pre_3_0_migration) {
vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
}
vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu);
cpu_ppc_tb_free(env);
qdev_unrealize(DEVICE(cpu));
@ -285,10 +283,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr,
return false;
}
if (!sc->pre_3_0_migration) {
vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state,
cpu->machine_data);
}
vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state,
cpu->machine_data);
return true;
}
@ -366,8 +362,6 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
static Property spapr_cpu_core_properties[] = {
DEFINE_PROP_INT32("node-id", SpaprCpuCore, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_BOOL("pre-3.0-migration", SpaprCpuCore, pre_3_0_migration,
false),
DEFINE_PROP_END_OF_LIST()
};
@ -411,6 +405,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = {
DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"),
DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"),
DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"),
DEFINE_SPAPR_CPU_CORE_TYPE("power11_v2.0"),
#ifdef CONFIG_KVM
DEFINE_SPAPR_CPU_CORE_TYPE("host"),
#endif


@ -771,6 +771,7 @@ static void copy_logical_pvr(void *a, void *b, bool set)
if (*pvr_logical_ptr) {
switch (*pvr_logical_ptr) {
case CPU_POWERPC_LOGICAL_3_10_P11:
case CPU_POWERPC_LOGICAL_3_10:
pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00;
break;
@ -982,6 +983,7 @@ struct guest_state_element_type guest_state_element_types[] = {
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb),
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl),
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DPDES, dpdes),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave),
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar),
GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr),
@ -1184,6 +1186,12 @@ static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
return H_PARAMETER;
}
/* P11 capabilities */
if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
spapr->max_compat_pvr)) {
env->gpr[4] |= H_GUEST_CAPABILITIES_P11_MODE;
}
/* P10 capabilities */
if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
spapr->max_compat_pvr)) {
@ -1226,7 +1234,10 @@ static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu,
env->gpr[4] = 1;
/* set R5 to the first supported Power Processor Mode */
if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
spapr->max_compat_pvr)) {
env->gpr[5] = H_GUEST_CAP_P11_MODE_BMAP;
} else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
spapr->max_compat_pvr)) {
env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP;
} else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,


@ -1237,10 +1237,6 @@ static void add_drcs(SpaprPhbState *phb, PCIBus *bus)
int i;
uint8_t chassis;
if (!phb->dr_enabled) {
return;
}
chassis = chassis_from_bus(bus);
if (pci_bus_is_root(bus)) {
@ -1260,10 +1256,6 @@ static void remove_drcs(SpaprPhbState *phb, PCIBus *bus)
int i;
uint8_t chassis;
if (!phb->dr_enabled) {
return;
}
chassis = chassis_from_bus(bus);
for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) {
@ -1548,17 +1540,6 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)));
uint32_t slotnr = PCI_SLOT(pdev->devfn);
if (!phb->dr_enabled) {
/* if this is a hotplug operation initiated by the user
* we need to let them know it's not enabled
*/
if (plugged_dev->hotplugged) {
error_setg(errp, "Bus '%s' does not support hotplugging",
phb->parent_obj.bus->qbus.name);
return;
}
}
if (IS_PCI_BRIDGE(plugged_dev)) {
if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) {
return;
@ -1591,14 +1572,6 @@ static void spapr_pci_plug(HotplugHandler *plug_handler,
SpaprDrc *drc = drc_from_dev(phb, pdev);
uint32_t slotnr = PCI_SLOT(pdev->devfn);
/*
* If DR is disabled we don't need to do anything in the case of
* hotplug or coldplug callbacks.
*/
if (!phb->dr_enabled) {
return;
}
g_assert(drc);
if (IS_PCI_BRIDGE(plugged_dev)) {
@ -1673,12 +1646,6 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler,
PCIDevice *pdev = PCI_DEVICE(plugged_dev);
SpaprDrc *drc = drc_from_dev(phb, pdev);
if (!phb->dr_enabled) {
error_setg(errp, "Bus '%s' does not support hotplugging",
phb->parent_obj.bus->qbus.name);
return;
}
g_assert(drc);
g_assert(drc->dev == plugged_dev);
@ -1847,30 +1814,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */
if (sphb->mem64_win_size != 0) {
if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
" (max 2 GiB)", sphb->mem_win_size);
return;
}
/* 64-bit window defaults to identity mapping */
sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
} else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
/*
* For compatibility with old configuration, if no 64-bit MMIO
* window is specified, but the ordinary (32-bit) memory
* window is specified as > 2GiB, we treat it as a 2GiB 32-bit
* window, with a 64-bit MMIO window following on immediately
* afterwards
*/
sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE;
sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE;
sphb->mem64_win_pciaddr =
SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE;
sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE;
if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
" (max 2 GiB)", sphb->mem_win_size);
return;
}
/* 64-bit window defaults to identity mapping */
sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
if (spapr_pci_find_phb(spapr, sphb->buid)) {
SpaprPhbState *s;
@ -2089,8 +2041,6 @@ static Property spapr_phb_properties[] = {
SPAPR_PCI_MEM64_WIN_SIZE),
DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled,
true),
/* Default DMA window is 0..1GB */
DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0),
DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000),
@ -2101,8 +2051,6 @@ static Property spapr_phb_properties[] = {
(1ULL << 12) | (1ULL << 16)
| (1ULL << 21) | (1ULL << 24)),
DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1),
DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState,
pre_2_8_migration, false),
DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState,
pcie_ecs, true),
DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState,
@ -2140,20 +2088,6 @@ static int spapr_pci_pre_save(void *opaque)
gpointer key, value;
int i;
if (sphb->pre_2_8_migration) {
sphb->mig_liobn = sphb->dma_liobn[0];
sphb->mig_mem_win_addr = sphb->mem_win_addr;
sphb->mig_mem_win_size = sphb->mem_win_size;
sphb->mig_io_win_addr = sphb->io_win_addr;
sphb->mig_io_win_size = sphb->io_win_size;
if ((sphb->mem64_win_size != 0)
&& (sphb->mem64_win_addr
== (sphb->mem_win_addr + sphb->mem_win_size))) {
sphb->mig_mem_win_size += sphb->mem64_win_size;
}
}
g_free(sphb->msi_devs);
sphb->msi_devs = NULL;
sphb->msi_devs_num = g_hash_table_size(sphb->msi);
@ -2200,13 +2134,6 @@ static int spapr_pci_post_load(void *opaque, int version_id)
return 0;
}
static bool pre_2_8_migration(void *opaque, int version_id)
{
SpaprPhbState *sphb = opaque;
return sphb->pre_2_8_migration;
}
static const VMStateDescription vmstate_spapr_pci = {
.name = "spapr_pci",
.version_id = 2,
@ -2216,11 +2143,6 @@ static const VMStateDescription vmstate_spapr_pci = {
.post_load = spapr_pci_post_load,
.fields = (const VMStateField[]) {
VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL),
VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration),
VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0,
vmstate_spapr_pci_lsi, SpaprPciLsi),
VMSTATE_INT32(msi_devs_num, SpaprPhbState),


@ -67,29 +67,6 @@ static struct boot_info
void *vfdt;
} boot_info;
/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
static void mmubooke_create_initial_mapping(CPUPPCState *env,
target_ulong va,
hwaddr pa)
{
ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 1U << 31; /* up to 0x80000000 */
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
tlb = &env->tlb.tlbe[1];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
tlb->size = 1U << 31; /* up to 0xffffffff */
tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->PID = 0;
}
static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk)
{
PowerPCCPU *cpu;
@ -139,8 +116,9 @@ static void main_cpu_reset(void *opaque)
env->gpr[3] = bi->fdt;
env->nip = bi->bootstrap_pc;
/* Create a mapping for the kernel. */
mmubooke_create_initial_mapping(env, 0, 0);
/* Create a mapping spanning the 32bit addr space. */
booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31);
booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31);
env->gpr[6] = tswap32(EPAPR_MAGIC);
env->gpr[7] = bi->ima_size;
}
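
The consolidation above relies on a shared booke_set_tlb() helper whose prototype shows up in the ppc.h hunk further down. Judging by the fields the removed mmubooke_create_initial_mapping() used to fill in, the helper is presumably close to the following sketch; it uses QEMU's internal ppcemb_tlb_t and PAGE_* definitions as seen in the removed code, so treat it as an illustration rather than the exact implementation.

/* Sketch of the shared helper, inferred from the removed per-board code. */
void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa,
                   target_ulong size)
{
    tlb->attr = 0;
    tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
    tlb->size = size;
    tlb->EPN  = va & TARGET_PAGE_MASK;
    tlb->RPN  = pa & TARGET_PAGE_MASK;
    tlb->PID  = 0;
}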


@ -53,8 +53,8 @@ static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
{
free(payload->data);
free(payload);
g_free(payload->data);
g_free(payload);
}
static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
@ -217,6 +217,9 @@ static void transfer(PnvSpi *s, PnvXferBuffer *payload)
PnvXferBuffer *rsp_payload = NULL;
rsp_payload = pnv_spi_xfer_buffer_new();
if (!rsp_payload) {
return;
}
for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
tx = 0;
for (int i = 0; i < s->transfer_len; i++) {
@ -235,9 +238,8 @@ static void transfer(PnvSpi *s, PnvXferBuffer *payload)
(rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
}
}
if (rsp_payload != NULL) {
spi_response(s, s->N1_bits, rsp_payload);
}
spi_response(s, s->N1_bits, rsp_payload);
pnv_spi_xfer_buffer_free(rsp_payload);
}
static inline uint8_t get_seq_index(PnvSpi *s)
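
The buffer-free fix above keeps allocation and release inside the same allocator family: memory obtained from GLib's g_malloc()/g_new() functions is documented to be released with g_free(), not libc free(). A minimal self-contained illustration of the pairing (the struct and helper names are stand-ins, not the QEMU types):

#include <glib.h>
#include <stdint.h>

typedef struct {
    uint8_t *data;
    uint32_t len;
} XferBuf;                       /* stand-in for PnvXferBuffer */

static XferBuf *xfer_buf_new(void)
{
    return g_new0(XferBuf, 1);   /* allocated by GLib...            */
}

static void xfer_buf_free(XferBuf *buf)
{
    g_free(buf->data);           /* ...so both the payload and the  */
    g_free(buf);                 /* struct go back through g_free() */
}

int main(void)
{
    XferBuf *b = xfer_buf_new();
    b->data = g_malloc0(16);
    xfer_buf_free(b);
    return 0;
}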


@ -824,13 +824,4 @@ extern const size_t hw_compat_2_5_len;
extern GlobalProperty hw_compat_2_4[];
extern const size_t hw_compat_2_4_len;
extern GlobalProperty hw_compat_2_3[];
extern const size_t hw_compat_2_3_len;
extern GlobalProperty hw_compat_2_2[];
extern const size_t hw_compat_2_2_len;
extern GlobalProperty hw_compat_2_1[];
extern const size_t hw_compat_2_1_len;
#endif


@ -53,7 +53,6 @@ struct SpaprPhbState {
uint32_t index;
uint64_t buid;
char *dtbusname;
bool dr_enabled;
MemoryRegion memspace, iospace;
hwaddr mem_win_addr, mem_win_size, mem64_win_addr, mem64_win_size;
@ -84,10 +83,6 @@ struct SpaprPhbState {
bool pcie_ecs; /* Allow access to PCIe extended config space? */
/* Fields for migration compatibility hacks */
bool pre_2_8_migration;
uint32_t mig_liobn;
hwaddr mig_mem_win_addr, mig_mem_win_size;
hwaddr mig_io_win_addr, mig_io_win_size;
bool pre_5_1_assoc;
};


@ -116,6 +116,13 @@ enum {
#define PPC_SERIAL_MM_BAUDBASE 399193
#ifndef CONFIG_USER_ONLY
void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa,
hwaddr len);
void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa,
target_ulong size);
#endif
/* ppc_booke.c */
void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags);
#endif


@ -141,11 +141,8 @@ struct SpaprMachineClass {
MachineClass parent_class;
/*< public >*/
bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */
bool dr_phb_enabled; /* enable dynamic-reconfig/hotplug of PHBs */
bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */
bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
bool pre_2_10_has_unused_icps;
bool legacy_irq_allocation;
uint32_t nr_xirqs;
bool broken_host_serial_model; /* present real host info to the guest */


@ -28,7 +28,6 @@ struct SpaprCpuCore {
/*< public >*/
PowerPCCPU **threads;
int node_id;
bool pre_3_0_migration; /* older machine don't know about SpaprCpuState */
};
struct SpaprCpuCoreClass {


@ -99,7 +99,8 @@
#define GSB_VCPU_SPR_HASHKEYR 0x1050
#define GSB_VCPU_SPR_HASHPKEYR 0x1051
#define GSB_VCPU_SPR_CTRL 0x1052
/* RESERVED 0x1053 - 0x1FFF */
#define GSB_VCPU_SPR_DPDES 0x1053
/* RESERVED 0x1054 - 0x1FFF */
#define GSB_VCPU_SPR_CR 0x2000
#define GSB_VCPU_SPR_PIDR 0x2001
#define GSB_VCPU_SPR_DSISR 0x2002
@ -210,11 +211,14 @@ typedef struct SpaprMachineStateNestedGuest {
#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000
#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \
#define H_GUEST_CAPABILITIES_P11_MODE 0x1000000000000000
#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P11_MODE | \
H_GUEST_CAPABILITIES_P10_MODE | \
H_GUEST_CAPABILITIES_P9_MODE)
#define H_GUEST_CAP_COPY_MEM_BMAP 0
#define H_GUEST_CAP_P9_MODE_BMAP 1
#define H_GUEST_CAP_P10_MODE_BMAP 2
#define H_GUEST_CAP_P11_MODE_BMAP 3
#define PAPR_NESTED_GUEST_MAX 4096
#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL
#define PAPR_NESTED_GUEST_VCPU_MAX 2048
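
The *_BMAP values line up with the big-endian (MSB-0) bit numbers of the capability masks above: bitmap index n corresponds to the mask 1ULL << (63 - n), which appears to be why h_guest_set_capabilities() can hand back a bare mode number in R5. A small self-contained check of that relationship, with the constants copied from the defines above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t P9  = 0x4000000000000000ULL;  /* H_GUEST_CAPABILITIES_P9_MODE  */
    const uint64_t P10 = 0x2000000000000000ULL;  /* H_GUEST_CAPABILITIES_P10_MODE */
    const uint64_t P11 = 0x1000000000000000ULL;  /* H_GUEST_CAPABILITIES_P11_MODE */

    /* *_BMAP is the MSB-0 bit number of the matching capability mask */
    assert(P9  == 1ULL << (63 - 1));   /* H_GUEST_CAP_P9_MODE_BMAP  == 1 */
    assert(P10 == 1ULL << (63 - 2));   /* H_GUEST_CAP_P10_MODE_BMAP == 2 */
    assert(P11 == 1ULL << (63 - 3));   /* H_GUEST_CAP_P11_MODE_BMAP == 3 */
    return 0;
}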


@ -218,7 +218,7 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc)
xsrc->esb_shift == XIVE_ESB_4K_2PAGE;
}
static inline size_t xive_source_esb_len(XiveSource *xsrc)
static inline uint64_t xive_source_esb_len(XiveSource *xsrc)
{
return (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
}
@ -533,7 +533,7 @@ Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
void xive_tctx_reset_os_signal(XiveTCTX *tctx);
void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
/*
* KVM XIVE device helpers


@ -53,6 +53,12 @@ typedef struct Xive2RouterClass {
Xive2Nvp *nvp);
int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
int (*get_nvgc)(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc);
int (*write_nvgc)(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc);
uint8_t (*get_block_id)(Xive2Router *xrtr);
uint32_t (*get_config)(Xive2Router *xrtr);
} Xive2RouterClass;
@ -67,6 +73,12 @@ int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp);
int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
Xive2Nvp *nvp, uint8_t word_number);
int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc);
int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
uint8_t nvgc_blk, uint32_t nvgc_idx,
Xive2Nvgc *nvgc);
uint32_t xive2_router_get_config(Xive2Router *xrtr);
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
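
The new xive2_router_get_nvgc()/xive2_router_write_nvgc() entry points presumably dispatch to the matching Xive2RouterClass hooks the same way the existing NVP accessors do. A sketch under that assumption (the QOM cast macro XIVE2_ROUTER_GET_CLASS is taken for granted here, so this is illustrative only):

/* Sketch: thin wrappers around the new class hooks, mirroring get_nvp/write_nvp. */
int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
                          uint8_t nvgc_blk, uint32_t nvgc_idx,
                          Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}

int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
                            uint8_t nvgc_blk, uint32_t nvgc_idx,
                            Xive2Nvgc *nvgc)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
}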
@ -107,5 +119,11 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
#endif /* PPC_XIVE2_H */


@ -19,16 +19,18 @@
* mode (P10), the CAM line is slightly different as the VP space was
* increased.
*/
#define TM2_QW0W2_VU PPC_BIT32(0)
#define TM2_W2_VALID PPC_BIT32(0)
#define TM2_W2_HW PPC_BIT32(1)
#define TM2_QW0W2_VU TM2_W2_VALID
#define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31)
#define TM2_QW1W2_VO PPC_BIT32(0)
#define TM2_QW1W2_HO PPC_BIT32(1)
#define TM2_QW1W2_VO TM2_W2_VALID
#define TM2_QW1W2_HO TM2_W2_HW
#define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31)
#define TM2_QW2W2_VP PPC_BIT32(0)
#define TM2_QW2W2_HP PPC_BIT32(1)
#define TM2_QW2W2_VP TM2_W2_VALID
#define TM2_QW2W2_HP TM2_W2_HW
#define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31)
#define TM2_QW3W2_VT PPC_BIT32(0)
#define TM2_QW3W2_HT PPC_BIT32(1)
#define TM2_QW3W2_VT TM2_W2_VALID
#define TM2_QW3W2_HT TM2_W2_HW
#define TM2_QW3W2_LP PPC_BIT32(6)
#define TM2_QW3W2_LE PPC_BIT32(7)
@ -151,6 +153,7 @@ typedef struct Xive2Nvp {
#define NVP2_W0_VALID PPC_BIT32(0)
#define NVP2_W0_HW PPC_BIT32(7)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31)
uint32_t w1;
#define NVP2_W1_CO PPC_BIT32(13)
#define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15)
@ -171,7 +174,9 @@ typedef struct Xive2Nvp {
#define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7)
#define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31)
uint32_t w6;
#define NVP2_W6_REPORTING_LINE PPC_BITMASK32(4, 31)
uint32_t w7;
#define NVP2_W7_REPORTING_LINE PPC_BITMASK32(0, 23)
} Xive2Nvp;
#define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
@ -209,6 +214,7 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf);
typedef struct Xive2Nvgc {
uint32_t w0;
#define NVGC2_W0_VALID PPC_BIT32(0)
#define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31)
uint32_t w1;
uint32_t w2;
uint32_t w3;
@ -218,4 +224,9 @@ typedef struct Xive2Nvgc {
uint32_t w7;
} Xive2Nvgc;
#define xive2_nvgc_is_valid(nvgc) (be32_to_cpu((nvgc)->w0) & NVGC2_W0_VALID)
void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx,
GString *buf);
#endif /* PPC_XIVE2_REGS_H */


@ -77,8 +77,11 @@
#define TM_LSMFB 0x3 /* - + + + */
#define TM_ACK_CNT 0x4 /* - + - - */
#define TM_INC 0x5 /* - + - + */
#define TM_LGS 0x5 /* + + + + */ /* Rename P10 */
#define TM_AGE 0x6 /* - + - + */
#define TM_T 0x6 /* - + - + */ /* Rename P10 */
#define TM_PIPR 0x7 /* - + - + */
#define TM_OGEN 0xF /* - + - - */ /* P10 only */
#define TM_WORD0 0x0
#define TM_WORD1 0x4
@ -98,6 +101,7 @@
#define TM_QW3W2_LP PPC_BIT32(6)
#define TM_QW3W2_LE PPC_BIT32(7)
#define TM_QW3W2_T PPC_BIT32(31)
#define TM_QW3B8_VT PPC_BIT8(0)
/*
* In addition to normal loads to "peek" and writes (only when invalid)
@ -114,23 +118,32 @@
* Then we have all these "special" CI ops at these offset that trigger
* all sorts of side effects:
*/
#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/
#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg */
#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
#define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */
#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user
* context */
#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS
* context to reg */
#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool
* context to reg*/
#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd
* line */
#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even
* line */
#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user */
/* context */
#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
#define TM_SPC_PULL_OS_CTX_G2 0x810 /* Load32/Load64 Pull/Invalidate OS */
/* context to reg */
#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS */
/* context to reg */
#define TM_SPC_PULL_POOL_CTX_G2 0x820 /* Load32/Load64 Pull/Invalidate Pool */
/* context to reg */
#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool */
/* context to reg */
#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
#define TM_SPC_PULL_PHYS_CTX_G2 0x830 /* Load32 Pull phys ctx to reg */
#define TM_SPC_PULL_PHYS_CTX 0x838 /* Load8 Pull phys ctx to reg */
#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd */
/* line */
#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
#define TM_SPC_PULL_OS_CTX_OL 0xc18 /* Pull/Invalidate OS context to */
/* odd Thread reporting line */
#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even */
/* line */
#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
#define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */
/* XXX more... */
/* NSR fields for the various QW ack types */


@ -860,25 +860,6 @@ static void vmstate_check(const VMStateDescription *vmsd)
}
}
/*
* See comment in hw/intc/xics.c:icp_realize()
*
* This function can be removed when
* pre_2_10_vmstate_register_dummy_icp() is removed.
*/
int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id,
const VMStateDescription *vmsd,
void *opaque)
{
SaveStateEntry *se = find_se(vmsd->name, instance_id);
if (se) {
savevm_state_handler_remove(se);
g_free(se->compat);
g_free(se);
}
return vmstate_register(obj, instance_id, vmsd, opaque);
}
int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
const VMStateDescription *vmsd,


@ -100,6 +100,13 @@ static const CompatInfo compat_table[] = {
.pcr_level = PCR_COMPAT_3_10,
.max_vthreads = 8,
},
{ /* POWER11, ISA3.10 */
.name = "power11",
.pvr = CPU_POWERPC_LOGICAL_3_10_P11,
.pcr = PCR_COMPAT_3_10,
.pcr_level = PCR_COMPAT_3_10,
.max_vthreads = 8,
},
};
static const CompatInfo *compat_by_pvr(uint32_t pvr)
@ -132,6 +139,10 @@ static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr,
/* Outside specified range */
return false;
}
if (compat->pvr > pcc->spapr_logical_pvr) {
/* Older CPU cannot support a newer processor's compat mode */
return false;
}
if (!(pcc->pcr_supported & compat->pcr_level)) {
/* Not supported by this CPU */
return false;
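
Taken with the logical PVRs added in cpu-models.h further down (0x0F000006 for ISA v3.10 and 0x0F000007 for its Power11 variant), the new ordering check means a CPU class whose spapr_logical_pvr is the plain v3.10 value (POWER10) cannot accept the power11 compat entry, while a POWER11 class can. A tiny self-contained restatement of the comparison, using those two constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOGICAL_3_10      0x0F000006u   /* CPU_POWERPC_LOGICAL_3_10     */
#define LOGICAL_3_10_P11  0x0F000007u   /* CPU_POWERPC_LOGICAL_3_10_P11 */

static bool cpu_allows_compat(uint32_t spapr_logical_pvr, uint32_t compat_pvr)
{
    /* Older CPU cannot support a newer processor's compat mode */
    return compat_pvr <= spapr_logical_pvr;
}

int main(void)
{
    printf("power10 with power11 compat: %s\n",
           cpu_allows_compat(LOGICAL_3_10, LOGICAL_3_10_P11) ? "allowed" : "rejected");
    printf("power11 with power11 compat: %s\n",
           cpu_allows_compat(LOGICAL_3_10_P11, LOGICAL_3_10_P11) ? "allowed" : "rejected");
    return 0;
}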


@ -734,6 +734,8 @@
"POWER9 v2.2")
POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10,
"POWER10 v2.0")
POWERPC_DEF("power11_v2.0", CPU_POWERPC_POWER11_DD20, POWER11,
"POWER11_v2.0")
#endif /* defined (TARGET_PPC64) */
/***************************************************************************/
@ -909,6 +911,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "power8nvl", "power8nvl_v1.0" },
{ "power9", "power9_v2.2" },
{ "power10", "power10_v2.0" },
{ "power11", "power11_v2.0" },
#endif
/* Generic PowerPCs */


@ -354,6 +354,8 @@ enum {
CPU_POWERPC_POWER10_BASE = 0x00800000,
CPU_POWERPC_POWER10_DD1 = 0x00801100,
CPU_POWERPC_POWER10_DD20 = 0x00801200,
CPU_POWERPC_POWER11_BASE = 0x00820000,
CPU_POWERPC_POWER11_DD20 = 0x00821200,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
@ -391,6 +393,7 @@ enum {
CPU_POWERPC_LOGICAL_2_07 = 0x0F000004,
CPU_POWERPC_LOGICAL_3_00 = 0x0F000005,
CPU_POWERPC_LOGICAL_3_10 = 0x0F000006,
CPU_POWERPC_LOGICAL_3_10_P11 = 0x0F000007,
};
/* System version register (used on MPC 8xxx) */


@ -40,6 +40,7 @@
#define PPC_BIT_NR(bit) (63 - (bit))
#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
#define PPC_BIT32_NR(bit) (31 - (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
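
The PPC_BIT32() family numbers bits MSB-first (IBM convention), which is how all of the TIMA, NVP and NVGC field masks in this series are written. A short self-contained check, assuming PPC_BITMASK32() is built the same way as the PPC_BITMASK() definition above:

#include <assert.h>

#define PPC_BIT32(bit)        (0x80000000u >> (bit))
#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | PPC_BIT32(bs))

int main(void)
{
    assert(PPC_BIT32(0) == 0x80000000u);          /* e.g. TM2_W2_VALID     */
    assert(PPC_BIT32(1) == 0x40000000u);          /* e.g. TM2_W2_HW        */
    assert(PPC_BITMASK32(4, 31) == 0x0FFFFFFFu);  /* e.g. TM2_QW1W2_OS_CAM */
    return 0;
}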
@ -215,6 +216,8 @@ typedef enum powerpc_excp_t {
POWERPC_EXCP_POWER9,
/* POWER10 exception model */
POWERPC_EXCP_POWER10,
/* POWER11 exception model */
POWERPC_EXCP_POWER11,
} powerpc_excp_t;
/*****************************************************************************/
@ -634,8 +637,8 @@ FIELD(MSR, LE, MSR_LE, 1)
#define PSSCR_EC PPC_BIT(43) /* Exit Criterion */
/* HFSCR bits */
#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */
#define HFSCR_BHRB PPC_BIT(59) /* BHRB Instructions */
#define HFSCR_MSGP PPC_BIT_NR(53) /* Privileged Message Send Facilities */
#define HFSCR_BHRB PPC_BIT_NR(59) /* BHRB Instructions */
#define HFSCR_IC_MSGP 0xA
#define DBCR0_ICMP (1 << 27)
@ -1454,16 +1457,6 @@ struct ArchCPU {
/* Those resources are used only during code translation */
/* opcode handlers */
opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
/* Fields related to migration compatibility hacks */
bool pre_2_8_migration;
target_ulong mig_msr_mask;
uint64_t mig_insns_flags;
uint64_t mig_insns_flags2;
uint32_t mig_nb_BATs;
bool pre_2_10_migration;
bool pre_3_0_migration;
int32_t mig_slb_nr;
};
/**
@ -1482,6 +1475,7 @@ struct PowerPCCPUClass {
void (*parent_parse_features)(const char *type, char *str, Error **errp);
uint32_t pvr;
uint32_t spapr_logical_pvr;
/*
* If @best is false, match if pcc is in the family of pvr
* Else match only if pcc is the best match for pvr in this family.


@ -52,6 +52,7 @@
#include "kvm_ppc.h"
#endif
#include "cpu_init.h"
/* #define PPC_DEBUG_SPR */
/* #define USE_APPLE_GDB */
@ -6153,6 +6154,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER7";
dc->desc = "POWER7";
pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_06_PLUS;
pcc->pvr_match = ppc_pvr_match_power7;
pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@ -6316,6 +6318,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_07;
pcc->pvr_match = ppc_pvr_match_power8;
pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@ -6407,7 +6410,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = {
#endif /* CONFIG_USER_ONLY */
#define POWER9_BHRB_ENTRIES_LOG2 5
static void init_proc_POWER9(CPUPPCState *env)
static void register_power9_common_sprs(CPUPPCState *env)
{
/* Common Registers */
init_proc_book3s_common(env);
@ -6426,7 +6429,6 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
register_HEIR32_spr(env);
register_power6_dbg_sprs(env);
register_power7_common_sprs(env);
register_power8_tce_address_control_sprs(env);
@ -6444,16 +6446,21 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
/* POWER9 Specific registers */
spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
spr_read_generic, spr_write_generic,
KVM_REG_PPC_TIDR, 0);
/* FIXME: Filter fields properly based on privilege level */
spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
spr_read_generic, spr_write_generic,
KVM_REG_PPC_PSSCR, 0);
}
static void init_proc_POWER9(CPUPPCState *env)
{
register_power9_common_sprs(env);
register_HEIR32_spr(env);
/* POWER9 Specific registers */
spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
spr_read_generic, spr_write_generic,
KVM_REG_PPC_TIDR, 0);
/* env variables */
env->dcache_line_size = 128;
env->icache_line_size = 128;
@ -6509,59 +6516,17 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER9";
dc->desc = "POWER9";
pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_00;
pcc->pvr_match = ppc_pvr_match_power9;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 |
PCR_COMPAT_2_05;
pcc->pcr_mask = PPC_PCR_MASK_POWER9;
pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER9;
pcc->init_proc = init_proc_POWER9;
pcc->check_pow = check_pow_nocheck;
pcc->check_attn = check_attn_hid0_power9;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
PPC_FLOAT_FRSQRTES |
PPC_FLOAT_STFIWX |
PPC_FLOAT_EXT |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD |
PPC_CILDST;
pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC |
PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
(1ull << MSR_TM) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
(1ull << MSR_EE) |
(1ull << MSR_PR) |
(1ull << MSR_FP) |
(1ull << MSR_ME) |
(1ull << MSR_FE0) |
(1ull << MSR_SE) |
(1ull << MSR_DE) |
(1ull << MSR_FE1) |
(1ull << MSR_IR) |
(1ull << MSR_DR) |
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
(LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
(LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
LPCR_DEE | LPCR_OEE))
| LPCR_MER | LPCR_GTSE | LPCR_TC |
LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
pcc->insns_flags = PPC_INSNS_FLAGS_POWER9;
pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER9;
pcc->msr_mask = PPC_MSR_MASK_POWER9;
pcc->lpcr_mask = PPC_LPCR_MASK_POWER9;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
#if !defined(CONFIG_USER_ONLY)
@ -6574,10 +6539,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
pcc->excp_model = POWERPC_EXCP_POWER9;
pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV;
pcc->flags = POWERPC_FLAGS_POWER9;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@ -6604,50 +6566,12 @@ static struct ppc_radix_page_info POWER10_radix_page_info = {
#define POWER10_BHRB_ENTRIES_LOG2 5
static void init_proc_POWER10(CPUPPCState *env)
{
/* Common Registers */
init_proc_book3s_common(env);
register_book3s_207_dbg_sprs(env);
/* Common TCG PMU */
init_tcg_pmu_power8(env);
/* POWER8 Specific Registers */
register_book3s_ids_sprs(env);
register_amr_sprs(env);
register_iamr_sprs(env);
register_book3s_purr_sprs(env);
register_power5p_common_sprs(env);
register_power5p_lpar_sprs(env);
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
register_power9_common_sprs(env);
register_HEIR64_spr(env);
register_power6_dbg_sprs(env);
register_power7_common_sprs(env);
register_power8_tce_address_control_sprs(env);
register_power8_ids_sprs(env);
register_power8_ebb_sprs(env);
register_power8_fscr_sprs(env);
register_power8_pmu_sup_sprs(env);
register_power8_pmu_user_sprs(env);
register_power8_tm_sprs(env);
register_power8_pspb_sprs(env);
register_power8_dpdes_sprs(env);
register_vtb_sprs(env);
register_power8_ic_sprs(env);
register_power9_book4_sprs(env);
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
register_power10_hash_sprs(env);
register_power10_dexcr_sprs(env);
register_power10_pmu_sup_sprs(env);
register_power10_pmu_user_sprs(env);
/* FIXME: Filter fields properly based on privilege level */
spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
spr_read_generic, spr_write_generic,
KVM_REG_PPC_PSSCR, 0);
/* env variables */
env->dcache_line_size = 128;
env->icache_line_size = 128;
@ -6689,61 +6613,17 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER10";
dc->desc = "POWER10";
pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10;
pcc->pvr_match = ppc_pvr_match_power10;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 |
PCR_COMPAT_3_00;
pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->pcr_mask = PPC_PCR_MASK_POWER10;
pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER10;
pcc->init_proc = init_proc_POWER10;
pcc->check_pow = check_pow_nocheck;
pcc->check_attn = check_attn_hid0_power9;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
PPC_FLOAT_FRSQRTES |
PPC_FLOAT_STFIWX |
PPC_FLOAT_EXT |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD |
PPC_CILDST;
pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 |
PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
(1ull << MSR_EE) |
(1ull << MSR_PR) |
(1ull << MSR_FP) |
(1ull << MSR_ME) |
(1ull << MSR_FE0) |
(1ull << MSR_SE) |
(1ull << MSR_DE) |
(1ull << MSR_FE1) |
(1ull << MSR_IR) |
(1ull << MSR_DR) |
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
(LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
(LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
LPCR_DEE | LPCR_OEE))
| LPCR_MER | LPCR_GTSE | LPCR_TC |
LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
/* DD2 adds an extra HAIL bit */
pcc->lpcr_mask |= LPCR_HAIL;
pcc->insns_flags = PPC_INSNS_FLAGS_POWER10;
pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER10;
pcc->msr_mask = PPC_MSR_MASK_POWER10;
pcc->lpcr_mask = PPC_LPCR_MASK_POWER10;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
@ -6756,11 +6636,67 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
pcc->excp_model = POWERPC_EXCP_POWER10;
pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
POWERPC_FLAG_VSX | POWERPC_FLAG_SCV |
POWERPC_FLAG_BHRB;
pcc->flags = POWERPC_FLAGS_POWER10;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
static void init_proc_POWER11(CPUPPCState *env)
{
init_proc_POWER10(env);
}
static bool ppc_pvr_match_power11(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
if (!best && (base == CPU_POWERPC_POWER11_BASE)) {
return true;
}
if (base != pcc_base) {
return false;
}
if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) {
return true;
}
return false;
}
POWERPC_FAMILY(POWER11)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER11";
dc->desc = "POWER11";
pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10_P11;
pcc->pvr_match = ppc_pvr_match_power11;
pcc->pcr_mask = PPC_PCR_MASK_POWER11;
pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER11;
pcc->init_proc = init_proc_POWER11;
pcc->check_pow = check_pow_nocheck;
pcc->check_attn = check_attn_hid0_power9;
pcc->insns_flags = PPC_INSNS_FLAGS_POWER11;
pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER11;
pcc->msr_mask = PPC_MSR_MASK_POWER11;
pcc->lpcr_mask = PPC_LPCR_MASK_POWER11;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
#if !defined(CONFIG_USER_ONLY)
/* segment page size remain the same */
pcc->hash64_opts = &ppc_hash64_opts_POWER7;
pcc->radix_page_info = &POWER10_radix_page_info;
pcc->lrg_decr_bits = 56;
#endif
pcc->excp_model = POWERPC_EXCP_POWER11;
pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAGS_POWER11;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@ -7452,11 +7388,7 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
}
static Property ppc_cpu_properties[] = {
DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration,
false),
DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration,
false),
/* add default property here */
DEFINE_PROP_END_OF_LIST(),
};

target/ppc/cpu_init.h (new file, 91 lines)

@ -0,0 +1,91 @@
#ifndef TARGET_PPC_CPU_INIT_H
#define TARGET_PPC_CPU_INIT_H
#define PPC_INSNS_FLAGS_POWER9 \
(PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | \
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | \
PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | \
PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \
PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | \
PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | \
PPC_CILDST)
#define PPC_INSNS_FLAGS_POWER10 PPC_INSNS_FLAGS_POWER9
#define PPC_INSNS_FLAGS_POWER11 PPC_INSNS_FLAGS_POWER10
#define PPC_INSNS_FLAGS2_POWER_COMMON \
(PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | \
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | \
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | \
PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_ISA300 | PPC2_PRCNTL | \
PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206)
#define PPC_INSNS_FLAGS2_POWER9 \
(PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_TM)
#define PPC_INSNS_FLAGS2_POWER10 \
(PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_ISA310)
#define PPC_INSNS_FLAGS2_POWER11 PPC_INSNS_FLAGS2_POWER10
#define PPC_MSR_MASK_POWER_COMMON \
((1ull << MSR_SF) | \
(1ull << MSR_HV) | \
(1ull << MSR_VR) | \
(1ull << MSR_VSX) | \
(1ull << MSR_EE) | \
(1ull << MSR_PR) | \
(1ull << MSR_FP) | \
(1ull << MSR_ME) | \
(1ull << MSR_FE0) | \
(1ull << MSR_SE) | \
(1ull << MSR_DE) | \
(1ull << MSR_FE1) | \
(1ull << MSR_IR) | \
(1ull << MSR_DR) | \
(1ull << MSR_PMM) | \
(1ull << MSR_RI) | \
(1ull << MSR_LE))
#define PPC_MSR_MASK_POWER9 \
(PPC_MSR_MASK_POWER_COMMON | (1ull << MSR_TM))
#define PPC_MSR_MASK_POWER10 \
PPC_MSR_MASK_POWER_COMMON
#define PPC_MSR_MASK_POWER11 PPC_MSR_MASK_POWER10
#define PPC_PCR_MASK_POWER9 \
(PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07)
#define PPC_PCR_MASK_POWER10 \
(PPC_PCR_MASK_POWER9 | PCR_COMPAT_3_00)
#define PPC_PCR_MASK_POWER11 PPC_PCR_MASK_POWER10
#define PPC_PCR_SUPPORTED_POWER9 \
(PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05)
#define PPC_PCR_SUPPORTED_POWER10 \
(PPC_PCR_SUPPORTED_POWER9 | PCR_COMPAT_3_10)
#define PPC_PCR_SUPPORTED_POWER11 PPC_PCR_SUPPORTED_POWER10
#define PPC_LPCR_MASK_POWER9 \
(LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | \
(LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | \
LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | \
(LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | \
LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | \
LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE)
/* DD2 adds an extra HAIL bit */
#define PPC_LPCR_MASK_POWER10 \
(PPC_LPCR_MASK_POWER9 | LPCR_HAIL)
#define PPC_LPCR_MASK_POWER11 PPC_LPCR_MASK_POWER10
#define POWERPC_FLAGS_POWER_COMMON \
(POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | \
POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | \
POWERPC_FLAG_VSX | POWERPC_FLAG_SCV)
#define POWERPC_FLAGS_POWER9 \
(POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_TM)
#define POWERPC_FLAGS_POWER10 \
(POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_BHRB)
#define POWERPC_FLAGS_POWER11 POWERPC_FLAGS_POWER10
#endif /* TARGET_PPC_CPU_INIT_H */


@ -324,10 +324,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
}
ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
if (ail == 0) {
return;
}
if (ail == 1) {
if (ail == 0 || ail == 1) {
/* AIL=1 is reserved, treat it like AIL=0 */
return;
}
@ -351,10 +348,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
} else {
ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
}
if (ail == 0) {
return;
}
if (ail == 1 || ail == 2) {
if (ail == 0 || ail == 1 || ail == 2) {
/* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
return;
}
@ -1661,6 +1655,7 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_POWER8:
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
case POWERPC_EXCP_POWER11:
powerpc_excp_books(cpu, excp);
break;
default:
@ -1682,51 +1677,54 @@ void ppc_cpu_do_interrupt(CPUState *cs)
PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
static int p7_interrupt_powersave(CPUPPCState *env)
static int p7_interrupt_powersave(uint32_t pending_interrupts,
target_ulong lpcr)
{
if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
(env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
(lpcr & LPCR_P7_PECE0)) {
return PPC_INTERRUPT_EXT;
}
if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
(env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
(lpcr & LPCR_P7_PECE1)) {
return PPC_INTERRUPT_DECR;
}
if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
(env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
(lpcr & LPCR_P7_PECE2)) {
return PPC_INTERRUPT_MCK;
}
if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
(env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
(lpcr & LPCR_P7_PECE2)) {
return PPC_INTERRUPT_HMI;
}
if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
static int p7_next_unmasked_interrupt(CPUPPCState *env)
static int p7_next_unmasked_interrupt(CPUPPCState *env,
uint32_t pending_interrupts,
target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
/* Ignore MSR[EE] when coming out of some power management states */
bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
assert((pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
if (cs->halted) {
/* LPCR[PECE] controls which interrupts can exit power-saving mode */
return p7_interrupt_powersave(env);
return p7_interrupt_powersave(pending_interrupts, lpcr);
}
/* Machine check exception */
if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
@ -1736,9 +1734,9 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
}
/* External interrupt can ignore MSR:EE under some circumstances */
if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
if (pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(lpcr & LPCR_LPES0);
bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@ -1748,10 +1746,10 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
}
@ -1764,39 +1762,42 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env)
PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
static int p8_interrupt_powersave(CPUPPCState *env)
static int p8_interrupt_powersave(uint32_t pending_interrupts,
target_ulong lpcr)
{
if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
(lpcr & LPCR_P8_PECE2)) {
return PPC_INTERRUPT_EXT;
}
if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
(lpcr & LPCR_P8_PECE3)) {
return PPC_INTERRUPT_DECR;
}
if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
if ((pending_interrupts & PPC_INTERRUPT_MCK) &&
(lpcr & LPCR_P8_PECE4)) {
return PPC_INTERRUPT_MCK;
}
if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
if ((pending_interrupts & PPC_INTERRUPT_HMI) &&
(lpcr & LPCR_P8_PECE4)) {
return PPC_INTERRUPT_HMI;
}
if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
(lpcr & LPCR_P8_PECE0)) {
return PPC_INTERRUPT_DOORBELL;
}
if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
(env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
(lpcr & LPCR_P8_PECE1)) {
return PPC_INTERRUPT_HDOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
static int p8_next_unmasked_interrupt(CPUPPCState *env)
static int p8_next_unmasked_interrupt(CPUPPCState *env,
uint32_t pending_interrupts,
target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
@ -1807,18 +1808,18 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
if (cs->halted) {
/* LPCR[PECE] controls which interrupts can exit power-saving mode */
return p8_interrupt_powersave(env);
return p8_interrupt_powersave(pending_interrupts, lpcr);
}
/* Machine check exception */
if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
bool hdice = !!(lpcr & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@ -1826,9 +1827,9 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
}
/* External interrupt can ignore MSR:EE under some circumstances */
if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
if (pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(lpcr & LPCR_LPES0);
bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@ -1838,20 +1839,20 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* EBB exception */
if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@ -1871,60 +1872,65 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env)
PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
static int p9_interrupt_powersave(CPUPPCState *env)
static int p9_interrupt_powersave(CPUPPCState *env,
uint32_t pending_interrupts,
target_ulong lpcr)
{
/* External Exception */
if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
(env->spr[SPR_LPCR] & LPCR_EEE)) {
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
if ((pending_interrupts & PPC_INTERRUPT_EXT) &&
(lpcr & LPCR_EEE)) {
bool heic = !!(lpcr & LPCR_HEIC);
if (!heic || !FIELD_EX64_HV(env->msr) ||
FIELD_EX64(env->msr, MSR, PR)) {
return PPC_INTERRUPT_EXT;
}
}
/* Decrementer Exception */
if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
(env->spr[SPR_LPCR] & LPCR_DEE)) {
if ((pending_interrupts & PPC_INTERRUPT_DECR) &&
(lpcr & LPCR_DEE)) {
return PPC_INTERRUPT_DECR;
}
/* Machine Check or Hypervisor Maintenance Exception */
if (env->spr[SPR_LPCR] & LPCR_OEE) {
if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
if (lpcr & LPCR_OEE) {
if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
if (pending_interrupts & PPC_INTERRUPT_HMI) {
return PPC_INTERRUPT_HMI;
}
}
/* Privileged Doorbell Exception */
if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
(env->spr[SPR_LPCR] & LPCR_PDEE)) {
if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
(lpcr & LPCR_PDEE)) {
return PPC_INTERRUPT_DOORBELL;
}
/* Hypervisor Doorbell Exception */
if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
(env->spr[SPR_LPCR] & LPCR_HDEE)) {
if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
(lpcr & LPCR_HDEE)) {
return PPC_INTERRUPT_HDOORBELL;
}
/* Hypervisor virtualization exception */
if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
(env->spr[SPR_LPCR] & LPCR_HVEE)) {
if ((pending_interrupts & PPC_INTERRUPT_HVIRT) &&
(lpcr & LPCR_HVEE)) {
return PPC_INTERRUPT_HVIRT;
}
if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
return 0;
}
static int p9_next_unmasked_interrupt(CPUPPCState *env)
static int p9_next_unmasked_interrupt(CPUPPCState *env,
uint32_t pending_interrupts,
target_ulong lpcr)
{
CPUState *cs = env_cpu(env);
/* Ignore MSR[EE] when coming out of some power management states */
bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
assert((pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
if (cs->halted) {
if (env->spr[SPR_PSSCR] & PSSCR_EC) {
@ -1932,7 +1938,7 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
* When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
* wakeup the processor
*/
return p9_interrupt_powersave(env);
return p9_interrupt_powersave(env, pending_interrupts, lpcr);
} else {
/*
* When it's clear, any system-caused exception exits power-saving
@ -1943,14 +1949,14 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
/* Machine check exception */
if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
/* Hypervisor decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
bool hdice = !!(lpcr & LPCR_HDICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@ -1958,18 +1964,18 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
/* Hypervisor virtualization interrupt */
if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
/* LPCR will be clear when not supported so this will work */
bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
bool hvice = !!(lpcr & LPCR_HVICE);
if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
return PPC_INTERRUPT_HVIRT;
}
}
/* External interrupt can ignore MSR:EE under some circumstances */
if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
if (pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(lpcr & LPCR_LPES0);
bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -1979,20 +1985,20 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
}
if (msr_ee != 0) {
/* Decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* EBB exception */
if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@@ -2010,27 +2016,31 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env)
static int ppc_next_unmasked_interrupt(CPUPPCState *env)
{
uint32_t pending_interrupts = env->pending_interrupts;
target_ulong lpcr = env->spr[SPR_LPCR];
bool async_deliver;
#ifdef TARGET_PPC64
switch (env->excp_model) {
case POWERPC_EXCP_POWER7:
return p7_next_unmasked_interrupt(env);
return p7_next_unmasked_interrupt(env, pending_interrupts, lpcr);
case POWERPC_EXCP_POWER8:
return p8_next_unmasked_interrupt(env);
return p8_next_unmasked_interrupt(env, pending_interrupts, lpcr);
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
return p9_next_unmasked_interrupt(env);
case POWERPC_EXCP_POWER11:
return p9_next_unmasked_interrupt(env, pending_interrupts, lpcr);
default:
break;
}
#endif
bool async_deliver;
/* External reset */
if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
if (pending_interrupts & PPC_INTERRUPT_RESET) {
return PPC_INTERRUPT_RESET;
}
/* Machine check exception */
if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
if (pending_interrupts & PPC_INTERRUPT_MCK) {
return PPC_INTERRUPT_MCK;
}
#if 0 /* TODO */
@@ -2049,9 +2059,9 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
/* Hypervisor decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
if (pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
bool hdice = !!(lpcr & LPCR_HDICE);
if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
return PPC_INTERRUPT_HDECR;
@@ -2059,18 +2069,18 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
}
/* Hypervisor virtualization interrupt */
if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
if (pending_interrupts & PPC_INTERRUPT_HVIRT) {
/* LPCR will be clear when not supported so this will work */
bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
bool hvice = !!(lpcr & LPCR_HVICE);
if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
return PPC_INTERRUPT_HVIRT;
}
}
/* External interrupt can ignore MSR:EE under some circumstances */
if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
if (pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(lpcr & LPCR_LPES0);
bool heic = !!(lpcr & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
!FIELD_EX64(env->msr, MSR, PR))) ||
@@ -2080,45 +2090,45 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env)
}
if (FIELD_EX64(env->msr, MSR, CE)) {
/* External critical interrupt */
if (env->pending_interrupts & PPC_INTERRUPT_CEXT) {
if (pending_interrupts & PPC_INTERRUPT_CEXT) {
return PPC_INTERRUPT_CEXT;
}
}
if (async_deliver != 0) {
/* Watchdog timer on embedded PowerPC */
if (env->pending_interrupts & PPC_INTERRUPT_WDT) {
if (pending_interrupts & PPC_INTERRUPT_WDT) {
return PPC_INTERRUPT_WDT;
}
if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
return PPC_INTERRUPT_CDOORBELL;
}
/* Fixed interval timer on embedded PowerPC */
if (env->pending_interrupts & PPC_INTERRUPT_FIT) {
if (pending_interrupts & PPC_INTERRUPT_FIT) {
return PPC_INTERRUPT_FIT;
}
/* Programmable interval timer on embedded PowerPC */
if (env->pending_interrupts & PPC_INTERRUPT_PIT) {
if (pending_interrupts & PPC_INTERRUPT_PIT) {
return PPC_INTERRUPT_PIT;
}
/* Decrementer exception */
if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
if (pending_interrupts & PPC_INTERRUPT_DECR) {
return PPC_INTERRUPT_DECR;
}
if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_DOORBELL) {
return PPC_INTERRUPT_DOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
return PPC_INTERRUPT_HDOORBELL;
}
if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
if (pending_interrupts & PPC_INTERRUPT_PERFM) {
return PPC_INTERRUPT_PERFM;
}
/* Thermal interrupt */
if (env->pending_interrupts & PPC_INTERRUPT_THERM) {
if (pending_interrupts & PPC_INTERRUPT_THERM) {
return PPC_INTERRUPT_THERM;
}
/* EBB exception */
if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
if (pending_interrupts & PPC_INTERRUPT_EBB) {
/*
* EBB exception must be taken in problem state and
* with BESCR_GE set.
@@ -2187,7 +2197,6 @@ static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_PERFM:
env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case 0:
@@ -2238,7 +2247,9 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_DOORBELL:
env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
if (!env->resume_as_sreset) {
env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
}
if (is_book3s_arch2x(env)) {
powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
} else {
@@ -2246,11 +2257,12 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
}
break;
case PPC_INTERRUPT_HDOORBELL:
env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
if (!env->resume_as_sreset) {
env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
}
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_EBB: /* EBB exception */
@@ -2303,6 +2315,7 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
/* HDEC clears on delivery */
/* XXX: should not see an HDEC if resume_as_sreset. assert? */
env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
powerpc_excp(cpu, POWERPC_EXCP_HDECR);
break;
@@ -2322,15 +2335,18 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_DECR);
break;
case PPC_INTERRUPT_DOORBELL:
env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
if (!env->resume_as_sreset) {
env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
}
powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
break;
case PPC_INTERRUPT_HDOORBELL:
env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
if (!env->resume_as_sreset) {
env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
}
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_EBB: /* EBB exception */
@@ -2372,6 +2388,7 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
return p8_deliver_interrupt(env, interrupt);
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
case POWERPC_EXCP_POWER11:
return p9_deliver_interrupt(env, interrupt);
default:
break;
@@ -2444,7 +2461,6 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
break;
case PPC_INTERRUPT_PERFM:
env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
powerpc_excp(cpu, POWERPC_EXCP_PERFM);
break;
case PPC_INTERRUPT_THERM: /* Thermal interrupt */
@@ -3163,6 +3179,7 @@ void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
case POWERPC_EXCP_POWER8:
case POWERPC_EXCP_POWER9:
case POWERPC_EXCP_POWER10:
case POWERPC_EXCP_POWER11:
/*
* Machine check codes can be found in processor User Manual or
* Linux or skiboot source.


@@ -83,15 +83,16 @@ static bool hreg_check_bhrb_enable(CPUPPCState *env)
static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
{
uint32_t hflags = 0;
#if defined(TARGET_PPC64)
if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
if (mmcr0 & MMCR0_PMCC0) {
hflags |= 1 << HFLAGS_PMCC0;
}
if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
if (mmcr0 & MMCR0_PMCC1) {
hflags |= 1 << HFLAGS_PMCC1;
}
if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
if (mmcr0 & MMCR0_PMCjCE) {
hflags |= 1 << HFLAGS_PMCJCE;
}
if (hreg_check_bhrb_enable(env)) {
@@ -101,9 +102,9 @@ static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
#ifndef CONFIG_USER_ONLY
if (env->pmc_ins_cnt) {
hflags |= 1 << HFLAGS_INSN_CNT;
}
if (env->pmc_ins_cnt & 0x1e) {
hflags |= 1 << HFLAGS_PMC_OTHER;
if (env->pmc_ins_cnt & 0x1e) {
hflags |= 1 << HFLAGS_PMC_OTHER;
}
}
#endif
#endif
@@ -143,10 +144,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
if (ppc_flags & POWERPC_FLAG_DE) {
target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) {
if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(msr, MSR, DE)) {
hflags |= 1 << HFLAGS_SE;
}
if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) {
if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(msr, MSR, DE)) {
hflags |= 1 << HFLAGS_BE;
}
} else {


@@ -118,43 +118,11 @@ static const VMStateInfo vmstate_info_vsr = {
#define VMSTATE_VSR_ARRAY(_f, _s, _n) \
VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
PowerPCCPU *cpu = opaque;
return cpu->pre_2_8_migration;
}
#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
PowerPCCPU *cpu = opaque;
return cpu->pre_3_0_migration;
}
#endif
static int cpu_pre_save(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUPPCState *env = &cpu->env;
int i;
uint64_t insns_compat_mask =
PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
| PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
| PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
| PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
| PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
| PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
| PPC_64B | PPC_64BX | PPC_ALTIVEC
| PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
| PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
| PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
| PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
| PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
| PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM
| PPC2_MEM_LWSYNC;
env->spr[SPR_LR] = env->lr;
env->spr[SPR_CTR] = env->ctr;
@@ -177,35 +145,6 @@ static int cpu_pre_save(void *opaque)
env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
}
/* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
if (cpu->pre_2_8_migration) {
/*
* Mask out bits that got added to msr_mask since the versions
* which stupidly included it in the migration stream.
*/
target_ulong metamask = 0
#if defined(TARGET_PPC64)
| (1ULL << MSR_TS0)
| (1ULL << MSR_TS1)
#endif
;
cpu->mig_msr_mask = env->msr_mask & ~metamask;
cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
/*
* CPU models supported by old machines all have
* PPC_MEM_TLBIE, so we set it unconditionally to allow
* backward migration from a POWER9 host to a POWER8 host.
*/
cpu->mig_insns_flags |= PPC_MEM_TLBIE;
cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
cpu->mig_nb_BATs = env->nb_BATs;
}
if (cpu->pre_3_0_migration) {
if (cpu->hash64_opts) {
cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
}
}
/* Used to retain migration compatibility for pre 6.0 for 601 machines. */
env->hflags_compat_nmsr = 0;
@@ -549,12 +488,11 @@ static int slb_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_slb = {
.name = "cpu/slb",
.version_id = 1,
.version_id = 2,
.minimum_version_id = 1,
.needed = slb_needed,
.post_load = slb_post_load,
.fields = (const VMStateField[]) {
VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
VMSTATE_END_OF_LIST()
}
@@ -676,7 +614,7 @@ static bool compat_needed(void *opaque)
PowerPCCPU *cpu = opaque;
assert(!(cpu->compat_pvr && !cpu->vhyp));
return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
return cpu->compat_pvr != 0;
}
static const VMStateDescription vmstate_compat = {
@@ -760,12 +698,6 @@ const VMStateDescription vmstate_ppc_cpu = {
/* Backward compatible internal state */
VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),
/* Sanity checking */
VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
cpu_pre_2_8_migration),
VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * const []) {


@@ -288,7 +288,7 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
PowerPCCPU *ccpu = POWERPC_CPU(ccs);
uint32_t thread_id = ppc_cpu_tir(ccpu);
ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
}
bql_unlock();
}


@@ -993,6 +993,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
int exec_prot, pp_prot, amr_prot, prot;
int need_prot;
hwaddr raddr;
bool vrma = false;
/*
* Note on LPCR usage: 970 uses HID4, but our special variant of
@@ -1022,6 +1023,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
}
} else if (ppc_hash64_use_vrma(env)) {
/* Emulated VRMA mode */
vrma = true;
slb = &vrma_slbe;
if (build_vrma_slbe(cpu, slb) != 0) {
/* Invalid VRMA setup, machine check */
@@ -1136,7 +1138,12 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
amr_prot = ppc_hash64_amr_prot(cpu, pte);
if (vrma) {
/* VRMA does not check keys */
amr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
amr_prot = ppc_hash64_amr_prot(cpu, pte);
}
prot = exec_prot & pp_prot & amr_prot;
need_prot = check_prot_access_type(PAGE_RWX, access_type);


@@ -1820,7 +1820,7 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret,
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
if (unlikely(Rc(ctx->opcode) != 0)) {
if (unlikely(compute_rc0)) {
gen_set_Rc0(ctx, ret);
}
}
@@ -6423,8 +6423,6 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
opc_handler_t **table, *handler;
uint32_t inval;
ctx->opcode = insn;
LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
ctx->le_mode ? "little" : "big");
@@ -6558,6 +6556,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next = pc += 4;
if (!is_prefix_insn(ctx, insn)) {
ctx->opcode = insn;
ok = (decode_insn32(ctx, insn) ||
decode_legacy(cpu, ctx, insn));
} else if ((pc & 63) == 0) {


@@ -176,6 +176,7 @@ qtests_ppc64 = \
qtests_ppc + \
(config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xive2-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-spi-seeprom-test'] : []) + \
(config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-host-i2c-test'] : []) + \
(config_all_devices.has_key('CONFIG_PSERIES') ? ['numa-test'] : []) + \
@@ -345,6 +346,7 @@ qtests = {
'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'],
'migration-test': migration_files,
'pxe-test': files('boot-sector.c'),
'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'),
'qos-test': [chardev, io, qos_test_ss.apply({}).sources()],
'tpm-crb-swtpm-test': [io, tpmemu_files],
'tpm-crb-test': [io, tpmemu_files],


@@ -0,0 +1,190 @@
/*
* QTest testcase for PowerNV 10 interrupt controller (xive2)
* - Common functions for XIVE2 tests
*
* Copyright (c) 2024, IBM Corporation.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "libqtest.h"
#include "pnv-xive2-common.h"
static uint64_t pnv_xscom_addr(uint32_t pcba)
{
return P10_XSCOM_BASE | ((uint64_t) pcba << 3);
}
static uint64_t pnv_xive_xscom_addr(uint32_t reg)
{
return pnv_xscom_addr(XIVE_XSCOM + reg);
}
uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg)
{
return qtest_readq(qts, pnv_xive_xscom_addr(reg));
}
void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val)
{
qtest_writeq(qts, pnv_xive_xscom_addr(reg), val);
}
static void xive_get_struct(QTestState *qts, uint64_t src, void *dest,
size_t size)
{
uint8_t *destination = (uint8_t *)dest;
size_t i;
for (i = 0; i < size; i++) {
*(destination + i) = qtest_readb(qts, src + i);
}
}
static void xive_copy_struct(QTestState *qts, void *src, uint64_t dest,
size_t size)
{
uint8_t *source = (uint8_t *)src;
size_t i;
for (i = 0; i < size; i++) {
qtest_writeb(qts, dest + i, *(source + i));
}
}
uint64_t xive_get_queue_addr(uint32_t end_index)
{
return XIVE_QUEUE_MEM + (uint64_t)end_index * XIVE_QUEUE_SIZE;
}
uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page,
uint32_t offset)
{
uint64_t addr;
addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1));
if (page == 1) {
addr += 1 << XIVE_PAGE_SHIFT;
}
return qtest_readb(qts, addr + offset);
}
void set_esb(QTestState *qts, uint32_t index, uint8_t page,
uint32_t offset, uint32_t val)
{
uint64_t addr;
addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1));
if (page == 1) {
addr += 1 << XIVE_PAGE_SHIFT;
}
return qtest_writel(qts, addr + offset, cpu_to_be32(val));
}
void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp)
{
uint64_t addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp);
xive_get_struct(qts, addr, nvp, sizeof(Xive2Nvp));
}
void set_nvp(QTestState *qts, uint32_t index, uint8_t first)
{
uint64_t nvp_addr;
Xive2Nvp nvp;
uint64_t report_addr;
nvp_addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp);
report_addr = (XIVE_REPORT_MEM + (uint64_t)index * XIVE_REPORT_SIZE) >> 8;
memset(&nvp, 0, sizeof(nvp));
nvp.w0 = xive_set_field32(NVP2_W0_VALID, 0, 1);
nvp.w0 = xive_set_field32(NVP2_W0_PGOFIRST, nvp.w0, first);
nvp.w6 = xive_set_field32(NVP2_W6_REPORTING_LINE, nvp.w6,
(report_addr >> 24) & 0xfffffff);
nvp.w7 = xive_set_field32(NVP2_W7_REPORTING_LINE, nvp.w7,
report_addr & 0xffffff);
xive_copy_struct(qts, &nvp, nvp_addr, sizeof(nvp));
}
static uint64_t get_cl_pair_addr(Xive2Nvp *nvp)
{
uint64_t upper = xive_get_field32(0x0fffffff, nvp->w6);
uint64_t lower = xive_get_field32(0xffffff00, nvp->w7);
return (upper << 32) | (lower << 8);
}
void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair)
{
uint64_t addr = get_cl_pair_addr(nvp);
xive_get_struct(qts, addr, cl_pair, XIVE_REPORT_SIZE);
}
void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair)
{
uint64_t addr = get_cl_pair_addr(nvp);
xive_copy_struct(qts, cl_pair, addr, XIVE_REPORT_SIZE);
}
void set_nvg(QTestState *qts, uint32_t index, uint8_t next)
{
uint64_t nvg_addr;
Xive2Nvgc nvg;
nvg_addr = XIVE_NVG_MEM + (uint64_t)index * sizeof(Xive2Nvgc);
memset(&nvg, 0, sizeof(nvg));
nvg.w0 = xive_set_field32(NVGC2_W0_VALID, 0, 1);
nvg.w0 = xive_set_field32(NVGC2_W0_PGONEXT, nvg.w0, next);
xive_copy_struct(qts, &nvg, nvg_addr, sizeof(nvg));
}
void set_eas(QTestState *qts, uint32_t index, uint32_t end_index,
uint32_t data)
{
uint64_t eas_addr;
Xive2Eas eas;
eas_addr = XIVE_EAS_MEM + (uint64_t)index * sizeof(Xive2Eas);
memset(&eas, 0, sizeof(eas));
eas.w = xive_set_field64(EAS2_VALID, 0, 1);
eas.w = xive_set_field64(EAS2_END_INDEX, eas.w, end_index);
eas.w = xive_set_field64(EAS2_END_DATA, eas.w, data);
xive_copy_struct(qts, &eas, eas_addr, sizeof(eas));
}
void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index,
uint8_t priority, bool i)
{
uint64_t end_addr, queue_addr, queue_hi, queue_lo;
uint8_t queue_size;
Xive2End end;
end_addr = XIVE_END_MEM + (uint64_t)index * sizeof(Xive2End);
queue_addr = xive_get_queue_addr(index);
queue_hi = (queue_addr >> 32) & END2_W2_EQ_ADDR_HI;
queue_lo = queue_addr & END2_W3_EQ_ADDR_LO;
queue_size = ctz16(XIVE_QUEUE_SIZE) - 12;
memset(&end, 0, sizeof(end));
end.w0 = xive_set_field32(END2_W0_VALID, 0, 1);
end.w0 = xive_set_field32(END2_W0_ENQUEUE, end.w0, 1);
end.w0 = xive_set_field32(END2_W0_UCOND_NOTIFY, end.w0, 1);
end.w0 = xive_set_field32(END2_W0_BACKLOG, end.w0, 1);
end.w1 = xive_set_field32(END2_W1_GENERATION, 0, 1);
end.w2 = cpu_to_be32(queue_hi);
end.w3 = cpu_to_be32(queue_lo);
end.w3 = xive_set_field32(END2_W3_QSIZE, end.w3, queue_size);
end.w6 = xive_set_field32(END2_W6_IGNORE, 0, i);
end.w6 = xive_set_field32(END2_W6_VP_OFFSET, end.w6, nvp_index);
end.w7 = xive_set_field32(END2_W7_F0_PRIORITY, 0, priority);
xive_copy_struct(qts, &end, end_addr, sizeof(end));
}


@@ -0,0 +1,111 @@
/*
* QTest testcase for PowerNV 10 interrupt controller (xive2)
*
* Copyright (c) 2024, IBM Corporation.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef TEST_PNV_XIVE2_COMMON_H
#define TEST_PNV_XIVE2_COMMON_H
#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
PPC_BIT32(bs))
#include "qemu/bswap.h"
#include "hw/intc/pnv_xive2_regs.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
/*
* sizing:
* 128 interrupts
* => ESB BAR range: 16M
* 256 ENDs
* => END BAR range: 16M
* 256 VPs
* => NVPG,NVC BAR range: 32M
*/
#define MAX_IRQS 128
#define MAX_ENDS 256
#define MAX_VPS 256
#define XIVE_PAGE_SHIFT 16
#define XIVE_TRIGGER_PAGE 0
#define XIVE_EOI_PAGE 1
#define XIVE_IC_ADDR 0x0006030200000000ull
#define XIVE_IC_TM_INDIRECT (XIVE_IC_ADDR + (256 << XIVE_PAGE_SHIFT))
#define XIVE_IC_BAR ((0x3ull << 62) | XIVE_IC_ADDR)
#define XIVE_TM_BAR 0xc006030203180000ull
#define XIVE_ESB_ADDR 0x0006050000000000ull
#define XIVE_ESB_BAR ((0x3ull << 62) | XIVE_ESB_ADDR)
#define XIVE_END_BAR 0xc006060000000000ull
#define XIVE_NVPG_ADDR 0x0006040000000000ull
#define XIVE_NVPG_BAR ((0x3ull << 62) | XIVE_NVPG_ADDR)
#define XIVE_NVC_ADDR 0x0006030208000000ull
#define XIVE_NVC_BAR ((0x3ull << 62) | XIVE_NVC_ADDR)
/*
* Memory layout
* A check is done when a table is configured to ensure that the max
* size of the resource fits in the table.
*/
#define XIVE_VST_SIZE 0x10000ull /* must be at least 4k */
#define XIVE_MEM_START 0x10000000ull
#define XIVE_ESB_MEM XIVE_MEM_START
#define XIVE_EAS_MEM (XIVE_ESB_MEM + XIVE_VST_SIZE)
#define XIVE_END_MEM (XIVE_EAS_MEM + XIVE_VST_SIZE)
#define XIVE_NVP_MEM (XIVE_END_MEM + XIVE_VST_SIZE)
#define XIVE_NVG_MEM (XIVE_NVP_MEM + XIVE_VST_SIZE)
#define XIVE_NVC_MEM (XIVE_NVG_MEM + XIVE_VST_SIZE)
#define XIVE_SYNC_MEM (XIVE_NVC_MEM + XIVE_VST_SIZE)
#define XIVE_QUEUE_MEM (XIVE_SYNC_MEM + XIVE_VST_SIZE)
#define XIVE_QUEUE_SIZE 4096 /* per End */
#define XIVE_REPORT_MEM (XIVE_QUEUE_MEM + XIVE_QUEUE_SIZE * MAX_VPS)
#define XIVE_REPORT_SIZE 256 /* two cache lines per NVP */
#define XIVE_MEM_END (XIVE_REPORT_MEM + XIVE_REPORT_SIZE * MAX_VPS)
#define P10_XSCOM_BASE 0x000603fc00000000ull
#define XIVE_XSCOM 0x2010800ull
#define XIVE_ESB_RESET 0b00
#define XIVE_ESB_OFF 0b01
#define XIVE_ESB_PENDING 0b10
#define XIVE_ESB_QUEUED 0b11
#define XIVE_ESB_GET 0x800
#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
#define XIVE_ESB_STORE_EOI 0x400 /* Store */
extern uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg);
extern void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val);
extern uint64_t xive_get_queue_addr(uint32_t end_index);
extern uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page,
uint32_t offset);
extern void set_esb(QTestState *qts, uint32_t index, uint8_t page,
uint32_t offset, uint32_t val);
extern void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp);
extern void set_nvp(QTestState *qts, uint32_t index, uint8_t first);
extern void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair);
extern void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair);
extern void set_nvg(QTestState *qts, uint32_t index, uint8_t next);
extern void set_eas(QTestState *qts, uint32_t index, uint32_t end_index,
uint32_t data);
extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index,
uint8_t priority, bool i);
void test_flush_sync_inject(QTestState *qts);
#endif /* TEST_PNV_XIVE2_COMMON_H */


@@ -0,0 +1,205 @@
/*
* QTest testcase for PowerNV 10 interrupt controller (xive2)
* - Test cache flush/queue sync injection
*
* Copyright (c) 2024, IBM Corporation.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "libqtest.h"
#include "pnv-xive2-common.h"
#include "hw/intc/pnv_xive2_regs.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#define PNV_XIVE2_QUEUE_IPI 0x00
#define PNV_XIVE2_QUEUE_HW 0x01
#define PNV_XIVE2_QUEUE_NXC 0x02
#define PNV_XIVE2_QUEUE_INT 0x03
#define PNV_XIVE2_QUEUE_OS 0x04
#define PNV_XIVE2_QUEUE_POOL 0x05
#define PNV_XIVE2_QUEUE_HARD 0x06
#define PNV_XIVE2_CACHE_ENDC 0x08
#define PNV_XIVE2_CACHE_ESBC 0x09
#define PNV_XIVE2_CACHE_EASC 0x0a
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
#define PNV_XIVE2_CACHE_NXC 0x18
#define PNV_XIVE2_SYNC_IPI 0x000
#define PNV_XIVE2_SYNC_HW 0x080
#define PNV_XIVE2_SYNC_NxC 0x100
#define PNV_XIVE2_SYNC_INT 0x180
#define PNV_XIVE2_SYNC_OS_ESC 0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300
#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
static uint64_t get_sync_addr(uint32_t src_pir, int ic_topo_id, int type)
{
int thread_nr = src_pir & 0x7f;
uint64_t addr = XIVE_SYNC_MEM + thread_nr * 512 + ic_topo_id * 32 + type;
return addr;
}
static uint8_t get_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id,
int type)
{
uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type);
return qtest_readb(qts, addr);
}
static void clr_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id,
int type)
{
uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type);
qtest_writeb(qts, addr, 0x0);
}
static void inject_cache_flush(QTestState *qts, int ic_topo_id,
uint64_t scom_addr)
{
(void)ic_topo_id;
pnv_xive_xscom_write(qts, scom_addr, 0);
}
static void inject_queue_sync(QTestState *qts, int ic_topo_id, uint64_t offset)
{
(void)ic_topo_id;
uint64_t addr = XIVE_IC_ADDR + (VST_SYNC << XIVE_PAGE_SHIFT) + offset;
qtest_writeq(qts, addr, 0);
}
static void inject_op(QTestState *qts, int ic_topo_id, int type)
{
switch (type) {
case PNV_XIVE2_QUEUE_IPI:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_IPI);
break;
case PNV_XIVE2_QUEUE_HW:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HW);
break;
case PNV_XIVE2_QUEUE_NXC:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NxC);
break;
case PNV_XIVE2_QUEUE_INT:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_INT);
break;
case PNV_XIVE2_QUEUE_OS:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_OS_ESC);
break;
case PNV_XIVE2_QUEUE_POOL:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_POOL_ESC);
break;
case PNV_XIVE2_QUEUE_HARD:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HARD_ESC);
break;
case PNV_XIVE2_CACHE_ENDC:
inject_cache_flush(qts, ic_topo_id, X_VC_ENDC_FLUSH_INJECT);
break;
case PNV_XIVE2_CACHE_ESBC:
inject_cache_flush(qts, ic_topo_id, X_VC_ESBC_FLUSH_INJECT);
break;
case PNV_XIVE2_CACHE_EASC:
inject_cache_flush(qts, ic_topo_id, X_VC_EASC_FLUSH_INJECT);
break;
case PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_NCO);
break;
case PNV_XIVE2_QUEUE_NXC_LD_LCL_CO:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_CO);
break;
case PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_NCI);
break;
case PNV_XIVE2_QUEUE_NXC_ST_LCL_CI:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_CI);
break;
case PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_NCI);
break;
case PNV_XIVE2_QUEUE_NXC_ST_RMT_CI:
inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_CI);
break;
case PNV_XIVE2_CACHE_NXC:
inject_cache_flush(qts, ic_topo_id, X_PC_NXC_FLUSH_INJECT);
break;
default:
g_assert_not_reached();
break;
}
}
const uint8_t xive_inject_tests[] = {
PNV_XIVE2_QUEUE_IPI,
PNV_XIVE2_QUEUE_HW,
PNV_XIVE2_QUEUE_NXC,
PNV_XIVE2_QUEUE_INT,
PNV_XIVE2_QUEUE_OS,
PNV_XIVE2_QUEUE_POOL,
PNV_XIVE2_QUEUE_HARD,
PNV_XIVE2_CACHE_ENDC,
PNV_XIVE2_CACHE_ESBC,
PNV_XIVE2_CACHE_EASC,
PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO,
PNV_XIVE2_QUEUE_NXC_LD_LCL_CO,
PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI,
PNV_XIVE2_QUEUE_NXC_ST_LCL_CI,
PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI,
PNV_XIVE2_QUEUE_NXC_ST_RMT_CI,
PNV_XIVE2_CACHE_NXC,
};
void test_flush_sync_inject(QTestState *qts)
{
int ic_topo_id = 0;
/*
* Writes performed by qtest are not done in the context of a thread.
* This means that QEMU XIVE code doesn't have a way to determine what
* thread is originating the write. In order to allow for some testing,
* QEMU XIVE code will assume a PIR of 0 when unable to determine the
* source thread for cache flush and queue sync inject operations.
* See hw/intc/pnv_xive2.c: pnv_xive2_inject_notify() for details.
*/
int src_pir = 0;
int test_nr;
uint8_t byte;
printf("# ============================================================\n");
printf("# Starting cache flush/queue sync injection tests...\n");
for (test_nr = 0; test_nr < sizeof(xive_inject_tests);
test_nr++) {
int op_type = xive_inject_tests[test_nr];
printf("# Running test %d\n", test_nr);
/* start with status byte set to 0 */
clr_sync(qts, src_pir, ic_topo_id, op_type);
byte = get_sync(qts, src_pir, ic_topo_id, op_type);
g_assert_cmphex(byte, ==, 0);
/* request cache flush or queue sync operation */
inject_op(qts, ic_topo_id, op_type);
/* verify that status byte was written to 0xff */
byte = get_sync(qts, src_pir, ic_topo_id, op_type);
g_assert_cmphex(byte, ==, 0xff);
clr_sync(qts, src_pir, ic_topo_id, op_type);
}
}


@@ -0,0 +1,344 @@
/*
* QTest testcase for PowerNV 10 interrupt controller (xive2)
* - Test irq to hardware thread
* - Test 'Pull Thread Context to Odd Thread Reporting Line'
*
* Copyright (c) 2024, IBM Corporation.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "libqtest.h"
#include "pnv-xive2-common.h"
#include "hw/intc/pnv_xive2_regs.h"
#include "hw/ppc/xive_regs.h"
#include "hw/ppc/xive2_regs.h"
#define SMT 4 /* some tests will break if less than 4 */
static void set_table(QTestState *qts, uint64_t type, uint64_t addr)
{
uint64_t vsd, size, log_size;
/*
* First, let's make sure that all the resources used fit in the
* given table.
*/
switch (type) {
case VST_ESB:
size = MAX_IRQS / 4;
break;
case VST_EAS:
size = MAX_IRQS * 8;
break;
case VST_END:
size = MAX_ENDS * 32;
break;
case VST_NVP:
case VST_NVG:
case VST_NVC:
size = MAX_VPS * 32;
break;
case VST_SYNC:
size = 64 * 1024;
break;
default:
g_assert_not_reached();
}
g_assert_cmpuint(size, <=, XIVE_VST_SIZE);
log_size = ctzl(XIVE_VST_SIZE) - 12;
vsd = ((uint64_t) VSD_MODE_EXCLUSIVE) << 62 | addr | log_size;
pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_ADDR, type << 48);
pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_DATA, vsd);
if (type != VST_EAS && type != VST_IC && type != VST_ERQ) {
pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_ADDR, type << 48);
pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_DATA, vsd);
}
}
static void set_tima8(QTestState *qts, uint32_t pir, uint32_t offset,
uint8_t b)
{
uint64_t ic_addr;
ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
qtest_writeb(qts, ic_addr + offset, b);
}
static void set_tima32(QTestState *qts, uint32_t pir, uint32_t offset,
uint32_t l)
{
uint64_t ic_addr;
ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
qtest_writel(qts, ic_addr + offset, l);
}
static uint8_t get_tima8(QTestState *qts, uint32_t pir, uint32_t offset)
{
uint64_t ic_addr;
ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
return qtest_readb(qts, ic_addr + offset);
}
static uint16_t get_tima16(QTestState *qts, uint32_t pir, uint32_t offset)
{
uint64_t ic_addr;
ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
return qtest_readw(qts, ic_addr + offset);
}
static uint32_t get_tima32(QTestState *qts, uint32_t pir, uint32_t offset)
{
uint64_t ic_addr;
ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT);
return qtest_readl(qts, ic_addr + offset);
}
static void reset_pool_threads(QTestState *qts)
{
uint8_t first_group = 0;
int i;
for (i = 0; i < SMT; i++) {
uint32_t nvp_idx = 0x100 + i;
set_nvp(qts, nvp_idx, first_group);
set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD0, 0x000000ff);
set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD1, 0);
set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | nvp_idx);
}
}
static void reset_hw_threads(QTestState *qts)
{
uint8_t first_group = 0;
uint32_t w1 = 0x000000ff;
int i;
if (SMT >= 4) {
/* define 2 groups of 2, part of a bigger group of size 4 */
set_nvg(qts, 0x80, 0x02);
set_nvg(qts, 0x82, 0x02);
set_nvg(qts, 0x81, 0);
first_group = 0x01;
w1 = 0x000300ff;
}
for (i = 0; i < SMT; i++) {
set_nvp(qts, 0x80 + i, first_group);
set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0, 0x00ff00ff);
set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD1, w1);
set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD2, 0x80000000);
}
}
static void reset_state(QTestState *qts)
{
size_t mem_used = XIVE_MEM_END - XIVE_MEM_START;
qtest_memset(qts, XIVE_MEM_START, 0, mem_used);
reset_hw_threads(qts);
reset_pool_threads(qts);
}
static void init_xive(QTestState *qts)
{
uint64_t val1, val2, range;
/*
* We can take a few shortcuts here, as we know the default values
* used for xive initialization
*/
/*
* Set the BARs.
* We reuse the same values used by firmware to ease debug.
*/
pnv_xive_xscom_write(qts, X_CQ_IC_BAR, XIVE_IC_BAR);
pnv_xive_xscom_write(qts, X_CQ_TM_BAR, XIVE_TM_BAR);
/* ESB and NVPG use 2 pages per resource. The others only one page */
range = (MAX_IRQS << 17) >> 25;
val1 = XIVE_ESB_BAR | range;
pnv_xive_xscom_write(qts, X_CQ_ESB_BAR, val1);
range = (MAX_ENDS << 16) >> 25;
val1 = XIVE_END_BAR | range;
pnv_xive_xscom_write(qts, X_CQ_END_BAR, val1);
range = (MAX_VPS << 17) >> 25;
val1 = XIVE_NVPG_BAR | range;
pnv_xive_xscom_write(qts, X_CQ_NVPG_BAR, val1);
range = (MAX_VPS << 16) >> 25;
val1 = XIVE_NVC_BAR | range;
pnv_xive_xscom_write(qts, X_CQ_NVC_BAR, val1);
/*
* Enable hw threads.
* We check the value written. Useless with current
* implementation, but it validates the xscom read path and it's
* what the hardware procedure says
*/
val1 = 0xF000000000000000ull; /* core 0, 4 threads */
pnv_xive_xscom_write(qts, X_TCTXT_EN0, val1);
val2 = pnv_xive_xscom_read(qts, X_TCTXT_EN0);
g_assert_cmphex(val1, ==, val2);
/* Memory tables */
set_table(qts, VST_ESB, XIVE_ESB_MEM);
set_table(qts, VST_EAS, XIVE_EAS_MEM);
set_table(qts, VST_END, XIVE_END_MEM);
set_table(qts, VST_NVP, XIVE_NVP_MEM);
set_table(qts, VST_NVG, XIVE_NVG_MEM);
set_table(qts, VST_NVC, XIVE_NVC_MEM);
set_table(qts, VST_SYNC, XIVE_SYNC_MEM);
reset_hw_threads(qts);
reset_pool_threads(qts);
}
static void test_hw_irq(QTestState *qts)
{
uint32_t irq = 2;
uint32_t irq_data = 0x600df00d;
uint32_t end_index = 5;
uint32_t target_pir = 1;
uint32_t target_nvp = 0x80 + target_pir;
uint8_t priority = 5;
uint32_t reg32;
uint16_t reg16;
uint8_t pq, nsr, cppr;
printf("# ============================================================\n");
printf("# Testing irq %d to hardware thread %d\n", irq, target_pir);
/* irq config */
set_eas(qts, irq, end_index, irq_data);
set_end(qts, end_index, target_nvp, priority, false /* group */);
/* enable and trigger irq */
get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00);
set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0);
/* check irq is raised on cpu */
pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING);
reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
nsr = reg32 >> 24;
cppr = (reg32 >> 16) & 0xFF;
g_assert_cmphex(nsr, ==, 0x80);
g_assert_cmphex(cppr, ==, 0xFF);
/* ack the irq */
reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG);
nsr = reg16 >> 8;
cppr = reg16 & 0xFF;
g_assert_cmphex(nsr, ==, 0x80);
g_assert_cmphex(cppr, ==, priority);
/* check irq data is what was configured */
reg32 = qtest_readl(qts, xive_get_queue_addr(end_index));
g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff));
/* End Of Interrupt */
set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0);
pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET);
g_assert_cmpuint(pq, ==, XIVE_ESB_RESET);
/* reset CPPR */
set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF);
reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
nsr = reg32 >> 24;
cppr = (reg32 >> 16) & 0xFF;
g_assert_cmphex(nsr, ==, 0x00);
g_assert_cmphex(cppr, ==, 0xFF);
}
#define XIVE_ODD_CL 0x80
static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts)
{
uint32_t target_pir = 1;
uint32_t target_nvp = 0x80 + target_pir;
Xive2Nvp nvp;
uint8_t cl_pair[XIVE_REPORT_SIZE];
uint32_t qw1w0, qw3w0, qw1w2, qw2w2;
uint8_t qw3b8;
uint32_t cl_word;
uint32_t word2;
printf("# ============================================================\n");
printf("# Testing 'Pull Thread Context to Odd Thread Reporting Line'\n");
/* clear odd cache line prior to pull operation */
memset(cl_pair, 0, sizeof(cl_pair));
get_nvp(qts, target_nvp, &nvp);
set_cl_pair(qts, &nvp, cl_pair);
/* Read some values from TIMA that we expect to see in cacheline */
qw1w0 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD0);
qw3w0 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0);
qw1w2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
qw2w2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
qw3b8 = get_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
/* Execute the pull operation */
set_tima8(qts, target_pir, TM_SPC_PULL_PHYS_CTX_OL, 0);
/* Verify odd cache line values match TIMA after pull operation */
get_cl_pair(qts, &nvp, cl_pair);
memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD0], 4);
g_assert_cmphex(qw1w0, ==, be32_to_cpu(cl_word));
memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD0], 4);
g_assert_cmphex(qw3w0, ==, be32_to_cpu(cl_word));
memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD2], 4);
g_assert_cmphex(qw1w2, ==, be32_to_cpu(cl_word));
memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW2_HV_POOL + TM_WORD2], 4);
g_assert_cmphex(qw2w2, ==, be32_to_cpu(cl_word));
g_assert_cmphex(qw3b8, ==,
cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD2]);
/* Verify that all TIMA valid bits for target thread are cleared */
word2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2);
g_assert_cmphex(xive_get_field32(TM_QW1W2_VO, word2), ==, 0);
word2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2);
g_assert_cmphex(xive_get_field32(TM_QW2W2_VP, word2), ==, 0);
word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2);
g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0);
}
static void test_xive(void)
{
QTestState *qts;
qts = qtest_initf("-M powernv10 -smp %d,cores=1,threads=%d -nographic "
"-nodefaults -serial mon:stdio -S "
"-d guest_errors -trace '*xive*'",
SMT, SMT);
init_xive(qts);
test_hw_irq(qts);
/* omit reset_state here and use settings from test_hw_irq */
test_pull_thread_ctx_to_odd_thread_cl(qts);
reset_state(qts);
test_flush_sync_inject(qts);
qtest_quit(qts);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
qtest_add_func("xive2", test_xive);
return g_test_run();
}


@@ -6,7 +6,7 @@ VPATH += $(SRC_PATH)/tests/tcg/ppc64
config-cc.mak: Makefile
$(quiet-@)( \
$(call cc-option,-mpower8-vector, CROSS_CC_HAS_POWER8_VECTOR); \
$(call cc-option,-mcpu=power8, CROSS_CC_HAS_CPU_POWER8); \
$(call cc-option,-mpower10, CROSS_CC_HAS_POWER10)) 3> config-cc.mak
-include config-cc.mak
@@ -23,15 +23,15 @@ run-threadcount: threadcount
run-plugin-threadcount-with-%:
$(call skip-test, $<, "BROKEN (flaky with clang) ")
ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),)
ifneq ($(CROSS_CC_HAS_CPU_POWER8),)
PPC64_TESTS=bcdsub non_signalling_xscv
endif
$(PPC64_TESTS): CFLAGS += -mpower8-vector
$(PPC64_TESTS): CFLAGS += -mcpu=power8
ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),)
ifneq ($(CROSS_CC_HAS_CPU_POWER8),)
PPC64_TESTS += vsx_f2i_nan
endif
vsx_f2i_nan: CFLAGS += -mpower8-vector -I$(SRC_PATH)/include
vsx_f2i_nan: CFLAGS += -mcpu=power8 -I$(SRC_PATH)/include
PPC64_TESTS += mtfsf
PPC64_TESTS += mffsce