ppc patch queue 2019-01-09
Second main pull request for qemu-4.0. Highlights are: * Final parts of XIVE support for pseries (without KVM) * Preliminary work for PHB hotplug * Starting to use TCG vector operations This includes some changes in the PCI core, which Michael Tsirkin requested come through this tree, since they're primarily of interest for ppc. -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAlw1J7oACgkQbDjKyiDZ s5Jz4RAAlVsjDrs/tlgYwg2UTFYEwWcOuS7WGaiO2WOZg87CKtuhS9ViYTA0ei/k ZAdmH2Yq3OsLiCXHGIrsJsYHn/7wUsGqVkkN2/7b20yEq8EUMfJLuOgnvV8Hhl4z suQaDNQgJZ1dp0xZO/t8Gxb/SmKEjeUBwyjTB5MWfKI/o7e9QY59iS4ttN1hjSSw Qy3pCwhlebJYlRtzSJvXZAR48JmSZYJK3NFkbugsRvQE6UgYGnKGtlspd+E48WeD sd+9yK3krVJoj/8M2JgFGaoliHWP5WURd6xsVmDEn4JSJEp62zLQNVUwnRFKH+Uf 32auV3KyOBm8T7F/qj+HMPz3vX2hHftNvchia1SwYl/Fc7yWyXGWZS217SvQWNg9 S1qIBVbTiQneys7QfHNJlQ9Hr65++m4esJagz3uDS5RxJqH3FMt0WjKQKHh5ohLt lzRO3VHS0Fx1u4Pg6btk37tdBucZn75jO9dw1LCfK9H3zYr23ZsiYSPlYsVuCRwi 8+mnRvTa34D2eVMIR6/1oWTd66Fv+t1lV5+xQTjUnGa7Yac0tk8sFQaUG6mgDG+l oyKhNdpQV3+6sLe27A3DUykgpaS9DtYNYOPyFqi5Uw+lieHKDv4msXHZUVHXLca+ 1eXcGKA/XtdHmAzovLWFOQZSqDzBB+ea0yHK0a9DQCe13oyGGWw= =4Q/4 -----END PGP SIGNATURE----- Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.0-20190109' into staging ppc patch queue 2019-01-09 Second main pull request for qemu-4.0. Highlights are: * Final parts of XIVE support for pseries (without KVM) * Preliminary work for PHB hotplug * Starting to use TCG vector operations This includes some changes in the PCI core, which Michael Tsirkin requested come through this tree, since they're primarily of interest for ppc. # gpg: Signature made Tue 08 Jan 2019 22:44:10 GMT # gpg: using RSA key 6C38CACA20D9B392 # gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" # gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>" # gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" # gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>" # Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392 * remotes/dgibson/tags/ppc-for-4.0-20190109: (29 commits) spapr: enable XIVE MMIOs at reset spapr: introduce a new sPAPR IRQ backend supporting XIVE and XICS ppc/xics: allow ICSState to have an offset 0 spapr: move the qemu_irq array under the machine pnv/psi: move the ICSState qemu_irq array under the PSI device model ppc: export the XICS and XIVE set_irq handlers spapr: return from post_load method when RTC import fails ppc: replace the 'Object *intc' by a 'ICPState *icp' pointer under the CPU ppc/xive: introduce a XiveTCTX pointer under PowerPCCPU spapr: modify the prototype of the cpu_intc_create() method spapr/xive: simplify the sPAPR IRQ qirq method for XIVE spapr_pci: Define SPAPR_MAX_PHBS in hw/pci-host/spapr.h pci: allow cleanup/unregistration of PCI root buses spapr: move spapr_create_phb() to core machine code MAINTAINERS: add qemu_vga.ndrv file entry for Mac machines MAINTAINERS: Add some missing ppc-related files target/ppc: replace AVR* macros with Vsr* macros target/ppc: move FP and VMX registers into aligned vsr register array target/ppc: merge ppc_vsr_t and ppc_avr_t union types target/ppc: switch FPR, VMX and VSX helpers to access data directly from cpu_env ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
commit
8ae951fbc1
17
MAINTAINERS
17
MAINTAINERS
@ -962,6 +962,7 @@ L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
F: hw/ppc/e500.[hc]
|
||||
F: hw/ppc/e500plat.c
|
||||
F: hw/gpio/mpc8xxx.c
|
||||
F: include/hw/ppc/ppc_e500.h
|
||||
F: include/hw/pci-host/ppce500.h
|
||||
F: pc-bios/u-boot.e500
|
||||
@ -973,7 +974,7 @@ S: Odd Fixes
|
||||
F: hw/ppc/mpc8544ds.c
|
||||
F: hw/ppc/mpc8544_guts.c
|
||||
|
||||
New World
|
||||
New World (mac99)
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
@ -983,12 +984,15 @@ F: hw/pci-bridge/dec.[hc]
|
||||
F: hw/misc/macio/
|
||||
F: hw/misc/mos6522.c
|
||||
F: hw/nvram/mac_nvram.c
|
||||
F: hw/input/adb*
|
||||
F: include/hw/misc/macio/
|
||||
F: include/hw/misc/mos6522.h
|
||||
F: include/hw/ppc/mac_dbdma.h
|
||||
F: include/hw/pci-host/uninorth.h
|
||||
F: include/hw/input/adb*
|
||||
F: pc-bios/qemu_vga.ndrv
|
||||
|
||||
Old World
|
||||
Old World (g3beige)
|
||||
M: David Gibson <david@gibson.dropbear.id.au>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Odd Fixes
|
||||
@ -996,7 +1000,10 @@ F: hw/ppc/mac_oldworld.c
|
||||
F: hw/pci-host/grackle.c
|
||||
F: hw/misc/macio/
|
||||
F: hw/intc/heathrow_pic.c
|
||||
F: hw/input/adb*
|
||||
F: include/hw/intc/heathrow_pic.h
|
||||
F: include/hw/input/adb*
|
||||
F: pc-bios/qemu_vga.ndrv
|
||||
|
||||
PReP
|
||||
M: Hervé Poussineau <hpoussin@reactos.org>
|
||||
@ -1052,8 +1059,14 @@ sam460ex
|
||||
M: BALATON Zoltan <balaton@eik.bme.hu>
|
||||
L: qemu-ppc@nongnu.org
|
||||
S: Maintained
|
||||
F: hw/ppc/sam460ex.c
|
||||
F: hw/ppc/ppc440_pcix.c
|
||||
F: hw/display/sm501*
|
||||
F: hw/ide/sii3112.c
|
||||
F: hw/timer/m41t80.c
|
||||
F: pc-bios/canyonlands.dt[sb]
|
||||
F: pc-bios/u-boot-sam460ex-20100605.bin
|
||||
F: roms/u-boot-sam460ex
|
||||
|
||||
SH4 Machines
|
||||
------------
|
||||
|
2
configure
vendored
2
configure
vendored
@ -3939,7 +3939,7 @@ if test "$fdt" != "no" ; then
|
||||
cat > $TMPC << EOF
|
||||
#include <libfdt.h>
|
||||
#include <libfdt_env.h>
|
||||
int main(void) { fdt_first_subnode(0, 0); return 0; }
|
||||
int main(void) { fdt_check_full(NULL, 0); return 0; }
|
||||
EOF
|
||||
if compile_prog "" "$fdt_libs" ; then
|
||||
# system DTC is good - use it
|
||||
|
@ -179,6 +179,15 @@ static void spapr_xive_map_mmio(sPAPRXive *xive)
|
||||
sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
|
||||
}
|
||||
|
||||
void spapr_xive_mmio_set_enabled(sPAPRXive *xive, bool enable)
|
||||
{
|
||||
memory_region_set_enabled(&xive->source.esb_mmio, enable);
|
||||
memory_region_set_enabled(&xive->tm_mmio, enable);
|
||||
|
||||
/* Disable the END ESBs until a guest OS makes use of them */
|
||||
memory_region_set_enabled(&xive->end_source.esb_mmio, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* When a Virtual Processor is scheduled to run on a HW thread, the
|
||||
* hypervisor pushes its identifier in the OS CAM line. Emulate the
|
||||
@ -488,20 +497,6 @@ bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn)
|
||||
return true;
|
||||
}
|
||||
|
||||
qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn)
|
||||
{
|
||||
XiveSource *xsrc = &xive->source;
|
||||
|
||||
if (lisn >= xive->nr_irqs) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* The sPAPR machine/device should have claimed the IRQ before */
|
||||
assert(xive_eas_is_valid(&xive->eat[lisn]));
|
||||
|
||||
return xive_source_qirq(xsrc, lisn);
|
||||
}
|
||||
|
||||
/*
|
||||
* XIVE hcalls
|
||||
*
|
||||
|
@ -461,7 +461,7 @@ static void ics_simple_set_irq_lsi(ICSState *ics, int srcno, int val)
|
||||
ics_simple_resend_lsi(ics, srcno);
|
||||
}
|
||||
|
||||
static void ics_simple_set_irq(void *opaque, int srcno, int val)
|
||||
void ics_simple_set_irq(void *opaque, int srcno, int val)
|
||||
{
|
||||
ICSState *ics = (ICSState *)opaque;
|
||||
|
||||
@ -571,8 +571,6 @@ static void ics_simple_realize(DeviceState *dev, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
|
||||
|
||||
qemu_register_reset(ics_simple_reset_handler, ics);
|
||||
}
|
||||
|
||||
|
@ -298,7 +298,7 @@ static int ics_set_kvm_state(ICSState *ics, int version_id)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ics_kvm_set_irq(void *opaque, int srcno, int val)
|
||||
void ics_kvm_set_irq(void *opaque, int srcno, int val)
|
||||
{
|
||||
ICSState *ics = opaque;
|
||||
struct kvm_irq_level args;
|
||||
@ -344,7 +344,6 @@ static void ics_kvm_realize(DeviceState *dev, Error **errp)
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
ics->qirqs = qemu_allocate_irqs(ics_kvm_set_irq, ics, ics->nr_irqs);
|
||||
|
||||
qemu_register_reset(ics_kvm_reset_handler, ics);
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
{
|
||||
target_ulong cppr = args[0];
|
||||
|
||||
icp_set_cppr(ICP(cpu->intc), cppr);
|
||||
icp_set_cppr(cpu->icp, cppr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
@ -65,7 +65,7 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
uint32_t xirr = icp_accept(ICP(cpu->intc));
|
||||
uint32_t xirr = icp_accept(cpu->icp);
|
||||
|
||||
args[0] = xirr;
|
||||
return H_SUCCESS;
|
||||
@ -74,7 +74,7 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
uint32_t xirr = icp_accept(ICP(cpu->intc));
|
||||
uint32_t xirr = icp_accept(cpu->icp);
|
||||
|
||||
args[0] = xirr;
|
||||
args[1] = cpu_get_host_ticks();
|
||||
@ -86,7 +86,7 @@ static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
{
|
||||
target_ulong xirr = args[0];
|
||||
|
||||
icp_eoi(ICP(cpu->intc), xirr);
|
||||
icp_eoi(cpu->icp, xirr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
@ -94,7 +94,7 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
uint32_t mfrr;
|
||||
uint32_t xirr = icp_ipoll(ICP(cpu->intc), &mfrr);
|
||||
uint32_t xirr = icp_ipoll(cpu->icp, &mfrr);
|
||||
|
||||
args[0] = xirr;
|
||||
args[1] = mfrr;
|
||||
|
@ -321,7 +321,7 @@ static void xive_tm_write(void *opaque, hwaddr offset,
|
||||
uint64_t value, unsigned size)
|
||||
{
|
||||
PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
|
||||
XiveTCTX *tctx = XIVE_TCTX(cpu->intc);
|
||||
XiveTCTX *tctx = cpu->tctx;
|
||||
const XiveTmOp *xto;
|
||||
|
||||
/*
|
||||
@ -360,7 +360,7 @@ static void xive_tm_write(void *opaque, hwaddr offset,
|
||||
static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
|
||||
{
|
||||
PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
|
||||
XiveTCTX *tctx = XIVE_TCTX(cpu->intc);
|
||||
XiveTCTX *tctx = cpu->tctx;
|
||||
const XiveTmOp *xto;
|
||||
|
||||
/*
|
||||
@ -845,7 +845,7 @@ static const MemoryRegionOps xive_source_esb_ops = {
|
||||
},
|
||||
};
|
||||
|
||||
static void xive_source_set_irq(void *opaque, int srcno, int val)
|
||||
void xive_source_set_irq(void *opaque, int srcno, int val)
|
||||
{
|
||||
XiveSource *xsrc = XIVE_SOURCE(opaque);
|
||||
bool notify = false;
|
||||
@ -932,9 +932,6 @@ static void xive_source_realize(DeviceState *dev, Error **errp)
|
||||
&xive_source_esb_ops, xsrc, "xive.esb",
|
||||
(1ull << xsrc->esb_shift) * xsrc->nr_irqs);
|
||||
|
||||
xsrc->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc,
|
||||
xsrc->nr_irqs);
|
||||
|
||||
qemu_register_reset(xive_source_reset, dev);
|
||||
}
|
||||
|
||||
@ -1186,7 +1183,7 @@ static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
|
||||
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
XiveTCTX *tctx = XIVE_TCTX(cpu->intc);
|
||||
XiveTCTX *tctx = cpu->tctx;
|
||||
int ring;
|
||||
|
||||
/*
|
||||
|
33
hw/pci/pci.c
33
hw/pci/pci.c
@ -333,6 +333,13 @@ static void pci_host_bus_register(DeviceState *host)
|
||||
QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
|
||||
}
|
||||
|
||||
static void pci_host_bus_unregister(DeviceState *host)
|
||||
{
|
||||
PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);
|
||||
|
||||
QLIST_REMOVE(host_bridge, next);
|
||||
}
|
||||
|
||||
PCIBus *pci_device_root_bus(const PCIDevice *d)
|
||||
{
|
||||
PCIBus *bus = pci_get_bus(d);
|
||||
@ -379,6 +386,11 @@ static void pci_root_bus_init(PCIBus *bus, DeviceState *parent,
|
||||
pci_host_bus_register(parent);
|
||||
}
|
||||
|
||||
static void pci_bus_uninit(PCIBus *bus)
|
||||
{
|
||||
pci_host_bus_unregister(BUS(bus)->parent);
|
||||
}
|
||||
|
||||
bool pci_bus_is_express(PCIBus *bus)
|
||||
{
|
||||
return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
|
||||
@ -413,6 +425,12 @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
|
||||
return bus;
|
||||
}
|
||||
|
||||
void pci_root_bus_cleanup(PCIBus *bus)
|
||||
{
|
||||
pci_bus_uninit(bus);
|
||||
object_unparent(OBJECT(bus));
|
||||
}
|
||||
|
||||
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
||||
void *irq_opaque, int nirq)
|
||||
{
|
||||
@ -423,6 +441,15 @@ void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
||||
bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
|
||||
}
|
||||
|
||||
void pci_bus_irqs_cleanup(PCIBus *bus)
|
||||
{
|
||||
bus->set_irq = NULL;
|
||||
bus->map_irq = NULL;
|
||||
bus->irq_opaque = NULL;
|
||||
bus->nirq = 0;
|
||||
g_free(bus->irq_count);
|
||||
}
|
||||
|
||||
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
|
||||
pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
||||
void *irq_opaque,
|
||||
@ -439,6 +466,12 @@ PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
|
||||
return bus;
|
||||
}
|
||||
|
||||
void pci_unregister_root_bus(PCIBus *bus)
|
||||
{
|
||||
pci_bus_irqs_cleanup(bus);
|
||||
pci_root_bus_cleanup(bus);
|
||||
}
|
||||
|
||||
int pci_bus_num(PCIBus *s)
|
||||
{
|
||||
return PCI_BUS_GET_CLASS(s)->bus_num(s);
|
||||
|
27
hw/ppc/pnv.c
27
hw/ppc/pnv.c
@ -668,11 +668,20 @@ static uint32_t pnv_chip_core_pir_p8(PnvChip *chip, uint32_t core_id)
|
||||
return (chip->chip_id << 7) | (core_id << 3);
|
||||
}
|
||||
|
||||
static Object *pnv_chip_power8_intc_create(PnvChip *chip, Object *child,
|
||||
Error **errp)
|
||||
static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu,
|
||||
Error **errp)
|
||||
{
|
||||
return icp_create(child, TYPE_PNV_ICP, XICS_FABRIC(qdev_get_machine()),
|
||||
errp);
|
||||
Error *local_err = NULL;
|
||||
Object *obj;
|
||||
|
||||
obj = icp_create(OBJECT(cpu), TYPE_PNV_ICP, XICS_FABRIC(qdev_get_machine()),
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
cpu->icp = ICP(obj);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -690,10 +699,10 @@ static uint32_t pnv_chip_core_pir_p9(PnvChip *chip, uint32_t core_id)
|
||||
return (chip->chip_id << 8) | (core_id << 2);
|
||||
}
|
||||
|
||||
static Object *pnv_chip_power9_intc_create(PnvChip *chip, Object *child,
|
||||
Error **errp)
|
||||
static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu,
|
||||
Error **errp)
|
||||
{
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Allowed core identifiers on a POWER8 Processor Chip :
|
||||
@ -1090,7 +1099,7 @@ static ICPState *pnv_icp_get(XICSFabric *xi, int pir)
|
||||
{
|
||||
PowerPCCPU *cpu = ppc_get_vcpu_by_pir(pir);
|
||||
|
||||
return cpu ? ICP(cpu->intc) : NULL;
|
||||
return cpu ? cpu->icp : NULL;
|
||||
}
|
||||
|
||||
static void pnv_pic_print_info(InterruptStatsProvider *obj,
|
||||
@ -1103,7 +1112,7 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj,
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
icp_pic_print_info(ICP(cpu->intc), mon);
|
||||
icp_pic_print_info(cpu->icp, mon);
|
||||
}
|
||||
|
||||
for (i = 0; i < pnv->num_chips; i++) {
|
||||
|
@ -114,7 +114,7 @@ static void pnv_realize_vcpu(PowerPCCPU *cpu, PnvChip *chip, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
cpu->intc = pcc->intc_create(chip, OBJECT(cpu), &local_err);
|
||||
pcc->intc_create(chip, cpu, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
@ -190,7 +190,7 @@ err:
|
||||
static void pnv_unrealize_vcpu(PowerPCCPU *cpu)
|
||||
{
|
||||
qemu_unregister_reset(pnv_cpu_reset, cpu);
|
||||
object_unparent(cpu->intc);
|
||||
object_unparent(OBJECT(cpu->icp));
|
||||
cpu_remove_sync(CPU(cpu));
|
||||
object_unparent(OBJECT(cpu));
|
||||
}
|
||||
|
@ -207,7 +207,6 @@ static const uint64_t stat_bits[] = {
|
||||
|
||||
void pnv_psi_irq_set(PnvPsi *psi, PnvPsiIrq irq, bool state)
|
||||
{
|
||||
ICSState *ics = &psi->ics;
|
||||
uint32_t xivr_reg;
|
||||
uint32_t stat_reg;
|
||||
uint32_t src;
|
||||
@ -227,14 +226,14 @@ void pnv_psi_irq_set(PnvPsi *psi, PnvPsiIrq irq, bool state)
|
||||
/* TODO: optimization, check mask here. That means
|
||||
* re-evaluating when unmasking
|
||||
*/
|
||||
qemu_irq_raise(ics->qirqs[src]);
|
||||
qemu_irq_raise(psi->qirqs[src]);
|
||||
} else {
|
||||
psi->regs[stat_reg] &= ~stat_bits[irq];
|
||||
|
||||
/* FSP and PSI are muxed so don't lower if either is still set */
|
||||
if (stat_reg != PSIHB_XSCOM_CR ||
|
||||
!(psi->regs[stat_reg] & (PSIHB_CR_PSI_IRQ | PSIHB_CR_FSP_IRQ))) {
|
||||
qemu_irq_lower(ics->qirqs[src]);
|
||||
qemu_irq_lower(psi->qirqs[src]);
|
||||
} else {
|
||||
state = true;
|
||||
}
|
||||
@ -491,6 +490,8 @@ static void pnv_psi_realize(DeviceState *dev, Error **errp)
|
||||
ics_set_irq_type(ics, i, true);
|
||||
}
|
||||
|
||||
psi->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
|
||||
|
||||
/* XSCOM region for PSI registers */
|
||||
pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_xscom_ops,
|
||||
psi, "xscom-psi", PNV_XSCOM_PSIHB_SIZE);
|
||||
|
@ -32,8 +32,7 @@
|
||||
#include "exec/address-spaces.h"
|
||||
#include "qemu/error-report.h"
|
||||
|
||||
#define DEBUG_UIC
|
||||
|
||||
/*#define DEBUG_UIC*/
|
||||
|
||||
#ifdef DEBUG_UIC
|
||||
# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
|
||||
|
@ -1048,6 +1048,7 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
|
||||
add_str(hypertas, "hcall-sprg0");
|
||||
add_str(hypertas, "hcall-copy");
|
||||
add_str(hypertas, "hcall-debug");
|
||||
add_str(hypertas, "hcall-vphn");
|
||||
add_str(qemu_hypertas, "hcall-memop1");
|
||||
|
||||
if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
|
||||
@ -1668,7 +1669,10 @@ static void spapr_machine_reset(void)
|
||||
/* Load the fdt */
|
||||
qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
|
||||
cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
|
||||
g_free(fdt);
|
||||
g_free(spapr->fdt_blob);
|
||||
spapr->fdt_size = fdt_totalsize(fdt);
|
||||
spapr->fdt_initial_size = spapr->fdt_size;
|
||||
spapr->fdt_blob = fdt;
|
||||
|
||||
/* Set up the entry state */
|
||||
spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, fdt_addr);
|
||||
@ -1743,12 +1747,17 @@ static int spapr_post_load(void *opaque, int version_id)
|
||||
return err;
|
||||
}
|
||||
|
||||
/* In earlier versions, there was no separate qdev for the PAPR
|
||||
/*
|
||||
* In earlier versions, there was no separate qdev for the PAPR
|
||||
* RTC, so the RTC offset was stored directly in sPAPREnvironment.
|
||||
* So when migrating from those versions, poke the incoming offset
|
||||
* value into the RTC device */
|
||||
* value into the RTC device
|
||||
*/
|
||||
if (version_id < 3) {
|
||||
err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
if (kvm_enabled() && spapr->patb_entry) {
|
||||
@ -1919,6 +1928,39 @@ static const VMStateDescription vmstate_spapr_irq_map = {
|
||||
},
|
||||
};
|
||||
|
||||
static bool spapr_dtb_needed(void *opaque)
|
||||
{
|
||||
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);
|
||||
|
||||
return smc->update_dt_enabled;
|
||||
}
|
||||
|
||||
static int spapr_dtb_pre_load(void *opaque)
|
||||
{
|
||||
sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
|
||||
|
||||
g_free(spapr->fdt_blob);
|
||||
spapr->fdt_blob = NULL;
|
||||
spapr->fdt_size = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_spapr_dtb = {
|
||||
.name = "spapr_dtb",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = spapr_dtb_needed,
|
||||
.pre_load = spapr_dtb_pre_load,
|
||||
.fields = (VMStateField[]) {
|
||||
VMSTATE_UINT32(fdt_initial_size, sPAPRMachineState),
|
||||
VMSTATE_UINT32(fdt_size, sPAPRMachineState),
|
||||
VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, sPAPRMachineState, 0, NULL,
|
||||
fdt_size),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
};
|
||||
|
||||
static const VMStateDescription vmstate_spapr = {
|
||||
.name = "spapr",
|
||||
.version_id = 3,
|
||||
@ -1948,6 +1990,7 @@ static const VMStateDescription vmstate_spapr = {
|
||||
&vmstate_spapr_cap_ibs,
|
||||
&vmstate_spapr_irq_map,
|
||||
&vmstate_spapr_cap_nested_kvm_hv,
|
||||
&vmstate_spapr_dtb,
|
||||
NULL
|
||||
}
|
||||
};
|
||||
@ -2514,6 +2557,17 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
|
||||
}
|
||||
}
|
||||
|
||||
static PCIHostState *spapr_create_default_phb(void)
|
||||
{
|
||||
DeviceState *dev;
|
||||
|
||||
dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
|
||||
qdev_prop_set_uint32(dev, "index", 0);
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
return PCI_HOST_BRIDGE(dev);
|
||||
}
|
||||
|
||||
/* pSeries LPAR / sPAPR hardware init */
|
||||
static void spapr_machine_init(MachineState *machine)
|
||||
{
|
||||
@ -2632,11 +2686,11 @@ static void spapr_machine_init(MachineState *machine)
|
||||
spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);
|
||||
|
||||
/* advertise XIVE on POWER9 machines */
|
||||
if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) {
|
||||
if (spapr->irq->ov5 & (SPAPR_OV5_XIVE_EXPLOIT | SPAPR_OV5_XIVE_BOTH)) {
|
||||
if (ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
|
||||
0, spapr->max_compat_pvr)) {
|
||||
spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
|
||||
} else {
|
||||
} else if (spapr->irq->ov5 & SPAPR_OV5_XIVE_EXPLOIT) {
|
||||
error_report("XIVE-only machines require a POWER9 CPU");
|
||||
exit(1);
|
||||
}
|
||||
@ -2746,7 +2800,7 @@ static void spapr_machine_init(MachineState *machine)
|
||||
/* Set up PCI */
|
||||
spapr_pci_rtas_init();
|
||||
|
||||
phb = spapr_create_phb(spapr, 0);
|
||||
phb = spapr_create_default_phb();
|
||||
|
||||
for (i = 0; i < nb_nics; i++) {
|
||||
NICInfo *nd = &nd_table[i];
|
||||
@ -3062,6 +3116,8 @@ static char *spapr_get_ic_mode(Object *obj, Error **errp)
|
||||
return g_strdup("xics");
|
||||
} else if (spapr->irq == &spapr_irq_xive) {
|
||||
return g_strdup("xive");
|
||||
} else if (spapr->irq == &spapr_irq_dual) {
|
||||
return g_strdup("dual");
|
||||
}
|
||||
g_assert_not_reached();
|
||||
}
|
||||
@ -3075,6 +3131,8 @@ static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
|
||||
spapr->irq = &spapr_irq_xics;
|
||||
} else if (strcmp(value, "xive") == 0) {
|
||||
spapr->irq = &spapr_irq_xive;
|
||||
} else if (strcmp(value, "dual") == 0) {
|
||||
spapr->irq = &spapr_irq_dual;
|
||||
} else {
|
||||
error_setg(errp, "Bad value for \"ic-mode\" property");
|
||||
}
|
||||
@ -3123,7 +3181,7 @@ static void spapr_instance_init(Object *obj)
|
||||
object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
|
||||
spapr_set_ic_mode, NULL);
|
||||
object_property_set_description(obj, "ic-mode",
|
||||
"Specifies the interrupt controller mode (xics, xive)",
|
||||
"Specifies the interrupt controller mode (xics, xive, dual)",
|
||||
NULL);
|
||||
}
|
||||
|
||||
@ -3791,8 +3849,6 @@ static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
|
||||
* 1TiB 64-bit MMIO windows for each PHB.
|
||||
*/
|
||||
const uint64_t base_buid = 0x800000020000000ULL;
|
||||
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
|
||||
SPAPR_PCI_MEM64_WIN_SIZE - 1)
|
||||
int i;
|
||||
|
||||
/* Sanity check natural alignments */
|
||||
@ -3840,7 +3896,7 @@ static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
|
||||
{
|
||||
PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
|
||||
|
||||
return cpu ? ICP(cpu->intc) : NULL;
|
||||
return cpu ? cpu->icp : NULL;
|
||||
}
|
||||
|
||||
static void spapr_pic_print_info(InterruptStatsProvider *obj,
|
||||
@ -3930,6 +3986,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
|
||||
hc->unplug = spapr_machine_device_unplug;
|
||||
|
||||
smc->dr_lmb_enabled = true;
|
||||
smc->update_dt_enabled = true;
|
||||
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
|
||||
mc->has_hotpluggable_cpus = true;
|
||||
smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
|
||||
@ -4022,9 +4079,12 @@ DEFINE_SPAPR_MACHINE(4_0, "4.0", true);
|
||||
*/
|
||||
static void spapr_machine_3_1_class_options(MachineClass *mc)
|
||||
{
|
||||
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
|
||||
|
||||
spapr_machine_4_0_class_options(mc);
|
||||
compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);
|
||||
mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
|
||||
smc->update_dt_enabled = false;
|
||||
}
|
||||
|
||||
DEFINE_SPAPR_MACHINE(3_1, "3.1", false);
|
||||
|
@ -194,7 +194,12 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, sPAPRCPUCore *sc)
|
||||
vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data);
|
||||
}
|
||||
qemu_unregister_reset(spapr_cpu_reset, cpu);
|
||||
object_unparent(cpu->intc);
|
||||
if (cpu->icp) {
|
||||
object_unparent(OBJECT(cpu->icp));
|
||||
}
|
||||
if (cpu->tctx) {
|
||||
object_unparent(OBJECT(cpu->tctx));
|
||||
}
|
||||
cpu_remove_sync(CPU(cpu));
|
||||
object_unparent(OBJECT(cpu));
|
||||
}
|
||||
@ -232,7 +237,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
qemu_register_reset(spapr_cpu_reset, cpu);
|
||||
spapr_cpu_reset(cpu);
|
||||
|
||||
cpu->intc = spapr->irq->cpu_intc_create(spapr, OBJECT(cpu), &local_err);
|
||||
spapr->irq->cpu_intc_create(spapr, cpu, &local_err);
|
||||
if (local_err) {
|
||||
goto error_unregister;
|
||||
}
|
||||
|
@ -1654,6 +1654,17 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
|
||||
(spapr_h_cas_compose_response(spapr, args[1], args[2],
|
||||
ov5_updates) != 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Generate a machine reset when we have an update of the
|
||||
* interrupt mode. Only required when the machine supports both
|
||||
* modes.
|
||||
*/
|
||||
if (!spapr->cas_reboot) {
|
||||
spapr->cas_reboot = spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)
|
||||
&& spapr->irq->ov5 & SPAPR_OV5_XIVE_BOTH;
|
||||
}
|
||||
|
||||
spapr_ovec_cleanup(ov5_updates);
|
||||
|
||||
if (spapr->cas_reboot) {
|
||||
@ -1663,6 +1674,42 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
|
||||
sPAPRMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
target_ulong *args)
|
||||
{
|
||||
target_ulong flags = args[0];
|
||||
target_ulong procno = args[1];
|
||||
PowerPCCPU *tcpu;
|
||||
int idx;
|
||||
|
||||
/* only support procno from H_REGISTER_VPA */
|
||||
if (flags != 0x1) {
|
||||
return H_FUNCTION;
|
||||
}
|
||||
|
||||
tcpu = spapr_find_cpu(procno);
|
||||
if (tcpu == NULL) {
|
||||
return H_P2;
|
||||
}
|
||||
|
||||
/* sequence is the same as in the "ibm,associativity" property */
|
||||
|
||||
idx = 0;
|
||||
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
|
||||
((uint64_t)(b) & 0xffffffff))
|
||||
args[idx++] = ASSOCIATIVITY(0, 0);
|
||||
args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
|
||||
args[idx++] = ASSOCIATIVITY(procno, -1);
|
||||
for ( ; idx < 6; idx++) {
|
||||
args[idx] = -1;
|
||||
}
|
||||
#undef ASSOCIATIVITY
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
|
||||
sPAPRMachineState *spapr,
|
||||
target_ulong opcode,
|
||||
@ -1717,6 +1764,46 @@ static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
|
||||
|
||||
args[0] = characteristics;
|
||||
args[1] = behaviour;
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_update_dt(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
target_ulong dt = ppc64_phys_to_real(args[0]);
|
||||
struct fdt_header hdr = { 0 };
|
||||
unsigned cb;
|
||||
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
|
||||
void *fdt;
|
||||
|
||||
cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
|
||||
cb = fdt32_to_cpu(hdr.totalsize);
|
||||
|
||||
if (!smc->update_dt_enabled) {
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
/* Check that the fdt did not grow out of proportion */
|
||||
if (cb > spapr->fdt_initial_size * 2) {
|
||||
trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
|
||||
fdt32_to_cpu(hdr.magic));
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
fdt = g_malloc0(cb);
|
||||
cpu_physical_memory_read(dt, fdt, cb);
|
||||
|
||||
/* Check the fdt consistency */
|
||||
if (fdt_check_full(fdt, cb)) {
|
||||
trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
|
||||
fdt32_to_cpu(hdr.magic));
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
g_free(spapr->fdt_blob);
|
||||
spapr->fdt_size = cb;
|
||||
spapr->fdt_blob = fdt;
|
||||
trace_spapr_update_dt(cb);
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
@ -1822,6 +1909,12 @@ static void hypercall_register_types(void)
|
||||
|
||||
/* ibm,client-architecture-support support */
|
||||
spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
|
||||
|
||||
spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
|
||||
|
||||
/* Virtual Processor Home Node */
|
||||
spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
|
||||
h_home_node_associativity);
|
||||
}
|
||||
|
||||
type_init(hypercall_register_types)
|
||||
|
@ -171,7 +171,7 @@ static qemu_irq spapr_qirq_xics(sPAPRMachineState *spapr, int irq)
|
||||
uint32_t srcno = irq - ics->offset;
|
||||
|
||||
if (ics_valid_irq(ics, irq)) {
|
||||
return ics->qirqs[srcno];
|
||||
return spapr->qirqs[srcno];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
@ -184,16 +184,26 @@ static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon)
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
icp_pic_print_info(ICP(cpu->intc), mon);
|
||||
icp_pic_print_info(cpu->icp, mon);
|
||||
}
|
||||
|
||||
ics_pic_print_info(spapr->ics, mon);
|
||||
}
|
||||
|
||||
static Object *spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
|
||||
Object *cpu, Error **errp)
|
||||
static void spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
return icp_create(cpu, spapr->icp_type, XICS_FABRIC(spapr), errp);
|
||||
Error *local_err = NULL;
|
||||
Object *obj;
|
||||
|
||||
obj = icp_create(OBJECT(cpu), spapr->icp_type, XICS_FABRIC(spapr),
|
||||
&local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
cpu->icp = ICP(obj);
|
||||
}
|
||||
|
||||
static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id)
|
||||
@ -202,12 +212,29 @@ static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id)
|
||||
CPUState *cs;
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
icp_resend(ICP(cpu->intc));
|
||||
icp_resend(cpu->icp);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
|
||||
{
|
||||
sPAPRMachineState *spapr = opaque;
|
||||
MachineState *machine = MACHINE(opaque);
|
||||
|
||||
if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
|
||||
ics_kvm_set_irq(spapr->ics, srcno, val);
|
||||
} else {
|
||||
ics_simple_set_irq(spapr->ics, srcno, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void spapr_irq_reset_xics(sPAPRMachineState *spapr, Error **errp)
|
||||
{
|
||||
/* TODO: create the KVM XICS device */
|
||||
}
|
||||
|
||||
#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
|
||||
#define SPAPR_IRQ_XICS_NR_MSIS \
|
||||
(XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)
|
||||
@ -225,6 +252,8 @@ sPAPRIrq spapr_irq_xics = {
|
||||
.dt_populate = spapr_dt_xics,
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_xics,
|
||||
.post_load = spapr_irq_post_load_xics,
|
||||
.reset = spapr_irq_reset_xics,
|
||||
.set_irq = spapr_irq_set_irq_xics,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -284,7 +313,16 @@ static void spapr_irq_free_xive(sPAPRMachineState *spapr, int irq, int num)
|
||||
|
||||
static qemu_irq spapr_qirq_xive(sPAPRMachineState *spapr, int irq)
|
||||
{
|
||||
return spapr_xive_qirq(spapr->xive, irq);
|
||||
sPAPRXive *xive = spapr->xive;
|
||||
|
||||
if (irq >= xive->nr_irqs) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* The sPAPR machine/device should have claimed the IRQ before */
|
||||
assert(xive_eas_is_valid(&xive->eat[irq]));
|
||||
|
||||
return spapr->qirqs[irq];
|
||||
}
|
||||
|
||||
static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
|
||||
@ -295,23 +333,31 @@ static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
|
||||
CPU_FOREACH(cs) {
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
xive_tctx_pic_print_info(XIVE_TCTX(cpu->intc), mon);
|
||||
xive_tctx_pic_print_info(cpu->tctx, mon);
|
||||
}
|
||||
|
||||
spapr_xive_pic_print_info(spapr->xive, mon);
|
||||
}
|
||||
|
||||
static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
|
||||
Object *cpu, Error **errp)
|
||||
static void spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
Object *obj = xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);
|
||||
Error *local_err = NULL;
|
||||
Object *obj;
|
||||
|
||||
obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
cpu->tctx = XIVE_TCTX(obj);
|
||||
|
||||
/*
|
||||
* (TCG) Early setting the OS CAM line for hotplugged CPUs as they
|
||||
* don't benificiate from the reset of the XIVE IRQ backend
|
||||
* don't beneficiate from the reset of the XIVE IRQ backend
|
||||
*/
|
||||
spapr_xive_set_tctx_os_cam(XIVE_TCTX(obj));
|
||||
return obj;
|
||||
spapr_xive_set_tctx_os_cam(cpu->tctx);
|
||||
}
|
||||
|
||||
static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
|
||||
@ -327,8 +373,18 @@ static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
|
||||
/* (TCG) Set the OS CAM line of the thread interrupt context. */
|
||||
spapr_xive_set_tctx_os_cam(XIVE_TCTX(cpu->intc));
|
||||
spapr_xive_set_tctx_os_cam(cpu->tctx);
|
||||
}
|
||||
|
||||
/* Activate the XIVE MMIOs */
|
||||
spapr_xive_mmio_set_enabled(spapr->xive, true);
|
||||
}
|
||||
|
||||
static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
|
||||
{
|
||||
sPAPRMachineState *spapr = opaque;
|
||||
|
||||
xive_source_set_irq(&spapr->xive->source, srcno, val);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -353,6 +409,186 @@ sPAPRIrq spapr_irq_xive = {
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_xive,
|
||||
.post_load = spapr_irq_post_load_xive,
|
||||
.reset = spapr_irq_reset_xive,
|
||||
.set_irq = spapr_irq_set_irq_xive,
|
||||
};
|
||||
|
||||
/*
|
||||
* Dual XIVE and XICS IRQ backend.
|
||||
*
|
||||
* Both interrupt mode, XIVE and XICS, objects are created but the
|
||||
* machine starts in legacy interrupt mode (XICS). It can be changed
|
||||
* by the CAS negotiation process and, in that case, the new mode is
|
||||
* activated after an extra machine reset.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
|
||||
* default.
|
||||
*/
|
||||
static sPAPRIrq *spapr_irq_current(sPAPRMachineState *spapr)
|
||||
{
|
||||
return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
|
||||
&spapr_irq_xive : &spapr_irq_xics;
|
||||
}
|
||||
|
||||
static void spapr_irq_init_dual(sPAPRMachineState *spapr, Error **errp)
|
||||
{
|
||||
MachineState *machine = MACHINE(spapr);
|
||||
Error *local_err = NULL;
|
||||
|
||||
if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
|
||||
error_setg(errp, "No KVM support for the 'dual' machine");
|
||||
return;
|
||||
}
|
||||
|
||||
spapr_irq_xics.init(spapr, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Align the XICS and the XIVE IRQ number space under QEMU.
|
||||
*
|
||||
* However, the XICS KVM device still considers that the IRQ
|
||||
* numbers should start at XICS_IRQ_BASE (0x1000). Either we
|
||||
* should introduce a KVM device ioctl to set the offset or ignore
|
||||
* the lower 4K numbers when using the get/set ioctl of the XICS
|
||||
* KVM device. The second option seems the least intrusive.
|
||||
*/
|
||||
spapr->ics->offset = 0;
|
||||
|
||||
spapr_irq_xive.init(spapr, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static int spapr_irq_claim_dual(sPAPRMachineState *spapr, int irq, bool lsi,
|
||||
Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
int ret;
|
||||
|
||||
ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void spapr_irq_free_dual(sPAPRMachineState *spapr, int irq, int num)
|
||||
{
|
||||
spapr_irq_xics.free(spapr, irq, num);
|
||||
spapr_irq_xive.free(spapr, irq, num);
|
||||
}
|
||||
|
||||
static qemu_irq spapr_qirq_dual(sPAPRMachineState *spapr, int irq)
|
||||
{
|
||||
sPAPRXive *xive = spapr->xive;
|
||||
ICSState *ics = spapr->ics;
|
||||
|
||||
if (irq >= spapr->irq->nr_irqs) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* The IRQ number should have been claimed under both interrupt
|
||||
* controllers.
|
||||
*/
|
||||
assert(!ICS_IRQ_FREE(ics, irq - ics->offset));
|
||||
assert(xive_eas_is_valid(&xive->eat[irq]));
|
||||
|
||||
return spapr->qirqs[irq];
|
||||
}
|
||||
|
||||
static void spapr_irq_print_info_dual(sPAPRMachineState *spapr, Monitor *mon)
|
||||
{
|
||||
spapr_irq_current(spapr)->print_info(spapr, mon);
|
||||
}
|
||||
|
||||
static void spapr_irq_dt_populate_dual(sPAPRMachineState *spapr,
|
||||
uint32_t nr_servers, void *fdt,
|
||||
uint32_t phandle)
|
||||
{
|
||||
spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
|
||||
}
|
||||
|
||||
static void spapr_irq_cpu_intc_create_dual(sPAPRMachineState *spapr,
|
||||
PowerPCCPU *cpu, Error **errp)
|
||||
{
|
||||
Error *local_err = NULL;
|
||||
|
||||
spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
return;
|
||||
}
|
||||
|
||||
spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
|
||||
}
|
||||
|
||||
static int spapr_irq_post_load_dual(sPAPRMachineState *spapr, int version_id)
|
||||
{
|
||||
/*
|
||||
* Force a reset of the XIVE backend after migration. The machine
|
||||
* defaults to XICS at startup.
|
||||
*/
|
||||
if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
|
||||
spapr_irq_xive.reset(spapr, &error_fatal);
|
||||
}
|
||||
|
||||
return spapr_irq_current(spapr)->post_load(spapr, version_id);
|
||||
}
|
||||
|
||||
static void spapr_irq_reset_dual(sPAPRMachineState *spapr, Error **errp)
|
||||
{
|
||||
/*
|
||||
* Deactivate the XIVE MMIOs. The XIVE backend will reenable them
|
||||
* if selected.
|
||||
*/
|
||||
spapr_xive_mmio_set_enabled(spapr->xive, false);
|
||||
|
||||
spapr_irq_current(spapr)->reset(spapr, errp);
|
||||
}
|
||||
|
||||
static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
|
||||
{
|
||||
sPAPRMachineState *spapr = opaque;
|
||||
|
||||
spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Define values in sync with the XIVE and XICS backend
|
||||
*/
|
||||
#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
|
||||
#define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)
|
||||
|
||||
sPAPRIrq spapr_irq_dual = {
|
||||
.nr_irqs = SPAPR_IRQ_DUAL_NR_IRQS,
|
||||
.nr_msis = SPAPR_IRQ_DUAL_NR_MSIS,
|
||||
.ov5 = SPAPR_OV5_XIVE_BOTH,
|
||||
|
||||
.init = spapr_irq_init_dual,
|
||||
.claim = spapr_irq_claim_dual,
|
||||
.free = spapr_irq_free_dual,
|
||||
.qirq = spapr_qirq_dual,
|
||||
.print_info = spapr_irq_print_info_dual,
|
||||
.dt_populate = spapr_irq_dt_populate_dual,
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_dual,
|
||||
.post_load = spapr_irq_post_load_dual,
|
||||
.reset = spapr_irq_reset_dual,
|
||||
.set_irq = spapr_irq_set_irq_dual
|
||||
};
|
||||
|
||||
/*
|
||||
@ -366,6 +602,9 @@ void spapr_irq_init(sPAPRMachineState *spapr, Error **errp)
|
||||
}
|
||||
|
||||
spapr->irq->init(spapr, errp);
|
||||
|
||||
spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
|
||||
spapr->irq->nr_irqs);
|
||||
}
|
||||
|
||||
int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp)
|
||||
@ -465,4 +704,5 @@ sPAPRIrq spapr_irq_xics_legacy = {
|
||||
.dt_populate = spapr_dt_xics,
|
||||
.cpu_intc_create = spapr_irq_cpu_intc_create_xics,
|
||||
.post_load = spapr_irq_post_load_xics,
|
||||
.set_irq = spapr_irq_set_irq_xics,
|
||||
};
|
||||
|
@ -1988,17 +1988,6 @@ static const TypeInfo spapr_phb_info = {
|
||||
}
|
||||
};
|
||||
|
||||
PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
|
||||
{
|
||||
DeviceState *dev;
|
||||
|
||||
dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
|
||||
qdev_prop_set_uint32(dev, "index", index);
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
return PCI_HOST_BRIDGE(dev);
|
||||
}
|
||||
|
||||
typedef struct sPAPRFDT {
|
||||
void *fdt;
|
||||
int node_off;
|
||||
|
@ -22,6 +22,9 @@ spapr_cas_pvr_try(uint32_t pvr) "0x%x"
|
||||
spapr_cas_pvr(uint32_t cur_pvr, bool explicit_match, uint32_t new_pvr) "current=0x%x, explicit_match=%u, new=0x%x"
|
||||
spapr_h_resize_hpt_prepare(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64
|
||||
spapr_h_resize_hpt_commit(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64
|
||||
spapr_update_dt(unsigned cb) "New blob %u bytes"
|
||||
spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x"
|
||||
spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x"
|
||||
|
||||
# hw/ppc/spapr_iommu.c
|
||||
spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
|
||||
|
@ -94,11 +94,13 @@ struct sPAPRPHBState {
|
||||
((1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET)
|
||||
#define SPAPR_PCI_MEM64_WIN_SIZE 0x10000000000ULL /* 1 TiB */
|
||||
|
||||
/* Without manual configuration, all PCI outbound windows will be
|
||||
* within this range */
|
||||
/* All PCI outbound windows will be within this range */
|
||||
#define SPAPR_PCI_BASE (1ULL << 45) /* 32 TiB */
|
||||
#define SPAPR_PCI_LIMIT (1ULL << 46) /* 64 TiB */
|
||||
|
||||
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
|
||||
SPAPR_PCI_MEM64_WIN_SIZE - 1)
|
||||
|
||||
#define SPAPR_PCI_2_7_MMIO_WIN_SIZE 0xf80000000
|
||||
#define SPAPR_PCI_IO_WIN_SIZE 0x10000
|
||||
|
||||
@ -111,8 +113,6 @@ static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
|
||||
return spapr_qirq(spapr, phb->lsi_table[pin].irq);
|
||||
}
|
||||
|
||||
PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index);
|
||||
|
||||
int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt,
|
||||
uint32_t nr_msis);
|
||||
|
||||
|
@ -405,8 +405,10 @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
|
||||
MemoryRegion *address_space_mem,
|
||||
MemoryRegion *address_space_io,
|
||||
uint8_t devfn_min, const char *typename);
|
||||
void pci_root_bus_cleanup(PCIBus *bus);
|
||||
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
|
||||
void *irq_opaque, int nirq);
|
||||
void pci_bus_irqs_cleanup(PCIBus *bus);
|
||||
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
|
||||
/* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */
|
||||
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin);
|
||||
@ -417,6 +419,7 @@ PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
|
||||
MemoryRegion *address_space_io,
|
||||
uint8_t devfn_min, int nirq,
|
||||
const char *typename);
|
||||
void pci_unregister_root_bus(PCIBus *bus);
|
||||
void pci_bus_set_route_irq_fn(PCIBus *, pci_route_irq_fn);
|
||||
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin);
|
||||
bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new);
|
||||
|
@ -98,7 +98,7 @@ typedef struct PnvChipClass {
|
||||
DeviceRealize parent_realize;
|
||||
|
||||
uint32_t (*core_pir)(PnvChip *chip, uint32_t core_id);
|
||||
Object *(*intc_create)(PnvChip *chip, Object *child, Error **errp);
|
||||
void (*intc_create)(PnvChip *chip, PowerPCCPU *cpu, Error **errp);
|
||||
ISABus *(*isa_create)(PnvChip *chip, Error **errp);
|
||||
} PnvChipClass;
|
||||
|
||||
|
@ -40,6 +40,7 @@ typedef struct PnvPsi {
|
||||
|
||||
/* Interrupt generation */
|
||||
ICSState ics;
|
||||
qemu_irq *qirqs;
|
||||
|
||||
/* Registers */
|
||||
uint64_t regs[PSIHB_XSCOM_MAX];
|
||||
|
@ -103,6 +103,7 @@ struct sPAPRMachineClass {
|
||||
|
||||
/*< public >*/
|
||||
bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */
|
||||
bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */
|
||||
bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */
|
||||
bool pre_2_10_has_unused_icps;
|
||||
bool legacy_irq_allocation;
|
||||
@ -139,6 +140,9 @@ struct sPAPRMachineState {
|
||||
int vrma_adjust;
|
||||
ssize_t rtas_size;
|
||||
void *rtas_blob;
|
||||
uint32_t fdt_size;
|
||||
uint32_t fdt_initial_size;
|
||||
void *fdt_blob;
|
||||
long kernel_size;
|
||||
bool kernel_le;
|
||||
uint32_t initrd_base;
|
||||
@ -178,6 +182,7 @@ struct sPAPRMachineState {
|
||||
unsigned long *irq_map;
|
||||
sPAPRXive *xive;
|
||||
sPAPRIrq *irq;
|
||||
qemu_irq *qirqs;
|
||||
|
||||
bool cmd_line_caps[SPAPR_CAP_NUM];
|
||||
sPAPRCapabilities def, eff, mig;
|
||||
@ -444,6 +449,7 @@ struct sPAPRMachineState {
|
||||
#define H_GET_EM_PARMS 0x2B8
|
||||
#define H_SET_MPP 0x2D0
|
||||
#define H_GET_MPP 0x2D4
|
||||
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
|
||||
#define H_XIRR_X 0x2FC
|
||||
#define H_RANDOM 0x300
|
||||
#define H_SET_MODE 0x31C
|
||||
@ -480,7 +486,8 @@ struct sPAPRMachineState {
|
||||
#define KVMPPC_H_LOGICAL_MEMOP (KVMPPC_HCALL_BASE + 0x1)
|
||||
/* Client Architecture support */
|
||||
#define KVMPPC_H_CAS (KVMPPC_HCALL_BASE + 0x2)
|
||||
#define KVMPPC_HCALL_MAX KVMPPC_H_CAS
|
||||
#define KVMPPC_H_UPDATE_DT (KVMPPC_HCALL_BASE + 0x3)
|
||||
#define KVMPPC_HCALL_MAX KVMPPC_H_UPDATE_DT
|
||||
|
||||
typedef struct sPAPRDeviceTreeUpdateHeader {
|
||||
uint32_t version_id;
|
||||
|
@ -42,15 +42,17 @@ typedef struct sPAPRIrq {
|
||||
void (*print_info)(sPAPRMachineState *spapr, Monitor *mon);
|
||||
void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers,
|
||||
void *fdt, uint32_t phandle);
|
||||
Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu,
|
||||
Error **errp);
|
||||
void (*cpu_intc_create)(sPAPRMachineState *spapr, PowerPCCPU *cpu,
|
||||
Error **errp);
|
||||
int (*post_load)(sPAPRMachineState *spapr, int version_id);
|
||||
void (*reset)(sPAPRMachineState *spapr, Error **errp);
|
||||
void (*set_irq)(void *opaque, int srcno, int val);
|
||||
} sPAPRIrq;
|
||||
|
||||
extern sPAPRIrq spapr_irq_xics;
|
||||
extern sPAPRIrq spapr_irq_xics_legacy;
|
||||
extern sPAPRIrq spapr_irq_xive;
|
||||
extern sPAPRIrq spapr_irq_dual;
|
||||
|
||||
void spapr_irq_init(sPAPRMachineState *spapr, Error **errp);
|
||||
int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp);
|
||||
|
@ -40,7 +40,6 @@ typedef struct sPAPRXive {
|
||||
bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi);
|
||||
bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn);
|
||||
void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon);
|
||||
qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn);
|
||||
|
||||
typedef struct sPAPRMachineState sPAPRMachineState;
|
||||
|
||||
@ -48,5 +47,6 @@ void spapr_xive_hcall_init(sPAPRMachineState *spapr);
|
||||
void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
|
||||
uint32_t phandle);
|
||||
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx);
|
||||
void spapr_xive_mmio_set_enabled(sPAPRXive *xive, bool enable);
|
||||
|
||||
#endif /* PPC_SPAPR_XIVE_H */
|
||||
|
@ -131,7 +131,6 @@ struct ICSState {
|
||||
/*< public >*/
|
||||
uint32_t nr_irqs;
|
||||
uint32_t offset;
|
||||
qemu_irq *qirqs;
|
||||
ICSIRQState *irqs;
|
||||
XICSFabric *xics;
|
||||
};
|
||||
@ -140,8 +139,7 @@ struct ICSState {
|
||||
|
||||
static inline bool ics_valid_irq(ICSState *ics, uint32_t nr)
|
||||
{
|
||||
return (ics->offset != 0) && (nr >= ics->offset)
|
||||
&& (nr < (ics->offset + ics->nr_irqs));
|
||||
return (nr >= ics->offset) && (nr < (ics->offset + ics->nr_irqs));
|
||||
}
|
||||
|
||||
struct ICSIRQState {
|
||||
@ -192,6 +190,8 @@ void icp_eoi(ICPState *icp, uint32_t xirr);
|
||||
|
||||
void ics_simple_write_xive(ICSState *ics, int nr, int server,
|
||||
uint8_t priority, uint8_t saved_priority);
|
||||
void ics_simple_set_irq(void *opaque, int srcno, int val);
|
||||
void ics_kvm_set_irq(void *opaque, int srcno, int val);
|
||||
|
||||
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi);
|
||||
void icp_pic_print_info(ICPState *icp, Monitor *mon);
|
||||
|
@ -184,7 +184,6 @@ typedef struct XiveSource {
|
||||
|
||||
/* IRQs */
|
||||
uint32_t nr_irqs;
|
||||
qemu_irq *qirqs;
|
||||
unsigned long *lsi_map;
|
||||
|
||||
/* PQ bits and LSI assertion bit */
|
||||
@ -278,12 +277,6 @@ uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq);
|
||||
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset,
|
||||
Monitor *mon);
|
||||
|
||||
static inline qemu_irq xive_source_qirq(XiveSource *xsrc, uint32_t srcno)
|
||||
{
|
||||
assert(srcno < xsrc->nr_irqs);
|
||||
return xsrc->qirqs[srcno];
|
||||
}
|
||||
|
||||
static inline bool xive_source_irq_is_lsi(XiveSource *xsrc, uint32_t srcno)
|
||||
{
|
||||
assert(srcno < xsrc->nr_irqs);
|
||||
@ -299,6 +292,8 @@ static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno,
|
||||
}
|
||||
}
|
||||
|
||||
void xive_source_set_irq(void *opaque, int srcno, int val);
|
||||
|
||||
/*
|
||||
* XIVE Router
|
||||
*/
|
||||
|
@ -258,8 +258,8 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
|
||||
/* Save Altivec registers if necessary. */
|
||||
if (env->insns_flags & PPC_ALTIVEC) {
|
||||
uint32_t *vrsave;
|
||||
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
|
||||
ppc_avr_t *avr = &env->avr[i];
|
||||
for (i = 0; i < 32; i++) {
|
||||
ppc_avr_t *avr = cpu_avr_ptr(env, i);
|
||||
ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
|
||||
|
||||
__put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
|
||||
@ -281,15 +281,17 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
|
||||
/* Save VSX second halves */
|
||||
if (env->insns_flags2 & PPC2_VSX) {
|
||||
uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
|
||||
for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
|
||||
__put_user(env->vsr[i], &vsregs[i]);
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *vsrl = cpu_vsrl_ptr(env, i);
|
||||
__put_user(*vsrl, &vsregs[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Save floating point registers. */
|
||||
if (env->insns_flags & PPC_FLOAT) {
|
||||
for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
|
||||
__put_user(env->fpr[i], &frame->mc_fregs[i]);
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *fpr = cpu_fpr_ptr(env, i);
|
||||
__put_user(*fpr, &frame->mc_fregs[i]);
|
||||
}
|
||||
__put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
|
||||
}
|
||||
@ -373,8 +375,8 @@ static void restore_user_regs(CPUPPCState *env,
|
||||
#else
|
||||
v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
|
||||
#endif
|
||||
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
|
||||
ppc_avr_t *avr = &env->avr[i];
|
||||
for (i = 0; i < 32; i++) {
|
||||
ppc_avr_t *avr = cpu_avr_ptr(env, i);
|
||||
ppc_avr_t *vreg = &v_regs[i];
|
||||
|
||||
__get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
|
||||
@ -393,16 +395,18 @@ static void restore_user_regs(CPUPPCState *env,
|
||||
/* Restore VSX second halves */
|
||||
if (env->insns_flags2 & PPC2_VSX) {
|
||||
uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
|
||||
for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
|
||||
__get_user(env->vsr[i], &vsregs[i]);
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *vsrl = cpu_vsrl_ptr(env, i);
|
||||
__get_user(*vsrl, &vsregs[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Restore floating point registers. */
|
||||
if (env->insns_flags & PPC_FLOAT) {
|
||||
uint64_t fpscr;
|
||||
for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
|
||||
__get_user(env->fpr[i], &frame->mc_fregs[i]);
|
||||
for (i = 0; i < 32; i++) {
|
||||
uint64_t *fpr = cpu_fpr_ptr(env, i);
|
||||
__get_user(*fpr, &frame->mc_fregs[i]);
|
||||
}
|
||||
__get_user(fpscr, &frame->mc_fregs[32]);
|
||||
env->fpscr = (uint32_t) fpscr;
|
||||
|
@ -140,7 +140,8 @@ static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
memset(fpregset, 0, sizeof(*fpregset));
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
fpregset->fpr[i] = cpu_to_dump64(s, cpu->env.fpr[i]);
|
||||
uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
|
||||
fpregset->fpr[i] = cpu_to_dump64(s, *fpr);
|
||||
}
|
||||
fpregset->fpscr = cpu_to_dump_reg(s, cpu->env.fpscr);
|
||||
}
|
||||
@ -158,6 +159,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
bool needs_byteswap;
|
||||
ppc_avr_t *avr = cpu_avr_ptr(&cpu->env, i);
|
||||
|
||||
#ifdef HOST_WORDS_BIGENDIAN
|
||||
needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB;
|
||||
@ -166,11 +168,11 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
#endif
|
||||
|
||||
if (needs_byteswap) {
|
||||
vmxregset->avr[i].u64[0] = bswap64(cpu->env.avr[i].u64[1]);
|
||||
vmxregset->avr[i].u64[1] = bswap64(cpu->env.avr[i].u64[0]);
|
||||
vmxregset->avr[i].u64[0] = bswap64(avr->u64[1]);
|
||||
vmxregset->avr[i].u64[1] = bswap64(avr->u64[0]);
|
||||
} else {
|
||||
vmxregset->avr[i].u64[0] = cpu->env.avr[i].u64[0];
|
||||
vmxregset->avr[i].u64[1] = cpu->env.avr[i].u64[1];
|
||||
vmxregset->avr[i].u64[0] = avr->u64[0];
|
||||
vmxregset->avr[i].u64[1] = avr->u64[1];
|
||||
}
|
||||
}
|
||||
vmxregset->vscr.u32[3] = cpu_to_dump32(s, cpu->env.vscr);
|
||||
@ -188,7 +190,8 @@ static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
memset(vsxregset, 0, sizeof(*vsxregset));
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
vsxregset->vsr[i] = cpu_to_dump64(s, cpu->env.vsr[i]);
|
||||
uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
|
||||
vsxregset->vsr[i] = cpu_to_dump64(s, *vsrl);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -218,7 +218,6 @@ typedef struct opc_handler_t opc_handler_t;
   /* Types used to describe some PowerPC registers etc. */
   typedef struct DisasContext DisasContext;
   typedef struct ppc_spr_t ppc_spr_t;
-  typedef union ppc_avr_t ppc_avr_t;
   typedef union ppc_tlb_t ppc_tlb_t;
   typedef struct ppc_hash_pte64 ppc_hash_pte64_t;

@@ -242,22 +241,26 @@ struct ppc_spr_t {
   #endif
   };

-  /* Altivec registers (128 bits) */
-  union ppc_avr_t {
-  float32 f[4];
+  /* VSX/Altivec registers (128 bits) */
+  typedef union _ppc_vsr_t {
   uint8_t u8[16];
   uint16_t u16[8];
   uint32_t u32[4];
+  uint64_t u64[2];
   int8_t s8[16];
   int16_t s16[8];
   int32_t s32[4];
-  uint64_t u64[2];
   int64_t s64[2];
+  float32 f32[4];
+  float64 f64[2];
+  float128 f128;
   #ifdef CONFIG_INT128
   __uint128_t u128;
   #endif
-  Int128 s128;
-  };
+  Int128 s128;
+  } ppc_vsr_t;
+
+  typedef ppc_vsr_t ppc_avr_t;

   #if !defined(CONFIG_USER_ONLY)
   /* Software TLB cache */

@@ -1001,8 +1004,6 @@ struct CPUPPCState {

   /* Floating point execution context */
   float_status fp_status;
-  /* floating point registers */
-  float64 fpr[32];
   /* floating point status and control register */
   target_ulong fpscr;

@@ -1052,11 +1053,10 @@ struct CPUPPCState {
   /* Special purpose registers */
   target_ulong spr[1024];
   ppc_spr_t spr_cb[1024];
-  /* Altivec registers */
-  ppc_avr_t avr[32];
   /* Vector status and control register */
   uint32_t vscr;
-  /* VSX registers */
-  uint64_t vsr[32];
+  /* VSX registers (including FP and AVR) */
+  ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
   /* SPE registers */
   uint64_t spe_acc;
   uint32_t spe_fscr;

@@ -1177,6 +1177,8 @@ do { \

   typedef struct PPCVirtualHypervisor PPCVirtualHypervisor;
   typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass;
+  typedef struct XiveTCTX XiveTCTX;
+  typedef struct ICPState ICPState;

   /**
   * PowerPCCPU:

@@ -1195,7 +1197,8 @@ struct PowerPCCPU {
   int vcpu_id;
   uint32_t compat_pvr;
   PPCVirtualHypervisor *vhyp;
-  Object *intc;
+  ICPState *icp;
+  XiveTCTX *tctx;
   void *machine_data;
   int32_t node_id; /* NUMA node this CPU belongs to */
   PPCHash64Options *hash64_opts;

@@ -2537,6 +2540,22 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
   (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
   }
+
+  /* Accessors for FP, VMX and VSX registers */
+  static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
+  {
+  return &env->vsr[i].u64[0];
+  }
+
+  static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
+  {
+  return &env->vsr[i].u64[1];
+  }
+
+  static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
+  {
+  return &env->vsr[32 + i];
+  }

   void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);

   void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
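The hunks above replace the separate fpr[32], avr[32] and vsr[32] arrays with a single aligned vsr[64] array, and route all accesses through cpu_fpr_ptr()/cpu_vsrl_ptr()/cpu_avr_ptr(). The standalone sketch below (not part of the patch; toy_env and the toy_* helpers are made-up stand-ins, not the real CPUPPCState) just illustrates the layout those accessors assume: FP register i shares a 128-bit slot with VSR i, and Altivec register i sits in slot 32 + i.

/* Illustrative sketch only -- a trimmed-down stand-in for CPUPPCState. */
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint8_t  u8[16];
    uint32_t u32[4];
    uint64_t u64[2];
} vsr128;                               /* stands in for ppc_vsr_t */

typedef struct {
    vsr128 vsr[64];                     /* FPR/VSR in [0..31], AVR in [32..63] */
} toy_env;

static uint64_t *toy_fpr_ptr(toy_env *env, int i)  { return &env->vsr[i].u64[0]; }
static uint64_t *toy_vsrl_ptr(toy_env *env, int i) { return &env->vsr[i].u64[1]; }
static vsr128   *toy_avr_ptr(toy_env *env, int i)  { return &env->vsr[32 + i]; }

int main(void)
{
    toy_env env = {0};

    *toy_fpr_ptr(&env, 3) = 0x4008000000000000ull;  /* "FPR3" = 3.0 as an IEEE double */
    *toy_vsrl_ptr(&env, 3) = 0xdeadbeefull;         /* low doubleword of "VSR3" */
    toy_avr_ptr(&env, 0)->u64[0] = 0x1122334455667788ull;

    /* FPR3 and VSR3 share the same 128-bit slot; AVR0 lives in slot 32. */
    printf("vsr[3]  = %016llx %016llx\n",
           (unsigned long long)env.vsr[3].u64[0],
           (unsigned long long)env.vsr[3].u64[1]);
    printf("vsr[32] = %016llx %016llx\n",
           (unsigned long long)env.vsr[32].u64[0],
           (unsigned long long)env.vsr[32].u64[1]);
    return 0;
}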
@@ -126,7 +126,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
   gdb_get_regl(mem_buf, env->gpr[n]);
   } else if (n < 64) {
   /* fprs */
-  stfq_p(mem_buf, env->fpr[n-32]);
+  stfq_p(mem_buf, *cpu_fpr_ptr(env, n - 32));
   } else {
   switch (n) {
   case 64:

@@ -178,7 +178,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
   gdb_get_reg64(mem_buf, env->gpr[n]);
   } else if (n < 64) {
   /* fprs */
-  stfq_p(mem_buf, env->fpr[n-32]);
+  stfq_p(mem_buf, *cpu_fpr_ptr(env, n - 32));
   } else if (n < 96) {
   /* Altivec */
   stq_p(mem_buf, n - 64);

@@ -234,7 +234,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
   env->gpr[n] = ldtul_p(mem_buf);
   } else if (n < 64) {
   /* fprs */
-  env->fpr[n-32] = ldfq_p(mem_buf);
+  *cpu_fpr_ptr(env, n - 32) = ldfq_p(mem_buf);
   } else {
   switch (n) {
   case 64:

@@ -284,7 +284,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
   env->gpr[n] = ldq_p(mem_buf);
   } else if (n < 64) {
   /* fprs */
-  env->fpr[n-32] = ldfq_p(mem_buf);
+  *cpu_fpr_ptr(env, n - 32) = ldfq_p(mem_buf);
   } else {
   switch (n) {
   case 64 + 32:
@@ -391,13 +391,9 @@ target_ulong helper_602_mfrom(target_ulong arg)
   #if defined(HOST_WORDS_BIGENDIAN)
   #define HI_IDX 0
   #define LO_IDX 1
-  #define AVRB(i) u8[i]
-  #define AVRW(i) u32[i]
   #else
   #define HI_IDX 1
   #define LO_IDX 0
-  #define AVRB(i) u8[15-(i)]
-  #define AVRW(i) u32[3-(i)]
   #endif

   #if defined(HOST_WORDS_BIGENDIAN)

@@ -548,8 +544,8 @@ VARITH_DO(muluwm, *, u32)
   { \
   int i; \
   \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
-  r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+  r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status); \
   } \
   }
   VARITHFP(addfp, float32_add)

@@ -563,9 +559,9 @@ VARITHFP(maxfp, float32_max)
   ppc_avr_t *b, ppc_avr_t *c) \
   { \
   int i; \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
-  r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i], \
-  type, &env->vec_status); \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+  r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \
+  type, &env->vec_status); \
   } \
   }
   VARITHFPFMA(maddfp, 0);

@@ -670,9 +666,9 @@ VABSDU(w, u32)
   { \
   int i; \
   \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
   float32 t = cvt(b->element[i], &env->vec_status); \
-  r->f[i] = float32_scalbn(t, -uim, &env->vec_status); \
+  r->f32[i] = float32_scalbn(t, -uim, &env->vec_status); \
   } \
   }
   VCF(ux, uint32_to_float32, u32)

@@ -782,9 +778,9 @@ VCMPNE(w, u32, uint32_t, 0)
   uint32_t none = 0; \
   int i; \
   \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
   uint32_t result; \
-  int rel = float32_compare_quiet(a->f[i], b->f[i], \
+  int rel = float32_compare_quiet(a->f32[i], b->f32[i], \
   &env->vec_status); \
   if (rel == float_relation_unordered) { \
   result = 0; \

@@ -816,14 +812,16 @@ static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
   int i;
   int all_in = 0;

-  for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-  int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+  int le_rel = float32_compare_quiet(a->f32[i], b->f32[i],
+  &env->vec_status);
   if (le_rel == float_relation_unordered) {
   r->u32[i] = 0xc0000000;
   all_in = 1;
   } else {
-  float32 bneg = float32_chs(b->f[i]);
-  int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
+  float32 bneg = float32_chs(b->f32[i]);
+  int ge_rel = float32_compare_quiet(a->f32[i], bneg,
+  &env->vec_status);
   int le = le_rel != float_relation_greater;
   int ge = ge_rel != float_relation_less;

@@ -856,11 +854,11 @@ void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
   float_status s = env->vec_status; \
   \
   set_float_rounding_mode(float_round_to_zero, &s); \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
-  if (float32_is_any_nan(b->f[i])) { \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+  if (float32_is_any_nan(b->f32[i])) { \
   r->element[i] = 0; \
   } else { \
-  float64 t = float32_to_float64(b->f[i], &s); \
+  float64 t = float32_to_float64(b->f32[i], &s); \
   int64_t j; \
   \
   t = float64_scalbn(t, uim, &s); \

@@ -1661,8 +1659,8 @@ void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
   {
   int i;

-  for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-  r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+  r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status);
   }
   }

@@ -1674,8 +1672,8 @@ void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
   float_status s = env->vec_status; \
   \
   set_float_rounding_mode(rounding, &s); \
-  for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
-  r->f[i] = float32_round_to_int (b->f[i], &s); \
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+  r->f32[i] = float32_round_to_int (b->f32[i], &s); \
   } \
   }
   VRFI(n, float_round_nearest_even)

@@ -1705,10 +1703,10 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
   {
   int i;

-  for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-  float32 t = float32_sqrt(b->f[i], &env->vec_status);
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+  float32 t = float32_sqrt(b->f32[i], &env->vec_status);

-  r->f[i] = float32_div(float32_one, t, &env->vec_status);
+  r->f32[i] = float32_div(float32_one, t, &env->vec_status);
   }
   }

@@ -1751,8 +1749,8 @@ void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
   {
   int i;

-  for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-  r->f[i] = float32_exp2(b->f[i], &env->vec_status);
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+  r->f32[i] = float32_exp2(b->f32[i], &env->vec_status);
   }
   }

@@ -1760,8 +1758,8 @@ void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
   {
   int i;

-  for (i = 0; i < ARRAY_SIZE(r->f); i++) {
-  r->f[i] = float32_log2(b->f[i], &env->vec_status);
+  for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+  r->f32[i] = float32_log2(b->f32[i], &env->vec_status);
   }
   }

@@ -3275,11 +3273,11 @@ void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
   int i;

   VECTOR_FOR_INORDER_I(i, u32) {
-  result.AVRW(i) = b->AVRW(i) ^
-  (AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
-  AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
-  AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
-  AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
+  result.VsrW(i) = b->VsrW(i) ^
+  (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
+  AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
+  AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
+  AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
   }
   *r = result;
   }

@@ -3290,7 +3288,7 @@ void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
   int i;

   VECTOR_FOR_INORDER_I(i, u8) {
-  result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
+  result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
   }
   *r = result;
   }

@@ -3303,15 +3301,15 @@ void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
   ppc_avr_t tmp;

   VECTOR_FOR_INORDER_I(i, u8) {
-  tmp.AVRB(i) = b->AVRB(i) ^ AES_isbox[a->AVRB(AES_ishifts[i])];
+  tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
   }

   VECTOR_FOR_INORDER_I(i, u32) {
-  r->AVRW(i) =
-  AES_imc[tmp.AVRB(4*i + 0)][0] ^
-  AES_imc[tmp.AVRB(4*i + 1)][1] ^
-  AES_imc[tmp.AVRB(4*i + 2)][2] ^
-  AES_imc[tmp.AVRB(4*i + 3)][3];
+  r->VsrW(i) =
+  AES_imc[tmp.VsrB(4 * i + 0)][0] ^
+  AES_imc[tmp.VsrB(4 * i + 1)][1] ^
+  AES_imc[tmp.VsrB(4 * i + 2)][2] ^
+  AES_imc[tmp.VsrB(4 * i + 3)][3];
   }
   }

@@ -3321,7 +3319,7 @@ void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
   int i;

   VECTOR_FOR_INORDER_I(i, u8) {
-  result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
+  result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
   }
   *r = result;
   }
@@ -52,20 +52,20 @@ FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX);
   #define EXTRACT_HELPER(name, shift, nb) \
   static inline uint32_t name(uint32_t opcode) \
   { \
-  return (opcode >> (shift)) & ((1 << (nb)) - 1); \
+  return extract32(opcode, shift, nb); \
   }

   #define EXTRACT_SHELPER(name, shift, nb) \
   static inline int32_t name(uint32_t opcode) \
   { \
-  return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
+  return sextract32(opcode, shift, nb); \
   }

   #define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
   static inline uint32_t name(uint32_t opcode) \
   { \
-  return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
-  ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
+  return extract32(opcode, shift1, nb1) << nb2 | \
+  extract32(opcode, shift2, nb2); \
   }

   #define EXTRACT_HELPER_SPLIT_3(name, \

@@ -124,7 +124,7 @@ EXTRACT_SHELPER(SIMM, 0, 16);
   /* 16 bits unsigned immediate value */
   EXTRACT_HELPER(UIMM, 0, 16);
   /* 5 bits signed immediate value */
-  EXTRACT_HELPER(SIMM5, 16, 5);
+  EXTRACT_SHELPER(SIMM5, 16, 5);
   /* 5 bits signed immediate value */
   EXTRACT_HELPER(UIMM5, 16, 5);
   /* 4 bits unsigned immediate value */

@@ -204,17 +204,6 @@ EXTRACT_HELPER(IMM8, 11, 8);
   EXTRACT_HELPER(DCMX, 16, 7);
   EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6);

-  typedef union _ppc_vsr_t {
-  uint8_t u8[16];
-  uint16_t u16[8];
-  uint32_t u32[4];
-  uint64_t u64[2];
-  float32 f32[4];
-  float64 f64[2];
-  float128 f128;
-  Int128 s128;
-  } ppc_vsr_t;
-
   #if defined(HOST_WORDS_BIGENDIAN)
   #define VsrB(i) u8[i]
   #define VsrH(i) u16[i]

@@ -229,24 +218,14 @@ typedef union _ppc_vsr_t {

   static inline void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
   {
-  if (n < 32) {
-  vsr->VsrD(0) = env->fpr[n];
-  vsr->VsrD(1) = env->vsr[n];
-  } else {
-  vsr->u64[0] = env->avr[n - 32].u64[0];
-  vsr->u64[1] = env->avr[n - 32].u64[1];
-  }
+  vsr->VsrD(0) = env->vsr[n].u64[0];
+  vsr->VsrD(1) = env->vsr[n].u64[1];
   }

   static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
   {
-  if (n < 32) {
-  env->fpr[n] = vsr->VsrD(0);
-  env->vsr[n] = vsr->VsrD(1);
-  } else {
-  env->avr[n - 32].u64[0] = vsr->u64[0];
-  env->avr[n - 32].u64[1] = vsr->u64[1];
-  }
+  env->vsr[n].u64[0] = vsr->VsrD(0);
+  env->vsr[n].u64[1] = vsr->VsrD(1);
   }

   void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
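The EXTRACT_HELPER changes above swap open-coded shift/mask expressions for extract32()/sextract32(). The standalone check below is a sketch only: my_extract32/my_sextract32 are local re-implementations written to match the documented semantics of QEMU's bitops helpers (the real functions are not pulled in here), and the sample opcode is just an addi encoding used for illustration.

/* Sketch: old shift/mask form vs. extract32()-style helpers on one opcode. */
#include <stdint.h>
#include <assert.h>

static uint32_t my_extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

static int32_t my_sextract32(uint32_t value, int start, int length)
{
    /* shift the field up to the sign bit, then arithmetic-shift back down */
    return ((int32_t)(value << (32 - length - start))) >> (32 - length);
}

int main(void)
{
    uint32_t opcode = 0x3860ffff;   /* addi r3,r0,-1: the SIMM field is 0xffff */

    /* old EXTRACT_HELPER(UIMM, 0, 16) body vs. extract32(opcode, 0, 16) */
    assert(((opcode >> 0) & ((1 << 16) - 1)) == my_extract32(opcode, 0, 16));

    /* old EXTRACT_SHELPER(SIMM, 0, 16) body vs. sextract32(opcode, 0, 16) */
    assert((int16_t)((opcode >> 0) & ((1 << 16) - 1)) == my_sextract32(opcode, 0, 16));
    return 0;
}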
@@ -629,13 +629,15 @@ static int kvm_put_fp(CPUState *cs)

   for (i = 0; i < 32; i++) {
   uint64_t vsr[2];
+  uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
+  uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

   #ifdef HOST_WORDS_BIGENDIAN
-  vsr[0] = float64_val(env->fpr[i]);
-  vsr[1] = env->vsr[i];
+  vsr[0] = float64_val(*fpr);
+  vsr[1] = *vsrl;
   #else
-  vsr[0] = env->vsr[i];
-  vsr[1] = float64_val(env->fpr[i]);
+  vsr[0] = *vsrl;
+  vsr[1] = float64_val(*fpr);
   #endif
   reg.addr = (uintptr_t) &vsr;
   reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

@@ -660,7 +662,7 @@ static int kvm_put_fp(CPUState *cs)

   for (i = 0; i < 32; i++) {
   reg.id = KVM_REG_PPC_VR(i);
-  reg.addr = (uintptr_t)&env->avr[i];
+  reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
   ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
   if (ret < 0) {
   DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));

@@ -696,6 +698,8 @@ static int kvm_get_fp(CPUState *cs)

   for (i = 0; i < 32; i++) {
   uint64_t vsr[2];
+  uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
+  uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

   reg.addr = (uintptr_t) &vsr;
   reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

@@ -707,14 +711,14 @@ static int kvm_get_fp(CPUState *cs)
   return ret;
   } else {
   #ifdef HOST_WORDS_BIGENDIAN
-  env->fpr[i] = vsr[0];
+  *fpr = vsr[0];
   if (vsx) {
-  env->vsr[i] = vsr[1];
+  *vsrl = vsr[1];
   }
   #else
-  env->fpr[i] = vsr[1];
+  *fpr = vsr[1];
   if (vsx) {
-  env->vsr[i] = vsr[0];
+  *vsrl = vsr[0];
   }
   #endif
   }

@@ -732,7 +736,7 @@ static int kvm_get_fp(CPUState *cs)

   for (i = 0; i < 32; i++) {
   reg.id = KVM_REG_PPC_VR(i);
-  reg.addr = (uintptr_t)&env->avr[i];
+  reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
   ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
   if (ret < 0) {
   DPRINTF("Unable to get VR%d from KVM: %s\n",
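Only the register access changes in the KVM hunks above; the ordering of the two doublewords handed to KVM_SET_ONE_REG/KVM_GET_ONE_REG is untouched. The plain-C illustration below (no KVM involved; HOST_WORDS_BIGENDIAN is QEMU's build-time define and will normally be unset when compiling this sketch standalone) just restates that pairing: the FP doubleword goes into vsr[0] on a big-endian host and into vsr[1] otherwise.

/* Illustration only: how the vsr[2] buffer for KVM is laid out per host endianness. */
#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t fpr  = 0x1111111111111111ull;   /* stands in for *cpu_fpr_ptr(...)  */
    uint64_t vsrl = 0x2222222222222222ull;   /* stands in for *cpu_vsrl_ptr(...) */
    uint64_t vsr[2];

#ifdef HOST_WORDS_BIGENDIAN
    vsr[0] = fpr;
    vsr[1] = vsrl;
    assert(vsr[0] == fpr);
#else
    vsr[0] = vsrl;
    vsr[1] = fpr;
    assert(vsr[1] == fpr);
#endif
    (void)vsr;
    return 0;
}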
@@ -45,7 +45,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
   uint64_t l;
   } u;
   u.l = qemu_get_be64(f);
-  env->fpr[i] = u.d;
+  *cpu_fpr_ptr(env, i) = u.d;
   }
   qemu_get_be32s(f, &fpscr);
   env->fpscr = fpscr;

@@ -138,11 +138,73 @@ static const VMStateInfo vmstate_info_avr = {
   };

   #define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \
-  VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)
+  VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

   #define VMSTATE_AVR_ARRAY(_f, _s, _n) \
   VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

+  static int get_fpr(QEMUFile *f, void *pv, size_t size,
+  const VMStateField *field)
+  {
+  ppc_vsr_t *v = pv;
+
+  v->u64[0] = qemu_get_be64(f);
+
+  return 0;
+  }
+
+  static int put_fpr(QEMUFile *f, void *pv, size_t size,
+  const VMStateField *field, QJSON *vmdesc)
+  {
+  ppc_vsr_t *v = pv;
+
+  qemu_put_be64(f, v->u64[0]);
+  return 0;
+  }
+
+  static const VMStateInfo vmstate_info_fpr = {
+  .name = "fpr",
+  .get = get_fpr,
+  .put = put_fpr,
+  };
+
+  #define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+  VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)
+
+  #define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+  VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+  static int get_vsr(QEMUFile *f, void *pv, size_t size,
+  const VMStateField *field)
+  {
+  ppc_vsr_t *v = pv;
+
+  v->u64[1] = qemu_get_be64(f);
+
+  return 0;
+  }
+
+  static int put_vsr(QEMUFile *f, void *pv, size_t size,
+  const VMStateField *field, QJSON *vmdesc)
+  {
+  ppc_vsr_t *v = pv;
+
+  qemu_put_be64(f, v->u64[1]);
+  return 0;
+  }
+
+  static const VMStateInfo vmstate_info_vsr = {
+  .name = "vsr",
+  .get = get_vsr,
+  .put = put_vsr,
+  };
+
+  #define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \
+  VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)
+
+  #define VMSTATE_VSR_ARRAY(_f, _s, _n) \
+  VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
+
   static bool cpu_pre_2_8_migration(void *opaque, int version_id)
   {
   PowerPCCPU *cpu = opaque;

@@ -354,7 +416,7 @@ static const VMStateDescription vmstate_fpu = {
   .minimum_version_id = 1,
   .needed = fpu_needed,
   .fields = (VMStateField[]) {
-  VMSTATE_FLOAT64_ARRAY(env.fpr, PowerPCCPU, 32),
+  VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
   VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
   VMSTATE_END_OF_LIST()
   },

@@ -373,7 +435,7 @@ static const VMStateDescription vmstate_altivec = {
   .minimum_version_id = 1,
   .needed = altivec_needed,
   .fields = (VMStateField[]) {
-  VMSTATE_AVR_ARRAY(env.avr, PowerPCCPU, 32),
+  VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
   VMSTATE_UINT32(env.vscr, PowerPCCPU),
   VMSTATE_END_OF_LIST()
   },

@@ -392,7 +454,7 @@ static const VMStateDescription vmstate_vsx = {
   .minimum_version_id = 1,
   .needed = vsx_needed,
   .fields = (VMStateField[]) {
-  VMSTATE_UINT64_ARRAY(env.vsr, PowerPCCPU, 32),
+  VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
   VMSTATE_END_OF_LIST()
   },
   };
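Reading these VMState hunks, each 128-bit vsr slot appears to be migrated as independent 64-bit sub-streams: the "fpr" info serializes u64[0] of slots 0..31, the "vsr" info serializes u64[1] of the same slots, and the avr sub-array starts at slot 32, which looks intended to keep the wire format of the old separate arrays. The sketch below is my own illustration of that split (wire_put_be64 and the buffers are stand-ins, not QEMU APIs), not a statement about the actual migration stream format.

/* Sketch: serialize the two halves of each 128-bit slot as separate streams. */
#include <stdint.h>
#include <stdio.h>

typedef union { uint64_t u64[2]; } vsr128;

static void wire_put_be64(uint8_t *out, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        out[i] = (uint8_t)(v >> (56 - 8 * i));    /* big-endian byte order */
    }
}

int main(void)
{
    vsr128 vsr[64] = {0};
    uint8_t fpr_stream[32 * 8], vsrl_stream[32 * 8];

    vsr[0].u64[0] = 0x3ff0000000000000ull;   /* "FPR0" */
    vsr[0].u64[1] = 0x00000000cafebabeull;   /* low doubleword of "VSR0" */

    for (int i = 0; i < 32; i++) {           /* "fpr" section: u64[0] of slots 0..31 */
        wire_put_be64(&fpr_stream[8 * i], vsr[i].u64[0]);
    }
    for (int i = 0; i < 32; i++) {           /* "vsx" section: u64[1] of slots 0..31 */
        wire_put_be64(&vsrl_stream[8 * i], vsr[i].u64[1]);
    }
    printf("fpr[0] first byte %02x, vsr[0] low last byte %02x\n",
           fpr_stream[0], vsrl_stream[7]);
    return 0;
}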
@@ -123,8 +123,8 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)

   /* Floating point registers */
   if ((qemu_tolower(name[0]) == 'f') &&
-  ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->fpr), &regnum)) {
-  *pval = env->fpr[regnum];
+  ppc_cpu_get_reg_num(name + 1, 32, &regnum)) {
+  *pval = *cpu_fpr_ptr(env, regnum);
   return 0;
   }
@@ -55,15 +55,9 @@
   /* global register indexes */
   static char cpu_reg_names[10*3 + 22*4 /* GPR */
   + 10*4 + 22*5 /* SPE GPRh */
-  + 10*4 + 22*5 /* FPR */
-  + 2*(10*6 + 22*7) /* AVRh, AVRl */
-  + 10*5 + 22*6 /* VSR */
   + 8*5 /* CRF */];
   static TCGv cpu_gpr[32];
   static TCGv cpu_gprh[32];
-  static TCGv_i64 cpu_fpr[32];
-  static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
-  static TCGv_i64 cpu_vsr[32];
   static TCGv_i32 cpu_crf[8];
   static TCGv cpu_nip;
   static TCGv cpu_msr;

@@ -108,39 +102,6 @@ void ppc_translate_init(void)
   offsetof(CPUPPCState, gprh[i]), p);
   p += (i < 10) ? 4 : 5;
   cpu_reg_names_size -= (i < 10) ? 4 : 5;
-
-  snprintf(p, cpu_reg_names_size, "fp%d", i);
-  cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, fpr[i]), p);
-  p += (i < 10) ? 4 : 5;
-  cpu_reg_names_size -= (i < 10) ? 4 : 5;
-
-  snprintf(p, cpu_reg_names_size, "avr%dH", i);
-  #ifdef HOST_WORDS_BIGENDIAN
-  cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, avr[i].u64[0]), p);
-  #else
-  cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, avr[i].u64[1]), p);
-  #endif
-  p += (i < 10) ? 6 : 7;
-  cpu_reg_names_size -= (i < 10) ? 6 : 7;
-
-  snprintf(p, cpu_reg_names_size, "avr%dL", i);
-  #ifdef HOST_WORDS_BIGENDIAN
-  cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, avr[i].u64[1]), p);
-  #else
-  cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, avr[i].u64[0]), p);
-  #endif
-  p += (i < 10) ? 6 : 7;
-  cpu_reg_names_size -= (i < 10) ? 6 : 7;
-  snprintf(p, cpu_reg_names_size, "vsr%d", i);
-  cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
-  offsetof(CPUPPCState, vsr[i]), p);
-  p += (i < 10) ? 5 : 6;
-  cpu_reg_names_size -= (i < 10) ? 5 : 6;
   }

   cpu_nip = tcg_global_mem_new(cpu_env,

@@ -6699,6 +6660,38 @@ static inline void gen_##name(DisasContext *ctx) \
   GEN_TM_PRIV_NOOP(treclaim);
   GEN_TM_PRIV_NOOP(trechkpt);

+  static inline void get_fpr(TCGv_i64 dst, int regno)
+  {
+  tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState, vsr[regno].u64[0]));
+  }
+
+  static inline void set_fpr(int regno, TCGv_i64 src)
+  {
+  tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState, vsr[regno].u64[0]));
+  }
+
+  static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
+  {
+  #ifdef HOST_WORDS_BIGENDIAN
+  tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState,
+  vsr[32 + regno].u64[(high ? 0 : 1)]));
+  #else
+  tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState,
+  vsr[32 + regno].u64[(high ? 1 : 0)]));
+  #endif
+  }
+
+  static inline void set_avr64(int regno, TCGv_i64 src, bool high)
+  {
+  #ifdef HOST_WORDS_BIGENDIAN
+  tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState,
+  vsr[32 + regno].u64[(high ? 0 : 1)]));
+  #else
+  tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState,
+  vsr[32 + regno].u64[(high ? 1 : 0)]));
+  #endif
+  }
+
   #include "translate/fp-impl.inc.c"

   #include "translate/vmx-impl.inc.c"

@@ -7447,7 +7440,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
   if ((i & (RFPL - 1)) == 0) {
   cpu_fprintf(f, "FPR%02d", i);
   }
-  cpu_fprintf(f, " %016" PRIx64, *((uint64_t *)&env->fpr[i]));
+  cpu_fprintf(f, " %016" PRIx64, *cpu_fpr_ptr(env, i));
   if ((i & (RFPL - 1)) == (RFPL - 1)) {
   cpu_fprintf(f, "\n");
   }
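With the per-register TCG globals (cpu_fpr[], cpu_avrh[]/cpu_avrl[], cpu_vsr[]) gone, translation code loads operands from env into local temporaries via get_fpr()/get_avr64() and writes results back with set_fpr()/set_avr64(). The fragment below is a hedged sketch of that pattern only; gen_fpr_move_sketch is a hypothetical name, not an instruction handler from this patch, and it relies on the helpers declared in the hunk above.

/* Sketch: the new load/compute/store-back pattern for one FP operand. */
static void gen_fpr_move_sketch(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();

    get_fpr(t0, rB(ctx->opcode));      /* load FRB from vsr[rB].u64[0] */
    set_fpr(rD(ctx->opcode), t0);      /* store it to FRT */
    tcg_temp_free_i64(t0);
}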
@@ -3,7 +3,7 @@
   static inline TCGv_ptr gen_fprp_ptr(int reg)
   {
   TCGv_ptr r = tcg_temp_new_ptr();
-  tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, fpr[reg]));
+  tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
   return r;
   }

[diff for one file omitted by the web view: too large to display]
@@ -10,60 +10,79 @@
   static inline TCGv_ptr gen_avr_ptr(int reg)
   {
   TCGv_ptr r = tcg_temp_new_ptr();
-  tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]));
+  tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[32 + reg].u64[0]));
   return r;
   }

+  static inline long avr64_offset(int reg, bool high)
+  {
+  return offsetof(CPUPPCState, vsr[32 + reg].u64[(high ? 0 : 1)]);
+  }
+
   #define GEN_VR_LDX(name, opc2, opc3) \
   static void glue(gen_, name)(DisasContext *ctx) \
   { \
   TCGv EA; \
+  TCGv_i64 avr; \
   if (unlikely(!ctx->altivec_enabled)) { \
   gen_exception(ctx, POWERPC_EXCP_VPU); \
   return; \
   } \
   gen_set_access_type(ctx, ACCESS_INT); \
+  avr = tcg_temp_new_i64(); \
   EA = tcg_temp_new(); \
   gen_addr_reg_index(ctx, EA); \
   tcg_gen_andi_tl(EA, EA, ~0xf); \
   /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
   necessary 64-bit byteswap already. */ \
   if (ctx->le_mode) { \
-  gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+  gen_qemu_ld64_i64(ctx, avr, EA); \
+  set_avr64(rD(ctx->opcode), avr, false); \
   tcg_gen_addi_tl(EA, EA, 8); \
-  gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+  gen_qemu_ld64_i64(ctx, avr, EA); \
+  set_avr64(rD(ctx->opcode), avr, true); \
   } else { \
-  gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+  gen_qemu_ld64_i64(ctx, avr, EA); \
+  set_avr64(rD(ctx->opcode), avr, true); \
   tcg_gen_addi_tl(EA, EA, 8); \
-  gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+  gen_qemu_ld64_i64(ctx, avr, EA); \
+  set_avr64(rD(ctx->opcode), avr, false); \
   } \
   tcg_temp_free(EA); \
+  tcg_temp_free_i64(avr); \
   }

   #define GEN_VR_STX(name, opc2, opc3) \
   static void gen_st##name(DisasContext *ctx) \
   { \
   TCGv EA; \
+  TCGv_i64 avr; \
   if (unlikely(!ctx->altivec_enabled)) { \
   gen_exception(ctx, POWERPC_EXCP_VPU); \
   return; \
   } \
   gen_set_access_type(ctx, ACCESS_INT); \
+  avr = tcg_temp_new_i64(); \
   EA = tcg_temp_new(); \
   gen_addr_reg_index(ctx, EA); \
   tcg_gen_andi_tl(EA, EA, ~0xf); \
   /* We only need to swap high and low halves. gen_qemu_st64_i64 does \
   necessary 64-bit byteswap already. */ \
   if (ctx->le_mode) { \
-  gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+  get_avr64(avr, rD(ctx->opcode), false); \
+  gen_qemu_st64_i64(ctx, avr, EA); \
   tcg_gen_addi_tl(EA, EA, 8); \
-  gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+  get_avr64(avr, rD(ctx->opcode), true); \
+  gen_qemu_st64_i64(ctx, avr, EA); \
   } else { \
-  gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+  get_avr64(avr, rD(ctx->opcode), true); \
+  gen_qemu_st64_i64(ctx, avr, EA); \
   tcg_gen_addi_tl(EA, EA, 8); \
-  gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+  get_avr64(avr, rD(ctx->opcode), false); \
+  gen_qemu_st64_i64(ctx, avr, EA); \
   } \
   tcg_temp_free(EA); \
+  tcg_temp_free_i64(avr); \
   }

   #define GEN_VR_LVE(name, opc2, opc3, size) \

@@ -159,15 +178,20 @@ static void gen_lvsr(DisasContext *ctx)
   static void gen_mfvscr(DisasContext *ctx)
   {
   TCGv_i32 t;
+  TCGv_i64 avr;
   if (unlikely(!ctx->altivec_enabled)) {
   gen_exception(ctx, POWERPC_EXCP_VPU);
   return;
   }
-  tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0);
+  avr = tcg_temp_new_i64();
+  tcg_gen_movi_i64(avr, 0);
+  set_avr64(rD(ctx->opcode), avr, true);
   t = tcg_temp_new_i32();
   tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, vscr));
-  tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t);
+  tcg_gen_extu_i32_i64(avr, t);
+  set_avr64(rD(ctx->opcode), avr, false);
   tcg_temp_free_i32(t);
+  tcg_temp_free_i64(avr);
   }

   static void gen_mtvscr(DisasContext *ctx)

@@ -185,9 +209,10 @@ static void gen_mtvscr(DisasContext *ctx)
   #define GEN_VX_VMUL10(name, add_cin, ret_carry) \
   static void glue(gen_, name)(DisasContext *ctx) \
   { \
-  TCGv_i64 t0 = tcg_temp_new_i64(); \
-  TCGv_i64 t1 = tcg_temp_new_i64(); \
-  TCGv_i64 t2 = tcg_temp_new_i64(); \
+  TCGv_i64 t0; \
+  TCGv_i64 t1; \
+  TCGv_i64 t2; \
+  TCGv_i64 avr; \
   TCGv_i64 ten, z; \
   \
   if (unlikely(!ctx->altivec_enabled)) { \

@@ -195,30 +220,43 @@ static void glue(gen_, name)(DisasContext *ctx) \
   return; \
   } \
   \
+  t0 = tcg_temp_new_i64(); \
+  t1 = tcg_temp_new_i64(); \
+  t2 = tcg_temp_new_i64(); \
+  avr = tcg_temp_new_i64(); \
   ten = tcg_const_i64(10); \
   z = tcg_const_i64(0); \
   \
   if (add_cin) { \
-  tcg_gen_mulu2_i64(t0, t1, cpu_avrl[rA(ctx->opcode)], ten); \
-  tcg_gen_andi_i64(t2, cpu_avrl[rB(ctx->opcode)], 0xF); \
-  tcg_gen_add2_i64(cpu_avrl[rD(ctx->opcode)], t2, t0, t1, t2, z); \
+  get_avr64(avr, rA(ctx->opcode), false); \
+  tcg_gen_mulu2_i64(t0, t1, avr, ten); \
+  get_avr64(avr, rB(ctx->opcode), false); \
+  tcg_gen_andi_i64(t2, avr, 0xF); \
+  tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
+  set_avr64(rD(ctx->opcode), avr, false); \
   } else { \
-  tcg_gen_mulu2_i64(cpu_avrl[rD(ctx->opcode)], t2, \
-  cpu_avrl[rA(ctx->opcode)], ten); \
+  get_avr64(avr, rA(ctx->opcode), false); \
+  tcg_gen_mulu2_i64(avr, t2, avr, ten); \
+  set_avr64(rD(ctx->opcode), avr, false); \
   } \
   \
   if (ret_carry) { \
-  tcg_gen_mulu2_i64(t0, t1, cpu_avrh[rA(ctx->opcode)], ten); \
-  tcg_gen_add2_i64(t0, cpu_avrl[rD(ctx->opcode)], t0, t1, t2, z); \
-  tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0); \
+  get_avr64(avr, rA(ctx->opcode), true); \
+  tcg_gen_mulu2_i64(t0, t1, avr, ten); \
+  tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
+  set_avr64(rD(ctx->opcode), avr, false); \
+  set_avr64(rD(ctx->opcode), z, true); \
   } else { \
-  tcg_gen_mul_i64(t0, cpu_avrh[rA(ctx->opcode)], ten); \
-  tcg_gen_add_i64(cpu_avrh[rD(ctx->opcode)], t0, t2); \
+  get_avr64(avr, rA(ctx->opcode), true); \
+  tcg_gen_mul_i64(t0, avr, ten); \
+  tcg_gen_add_i64(avr, t0, t2); \
+  set_avr64(rD(ctx->opcode), avr, true); \
   } \
   \
   tcg_temp_free_i64(t0); \
   tcg_temp_free_i64(t1); \
   tcg_temp_free_i64(t2); \
+  tcg_temp_free_i64(avr); \
   tcg_temp_free_i64(ten); \
   tcg_temp_free_i64(z); \
   } \

@@ -232,12 +270,31 @@ GEN_VX_VMUL10(vmul10ecuq, 1, 1);
   #define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
   static void glue(gen_, name)(DisasContext *ctx) \
   { \
+  TCGv_i64 t0; \
+  TCGv_i64 t1; \
+  TCGv_i64 avr; \
+  \
   if (unlikely(!ctx->altivec_enabled)) { \
   gen_exception(ctx, POWERPC_EXCP_VPU); \
   return; \
   } \
-  tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \
-  tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \
+  t0 = tcg_temp_new_i64(); \
+  t1 = tcg_temp_new_i64(); \
+  avr = tcg_temp_new_i64(); \
+  \
+  get_avr64(t0, rA(ctx->opcode), true); \
+  get_avr64(t1, rB(ctx->opcode), true); \
+  tcg_op(avr, t0, t1); \
+  set_avr64(rD(ctx->opcode), avr, true); \
+  \
+  get_avr64(t0, rA(ctx->opcode), false); \
+  get_avr64(t1, rB(ctx->opcode), false); \
+  tcg_op(avr, t0, t1); \
+  set_avr64(rD(ctx->opcode), avr, false); \
+  \
+  tcg_temp_free_i64(t0); \
+  tcg_temp_free_i64(t1); \
+  tcg_temp_free_i64(avr); \
   }

   GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16);

@@ -406,6 +463,7 @@ GEN_VXFORM(vmrglw, 6, 6);
   static void gen_vmrgew(DisasContext *ctx)
   {
   TCGv_i64 tmp;
+  TCGv_i64 avr;
   int VT, VA, VB;
   if (unlikely(!ctx->altivec_enabled)) {
   gen_exception(ctx, POWERPC_EXCP_VPU);

@@ -415,15 +473,28 @@ static void gen_vmrgew(DisasContext *ctx)
   VA = rA(ctx->opcode);
   VB = rB(ctx->opcode);
   tmp = tcg_temp_new_i64();
-  tcg_gen_shri_i64(tmp, cpu_avrh[VB], 32);
-  tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VA], tmp, 0, 32);
-  tcg_gen_shri_i64(tmp, cpu_avrl[VB], 32);
-  tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VA], tmp, 0, 32);
+  avr = tcg_temp_new_i64();
+
+  get_avr64(avr, VB, true);
+  tcg_gen_shri_i64(tmp, avr, 32);
+  get_avr64(avr, VA, true);
+  tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
+  set_avr64(VT, avr, true);
+
+  get_avr64(avr, VB, false);
+  tcg_gen_shri_i64(tmp, avr, 32);
+  get_avr64(avr, VA, false);
+  tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
+  set_avr64(VT, avr, false);
+
   tcg_temp_free_i64(tmp);
+  tcg_temp_free_i64(avr);
   }

   static void gen_vmrgow(DisasContext *ctx)
   {
+  TCGv_i64 t0, t1;
+  TCGv_i64 avr;
   int VT, VA, VB;
   if (unlikely(!ctx->altivec_enabled)) {
   gen_exception(ctx, POWERPC_EXCP_VPU);

@@ -432,9 +503,23 @@ static void gen_vmrgow(DisasContext *ctx)
   VT = rD(ctx->opcode);
   VA = rA(ctx->opcode);
   VB = rB(ctx->opcode);
+  t0 = tcg_temp_new_i64();
+  t1 = tcg_temp_new_i64();
+  avr = tcg_temp_new_i64();
+
-  tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VB], cpu_avrh[VA], 32, 32);
-  tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VB], cpu_avrl[VA], 32, 32);
+  get_avr64(t0, VB, true);
+  get_avr64(t1, VA, true);
+  tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
+  set_avr64(VT, avr, true);
+
+  get_avr64(t0, VB, false);
+  get_avr64(t1, VA, false);
+  tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
+  set_avr64(VT, avr, false);
+
+  tcg_temp_free_i64(t0);
+  tcg_temp_free_i64(t1);
+  tcg_temp_free_i64(avr);
   }

   GEN_VXFORM(vmuloub, 4, 0);

@@ -790,7 +875,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
   { \
   TCGv_ptr rb, rd; \
   uint8_t uimm = UIMM4(ctx->opcode); \
-  TCGv_i32 t0 = tcg_temp_new_i32(); \
+  TCGv_i32 t0; \
   if (unlikely(!ctx->altivec_enabled)) { \
   gen_exception(ctx, POWERPC_EXCP_VPU); \
   return; \

@@ -798,6 +883,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
   if (uimm > splat_max) { \
   uimm = 0; \
   } \
+  t0 = tcg_temp_new_i32(); \
   tcg_gen_movi_i32(t0, uimm); \
   rb = gen_avr_ptr(rB(ctx->opcode)); \
   rd = gen_avr_ptr(rD(ctx->opcode)); \
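The Altivec translation changes above all follow one shape: a 128-bit AVR value is handled as two 64-bit halves pulled out of vsr[32 + reg] with get_avr64() and written back with set_avr64(). The fragment below is a sketch only; gen_vmr_sketch is a hypothetical name used to show the pattern in isolation, using just the helpers declared earlier in the diff.

/* Sketch: copy one Altivec register to another, one 64-bit half at a time. */
static void gen_vmr_sketch(DisasContext *ctx)
{
    TCGv_i64 avr = tcg_temp_new_i64();

    get_avr64(avr, rB(ctx->opcode), true);    /* high doubleword */
    set_avr64(rD(ctx->opcode), avr, true);
    get_avr64(avr, rB(ctx->opcode), false);   /* low doubleword */
    set_avr64(rD(ctx->opcode), avr, false);
    tcg_temp_free_i64(avr);
}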
[diff for one file omitted by the web view: too large to display]
@@ -9486,7 +9486,7 @@ static bool avr_need_swap(CPUPPCState *env)
   static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
-  stfq_p(mem_buf, env->fpr[n]);
+  stfq_p(mem_buf, *cpu_fpr_ptr(env, n));
   ppc_maybe_bswap_register(env, mem_buf, 8);
   return 8;
   }

@@ -9502,7 +9502,7 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
   ppc_maybe_bswap_register(env, mem_buf, 8);
-  env->fpr[n] = ldfq_p(mem_buf);
+  *cpu_fpr_ptr(env, n) = ldfq_p(mem_buf);
   return 8;
   }
   if (n == 32) {

@@ -9516,12 +9516,13 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
+  ppc_avr_t *avr = cpu_avr_ptr(env, n);
   if (!avr_need_swap(env)) {
-  stq_p(mem_buf, env->avr[n].u64[0]);
-  stq_p(mem_buf+8, env->avr[n].u64[1]);
+  stq_p(mem_buf, avr->u64[0]);
+  stq_p(mem_buf + 8, avr->u64[1]);
   } else {
-  stq_p(mem_buf, env->avr[n].u64[1]);
-  stq_p(mem_buf+8, env->avr[n].u64[0]);
+  stq_p(mem_buf, avr->u64[1]);
+  stq_p(mem_buf + 8, avr->u64[0]);
   }
   ppc_maybe_bswap_register(env, mem_buf, 8);
   ppc_maybe_bswap_register(env, mem_buf + 8, 8);

@@ -9543,14 +9544,15 @@ static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
+  ppc_avr_t *avr = cpu_avr_ptr(env, n);
   ppc_maybe_bswap_register(env, mem_buf, 8);
   ppc_maybe_bswap_register(env, mem_buf + 8, 8);
   if (!avr_need_swap(env)) {
-  env->avr[n].u64[0] = ldq_p(mem_buf);
-  env->avr[n].u64[1] = ldq_p(mem_buf+8);
+  avr->u64[0] = ldq_p(mem_buf);
+  avr->u64[1] = ldq_p(mem_buf + 8);
   } else {
-  env->avr[n].u64[1] = ldq_p(mem_buf);
-  env->avr[n].u64[0] = ldq_p(mem_buf+8);
+  avr->u64[1] = ldq_p(mem_buf);
+  avr->u64[0] = ldq_p(mem_buf + 8);
   }
   return 16;
   }

@@ -9623,7 +9625,7 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   static int gdb_get_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
-  stq_p(mem_buf, env->vsr[n]);
+  stq_p(mem_buf, *cpu_vsrl_ptr(env, n));
   ppc_maybe_bswap_register(env, mem_buf, 8);
   return 8;
   }

@@ -9634,7 +9636,7 @@ static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
   {
   if (n < 32) {
   ppc_maybe_bswap_register(env, mem_buf, 8);
-  env->vsr[n] = ldq_p(mem_buf);
+  *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf);
   return 8;
   }
   return 0;