ppc patch queue 2017-06-30
* More DRC cleanups, these now actually fix a few bugs
* Properly implements the openpic timers (they now count and generate interrupts)
* Fixes for XICS migration
* Fixes for migration of POWER9 RPT guests
* The last of the compatibility mode rework

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABCAAGBQJZVh9ZAAoJEGw4ysog2bOSZiEP+gLCJ6gToBdBjhoUCBznKOhJ
7IOBqI7LMdZfNXm03PtlETnrjj7556Mmbf5fGXX9wKeaMTNb+qFnzTb5o4T28lpG
38Qy+1H8Z9rFVLpEqdtPA+DBk8c4ulMXSNFQZMhqUFnn91g5zAU9dHGU3Snzr4Xr
6AqkBnMY9sN35n7Mk0hgiaKY7izs/pOfvJTq0Er1BRRjdaQ5fW+uwPxrtAYcBQry
rA8XKAaUQiLyMTGOzIJ7ASz9SbwX3ovNmflD1cE0Melq4oezRleynEqX8ZTMpf4K
GB2I2sGZDMECjRt9p+nMjVXFlYi4lh/u8QXicIxPR1ujLW+Wt1YijpzDdwJxiIWN
f3kG0gTN7h1eqabjOnNYIHyhLrji6MrrvQshgTzImmuUBgvB74MS46Q20A4+UEM9
+3meO5A4nrmj3EBH7HNSwdNZ/YzM+gGBBXKsWU05hEkHes/oW8xgBysljEv+KXzP
IxNLnGV48D/m1FgZS7asPk7ihA93t7aYRBY3BDo9EjokprDv8qY++Zrynf71W3t8
2L0vbICRHQMOc/vxV5oS8YZkqR8rgvrkSXZ69WMzT38wvC33+khp1SUU04AbvsxU
059tlCz1vRcKBkVlHsi5u7bG96HVv8vdazMoPMDNHGflKZxZYWUPFeQ/B6YxvjkU
zdvDdOvbg0eNwbB4mqBo
=Zdw4
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.10-20170630' into staging

ppc patch queue 2017-06-30

* More DRC cleanups, these now actually fix a few bugs
* Properly implements the openpic timers (they now count and generate interrupts)
* Fixes for XICS migration
* Fixes for migration of POWER9 RPT guests
* The last of the compatibility mode rework

# gpg: Signature made Fri 30 Jun 2017 10:52:25 BST
# gpg: using RSA key 0x6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.10-20170630: (21 commits)
  spapr: Clean up DRC set_isolation_state() path
  spapr: Clean up DRC set_allocation_state path
  spapr: Make DRC reset force DRC into known state
  spapr: Split DRC release from DRC detach
  spapr: Eliminate DRC 'signalled' state variable
  spapr: Start hotplugged PCI devices in ISOLATED state
  target-ppc: Enable open-pic timers to count and generate interrupts
  hw/ppc/spapr.c: consecutive 'spapr->patb_entry = 0' statements
  spapr: prevent QEMU crash when CPU realization fails
  target/ppc: Proper cleanup when ppc_cpu_realizefn fails
  spapr: fix migration of ICPState objects from/to older QEMU
  xics: directly register ICPState objects to vmstate
  target/ppc: Fix return value in tcg radix mmu fault handler
  target/ppc/excp_helper: Take BQL before calling cpu_interrupt()
  spapr: Fix migration of Radix guests
  spapr: Add a "no HPT" encoding to HTAB migration stream
  ppc: Rework CPU compatibility testing across migration
  pseries: Reset CPU compatibility mode
  pseries: Move CPU compatibility property to machine
  qapi: add explicit null to string input and output visitors
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 36f87b4513
@@ -45,6 +45,7 @@
#include "qemu/bitops.h"
#include "qapi/qmp/qerror.h"
#include "qemu/log.h"
#include "qemu/timer.h"

//#define DEBUG_OPENPIC

@@ -54,8 +55,10 @@ static const int debug_openpic = 1;
static const int debug_openpic = 0;
#endif

static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
if (debug_openpic) { \
printf("Core%d: ", get_current_cpu()); \
printf(fmt , ## __VA_ARGS__); \
} \
} while (0)
@@ -246,9 +249,31 @@ typedef struct IRQSource {
#define IDR_EP 0x80000000 /* external pin */
#define IDR_CI 0x40000000 /* critical interrupt */

/* Convert between openpic clock ticks and nanosecs. In the hardware the clock
frequency is driven by board inputs to the PIC which the PIC would then
divide by 4 or 8. For now hard code to 25MZ.
*/
#define OPENPIC_TIMER_FREQ_MHZ 25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)
static inline uint64_t ns_to_ticks(uint64_t ns)
{
return ns / OPENPIC_TIMER_NS_PER_TICK;
}
static inline uint64_t ticks_to_ns(uint64_t ticks)
{
return ticks * OPENPIC_TIMER_NS_PER_TICK;
}

typedef struct OpenPICTimer {
uint32_t tccr; /* Global timer current count register */
uint32_t tbcr; /* Global timer base count register */
int n_IRQ;
bool qemu_timer_active; /* Is the qemu_timer is running? */
struct QEMUTimer *qemu_timer;
struct OpenPICState *opp; /* Device timer is part of. */
/* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last
current_count written or read, only defined if qemu_timer_active. */
uint64_t origin_time;
} OpenPICTimer;

typedef struct OpenPICMSI {
@@ -795,6 +820,65 @@ static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
return retval;
}

static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled);

static void qemu_timer_cb(void *opaque)
{
OpenPICTimer *tmr = opaque;
OpenPICState *opp = tmr->opp;
uint32_t n_IRQ = tmr->n_IRQ;
uint32_t val = tmr->tbcr & ~TBCR_CI;
uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */

DPRINTF("%s n_IRQ=%d\n", __func__, n_IRQ);
/* Reload current count from base count and setup timer. */
tmr->tccr = val | tog;
openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
/* Raise the interrupt. */
opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ);
openpic_set_irq(opp, n_IRQ, 1);
openpic_set_irq(opp, n_IRQ, 0);
}

/* If enabled is true, arranges for an interrupt to be raised val clocks into
the future, if enabled is false cancels the timer. */
static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled)
{
uint64_t ns = ticks_to_ns(val & ~TCCR_TOG);
/* A count of zero causes a timer to be set to expire immediately. This
effectively stops the simulation since the timer is constantly expiring
which prevents guest code execution, so we don't honor that
configuration. On real hardware, this situation would generate an
interrupt on every clock cycle if the interrupt was unmasked. */
if ((ns == 0) || !enabled) {
tmr->qemu_timer_active = false;
tmr->tccr = tmr->tccr & TCCR_TOG;
timer_del(tmr->qemu_timer); /* set timer to never expire. */
} else {
tmr->qemu_timer_active = true;
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
tmr->origin_time = now;
timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */
}
}

/* Returns the currrent tccr value, i.e., timer value (in clocks) with
appropriate TOG. */
static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr)
{
uint64_t retval;
if (!tmr->qemu_timer_active) {
retval = tmr->tccr;
} else {
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t used = now - tmr->origin_time; /* nsecs */
uint32_t used_ticks = (uint32_t)ns_to_ticks(used);
uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks;
retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG));
}
return retval;
}

static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
unsigned len)
{
@@ -819,10 +903,15 @@ static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
case 0x00: /* TCCR */
break;
case 0x10: /* TBCR */
if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
(val & TBCR_CI) == 0 &&
(opp->timers[idx].tbcr & TBCR_CI) != 0) {
opp->timers[idx].tccr &= ~TCCR_TOG;
/* Did the enable status change? */
if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) {
/* Did "Count Inhibit" transition from 1 to 0? */
if ((val & TBCR_CI) == 0) {
opp->timers[idx].tccr = val & ~TCCR_TOG;
}
openpic_tmr_set_tmr(&opp->timers[idx],
(val & ~TBCR_CI),
/*enabled=*/((val & TBCR_CI) == 0));
}
opp->timers[idx].tbcr = val;
break;
@@ -854,7 +943,7 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
idx = (addr >> 6) & 0x3;
switch (addr & 0x30) {
case 0x00: /* TCCR */
retval = opp->timers[idx].tccr;
retval = openpic_tmr_get_timer(&opp->timers[idx]);
break;
case 0x10: /* TBCR */
retval = opp->timers[idx].tbcr;
@@ -1136,7 +1225,10 @@ static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
IRQ_resetbit(&dst->raised, irq);
}

if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) {
/* Timers and IPIs support multicast. */
if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
DPRINTF("irq is IPI or TMR\n");
src->destmask &= ~(1 << cpu);
if (src->destmask && !src->level) {
/* trigger on CPUs that didn't know about it yet */
@@ -1341,6 +1433,10 @@ static void openpic_reset(DeviceState *d)
for (i = 0; i < OPENPIC_MAX_TMR; i++) {
opp->timers[i].tccr = 0;
opp->timers[i].tbcr = TBCR_CI;
if (opp->timers[i].qemu_timer_active) {
timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */
opp->timers[i].qemu_timer_active = false;
}
}
/* Go out of RESET state */
opp->gcr = 0;
@@ -1391,6 +1487,15 @@ static void fsl_common_init(OpenPICState *opp)
opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
opp->src[i].level = false;
}

for (i = 0; i < OPENPIC_MAX_TMR; i++) {
opp->timers[i].n_IRQ = opp->irq_tim0 + i;
opp->timers[i].qemu_timer_active = false;
opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
&qemu_timer_cb,
&opp->timers[i]);
opp->timers[i].opp = opp;
}
}

static void map_list(OpenPICState *opp, const MemReg *list, int *count)

@@ -344,10 +344,14 @@ static void icp_realize(DeviceState *dev, Error **errp)
}

qemu_register_reset(icp_reset, dev);
vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}

static void icp_unrealize(DeviceState *dev, Error **errp)
{
ICPState *icp = ICP(dev);

vmstate_unregister(NULL, &vmstate_icp_server, icp);
qemu_unregister_reset(icp_reset, dev);
}

@@ -355,7 +359,6 @@ static void icp_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);

dc->vmsd = &vmstate_icp_server;
dc->realize = icp_realize;
dc->unrealize = icp_unrealize;
}

@@ -36,7 +36,6 @@
#include "hw/pci/pci_host.h"
#include "hw/ppc/ppc.h"
#include "hw/boards.h"
#include "hw/audio/soundhw.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/ide.h"
@@ -782,9 +781,6 @@ static void ibm_40p_init(MachineState *machine)
qbus_walk_children(BUS(isa_bus), prep_set_cmos_checksum, NULL, NULL, NULL,
&cmos_checksum);

/* initialize audio subsystem */
soundhw_init();

/* add some more devices */
if (defaults_enabled()) {
isa_create_simple(isa_bus, "i8042");

hw/ppc/spapr.c
@@ -127,9 +127,49 @@ error:
return NULL;
}

static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
/* Dummy entries correspond to unused ICPState objects in older QEMUs,
 * and newer QEMUs don't even have them. In both cases, we don't want
 * to send anything on the wire.
 */
return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
.name = "icp/server",
.version_id = 1,
.minimum_version_id = 1,
.needed = pre_2_10_vmstate_dummy_icp_needed,
.fields = (VMStateField[]) {
VMSTATE_UNUSED(4), /* uint32_t xirr */
VMSTATE_UNUSED(1), /* uint8_t pending_priority */
VMSTATE_UNUSED(1), /* uint8_t mfrr */
VMSTATE_END_OF_LIST()
},
};

static void pre_2_10_vmstate_register_dummy_icp(int i)
{
vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
(void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
(void *)(uintptr_t) i);
}

static inline int xics_max_server_number(void)
{
return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);
}

static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);

if (kvm_enabled()) {
if (machine_kernel_irqchip_allowed(machine) &&
@@ -151,6 +191,17 @@ static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
return;
}
}

if (smc->pre_2_10_has_unused_icps) {
int i;

for (i = 0; i < xics_max_server_number(); i++) {
/* Dummy entries get deregistered when real ICPState objects
 * are registered during CPU core hotplug.
 */
pre_2_10_vmstate_register_dummy_icp(i);
}
}
}

static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
@@ -979,7 +1030,6 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
void *fdt;
sPAPRPHBState *phb;
char *buf;
int smt = kvmppc_smt_threads();

fdt = g_malloc0(FDT_MAX_SIZE);
_FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
@@ -1019,7 +1069,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

/* /interrupt controller */
spapr_dt_xics(DIV_ROUND_UP(max_cpus * smt, smp_threads), fdt, PHANDLE_XICP);
spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);

ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
@@ -1326,7 +1376,6 @@ static void ppc_spapr_reset(void)
 * Set the GR bit in PATB so that we know there is no HPT. */
spapr->patb_entry = PATBE1_GR;
} else {
spapr->patb_entry = 0;
spapr_setup_hpt_and_vrma(spapr);
}

@@ -1346,6 +1395,8 @@ static void ppc_spapr_reset(void)
if (!spapr->cas_reboot) {
spapr_ovec_cleanup(spapr->ov5_cas);
spapr->ov5_cas = spapr_ovec_new();

ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
}

fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);
@@ -1443,6 +1494,18 @@ static int spapr_post_load(void *opaque, int version_id)
err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
}

if (spapr->patb_entry) {
PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
bool radix = !!(spapr->patb_entry & PATBE1_GR);
bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
if (err) {
error_report("Process table config unsupported by the host");
return -EINVAL;
}
}

return err;
}

@@ -1558,13 +1621,19 @@ static int htab_save_setup(QEMUFile *f, void *opaque)
sPAPRMachineState *spapr = opaque;

/* "Iteration" header */
qemu_put_be32(f, spapr->htab_shift);
if (!spapr->htab_shift) {
qemu_put_be32(f, -1);
} else {
qemu_put_be32(f, spapr->htab_shift);
}

if (spapr->htab) {
spapr->htab_save_index = 0;
spapr->htab_first_pass = true;
} else {
assert(kvm_enabled());
if (spapr->htab_shift) {
assert(kvm_enabled());
}
}

@@ -1710,7 +1779,12 @@ static int htab_save_iterate(QEMUFile *f, void *opaque)
int rc = 0;

/* Iteration header */
qemu_put_be32(f, 0);
if (!spapr->htab_shift) {
qemu_put_be32(f, -1);
return 0;
} else {
qemu_put_be32(f, 0);
}

if (!spapr->htab) {
assert(kvm_enabled());
@@ -1744,7 +1818,12 @@ static int htab_save_complete(QEMUFile *f, void *opaque)
int fd;

/* Iteration header */
qemu_put_be32(f, 0);
if (!spapr->htab_shift) {
qemu_put_be32(f, -1);
return 0;
} else {
qemu_put_be32(f, 0);
}

if (!spapr->htab) {
int rc;
@@ -1788,6 +1867,11 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)

section_hdr = qemu_get_be32(f);

if (section_hdr == -1) {
spapr_free_hpt(spapr);
return 0;
}

if (section_hdr) {
Error *local_err = NULL;

@@ -2131,7 +2215,7 @@ static void ppc_spapr_init(MachineState *machine)
machine->cpu_model = kvm_enabled() ? "host" : smc->tcg_default_cpu;
}

ppc_cpu_parse_features(machine->cpu_model);
spapr_cpu_parse_features(spapr);

spapr_init_cpus(spapr);

@@ -2503,6 +2587,10 @@ static void spapr_machine_initfn(Object *obj)
" place of standard EPOW events when possible"
" (required for memory hot-unplug support)",
NULL);

ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
"Maximum permitted CPU compatibility mode",
&error_fatal);
}

static void spapr_machine_finalizefn(Object *obj)
@@ -2548,12 +2636,6 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,

spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
addr += SPAPR_MEMORY_BLOCK_SIZE;
if (!dev->hotplugged) {
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
/* guests expect coldplugged LMBs to be pre-allocated */
drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
}
}
/* send hotplug notification to the
 * guest only in case of hotplugged memory
@@ -2806,9 +2888,24 @@ static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
CPUCore *cc = CPU_CORE(dev);
CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

if (smc->pre_2_10_has_unused_icps) {
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
int i;

for (i = 0; i < cc->nr_threads; i++) {
CPUState *cs = CPU(sc->threads + i * size);

pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
}
}

assert(core_slot);
core_slot->cpu = NULL;
object_unparent(OBJECT(dev));
@@ -2860,6 +2957,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
{
sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
MachineClass *mc = MACHINE_GET_CLASS(spapr);
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
CPUState *cs = CPU(core->threads);
@@ -2905,17 +3003,23 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
 * of hotplugged CPUs.
 */
spapr_hotplug_req_add_by_index(drc);
} else {
/*
 * Set the right DRC states for cold plugged CPU.
 */
if (drc) {
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
}
}
core_slot->cpu = OBJECT(dev);

if (smc->pre_2_10_has_unused_icps) {
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
int i;

for (i = 0; i < cc->nr_threads; i++) {
sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
void *obj = sc->threads + i * size;

cs = CPU(obj);
pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
}
}
}

static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
@@ -3356,7 +3460,12 @@ DEFINE_SPAPR_MACHINE(2_10, "2.10", true);
 * pseries-2.9
 */
#define SPAPR_COMPAT_2_9 \
HW_COMPAT_2_9
HW_COMPAT_2_9 \
{ \
.driver = TYPE_POWERPC_CPU, \
.property = "pre-2.10-migration", \
.value = "on", \
}, \

static void spapr_machine_2_9_instance_options(MachineState *machine)
{
@@ -3365,9 +3474,12 @@ static void spapr_machine_2_9_instance_options(MachineState *machine)

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

spapr_machine_2_10_class_options(mc);
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
smc->pre_2_10_has_unused_icps = true;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);

@@ -20,6 +20,57 @@
#include "sysemu/numa.h"
#include "qemu/error-report.h"

void spapr_cpu_parse_features(sPAPRMachineState *spapr)
{
/*
 * Backwards compatibility hack:
 *
 * CPUs had a "compat=" property which didn't make sense for
 * anything except pseries. It was replaced by "max-cpu-compat"
 * machine option. This supports old command lines like
 * -cpu POWER8,compat=power7
 * By stripping the compat option and applying it to the machine
 * before passing it on to the cpu level parser.
 */
gchar **inpieces;
int i, j;
gchar *compat_str = NULL;

inpieces = g_strsplit(MACHINE(spapr)->cpu_model, ",", 0);

/* inpieces[0] is the actual model string */
i = 1;
j = 1;
while (inpieces[i]) {
if (g_str_has_prefix(inpieces[i], "compat=")) {
/* in case of multiple compat= options */
g_free(compat_str);
compat_str = inpieces[i];
} else {
j++;
}

i++;
/* Excise compat options from list */
inpieces[j] = inpieces[i];
}

if (compat_str) {
char *val = compat_str + strlen("compat=");
gchar *newprops = g_strjoinv(",", inpieces);

object_property_set_str(OBJECT(spapr), val, "max-cpu-compat",
&error_fatal);

ppc_cpu_parse_features(newprops);
g_free(newprops);
} else {
ppc_cpu_parse_features(MACHINE(spapr)->cpu_model);
}

g_strfreev(inpieces);
}

static void spapr_cpu_reset(void *opaque)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
@@ -67,16 +118,6 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
/* Enable PAPR mode in TCG or KVM */
cpu_ppc_set_papr(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));

if (cpu->max_compat) {
Error *local_err = NULL;

ppc_set_compat(cpu, cpu->max_compat, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
}

qemu_register_reset(spapr_cpu_reset, cpu);
spapr_cpu_reset(cpu);
}
@@ -137,7 +178,7 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp)
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUState *cs = CPU(child);
PowerPCCPU *cpu = POWERPC_CPU(cs);
Object *obj = NULL;
Object *obj;

object_property_set_bool(child, true, "realized", &local_err);
if (local_err) {
@@ -157,13 +198,14 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp)
object_property_add_const_link(obj, ICP_PROP_CPU, child, &error_abort);
object_property_set_bool(obj, true, "realized", &local_err);
if (local_err) {
goto error;
goto free_icp;
}

return;

error:
free_icp:
object_unparent(obj);
error:
error_propagate(errp, local_err);
}

@@ -46,30 +46,64 @@ uint32_t spapr_drc_index(sPAPRDRConnector *drc)
| (drc->id & DRC_INDEX_ID_MASK);
}

static uint32_t set_isolation_state(sPAPRDRConnector *drc,
sPAPRDRIsolationState state)
static uint32_t drc_isolate_physical(sPAPRDRConnector *drc)
{
trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state);

/* if the guest is configuring a device attached to this DRC, we
 * should reset the configuration state at this point since it may
 * no longer be reliable (guest released device and needs to start
 * over, or unplug occurred so the FDT is no longer valid)
 */
if (state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
g_free(drc->ccs);
drc->ccs = NULL;
}
g_free(drc->ccs);
drc->ccs = NULL;

if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* cannot unisolate a non-existent resource, and, or resources
 * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, 13.5.3.5)
 */
if (!drc->dev ||
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
return RTAS_OUT_NO_SUCH_INDICATOR;
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;

/* if we're awaiting release, but still in an unconfigured state,
 * it's likely the guest is still in the process of configuring
 * the device and is transitioning the devices to an ISOLATED
 * state as a part of that process. so we only complete the
 * removal when this transition happens for a device in a
 * configured state, as suggested by the state diagram from PAPR+
 * 2.7, 13.4
 */
if (drc->awaiting_release) {
uint32_t drc_index = spapr_drc_index(drc);
if (drc->configured) {
trace_spapr_drc_set_isolation_state_finalizing(drc_index);
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
} else {
trace_spapr_drc_set_isolation_state_deferring(drc_index);
}
}
drc->configured = false;

return RTAS_OUT_SUCCESS;
}

static uint32_t drc_unisolate_physical(sPAPRDRConnector *drc)
{
/* cannot unisolate a non-existent resource, and, or resources
 * which are in an 'UNUSABLE' allocation state. (PAPR 2.7,
 * 13.5.3.5)
 */
if (!drc->dev) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}

drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;

return RTAS_OUT_SUCCESS;
}

static uint32_t drc_isolate_logical(sPAPRDRConnector *drc)
{
/* if the guest is configuring a device attached to this DRC, we
 * should reset the configuration state at this point since it may
 * no longer be reliable (guest released device and needs to start
 * over, or unplug occurred so the FDT is no longer valid)
 */
g_free(drc->ccs);
drc->ccs = NULL;

/*
 * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't
@@ -81,66 +115,87 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
 * If the LMB being removed doesn't belong to a DIMM device that is
 * actually being unplugged, fail the isolation request here.
 */
if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB) {
if ((state == SPAPR_DR_ISOLATION_STATE_ISOLATED) &&
!drc->awaiting_release) {
return RTAS_OUT_HW_ERROR;
}
if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB
&& !drc->awaiting_release) {
return RTAS_OUT_HW_ERROR;
}

drc->isolation_state = state;
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;

if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
/* if we're awaiting release, but still in an unconfigured state,
 * it's likely the guest is still in the process of configuring
 * the device and is transitioning the devices to an ISOLATED
 * state as a part of that process. so we only complete the
 * removal when this transition happens for a device in a
 * configured state, as suggested by the state diagram from
 * PAPR+ 2.7, 13.4
 */
if (drc->awaiting_release) {
uint32_t drc_index = spapr_drc_index(drc);
if (drc->configured) {
trace_spapr_drc_set_isolation_state_finalizing(drc_index);
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
} else {
trace_spapr_drc_set_isolation_state_deferring(drc_index);
}
/* if we're awaiting release, but still in an unconfigured state,
 * it's likely the guest is still in the process of configuring
 * the device and is transitioning the devices to an ISOLATED
 * state as a part of that process. so we only complete the
 * removal when this transition happens for a device in a
 * configured state, as suggested by the state diagram from PAPR+
 * 2.7, 13.4
 */
if (drc->awaiting_release) {
uint32_t drc_index = spapr_drc_index(drc);
if (drc->configured) {
trace_spapr_drc_set_isolation_state_finalizing(drc_index);
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
} else {
trace_spapr_drc_set_isolation_state_deferring(drc_index);
}
drc->configured = false;
}
drc->configured = false;

return RTAS_OUT_SUCCESS;
}

static uint32_t set_allocation_state(sPAPRDRConnector *drc,
sPAPRDRAllocationState state)
static uint32_t drc_unisolate_logical(sPAPRDRConnector *drc)
{
trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state);
/* cannot unisolate a non-existent resource, and, or resources
 * which are in an 'UNUSABLE' allocation state. (PAPR 2.7,
 * 13.5.3.5)
 */
if (!drc->dev ||
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}

if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
/* if there's no resource/device associated with the DRC, there's
 * no way for us to put it in an allocation state consistent with
 * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should
 * result in an RTAS return code of -3 / "no such indicator"
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;

return RTAS_OUT_SUCCESS;
}

static uint32_t drc_set_usable(sPAPRDRConnector *drc)
{
/* if there's no resource/device associated with the DRC, there's
 * no way for us to put it in an allocation state consistent with
 * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should
 * result in an RTAS return code of -3 / "no such indicator"
 */
if (!drc->dev) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}
if (drc->awaiting_release && drc->awaiting_allocation) {
/* kernel is acknowledging a previous hotplug event
 * while we are already removing it.
 * it's safe to ignore awaiting_allocation here since we know the
 * situation is predicated on the guest either already having done
 * so (boot-time hotplug), or never being able to acquire in the
 * first place (hotplug followed by immediate unplug).
 */
if (!drc->dev) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}
return RTAS_OUT_NO_SUCH_INDICATOR;
}

if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = state;
if (drc->awaiting_release &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
uint32_t drc_index = spapr_drc_index(drc);
trace_spapr_drc_set_allocation_state_finalizing(drc_index);
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
} else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
drc->awaiting_allocation = false;
}
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
drc->awaiting_allocation = false;

return RTAS_OUT_SUCCESS;
}

static uint32_t drc_set_unusable(sPAPRDRConnector *drc)
{
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
if (drc->awaiting_release) {
uint32_t drc_index = spapr_drc_index(drc);
trace_spapr_drc_set_allocation_state_finalizing(drc_index);
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
}

return RTAS_OUT_SUCCESS;
}

@@ -172,12 +227,6 @@ static const char *spapr_drc_name(sPAPRDRConnector *drc)
return g_strdup_printf("%s%d", drck->drc_name_prefix, drc->id);
}

/* has the guest been notified of device attachment? */
static void set_signalled(sPAPRDRConnector *drc)
{
drc->signalled = true;
}

/*
 * dr-entity-sense sensor value
 * returned via get-sensor-state RTAS calls
@@ -304,33 +353,12 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
}
g_assert(fdt || coldplug);

/* NOTE: setting initial isolation state to UNISOLATED means we can't
 * detach unless guest has a userspace/kernel that moves this state
 * back to ISOLATED in response to an unplug event, or this is done
 * manually by the admin prior. if we force things while the guest
 * may be accessing the device, we can easily crash the guest, so we
 * we defer completion of removal in such cases to the reset() hook.
 */
if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
}
drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;

drc->dev = d;
drc->fdt = fdt;
drc->fdt_start_offset = fdt_start_offset;
drc->configured = coldplug;
/* 'logical' DR resources such as memory/cpus are in some cases treated
 * as a pool of resources from which the guest is free to choose from
 * based on only a count. for resources that can be assigned in this
 * fashion, we must assume the resource is signalled immediately
 * since a single hotplug request might make an arbitrary number of
 * such attached resources available to the guest, as opposed to
 * 'physical' DR resources such as PCI where each device/resource is
 * signalled individually.
 */
drc->signalled = (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI)
? true : coldplug;

if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->awaiting_allocation = true;
@@ -342,49 +370,8 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
NULL, 0, NULL);
}

void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
static void spapr_drc_release(sPAPRDRConnector *drc)
{
trace_spapr_drc_detach(spapr_drc_index(drc));

/* if we've signalled device presence to the guest, or if the guest
 * has gone ahead and configured the device (via manually-executed
 * device add via drmgr in guest, namely), we need to wait
 * for the guest to quiesce the device before completing detach.
 * Otherwise, we can assume the guest hasn't seen it and complete the
 * detach immediately. Note that there is a small race window
 * just before, or during, configuration, which is this context
 * refers mainly to fetching the device tree via RTAS.
 * During this window the device access will be arbitrated by
 * associated DRC, which will simply fail the RTAS calls as invalid.
 * This is recoverable within guest and current implementations of
 * drmgr should be able to cope.
 */
if (!drc->signalled && !drc->configured) {
/* if the guest hasn't seen the device we can't rely on it to
 * set it back to an isolated state via RTAS, so do it here manually
 */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
}

if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}

if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}

if (drc->awaiting_allocation) {
drc->awaiting_release = true;
trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc));
return;
}

drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;

/* Calling release callbacks based on spapr_drc_type(drc). */
@@ -412,6 +399,32 @@ void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
drc->dev = NULL;
}

void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
{
trace_spapr_drc_detach(spapr_drc_index(drc));

if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}

if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc));
drc->awaiting_release = true;
return;
}

if (drc->awaiting_allocation) {
drc->awaiting_release = true;
trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc));
return;
}

spapr_drc_release(drc);
}

static bool release_pending(sPAPRDRConnector *drc)
{
return drc->awaiting_release;
@@ -420,7 +433,6 @@ static bool release_pending(sPAPRDRConnector *drc)
static void reset(DeviceState *d)
{
sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);

trace_spapr_drc_reset(spapr_drc_index(drc));

@@ -428,32 +440,26 @@ static void reset(DeviceState *d)
drc->ccs = NULL;

/* immediately upon reset we can safely assume DRCs whose devices
 * are pending removal can be safely removed, and that they will
 * subsequently be left in an ISOLATED state. move the DRC to this
 * state in these cases (which will in turn complete any pending
 * device removals)
 * are pending removal can be safely removed.
 */
if (drc->awaiting_release) {
drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED);
/* generally this should also finalize the removal, but if the device
 * hasn't yet been configured we normally defer removal under the
 * assumption that this transition is taking place as part of device
 * configuration. so check if we're still waiting after this, and
 * force removal if we are
 */
if (drc->awaiting_release) {
spapr_drc_detach(drc, DEVICE(drc->dev), NULL);
}

/* non-PCI devices may be awaiting a transition to UNUSABLE */
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->awaiting_release) {
drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE);
}
spapr_drc_release(drc);
}

if (drck->dr_entity_sense(drc) == SPAPR_DR_ENTITY_SENSE_PRESENT) {
drck->set_signalled(drc);
drc->awaiting_allocation = false;

if (drc->dev) {
/* A device present at reset is coldplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
}
} else {
/* Otherwise device is absent, but might be hotplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
}
}
}

@@ -479,7 +485,7 @@ static bool spapr_drc_needed(void *opaque)
case SPAPR_DR_CONNECTOR_TYPE_LMB:
rc = !((drc->isolation_state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) &&
(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) &&
drc->configured && drc->signalled && !drc->awaiting_release);
drc->configured && !drc->awaiting_release);
break;
case SPAPR_DR_CONNECTOR_TYPE_PHB:
case SPAPR_DR_CONNECTOR_TYPE_VIO:
@@ -501,7 +507,6 @@ static const VMStateDescription vmstate_spapr_drc = {
VMSTATE_BOOL(configured, sPAPRDRConnector),
VMSTATE_BOOL(awaiting_release, sPAPRDRConnector),
VMSTATE_BOOL(awaiting_allocation, sPAPRDRConnector),
VMSTATE_BOOL(signalled, sPAPRDRConnector),
VMSTATE_END_OF_LIST()
}
};
@@ -596,10 +601,7 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
dk->reset = reset;
dk->realize = realize;
dk->unrealize = unrealize;
drck->set_isolation_state = set_isolation_state;
drck->set_allocation_state = set_allocation_state;
drck->release_pending = release_pending;
drck->set_signalled = set_signalled;
/*
 * Reason: it crashes FIXME find and document the real reason
 */
@@ -611,6 +613,8 @@ static void spapr_drc_physical_class_init(ObjectClass *k, void *data)
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);

drck->dr_entity_sense = physical_entity_sense;
drck->isolate = drc_isolate_physical;
drck->unisolate = drc_unisolate_physical;
}

static void spapr_drc_logical_class_init(ObjectClass *k, void *data)
@@ -618,6 +622,8 @@ static void spapr_drc_logical_class_init(ObjectClass *k, void *data)
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);

drck->dr_entity_sense = logical_entity_sense;
drck->isolate = drc_isolate_logical;
drck->unisolate = drc_unisolate_logical;
}

static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
@@ -858,24 +864,45 @@ static uint32_t rtas_set_isolation_state(uint32_t idx, uint32_t state)
sPAPRDRConnectorClass *drck;

if (!drc) {
return RTAS_OUT_PARAM_ERROR;
return RTAS_OUT_NO_SUCH_INDICATOR;
}

trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state);

drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
return drck->set_isolation_state(drc, state);

switch (state) {
case SPAPR_DR_ISOLATION_STATE_ISOLATED:
return drck->isolate(drc);

case SPAPR_DR_ISOLATION_STATE_UNISOLATED:
return drck->unisolate(drc);

default:
return RTAS_OUT_PARAM_ERROR;
}
}

static uint32_t rtas_set_allocation_state(uint32_t idx, uint32_t state)
{
sPAPRDRConnector *drc = spapr_drc_by_index(idx);
sPAPRDRConnectorClass *drck;

if (!drc) {
return RTAS_OUT_PARAM_ERROR;
if (!drc || !object_dynamic_cast(OBJECT(drc), TYPE_SPAPR_DRC_LOGICAL)) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}

drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
return drck->set_allocation_state(drc, state);
trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state);

switch (state) {
case SPAPR_DR_ALLOCATION_STATE_USABLE:
return drc_set_usable(drc);

case SPAPR_DR_ALLOCATION_STATE_UNUSABLE:
return drc_set_unusable(drc);

default:
return RTAS_OUT_PARAM_ERROR;
}
}

static uint32_t rtas_set_dr_indicator(uint32_t idx, uint32_t state)

@@ -475,13 +475,6 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
RTAS_LOG_TYPE_EPOW)));
}

static void spapr_hotplug_set_signalled(uint32_t drc_index)
{
sPAPRDRConnector *drc = spapr_drc_by_index(drc_index);
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
drck->set_signalled(drc);
}

static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
sPAPRDRConnectorType drc_type,
union drc_identifier *drc_id)
@@ -528,9 +521,6 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
switch (drc_type) {
case SPAPR_DR_CONNECTOR_TYPE_PCI:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
if (hp->hotplug_action == RTAS_LOG_V6_HP_ACTION_ADD) {
spapr_hotplug_set_signalled(drc_id->index);
}
break;
case SPAPR_DR_CONNECTOR_TYPE_LMB:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;

@@ -1045,11 +1045,11 @@ static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
}
}

static uint32_t cas_check_pvr(PowerPCCPU *cpu, target_ulong *addr,
Error **errp)
static uint32_t cas_check_pvr(sPAPRMachineState *spapr, PowerPCCPU *cpu,
target_ulong *addr, Error **errp)
{
bool explicit_match = false; /* Matched the CPU's real PVR */
uint32_t max_compat = cpu->max_compat;
uint32_t max_compat = spapr->max_compat_pvr;
uint32_t best_compat = 0;
int i;

@@ -1105,7 +1105,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
bool guest_radix;
Error *local_err = NULL;

cas_pvr = cas_check_pvr(cpu, &addr, &local_err);
cas_pvr = cas_check_pvr(spapr, cpu, &addr, &local_err);
if (local_err) {
error_report_err(local_err);
return H_HARDWARE;

@@ -53,6 +53,7 @@ struct sPAPRMachineClass {
bool dr_lmb_enabled;       /* enable dynamic-reconfig/hotplug of LMBs */
bool use_ohci_by_default;  /* use USB-OHCI instead of XHCI */
const char *tcg_default_cpu; /* which (TCG) CPU to simulate by default */
bool pre_2_10_has_unused_icps;
void (*phb_placement)(sPAPRMachineState *spapr, uint32_t index,
uint64_t *buid, hwaddr *pio,
hwaddr *mmio32, hwaddr *mmio64,
@@ -86,16 +87,19 @@ struct sPAPRMachineState {
uint64_t rtc_offset; /* Now used only during incoming migration */
struct PPCTimebase tb;
bool has_graphics;
sPAPROptionVector *ov5;         /* QEMU-supported option vectors */
sPAPROptionVector *ov5_cas;     /* negotiated (via CAS) option vectors */
bool cas_reboot;
bool cas_legacy_guest_workaround;

Notifier epow_notifier;
QTAILQ_HEAD(, sPAPREventLogEntry) pending_events;
bool use_hotplug_event_source;
sPAPREventSource *event_sources;

/* ibm,client-architecture-support option negotiation */
bool cas_reboot;
bool cas_legacy_guest_workaround;
sPAPROptionVector *ov5;         /* QEMU-supported option vectors */
sPAPROptionVector *ov5_cas;     /* negotiated (via CAS) option vectors */
uint32_t max_compat_pvr;

/* Migration state */
int htab_save_index;
bool htab_first_pass;
@@ -635,6 +639,7 @@ void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
uint32_t count, uint32_t index);
void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
uint32_t count, uint32_t index);
void spapr_cpu_parse_features(sPAPRMachineState *spapr);
void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
sPAPRMachineState *spapr);

@@ -199,7 +199,6 @@ typedef struct sPAPRDRConnector {
sPAPRConfigureConnectorState *ccs;

bool awaiting_release;
bool signalled;
bool awaiting_allocation;

/* device pointer, via link property */
@@ -216,16 +215,11 @@ typedef struct sPAPRDRConnectorClass {
const char *drc_name_prefix; /* used other places in device tree */

sPAPRDREntitySense (*dr_entity_sense)(sPAPRDRConnector *drc);

/* accessors for guest-visible (generally via RTAS) DR state */
uint32_t (*set_isolation_state)(sPAPRDRConnector *drc,
sPAPRDRIsolationState state);
uint32_t (*set_allocation_state)(sPAPRDRConnector *drc,
sPAPRDRAllocationState state);
uint32_t (*isolate)(sPAPRDRConnector *drc);
uint32_t (*unisolate)(sPAPRDRConnector *drc);

/* QEMU interfaces for managing hotplug operations */
bool (*release_pending)(sPAPRDRConnector *drc);
void (*set_signalled)(sPAPRDRConnector *drc);
} sPAPRDRConnectorClass;

uint32_t spapr_drc_index(sPAPRDRConnector *drc);

@@ -326,6 +326,16 @@ static void parse_type_number(Visitor *v, const char *name, double *obj,
*obj = val;
}

static void parse_type_null(Visitor *v, const char *name, Error **errp)
{
StringInputVisitor *siv = to_siv(v);

if (!siv->string || siv->string[0]) {
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"null");
}
}

static void string_input_free(Visitor *v)
{
StringInputVisitor *siv = to_siv(v);
@@ -349,6 +359,7 @@ Visitor *string_input_visitor_new(const char *str)
v->visitor.type_bool = parse_type_bool;
v->visitor.type_str = parse_type_str;
v->visitor.type_number = parse_type_number;
v->visitor.type_null = parse_type_null;
v->visitor.start_list = start_list;
v->visitor.next_list = next_list;
v->visitor.check_list = check_list;

@@ -256,6 +256,19 @@ static void print_type_number(Visitor *v, const char *name, double *obj,
string_output_set(sov, g_strdup_printf("%f", *obj));
}

static void print_type_null(Visitor *v, const char *name, Error **errp)
{
StringOutputVisitor *sov = to_sov(v);
char *out;

if (sov->human) {
out = g_strdup("<null>");
} else {
out = g_strdup("");
}
string_output_set(sov, out);
}

static void
start_list(Visitor *v, const char *name, GenericList **list, size_t size,
Error **errp)
@@ -341,6 +354,7 @@ Visitor *string_output_visitor_new(bool human, char **result)
v->visitor.type_bool = print_type_bool;
v->visitor.type_str = print_type_str;
v->visitor.type_number = print_type_number;
v->visitor.type_null = print_type_null;
v->visitor.start_list = start_list;
v->visitor.next_list = next_list;
v->visitor.end_list = end_list;

@ -24,9 +24,11 @@
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/visitor.h"
|
||||
#include "cpu-models.h"
|
||||
|
||||
typedef struct {
|
||||
const char *name;
|
||||
uint32_t pvr;
|
||||
uint64_t pcr;
|
||||
uint64_t pcr_level;
|
||||
@ -38,6 +40,7 @@ static const CompatInfo compat_table[] = {
|
||||
* Ordered from oldest to newest - the code relies on this
|
||||
*/
|
||||
{ /* POWER6, ISA2.05 */
|
||||
.name = "power6",
|
||||
        .pvr = CPU_POWERPC_LOGICAL_2_05,
        .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 |
               PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS,
@@ -45,24 +48,28 @@ static const CompatInfo compat_table[] = {
        .max_threads = 2,
    },
    { /* POWER7, ISA2.06 */
        .name = "power7",
        .pvr = CPU_POWERPC_LOGICAL_2_06,
        .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS,
        .pcr_level = PCR_COMPAT_2_06,
        .max_threads = 4,
    },
    {
        .name = "power7+",
        .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS,
        .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS,
        .pcr_level = PCR_COMPAT_2_06,
        .max_threads = 4,
    },
    { /* POWER8, ISA2.07 */
        .name = "power8",
        .pvr = CPU_POWERPC_LOGICAL_2_07,
        .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07,
        .pcr_level = PCR_COMPAT_2_07,
        .max_threads = 8,
    },
    { /* POWER9, ISA3.00 */
        .name = "power9",
        .pvr = CPU_POWERPC_LOGICAL_3_00,
        .pcr = PCR_COMPAT_3_00,
        .pcr_level = PCR_COMPAT_3_00,
@@ -189,3 +196,98 @@ int ppc_compat_max_threads(PowerPCCPU *cpu)

    return n_threads;
}

static void ppc_compat_prop_get(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    uint32_t compat_pvr = *((uint32_t *)opaque);
    const char *value;

    if (!compat_pvr) {
        value = "";
    } else {
        const CompatInfo *compat = compat_by_pvr(compat_pvr);

        g_assert(compat);

        value = compat->name;
    }

    visit_type_str(v, name, (char **)&value, errp);
}

static void ppc_compat_prop_set(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    Error *local_err = NULL;
    char *value;
    uint32_t compat_pvr;

    visit_type_str(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (strcmp(value, "") == 0) {
        compat_pvr = 0;
    } else {
        int i;
        const CompatInfo *compat = NULL;

        for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
            if (strcmp(value, compat_table[i].name) == 0) {
                compat = &compat_table[i];
                break;
            }
        }

        if (!compat) {
            error_setg(errp, "Invalid compatibility mode \"%s\"", value);
            goto out;
        }
        compat_pvr = compat->pvr;
    }

    *((uint32_t *)opaque) = compat_pvr;

out:
    g_free(value);
}

void ppc_compat_add_property(Object *obj, const char *name,
                             uint32_t *compat_pvr, const char *basedesc,
                             Error **errp)
{
    Error *local_err = NULL;
    gchar *namesv[ARRAY_SIZE(compat_table) + 1];
    gchar *names, *desc;
    int i;

    object_property_add(obj, name, "string",
                        ppc_compat_prop_get, ppc_compat_prop_set, NULL,
                        compat_pvr, &local_err);
    if (local_err) {
        goto out;
    }

    for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
        /*
         * Have to discard const here, because g_strjoinv() takes
         * (gchar **), not (const gchar **) :(
         */
        namesv[i] = (gchar *)compat_table[i].name;
    }
    namesv[ARRAY_SIZE(compat_table)] = NULL;

    names = g_strjoinv(", ", namesv);
    desc = g_strdup_printf("%s. Valid values are %s.", basedesc, names);
    object_property_set_description(obj, name, desc, &local_err);

    g_free(names);
    g_free(desc);

out:
    error_propagate(errp, local_err);
}
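The helper above gives any QOM object a string property ("power7", "power8", ...) backed by a uint32_t logical PVR. As a hedged illustration only (the machine type, struct field, and property name below are assumptions for the example, not taken from this diff), a machine's instance_init could register such a property like this:

    /* Illustrative sketch: expose a compatibility-mode property on a
     * hypothetical machine object using ppc_compat_add_property(). */
    static void my_machine_instance_init(Object *obj)
    {
        MyMachineState *m = MY_MACHINE(obj);   /* assumed QOM type/cast */

        ppc_compat_add_property(obj, "max-cpu-compat", &m->max_compat_pvr,
                                "Maximum permitted CPU compatibility mode",
                                &error_fatal);
    }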
@@ -1189,7 +1189,6 @@ typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass;
 * PowerPCCPU:
 * @env: #CPUPPCState
 * @cpu_dt_id: CPU index used in the device tree. KVM uses this index too
 * @max_compat: Maximal supported logical PVR from the command line
 * @compat_pvr: Current logical PVR, zero if in "raw" mode
 *
 * A PowerPC CPU.
@@ -1201,7 +1200,6 @@ struct PowerPCCPU {

    CPUPPCState env;
    int cpu_dt_id;
    uint32_t max_compat;
    uint32_t compat_pvr;
    PPCVirtualHypervisor *vhyp;
    Object *intc;
@@ -1213,6 +1211,7 @@ struct PowerPCCPU {
    uint64_t mig_insns_flags;
    uint64_t mig_insns_flags2;
    uint32_t mig_nb_BATs;
    bool pre_2_10_migration;
};

static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
@@ -1375,6 +1374,9 @@ void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp);
void ppc_set_compat_all(uint32_t compat_pvr, Error **errp);
#endif
int ppc_compat_max_threads(PowerPCCPU *cpu);
void ppc_compat_add_property(Object *obj, const char *name,
                             uint32_t *compat_pvr, const char *basedesc,
                             Error **errp);
#endif /* defined(TARGET_PPC64) */

#include "exec/cpu-all.h"
@@ -17,6 +17,7 @@
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
@@ -1132,6 +1133,7 @@ void helper_msgsnd(target_ulong rb)
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;
@@ -1141,5 +1143,6 @@ void helper_msgsnd(target_ulong rb)
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
#endif
@@ -8,6 +8,7 @@
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"

static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
@@ -195,6 +196,27 @@ static void cpu_pre_save(void *opaque)
    }
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
@@ -203,10 +225,31 @@ static int cpu_post_load(void *opaque, int version_id)
    target_ulong msr;

    /*
     * We always ignore the source PVR. The user or management
     * software has to take care of running QEMU in a compatible mode.
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatiblity mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */
    env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        Error *local_err = NULL;

        ppc_set_compat(cpu, cpu->compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
@@ -560,6 +603,25 @@ static const VMStateDescription vmstate_tlbmas = {
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
@@ -613,6 +675,7 @@ const VMStateDescription vmstate_ppc_cpu = {
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
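Taken together, the changes above mean an incoming stream no longer has to carry an identical PVR: either both ends run the guest in the same architected compatibility mode (compat_pvr travels in the optional "cpu/compat" subsection and is re-applied through ppc_set_compat() on load), or the destination CPU class's pvr_match() hook must accept the source model. Purely as an illustration (this command line is an assumption, not part of the patch), pinning a compatibility mode on both sides of a migration looks like:

    qemu-system-ppc64 -machine pseries,max-cpu-compat=power8 ...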
@@ -255,5 +255,5 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 1;
    return 0;
}
@@ -33,6 +33,7 @@
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "mmu-book3s-v3.h"
#include "sysemu/qtest.h"

//#define PPC_DUMP_CPU
//#define PPC_DEBUG_SPR
@@ -8413,73 +8414,38 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
    pcc->l1_icache_size = 0x10000;
}

static void powerpc_get_compat(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
/*
 * The CPU used to have a "compat" property which set the
 * compatibility mode PVR. However, this was conceptually broken - it
 * only makes sense on the pseries machine type (otherwise the guest
 * owns the PCR and can control the compatibility mode itself). It's
 * been replaced with the 'max-cpu-compat' property on the pseries
 * machine type. For backwards compatibility, pseries specially
 * parses the -cpu parameter and converts old compat= parameters into
 * the appropriate machine parameters. This stub implementation of
 * the parameter catches any uses on explicitly created CPUs.
 */
static void getset_compat_deprecated(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    char *value = (char *)"";
    Property *prop = opaque;
    uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop);

    switch (*max_compat) {
    case CPU_POWERPC_LOGICAL_2_05:
        value = (char *)"power6";
        break;
    case CPU_POWERPC_LOGICAL_2_06:
        value = (char *)"power7";
        break;
    case CPU_POWERPC_LOGICAL_2_07:
        value = (char *)"power8";
        break;
    case 0:
        break;
    default:
        error_report("Internal error: compat is set to %x", *max_compat);
        abort();
        break;
    if (!qtest_enabled()) {
        error_report("CPU 'compat' property is deprecated and has no effect; "
                     "use max-cpu-compat machine property instead");
    }

    visit_type_str(v, name, &value, errp);
    visit_type_null(v, name, NULL);
}

static void powerpc_set_compat(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    Error *error = NULL;
    char *value = NULL;
    Property *prop = opaque;
    uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop);

    visit_type_str(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    if (strcmp(value, "power6") == 0) {
        *max_compat = CPU_POWERPC_LOGICAL_2_05;
    } else if (strcmp(value, "power7") == 0) {
        *max_compat = CPU_POWERPC_LOGICAL_2_06;
    } else if (strcmp(value, "power8") == 0) {
        *max_compat = CPU_POWERPC_LOGICAL_2_07;
    } else {
        error_setg(errp, "Invalid compatibility mode \"%s\"", value);
    }

    g_free(value);
}

static PropertyInfo powerpc_compat_propinfo = {
static PropertyInfo ppc_compat_deprecated_propinfo = {
    .name = "str",
    .description = "compatibility mode, power6/power7/power8",
    .get = powerpc_get_compat,
    .set = powerpc_set_compat,
    .description = "compatibility mode (deprecated)",
    .get = getset_compat_deprecated,
    .set = getset_compat_deprecated,
};

#define DEFINE_PROP_POWERPC_COMPAT(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, powerpc_compat_propinfo, uint32_t)

static Property powerpc_servercpu_properties[] = {
    DEFINE_PROP_POWERPC_COMPAT("compat", PowerPCCPU, max_compat),
    {
        .name = "compat",
        .info = &ppc_compat_deprecated_propinfo,
    },
    DEFINE_PROP_END_OF_LIST(),
};

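As a usage illustration of this deprecation (the command lines are examples, not taken from the patch): what used to be requested per CPU, e.g.

    -cpu POWER8,compat=power7

is now expressed as a pseries machine property,

    -machine pseries,max-cpu-compat=power7

pseries still rewrites the old -cpu spelling for backwards compatibility, while setting "compat" directly on an explicitly created CPU object now only triggers the deprecation warning in getset_compat_deprecated() above.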
@@ -9859,14 +9825,14 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          cpu->cpu_dt_id * smp_threads / max_smt);
        return;
        goto unrealize;
    }
#endif

    if (tcg_enabled()) {
        if (ppc_fixup_cpu(cpu) != 0) {
            error_setg(errp, "Unable to emulate selected CPU with TCG");
            return;
            goto unrealize;
        }
    }

@@ -9875,14 +9841,14 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
        error_setg(errp, "CPU does not possess a BookE or 4xx MMU. "
                   "Please use qemu-system-ppc or qemu-system-ppc64 instead "
                   "or choose another CPU model.");
        return;
        goto unrealize;
    }
#endif

    create_ppc_opcodes(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
        goto unrealize;
    }
    init_ppc_proc(cpu);

@@ -10067,6 +10033,10 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
        fflush(stdout);
    }
#endif
    return;

unrealize:
    cpu_exec_unrealizefn(cs);
}

static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
@@ -10640,6 +10610,8 @@ static gchar *ppc_gdb_arch_name(CPUState *cs)

static Property ppc_cpu_properties[] = {
    DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
    DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration,
                     false),
    DEFINE_PROP_END_OF_LIST(),
};