Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190813' into staging

ppc patch queue 2019-08-13 (last minute qemu-4.1 fixes)

Here's a very, very last minute pull request for qemu-4.1.  This fixes
two nasty bugs with the XIVE interrupt controller in "dual" mode
(where the guest decides which interrupt controller it wants to use).
One occurs when resetting the guest while I/O is active, and the other
with migration of hotplugged CPUs.

The timing here is very unfortunate.  Alas, we only spotted these bugs
very late, and I was sick last week, delaying analysis and fix even
further.

This series hasn't had nearly as much testing as I'd really like, but
I'd still like to squeeze it into qemu-4.1 if possible, since
definitely fixing two bad bugs seems like an acceptable tradeoff for
the risk of introducing different bugs.

# gpg: Signature made Tue 13 Aug 2019 07:56:42 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190813:
  spapr/xive: Fix migration of hot-plugged CPUs
  spapr: Reset CAS & IRQ subsystem after devices

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 968ff692cf
Peter Maydell <peter.maydell@linaro.org>  2019-08-13 11:35:30 +01:00
4 changed files with 50 additions and 15 deletions
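Both patches below lean on QEMU's VMState machinery, so a quick refresher may help. Here is a minimal sketch of the post_load pattern that the migration fix adds to the XiveTCTX model; MyDevState and its regs field are hypothetical, only the VMStateDescription API and macros are QEMU's own:

#include "migration/vmstate.h"   /* VMStateDescription, VMSTATE_* macros */

typedef struct MyDevState {      /* hypothetical device, for illustration */
    uint8_t regs[64];
} MyDevState;

/*
 * Runs once this device's fields have been read from the migration
 * stream; the natural place to push restored state into the kernel
 * (KVM) side, which is what vmstate_xive_tctx_post_load() does below.
 */
static int mydev_post_load(void *opaque, int version_id)
{
    MyDevState *s = opaque;

    (void)s;        /* e.g. sync s->regs into KVM here */
    return 0;       /* returning non-zero fails the incoming migration */
}

static const VMStateDescription vmstate_mydev = {
    .name = "mydev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = mydev_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, MyDevState),
        VMSTATE_END_OF_LIST()
    },
};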

diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c

@@ -72,11 +72,17 @@ static void kvm_cpu_disable_all(void)
 /*
  * XIVE Thread Interrupt Management context (KVM)
  */
 
-static void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
+void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
 {
+    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
     uint64_t state[2];
     int ret;
 
+    /* The KVM XIVE device is not in use yet */
+    if (xive->fd == -1) {
+        return;
+    }
+
     /* word0 and word1 of the OS ring. */
     state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);
@@ -655,7 +661,16 @@ int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
         }
     }
 
-    /* Restore the thread interrupt contexts */
+    /*
+     * Restore the thread interrupt contexts of initial CPUs.
+     *
+     * The context of hotplugged CPUs is restored later, by the
+     * 'post_load' handler of the XiveTCTX model because they are not
+     * available at the time the SpaprXive 'post_load' method is
+     * called. We can not restore the context of all CPUs in the
+     * 'post_load' handler of XiveTCTX because the machine is not
+     * necessarily connected to the KVM device at that time.
+     */
     CPU_FOREACH(cs) {
         PowerPCCPU *cpu = POWERPC_CPU(cs);

diff --git a/hw/intc/xive.c b/hw/intc/xive.c

@@ -615,12 +615,31 @@ static int vmstate_xive_tctx_pre_save(void *opaque)
     return 0;
 }
 
+static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
+{
+    Error *local_err = NULL;
+
+    if (kvm_irqchip_in_kernel()) {
+        /*
+         * Required for hotplugged CPU, for which the state comes
+         * after all states of the machine.
+         */
+        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
 static const VMStateDescription vmstate_xive_tctx = {
     .name = TYPE_XIVE_TCTX,
     .version_id = 1,
     .minimum_version_id = 1,
     .pre_save = vmstate_xive_tctx_pre_save,
-    .post_load = NULL, /* handled by the sPAPRxive model */
+    .post_load = vmstate_xive_tctx_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_BUFFER(regs, XiveTCTX),
         VMSTATE_END_OF_LIST()

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c

@@ -1726,6 +1726,18 @@ static void spapr_machine_reset(MachineState *machine)
         spapr_setup_hpt_and_vrma(spapr);
     }
 
+    /*
+     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+     * called from vPHB reset handler so we initialize the counter here.
+     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+     * must be equally distant from any other node.
+     * The final value of spapr->gpu_numa_id is going to be written to
+     * max-associativity-domains in spapr_build_fdt().
+     */
+    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
+    qemu_devices_reset();
+
     /*
      * If this reset wasn't generated by CAS, we should reset our
      * negotiated options and start from scratch
@@ -1741,18 +1753,6 @@ static void spapr_machine_reset(MachineState *machine)
         spapr_irq_msi_reset(spapr);
     }
 
-    /*
-     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
-     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
-     * called from vPHB reset handler so we initialize the counter here.
-     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
-     * must be equally distant from any other node.
-     * The final value of spapr->gpu_numa_id is going to be written to
-     * max-associativity-domains in spapr_build_fdt().
-     */
-    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
-    qemu_devices_reset();
-
     /*
      * This is fixing some of the default configuration of the XIVE
      * devices. To be called after the reset of the machine devices.
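Seen side by side, the two spapr.c hunks simply move the GPU NUMA counter setup and qemu_devices_reset() from after the CAS/IRQ teardown to before it. A sketch of the resulting ordering in spapr_machine_reset(), with hypothetical stubs standing in for the real helpers (which take arguments omitted here):

/* Ordering sketch only; not the actual QEMU code. */
static void devices_reset(void)       { /* qemu_devices_reset() */ }
static void cas_options_reset(void)   { /* the "reset our negotiated options" block */ }
static void irq_subsystem_reset(void) { /* spapr_irq_msi_reset() and the XIVE fixup */ }

static void machine_reset_order(void)
{
    /*
     * 1. Reset the devices first, including the vPHBs whose reset
     *    handlers assign GPU NUMA ids via spapr_pci_collect_nvgpu().
     */
    devices_reset();

    /*
     * 2. Only then reset the CAS-negotiated options and the IRQ
     *    subsystem, so a reset issued while I/O is active no longer
     *    runs device resets against a half-reset interrupt controller
     *    (the first bug in the cover letter).
     */
    cas_options_reset();
    irq_subsystem_reset();
}

int main(void) { machine_reset_order(); return 0; }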

diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h

@@ -438,5 +438,6 @@ void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val);
 void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp);
 void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp);
 void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp);
+void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp);
 
 #endif /* PPC_XIVE_H */