ppc patch queue for 2021-04-12

Here's what I hope is the last ppc related pull request for qemu-6.0.
 
 The 2 patches here revert a behavioural change that after further
 discussion we concluded was a bad idea (adding a timeout for
 possibly-failed hot unplug requests).  Instead it implements a
 different approach to the original problem: we again let unplug
 requests the guest doesn't respond to remain pending indefinitely, but
 no longer allow those to block attempts to retry the same unplug
 again.
 
 The change is a bit more complex than I'd like for this late in the
 freeze.  Nonetheless, I think it's important to merge this for 6.0, so
 we don't allow a release which has the probably-a-bad-idea timeout
 behaviour.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEdfRlhq5hpmzETofcbDjKyiDZs5IFAmBz2eYACgkQbDjKyiDZ
 s5L1bw//XYKENrHBOgP7TCU+q2895PpCn5jxarPMrpX1fee1mlu+ncskZtlMQr8b
 C8zaWiUO9V6ezknVvqTe/4Wx+DmahGyrHwJQ/Fqj6o7C+057jEumPqBTuQFrNwz+
 4noougivOn1bVR5+vwDqAvNvO2xe49RPVd3kchHUe6tYHEa/k2kwqHeSFLZtm52R
 MusDWOD8FTxfY5uIy2LH4i2/Qe4PMn7by9ZJuldDAegHP4qn+Ffv5fO3a+HlDpOC
 KLwIub/PkLl5dtP4lru+PDgIpYwzdzZBDT/wb+OCspb4/ujqufJixt9DglnWQjEW
 3MwNK8ZnaiyrC8v9mFHzmcIJaRFQE55cr8bviB1SupJYJKB5YKuWMKKA+5E3LUuV
 VEbGzdcjP8xv0F7GzARsj8tt5iH54UJYbWLVxNy33tIfLbBrdbOH28DRhnKNZ3Y8
 f8Tz4VOKrqFL2ybQXOmSpitf5/h0nUMFRt2jZdhZLhcQ09b49k9loxPGAQoqB3aW
 HuIF3JSWnyAfFRxQjTsjzBDlgGQlghUEKj0fAqIx6SBhWJq5fg+nJ5hM1BEZIKfa
 gsy/Whs458YcSS/ArPPJ3vWC/RU8JvAAQcgSQF4AAAu/3h4FVnPCJGE8d7Jdsqdr
 KK8Y4y1PsXg7Ar7L1qTGJNzM4PKaAh4m8Mm8LLp6sVr4bAOl654=
 =fIJT
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dg-gitlab/tags/ppc-for-6.0-20210412' into staging

ppc patch queue for 2021-04-12

Here's what I hope is the last ppc related pull request for qemu-6.0.

The 2 patches here revert a behavioural change that after further
discussion we concluded was a bad idea (adding a timeout for
possibly-failed hot unplug requests).  Instead it implements a
different approach to the original problem: we again let unplug
requests the guest doesn't respond to remain pending indefinitely, but
no longer allow those to block attempts to retry the same unplug
again.

The change is a bit more complex than I'd like for this late in the
freeze.  Nonetheless, I think it's important to merge this for 6.0, so
we don't allow a release which has the probably-a-bad-idea timeout
behaviour.

# gpg: Signature made Mon 12 Apr 2021 06:25:58 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dg-gitlab/tags/ppc-for-6.0-20210412:
  spapr.c: always pulse guest IRQ in spapr_core_unplug_request()
  spapr: rollback 'unplug timeout' for CPU hotunplugs

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2021-04-13 13:05:07 +01:00
commit dce628a97f
5 changed files with 10 additions and 83 deletions

View File

@ -3777,12 +3777,17 @@ void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
if (!spapr_drc_unplug_requested(drc)) { if (!spapr_drc_unplug_requested(drc)) {
spapr_drc_unplug_request(drc); spapr_drc_unplug_request(drc);
spapr_hotplug_req_remove_by_index(drc);
} else {
error_setg(errp, "core-id %d unplug is still pending, %d seconds "
"timeout remaining",
cc->core_id, spapr_drc_unplug_timeout_remaining_sec(drc));
} }
/*
* spapr_hotplug_req_remove_by_index is left unguarded, out of the
* "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
* pulses removing the same CPU. Otherwise, in a failed hotunplug
* attempt (e.g. the kernel will refuse to remove the last online
* CPU), we will never attempt it again because unplug_requested
* will still be 'true' in that case.
*/
spapr_hotplug_req_remove_by_index(drc);
} }
int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,

View File

@ -57,8 +57,6 @@ static void spapr_drc_release(SpaprDrc *drc)
drck->release(drc->dev); drck->release(drc->dev);
drc->unplug_requested = false; drc->unplug_requested = false;
timer_del(drc->unplug_timeout_timer);
g_free(drc->fdt); g_free(drc->fdt);
drc->fdt = NULL; drc->fdt = NULL;
drc->fdt_start_offset = 0; drc->fdt_start_offset = 0;
@ -372,17 +370,6 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
} while (fdt_depth != 0); } while (fdt_depth != 0);
} }
static void spapr_drc_start_unplug_timeout_timer(SpaprDrc *drc)
{
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
if (drck->unplug_timeout_seconds != 0) {
timer_mod(drc->unplug_timeout_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
drck->unplug_timeout_seconds * 1000);
}
}
void spapr_drc_attach(SpaprDrc *drc, DeviceState *d) void spapr_drc_attach(SpaprDrc *drc, DeviceState *d)
{ {
trace_spapr_drc_attach(spapr_drc_index(drc)); trace_spapr_drc_attach(spapr_drc_index(drc));
@ -409,8 +396,6 @@ void spapr_drc_unplug_request(SpaprDrc *drc)
drc->unplug_requested = true; drc->unplug_requested = true;
spapr_drc_start_unplug_timeout_timer(drc);
if (drc->state != drck->empty_state) { if (drc->state != drck->empty_state) {
trace_spapr_drc_awaiting_quiesce(spapr_drc_index(drc)); trace_spapr_drc_awaiting_quiesce(spapr_drc_index(drc));
return; return;
@ -419,15 +404,6 @@ void spapr_drc_unplug_request(SpaprDrc *drc)
spapr_drc_release(drc); spapr_drc_release(drc);
} }
int spapr_drc_unplug_timeout_remaining_sec(SpaprDrc *drc)
{
if (drc->unplug_requested) {
return timer_deadline_ms(drc->unplug_timeout_timer) / 1000;
}
return 0;
}
bool spapr_drc_reset(SpaprDrc *drc) bool spapr_drc_reset(SpaprDrc *drc)
{ {
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
@ -499,23 +475,11 @@ static bool spapr_drc_needed(void *opaque)
spapr_drc_unplug_requested(drc); spapr_drc_unplug_requested(drc);
} }
static int spapr_drc_post_load(void *opaque, int version_id)
{
SpaprDrc *drc = opaque;
if (drc->unplug_requested) {
spapr_drc_start_unplug_timeout_timer(drc);
}
return 0;
}
static const VMStateDescription vmstate_spapr_drc = { static const VMStateDescription vmstate_spapr_drc = {
.name = "spapr_drc", .name = "spapr_drc",
.version_id = 1, .version_id = 1,
.minimum_version_id = 1, .minimum_version_id = 1,
.needed = spapr_drc_needed, .needed = spapr_drc_needed,
.post_load = spapr_drc_post_load,
.fields = (VMStateField []) { .fields = (VMStateField []) {
VMSTATE_UINT32(state, SpaprDrc), VMSTATE_UINT32(state, SpaprDrc),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
@ -526,15 +490,6 @@ static const VMStateDescription vmstate_spapr_drc = {
} }
}; };
static void drc_unplug_timeout_cb(void *opaque)
{
SpaprDrc *drc = opaque;
if (drc->unplug_requested) {
drc->unplug_requested = false;
}
}
static void drc_realize(DeviceState *d, Error **errp) static void drc_realize(DeviceState *d, Error **errp)
{ {
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
@ -557,11 +512,6 @@ static void drc_realize(DeviceState *d, Error **errp)
object_property_add_alias(root_container, link_name, object_property_add_alias(root_container, link_name,
drc->owner, child_name); drc->owner, child_name);
g_free(link_name); g_free(link_name);
drc->unplug_timeout_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
drc_unplug_timeout_cb,
drc);
vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc, vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc); drc);
trace_spapr_drc_realize_complete(spapr_drc_index(drc)); trace_spapr_drc_realize_complete(spapr_drc_index(drc));
@ -579,7 +529,6 @@ static void drc_unrealize(DeviceState *d)
name = g_strdup_printf("%x", spapr_drc_index(drc)); name = g_strdup_printf("%x", spapr_drc_index(drc));
object_property_del(root_container, name); object_property_del(root_container, name);
g_free(name); g_free(name);
timer_free(drc->unplug_timeout_timer);
} }
SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type, SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
@ -721,7 +670,6 @@ static void spapr_drc_cpu_class_init(ObjectClass *k, void *data)
drck->drc_name_prefix = "CPU "; drck->drc_name_prefix = "CPU ";
drck->release = spapr_core_release; drck->release = spapr_core_release;
drck->dt_populate = spapr_core_dt_populate; drck->dt_populate = spapr_core_dt_populate;
drck->unplug_timeout_seconds = 15;
} }
static void spapr_drc_pci_class_init(ObjectClass *k, void *data) static void spapr_drc_pci_class_init(ObjectClass *k, void *data)

View File

@ -187,8 +187,6 @@ typedef struct SpaprDrc {
bool unplug_requested; bool unplug_requested;
void *fdt; void *fdt;
int fdt_start_offset; int fdt_start_offset;
QEMUTimer *unplug_timeout_timer;
} SpaprDrc; } SpaprDrc;
struct SpaprMachineState; struct SpaprMachineState;
@ -211,8 +209,6 @@ typedef struct SpaprDrcClass {
int (*dt_populate)(SpaprDrc *drc, struct SpaprMachineState *spapr, int (*dt_populate)(SpaprDrc *drc, struct SpaprMachineState *spapr,
void *fdt, int *fdt_start_offset, Error **errp); void *fdt, int *fdt_start_offset, Error **errp);
int unplug_timeout_seconds;
} SpaprDrcClass; } SpaprDrcClass;
typedef struct SpaprDrcPhysical { typedef struct SpaprDrcPhysical {
@ -248,7 +244,6 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask);
*/ */
void spapr_drc_attach(SpaprDrc *drc, DeviceState *d); void spapr_drc_attach(SpaprDrc *drc, DeviceState *d);
void spapr_drc_unplug_request(SpaprDrc *drc); void spapr_drc_unplug_request(SpaprDrc *drc);
int spapr_drc_unplug_timeout_remaining_sec(SpaprDrc *drc);
/* /*
* Reset all DRCs, causing pending hot-plug/unplug requests to complete. * Reset all DRCs, causing pending hot-plug/unplug requests to complete.

View File

@ -797,14 +797,6 @@ static inline int64_t get_max_clock_jump(void)
return 60 * NANOSECONDS_PER_SECOND; return 60 * NANOSECONDS_PER_SECOND;
} }
/**
* timer_deadline_ms:
*
* Returns the remaining milliseconds for @timer to expire, or zero
* if the timer is no longer pending.
*/
int64_t timer_deadline_ms(QEMUTimer *timer);
/* /*
* Low level clock functions * Low level clock functions
*/ */

View File

@ -242,19 +242,6 @@ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
return delta; return delta;
} }
/*
* Returns the time remaining for the deadline, in ms.
*/
int64_t timer_deadline_ms(QEMUTimer *timer)
{
if (timer_pending(timer)) {
return qemu_timeout_ns_to_ms(timer->expire_time) -
qemu_clock_get_ms(timer->timer_list->clock->type);
}
return 0;
}
/* Calculate the soonest deadline across all timerlists attached /* Calculate the soonest deadline across all timerlists attached
* to the clock. This is used for the icount timeout so we * to the clock. This is used for the icount timeout so we
* ignore whether or not the clock should be used in deadline * ignore whether or not the clock should be used in deadline