qemu/hw/s390x/event-facility.c
Markus Armbruster d2623129a7 qom: Drop parameter @errp of object_property_add() & friends
The only way object_property_add() can fail is when a property with
the same name already exists.  Since our property names are all
hardcoded, failure is a programming error, and the appropriate way to
handle it is passing &error_abort.
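
For instance, the one caller in this file, init_event_facility() below, changes
roughly like this (the pre-patch form is reconstructed from the old signature,
not quoted from the diff):

    /* before: errp explicitly ignored */
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes, NULL);

    /* after: no errp parameter, a duplicate property name aborts */
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes);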

Same for its variants, except for object_property_add_child(), which
additionally fails when the child already has a parent.  Parentage is
also under program control, so this is a programming error, too.

We have a bit over 500 callers.  Almost half of them pass
&error_abort, slightly fewer ignore errors, one test case handles
errors, and the remaining few callers pass them to their own callers.

The previous few commits demonstrated once again that ignoring
programming errors is a bad idea.

Of the few callers that pass on errors, several violate the Error API.
The Error ** argument must be NULL, &error_abort, &error_fatal, or a
pointer to a variable containing NULL.  Passing an argument of the
latter kind twice without clearing it in between is wrong: if the
first call sets an error, it no longer points to NULL for the second
call.  ich9_pm_add_properties(), sparc32_ledma_realize(),
sparc32_dma_realize(), xilinx_axidma_realize(), xilinx_enet_realize()
are wrong that way.
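
The broken pattern, sketched with illustrative names and the pre-patch
signatures (this is not code taken from the files above):

    Error *err = NULL;

    object_property_add_bool(obj, "prop-a", get_a, set_a, &err);
    /* if the first call set an error, err no longer points to NULL here */
    object_property_add_bool(obj, "prop-b", get_b, set_b, &err);
    error_propagate(errp, err);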

When the one appropriate choice of argument is &error_abort, letting
users pick the argument is a bad idea.

Drop parameter @errp and assert the preconditions instead.

There's one exception to "duplicate property name is a programming
error": the way object_property_add() implements the magic (and
undocumented) "automatic arrayification".  Don't drop @errp there.
Instead, rename object_property_add() to object_property_try_add(),
and add the obvious wrapper object_property_add().
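
The resulting split can be sketched roughly as follows (see qom/object.c for
the actual code):

    ObjectProperty *
    object_property_try_add(Object *obj, const char *name, const char *type,
                            ObjectPropertyAccessor *get,
                            ObjectPropertyAccessor *set,
                            ObjectPropertyRelease *release,
                            void *opaque, Error **errp);

    /* wrapper: a duplicate property name is now a programming error */
    ObjectProperty *
    object_property_add(Object *obj, const char *name, const char *type,
                        ObjectPropertyAccessor *get,
                        ObjectPropertyAccessor *set,
                        ObjectPropertyRelease *release,
                        void *opaque)
    {
        return object_property_try_add(obj, name, type, get, set, release,
                                       opaque, &error_abort);
    }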

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200505152926.18877-15-armbru@redhat.com>
[Two semantic rebase conflicts resolved]
2020-05-15 07:07:58 +02:00

/*
 * SCLP
 * Event Facility
 * handles SCLP event types
 *  - Signal Quiesce - system power down
 *  - ASCII Console Data - VT220 read and write
 *
 * Copyright IBM, Corp. 2012
 *
 * Authors:
 *  Heinz Graalfs <graalfs@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/s390x/sclp.h"
#include "migration/vmstate.h"
#include "hw/s390x/event-facility.h"

typedef struct SCLPEventsBus {
    BusState qbus;
} SCLPEventsBus;

/* we need to save 32 bit chunks for compatibility */
#ifdef HOST_WORDS_BIGENDIAN
#define RECV_MASK_LOWER 1
#define RECV_MASK_UPPER 0
#else /* little endian host */
#define RECV_MASK_LOWER 0
#define RECV_MASK_UPPER 1
#endif

struct SCLPEventFacility {
    SysBusDevice parent_obj;
    SCLPEventsBus sbus;
    /* guest's receive mask */
    union {
        uint32_t receive_mask_pieces[2];
        sccb_mask_t receive_mask;
    };
    /*
     * when false, we keep the same broken, backwards compatible behaviour as
     * before, allowing only masks of size exactly 4; when true, we implement
     * the architecture correctly, allowing all valid mask sizes. Needed for
     * migration toward older versions.
     */
    bool allow_all_mask_sizes;
    /* length of the receive mask */
    uint16_t mask_length;
};

/* return true if any child has event pending set */
static bool event_pending(SCLPEventFacility *ef)
{
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *event_class;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = DO_UPCAST(SCLPEvent, qdev, qdev);
        event_class = SCLP_EVENT_GET_CLASS(event);
        if (event->event_pending &&
            event_class->get_send_mask() & ef->receive_mask) {
            return true;
        }
    }
    return false;
}

static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_send_mask();
    }
    return mask;
}

static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef)
{
    sccb_mask_t mask;
    BusChild *kid;
    SCLPEventClass *child;

    mask = 0;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
        mask |= child->get_receive_mask();
    }
    return mask;
}
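
/* sanity check the lengths of all event buffers contained in the SCCB */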
static uint16_t write_event_length_check(SCCB *sccb)
{
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event;
    WriteEventData *wed = (WriteEventData *) sccb;

    event = (EventBufferHeader *) &wed->ebh;
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event->length);
        if (elen < sizeof(*event) || elen > slen) {
            return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
        }
        event = (void *) event + elen;
    }
    if (slen) {
        return SCLP_RC_INCONSISTENT_LENGTHS;
    }
    return SCLP_RC_NORMAL_COMPLETION;
}
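
/* pass the event buffer to the first child event type that can handle it */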
static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
                                       EventBufferHeader *event_buf, SCCB *sccb)
{
    uint16_t rc;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;

    rc = SCLP_RC_INVALID_FUNCTION;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (ec->write_event_data &&
            ec->can_handle_event(event_buf->type)) {
            rc = ec->write_event_data(event, event_buf);
            break;
        }
    }
    return rc;
}

static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
{
    uint16_t rc;
    int slen;
    unsigned elen = 0;
    EventBufferHeader *event_buf;
    WriteEventData *wed = (WriteEventData *) sccb;

    event_buf = &wed->ebh;
    rc = SCLP_RC_NORMAL_COMPLETION;

    /* loop over all contained event buffers */
    for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
        elen = be16_to_cpu(event_buf->length);

        /* in case of a previous error mark all trailing buffers
         * as not accepted */
        if (rc != SCLP_RC_NORMAL_COMPLETION) {
            event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
        } else {
            rc = handle_write_event_buf(ef, event_buf, sccb);
        }
        event_buf = (void *) event_buf + elen;
    }
    return rc;
}
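
/* SCLP Write Event Data: validate the SCCB, then dispatch its event buffers */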
static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
        return;
    }
    if (be16_to_cpu(sccb->h.length) < 8) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        return;
    }
    /* first do a sanity check of the write events */
    sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));

    /* if no early error, then execute */
    if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
        sccb->h.response_code =
            cpu_to_be16(handle_sccb_write_events(ef, sccb));
    }
}
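
/* read pending event data from all children selected by @mask into the SCCB */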
static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
                                        sccb_mask_t mask)
{
    uint16_t rc;
    int slen;
    unsigned elen;
    BusChild *kid;
    SCLPEvent *event;
    SCLPEventClass *ec;
    EventBufferHeader *event_buf;
    ReadEventData *red = (ReadEventData *) sccb;

    event_buf = &red->ebh;
    event_buf->length = 0;
    slen = sizeof(sccb->data);

    rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;

    QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        event = (SCLPEvent *) qdev;
        ec = SCLP_EVENT_GET_CLASS(event);

        if (mask & ec->get_send_mask()) {
            if (ec->read_event_data(event, event_buf, &slen)) {
                elen = be16_to_cpu(event_buf->length);
                event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
                rc = SCLP_RC_NORMAL_COMPLETION;
            }
        }
    }

    if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
        /* architecture suggests to reset variable-length-response bit */
        sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
        /* with a new length value */
        sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
    }
    return rc;
}

/* copy up to src_len bytes and fill the rest of dst with zeroes */
static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len,
                      uint16_t src_len)
{
    int i;

    for (i = 0; i < dst_len; i++) {
        dst[i] = i < src_len ? src[i] : 0;
    }
}

static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
{
    sccb_mask_t sclp_active_selection_mask;
    sccb_mask_t sclp_cp_receive_mask;

    ReadEventData *red = (ReadEventData *) sccb;

    if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
        return;
    }

    switch (sccb->h.function_code) {
    case SCLP_UNCONDITIONAL_READ:
        sccb->h.response_code = cpu_to_be16(
            handle_sccb_read_events(ef, sccb, ef->receive_mask));
        break;
    case SCLP_SELECTIVE_READ:
        /* get active selection mask */
        sclp_cp_receive_mask = ef->receive_mask;
        copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
                  sizeof(sclp_active_selection_mask), ef->mask_length);
        sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
        if (!sclp_cp_receive_mask ||
            (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
            sccb->h.response_code =
                cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
        } else {
            sccb->h.response_code = cpu_to_be16(
                handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
        }
        break;
    default:
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
    }
}
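
/* SCLP Write Event Mask: store the guest's receive mask, report the host's masks */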
static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
{
    WriteEventMask *we_mask = (WriteEventMask *) sccb;
    uint16_t mask_length = be16_to_cpu(we_mask->mask_length);
    sccb_mask_t tmp_mask;

    if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) ||
        ((mask_length != 4) && !ef->allow_all_mask_sizes)) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
        return;
    }

    /*
     * Note: We currently only support masks up to 8 byte length;
     *       the remainder is filled up with zeroes. Older Linux
     *       kernels use a 4 byte mask length, newer ones can use both
     *       8 or 4 depending on what is available on the host.
     */

    /* keep track of the guest's capability masks */
    copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
              sizeof(tmp_mask), mask_length);
    ef->receive_mask = be64_to_cpu(tmp_mask);

    /* return the SCLP's capability masks to the guest */
    tmp_mask = cpu_to_be64(get_host_receive_mask(ef));
    copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));
    tmp_mask = cpu_to_be64(get_host_send_mask(ef));
    copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
              mask_length, sizeof(tmp_mask));

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
    ef->mask_length = mask_length;
}

/* qemu object creation and initialization functions */

#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"

static void sclp_events_bus_realize(BusState *bus, Error **errp)
{
    Error *err = NULL;
    BusChild *kid;

    /* TODO: recursive realization has to be done in common code */
    QTAILQ_FOREACH(kid, &bus->children, sibling) {
        DeviceState *dev = kid->child;

        object_property_set_bool(OBJECT(dev), true, "realized", &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }
}

static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bc = BUS_CLASS(klass);

    bc->realize = sclp_events_bus_realize;
}

static const TypeInfo sclp_events_bus_info = {
    .name = TYPE_SCLP_EVENTS_BUS,
    .parent = TYPE_BUS,
    .class_init = sclp_events_bus_class_init,
};

static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
{
    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMD_READ_EVENT_DATA:
        read_event_data(ef, sccb);
        break;
    case SCLP_CMD_WRITE_EVENT_DATA:
        write_event_data(ef, sccb);
        break;
    case SCLP_CMD_WRITE_EVENT_MASK:
        write_event_mask(ef, sccb);
        break;
    }
}

static bool vmstate_event_facility_mask64_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return (ef->receive_mask & 0xFFFFFFFF) != 0;
}

static bool vmstate_event_facility_mask_length_needed(void *opaque)
{
    SCLPEventFacility *ef = opaque;

    return ef->allow_all_mask_sizes;
}

static const VMStateDescription vmstate_event_facility_mask64 = {
    .name = "vmstate-event-facility/mask64",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask64_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_event_facility_mask_length = {
    .name = "vmstate-event-facility/mask_length",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = vmstate_event_facility_mask_length_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(mask_length, SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_event_facility = {
    .name = "vmstate-event-facility",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_event_facility_mask64,
        &vmstate_event_facility_mask_length,
        NULL
    }
};

static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value,
                                                Error **errp)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    ef->allow_all_mask_sizes = value;
}

static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp)
{
    SCLPEventFacility *ef = (SCLPEventFacility *)obj;

    return ef->allow_all_mask_sizes;
}

static void init_event_facility(Object *obj)
{
    SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
    DeviceState *sdev = DEVICE(obj);
    Object *new;

    event_facility->mask_length = 4;
    event_facility->allow_all_mask_sizes = true;
    object_property_add_bool(obj, "allow_all_mask_sizes",
                             sclp_event_get_allow_all_mask_sizes,
                             sclp_event_set_allow_all_mask_sizes);

    /* Spawn a new bus for SCLP events */
    qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
                        TYPE_SCLP_EVENTS_BUS, sdev, NULL);

    new = object_new(TYPE_SCLP_QUIESCE);
    object_property_add_child(obj, TYPE_SCLP_QUIESCE, new);
    object_unref(new);
    qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));

    new = object_new(TYPE_SCLP_CPU_HOTPLUG);
    object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new);
    object_unref(new);
    qdev_set_parent_bus(DEVICE(new), BUS(&event_facility->sbus));
    /* the facility will automatically realize the devices via the bus */
}

static void reset_event_facility(DeviceState *dev)
{
    SCLPEventFacility *sdev = EVENT_FACILITY(dev);

    sdev->receive_mask = 0;
}

static void init_event_facility_class(ObjectClass *klass, void *data)
{
    SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(sbdc);
    SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);

    dc->reset = reset_event_facility;
    dc->vmsd = &vmstate_event_facility;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    k->command_handler = command_handler;
    k->event_pending = event_pending;
}

static const TypeInfo sclp_event_facility_info = {
    .name = TYPE_SCLP_EVENT_FACILITY,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = init_event_facility,
    .instance_size = sizeof(SCLPEventFacility),
    .class_init = init_event_facility_class,
    .class_size = sizeof(SCLPEventFacilityClass),
};

static void event_realize(DeviceState *qdev, Error **errp)
{
    SCLPEvent *event = SCLP_EVENT(qdev);
    SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);

    if (child->init) {
        int rc = child->init(event);
        if (rc < 0) {
            error_setg(errp, "SCLP event initialization failed.");
            return;
        }
    }
}

static void event_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->bus_type = TYPE_SCLP_EVENTS_BUS;
    dc->realize = event_realize;
}

static const TypeInfo sclp_event_type_info = {
    .name = TYPE_SCLP_EVENT,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCLPEvent),
    .class_init = event_class_init,
    .class_size = sizeof(SCLPEventClass),
    .abstract = true,
};

static void register_types(void)
{
    type_register_static(&sclp_events_bus_info);
    type_register_static(&sclp_event_facility_info);
    type_register_static(&sclp_event_type_info);
}

type_init(register_types)
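
/* find the SCLP events bus; returns NULL if the event facility does not exist */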
BusState *sclp_get_event_facility_bus(void)
{
    Object *busobj;
    SCLPEventsBus *sbus;

    busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL);
    sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS);
    if (!sbus) {
        return NULL;
    }

    return &sbus->qbus;
}