2012-10-29 06:13:22 +04:00
|
|
|
/*
|
|
|
|
* SCLP Support
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2012
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Christian Borntraeger <borntraeger@de.ibm.com>
|
|
|
|
* Heinz Graalfs <graalfs@linux.vnet.ibm.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or (at your
|
|
|
|
* option) any later version. See the COPYING file in the top-level directory.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "cpu.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/kvm.h"
|
2012-12-17 21:19:49 +04:00
|
|
|
#include "exec/memory.h"
|
2014-01-20 23:51:49 +04:00
|
|
|
#include "sysemu/sysemu.h"
|
2014-08-28 19:25:35 +04:00
|
|
|
#include "exec/address-spaces.h"
|
2015-05-29 14:14:50 +03:00
|
|
|
#include "hw/boards.h"
|
2013-02-04 18:40:22 +04:00
|
|
|
#include "hw/s390x/sclp.h"
|
2013-12-18 13:10:49 +04:00
|
|
|
#include "hw/s390x/event-facility.h"
|
2015-01-09 11:04:38 +03:00
|
|
|
#include "hw/s390x/s390-pci-bus.h"
|
2012-10-29 06:13:22 +04:00
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
static inline SCLPDevice *get_sclp_device(void)
|
|
|
|
{
|
|
|
|
return SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
|
|
|
|
}
|
|
|
|
|
2012-10-29 06:13:22 +04:00
|
|
|
/* Provide information about the configuration, CPUs and storage */
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Handle the Read SCP Info SCLP command: fill @sccb with the machine's
 * CPU inventory, memory increment geometry and supported facilities.
 * All multi-byte SCCB fields are stored big-endian.
 */
static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadInfo *read_info = (ReadInfo *) sccb;
    MachineState *machine = MACHINE(qdev_get_machine());
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;
    int rnsize, rnmax;
    /* usable hotplug slots: bounded by what the accelerator supports */
    int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    /* CPU information */
    read_info->entries_cpu = cpu_to_be16(cpu_count);
    read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
    read_info->highest_cpu = cpu_to_be16(max_cpus);

    for (i = 0; i < cpu_count; i++) {
        read_info->entries[i].address = i;
        read_info->entries[i].type = 0;
    }

    read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
                                        SCLP_HAS_PCI_RECONFIG);

    /* Memory Hotplug is only supported for the ccw machine type */
    if (mhd) {
        /* Start with the minimum subregion size and grow it (powers of
         * two) until the available slots can cover all standby memory. */
        mhd->standby_subregion_size = MEM_SECTION_SIZE;
        /* Deduct the memory slot already used for core */
        if (slots > 0) {
            while ((mhd->standby_subregion_size * (slots - 1)
                    < mhd->standby_mem_size)) {
                mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
            }
        }
        /*
         * Initialize mapping of guest standby memory sections indicating which
         * are and are not online. Assume all standby memory begins offline.
         */
        if (mhd->standby_state_map == 0) {
            if (mhd->standby_mem_size % mhd->standby_subregion_size) {
                /* round up to a whole number of subregions */
                mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
                                             mhd->standby_subregion_size + 1) *
                                             (mhd->standby_subregion_size /
                                             MEM_SECTION_SIZE));
            } else {
                mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
                                                   MEM_SECTION_SIZE);
            }
        }
        /* core memory plus alignment padding; standby memory starts here */
        mhd->padded_ram_size = ram_size + mhd->pad_size;
        /* rzm: size in bytes of one storage increment */
        mhd->rzm = 1 << mhd->increment_size;

        read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
    }

    /* increment (region) size in MB; small values fit the 1-byte field */
    rnsize = 1 << (sclp->increment_size - 20);
    if (rnsize <= 128) {
        read_info->rnsize = rnsize;
    } else {
        read_info->rnsize = 0;
        read_info->rnsize2 = cpu_to_be32(rnsize);
    }

    /* maximum increment number; wide values go in the 8-byte field */
    rnmax = machine->maxram_size >> sclp->increment_size;
    if (rnmax < 0x10000) {
        read_info->rnmax = cpu_to_be16(rnmax);
    } else {
        read_info->rnmax = cpu_to_be16(0);
        read_info->rnmax2 = cpu_to_be64(rnmax);
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Read Storage Element Info for element 0 (core memory): report the
 * assigned increments of initial RAM.  Fails with an SCLP error response
 * when memory hotplug is not configured or the data would not fit.
 */
static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
{
    int i, assigned;
    int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    /* command only valid when the memory hotplug device exists */
    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    /* the 16-bit entry list cannot describe more than 0xffff increments */
    if ((ram_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding core memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    assigned = ram_size >> mhd->increment_size;
    storage_info->assigned = cpu_to_be16(assigned);

    /* one sub-increment id per assigned increment, consecutively numbered */
    for (i = 0; i < assigned; i++) {
        storage_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Read Storage Element Info for element 1 (standby memory): report how
 * many increments of standby memory exist.  All standby increments are
 * reported both as assigned and as standby.
 */
static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    /* command only valid when the memory hotplug device exists */
    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    /* increment counts beyond 0xffff do not fit the 16-bit fields */
    if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
        return;
    }

    /* Return information regarding standby memory */
    storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
    storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
                                         mhd->increment_size);
    storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
                                        mhd->increment_size);
    sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Attach Storage Element: list the sub-increment ids of all standby
 * memory increments so the guest can subsequently assign them.
 * Only storage element 1 (standby memory) may be attached.
 */
static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
                                   uint16_t element)
{
    int i, assigned, subincrement_id;
    AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();

    /* command only valid when the memory hotplug device exists */
    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    /* element 0 is core memory and cannot be attached */
    if (element != 1) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }

    assigned = mhd->standby_mem_size >> mhd->increment_size;
    attach_info->assigned = cpu_to_be16(assigned);
    /* ids continue where the core-memory sub-increments left off */
    subincrement_id = ((ram_size >> mhd->increment_size) << 16)
                      + SCLP_STARTING_SUBINCREMENT_ID;
    for (i = 0; i < assigned; i++) {
        attach_info->entries[i] = cpu_to_be32(subincrement_id);
        subincrement_id += SCLP_INCREMENT_UNIT;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Assign Storage: bring the standby memory increment named by the
 * guest-supplied region number (rn) online.  Allocates and maps a new
 * RAM subregion on first use, then marks the section online in the
 * standby state map.
 */
static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    uint64_t this_subregion_size;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t assign_addr;
    MemoryRegion *sysmem = get_system_memory();

    /* command only valid when the memory hotplug device exists */
    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    /* rn is 1-based; convert to a guest physical address */
    assign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* only act on section-aligned addresses inside the standby area */
    if ((assign_addr % MEM_SECTION_SIZE == 0) &&
        (assign_addr >= mhd->padded_ram_size)) {
        /* Re-use existing memory region if found */
        mr = memory_region_find(sysmem, assign_addr, 1).mr;
        /* drop the reference memory_region_find took; we only test for NULL */
        memory_region_unref(mr);
        if (!mr) {

            MemoryRegion *standby_ram = g_new(MemoryRegion, 1);

            /* offset to align to standby_subregion_size for allocation */
            ram_addr_t offset = assign_addr -
                                (assign_addr - mhd->padded_ram_size)
                                % mhd->standby_subregion_size;

            /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
            char id[16];
            snprintf(id, 16, "standby.ram%d",
                     (int)((offset - mhd->padded_ram_size) /
                     mhd->standby_subregion_size) + 1);

            /* Allocate a subregion of the calculated standby_subregion_size */
            if (offset + mhd->standby_subregion_size >
                mhd->padded_ram_size + mhd->standby_mem_size) {
                /* last subregion may be shorter than the nominal size */
                this_subregion_size = mhd->padded_ram_size +
                                      mhd->standby_mem_size - offset;
            } else {
                this_subregion_size = mhd->standby_subregion_size;
            }

            memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
                                   &error_fatal);
            /* This is a hack to make memory hotunplug work again. Once we have
             * subdevices, we have to unparent them when unassigning memory,
             * instead of doing it via the ref count of the MemoryRegion. */
            object_ref(OBJECT(standby_ram));
            object_unparent(OBJECT(standby_ram));
            vmstate_register_ram_global(standby_ram);
            memory_region_add_subregion(sysmem, offset, standby_ram);
        }
        /* The specified subregion is no longer in standby */
        mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
                               / MEM_SECTION_SIZE] = 1;
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Unassign Storage: take the standby memory increment named by the
 * guest-supplied region number (rn) offline, and tear down its backing
 * subregion once every section within that subregion is offline again.
 */
static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
{
    MemoryRegion *mr = NULL;
    AssignStorage *assign_info = (AssignStorage *) sccb;
    sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
    ram_addr_t unassign_addr;
    MemoryRegion *sysmem = get_system_memory();

    /* command only valid when the memory hotplug device exists */
    if (!mhd) {
        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
        return;
    }
    /* rn is 1-based; convert to a guest physical address */
    unassign_addr = (assign_info->rn - 1) * mhd->rzm;

    /* if the addr is a multiple of 256 MB */
    if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
        (unassign_addr >= mhd->padded_ram_size)) {
        /* mark this section offline */
        mhd->standby_state_map[(unassign_addr -
                           mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;

        /* find the specified memory region and destroy it */
        mr = memory_region_find(sysmem, unassign_addr, 1).mr;
        /* drop the reference memory_region_find took */
        memory_region_unref(mr);
        if (mr) {
            int i;
            int is_removable = 1;
            /* start of the subregion containing unassign_addr */
            ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
                                     (unassign_addr - mhd->padded_ram_size)
                                     % mhd->standby_subregion_size);
            /* Mark all affected subregions as 'standby' once again */
            for (i = 0;
                 i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
                 i++) {

                /* any section still online keeps the subregion mapped */
                if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
                    is_removable = 0;
                    break;
                }
            }
            if (is_removable) {
                memory_region_del_subregion(sysmem, mr);
                /* drop the reference taken via object_ref() in assign */
                object_unref(OBJECT(mr));
            }
        }
    }
    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
|
|
|
|
|
2014-01-20 23:51:49 +04:00
|
|
|
/* Provide information about the CPU */
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Handle the Read CPU Info SCLP command: report all configured CPUs.
 * No standby CPUs are reported.  SCCB fields are stored big-endian.
 */
static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
{
    ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
    CPUState *cpu;
    int cpu_count = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        cpu_count++;
    }

    cpu_info->nr_configured = cpu_to_be16(cpu_count);
    cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
    cpu_info->nr_standby = cpu_to_be16(0);

    /*
     * The standby offset is 16-byte for each CPU.  Compute it from the
     * host-endian values: the previous code summed the already
     * big-endian nr_configured/offset_configured fields and converted
     * the result again, which yields a wrong offset on little-endian
     * hosts.
     */
    cpu_info->offset_standby = cpu_to_be16(offsetof(ReadCpuInfo, entries)
                                           + cpu_count * sizeof(CPUEntry));

    for (i = 0; i < cpu_count; i++) {
        cpu_info->entries[i].address = i;
        cpu_info->entries[i].type = 0;
    }

    sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
|
|
|
|
|
2015-05-27 11:04:56 +03:00
|
|
|
/*
 * Dispatch an SCLP command to the matching class hook.  Commands not
 * handled here are forwarded to the event facility's command handler,
 * which also produces the error response for unknown commands.
 */
static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
{
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);

    /* the low bits of the command word carry per-command modifiers */
    switch (code & SCLP_CMD_CODE_MASK) {
    case SCLP_CMDW_READ_SCP_INFO:
    case SCLP_CMDW_READ_SCP_INFO_FORCED:
        sclp_c->read_SCP_info(sclp, sccb);
        break;
    case SCLP_CMDW_READ_CPU_INFO:
        sclp_c->read_cpu_info(sclp, sccb);
        break;
    case SCLP_READ_STORAGE_ELEMENT_INFO:
        /* bits 8-15 select the storage element to query */
        if (code & 0xff00) {
            sclp_c->read_storage_element1_info(sclp, sccb);
        } else {
            sclp_c->read_storage_element0_info(sclp, sccb);
        }
        break;
    case SCLP_ATTACH_STORAGE_ELEMENT:
        sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
        break;
    case SCLP_ASSIGN_STORAGE:
        sclp_c->assign_storage(sclp, sccb);
        break;
    case SCLP_UNASSIGN_STORAGE:
        sclp_c->unassign_storage(sclp, sccb);
        break;
    case SCLP_CMDW_CONFIGURE_PCI:
        s390_pci_sclp_configure(1, sccb);
        break;
    case SCLP_CMDW_DECONFIGURE_PCI:
        s390_pci_sclp_configure(0, sccb);
        break;
    default:
        efc->command_handler(ef, sccb, code);
        break;
    }
}
|
|
|
|
|
2014-01-13 15:55:55 +04:00
|
|
|
/*
 * Entry point for the SERVICE CALL instruction.
 *
 * @env:  CPU state of the calling vcpu (for PSW/prefix checks)
 * @sccb: guest physical address of the SCCB
 * @code: SCLP command word
 *
 * Returns 0 on success or a negative program-check code (-PGM_*)
 * that the caller must inject into the guest.
 */
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
{
    SCLPDevice *sclp = get_sclp_device();
    SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
    int r = 0;
    SCCB work_sccb;

    hwaddr sccb_len = sizeof(SCCB);

    /* first some basic checks on program checks */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* instruction is privileged; reject problem-state callers */
        r = -PGM_PRIVILEGED;
        goto out;
    }
    if (cpu_physical_memory_is_io(sccb)) {
        /* the SCCB must live in real memory, not an MMIO region */
        r = -PGM_ADDRESSING;
        goto out;
    }
    /* SCCB must be 8-byte aligned, below 2G, and outside the low-core
     * pages (absolute 0 and the vcpu's prefix area) */
    if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
        || (sccb & ~0x7ffffff8UL) != 0) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    /*
     * we want to work on a private copy of the sccb, to prevent guests
     * from playing dirty tricks by modifying the memory content after
     * the host has checked the values
     */
    cpu_physical_memory_read(sccb, &work_sccb, sccb_len);

    /* Valid sccb sizes */
    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
        r = -PGM_SPECIFICATION;
        goto out;
    }

    sclp_c->execute(sclp, (SCCB *)&work_sccb, code);

    /* copy the (possibly shortened) response back to the guest */
    cpu_physical_memory_write(sccb, &work_sccb,
                              be16_to_cpu(work_sccb.h.length));

    /* notify the guest that the command has completed */
    sclp_c->service_interrupt(sclp, sccb);

out:
    return r;
}
|
|
|
|
|
2015-05-13 16:06:44 +03:00
|
|
|
/*
 * Raise a service-signal external interrupt for @sccb, tagging the
 * interrupt parameter with an event-pending flag in bit 0.  If there
 * is neither an SCCB address nor a pending event, no interrupt is sent.
 */
static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
{
    SCLPEventFacility *ef = sclp->event_facility;
    SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);
    uint32_t param = sccb & ~3;

    /* Indicate whether an event is still pending */
    if (efc->event_pending(ef)) {
        param |= 1;
    }

    if (!param) {
        /* No need to send an interrupt, there's nothing to be notified about */
        return;
    }
    s390_sclp_extint(param);
}
|
|
|
|
|
2015-05-13 16:06:44 +03:00
|
|
|
/* Public wrapper: deliver a service interrupt via the singleton SCLP device. */
void sclp_service_interrupt(uint32_t sccb)
{
    SCLPDevice *sclp = get_sclp_device();

    SCLP_GET_CLASS(sclp)->service_interrupt(sclp, sccb);
}
|
|
|
|
|
2012-10-29 06:13:22 +04:00
|
|
|
/* qemu object creation and initialization functions */
|
|
|
|
|
2012-10-29 06:13:23 +04:00
|
|
|
/* Create the SCLP device, attach it to the machine object and realize it. */
void s390_sclp_init(void)
{
    Object *new = object_new(TYPE_SCLP);

    object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
                              NULL);
    /* the machine's child property now holds the only needed reference */
    object_unref(OBJECT(new));
    qdev_init_nofail(DEVICE(new));
}
|
2014-08-28 19:25:32 +04:00
|
|
|
|
2015-05-27 10:49:43 +03:00
|
|
|
/*
 * Realize the SCLP device: realize the embedded event facility and
 * apply the guest memory limit derived from maxram_size.
 */
static void sclp_realize(DeviceState *dev, Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    SCLPDevice *sclp = SCLP(dev);
    Error *l_err = NULL;
    uint64_t hw_limit;
    int ret;

    object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
                             &l_err);
    if (l_err) {
        goto error;
    }

    ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
    if (ret == -E2BIG) {
        /*
         * Note: no "qemu: " prefix in the messages below — the error
         * reporting machinery already prepends the program name, so a
         * hard-coded prefix would be printed twice.
         */
        error_setg(&l_err, "host supports a maximum of %" PRIu64 " GB",
                   hw_limit >> 30);
        goto error;
    } else if (ret) {
        error_setg(&l_err, "setting the guest size failed");
        goto error;
    }
    return;
error:
    assert(l_err);
    error_propagate(errp, l_err);
}
|
|
|
|
|
2015-05-29 14:53:08 +03:00
|
|
|
/*
 * Compute the storage increment size and split the configured memory
 * into core, padding and standby areas.  Rounds machine->ram_size and
 * maxram_size down to increment boundaries and updates the global
 * ram_size accordingly.
 */
static void sclp_memory_init(SCLPDevice *sclp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    ram_addr_t initial_mem = machine->ram_size;
    ram_addr_t max_mem = machine->maxram_size;
    ram_addr_t standby_mem = max_mem - initial_mem;
    ram_addr_t pad_mem = 0;
    int increment_size = 20;   /* exponent of 2: start at 1 MB increments */

    /* The storage increment size is a multiple of 1M and is a power of 2.
     * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
     * The variable 'increment_size' is an exponent of 2 that can be
     * used to calculate the size (in bytes) of an increment. */
    while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
        increment_size++;
    }
    if (machine->ram_slots) {
        /* standby memory must obey the same increment-count limit */
        while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
            increment_size++;
        }
    }
    sclp->increment_size = increment_size;

    /* The core and standby memory areas need to be aligned with
     * the increment size. In effect, this can cause the
     * user-specified memory size to be rounded down to align
     * with the nearest increment boundary. */
    initial_mem = initial_mem >> increment_size << increment_size;
    standby_mem = standby_mem >> increment_size << increment_size;

    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
       calculate the pad size necessary to force this boundary. */
    if (machine->ram_slots && standby_mem) {
        /* hotplug requested: create the memory hotplug device */
        sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();

        if (initial_mem % MEM_SECTION_SIZE) {
            pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
        }
        mhd->increment_size = increment_size;
        mhd->pad_size = pad_mem;
        mhd->standby_mem_size = standby_mem;
    }
    machine->ram_size = initial_mem;
    machine->maxram_size = initial_mem + pad_mem + standby_mem;
    /* let's propagate the changed ram size into the global variable. */
    ram_size = initial_mem;
}
|
|
|
|
|
2015-05-27 10:49:43 +03:00
|
|
|
/*
 * Instance init: create the event facility as a child of the SCLP
 * device, hook it onto the default sysbus, and set up memory layout.
 */
static void sclp_init(Object *obj)
{
    SCLPDevice *sclp = SCLP(obj);
    Object *new;

    new = object_new(TYPE_SCLP_EVENT_FACILITY);
    object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
    /* qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS */
    qdev_set_parent_bus(DEVICE(new), sysbus_get_default());
    /* the child property keeps the reference; drop ours */
    object_unref(new);
    sclp->event_facility = EVENT_FACILITY(new);

    sclp_memory_init(sclp);
}
|
|
|
|
|
|
|
|
/* Class init: wire up the default SCLP command implementations. */
static void sclp_class_init(ObjectClass *oc, void *data)
{
    SCLPDeviceClass *sc = SCLP_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->desc = "SCLP (Service-Call Logical Processor)";
    dc->realize = sclp_realize;
    /* created internally by the machine; never user-pluggable */
    dc->hotpluggable = false;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    sc->read_SCP_info = read_SCP_info;
    sc->read_storage_element0_info = read_storage_element0_info;
    sc->read_storage_element1_info = read_storage_element1_info;
    sc->attach_storage_element = attach_storage_element;
    sc->assign_storage = assign_storage;
    sc->unassign_storage = unassign_storage;
    sc->read_cpu_info = sclp_read_cpu_info;
    sc->execute = sclp_execute;
    sc->service_interrupt = service_interrupt;
}
|
|
|
|
|
|
|
|
static TypeInfo sclp_info = {
|
|
|
|
.name = TYPE_SCLP,
|
|
|
|
.parent = TYPE_DEVICE,
|
|
|
|
.instance_init = sclp_init,
|
|
|
|
.instance_size = sizeof(SCLPDevice),
|
|
|
|
.class_init = sclp_class_init,
|
|
|
|
.class_size = sizeof(SCLPDeviceClass),
|
|
|
|
};
|
|
|
|
|
2014-08-28 19:25:32 +04:00
|
|
|
/*
 * Create and realize the memory hotplug device as a child of the
 * machine, then return it via the same path lookup used elsewhere.
 */
sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
{
    DeviceState *dev;
    dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
    object_property_add_child(qdev_get_machine(),
                              TYPE_SCLP_MEMORY_HOTPLUG_DEV,
                              OBJECT(dev), NULL);
    qdev_init_nofail(dev);
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}
|
|
|
|
|
|
|
|
/* Return the memory hotplug device, or NULL if it was never created
 * (i.e. the machine was started without hotpluggable memory). */
sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
{
    return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
                                   TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
}
|
|
|
|
|
2015-03-17 15:44:39 +03:00
|
|
|
/* Class init for the memory hotplug helper device: categorize it only. */
static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
                                               void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
|
|
|
|
|
2014-08-28 19:25:32 +04:00
|
|
|
static TypeInfo sclp_memory_hotplug_dev_info = {
|
|
|
|
.name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
|
|
|
|
.parent = TYPE_SYS_BUS_DEVICE,
|
|
|
|
.instance_size = sizeof(sclpMemoryHotplugDev),
|
2015-03-17 15:44:39 +03:00
|
|
|
.class_init = sclp_memory_hotplug_dev_class_init,
|
2014-08-28 19:25:32 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Register both QOM types with the type system at module init time. */
static void register_types(void)
{
    type_register_static(&sclp_memory_hotplug_dev_info);
    type_register_static(&sclp_info);
}
|
|
|
|
type_init(register_types);
|