s390x changes:

- support for IDA (indirect addressing in ccws) via ccw data stream
 - support for extended TOD-Clock (z14 feature)
 - various fixes and improvements all over the place
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJZ11JWAAoJEN7Pa5PG8C+vNC0P/2ZadI4c5kErCCGZ4t/A7Q4y
 yNQL8QygvT1qlV4ENHMRgPWo3NfGF+VSgQ7dmNQmxBePa5K4DkdDL5p1ryCXpp3w
 oZNw2tpspdDJ4P+ej9Y1Fmb+6JVbht71Km3bU6SBES/QMUYO9+EfQ1YJzk3G9DFE
 cdXrkuZy2rmcfW95wh/Rq0FxZSsIl7Vjy5XPM2TX69TnKCgoHkG/jcKBa9uZdPEd
 3EB/7T1Us5ULmgpiJxVFsiREa4VPARnpfgCSTZofPdro1ob+Kv0Bs8ke9qy0o8lF
 rHKRvT8//P6xjA9wLU/oQQax/HaC0u9HEsTgoUk65zF2N+MotSskotkZQqJCiVgU
 QispOWUdaYZNj6rcq7Njly7cZJJdroWH7zkZotMUUrzjSbdr6YY35Z5ulyX4MB41
 4Ej3aj6cbBdmmYAemNTjezCu4okc4nfcLSZh7RPfHlTzzF5XXIsvIl/a8KCMFQXe
 pIgxepoRqwKQInv5Csx+DYoyJ58J+6mXg1j/SWvzqH19OSr8PfFu9i/7V0DzCWyp
 LDYS4irZD/YOvJ6LPEmvSKj+VNvCSICbGi0QKiHEqeZC11ag//eefD7U+BHrBbWb
 p4OuvogpHllHSpB+b2YTHZYSfx3Fg3fB88OZuWO4RoCt+ianO8BqF5A0xRtVmmaJ
 so3QERNGdG7arOQ3K0+3
 =0/U5
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171006' into staging

s390x changes:
- support for IDA (indirect addressing in ccws) via ccw data stream
- support for extended TOD-Clock (z14 feature)
- various fixes and improvements all over the place

# gpg: Signature made Fri 06 Oct 2017 10:52:22 BST
# gpg:                using RSA key 0xDECF6B93C6F02FAF
# gpg: Good signature from "Cornelia Huck <conny@cornelia-huck.de>"
# gpg:                 aka "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg:                 aka "Cornelia Huck <cornelia.huck@de.ibm.com>"
# gpg:                 aka "Cornelia Huck <cohuck@kernel.org>"
# gpg:                 aka "Cornelia Huck <cohuck@redhat.com>"
# Primary key fingerprint: C3D0 D66D C362 4FF6 A8C0  18CE DECF 6B93 C6F0 2FAF

* remotes/cohuck/tags/s390x-20171006: (33 commits)
  hw/s390x: Mark the "sclpquiesce" device with user_creatable = false
  s390x/tcg: initialize machine check queue
  s390x/sclp: mark sclp-cpu-hotplug as non-usercreatable
  s390x/sclp: Mark the sclp device with user_creatable = false
  s390/kvm: make TOD setting failures fatal for migration
  s390/kvm: Support for get/set of extended TOD-Clock for guest
  s390x/css: fix css migration compat handling
  s390x: sort some devices into categories
  s390x/tcg: make STFL store into the lowcore
  s390x: introduce and use S390_MAX_CPUS
  target/s390x: get rid of next_core_id
  s390x/cpumodel: fix max STFL(E) bit number
  s390x: raise CPU hotplug irq after really hotplugged
  MAINTAINERS: use KVM s390x maintainers for kvm-stubs.c and kvm_s390x.h
  s390x/3270: handle writes of arbitrary length
  s390x/3270: IDA support for 3270 via CcwDataStream
  Revert "s390x/ccw: create s390 phb conditionally"
  s390x/tcg: make idte/ipte use the new _real mmu
  s390x/tcg: make testblock use the new _real mmu
  s390x/tcg: make stora(g) use the new _real mmu
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a26a98dfb9
Peter Maydell, 2017-10-06 13:19:02 +01:00
31 changed files with 729 additions and 294 deletions


@ -299,6 +299,8 @@ M: Cornelia Huck <cohuck@redhat.com>
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: target/s390x/kvm.c
F: target/s390x/kvm_s390x.h
F: target/s390x/kvm-stub.c
F: target/s390x/ioinst.[ch]
F: target/s390x/machine.c
F: hw/intc/s390_flic.c


@ -30,7 +30,6 @@ typedef struct Terminal3270 {
uint8_t inv[INPUT_BUFFER_SIZE];
uint8_t outv[OUTPUT_BUFFER_SIZE];
int in_len;
int out_len;
bool handshake_done;
guint timer_tag;
} Terminal3270;
@ -145,7 +144,6 @@ static void chr_event(void *opaque, int event)
/* Ensure the initial status is correct; always reset them. */
t->in_len = 0;
t->out_len = 0;
t->handshake_done = false;
if (t->timer_tag) {
g_source_remove(t->timer_tag);
@ -182,14 +180,18 @@ static void terminal_init(EmulatedCcw3270Device *dev, Error **errp)
terminal_read, chr_event, NULL, t, NULL, true);
}
static int read_payload_3270(EmulatedCcw3270Device *dev, uint32_t cda,
uint16_t count)
static inline CcwDataStream *get_cds(Terminal3270 *t)
{
return &(CCW_DEVICE(&t->cdev)->sch->cds);
}
static int read_payload_3270(EmulatedCcw3270Device *dev)
{
Terminal3270 *t = TERMINAL_3270(dev);
int len;
len = MIN(count, t->in_len);
cpu_physical_memory_write(cda, t->inv, len);
len = MIN(ccw_dstream_avail(get_cds(t)), t->in_len);
ccw_dstream_write_buf(get_cds(t), t->inv, len);
t->in_len -= len;
return len;
@ -222,13 +224,14 @@ static int insert_IAC_escape_char(uint8_t *outv, int out_len)
* Write 3270 outbound to socket.
* Return the count of the 3270 data field on success, zero on failure.
*/
static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd,
uint32_t cda, uint16_t count)
static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd)
{
Terminal3270 *t = TERMINAL_3270(dev);
int retval = 0;
assert(count <= (OUTPUT_BUFFER_SIZE - 3) / 2);
int count = ccw_dstream_avail(get_cds(t));
int bound = (OUTPUT_BUFFER_SIZE - 3) / 2;
int len = MIN(count, bound);
int out_len = 0;
if (!t->handshake_done) {
if (!(t->outv[0] == IAC && t->outv[1] != IAC)) {
@ -243,16 +246,23 @@ static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd,
/* We just say we consumed all data if there's no backend. */
return count;
}
t->outv[0] = cmd;
cpu_physical_memory_read(cda, &t->outv[1], count);
t->out_len = count + 1;
t->out_len = insert_IAC_escape_char(t->outv, t->out_len);
t->outv[t->out_len++] = IAC;
t->outv[t->out_len++] = IAC_EOR;
t->outv[out_len++] = cmd;
do {
ccw_dstream_read_buf(get_cds(t), &t->outv[out_len], len);
count = ccw_dstream_avail(get_cds(t));
out_len += len;
retval = qemu_chr_fe_write_all(&t->chr, t->outv, t->out_len);
return (retval <= 0) ? 0 : (retval - 3);
out_len = insert_IAC_escape_char(t->outv, out_len);
if (!count) {
t->outv[out_len++] = IAC;
t->outv[out_len++] = IAC_EOR;
}
retval = qemu_chr_fe_write_all(&t->chr, t->outv, out_len);
len = MIN(count, bound);
out_len = 0;
} while (len && retval >= 0);
return (retval <= 0) ? 0 : get_cds(t)->count;
}
static Property terminal_properties[] = {


@ -28,7 +28,7 @@ static int handle_payload_3270_read(EmulatedCcw3270Device *dev, CCW1 *ccw)
return -EFAULT;
}
len = ck->read_payload_3270(dev, ccw->cda, ccw->count);
len = ck->read_payload_3270(dev);
ccw_dev->sch->curr_status.scsw.count = ccw->count - len;
return 0;
@ -45,7 +45,7 @@ static int handle_payload_3270_write(EmulatedCcw3270Device *dev, CCW1 *ccw)
return -EFAULT;
}
len = ck->write_payload_3270(dev, ccw->cmd_code, ccw->cda, ccw->count);
len = ck->write_payload_3270(dev, ccw->cmd_code);
if (len <= 0) {
return -EIO;
@ -160,6 +160,7 @@ static void emulated_ccw_3270_class_init(ObjectClass *klass, void *data)
dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
dc->realize = emulated_ccw_3270_realize;
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
}
static const TypeInfo emulated_ccw_3270_info = {


@ -787,6 +787,183 @@ static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
}
return ret;
}
/**
* Marks the stream broken if the requested length would run past the end of
* the stream. Returns -EINVAL if the stream is broken, otherwise the
* requested length (which may be zero).
*/
static inline int cds_check_len(CcwDataStream *cds, int len)
{
if (cds->at_byte + len > cds->count) {
cds->flags |= CDS_F_STREAM_BROKEN;
}
return cds->flags & CDS_F_STREAM_BROKEN ? -EINVAL : len;
}
static inline bool cds_ccw_addrs_ok(hwaddr addr, int len, bool ccw_fmt1)
{
return (addr + len) < (ccw_fmt1 ? (1UL << 31) : (1UL << 24));
}
static int ccw_dstream_rw_noflags(CcwDataStream *cds, void *buff, int len,
CcwDataStreamOp op)
{
int ret;
ret = cds_check_len(cds, len);
if (ret <= 0) {
return ret;
}
if (!cds_ccw_addrs_ok(cds->cda, len, cds->flags & CDS_F_FMT)) {
return -EINVAL; /* channel program check */
}
if (op == CDS_OP_A) {
goto incr;
}
ret = address_space_rw(&address_space_memory, cds->cda,
MEMTXATTRS_UNSPECIFIED, buff, len, op);
if (ret != MEMTX_OK) {
cds->flags |= CDS_F_STREAM_BROKEN;
return -EINVAL;
}
incr:
cds->at_byte += len;
cds->cda += len;
return 0;
}
/* returns values between 1 and bsz, where bsz is a power of 2 */
static inline uint16_t ida_continuous_left(hwaddr cda, uint64_t bsz)
{
return bsz - (cda & (bsz - 1));
}
static inline uint64_t ccw_ida_block_size(uint8_t flags)
{
if ((flags & CDS_F_C64) && !(flags & CDS_F_I2K)) {
return 1ULL << 12;
}
return 1ULL << 11;
}
static inline int ida_read_next_idaw(CcwDataStream *cds)
{
union {uint64_t fmt2; uint32_t fmt1; } idaw;
int ret;
hwaddr idaw_addr;
bool idaw_fmt2 = cds->flags & CDS_F_C64;
bool ccw_fmt1 = cds->flags & CDS_F_FMT;
if (idaw_fmt2) {
idaw_addr = cds->cda_orig + sizeof(idaw.fmt2) * cds->at_idaw;
if (idaw_addr & 0x07 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) {
return -EINVAL; /* channel program check */
}
ret = address_space_rw(&address_space_memory, idaw_addr,
MEMTXATTRS_UNSPECIFIED, (void *) &idaw.fmt2,
sizeof(idaw.fmt2), false);
cds->cda = be64_to_cpu(idaw.fmt2);
} else {
idaw_addr = cds->cda_orig + sizeof(idaw.fmt1) * cds->at_idaw;
if (idaw_addr & 0x03 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) {
return -EINVAL; /* channel program check */
}
ret = address_space_rw(&address_space_memory, idaw_addr,
MEMTXATTRS_UNSPECIFIED, (void *) &idaw.fmt1,
sizeof(idaw.fmt1), false);
cds->cda = be32_to_cpu(idaw.fmt1);
if (cds->cda & 0x80000000) {
return -EINVAL; /* channel program check */
}
}
++(cds->at_idaw);
if (ret != MEMTX_OK) {
/* assume inaccessible address */
return -EINVAL; /* channel program check */
}
return 0;
}
static int ccw_dstream_rw_ida(CcwDataStream *cds, void *buff, int len,
CcwDataStreamOp op)
{
uint64_t bsz = ccw_ida_block_size(cds->flags);
int ret = 0;
uint16_t cont_left, iter_len;
ret = cds_check_len(cds, len);
if (ret <= 0) {
return ret;
}
if (!cds->at_idaw) {
/* read first idaw */
ret = ida_read_next_idaw(cds);
if (ret) {
goto err;
}
cont_left = ida_continuous_left(cds->cda, bsz);
} else {
cont_left = ida_continuous_left(cds->cda, bsz);
if (cont_left == bsz) {
ret = ida_read_next_idaw(cds);
if (ret) {
goto err;
}
if (cds->cda & (bsz - 1)) {
ret = -EINVAL; /* channel program check */
goto err;
}
}
}
do {
iter_len = MIN(len, cont_left);
if (op != CDS_OP_A) {
ret = address_space_rw(&address_space_memory, cds->cda,
MEMTXATTRS_UNSPECIFIED, buff, iter_len, op);
if (ret != MEMTX_OK) {
/* assume inaccessible address */
ret = -EINVAL; /* channel program check */
goto err;
}
}
cds->at_byte += iter_len;
cds->cda += iter_len;
len -= iter_len;
if (!len) {
break;
}
ret = ida_read_next_idaw(cds);
if (ret) {
goto err;
}
cont_left = bsz;
} while (true);
return ret;
err:
cds->flags |= CDS_F_STREAM_BROKEN;
return ret;
}
void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb)
{
/*
* We don't support MIDA (an optional facility) yet and we
* catch this earlier. Just for expressing the precondition.
*/
g_assert(!(orb->ctrl1 & ORB_CTRL1_MASK_MIDAW));
cds->flags = (orb->ctrl0 & ORB_CTRL0_MASK_I2K ? CDS_F_I2K : 0) |
(orb->ctrl0 & ORB_CTRL0_MASK_C64 ? CDS_F_C64 : 0) |
(orb->ctrl0 & ORB_CTRL0_MASK_FMT ? CDS_F_FMT : 0) |
(ccw->flags & CCW_FLAG_IDA ? CDS_F_IDA : 0);
cds->count = ccw->count;
cds->cda_orig = ccw->cda;
ccw_dstream_rewind(cds);
if (!(cds->flags & CDS_F_IDA)) {
cds->op_handler = ccw_dstream_rw_noflags;
} else {
cds->op_handler = ccw_dstream_rw_ida;
}
}
static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
bool suspend_allowed)
@ -839,6 +1016,7 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
}
/* Look at the command. */
ccw_dstream_init(&sch->cds, &ccw, &(sch->orb));
switch (ccw.cmd_code) {
case CCW_CMD_NOOP:
/* Nothing to do. */
@ -852,8 +1030,8 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
}
}
len = MIN(ccw.count, sizeof(sch->sense_data));
cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
sch->curr_status.scsw.count = ccw.count - len;
ccw_dstream_write_buf(&sch->cds, sch->sense_data, len);
sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds);
memset(sch->sense_data, 0, sizeof(sch->sense_data));
ret = 0;
break;
@ -879,8 +1057,8 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
} else {
sense_id.reserved = 0;
}
cpu_physical_memory_write(ccw.cda, &sense_id, len);
sch->curr_status.scsw.count = ccw.count - len;
ccw_dstream_write_buf(&sch->cds, &sense_id, len);
sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds);
ret = 0;
break;
}
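
The IDA walker added above is easiest to follow with the indirect-data-address layout in mind: the first IDAW may point into the middle of a 2 KiB or 4 KiB block, every later IDAW must point at a block boundary, and ida_continuous_left() yields the bytes remaining in the current block. A self-contained sketch of that chunking arithmetic follows; it is illustrative only, and the address and byte count are made-up inputs, not values from this series.

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors ccw_ida_block_size(): format-2 IDAWs without the 2K-IDAW
     * control use 4 KiB blocks, everything else uses 2 KiB blocks. */
    static uint64_t ida_block_size(int c64, int i2k)
    {
        return (c64 && !i2k) ? (1ULL << 12) : (1ULL << 11);
    }

    int main(void)
    {
        uint64_t bsz = ida_block_size(1, 0);  /* format-2 IDAWs, 4 KiB blocks */
        uint64_t cda = 0x1f80;                /* data address from the first IDAW */
        unsigned count = 10000;               /* ccw->count */
        int idaw = 0;

        while (count) {
            /* ida_continuous_left(): bytes left in the current block */
            unsigned chunk = bsz - (cda & (bsz - 1));
            if (chunk > count) {
                chunk = count;
            }
            printf("IDAW %d transfers %u bytes\n", idaw++, chunk);
            count -= chunk;
            cda = 0;   /* each subsequent IDAW must be block aligned */
        }
        return 0;
    }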


@ -1032,6 +1032,7 @@ static void s390_pci_device_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->desc = "zpci device";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->reset = s390_pci_device_reset;
dc->bus_type = TYPE_S390_PCI_BUS;
dc->realize = s390_pci_device_realize;


@ -55,15 +55,8 @@ S390CPU *s390_cpu_addr2state(uint16_t cpu_addr)
static void s390_init_cpus(MachineState *machine)
{
MachineClass *mc = MACHINE_GET_CLASS(machine);
const char *typename;
gchar **model_pieces;
ObjectClass *oc;
CPUClass *cc;
int i;
if (machine->cpu_model == NULL) {
machine->cpu_model = s390_default_cpu_model_name();
}
if (tcg_enabled() && max_cpus > 1) {
error_report("Number of SMP CPUs requested (%d) exceeds max CPUs "
"supported by TCG (1) on s390x", max_cpus);
@ -73,25 +66,8 @@ static void s390_init_cpus(MachineState *machine)
/* initialize possible_cpus */
mc->possible_cpu_arch_ids(machine);
model_pieces = g_strsplit(machine->cpu_model, ",", 2);
if (!model_pieces[0]) {
error_report("Invalid/empty CPU model name");
exit(1);
}
oc = cpu_class_by_name(TYPE_S390_CPU, model_pieces[0]);
if (!oc) {
error_report("Unable to find CPU definition: %s", model_pieces[0]);
exit(1);
}
typename = object_class_get_name(oc);
cc = CPU_CLASS(oc);
/* after parsing, properties will be applied to all *typename* instances */
cc->parse_features(typename, model_pieces[1], &error_fatal);
g_strfreev(model_pieces);
for (i = 0; i < smp_cpus; i++) {
s390x_new_cpu(typename, i, &error_fatal);
s390x_new_cpu(machine->cpu_type, i, &error_fatal);
}
}
@ -213,13 +189,10 @@ static int gtod_load(QEMUFile *f, void *opaque, int version_id)
r = s390_set_clock(&tod_high, &tod_low);
if (r) {
warn_report("Unable to set guest clock for migration: %s",
strerror(-r));
error_printf("Guest clock will not be restored "
"which could cause the guest to hang.");
error_report("Unable to set KVM guest TOD clock: %s", strerror(-r));
}
return 0;
return r;
}
static SaveVMHandlers savevm_gtod = {
@ -275,6 +248,7 @@ static void ccw_init(MachineState *machine)
{
int ret;
VirtualCssBus *css_bus;
DeviceState *dev;
s390_sclp_init();
s390_memory_init(machine->ram_size);
@ -290,13 +264,14 @@ static void ccw_init(MachineState *machine)
machine->initrd_filename, "s390-ccw.img",
"s390-netboot.img", true);
if (s390_has_feat(S390_FEAT_ZPCI)) {
DeviceState *dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);
object_property_add_child(qdev_get_machine(),
TYPE_S390_PCI_HOST_BRIDGE,
OBJECT(dev), NULL);
qdev_init_nofail(dev);
}
/*
* We cannot easily make the pci host bridge conditional as older QEMUs
* always created it. Doing so would break migration across QEMU versions.
*/
dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);
object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE,
OBJECT(dev), NULL);
qdev_init_nofail(dev);
/* register hypercalls */
virtio_ccw_register_hcalls();
@ -313,6 +288,9 @@ static void ccw_init(MachineState *machine)
ret = css_create_css_image(VIRTUAL_CSSID, true);
}
assert(ret == 0);
if (css_migration_enabled()) {
css_register_vmstate();
}
/* Create VirtIO network adapters */
s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
@ -329,6 +307,10 @@ static void s390_cpu_plug(HotplugHandler *hotplug_dev,
g_assert(!ms->possible_cpus->cpus[cpu->env.core_id].cpu);
ms->possible_cpus->cpus[cpu->env.core_id].cpu = OBJECT(dev);
if (dev->hotplugged) {
raise_irq_cpu_hotplug();
}
}
static void s390_machine_reset(void)
@ -441,11 +423,13 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->no_parallel = 1;
mc->no_sdcard = 1;
mc->use_sclp = 1;
mc->max_cpus = 248;
mc->max_cpus = S390_MAX_CPUS;
mc->has_hotpluggable_cpus = true;
mc->get_hotplug_handler = s390_get_hotplug_handler;
mc->cpu_index_to_instance_props = s390_cpu_index_to_props;
mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids;
/* it is overridden with 'host' cpu *in kvm_arch_init* */
mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu");
hc->plug = s390_machine_device_plug;
hc->unplug_request = s390_machine_device_unplug_request;
nc->nmi_monitor_handler = s390_nmi;
@ -731,9 +715,6 @@ DEFINE_CCW_MACHINE(2_11, "2.11", true);
static void ccw_machine_2_10_instance_options(MachineState *machine)
{
ccw_machine_2_11_instance_options(machine);
if (css_migration_enabled()) {
css_register_vmstate();
}
}
static void ccw_machine_2_10_class_options(MachineClass *mc)


@ -606,6 +606,11 @@ static void sclp_class_init(ObjectClass *oc, void *data)
dc->realize = sclp_realize;
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
/*
* Reason: Creates TYPE_SCLP_EVENT_FACILITY in sclp_init
* which is a non-pluggable sysbus device
*/
dc->user_creatable = false;
sc->read_SCP_info = read_SCP_info;
sc->read_storage_element0_info = read_storage_element0_info;


@ -82,6 +82,12 @@ static void cpu_class_init(ObjectClass *oc, void *data)
k->get_receive_mask = receive_mask;
k->read_event_data = read_event_data;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
/*
* Reason: raise_irq_cpu_hotplug() depends on a unique
* TYPE_SCLP_CPU_HOTPLUG device, which is already created
* by the sclp event facility
*/
dc->user_creatable = false;
}
static const TypeInfo sclp_cpu_info = {


@ -118,8 +118,13 @@ static void quiesce_class_init(ObjectClass *klass, void *data)
dc->reset = quiesce_reset;
dc->vmsd = &vmstate_sclpquiesce;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
k->init = quiesce_init;
/*
* Reason: This is just an internal device - the notifier should
* not be registered multiple times in quiesce_init()
*/
dc->user_creatable = false;
k->init = quiesce_init;
k->get_send_mask = send_mask;
k->get_receive_mask = receive_mask;
k->can_handle_event = can_handle_event;


@ -289,49 +289,19 @@ static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
return -EFAULT;
}
if (is_legacy) {
linfo.queue = address_space_ldq_be(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
linfo.align = address_space_ldl_be(&address_space_memory,
ccw.cda + sizeof(linfo.queue),
MEMTXATTRS_UNSPECIFIED,
NULL);
linfo.index = address_space_lduw_be(&address_space_memory,
ccw.cda + sizeof(linfo.queue)
+ sizeof(linfo.align),
MEMTXATTRS_UNSPECIFIED,
NULL);
linfo.num = address_space_lduw_be(&address_space_memory,
ccw.cda + sizeof(linfo.queue)
+ sizeof(linfo.align)
+ sizeof(linfo.index),
MEMTXATTRS_UNSPECIFIED,
NULL);
ccw_dstream_read(&sch->cds, linfo);
be64_to_cpus(&linfo.queue);
be32_to_cpus(&linfo.align);
be16_to_cpus(&linfo.index);
be16_to_cpus(&linfo.num);
ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
} else {
info.desc = address_space_ldq_be(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
info.index = address_space_lduw_be(&address_space_memory,
ccw.cda + sizeof(info.desc)
+ sizeof(info.res0),
MEMTXATTRS_UNSPECIFIED, NULL);
info.num = address_space_lduw_be(&address_space_memory,
ccw.cda + sizeof(info.desc)
+ sizeof(info.res0)
+ sizeof(info.index),
MEMTXATTRS_UNSPECIFIED, NULL);
info.avail = address_space_ldq_be(&address_space_memory,
ccw.cda + sizeof(info.desc)
+ sizeof(info.res0)
+ sizeof(info.index)
+ sizeof(info.num),
MEMTXATTRS_UNSPECIFIED, NULL);
info.used = address_space_ldq_be(&address_space_memory,
ccw.cda + sizeof(info.desc)
+ sizeof(info.res0)
+ sizeof(info.index)
+ sizeof(info.num)
+ sizeof(info.avail),
MEMTXATTRS_UNSPECIFIED, NULL);
ccw_dstream_read(&sch->cds, info);
be64_to_cpus(&info.desc);
be16_to_cpus(&info.index);
be16_to_cpus(&info.num);
be64_to_cpus(&info.avail);
be64_to_cpus(&info.used);
ret = virtio_ccw_set_vqs(sch, &info, NULL);
}
sch->curr_status.scsw.count = 0;
@ -344,15 +314,13 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
VirtioRevInfo revinfo;
uint8_t status;
VirtioFeatDesc features;
void *config;
hwaddr indicators;
VqConfigBlock vq_config;
VirtioCcwDevice *dev = sch->driver_data;
VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
bool check_len;
int len;
hwaddr hw_len;
VirtioThinintInfo *thinint;
VirtioThinintInfo thinint;
if (!dev) {
return -EINVAL;
@ -396,11 +364,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
} else {
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
features.index = address_space_ldub(&address_space_memory,
ccw.cda
+ sizeof(features.features),
MEMTXATTRS_UNSPECIFIED,
NULL);
ccw_dstream_advance(&sch->cds, sizeof(features.features));
ccw_dstream_read(&sch->cds, features.index);
if (features.index == 0) {
if (dev->revision >= 1) {
/* Don't offer legacy features for modern devices. */
@ -419,9 +384,9 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
/* Return zeroes if the guest supports more feature bits. */
features.features = 0;
}
address_space_stl_le(&address_space_memory, ccw.cda,
features.features, MEMTXATTRS_UNSPECIFIED,
NULL);
ccw_dstream_rewind(&sch->cds);
cpu_to_le32s(&features.features);
ccw_dstream_write(&sch->cds, features.features);
sch->curr_status.scsw.count = ccw.count - sizeof(features);
ret = 0;
}
@ -440,15 +405,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
features.index = address_space_ldub(&address_space_memory,
ccw.cda
+ sizeof(features.features),
MEMTXATTRS_UNSPECIFIED,
NULL);
features.features = address_space_ldl_le(&address_space_memory,
ccw.cda,
MEMTXATTRS_UNSPECIFIED,
NULL);
ccw_dstream_read(&sch->cds, features);
le32_to_cpus(&features.features);
if (features.index == 0) {
virtio_set_features(vdev,
(vdev->guest_features & 0xffffffff00000000ULL) |
@ -489,7 +447,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
ret = -EFAULT;
} else {
virtio_bus_get_vdev_config(&dev->bus, vdev->config);
cpu_physical_memory_write(ccw.cda, vdev->config, len);
ccw_dstream_write_buf(&sch->cds, vdev->config, len);
sch->curr_status.scsw.count = ccw.count - len;
ret = 0;
}
@ -502,20 +460,13 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
}
}
len = MIN(ccw.count, vdev->config_len);
hw_len = len;
if (!ccw.cda) {
ret = -EFAULT;
} else {
config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
if (!config) {
ret = -EFAULT;
} else {
len = hw_len;
memcpy(vdev->config, config, len);
cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len);
if (!ret) {
virtio_bus_set_vdev_config(&dev->bus, vdev->config);
sch->curr_status.scsw.count = ccw.count - len;
ret = 0;
}
}
break;
@ -553,8 +504,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
status = address_space_ldub(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
ccw_dstream_read(&sch->cds, status);
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_ccw_stop_ioeventfd(dev);
}
@ -597,8 +547,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
ccw_dstream_read(&sch->cds, indicators);
be64_to_cpus(&indicators);
dev->indicators = get_indicator(indicators, sizeof(uint64_t));
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
ret = 0;
@ -618,8 +568,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
ccw_dstream_read(&sch->cds, indicators);
be64_to_cpus(&indicators);
dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
ret = 0;
@ -639,67 +589,58 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
vq_config.index = address_space_lduw_be(&address_space_memory,
ccw.cda,
MEMTXATTRS_UNSPECIFIED,
NULL);
ccw_dstream_read(&sch->cds, vq_config.index);
be16_to_cpus(&vq_config.index);
if (vq_config.index >= VIRTIO_QUEUE_MAX) {
ret = -EINVAL;
break;
}
vq_config.num_max = virtio_queue_get_num(vdev,
vq_config.index);
address_space_stw_be(&address_space_memory,
ccw.cda + sizeof(vq_config.index),
vq_config.num_max,
MEMTXATTRS_UNSPECIFIED,
NULL);
cpu_to_be16s(&vq_config.num_max);
ccw_dstream_write(&sch->cds, vq_config.num_max);
sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
ret = 0;
}
break;
case CCW_CMD_SET_IND_ADAPTER:
if (check_len) {
if (ccw.count != sizeof(*thinint)) {
if (ccw.count != sizeof(thinint)) {
ret = -EINVAL;
break;
}
} else if (ccw.count < sizeof(*thinint)) {
} else if (ccw.count < sizeof(thinint)) {
/* Can't execute command. */
ret = -EINVAL;
break;
}
len = sizeof(*thinint);
hw_len = len;
if (!ccw.cda) {
ret = -EFAULT;
} else if (dev->indicators && !sch->thinint_active) {
/* Trigger a command reject. */
ret = -ENOSYS;
} else {
thinint = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
if (!thinint) {
if (ccw_dstream_read(&sch->cds, thinint)) {
ret = -EFAULT;
} else {
uint64_t ind_bit = ldq_be_p(&thinint->ind_bit);
be64_to_cpus(&thinint.ind_bit);
be64_to_cpus(&thinint.summary_indicator);
be64_to_cpus(&thinint.device_indicator);
len = hw_len;
dev->summary_indicator =
get_indicator(ldq_be_p(&thinint->summary_indicator),
sizeof(uint8_t));
get_indicator(thinint.summary_indicator, sizeof(uint8_t));
dev->indicators =
get_indicator(ldq_be_p(&thinint->device_indicator),
ind_bit / 8 + 1);
dev->thinint_isc = thinint->isc;
dev->routes.adapter.ind_offset = ind_bit;
get_indicator(thinint.device_indicator,
thinint.ind_bit / 8 + 1);
dev->thinint_isc = thinint.isc;
dev->routes.adapter.ind_offset = thinint.ind_bit;
dev->routes.adapter.summary_offset = 7;
cpu_physical_memory_unmap(thinint, hw_len, 0, hw_len);
dev->routes.adapter.adapter_id = css_get_adapter_id(
CSS_IO_ADAPTER_VIRTIO,
dev->thinint_isc);
sch->thinint_active = ((dev->indicators != NULL) &&
(dev->summary_indicator != NULL));
sch->curr_status.scsw.count = ccw.count - len;
sch->curr_status.scsw.count = ccw.count - sizeof(thinint);
ret = 0;
}
}
@ -714,13 +655,9 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
ret = -EFAULT;
break;
}
revinfo.revision =
address_space_lduw_be(&address_space_memory, ccw.cda,
MEMTXATTRS_UNSPECIFIED, NULL);
revinfo.length =
address_space_lduw_be(&address_space_memory,
ccw.cda + sizeof(revinfo.revision),
MEMTXATTRS_UNSPECIFIED, NULL);
ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
be16_to_cpus(&revinfo.revision);
be16_to_cpus(&revinfo.length);
if (ccw.count < len + revinfo.length ||
(check_len && ccw.count > len + revinfo.length)) {
ret = -EINVAL;


@ -413,6 +413,7 @@ static void vfio_ccw_class_init(ObjectClass *klass, void *data)
dc->props = vfio_ccw_properties;
dc->vmsd = &vfio_ccw_vmstate;
dc->desc = "VFIO-based subchannel assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->realize = vfio_ccw_realize;
dc->unrealize = vfio_ccw_unrealize;
dc->reset = vfio_ccw_reset;


@ -45,9 +45,8 @@ typedef struct EmulatedCcw3270Class {
CCWDeviceClass parent_class;
void (*init)(EmulatedCcw3270Device *, Error **);
int (*read_payload_3270)(EmulatedCcw3270Device *, uint32_t, uint16_t);
int (*write_payload_3270)(EmulatedCcw3270Device *, uint8_t, uint32_t,
uint16_t);
int (*read_payload_3270)(EmulatedCcw3270Device *);
int (*write_payload_3270)(EmulatedCcw3270Device *, uint8_t);
} EmulatedCcw3270Class;
#endif


@ -75,6 +75,30 @@ typedef struct CMBE {
uint32_t reserved[7];
} QEMU_PACKED CMBE;
typedef enum CcwDataStreamOp {
CDS_OP_R = 0, /* read, false when used as is_write */
CDS_OP_W = 1, /* write, true when used as is_write */
CDS_OP_A = 2 /* advance, should not be used as is_write */
} CcwDataStreamOp;
/* normal usage is via SubchDev.cds instead of instantiating one directly */
typedef struct CcwDataStream {
#define CDS_F_IDA 0x01
#define CDS_F_MIDA 0x02
#define CDS_F_I2K 0x04
#define CDS_F_C64 0x08
#define CDS_F_FMT 0x10 /* CCW format-1 */
#define CDS_F_STREAM_BROKEN 0x80
uint8_t flags;
uint8_t at_idaw;
uint16_t at_byte;
uint16_t count;
uint32_t cda_orig;
int (*op_handler)(struct CcwDataStream *cds, void *buff, int len,
CcwDataStreamOp op);
hwaddr cda;
} CcwDataStream;
typedef struct SubchDev SubchDev;
struct SubchDev {
/* channel-subsystem related things: */
@ -92,6 +116,7 @@ struct SubchDev {
uint8_t ccw_no_data_cnt;
uint16_t migrated_schid; /* used for mismatch detection */
ORB orb;
CcwDataStream cds;
/* transport-provided data: */
int (*ccw_cb) (SubchDev *, CCW1);
void (*disable_cb)(SubchDev *);
@ -240,4 +265,47 @@ SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
/** Turn on css migration */
void css_register_vmstate(void);
void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb);
static inline void ccw_dstream_rewind(CcwDataStream *cds)
{
cds->at_byte = 0;
cds->at_idaw = 0;
cds->cda = cds->cda_orig;
}
static inline bool ccw_dstream_good(CcwDataStream *cds)
{
return !(cds->flags & CDS_F_STREAM_BROKEN);
}
static inline uint16_t ccw_dstream_residual_count(CcwDataStream *cds)
{
return cds->count - cds->at_byte;
}
static inline uint16_t ccw_dstream_avail(CcwDataStream *cds)
{
return ccw_dstream_good(cds) ? ccw_dstream_residual_count(cds) : 0;
}
static inline int ccw_dstream_advance(CcwDataStream *cds, int len)
{
return cds->op_handler(cds, NULL, len, CDS_OP_A);
}
static inline int ccw_dstream_write_buf(CcwDataStream *cds, void *buff, int len)
{
return cds->op_handler(cds, buff, len, CDS_OP_W);
}
static inline int ccw_dstream_read_buf(CcwDataStream *cds, void *buff, int len)
{
return cds->op_handler(cds, buff, len, CDS_OP_R);
}
#define ccw_dstream_read(cds, v) ccw_dstream_read_buf((cds), &(v), sizeof(v))
#define ccw_dstream_write(cds, v) ccw_dstream_write_buf((cds), &(v), sizeof(v))
#endif
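
For orientation, a device CCW handler is expected to consume these accessors roughly the way the terminal3270 conversion earlier in this diff does. Below is a minimal sketch, assuming the usual QEMU headers are available; the function and buffer names are invented for illustration and this is not code from the series.

    #include "hw/s390x/css.h"

    /* Hypothetical read-type command handler: copy a device payload into the
     * guest buffer described by the current CCW, whether it uses direct or
     * indirect (IDA) addressing. */
    static int my_read_payload(SubchDev *sch, uint8_t *my_payload, int payload_len)
    {
        /* the stream was set up by css_interpret_ccw() via ccw_dstream_init() */
        CcwDataStream *cds = &sch->cds;
        int len = MIN(ccw_dstream_avail(cds), payload_len);

        if (ccw_dstream_write_buf(cds, my_payload, len)) {
            /* stream is marked broken; treat as a channel-program check */
            return -EINVAL;
        }
        sch->curr_status.scsw.count = ccw_dstream_residual_count(cds);
        return len;
    }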


@ -1,6 +1,6 @@
obj-y += cpu.o cpu_models.o cpu_features.o gdbstub.o interrupt.o helper.o
obj-$(CONFIG_TCG) += translate.o cc_helper.o excp_helper.o fpu_helper.o
obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o
obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o crypto_helper.o
obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o mmu_helper.o diag.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o


@ -54,8 +54,6 @@ typedef struct S390CPUClass {
bool is_migration_safe;
const char *desc;
uint32_t next_core_id;
DeviceRealize parent_realize;
void (*parent_reset)(CPUState *cpu);
void (*load_normal)(CPUState *cpu);


@ -41,7 +41,6 @@
#include "hw/hw.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/sclp.h"
#endif
#define CR0_RESET 0xE0UL
@ -112,6 +111,7 @@ static void s390_cpu_initial_reset(CPUState *s)
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
env->mchk_index = -1;
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,
@ -149,6 +149,7 @@ static void s390_cpu_full_reset(CPUState *s)
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
env->mchk_index = -1;
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,
@ -179,8 +180,9 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
S390CPUClass *scc = S390_CPU_GET_CLASS(dev);
#if !defined(CONFIG_USER_ONLY)
S390CPU *cpu = S390_CPU(dev);
CPUS390XState *env = &cpu->env;
#endif
Error *err = NULL;
/* the model has to be realized before qemu_init_vcpu() due to kvm */
@ -196,11 +198,6 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
max_cpus - 1);
goto out;
}
#else
/* implicitly set for linux-user only */
cpu->env.core_id = scc->next_core_id;
scc->next_core_id++;
#endif
if (cpu_exists(cpu->env.core_id)) {
error_setg(&err, "Unable to add CPU with core-id: %" PRIu32
@ -209,7 +206,9 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
}
/* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */
cs->cpu_index = env->core_id;
cs->cpu_index = cpu->env.core_id;
#endif
cpu_exec_realizefn(cs, &err);
if (err != NULL) {
goto out;
@ -227,13 +226,6 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
#endif
scc->parent_realize(dev, &err);
#if !defined(CONFIG_USER_ONLY)
if (dev->hotplugged) {
raise_irq_cpu_hotplug();
}
#endif
out:
error_propagate(errp, err);
}
@ -357,22 +349,34 @@ unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
int r = 0;
if (kvm_enabled()) {
return kvm_s390_get_clock(tod_high, tod_low);
r = kvm_s390_get_clock_ext(tod_high, tod_low);
if (r == -ENXIO) {
return kvm_s390_get_clock(tod_high, tod_low);
}
} else {
/* Fixme TCG */
*tod_high = 0;
*tod_low = 0;
}
/* Fixme TCG */
*tod_high = 0;
*tod_low = 0;
return 0;
return r;
}
int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
int r = 0;
if (kvm_enabled()) {
return kvm_s390_set_clock(tod_high, tod_low);
r = kvm_s390_set_clock_ext(tod_high, tod_low);
if (r == -ENXIO) {
return kvm_s390_set_clock(tod_high, tod_low);
}
}
/* Fixme TCG */
return 0;
return r;
}
int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
@ -448,7 +452,9 @@ static gchar *s390_gdb_arch_name(CPUState *cs)
}
static Property s390x_cpu_properties[] = {
#if !defined(CONFIG_USER_ONLY)
DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0),
#endif
DEFINE_PROP_END_OF_LIST()
};
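
As a reading aid for the clock changes above: tod_high now carries the 8-bit epoch index that the z14 extended TOD clock adds on top of the classic 64-bit TOD value, and the *_ext KVM helpers report -ENXIO when the host kernel lacks KVM_S390_VM_TOD_EXT, in which case the wrappers fall back to the older interface. A hedged sketch of a round trip through these wrappers; the adjustment value is arbitrary and error handling is condensed.

    /* Illustrative only: read, tweak and write back the guest TOD clock using
     * the wrappers above.  With KVM_S390_VM_TOD_EXT the epoch index survives
     * the round trip; on older kernels the fallback path is used instead. */
    static void adjust_guest_tod_sketch(void)
    {
        uint8_t tod_high = 0;
        uint64_t tod_low = 0;

        if (s390_get_clock(&tod_high, &tod_low) == 0) {
            tod_low += 42;   /* arbitrary adjustment for the example */
            if (s390_set_clock(&tod_high, &tod_low)) {
                /* gtod_load() above now treats such a failure as fatal */
            }
        }
    }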


@ -43,12 +43,13 @@
#include "fpu/softfloat.h"
#define NB_MMU_MODES 3
#define NB_MMU_MODES 4
#define TARGET_INSN_START_EXTRA_WORDS 1
#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
#define MMU_MODE2_SUFFIX _home
#define MMU_MODE3_SUFFIX _real
#define MMU_USER_IDX 0
@ -59,6 +60,8 @@
#define PSW_MCHK_MASK 0x0004000000000000
#define PSW_IO_MASK 0x0200000000000000
#define S390_MAX_CPUS 248
typedef struct PSW {
uint64_t mask;
uint64_t addr;
@ -150,8 +153,10 @@ struct CPUS390XState {
CPU_COMMON
#if !defined(CONFIG_USER_ONLY)
uint32_t core_id; /* PoP "CPU address", same as cpu_index */
uint64_t cpuid;
#endif
uint64_t tod_offset;
uint64_t tod_basetime;
@ -292,6 +297,7 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#undef PSW_SHIFT_ASC
#undef PSW_MASK_CC
#undef PSW_MASK_PM
#undef PSW_SHIFT_MASK_PM
#undef PSW_MASK_64
#undef PSW_MASK_32
#undef PSW_MASK_ESA_ADDR
@ -309,6 +315,7 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#define PSW_SHIFT_ASC 46
#define PSW_MASK_CC 0x0000300000000000ULL
#define PSW_MASK_PM 0x00000F0000000000ULL
#define PSW_SHIFT_MASK_PM 40
#define PSW_MASK_64 0x0000000100000000ULL
#define PSW_MASK_32 0x0000000080000000ULL
#define PSW_MASK_ESA_ADDR 0x000000007fffffffULL
@ -349,6 +356,7 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2
#define MMU_REAL_IDX 3
static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
{
@ -684,12 +692,14 @@ static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
/* cpu_models.c */
void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list
const char *s390_default_cpu_model_name(void);
/* helper.c */
#define cpu_init(cpu_model) cpu_generic_init(TYPE_S390_CPU, cpu_model)
S390CPU *s390x_new_cpu(const char *typename, uint32_t core_id, Error **errp);
#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU
#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX)
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */


@ -381,7 +381,7 @@ void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
switch (type) {
case S390_FEAT_TYPE_STFL:
nr_bits = 2048;
nr_bits = 16384;
break;
case S390_FEAT_TYPE_PLO:
nr_bits = 256;


@ -825,6 +825,7 @@ static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
S390_FEAT_STFLE,
S390_FEAT_EXTENDED_IMMEDIATE,
S390_FEAT_EXTENDED_TRANSLATION_2,
S390_FEAT_MSA,
S390_FEAT_EXTENDED_TRANSLATION_3,
S390_FEAT_LONG_DISPLACEMENT,
S390_FEAT_LONG_DISPLACEMENT_FAST,
@ -841,6 +842,9 @@ static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
S390_FEAT_STFLE_49,
S390_FEAT_LOCAL_TLB_CLEARING,
S390_FEAT_STFLE_53,
S390_FEAT_MSA_EXT_5,
S390_FEAT_MSA_EXT_3,
S390_FEAT_MSA_EXT_4,
};
int i;
@ -941,11 +945,13 @@ void s390_realize_cpu_model(CPUState *cs, Error **errp)
apply_cpu_model(cpu->model, errp);
#if !defined(CONFIG_USER_ONLY)
cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model);
if (tcg_enabled()) {
/* basic mode, write the cpu address into the first 4 bits of the ID */
cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id);
}
#endif
}
static void get_feature(Object *obj, Visitor *v, const char *name,
@ -1207,9 +1213,6 @@ static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data)
qemu_hw_version());
}
#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU
#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX)
/* Generate type name for a cpu model. Caller has to free the string. */
static char *s390_cpu_type_name(const char *model_name)
{
@ -1232,14 +1235,6 @@ ObjectClass *s390_cpu_class_by_name(const char *name)
return oc;
}
const char *s390_default_cpu_model_name(void)
{
if (kvm_enabled()) {
return "host";
}
return "qemu";
}
static const TypeInfo qemu_s390_cpu_type_info = {
.name = S390_CPU_TYPE_NAME("qemu"),
.parent = TYPE_S390_CPU,


@ -0,0 +1,65 @@
/*
* s390x crypto helpers
*
* Copyright (c) 2017 Red Hat Inc
*
* Authors:
* David Hildenbrand <david@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
uint32_t type)
{
const uintptr_t ra = GETPC();
const uint8_t mod = env->regs[0] & 0x80ULL;
const uint8_t fc = env->regs[0] & 0x7fULL;
CPUState *cs = CPU(s390_env_get_cpu(env));
uint8_t subfunc[16] = { 0 };
uint64_t param_addr;
int i;
switch (type) {
case S390_FEAT_TYPE_KMAC:
case S390_FEAT_TYPE_KIMD:
case S390_FEAT_TYPE_KLMD:
case S390_FEAT_TYPE_PCKMO:
case S390_FEAT_TYPE_PCC:
if (mod) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
return 0;
}
break;
}
s390_get_feat_block(type, subfunc);
if (!test_be_bit(fc, subfunc)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_SPECIFICATION, 4);
return 0;
}
switch (fc) {
case 0: /* query subfunction */
for (i = 0; i < 16; i++) {
param_addr = wrap_address(env, env->regs[1] + i);
cpu_stb_data_ra(env, param_addr, subfunc[i], ra);
}
break;
default:
/* we don't implement any other subfunction yet */
g_assert_not_reached();
}
return 0;
}
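
The query path above stores a 16-byte subfunction block at the address in GR1, and test_be_bit() decides whether a function code is installed; the bit numbering is big-endian, so bit 0 is the leftmost bit of byte 0. A self-contained sketch of that indexing follows; the helper below is written from the semantics used here, not copied from QEMU, and the non-zero function code is an arbitrary example.

    #include <stdint.h>
    #include <stdio.h>

    /* Big-endian bit test: bit 0 is the most significant bit of byte 0,
     * matching how the subfunc[16] query block above is laid out. */
    static int be_bit_set(unsigned int nr, const uint8_t *block)
    {
        return (block[nr / 8] >> (7 - (nr % 8))) & 1;
    }

    int main(void)
    {
        uint8_t subfunc[16] = { 0 };

        subfunc[0] |= 0x80;   /* function code 0: query */
        subfunc[0] |= 0x10;   /* function code 3: arbitrary example entry */

        printf("fc 0 installed: %d\n", be_bit_set(0, subfunc));
        printf("fc 3 installed: %d\n", be_bit_set(3, subfunc));
        printf("fc 1 installed: %d\n", be_bit_set(1, subfunc));
        return 0;
    }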


@ -26,6 +26,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif
@ -87,8 +88,8 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
target_ulong vaddr, raddr;
uint64_t asc;
int prot;
DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
@ -97,18 +98,26 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
orig_vaddr &= TARGET_PAGE_MASK;
vaddr = orig_vaddr;
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
/* Translation ended in exception */
return 1;
if (mmu_idx < MMU_REAL_IDX) {
asc = cpu_mmu_idx_to_asc(mmu_idx);
/* 31-Bit mode */
if (!(env->psw.mask & PSW_MASK_64)) {
vaddr &= 0x7fffffff;
}
if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
return 1;
}
} else if (mmu_idx == MMU_REAL_IDX) {
if (mmu_translate_real(env, vaddr, rw, &raddr, &prot)) {
return 1;
}
} else {
abort();
}
/* check out of RAM access */
if (raddr > ram_size) {
if (!address_space_access_valid(&address_space_memory, raddr,
TARGET_PAGE_SIZE, rw)) {
DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
(uint64_t)raddr, (uint64_t)ram_size);
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);


@ -104,7 +104,6 @@ DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(stfle, i32, env, i64)
DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
@ -115,6 +114,7 @@ DEF_HELPER_4(cu21, i32, env, i32, i32, i32)
DEF_HELPER_4(cu24, i32, env, i32, i32, i32)
DEF_HELPER_4(cu41, i32, env, i32, i32, i32)
DEF_HELPER_4(cu42, i32, env, i32, i32, i32)
DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@ -152,6 +152,7 @@ DEF_HELPER_FLAGS_3(sturg, TCG_CALL_NO_WG, void, env, i64, i64)
DEF_HELPER_1(per_check_exception, void, env)
DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_2(xsch, void, env, i64)
DEF_HELPER_2(csch, void, env, i64)


@ -755,6 +755,8 @@
C(0xb2b8, SRNMB, S, FPE, 0, 0, 0, 0, srnm, 0)
/* SET DFP ROUNDING MODE */
C(0xb2b9, SRNMT, S, DFPR, 0, 0, 0, 0, srnm, 0)
/* SET PROGRAM MASK */
C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0)
/* SHIFT LEFT SINGLE */
D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31)
@ -939,6 +941,19 @@
/* UNPACK UNICODE */
C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0)
/* MSA Instructions */
D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC)
D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO)
D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF)
D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO)
D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC)
D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR)
D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM)
D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC)
D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO)
D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD)
D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD)
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
D(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL)


@ -162,6 +162,20 @@ static inline uint8_t get_per_atmid(CPUS390XState *env)
((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
}
static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
{
if (!(env->psw.mask & PSW_MASK_64)) {
if (!(env->psw.mask & PSW_MASK_32)) {
/* 24-Bit mode */
a &= 0x00ffffff;
} else {
/* 31-Bit mode */
a &= 0x7fffffff;
}
}
return a;
}
/* CC optimization */
/* Instead of computing the condition codes after each x86 instruction,
@ -375,6 +389,8 @@ target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
/* mmu_helper.c */
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
target_ulong *addr, int *flags);
/* misc_helper.c */


@ -68,11 +68,21 @@ int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
return -ENOSYS;
}
int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
return -ENOSYS;
}
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
return -ENOSYS;
}
int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
return -ENOSYS;
}
void kvm_s390_enable_css_support(S390CPU *cpu)
{
}


@ -287,6 +287,9 @@ void kvm_s390_crypto_reset(void)
int kvm_arch_init(MachineState *ms, KVMState *s)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
@ -643,10 +646,26 @@ int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}
int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
int r;
struct kvm_s390_vm_tod_clock gtod;
struct kvm_device_attr attr = {
.group = KVM_S390_VM_TOD,
.attr = KVM_S390_VM_TOD_EXT,
.addr = (uint64_t)&gtod,
};
r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
*tod_high = gtod.epoch_idx;
*tod_low = gtod.tod;
return r;
}
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
int r;
struct kvm_device_attr attr = {
.group = KVM_S390_VM_TOD,
.attr = KVM_S390_VM_TOD_LOW,
@ -663,6 +682,21 @@ int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
struct kvm_s390_vm_tod_clock gtod = {
.epoch_idx = *tod_high,
.tod = *tod_low,
};
struct kvm_device_attr attr = {
.group = KVM_S390_VM_TOD,
.attr = KVM_S390_VM_TOD_EXT,
.addr = (uint64_t)&gtod,
};
return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
/**
* kvm_s390_mem_op:
* @addr: the logical start address in guest memory
@ -1553,22 +1587,37 @@ static int do_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
return 0;
}
struct sigp_save_area {
uint64_t fprs[16]; /* 0x0000 */
uint64_t grs[16]; /* 0x0080 */
PSW psw; /* 0x0100 */
uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
uint32_t prefix; /* 0x0118 */
uint32_t fpc; /* 0x011c */
uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
uint32_t todpr; /* 0x0124 */
uint64_t cputm; /* 0x0128 */
uint64_t ckc; /* 0x0130 */
uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
uint32_t ars[16]; /* 0x0140 */
uint64_t crs[16]; /* 0x0180 */
};
QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);
#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
static const uint8_t ar_id = 1;
uint64_t ckc = cpu->env.ckc >> 8;
void *mem;
struct sigp_save_area *sa;
hwaddr len = sizeof(*sa);
int i;
hwaddr len = SAVE_AREA_SIZE;
mem = cpu_physical_memory_map(addr, &len, 1);
if (!mem) {
sa = cpu_physical_memory_map(addr, &len, 1);
if (!sa) {
return -EFAULT;
}
if (len != SAVE_AREA_SIZE) {
cpu_physical_memory_unmap(mem, len, 1, 0);
if (len != sizeof(*sa)) {
cpu_physical_memory_unmap(sa, len, 1, 0);
return -EFAULT;
}
@ -1576,19 +1625,26 @@ static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
}
for (i = 0; i < 16; ++i) {
*((uint64_t *)mem + i) = get_freg(&cpu->env, i)->ll;
sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
}
for (i = 0; i < 16; ++i) {
sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
}
sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
sa->prefix = cpu_to_be32(cpu->env.psa);
sa->fpc = cpu_to_be32(cpu->env.fpc);
sa->todpr = cpu_to_be32(cpu->env.todpr);
sa->cputm = cpu_to_be64(cpu->env.cputm);
sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
for (i = 0; i < 16; ++i) {
sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
}
for (i = 0; i < 16; ++i) {
sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
}
memcpy(mem + 128, &cpu->env.regs, 128);
memcpy(mem + 256, &cpu->env.psw, 16);
memcpy(mem + 280, &cpu->env.psa, 4);
memcpy(mem + 284, &cpu->env.fpc, 4);
memcpy(mem + 292, &cpu->env.todpr, 4);
memcpy(mem + 296, &cpu->env.cputm, 8);
memcpy(mem + 304, &ckc, 8);
memcpy(mem + 320, &cpu->env.aregs, 64);
memcpy(mem + 384, &cpu->env.cregs, 128);
cpu_physical_memory_unmap(mem, len, 1, len);
cpu_physical_memory_unmap(sa, len, 1, len);
return 0;
}


@ -29,7 +29,9 @@ int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
int kvm_s390_get_ri(void);
int kvm_s390_get_gs(void);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
int vq, bool assign);


@ -122,20 +122,6 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
}
}
static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
{
if (!(env->psw.mask & PSW_MASK_64)) {
if (!(env->psw.mask & PSW_MASK_32)) {
/* 24-Bit mode */
a &= 0x00ffffff;
} else {
/* 31-Bit mode */
a &= 0x7fffffff;
}
}
return a;
}
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
uint32_t l, uintptr_t ra)
{
@ -1702,17 +1688,9 @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
uintptr_t ra = GETPC();
CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t abs_addr;
int i;
real_addr = wrap_address(env, real_addr);
abs_addr = mmu_real2abs(env, real_addr) & TARGET_PAGE_MASK;
if (!address_space_access_valid(&address_space_memory, abs_addr,
TARGET_PAGE_SIZE, true)) {
cpu_restore_state(cs, ra);
program_interrupt(env, PGM_ADDRESSING, 4);
return 1;
}
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
/* Check low-address protection */
if ((env->cregs[0] & CR0_LOWPROT) && real_addr < 0x2000) {
@ -1722,7 +1700,7 @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
}
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
stq_phys(cs->as, abs_addr + i, 0);
cpu_stq_real_ra(env, real_addr + i, 0, ra);
}
return 0;
@ -1897,11 +1875,11 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
for (i = 0; i < entries; i++) {
/* addresses are not wrapped in 24/31bit mode but table index is */
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
entry = ldq_phys(cs->as, raddr);
entry = cpu_ldq_real_ra(env, raddr, ra);
if (!(entry & _REGION_ENTRY_INV)) {
/* we are allowed to not store if already invalid */
entry |= _REGION_ENTRY_INV;
stq_phys(cs->as, raddr, entry);
cpu_stq_real_ra(env, raddr, entry, ra);
}
}
}
@ -1919,6 +1897,7 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
uint32_t m4)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
const uintptr_t ra = GETPC();
uint64_t page = vaddr & TARGET_PAGE_MASK;
uint64_t pte_addr, pte;
@ -1927,9 +1906,9 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
pte_addr += (vaddr & VADDR_PX) >> 9;
/* Mark the page table entry as invalid */
pte = ldq_phys(cs->as, pte_addr);
pte = cpu_ldq_real_ra(env, pte_addr, ra);
pte |= _PAGE_INVALID;
stq_phys(cs->as, pte_addr, pte);
cpu_stq_real_ra(env, pte_addr, pte, ra);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
@ -1973,24 +1952,18 @@ void HELPER(purge)(CPUS390XState *env)
/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
return (uint32_t)ldl_phys(cs->as, wrap_address(env, addr));
return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}
uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
return ldq_phys(cs->as, wrap_address(env, addr));
return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
stl_phys(cs->as, wrap_address(env, addr), (uint32_t)v1);
cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&
@ -2003,9 +1976,7 @@ void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
CPUState *cs = CPU(s390_env_get_cpu(env));
stq_phys(cs->as, wrap_address(env, addr), v1);
cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());
if ((env->psw.mask & PSW_MASK_PER) &&
(env->cregs[9] & PER_CR9_EVENT_STORE) &&


@ -541,13 +541,18 @@ static unsigned do_stfle(CPUS390XState *env, uint64_t words[MAX_STFL_WORDS])
return max_bit / 64;
}
#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
uint64_t words[MAX_STFL_WORDS];
LowCore *lowcore;
lowcore = cpu_map_lowcore(env);
do_stfle(env, words);
cpu_stl_data(env, 200, words[0] >> 32);
lowcore->stfl_fac_list = cpu_to_be32(words[0] >> 32);
cpu_unmap_lowcore(lowcore);
}
#endif
uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{

View File

@ -497,3 +497,22 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
g_free(pages);
return ret;
}
/**
* Translate a real address into a physical (absolute) address.
* @param raddr the real address
* @param rw 0 = read, 1 = write, 2 = code fetch
* @param addr the translated address is stored to this pointer
* @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
* @return 0 if the translation was successful, < 0 if a fault occurred
*/
int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
target_ulong *addr, int *flags)
{
/* TODO: low address protection once we flush the tlb on cr changes */
*flags = PAGE_READ | PAGE_WRITE;
*addr = mmu_real2abs(env, raddr);
/* TODO: storage key handling */
return 0;
}


@ -2422,6 +2422,58 @@ static ExitStatus op_iske(DisasContext *s, DisasOps *o)
}
#endif
static ExitStatus op_msa(DisasContext *s, DisasOps *o)
{
int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
TCGv_i32 t_r1, t_r2, t_r3, type;
switch (s->insn->data) {
case S390_FEAT_TYPE_KMCTR:
if (r3 & 1 || !r3) {
gen_program_exception(s, PGM_SPECIFICATION);
return EXIT_NORETURN;
}
/* FALL THROUGH */
case S390_FEAT_TYPE_PPNO:
case S390_FEAT_TYPE_KMF:
case S390_FEAT_TYPE_KMC:
case S390_FEAT_TYPE_KMO:
case S390_FEAT_TYPE_KM:
if (r1 & 1 || !r1) {
gen_program_exception(s, PGM_SPECIFICATION);
return EXIT_NORETURN;
}
/* FALL THROUGH */
case S390_FEAT_TYPE_KMAC:
case S390_FEAT_TYPE_KIMD:
case S390_FEAT_TYPE_KLMD:
if (r2 & 1 || !r2) {
gen_program_exception(s, PGM_SPECIFICATION);
return EXIT_NORETURN;
}
/* FALL THROUGH */
case S390_FEAT_TYPE_PCKMO:
case S390_FEAT_TYPE_PCC:
break;
default:
g_assert_not_reached();
};
t_r1 = tcg_const_i32(r1);
t_r2 = tcg_const_i32(r2);
t_r3 = tcg_const_i32(r3);
type = tcg_const_i32(s->insn->data);
gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
set_cc_static(s);
tcg_temp_free_i32(t_r1);
tcg_temp_free_i32(t_r2);
tcg_temp_free_i32(t_r3);
tcg_temp_free_i32(type);
return NO_EXIT;
}
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
@ -2915,7 +2967,6 @@ static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_lura(o->out, cpu_env, o->in2);
return NO_EXIT;
}
@ -2923,7 +2974,6 @@ static ExitStatus op_lura(DisasContext *s, DisasOps *o)
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_lurag(o->out, cpu_env, o->in2);
return NO_EXIT;
}
@ -3796,6 +3846,17 @@ static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_spm(DisasContext *s, DisasOps *o)
{
tcg_gen_extrl_i64_i32(cc_op, o->in1);
tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
set_cc_static(s);
tcg_gen_shri_i64(o->in1, o->in1, 24);
tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
@ -4065,7 +4126,6 @@ static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_stura(cpu_env, o->in2, o->in1);
return NO_EXIT;
}
@ -4073,7 +4133,6 @@ static ExitStatus op_stura(DisasContext *s, DisasOps *o)
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
check_privileged(s);
potential_page_fault(s);
gen_helper_sturg(cpu_env, o->in2, o->in1);
return NO_EXIT;
}
@ -5494,6 +5553,10 @@ enum DisasInsnEnum {
#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
static const DisasInsn insn_info[] = {
#include "insn-data.def"