Merge remote-tracking branch 'remotes/borntraeger/tags/s390x-20190930' into staging

- do not abuse memory_region_allocate_system_memory and split the memory
  according to KVM memslots in KVM code instead (Paolo, Igor)
- change splitting to split at 4TB (Christian)
- do not claim s390 (31bit) support in configure (Thomas)
- sclp error checking (Janosch, Claudio)
- new s390 pci maintainer (Matt, Collin)
- fix s390 pci (again) (Matt)

# gpg: Signature made Mon 30 Sep 2019 12:52:51 BST
# gpg:                using RSA key 117BBC80B5A61C7C
# gpg: Good signature from "Christian Borntraeger (IBM) <borntraeger@de.ibm.com>" [full]
# Primary key fingerprint: F922 9381 A334 08F9 DBAB FBCA 117B BC80 B5A6 1C7C

* remotes/borntraeger/tags/s390x-20190930:
  s390/kvm: split kvm mem slots at 4TB
  s390: do not call memory_region_allocate_system_memory() multiple times
  kvm: split too big memory section on several memslots
  kvm: clear dirty bitmaps from all overlapping memslots
  kvm: extract kvm_log_clear_one_slot
  configure: Remove s390 (31-bit mode) from the list of supported CPUs
  s390x: sclp: Report insufficient SCCB length
  s390x: sclp: fix error handling for oversize control blocks
  s390x: sclp: boundary check
  s390x: sclp: refactor invalid command check
  s390: PCI: fix IOMMU region init
  MAINTAINERS: Update S390 PCI Maintainer

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 95e9d74fe4
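The common thread of the KVM changes below is that a memory section larger
than kvm_max_slot_size is now carved into several KVM memslots by generic
code. A minimal standalone sketch of that splitting loop, with hypothetical
names (the real logic lives in kvm_set_phys_mem() in the accel/kvm/kvm-all.c
hunks below):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Carve [start, start + size) into slots of at most max_slot_size. */
    static void register_split(uint64_t start, uint64_t size,
                               uint64_t max_slot_size)
    {
        while (size) {
            uint64_t slot_size = MIN(max_slot_size, size);
            /* QEMU issues one KVM_SET_USER_MEMORY_REGION ioctl per slot */
            printf("slot: start=0x%" PRIx64 " size=0x%" PRIx64 "\n",
                   start, slot_size);
            start += slot_size;
            size -= slot_size;
        }
    }

    int main(void)
    {
        /* A 9 TiB guest split at 4 TiB yields slots of 4, 4 and 1 TiB. */
        register_split(0, 9ULL << 40, 4ULL << 40);
        return 0;
    }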
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1206,7 +1206,7 @@ T: git https://github.com/borntraeger/qemu.git s390-next
 L: qemu-s390x@nongnu.org
 
 S390 PCI
-M: Collin Walling <walling@linux.ibm.com>
+M: Matthew Rosato <mjrosato@linux.ibm.com>
 S: Supported
 F: hw/s390x/s390-pci*
 L: qemu-s390x@nongnu.org
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
@@ -140,6 +140,7 @@ bool kvm_direct_msi_allowed;
 bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
 static bool kvm_immediate_exit;
+static hwaddr kvm_max_slot_size = ~0;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -437,7 +438,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 static int kvm_section_update_flags(KVMMemoryListener *kml,
                                     MemoryRegionSection *section)
 {
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     KVMSlot *mem;
     int ret = 0;
 
@@ -448,13 +449,18 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
 
     kvm_slots_lock(kml);
 
-    mem = kvm_lookup_matching_slot(kml, start_addr, size);
-    if (!mem) {
-        /* We don't have a slot if we want to trap every access. */
-        goto out;
-    }
+    while (size && !ret) {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+        if (!mem) {
+            /* We don't have a slot if we want to trap every access. */
+            goto out;
+        }
 
-    ret = kvm_slot_update_flags(kml, mem, section->mr);
+        ret = kvm_slot_update_flags(kml, mem, section->mr);
+        start_addr += slot_size;
+        size -= slot_size;
+    }
 
 out:
     kvm_slots_unlock(kml);
@@ -527,11 +533,15 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
     struct kvm_dirty_log d = {};
     KVMSlot *mem;
     hwaddr start_addr, size;
+    hwaddr slot_size, slot_offset = 0;
     int ret = 0;
 
     size = kvm_align_section(section, &start_addr);
-    if (size) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
+    while (size) {
+        MemoryRegionSection subsection = *section;
+
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
         if (!mem) {
             /* We don't have a slot if we want to trap every access. */
             goto out;
@@ -549,11 +559,11 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
          * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
          * a hope that sizeof(long) won't become >8 any time soon.
          */
-        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
-                     /*HOST_LONG_BITS*/ 64) / 8;
         if (!mem->dirty_bmap) {
+            hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
+                                        /*HOST_LONG_BITS*/ 64) / 8;
             /* Allocate on the first log_sync, once and for all */
-            mem->dirty_bmap = g_malloc0(size);
+            mem->dirty_bmap = g_malloc0(bitmap_size);
         }
 
         d.dirty_bitmap = mem->dirty_bmap;
@@ -564,7 +574,13 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
             goto out;
         }
 
-        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
+        subsection.offset_within_region += slot_offset;
+        subsection.size = int128_make64(slot_size);
+        kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
+
+        slot_offset += slot_size;
+        start_addr += slot_size;
+        size -= slot_size;
     }
 out:
     return ret;
@@ -575,63 +591,22 @@ out:
 #define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
 
-/**
- * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
- *
- * NOTE: this will be a no-op if we haven't enabled manual dirty log
- * protection in the host kernel because in that case this operation
- * will be done within log_sync().
- *
- * @kml: the kvm memory listener
- * @section: the memory range to clear dirty bitmap
- */
-static int kvm_physical_log_clear(KVMMemoryListener *kml,
-                                  MemoryRegionSection *section)
+static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
+                                  uint64_t size)
 {
     KVMState *s = kvm_state;
+    uint64_t end, bmap_start, start_delta, bmap_npages;
     struct kvm_clear_dirty_log d;
-    uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
     unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
-    KVMSlot *mem = NULL;
-    int ret, i;
+    int ret;
 
-    if (!s->manual_dirty_log_protect) {
-        /* No need to do explicit clear */
-        return 0;
-    }
-
-    start = section->offset_within_address_space;
-    size = int128_get64(section->size);
-
-    if (!size) {
-        /* Nothing more we can do... */
-        return 0;
-    }
-
-    kvm_slots_lock(kml);
-
-    /* Find any possible slot that covers the section */
-    for (i = 0; i < s->nr_slots; i++) {
-        mem = &kml->slots[i];
-        if (mem->start_addr <= start &&
-            start + size <= mem->start_addr + mem->memory_size) {
-            break;
-        }
-    }
-
-    /*
-     * We should always find one memslot until this point, otherwise
-     * there could be something wrong from the upper layer
-     */
-    assert(mem && i != s->nr_slots);
-
     /*
      * We need to extend either the start or the size or both to
      * satisfy the KVM interface requirement. Firstly, do the start
      * page alignment on 64 host pages
      */
-    bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
-    start_delta = start - mem->start_addr - bmap_start;
+    bmap_start = start & KVM_CLEAR_LOG_MASK;
+    start_delta = start - bmap_start;
     bmap_start /= psize;
 
     /*
@@ -694,7 +669,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
     /* It should never overflow. If it happens, say something */
     assert(bmap_npages <= UINT32_MAX);
     d.num_pages = bmap_npages;
-    d.slot = mem->slot | (kml->as_id << 16);
+    d.slot = mem->slot | (as_id << 16);
 
     if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
         ret = -errno;
@@ -717,6 +692,66 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml,
                  size / psize);
     /* This handles the NULL case well */
     g_free(bmap_clear);
+    return ret;
+}
+
+/**
+ * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
+ *
+ * NOTE: this will be a no-op if we haven't enabled manual dirty log
+ * protection in the host kernel because in that case this operation
+ * will be done within log_sync().
+ *
+ * @kml: the kvm memory listener
+ * @section: the memory range to clear dirty bitmap
+ */
+static int kvm_physical_log_clear(KVMMemoryListener *kml,
+                                  MemoryRegionSection *section)
+{
+    KVMState *s = kvm_state;
+    uint64_t start, size, offset, count;
+    KVMSlot *mem;
+    int ret, i;
+
+    if (!s->manual_dirty_log_protect) {
+        /* No need to do explicit clear */
+        return 0;
+    }
+
+    start = section->offset_within_address_space;
+    size = int128_get64(section->size);
+
+    if (!size) {
+        /* Nothing more we can do... */
+        return 0;
+    }
+
+    kvm_slots_lock(kml);
+
+    for (i = 0; i < s->nr_slots; i++) {
+        mem = &kml->slots[i];
+        /* Discard slots that are empty or do not overlap the section */
+        if (!mem->memory_size ||
+            mem->start_addr > start + size - 1 ||
+            start > mem->start_addr + mem->memory_size - 1) {
+            continue;
+        }
+
+        if (start >= mem->start_addr) {
+            /* The slot starts before section or is aligned to it.  */
+            offset = start - mem->start_addr;
+            count = MIN(mem->memory_size - offset, size);
+        } else {
+            /* The slot starts after section.  */
+            offset = 0;
+            count = MIN(mem->memory_size, size - (mem->start_addr - start));
+        }
+        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
+        if (ret < 0) {
+            break;
+        }
+    }
+
     kvm_slots_unlock(kml);
 
     return ret;
@@ -953,6 +988,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
 
+void kvm_set_max_memslot_size(hwaddr max_slot_size)
+{
+    g_assert(
+        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+    );
+    kvm_max_slot_size = max_slot_size;
+}
+
 static void kvm_set_phys_mem(KVMMemoryListener *kml,
                              MemoryRegionSection *section, bool add)
 {
@@ -960,7 +1003,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
     int err;
     MemoryRegion *mr = section->mr;
     bool writeable = !mr->readonly && !mr->rom_device;
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     void *ram;
 
     if (!memory_region_is_ram(mr)) {
@@ -985,41 +1028,52 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
     kvm_slots_lock(kml);
 
     if (!add) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
-        if (!mem) {
-            goto out;
-        }
-        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-            kvm_physical_sync_dirty_bitmap(kml, section);
-        }
+        do {
+            slot_size = MIN(kvm_max_slot_size, size);
+            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+            if (!mem) {
+                goto out;
+            }
+            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                kvm_physical_sync_dirty_bitmap(kml, section);
+            }
 
-        /* unregister the slot */
-        g_free(mem->dirty_bmap);
-        mem->dirty_bmap = NULL;
-        mem->memory_size = 0;
-        mem->flags = 0;
-        err = kvm_set_user_memory_region(kml, mem, false);
-        if (err) {
-            fprintf(stderr, "%s: error unregistering slot: %s\n",
-                    __func__, strerror(-err));
-            abort();
-        }
+            /* unregister the slot */
+            g_free(mem->dirty_bmap);
+            mem->dirty_bmap = NULL;
+            mem->memory_size = 0;
+            mem->flags = 0;
+            err = kvm_set_user_memory_region(kml, mem, false);
+            if (err) {
+                fprintf(stderr, "%s: error unregistering slot: %s\n",
+                        __func__, strerror(-err));
+                abort();
+            }
+            start_addr += slot_size;
+            size -= slot_size;
+        } while (size);
         goto out;
     }
 
     /* register the new slot */
-    mem = kvm_alloc_slot(kml);
-    mem->memory_size = size;
-    mem->start_addr = start_addr;
-    mem->ram = ram;
-    mem->flags = kvm_mem_flags(mr);
+    do {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_alloc_slot(kml);
+        mem->memory_size = slot_size;
+        mem->start_addr = start_addr;
+        mem->ram = ram;
+        mem->flags = kvm_mem_flags(mr);
 
-    err = kvm_set_user_memory_region(kml, mem, true);
-    if (err) {
-        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
-                strerror(-err));
-        abort();
-    }
+        err = kvm_set_user_memory_region(kml, mem, true);
+        if (err) {
+            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
+                    strerror(-err));
+            abort();
+        }
+        start_addr += slot_size;
+        ram += slot_size;
+        size -= slot_size;
+    } while (size);
 
 out:
     kvm_slots_unlock(kml);
@@ -2859,6 +2913,7 @@ static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
 
     for (i = 0; i < kvm->nr_as; ++i) {
         if (kvm->as[i].as == as && kvm->as[i].ml) {
+            size = MIN(kvm_max_slot_size, size);
             return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                     start_addr, size);
         }
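The overlap arithmetic in the new kvm_physical_log_clear() above is easiest
to check with concrete numbers. A hedged standalone sketch (the clip()
helper is hypothetical; it mirrors the offset/count computation above):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Clip a clear request [start, start + size) against one memslot
     * [slot_start, slot_start + slot_size). */
    static void clip(uint64_t start, uint64_t size,
                     uint64_t slot_start, uint64_t slot_size)
    {
        uint64_t offset, count;

        if (start >= slot_start) {
            offset = start - slot_start;
            count = MIN(slot_size - offset, size);
        } else {
            offset = 0;
            count = MIN(slot_size, size - (slot_start - start));
        }
        printf("clear %#" PRIx64 " bytes at slot offset %#" PRIx64 "\n",
               count, offset);
    }

    int main(void)
    {
        /* A 2 MiB section straddling the 4 TiB split point: slot 0
         * clears its last MiB, slot 1 clears its first MiB. */
        uint64_t T = 1ULL << 40;
        clip(4 * T - (1 << 20), 2 << 20, 0, 4 * T);
        clip(4 * T - (1 << 20), 2 << 20, 4 * T, 4 * T);
        return 0;
    }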
diff --git a/configure b/configure
@@ -728,7 +728,7 @@ ARCH=
 # Normalise host CPU name and set ARCH.
 # Note that this case should only have supported host CPUs, not guests.
 case "$cpu" in
-  ppc|ppc64|s390|s390x|sparc64|x32|riscv32|riscv64)
+  ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64)
     supported_cpu="yes"
   ;;
   ppc64le)
diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c
@@ -377,9 +377,6 @@ static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
     case SCLP_CMD_WRITE_EVENT_MASK:
         write_event_mask(ef, sccb);
         break;
-    default:
-        sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
-        break;
     }
 }
 
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
@@ -695,10 +695,15 @@ static const MemoryRegionOps s390_msi_ctrl_ops = {
 
 void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
 {
+    /*
+     * The iommu region is initialized against a 0-mapped address space,
+     * so the smallest IOMMU region we can define runs from 0 to the end
+     * of the PCI address space.
+     */
     char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
     memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                              TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
-                             name, iommu->pal - iommu->pba + 1);
+                             name, iommu->pal + 1);
     iommu->enabled = true;
     memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
     g_free(name);
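A quick check of the size fix above, with hypothetical translation-table
values: the IOMMU region is mapped at offset 0 of its address space, while
valid DMA addresses run from pba to pal. With pba = 0x100000 and
pal = 0x1fffff, the old size pal - pba + 1 = 0x100000 would end the region
at 0xfffff, below pal, so accesses to the valid window would miss the
region entirely; pal + 1 = 0x200000 covers the whole [0, pal] range.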
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
@@ -154,39 +154,15 @@ static void virtio_ccw_register_hcalls(void)
                                    virtio_ccw_hcall_early_printk);
 }
 
-/*
- * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
- * as the dirty bitmap must be managed by bitops that take an int as
- * position indicator. If we have a guest beyond that we will split off
- * new subregions. The split must happen on a segment boundary (1MB).
- */
-#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
-#define SEG_MSK (~0xfffffULL)
-#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
 static void s390_memory_init(ram_addr_t mem_size)
 {
     MemoryRegion *sysmem = get_system_memory();
-    ram_addr_t chunk, offset = 0;
-    unsigned int number = 0;
+    MemoryRegion *ram = g_new(MemoryRegion, 1);
     Error *local_err = NULL;
-    gchar *name;
 
     /* allocate RAM for core */
-    name = g_strdup_printf("s390.ram");
-    while (mem_size) {
-        MemoryRegion *ram = g_new(MemoryRegion, 1);
-        uint64_t size = mem_size;
-
-        /* KVM does not allow memslots >= 8 TB */
-        chunk = MIN(size, KVM_SLOT_MAX_BYTES);
-        memory_region_allocate_system_memory(ram, NULL, name, chunk);
-        memory_region_add_subregion(sysmem, offset, ram);
-        mem_size -= chunk;
-        offset += chunk;
-        g_free(name);
-        name = g_strdup_printf("s390.ram.%u", ++number);
-    }
-    g_free(name);
+    memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size);
+    memory_region_add_subregion(sysmem, 0, ram);
 
     /*
      * Configure the maximum page size. As no memory devices were created
diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c
@@ -68,6 +68,12 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
 
     read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());
 
+    if (be16_to_cpu(sccb->h.length) <
+            (sizeof(ReadInfo) + cpu_count * sizeof(CPUEntry))) {
+        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
+        return;
+    }
+
     /* Configuration Characteristic (Extension) */
     s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
                         read_info->conf_char);
@@ -118,6 +124,12 @@ static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
     cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
     cpu_info->nr_standby = cpu_to_be16(0);
 
+    if (be16_to_cpu(sccb->h.length) <
+            (sizeof(ReadCpuInfo) + cpu_count * sizeof(CPUEntry))) {
+        sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
+        return;
+    }
+
     /* The standby offset is 16-byte for each CPU */
     cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
         + cpu_info->nr_configured*sizeof(CPUEntry));
@@ -213,14 +225,33 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
     cpu_physical_memory_read(sccb, &work_sccb, sccb_len);
 
     /* Valid sccb sizes */
-    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
-        be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
+    if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader)) {
         r = -PGM_SPECIFICATION;
         goto out;
     }
 
-    sclp_c->execute(sclp, &work_sccb, code);
+    switch (code & SCLP_CMD_CODE_MASK) {
+    case SCLP_CMDW_READ_SCP_INFO:
+    case SCLP_CMDW_READ_SCP_INFO_FORCED:
+    case SCLP_CMDW_READ_CPU_INFO:
+    case SCLP_CMDW_CONFIGURE_IOA:
+    case SCLP_CMDW_DECONFIGURE_IOA:
+    case SCLP_CMD_READ_EVENT_DATA:
+    case SCLP_CMD_WRITE_EVENT_DATA:
+    case SCLP_CMD_WRITE_EVENT_MASK:
+        break;
+    default:
+        work_sccb.h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+        goto out_write;
+    }
+
+    if ((sccb + be16_to_cpu(work_sccb.h.length)) > ((sccb & PAGE_MASK) + PAGE_SIZE)) {
+        work_sccb.h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
+        goto out_write;
+    }
+
+    sclp_c->execute(sclp, &work_sccb, code);
+out_write:
     cpu_physical_memory_write(sccb, &work_sccb,
                               be16_to_cpu(work_sccb.h.length));
 
     sclp_c->service_interrupt(sclp, sccb);
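The new boundary check is plain unsigned arithmetic: an SCCB must not cross
its 4 KiB page. A standalone sketch under the assumption of 4 KiB pages
(the helper name is hypothetical; PAGE_MASK and the response codes are
QEMU definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Mirrors the check added to sclp_service_call() above. */
    static bool sccb_crosses_page(uint64_t sccb, uint16_t length)
    {
        return (sccb + length) > ((sccb & PAGE_MASK) + PAGE_SIZE);
    }

For example, sccb = 0x1f80 with length = 0x100 ends at 0x2080, past the
page end 0x2000, so the guest now gets SCLP_RC_SCCB_BOUNDARY_VIOLATION.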
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
@@ -41,4 +41,5 @@ typedef struct KVMMemoryListener {
 void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                   AddressSpace *as, int as_id);
 
+void kvm_set_max_memslot_size(hwaddr max_slot_size);
 #endif
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
@@ -28,6 +28,7 @@
 #include "cpu.h"
 #include "internal.h"
 #include "kvm_s390x.h"
+#include "sysemu/kvm_int.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/timer.h"
@@ -122,6 +123,14 @@
  */
 #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                      (max_cpus + NR_LOCAL_IRQS))
+/*
+ * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
+ * as the dirty bitmap must be managed by bitops that take an int as
+ * position indicator. This would end at an unaligned address
+ * (0x7fffff00000). As future variants might provide larger pages
+ * and to make all addresses properly aligned, let us split at 4TB.
+ */
+#define KVM_SLOT_MAX_BYTES (4UL * TiB)
 
 static CPUWatchpoint hw_watchpoint;
 /*
@@ -355,6 +364,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
      */
     /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
 
+    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
     return 0;
 }
 
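The numbers in the new KVM_SLOT_MAX_BYTES comment check out; a small
standalone verification, assuming 4 KiB base pages as the comment does:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t max_pages = (1ULL << 31) - 1;        /* KVM_MEM_MAX_NR_PAGES */
        uint64_t max_bytes = max_pages << 12;         /* 4 KiB pages, ~8 TiB */
        uint64_t old_limit = max_bytes & ~0xfffffULL; /* old 1 MiB segment mask */
        uint64_t new_limit = 4ULL << 40;              /* KVM_SLOT_MAX_BYTES */

        assert(old_limit == 0x7fffff00000ULL); /* the unaligned address cited */
        assert(old_limit % (1ULL << 30) != 0); /* not aligned to 1 GiB pages */
        assert(new_limit % (1ULL << 30) == 0); /* 4 TiB is */
        return 0;
    }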