2018-04-23 19:51:16 +03:00
|
|
|
/*
|
|
|
|
* Memory Device Interface
|
|
|
|
*
|
|
|
|
* Copyright ProfitBricks GmbH 2012
|
|
|
|
* Copyright (C) 2014 Red Hat Inc
|
|
|
|
* Copyright (c) 2018 Red Hat Inc
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
2023-03-15 20:43:13 +03:00
|
|
|
#include "qemu/error-report.h"
|
2018-04-23 19:51:16 +03:00
|
|
|
#include "hw/mem/memory-device.h"
|
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "hw/boards.h"
|
|
|
|
#include "qemu/range.h"
|
2018-04-23 19:51:21 +03:00
|
|
|
#include "hw/virtio/vhost.h"
|
|
|
|
#include "sysemu/kvm.h"
|
2023-06-23 15:45:45 +03:00
|
|
|
#include "exec/address-spaces.h"
|
2018-10-05 12:20:24 +03:00
|
|
|
#include "trace.h"
|
2018-04-23 19:51:16 +03:00
|
|
|
|
2023-06-22 13:18:23 +03:00
|
|
|
static bool memory_device_is_empty(const MemoryDeviceState *md)
|
|
|
|
{
|
|
|
|
const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
|
|
|
|
Error *local_err = NULL;
|
|
|
|
MemoryRegion *mr;
|
|
|
|
|
|
|
|
/* dropping const here is fine as we don't touch the memory region */
|
|
|
|
mr = mdc->get_memory_region((MemoryDeviceState *)md, &local_err);
|
|
|
|
if (local_err) {
|
2023-11-14 19:11:33 +03:00
|
|
|
/* Not empty, we'll report errors later when containing the MR again. */
|
2023-06-22 13:18:23 +03:00
|
|
|
error_free(local_err);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return !mr;
|
|
|
|
}
|
|
|
|
|
2018-04-23 19:51:16 +03:00
|
|
|
/*
 * GCompareFunc ordering memory devices by their mapped start address,
 * ascending; used to keep the device list sorted.
 */
static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const uint64_t addr_a = MEMORY_DEVICE_GET_CLASS(a)->get_addr(MEMORY_DEVICE(a));
    const uint64_t addr_b = MEMORY_DEVICE_GET_CLASS(b)->get_addr(MEMORY_DEVICE(b));

    if (addr_a == addr_b) {
        return 0;
    }
    return addr_a < addr_b ? -1 : 1;
}
|
|
|
|
|
|
|
|
/*
 * object_child_foreach() callback: recursively collect all realized memory
 * devices below @obj into *(GSList **)opaque, sorted by start address.
 */
static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **out = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);

        /* only realized memory devices matter */
        if (dev->realized) {
            *out = g_slist_insert_sorted(*out, dev, memory_device_addr_sort);
        }
    }

    /* Recurse into all children of this object. */
    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}
|
|
|
|
|
2023-09-26 21:57:26 +03:00
|
|
|
/*
 * Number of memslots the device needs. Devices that don't implement the
 * get_memslots() hook always consume exactly one memslot.
 */
static unsigned int memory_device_get_memslots(MemoryDeviceState *md)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);

    return mdc->get_memslots ? mdc->get_memslots(md) : 1;
}
|
|
|
|
|
2023-09-26 21:57:29 +03:00
|
|
|
/*
|
|
|
|
* Memslots that are reserved by memory devices (required but still reported
|
|
|
|
* as free from KVM / vhost).
|
|
|
|
*/
|
|
|
|
static unsigned int get_reserved_memslots(MachineState *ms)
|
|
|
|
{
|
|
|
|
if (ms->device_memory->used_memslots >
|
|
|
|
ms->device_memory->required_memslots) {
|
|
|
|
/* This is unexpected, and we warned already in the memory notifier. */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return ms->device_memory->required_memslots -
|
|
|
|
ms->device_memory->used_memslots;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int memory_devices_get_reserved_memslots(void)
|
|
|
|
{
|
|
|
|
if (!current_machine->device_memory) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return get_reserved_memslots(current_machine);
|
|
|
|
}
|
|
|
|
|
memory-device,vhost: Support automatic decision on the number of memslots
We want to support memory devices that can automatically decide how many
memslots they will use. In the worst case, they have to use a single
memslot.
The target use cases are virtio-mem and the hyper-v balloon.
Let's calculate a reasonable limit such a memory device may use, and
instruct the device to make a decision based on that limit. Use a simple
heuristic that considers:
* A memslot soft-limit for all memory devices of 256; also, to not
consume too many memslots -- which could harm performance.
* Actually still free and unreserved memslots
* The percentage of the remaining device memory region that memory device
will occupy.
Further, while we properly check before plugging a memory device whether
there still are free memslots, we have other memslot consumers (such as
boot memory, PCI BARs) that don't perform any checks and might dynamically
consume memslots without any prior reservation. So we might succeed in
plugging a memory device, but once we dynamically map a PCI BAR we would
be in trouble. Doing accounting / reservation / checks for all such
users is problematic (e.g., sometimes we might temporarily split boot
memory into two memslots, triggered by the BIOS).
We use the historic magic memslot number of 509 as orientation to when
supporting 256 memory devices -> memslots (leaving 253 for boot memory and
other devices) has been proven to work reliable. We'll fallback to
suggesting a single memslot if we don't have at least 509 total memslots.
Plugging vhost devices with less than 509 memslots available while we
have memory devices plugged that consume multiple memslots due to
automatic decisions can be problematic. Most configurations might just fail
due to "limit < used + reserved", however, it can also happen that these
memory devices would suddenly consume memslots that would actually be
required by other memslot consumers (boot, PCI BARs) later. Note that this
has always been sketchy with vhost devices that support only a small number
of memslots; but we don't want to make it any worse. So let's keep it simple
and simply reject plugging such vhost devices in such a configuration.
Eventually, all vhost devices that want to be fully compatible with such
memory devices should support a decent number of memslots (>= 509).
Message-ID: <20230926185738.277351-13-david@redhat.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
2023-09-26 21:57:32 +03:00
|
|
|
bool memory_devices_memslot_auto_decision_active(void)
|
|
|
|
{
|
|
|
|
if (!current_machine->device_memory) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return current_machine->device_memory->memslot_auto_decision_active;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int memory_device_memslot_decision_limit(MachineState *ms,
|
|
|
|
MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
const unsigned int reserved = get_reserved_memslots(ms);
|
|
|
|
const uint64_t size = memory_region_size(mr);
|
|
|
|
unsigned int max = vhost_get_max_memslots();
|
|
|
|
unsigned int free = vhost_get_free_memslots();
|
|
|
|
uint64_t available_space;
|
|
|
|
unsigned int memslots;
|
|
|
|
|
|
|
|
if (kvm_enabled()) {
|
|
|
|
max = MIN(max, kvm_get_max_memslots());
|
|
|
|
free = MIN(free, kvm_get_free_memslots());
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we only have less overall memslots than what we consider reasonable,
|
|
|
|
* just keep it to a minimum.
|
|
|
|
*/
|
|
|
|
if (max < MEMORY_DEVICES_SAFE_MAX_MEMSLOTS) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Consider our soft-limit across all memory devices. We don't really
|
|
|
|
* expect to exceed this limit in reasonable configurations.
|
|
|
|
*/
|
|
|
|
if (MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT <=
|
|
|
|
ms->device_memory->required_memslots) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
memslots = MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT -
|
|
|
|
ms->device_memory->required_memslots;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Consider the actually still free memslots. This is only relevant if
|
|
|
|
* other memslot consumers would consume *significantly* more memslots than
|
|
|
|
* what we prepared for (> 253). Unlikely, but let's just handle it
|
|
|
|
* cleanly.
|
|
|
|
*/
|
|
|
|
memslots = MIN(memslots, free - reserved);
|
|
|
|
if (memslots < 1 || unlikely(free < reserved)) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We cannot have any other memory devices? So give all to this device. */
|
|
|
|
if (size == ms->maxram_size - ms->ram_size) {
|
|
|
|
return memslots;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Simple heuristic: equally distribute the memslots over the space
|
|
|
|
* still available for memory devices.
|
|
|
|
*/
|
|
|
|
available_space = ms->maxram_size - ms->ram_size -
|
|
|
|
ms->device_memory->used_region_size;
|
|
|
|
memslots = (double)memslots * size / available_space;
|
|
|
|
return memslots < 1 ? 1 : memslots;
|
|
|
|
}
|
|
|
|
|
2023-09-26 21:57:26 +03:00
|
|
|
static void memory_device_check_addable(MachineState *ms, MemoryDeviceState *md,
|
|
|
|
MemoryRegion *mr, Error **errp)
|
2018-04-23 19:51:21 +03:00
|
|
|
{
|
memory-device,vhost: Support automatic decision on the number of memslots
We want to support memory devices that can automatically decide how many
memslots they will use. In the worst case, they have to use a single
memslot.
The target use cases are virtio-mem and the hyper-v balloon.
Let's calculate a reasonable limit such a memory device may use, and
instruct the device to make a decision based on that limit. Use a simple
heuristic that considers:
* A memslot soft-limit for all memory devices of 256; also, to not
consume too many memslots -- which could harm performance.
* Actually still free and unreserved memslots
* The percentage of the remaining device memory region that memory device
will occupy.
Further, while we properly check before plugging a memory device whether
there still is are free memslots, we have other memslot consumers (such as
boot memory, PCI BARs) that don't perform any checks and might dynamically
consume memslots without any prior reservation. So we might succeed in
plugging a memory device, but once we dynamically map a PCI BAR we would
be in trouble. Doing accounting / reservation / checks for all such
users is problematic (e.g., sometimes we might temporarily split boot
memory into two memslots, triggered by the BIOS).
We use the historic magic memslot number of 509 as orientation to when
supporting 256 memory devices -> memslots (leaving 253 for boot memory and
other devices) has been proven to work reliable. We'll fallback to
suggesting a single memslot if we don't have at least 509 total memslots.
Plugging vhost devices with less than 509 memslots available while we
have memory devices plugged that consume multiple memslots due to
automatic decisions can be problematic. Most configurations might just fail
due to "limit < used + reserved", however, it can also happen that these
memory devices would suddenly consume memslots that would actually be
required by other memslot consumers (boot, PCI BARs) later. Note that this
has always been sketchy with vhost devices that support only a small number
of memslots; but we don't want to make it any worse.So let's keep it simple
and simply reject plugging such vhost devices in such a configuration.
Eventually, all vhost devices that want to be fully compatible with such
memory devices should support a decent number of memslots (>= 509).
Message-ID: <20230926185738.277351-13-david@redhat.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
2023-09-26 21:57:32 +03:00
|
|
|
const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
|
2023-06-23 15:45:53 +03:00
|
|
|
const uint64_t used_region_size = ms->device_memory->used_region_size;
|
2023-06-23 15:45:52 +03:00
|
|
|
const uint64_t size = memory_region_size(mr);
|
2023-09-26 21:57:29 +03:00
|
|
|
const unsigned int reserved_memslots = get_reserved_memslots(ms);
|
memory-device,vhost: Support automatic decision on the number of memslots
We want to support memory devices that can automatically decide how many
memslots they will use. In the worst case, they have to use a single
memslot.
The target use cases are virtio-mem and the hyper-v balloon.
Let's calculate a reasonable limit such a memory device may use, and
instruct the device to make a decision based on that limit. Use a simple
heuristic that considers:
* A memslot soft-limit for all memory devices of 256; also, to not
consume too many memslots -- which could harm performance.
* Actually still free and unreserved memslots
* The percentage of the remaining device memory region that memory device
will occupy.
Further, while we properly check before plugging a memory device whether
there still is are free memslots, we have other memslot consumers (such as
boot memory, PCI BARs) that don't perform any checks and might dynamically
consume memslots without any prior reservation. So we might succeed in
plugging a memory device, but once we dynamically map a PCI BAR we would
be in trouble. Doing accounting / reservation / checks for all such
users is problematic (e.g., sometimes we might temporarily split boot
memory into two memslots, triggered by the BIOS).
We use the historic magic memslot number of 509 as orientation to when
supporting 256 memory devices -> memslots (leaving 253 for boot memory and
other devices) has been proven to work reliable. We'll fallback to
suggesting a single memslot if we don't have at least 509 total memslots.
Plugging vhost devices with less than 509 memslots available while we
have memory devices plugged that consume multiple memslots due to
automatic decisions can be problematic. Most configurations might just fail
due to "limit < used + reserved", however, it can also happen that these
memory devices would suddenly consume memslots that would actually be
required by other memslot consumers (boot, PCI BARs) later. Note that this
has always been sketchy with vhost devices that support only a small number
of memslots; but we don't want to make it any worse.So let's keep it simple
and simply reject plugging such vhost devices in such a configuration.
Eventually, all vhost devices that want to be fully compatible with such
memory devices should support a decent number of memslots (>= 509).
Message-ID: <20230926185738.277351-13-david@redhat.com>
Reviewed-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
2023-09-26 21:57:32 +03:00
|
|
|
unsigned int required_memslots, memslot_limit;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Instruct the device to decide how many memslots to use, if applicable,
|
|
|
|
* before we query the number of required memslots the first time.
|
|
|
|
*/
|
|
|
|
if (mdc->decide_memslots) {
|
|
|
|
memslot_limit = memory_device_memslot_decision_limit(ms, mr);
|
|
|
|
mdc->decide_memslots(md, memslot_limit);
|
|
|
|
}
|
|
|
|
required_memslots = memory_device_get_memslots(md);
|
2018-04-23 19:51:21 +03:00
|
|
|
|
2023-09-26 21:57:26 +03:00
|
|
|
/* we will need memory slots for kvm and vhost */
|
2023-09-26 21:57:29 +03:00
|
|
|
if (kvm_enabled() &&
|
|
|
|
kvm_get_free_memslots() < required_memslots + reserved_memslots) {
|
2023-09-26 21:57:26 +03:00
|
|
|
error_setg(errp, "hypervisor has not enough free memory slots left");
|
2018-04-23 19:51:21 +03:00
|
|
|
return;
|
|
|
|
}
|
2023-09-26 21:57:29 +03:00
|
|
|
if (vhost_get_free_memslots() < required_memslots + reserved_memslots) {
|
2023-09-26 21:57:26 +03:00
|
|
|
error_setg(errp, "a used vhost backend has not enough free memory slots left");
|
2018-04-23 19:51:21 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* will we exceed the total amount of memory specified */
|
2018-10-23 18:23:05 +03:00
|
|
|
if (used_region_size + size < used_region_size ||
|
|
|
|
used_region_size + size > ms->maxram_size - ms->ram_size) {
|
2018-04-23 19:51:21 +03:00
|
|
|
error_setg(errp, "not enough space, currently 0x%" PRIx64
|
2018-10-05 12:20:13 +03:00
|
|
|
" in use of total space for memory devices 0x" RAM_ADDR_FMT,
|
2018-04-23 19:51:21 +03:00
|
|
|
used_region_size, ms->maxram_size - ms->ram_size);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-10-05 12:20:21 +03:00
|
|
|
static uint64_t memory_device_get_free_addr(MachineState *ms,
|
|
|
|
const uint64_t *hint,
|
|
|
|
uint64_t align, uint64_t size,
|
|
|
|
Error **errp)
|
2018-04-23 19:51:20 +03:00
|
|
|
{
|
|
|
|
GSList *list = NULL, *item;
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
Range as, new = range_empty;
|
2018-04-23 19:51:20 +03:00
|
|
|
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
range_init_nofail(&as, ms->device_memory->base,
|
|
|
|
memory_region_size(&ms->device_memory->mr));
|
2018-04-23 19:51:20 +03:00
|
|
|
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
/* start of address space indicates the maximum alignment we expect */
|
|
|
|
if (!QEMU_IS_ALIGNED(range_lob(&as), align)) {
|
2020-10-08 11:30:27 +03:00
|
|
|
warn_report("the alignment (0x%" PRIx64 ") exceeds the expected"
|
|
|
|
" maximum alignment, memory will get fragmented and not"
|
|
|
|
" all 'maxmem' might be usable for memory devices.",
|
|
|
|
align);
|
2018-06-07 18:47:04 +03:00
|
|
|
}
|
|
|
|
|
2018-10-23 18:23:04 +03:00
|
|
|
if (hint && !QEMU_IS_ALIGNED(*hint, align)) {
|
2018-04-23 19:51:20 +03:00
|
|
|
error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
|
|
|
|
align);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hint) {
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
if (range_init(&new, *hint, size) || !range_contains_range(&as, &new)) {
|
2018-10-05 12:20:13 +03:00
|
|
|
error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
"], usable range for memory devices [0x%" PRIx64 ":0x%"
|
|
|
|
PRIx64 "]", *hint, size, range_lob(&as),
|
|
|
|
range_size(&as));
|
2018-04-23 19:51:20 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
2020-10-08 11:30:27 +03:00
|
|
|
if (range_init(&new, QEMU_ALIGN_UP(range_lob(&as), align), size)) {
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
error_setg(errp, "can't add memory device, device too big");
|
|
|
|
return 0;
|
|
|
|
}
|
2018-04-23 19:51:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* find address range that will fit new memory device */
|
|
|
|
object_child_foreach(OBJECT(ms), memory_device_build_list, &list);
|
|
|
|
for (item = list; item; item = g_slist_next(item)) {
|
|
|
|
const MemoryDeviceState *md = item->data;
|
|
|
|
const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
uint64_t next_addr;
|
|
|
|
Range tmp;
|
2018-04-23 19:51:20 +03:00
|
|
|
|
2023-06-22 13:18:23 +03:00
|
|
|
if (memory_device_is_empty(md)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
range_init_nofail(&tmp, mdc->get_addr(md),
|
|
|
|
memory_device_get_region_size(md, &error_abort));
|
2018-04-23 19:51:20 +03:00
|
|
|
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
if (range_overlaps_range(&tmp, &new)) {
|
2018-04-23 19:51:20 +03:00
|
|
|
if (hint) {
|
|
|
|
const DeviceState *d = DEVICE(md);
|
2018-10-05 12:20:11 +03:00
|
|
|
error_setg(errp, "address range conflicts with memory device"
|
|
|
|
" id='%s'", d->id ? d->id : "(unnamed)");
|
2018-04-23 19:51:20 +03:00
|
|
|
goto out;
|
|
|
|
}
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
|
|
|
|
next_addr = QEMU_ALIGN_UP(range_upb(&tmp) + 1, align);
|
|
|
|
if (!next_addr || range_init(&new, next_addr, range_size(&new))) {
|
|
|
|
range_make_empty(&new);
|
|
|
|
break;
|
|
|
|
}
|
2019-07-30 03:37:40 +03:00
|
|
|
} else if (range_lob(&tmp) > range_upb(&new)) {
|
|
|
|
break;
|
2018-04-23 19:51:20 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
if (!range_contains_range(&as, &new)) {
|
2018-04-23 19:51:20 +03:00
|
|
|
error_setg(errp, "could not find position in guest address space for "
|
|
|
|
"memory device - memory fragmented due to alignments");
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
g_slist_free(list);
|
memory-device: rewrite address assignment using ranges
Let's rewrite it properly using ranges. This fixes certain overflows that
are right now possible. E.g.
qemu-system-x86_64 -m 4G,slots=20,maxmem=40G -M pc \
-object memory-backend-file,id=mem1,share,mem-path=/dev/zero,size=2G
-device pc-dimm,memdev=mem1,id=dimm1,addr=-0x40000000
Now properly errors out instead of succeeding. (Note that qapi
parsing of huge uint64_t values is broken and fixes are on the way)
"can't add memory device [0xffffffffa0000000:0x80000000], usable range for
memory devices [0x140000000:0xe00000000]"
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20181214131043.25071-3-david@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
2018-12-14 16:10:43 +03:00
|
|
|
return range_lob(&new);
|
2018-04-23 19:51:20 +03:00
|
|
|
}
|
|
|
|
|
2018-04-23 19:51:16 +03:00
|
|
|
MemoryDeviceInfoList *qmp_memory_device_list(void)
|
|
|
|
{
|
|
|
|
GSList *devices = NULL, *item;
|
2021-01-14 01:10:13 +03:00
|
|
|
MemoryDeviceInfoList *list = NULL, **tail = &list;
|
2018-04-23 19:51:16 +03:00
|
|
|
|
|
|
|
object_child_foreach(qdev_get_machine(), memory_device_build_list,
|
|
|
|
&devices);
|
|
|
|
|
|
|
|
for (item = devices; item; item = g_slist_next(item)) {
|
|
|
|
const MemoryDeviceState *md = MEMORY_DEVICE(item->data);
|
|
|
|
const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data);
|
|
|
|
MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);
|
|
|
|
|
2023-06-22 13:18:23 +03:00
|
|
|
/* Let's query infotmation even for empty memory devices. */
|
2018-04-23 19:51:16 +03:00
|
|
|
mdc->fill_device_info(md, info);
|
|
|
|
|
2021-01-14 01:10:13 +03:00
|
|
|
QAPI_LIST_APPEND(tail, info);
|
2018-04-23 19:51:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
g_slist_free(devices);
|
|
|
|
|
|
|
|
return list;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * object_child_foreach() callback: accumulate the plugged size of all
 * realized, non-empty memory devices into *(uint64_t *)opaque.
 */
static int memory_device_plugged_size(Object *obj, void *opaque)
{
    uint64_t *total = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        /* Empty or unrealized devices contribute nothing. */
        if (dev->realized && !memory_device_is_empty(md)) {
            *total += mdc->get_plugged_size(md, &error_abort);
        }
    }

    /* Recurse into all children of this object. */
    object_child_foreach(obj, memory_device_plugged_size, opaque);
    return 0;
}
|
|
|
|
|
|
|
|
/* Total bytes currently provided by all plugged memory devices. */
uint64_t get_plugged_memory_size(void)
{
    uint64_t total = 0;

    memory_device_plugged_size(qdev_get_machine(), &total);
    return total;
}
|
|
|
|
|
2018-10-05 12:20:21 +03:00
|
|
|
/*
 * Prepare plugging a memory device: verify the configuration, pick an
 * address in the device memory region and assign it to the device.
 *
 * @md:           the memory device to prepare
 * @ms:           the machine the device will be plugged into
 * @legacy_align: if non-NULL, forces this alignment instead of the
 *                device/region-derived one (legacy compat behavior)
 * @errp:         error destination
 *
 * On success, the device's address has been set via mdc->set_addr().
 * Empty memory devices (no memory region) are accepted unconditionally.
 */
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    Error *local_err = NULL;
    uint64_t addr, align = 0;
    MemoryRegion *mr;

    /* We support empty memory devices even without device memory. */
    if (memory_device_is_empty(md)) {
        return;
    }

    if (!ms->device_memory) {
        error_setg(errp, "the configuration is not prepared for memory devices"
                         " (e.g., for memory hotplug), consider specifying the"
                         " maxmem option");
        return;
    }

    mr = mdc->get_memory_region(md, &local_err);
    if (local_err) {
        goto out;
    }

    /* Size/memslot checks against the device memory region. */
    memory_device_check_addable(ms, md, mr, &local_err);
    if (local_err) {
        goto out;
    }

    /*
     * We always want the memory region size to be multiples of the memory
     * region alignment: for example, DIMMs with 1G+1byte size don't make
     * any sense. Note that we don't check that the size is multiples
     * of any additional alignment requirements the memory device might
     * have when it comes to the address in physical address space.
     */
    if (!QEMU_IS_ALIGNED(memory_region_size(mr),
                         memory_region_get_alignment(mr))) {
        error_setg(errp, "backend memory size must be multiple of 0x%"
                   PRIx64, memory_region_get_alignment(mr));
        return;
    }

    if (legacy_align) {
        align = *legacy_align;
    } else {
        /* Honor an additional device-requested minimum alignment, if any. */
        if (mdc->get_min_alignment) {
            align = mdc->get_min_alignment(md);
        }
        align = MAX(align, memory_region_get_alignment(mr));
    }
    /* An address of 0 means "auto-assign"; otherwise it is a request. */
    addr = mdc->get_addr(md);
    addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align,
                                       memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }
    mdc->set_addr(md, addr, &local_err);
    if (!local_err) {
        trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "",
                                     addr);
    }
out:
    error_propagate(errp, local_err);
}
|
|
|
|
|
2018-10-05 12:20:22 +03:00
|
|
|
/*
 * Plug a memory device: account its region size and memslots in the
 * machine's device memory state and map its memory region.
 *
 * Must only be called after a successful memory_device_pre_plug();
 * consequently, failures here are fatal (&error_abort).
 * Empty memory devices are a no-op.
 */
void memory_device_plug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    unsigned int memslots;
    uint64_t addr;
    MemoryRegion *mr;

    if (memory_device_is_empty(md)) {
        return;
    }

    memslots = memory_device_get_memslots(md);
    addr = mdc->get_addr(md);

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    ms->device_memory->used_region_size += memory_region_size(mr);
    ms->device_memory->required_memslots += memslots;
    /* Devices that decide their memslot count dynamically are tracked. */
    if (mdc->decide_memslots && memslots > 1) {
        ms->device_memory->memslot_auto_decision_active++;
    }

    /* Map relative to the device memory region's base address. */
    memory_region_add_subregion(&ms->device_memory->mr,
                                addr - ms->device_memory->base, mr);
    trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr);
}
|
|
|
|
|
2018-10-05 12:20:23 +03:00
|
|
|
/*
 * Unplug a memory device: unmap its memory region and revert the
 * accounting done in memory_device_plug().
 *
 * Must only be called for a device that was previously plugged;
 * consequently, failures here are fatal (&error_abort).
 * Empty memory devices are a no-op.
 */
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    const unsigned int memslots = memory_device_get_memslots(md);
    MemoryRegion *mr;

    if (memory_device_is_empty(md)) {
        return;
    }

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_del_subregion(&ms->device_memory->mr, mr);

    /* Undo the accounting performed in memory_device_plug(). */
    if (mdc->decide_memslots && memslots > 1) {
        ms->device_memory->memslot_auto_decision_active--;
    }
    ms->device_memory->used_region_size -= memory_region_size(mr);
    ms->device_memory->required_memslots -= memslots;
    trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "",
                               mdc->get_addr(md));
}
|
|
|
|
|
2018-10-05 12:20:17 +03:00
|
|
|
/*
 * Return the size (in bytes) of the device's memory region, or 0 if the
 * device has no memory region (empty device, or an error reported via
 * @errp by the get_memory_region() callback).
 */
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /* dropping const here is fine as we don't touch the memory region */
    mr = mdc->get_memory_region((MemoryDeviceState *)md, errp);
    if (!mr) {
        return 0;
    }

    return memory_region_size(mr);
}
|
|
|
|
|
2023-09-26 21:57:28 +03:00
|
|
|
/*
 * Common handler for region_add/region_del on the device memory address
 * space: update the used-memslot count (@add selects increment/decrement)
 * and warn if usage exceeds what memory devices indicated as required.
 */
static void memory_devices_region_mod(MemoryListener *listener,
                                      MemoryRegionSection *mrs, bool add)
{
    DeviceMemoryState *dms = container_of(listener, DeviceMemoryState,
                                          listener);

    /* Only RAM regions are expected; anything else is not accounted. */
    if (!memory_region_is_ram(mrs->mr)) {
        warn_report("Unexpected memory region mapped into device memory region.");
        return;
    }

    /*
     * The expectation is that each distinct RAM memory region section in
     * our region for memory devices consumes exactly one memslot in KVM
     * and in vhost. For vhost, this is true, except:
     * * ROM memory regions don't consume a memslot. These get used very
     *   rarely for memory devices (R/O NVDIMMs).
     * * Memslots without a fd (memory-backend-ram) don't necessarily
     *   consume a memslot. Such setups are quite rare and possibly bogus:
     *   the memory would be inaccessible by such vhost devices.
     *
     * So for vhost, in corner cases we might over-estimate the number of
     * memslots that are currently used or that might still be reserved
     * (required - used).
     */
    dms->used_memslots += add ? 1 : -1;

    if (dms->used_memslots > dms->required_memslots) {
        warn_report("Memory devices use more memory slots than indicated as required.");
    }
}
|
|
|
|
|
|
|
|
static void memory_devices_region_add(MemoryListener *listener,
|
|
|
|
MemoryRegionSection *mrs)
|
|
|
|
{
|
|
|
|
return memory_devices_region_mod(listener, mrs, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void memory_devices_region_del(MemoryListener *listener,
|
|
|
|
MemoryRegionSection *mrs)
|
|
|
|
{
|
|
|
|
return memory_devices_region_mod(listener, mrs, false);
|
|
|
|
}
|
|
|
|
|
2023-06-23 15:45:45 +03:00
|
|
|
/*
 * Initialize the machine's device memory region at guest-physical @base
 * with the given @size, map it into system memory, and register a memory
 * listener that tracks memslot usage by memory devices.
 *
 * Must be called at most once per machine, and @size must be non-zero.
 */
void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size)
{
    g_assert(size);
    g_assert(!ms->device_memory);
    ms->device_memory = g_new0(DeviceMemoryState, 1);
    ms->device_memory->base = base;

    memory_region_init(&ms->device_memory->mr, OBJECT(ms), "device-memory",
                       size);
    /* Private address space so the listener only sees this region. */
    address_space_init(&ms->device_memory->as, &ms->device_memory->mr,
                       "device-memory");
    memory_region_add_subregion(get_system_memory(), ms->device_memory->base,
                                &ms->device_memory->mr);

    /* Track the number of memslots used by memory devices. */
    ms->device_memory->listener.region_add = memory_devices_region_add;
    ms->device_memory->listener.region_del = memory_devices_region_del;
    memory_listener_register(&ms->device_memory->listener,
                             &ms->device_memory->as);
}
|
|
|
|
|
2018-04-23 19:51:16 +03:00
|
|
|
/* QOM type registration data for the TYPE_MEMORY_DEVICE interface. */
static const TypeInfo memory_device_info = {
    .name          = TYPE_MEMORY_DEVICE,
    .parent        = TYPE_INTERFACE,
    .class_size    = sizeof(MemoryDeviceClass),
};
|
|
|
|
|
|
|
|
/* Register the memory device interface type with the QOM type system. */
static void memory_device_register_types(void)
{
    type_register_static(&memory_device_info);
}

type_init(memory_device_register_types)
|