/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qapi/error.h"

#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/core/cpu.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD
2012-05-07 08:03:46 +04:00
|
|
|
|
|
|
|
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
|
|
|
|
MemoryMapping *mapping)
|
|
|
|
{
|
|
|
|
MemoryMapping *p;
|
|
|
|
|
|
|
|
QTAILQ_FOREACH(p, &list->head, next) {
|
|
|
|
if (p->phys_addr >= mapping->phys_addr) {
|
|
|
|
QTAILQ_INSERT_BEFORE(p, mapping, next);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
QTAILQ_INSERT_TAIL(&list->head, mapping, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void create_new_memory_mapping(MemoryMappingList *list,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr phys_addr,
|
|
|
|
hwaddr virt_addr,
|
2012-05-07 08:03:46 +04:00
|
|
|
ram_addr_t length)
|
|
|
|
{
|
|
|
|
MemoryMapping *memory_mapping;
|
|
|
|
|
2022-03-15 17:41:56 +03:00
|
|
|
memory_mapping = g_new(MemoryMapping, 1);
|
2012-05-07 08:03:46 +04:00
|
|
|
memory_mapping->phys_addr = phys_addr;
|
|
|
|
memory_mapping->virt_addr = virt_addr;
|
|
|
|
memory_mapping->length = length;
|
|
|
|
list->last_mapping = memory_mapping;
|
|
|
|
list->num++;
|
|
|
|
memory_mapping_list_add_mapping_sorted(list, memory_mapping);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool mapping_contiguous(MemoryMapping *map,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr phys_addr,
|
|
|
|
hwaddr virt_addr)
|
2012-05-07 08:03:46 +04:00
|
|
|
{
|
|
|
|
return phys_addr == map->phys_addr + map->length &&
|
|
|
|
virt_addr == map->virt_addr + map->length;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* [map->phys_addr, map->phys_addr + map->length) and
|
|
|
|
* [phys_addr, phys_addr + length) have intersection?
|
|
|
|
*/
|
|
|
|
static inline bool mapping_have_same_region(MemoryMapping *map,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr phys_addr,
|
2012-05-07 08:03:46 +04:00
|
|
|
ram_addr_t length)
|
|
|
|
{
|
|
|
|
return !(phys_addr + length < map->phys_addr ||
|
|
|
|
phys_addr >= map->phys_addr + map->length);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* [map->phys_addr, map->phys_addr + map->length) and
|
|
|
|
* [phys_addr, phys_addr + length) have intersection. The virtual address in the
|
|
|
|
* intersection are the same?
|
|
|
|
*/
|
|
|
|
static inline bool mapping_conflict(MemoryMapping *map,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr phys_addr,
|
|
|
|
hwaddr virt_addr)
|
2012-05-07 08:03:46 +04:00
|
|
|
{
|
|
|
|
return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* [map->virt_addr, map->virt_addr + map->length) and
|
|
|
|
* [virt_addr, virt_addr + length) have intersection. And the physical address
|
|
|
|
* in the intersection are the same.
|
|
|
|
*/
|
|
|
|
static inline void mapping_merge(MemoryMapping *map,
|
2012-10-23 14:30:10 +04:00
|
|
|
hwaddr virt_addr,
|
2012-05-07 08:03:46 +04:00
|
|
|
ram_addr_t length)
|
|
|
|
{
|
|
|
|
if (virt_addr < map->virt_addr) {
|
|
|
|
map->length += map->virt_addr - virt_addr;
|
|
|
|
map->virt_addr = virt_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((virt_addr + length) >
|
|
|
|
(map->virt_addr + map->length)) {
|
|
|
|
map->length = virt_addr + length - map->virt_addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Add the mapping [phys_addr, phys_addr + length) at @virt_addr to @list,
 * merging it into an existing mapping when possible and otherwise inserting
 * a new one, keeping the list sorted by physical address.
 */
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    /* Empty list: nothing to merge with, just create the first entry. */
    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    /*
     * Fast path: mappings are typically added in address order, so first
     * try to extend the most recently touched mapping.
     */
    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    /* Slow path: scan the whole (sorted) list for a merge candidate. */
    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        /* Directly adjacent in both address spaces: extend in place. */
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            /*
             * Overlapping physical ranges with different virt/phys
             * offsets cannot be merged; keep looking.
             */
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region can not be merged into any existed memory mapping. */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
|
|
|
|
|
|
|
|
/* Release every mapping in @list and reset it to the empty state. */
void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *entry, *tmp;

    QTAILQ_FOREACH_SAFE(entry, &list->head, next, tmp) {
        QTAILQ_REMOVE(&list->head, entry, next);
        g_free(entry);
    }

    list->num = 0;
    list->last_mapping = NULL;
}
|
|
|
|
|
|
|
|
/* Prepare @list for use: empty queue, no cached mapping, zero count. */
void memory_mapping_list_init(MemoryMappingList *list)
{
    QTAILQ_INIT(&list->head);
    list->num = 0;
    list->last_mapping = NULL;
}
|
2012-05-07 08:06:40 +04:00
|
|
|
|
2013-08-06 14:37:09 +04:00
|
|
|
/*
 * Free every block in @list, dropping the MemoryRegion reference each
 * block holds (taken in guest_phys_block_add_section()).
 */
void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *block, *tmp;

    QTAILQ_FOREACH_SAFE(block, &list->head, next, tmp) {
        QTAILQ_REMOVE(&list->head, block, next);
        memory_region_unref(block->mr);
        g_free(block);
    }
    list->num = 0;
}
|
|
|
|
|
|
|
|
/* Prepare an empty guest-physical block list. */
void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    QTAILQ_INIT(&list->head);
    list->num = 0;
}
|
|
|
|
|
2013-08-06 14:37:10 +04:00
|
|
|
/*
 * Adapter pairing a MemoryListener with the GuestPhysBlockList it fills;
 * the listener callback recovers this struct via container_of().
 */
typedef struct GuestPhysListener {
    GuestPhysBlockList *list;    /* destination list for discovered blocks */
    MemoryListener listener;     /* registered on address_space_memory */
} GuestPhysListener;
|
|
|
|
|
2021-07-27 11:25:44 +03:00
|
|
|
/*
 * Record one RAM MemoryRegionSection in g->list: either extend the most
 * recently added block (when the section is contiguous with it in both
 * guest-physical and host-virtual memory and belongs to the same
 * MemoryRegion), or append a new block that takes a reference on the
 * section's MemoryRegion.
 */
static void guest_phys_block_add_section(GuestPhysListener *g,
                                         MemoryRegionSection *section)
{
    const hwaddr target_start = section->offset_within_address_space;
    const hwaddr target_end = target_start + int128_get64(section->size);
    uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    GuestPhysBlock *predecessor = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr ||
            predecessor->mr != section->mr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;
        /* Pin the region for the block's lifetime; dropped in
         * guest_phys_blocks_free(). */
        block->mr           = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" HWADDR_FMT_plx " target_end="
            HWADDR_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}
|
|
|
|
|
softmmu/memory_mapping: optimize for RamDiscardManager sections
virtio-mem logically plugs/unplugs memory within a sparse memory region
and notifies via the RamDiscardManager interface when parts become
plugged (populated) or unplugged (discarded).
Currently, we end up (via the two users)
1) zeroing all logically unplugged/discarded memory during TPM resets.
2) reading all logically unplugged/discarded memory when dumping, to
figure out the content is zero.
1) is always bad, because we assume unplugged memory stays discarded
(and is already implicitly zero).
2) isn't that bad with anonymous memory, we end up reading the zero
page (slow and unnecessary, though). However, once we use some
file-backed memory (future use case), even reading will populate memory.
Let's cut out all parts marked as not-populated (discarded) via the
RamDiscardManager. As virtio-mem is the single user, this now means that
logically unplugged memory ranges will no longer be included in the
dump, which results in smaller dump files and faster dumping.
virtio-mem has a minimum granularity of 1 MiB (and the default is usually
2 MiB). Theoretically, we can see quite some fragmentation, in practice
we won't have it completely fragmented in 1 MiB pieces. Still, we might
end up with many physical ranges.
Both, the ELF format and kdump seem to be ready to support many
individual ranges (e.g., for ELF it seems to be UINT32_MAX, kdump has a
linear bitmap).
Reviewed-by: Peter Xu <peterx@redhat.com>
Cc: Marc-André Lureau <marcandre.lureau@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Claudio Fontana <cfontana@suse.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: "Alex Bennée" <alex.bennee@linaro.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Laurent Vivier <lvivier@redhat.com>
Cc: Stefan Berger <stefanb@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210727082545.17934-5-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-07-27 11:25:45 +03:00
|
|
|
/*
 * RamDiscardManager replay callback: record one populated (plugged)
 * section in the block list.  Always succeeds.
 */
static int guest_phys_ram_populate_cb(MemoryRegionSection *section,
                                      void *opaque)
{
    guest_phys_block_add_section(opaque, section);
    return 0;
}
|
|
|
|
|
2021-07-27 11:25:44 +03:00
|
|
|
/*
 * MemoryListener region_add hook: collect RAM sections into the block
 * list.  Non-RAM, RAM-device and non-volatile regions are skipped; for
 * sparse regions with a RamDiscardManager (e.g. virtio-mem), only the
 * populated parts are recorded.
 */
static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g = container_of(listener, GuestPhysListener, listener);

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    /* for special sparse regions, only add populated parts */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        RamDiscardManager *rdm;

        rdm = memory_region_get_ram_discard_manager(section->mr);
        /* Invokes guest_phys_ram_populate_cb for each populated part. */
        ram_discard_manager_replay_populated(rdm, section,
                                             guest_phys_ram_populate_cb, g);
        return;
    }

    guest_phys_block_add_section(g, section);
}
|
|
|
|
|
2013-08-06 14:37:10 +04:00
|
|
|
/*
 * Populate @list with the guest's current RAM layout by registering a
 * transient MemoryListener: registration replays every existing section
 * through region_add, which is the only pass we need, so the listener is
 * unregistered immediately afterwards.
 */
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = {
        .list = list,
        .listener = {
            .region_add = &guest_phys_blocks_region_add,
        },
    };

    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}
|
|
|
|
|
2023-10-09 10:52:31 +03:00
|
|
|
static CPUState *find_paging_enabled_cpu(void)
|
2012-05-07 08:06:40 +04:00
|
|
|
{
|
2013-05-30 00:29:20 +04:00
|
|
|
CPUState *cpu;
|
2012-05-07 08:06:40 +04:00
|
|
|
|
2013-06-25 01:50:24 +04:00
|
|
|
CPU_FOREACH(cpu) {
|
2013-05-30 00:29:20 +04:00
|
|
|
if (cpu_paging_enabled(cpu)) {
|
|
|
|
return cpu;
|
2012-05-07 08:06:40 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-10-09 10:53:10 +03:00
|
|
|
/*
 * Build the guest's virtual-to-physical memory mapping list.  If any CPU
 * has paging enabled, walk the page tables of every CPU starting from the
 * first paging-enabled one; otherwise fall back to identity mappings over
 * the guest-physical blocks.  Returns false (setting @errp) only if a
 * CPU's page-table walk fails.
 */
bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    ERRP_GUARD();
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu();
    if (first_paging_enabled_cpu) {
        /*
         * NOTE(review): iteration starts at the first paging-enabled CPU
         * and continues through the remaining CPUs in list order; CPUs
         * before it are skipped.
         */
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            if (!cpu_get_memory_mapping(cpu, list, errp)) {
                return false;
            }
        }
        return true;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to physical
     * address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
    return true;
}
|
2012-05-07 08:07:07 +04:00
|
|
|
|
2013-08-06 14:37:11 +04:00
|
|
|
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
|
|
|
|
const GuestPhysBlockList *guest_phys_blocks)
|
2012-05-07 08:07:07 +04:00
|
|
|
{
|
2013-08-06 14:37:11 +04:00
|
|
|
GuestPhysBlock *block;
|
2012-05-07 08:07:07 +04:00
|
|
|
|
2013-08-06 14:37:11 +04:00
|
|
|
QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
|
|
|
|
create_new_memory_mapping(list, block->target_start, 0,
|
|
|
|
block->target_end - block->target_start);
|
2012-05-07 08:07:07 +04:00
|
|
|
}
|
|
|
|
}
|
2012-05-07 08:10:47 +04:00
|
|
|
|
|
|
|
/*
 * Clamp every mapping in @list to the physical window
 * [begin, begin + length): mappings entirely outside the window are
 * removed and freed, mappings straddling either edge are trimmed.
 *
 * NOTE(review): list->last_mapping is not reset when the cached mapping
 * is freed here — callers appear not to add mappings afterwards, but
 * worth confirming.
 */
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        /* No overlap with the window at all: drop the mapping. */
        if (!ranges_overlap(cur->phys_addr, cur->length, begin, length)) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        /*
         * Trim the part below @begin; advance virt_addr by the same
         * amount (only when a virtual address is actually recorded).
         */
        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        /* Trim the part above @begin + @length. */
        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}
|