pc: resizeable ROM blocks
This makes ROM blocks resizeable. This infrastructure is required for
other functionality we have queued.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJUrme8AAoJECgfDbjSjVRpqmEH/1APnrphAi/CM6rxf2hPyvWj
f5yQDNXfeGxrHaW5vux6DvgHUkTng6KGBxz6XMSiwul6MeyRFNDqwbfMhSHjiIum
QkT//jqb5xux60kyTLXuIBTPok1SsKDtaTxbvZb0VmZrnkdYeI2CLa1Mq3cQUY0a
8DKnchQEM5lic9bxj+OuLiDFx8QYaMpQlUP9iIvNq6GjX+0zNsWvfPtkMTm00t93
lHKPvD2eVmrgfS5g+lkAwLDahLSjqwDc0YuLABOgDUFsZFz9GAUCHSpt0y8HEBwR
1NhGCfbnyyRl/1OSULtARGQ4Ddwm5dn1i5I4usoP5rLFS7FV5F7xhBu0IZlwgVA=
=pFmm
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc: resizeable ROM blocks

This makes ROM blocks resizeable. This infrastructure is required for
other functionality we have queued.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 08 Jan 2015 11:19:24 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  acpi-build: make ROMs RAM blocks resizeable
  memory: API to allocate resizeable RAM MR
  arch_init: support resizing on incoming migration
  exec: qemu_ram_alloc_resizeable, qemu_ram_resize
  exec: split length -> used_length/max_length
  exec: cpu_physical_memory_set/clear_dirty_range
  memory: add memory_region_set_size

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
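Editor's note: for orientation, below is a minimal, hypothetical sketch of a device-side
consumer of the API this series introduces (memory_region_init_resizeable_ram(), the
resized callback, and the widened rom_add_blob() with a max_len argument). It is not
taken from the patches; the demo_* names and sizes are invented for illustration. The
real in-tree users are rom_set_mr() in the loader hunks and acpi_build_update() below.

/* Hypothetical sketch, not part of the series.  Only APIs whose signatures
 * appear in the diff below (plus the pre-existing memory_region_get_ram_addr()
 * helper) are used; demo_* identifiers are made up. */
#include "exec/memory.h"      /* memory_region_init_resizeable_ram()           */
#include "exec/ram_addr.h"    /* qemu_ram_resize()                             */
#include "qapi/error.h"       /* error_abort                                   */

#define DEMO_USED_SIZE 0x20000   /* initial used_length, what the guest sees   */
#define DEMO_MAX_SIZE  0x200000  /* max_length, reserved once at allocation    */

/* Invoked by qemu_ram_resize() so the owner can refresh anything that cached
 * the old used_length (the series uses this to update an fw_cfg file entry). */
static void demo_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. re-publish (host, length) to whatever consumes this blob */
}

static void demo_init(Object *owner, MemoryRegion *mr)
{
    /* Guest-visible size starts at DEMO_USED_SIZE; it may later be changed
     * up to DEMO_MAX_SIZE with qemu_ram_resize(), e.g. after reset or on
     * incoming migration. */
    memory_region_init_resizeable_ram(mr, owner, "demo-blob",
                                      DEMO_USED_SIZE, DEMO_MAX_SIZE,
                                      demo_resized, &error_abort);
}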
commit aaf0301917

arch_init.c (28 changed lines)

--- a/arch_init.c
+++ b/arch_init.c
@@ -522,7 +522,7 @@ static void migration_bitmap_sync(void)
     address_space_sync_dirty_bitmap(&address_space_memory);

     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
+        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
     }
     trace_migration_bitmap_sync_end(migration_dirty_pages
                                     - num_dirty_pages_init);
@@ -668,7 +668,7 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
             offset >= last_offset) {
             break;
         }
-        if (offset >= block->length) {
+        if (offset >= block->used_length) {
             offset = 0;
             block = QTAILQ_NEXT(block, next);
             if (!block) {
@@ -727,7 +727,7 @@ uint64_t ram_bytes_total(void)
     uint64_t total = 0;

     QTAILQ_FOREACH(block, &ram_list.blocks, next)
-        total += block->length;
+        total += block->used_length;

     return total;
 }
@@ -831,7 +831,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         uint64_t block_pages;

-        block_pages = block->length >> TARGET_PAGE_BITS;
+        block_pages = block->used_length >> TARGET_PAGE_BITS;
         migration_dirty_pages += block_pages;
     }

@@ -844,7 +844,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
-        qemu_put_be64(f, block->length);
+        qemu_put_be64(f, block->used_length);
     }

     qemu_mutex_unlock_ramlist();
@@ -1015,7 +1015,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     uint8_t len;

     if (flags & RAM_SAVE_FLAG_CONTINUE) {
-        if (!block || block->length <= offset) {
+        if (!block || block->max_length <= offset) {
             error_report("Ack, bad migration stream!");
             return NULL;
         }
@@ -1028,7 +1028,8 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     id[len] = 0;

     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (!strncmp(id, block->idstr, sizeof(id)) && block->length > offset) {
+        if (!strncmp(id, block->idstr, sizeof(id)) &&
+            block->max_length > offset) {
             return memory_region_get_ram_ptr(block->mr) + offset;
         }
     }
@@ -1085,11 +1086,14 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)

             QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                 if (!strncmp(id, block->idstr, sizeof(id))) {
-                    if (block->length != length) {
-                        error_report("Length mismatch: %s: 0x" RAM_ADDR_FMT
-                                     " in != 0x" RAM_ADDR_FMT, id, length,
-                                     block->length);
-                        ret = -EINVAL;
+                    if (length != block->used_length) {
+                        Error *local_err = NULL;
+
+                        ret = qemu_ram_resize(block->offset, length, &local_err);
+                        if (local_err) {
+                            error_report("%s", error_get_pretty(local_err));
+                            error_free(local_err);
+                        }
                     }
                     break;
                 }
exec.c (136 changed lines)

--- a/exec.c
+++ b/exec.c
@@ -75,6 +75,11 @@ static MemoryRegion io_mem_unassigned;
 /* RAM is mmap-ed with MAP_SHARED */
 #define RAM_SHARED (1 << 1)

+/* Only a portion of RAM (used_length) is actually used, and migrated.
+ * This used_length size can change across reboots.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
 #endif

 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
@@ -812,11 +817,11 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)

     /* The list is protected by the iothread lock here. */
     block = ram_list.mru_block;
-    if (block && addr - block->offset < block->length) {
+    if (block && addr - block->offset < block->max_length) {
         goto found;
     }
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (addr - block->offset < block->length) {
+        if (addr - block->offset < block->max_length) {
             goto found;
         }
     }
@@ -850,7 +855,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
 {
     if (length == 0)
         return;
-    cpu_physical_memory_clear_dirty_range(start, length, client);
+    cpu_physical_memory_clear_dirty_range_type(start, length, client);

     if (tcg_enabled()) {
         tlb_reset_dirty_range_all(start, length);
@@ -1186,7 +1191,7 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         ram_addr_t end, next = RAM_ADDR_MAX;

-        end = block->offset + block->length;
+        end = block->offset + block->max_length;

         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
             if (next_block->offset >= end) {
@@ -1214,7 +1219,7 @@ ram_addr_t last_ram_offset(void)
     ram_addr_t last = 0;

     QTAILQ_FOREACH(block, &ram_list.blocks, next)
-        last = MAX(last, block->offset + block->length);
+        last = MAX(last, block->offset + block->max_length);

     return last;
 }
@@ -1296,6 +1301,49 @@ static int memory_try_enable_merging(void *addr, size_t len)
     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
 }

+/* Only legal before guest might have detected the memory size: e.g. on
+ * incoming migration, or right after reset.
+ *
+ * As memory core doesn't know how is memory accessed, it is up to
+ * resize callback to update device state and/or add assertions to detect
+ * misuse, if necessary.
+ */
+int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
+{
+    RAMBlock *block = find_ram_block(base);
+
+    assert(block);
+
+    if (block->used_length == newsize) {
+        return 0;
+    }
+
+    if (!(block->flags & RAM_RESIZEABLE)) {
+        error_setg_errno(errp, EINVAL,
+                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
+                         " in != 0x" RAM_ADDR_FMT, block->idstr,
+                         newsize, block->used_length);
+        return -EINVAL;
+    }
+
+    if (block->max_length < newsize) {
+        error_setg_errno(errp, EINVAL,
+                         "Length too large: %s: 0x" RAM_ADDR_FMT
+                         " > 0x" RAM_ADDR_FMT, block->idstr,
+                         newsize, block->max_length);
+        return -EINVAL;
+    }
+
+    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
+    block->used_length = newsize;
+    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
+    memory_region_set_size(block->mr, newsize);
+    if (block->resized) {
+        block->resized(block->idstr, newsize, block->host);
+    }
+    return 0;
+}
+
 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
 {
     RAMBlock *block;
@@ -1305,13 +1353,14 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)

     /* This assumes the iothread lock is taken here too. */
     qemu_mutex_lock_ramlist();
-    new_block->offset = find_ram_offset(new_block->length);
+    new_block->offset = find_ram_offset(new_block->max_length);

     if (!new_block->host) {
         if (xen_enabled()) {
-            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
+            xen_ram_alloc(new_block->offset, new_block->max_length,
+                          new_block->mr);
         } else {
-            new_block->host = phys_mem_alloc(new_block->length,
+            new_block->host = phys_mem_alloc(new_block->max_length,
                                              &new_block->mr->align);
             if (!new_block->host) {
                 error_setg_errno(errp, errno,
@@ -1320,13 +1369,13 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                 qemu_mutex_unlock_ramlist();
                 return -1;
             }
-            memory_try_enable_merging(new_block->host, new_block->length);
+            memory_try_enable_merging(new_block->host, new_block->max_length);
         }
     }

     /* Keep the list sorted from biggest to smallest block. */
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (block->length < new_block->length) {
+        if (block->max_length < new_block->max_length) {
             break;
         }
     }
@@ -1350,14 +1399,15 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                                         old_ram_size, new_ram_size);
         }
     }
-    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
+    cpu_physical_memory_set_dirty_range(new_block->offset,
+                                        new_block->used_length);

-    qemu_ram_setup_dump(new_block->host, new_block->length);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
+    qemu_ram_setup_dump(new_block->host, new_block->max_length);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);

     if (kvm_enabled()) {
-        kvm_setup_guest_memory(new_block->host, new_block->length);
+        kvm_setup_guest_memory(new_block->host, new_block->max_length);
     }

     return new_block->offset;
@@ -1391,7 +1441,8 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->used_length = size;
+    new_block->max_length = size;
     new_block->flags = share ? RAM_SHARED : 0;
     new_block->host = file_ram_alloc(new_block, size,
                                      mem_path, errp);
@@ -1410,7 +1461,12 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
 }
 #endif

-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+static
+ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
+                                   void (*resized)(const char*,
+                                                   uint64_t length,
+                                                   void *host),
+                                   void *host, bool resizeable,
                                    MemoryRegion *mr, Error **errp)
 {
     RAMBlock *new_block;
@@ -1418,14 +1474,21 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     Error *local_err = NULL;

     size = TARGET_PAGE_ALIGN(size);
+    max_size = TARGET_PAGE_ALIGN(max_size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->resized = resized;
+    new_block->used_length = size;
+    new_block->max_length = max_size;
+    assert(max_size >= size);
     new_block->fd = -1;
     new_block->host = host;
     if (host) {
         new_block->flags |= RAM_PREALLOC;
     }
+    if (resizeable) {
+        new_block->flags |= RAM_RESIZEABLE;
+    }
     addr = ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
@@ -1435,9 +1498,24 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     return addr;
 }

+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+                                   MemoryRegion *mr, Error **errp)
+{
+    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
+}
+
 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
 {
-    return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
+    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
+}
+
+ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
+                                     void (*resized)(const char*,
+                                                     uint64_t length,
+                                                     void *host),
+                                     MemoryRegion *mr, Error **errp)
+{
+    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
 }

 void qemu_ram_free_from_ptr(ram_addr_t addr)
@@ -1475,11 +1553,11 @@ void qemu_ram_free(ram_addr_t addr)
             xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
         } else if (block->fd >= 0) {
-            munmap(block->host, block->length);
+            munmap(block->host, block->max_length);
             close(block->fd);
 #endif
         } else {
-            qemu_anon_ram_free(block->host, block->length);
+            qemu_anon_ram_free(block->host, block->max_length);
         }
         g_free(block);
         break;
@@ -1499,7 +1577,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)

     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         offset = addr - block->offset;
-        if (offset < block->length) {
+        if (offset < block->max_length) {
             vaddr = ramblock_ptr(block, offset);
             if (block->flags & RAM_PREALLOC) {
                 ;
@@ -1575,7 +1653,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
             return xen_map_cache(addr, 0, 0);
         } else if (block->host == NULL) {
             block->host =
-                xen_map_cache(block->offset, block->length, 1);
+                xen_map_cache(block->offset, block->max_length, 1);
         }
     }
     return ramblock_ptr(block, addr - block->offset);
@@ -1594,9 +1672,9 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
         RAMBlock *block;

         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-            if (addr - block->offset < block->length) {
-                if (addr - block->offset + *size > block->length)
-                    *size = block->length - addr + block->offset;
+            if (addr - block->offset < block->max_length) {
+                if (addr - block->offset + *size > block->max_length)
+                    *size = block->max_length - addr + block->offset;
                 return ramblock_ptr(block, addr - block->offset);
             }
         }
@@ -1619,7 +1697,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
     }

     block = ram_list.mru_block;
-    if (block && block->host && host - block->host < block->length) {
+    if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }

@@ -1628,7 +1706,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         if (block->host == NULL) {
             continue;
         }
-        if (host - block->host < block->length) {
+        if (host - block->host < block->max_length) {
             goto found;
         }
     }
@@ -2882,7 +2960,7 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;

     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        func(block->host, block->offset, block->length, opaque);
+        func(block->host, block->offset, block->used_length, opaque);
     }
 }
 #endif
@@ -724,12 +724,22 @@ static void rom_insert(Rom *rom)
     QTAILQ_INSERT_TAIL(&roms, rom, next);
 }

+static void fw_cfg_resized(const char *id, uint64_t length, void *host)
+{
+    if (fw_cfg) {
+        fw_cfg_modify_file(fw_cfg, id + strlen("/rom@"), host, length);
+    }
+}
+
 static void *rom_set_mr(Rom *rom, Object *owner, const char *name)
 {
     void *data;

     rom->mr = g_malloc(sizeof(*rom->mr));
-    memory_region_init_ram(rom->mr, owner, name, rom->datasize, &error_abort);
+    memory_region_init_resizeable_ram(rom->mr, owner, name,
+                                      rom->datasize, rom->romsize,
+                                      fw_cfg_resized,
+                                      &error_abort);
     memory_region_set_readonly(rom->mr, true);
     vmstate_register_ram_global(rom->mr);

@@ -824,7 +834,7 @@ err:
 }

 ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
-                        hwaddr addr, const char *fw_file_name,
+                        size_t max_len, hwaddr addr, const char *fw_file_name,
                         FWCfgReadCallback fw_callback, void *callback_opaque)
 {
     Rom *rom;
@@ -833,7 +843,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
     rom = g_malloc0(sizeof(*rom));
     rom->name = g_strdup(name);
     rom->addr = addr;
-    rom->romsize = len;
+    rom->romsize = max_len ? max_len : len;
     rom->datasize = len;
     rom->data = g_malloc0(rom->datasize);
     memcpy(rom->data, blob, len);
@@ -853,7 +863,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,

         fw_cfg_add_file_callback(fw_cfg, fw_file_name,
                                  fw_callback, callback_opaque,
-                                 data, rom->romsize);
+                                 data, rom->datasize);
     }
     return ret;
 }
@@ -68,6 +68,9 @@

 #define ACPI_BUILD_TABLE_SIZE 0x20000

+/* Reserve RAM space for tables: add another order of magnitude. */
+#define ACPI_BUILD_TABLE_MAX_SIZE 0x200000
+
 /* #define DEBUG_ACPI_BUILD */
 #ifdef DEBUG_ACPI_BUILD
 #define ACPI_BUILD_DPRINTF(fmt, ...) \
@@ -1718,6 +1721,11 @@ static void acpi_build_update(void *build_opaque, uint32_t offset)
     acpi_build(build_state->guest_info, &tables);

     assert(acpi_data_len(tables.table_data) == build_state->table_size);
+
+    /* Make sure RAM size is correct - in case it got changed by migration */
+    qemu_ram_resize(build_state->table_ram, build_state->table_size,
+                    &error_abort);
+
     memcpy(qemu_get_ram_ptr(build_state->table_ram), tables.table_data->data,
            build_state->table_size);

@@ -1734,10 +1742,10 @@ static void acpi_build_reset(void *build_opaque)
 }

 static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
-                                    const char *name)
+                                    const char *name, uint64_t max_size)
 {
-    return rom_add_blob(name, blob->data, acpi_data_len(blob), -1, name,
-                        acpi_build_update, build_state);
+    return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
+                        name, acpi_build_update, build_state);
 }

 static const VMStateDescription vmstate_acpi_build = {
@@ -1781,11 +1789,12 @@ void acpi_setup(PcGuestInfo *guest_info)

     /* Now expose it all to Guest */
     build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
-                                               ACPI_BUILD_TABLE_FILE);
+                                               ACPI_BUILD_TABLE_FILE,
+                                               ACPI_BUILD_TABLE_MAX_SIZE);
     assert(build_state->table_ram != RAM_ADDR_MAX);
     build_state->table_size = acpi_data_len(tables.table_data);

-    acpi_add_rom_blob(NULL, tables.linker, "etc/table-loader");
+    acpi_add_rom_blob(NULL, tables.linker, "etc/table-loader", 0);

     fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                     tables.tcpalog->data, acpi_data_len(tables.tcpalog));
@@ -73,7 +73,8 @@ static inline void hwsetup_free(HWSetup *hw)
 static inline void hwsetup_create_rom(HWSetup *hw,
                                       hwaddr base)
 {
-    rom_add_blob("hwsetup", hw->data, TARGET_PAGE_SIZE, base, NULL, NULL, NULL);
+    rom_add_blob("hwsetup", hw->data, TARGET_PAGE_SIZE,
+                 TARGET_PAGE_SIZE, base, NULL, NULL, NULL);
 }

 static inline void hwsetup_add_u8(HWSetup *hw, uint8_t u)
@@ -299,11 +299,15 @@ CPUArchState *cpu_copy(CPUArchState *env);

 /* memory API */

-typedef struct RAMBlock {
+typedef struct RAMBlock RAMBlock;
+
+struct RAMBlock {
     struct MemoryRegion *mr;
     uint8_t *host;
     ram_addr_t offset;
-    ram_addr_t length;
+    ram_addr_t used_length;
+    ram_addr_t max_length;
+    void (*resized)(const char*, uint64_t length, void *host);
     uint32_t flags;
     char idstr[256];
     /* Reads can take either the iothread or the ramlist lock.
@@ -311,11 +315,11 @@ typedef struct RAMBlock {
      */
     QTAILQ_ENTRY(RAMBlock) next;
     int fd;
-} RAMBlock;
+};

 static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
 {
-    assert(offset < block->length);
+    assert(offset < block->used_length);
     assert(block->host);
     return (char *)block->host + offset;
 }
@@ -321,6 +321,30 @@ void memory_region_init_ram(MemoryRegion *mr,
                             uint64_t size,
                             Error **errp);

+/**
+ * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
+ *                                     RAM.  Accesses into the region will
+ *                                     modify memory directly.  Only an initial
+ *                                     portion of this RAM is actually used.
+ *                                     The used size can change across reboots.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: used size of the region.
+ * @max_size: max size of the region.
+ * @resized: callback to notify owner about used size change.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_resizeable_ram(MemoryRegion *mr,
+                                       struct Object *owner,
+                                       const char *name,
+                                       uint64_t size,
+                                       uint64_t max_size,
+                                       void (*resized)(const char*,
+                                                       uint64_t length,
+                                                       void *host),
+                                       Error **errp);
 #ifdef __linux__
 /**
  * memory_region_init_ram_from_file: Initialize RAM memory region with a
@@ -877,6 +901,16 @@ void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
  */
 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

+/*
+ * memory_region_set_size: dynamically update the size of a region.
+ *
+ * Dynamically updates the size of a region.
+ *
+ * @mr: the region to be updated
+ * @size: used size of the region.
+ */
+void memory_region_set_size(MemoryRegion *mr, uint64_t size);
+
 /*
  * memory_region_set_alias_offset: dynamically update a memory alias's offset
  *
@@ -28,12 +28,19 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                    MemoryRegion *mr, Error **errp);
 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+                                     void (*resized)(const char*,
+                                                     uint64_t length,
+                                                     void *host),
+                                     MemoryRegion *mr, Error **errp);
 int qemu_get_ram_fd(ram_addr_t addr);
 void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
 void *qemu_get_ram_ptr(ram_addr_t addr);
 void qemu_ram_free(ram_addr_t addr);
 void qemu_ram_free_from_ptr(ram_addr_t addr);

+int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
+
 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                  ram_addr_t length,
                                                  unsigned client)
@@ -172,9 +179,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
 }
 #endif /* not _WIN32 */

-static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
+static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start,
                                                           ram_addr_t length,
                                                           unsigned client)
 {
     unsigned long end, page;

@@ -184,6 +191,15 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
     bitmap_clear(ram_list.dirty_memory[client], page, end - page);
 }

+static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
+                                                         ram_addr_t length)
+{
+    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_MIGRATION);
+    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_VGA);
+    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_CODE);
+}
+
+
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                      unsigned client);

@@ -69,7 +69,7 @@ int rom_add_file(const char *file, const char *fw_dir,
                  hwaddr addr, int32_t bootindex,
                  bool option_rom);
 ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
-                        hwaddr addr, const char *fw_file_name,
+                        size_t max_len, hwaddr addr, const char *fw_file_name,
                         FWCfgReadCallback fw_callback, void *callback_opaque);
 int rom_add_elf_program(const char *name, void *data, size_t datasize,
                         size_t romsize, hwaddr addr);
@@ -83,7 +83,7 @@ void do_info_roms(Monitor *mon, const QDict *qdict);
 #define rom_add_file_fixed(_f, _a, _i) \
     rom_add_file(_f, NULL, _a, _i, false)
 #define rom_add_blob_fixed(_f, _b, _l, _a) \
-    rom_add_blob(_f, _b, _l, _a, NULL, NULL, NULL)
+    rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL)

 #define PC_ROM_MIN_VGA 0xc0000
 #define PC_ROM_MIN_OPTION 0xc8000
memory.c (33 changed lines)

--- a/memory.c
+++ b/memory.c
@@ -1152,6 +1152,23 @@ void memory_region_init_ram(MemoryRegion *mr,
     mr->ram_addr = qemu_ram_alloc(size, mr, errp);
 }

+void memory_region_init_resizeable_ram(MemoryRegion *mr,
+                                       Object *owner,
+                                       const char *name,
+                                       uint64_t size,
+                                       uint64_t max_size,
+                                       void (*resized)(const char*,
+                                                       uint64_t length,
+                                                       void *host),
+                                       Error **errp)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ram = true;
+    mr->terminates = true;
+    mr->destructor = memory_region_destructor_ram;
+    mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
+}
+
 #ifdef __linux__
 void memory_region_init_ram_from_file(MemoryRegion *mr,
                                       struct Object *owner,
@@ -1707,6 +1724,22 @@ void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
     memory_region_transaction_commit();
 }

+void memory_region_set_size(MemoryRegion *mr, uint64_t size)
+{
+    Int128 s = int128_make64(size);
+
+    if (size == UINT64_MAX) {
+        s = int128_2_64();
+    }
+    if (int128_eq(s, mr->size)) {
+        return;
+    }
+    memory_region_transaction_begin();
+    mr->size = s;
+    memory_region_update_pending = true;
+    memory_region_transaction_commit();
+}
+
 static void memory_region_readd_subregion(MemoryRegion *mr)
 {
     MemoryRegion *container = mr->container;
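Editor's note on timing: as the comment added in exec.c states, qemu_ram_resize() is
only legal before the guest may have observed the size, i.e. right after reset or on
incoming migration, which is exactly when acpi_build_update() above calls it. A minimal
hypothetical sketch of a reset hook following the same rule; DemoState and its fields
are invented for illustration and are not part of the series:

/* Hypothetical reset hook, assuming the caller saved the block's base
 * (ram_addr_t) when the resizeable block was allocated. */
#include "exec/ram_addr.h"   /* qemu_ram_resize(), ram_addr_t */
#include "qapi/error.h"      /* error_abort */

typedef struct DemoState {
    ram_addr_t blob_ram;       /* base of the block, saved at allocation time */
    ram_addr_t next_used_len;  /* new used_length, must be <= max_length */
} DemoState;

static void demo_machine_reset(void *opaque)
{
    DemoState *s = opaque;

    /* Shrink or grow the used portion before the guest looks at it again;
     * the block's resized callback (if any) is invoked from inside. */
    qemu_ram_resize(s->blob_ram, s->next_used_len, &error_abort);
}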