Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* RAMBlock vs. MemoryRegion cleanups from Fam
* mru_section optimization from Fam
* memory.txt improvements from Peter and Xiaoqiang
* i8257 fix from Hervé
* -daemonize fix
* Cleanups and small fixes from Alex, Praneith, Wei

# gpg: Signature made Mon 07 Mar 2016 17:08:59 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream:
  scsi-bus: Remove tape command from scsi_req_xfer
  kvm/irqchip: use bitmap utility for gsi tracking
  MAINTAINERS: Add entry for include/sysemu/kvm*.h
  doc/memory.txt: correct description of MemoryRegionOps fields
  doc/memory.txt: correct a logic error
  icount: possible options for sleep are on or off
  exec: Introduce AddressSpaceDispatch.mru_section
  exec: Factor out section_covers_addr
  exec: Pass RAMBlock pointer to qemu_ram_free
  memory: Drop MemoryRegion.ram_addr
  memory: Implement memory_region_get_ram_addr with mr->ram_block
  memory: Move assignment to ram_block to memory_region_init_*
  exec: Return RAMBlock pointer from allocating functions
  i8257: fix Terminal Count status
  log: do not log if QEMU is daemonized but without -D

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 97556fe80e
MAINTAINERS
@@ -234,6 +234,7 @@ L: kvm@vger.kernel.org
 S: Supported
 F: kvm-*
 F: */kvm.*
+F: include/sysemu/kvm*.h

 ARM
 M: Peter Maydell <peter.maydell@linaro.org>
cpus.c | 4
@@ -630,7 +630,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
     icount_align_option = qemu_opt_get_bool(opts, "align", false);

     if (icount_align_option && !icount_sleep) {
-        error_setg(errp, "align=on and sleep=no are incompatible");
+        error_setg(errp, "align=on and sleep=off are incompatible");
     }
     if (strcmp(option, "auto") != 0) {
         errno = 0;
@@ -643,7 +643,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
     } else if (icount_align_option) {
         error_setg(errp, "shift=auto and align=on are incompatible");
     } else if (!icount_sleep) {
-        error_setg(errp, "shift=auto and sleep=no are incompatible");
+        error_setg(errp, "shift=auto and sleep=off are incompatible");
     }

     use_icount = 2;
cputlb.c | 4
@@ -416,8 +416,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         /* Write access calls the I/O callback. */
         te->addr_write = address | TLB_MMIO;
     } else if (memory_region_is_ram(section->mr)
-               && cpu_physical_memory_is_clean(section->mr->ram_addr
-                                               + xlat)) {
+               && cpu_physical_memory_is_clean(
+                    memory_region_get_ram_addr(section->mr) + xlat)) {
         te->addr_write = address | TLB_NOTDIRTY;
     } else {
         te->addr_write = address;
docs/memory.txt
@@ -180,8 +180,8 @@ aliases that leave holes then the lower priority region will appear in these
 holes too.)

 For example, suppose we have a container A of size 0x8000 with two subregions
-B and C. B is a container mapped at 0x2000, size 0x4000, priority 1; C is
-an MMIO region mapped at 0x0, size 0x6000, priority 2. B currently has two
+B and C. B is a container mapped at 0x2000, size 0x4000, priority 2; C is
+an MMIO region mapped at 0x0, size 0x6000, priority 1. B currently has two
 of its own subregions: D of size 0x1000 at offset 0 and E of size 0x1000 at
 offset 0x2000. As a diagram:

@@ -297,8 +297,9 @@ various constraints can be supplied to control how these callbacks are called:
 - .valid.min_access_size, .valid.max_access_size define the access sizes
   (in bytes) which the device accepts; accesses outside this range will
   have device and bus specific behaviour (ignored, or machine check)
-- .valid.aligned specifies that the device only accepts naturally aligned
-  accesses. Unaligned accesses invoke device and bus specific behaviour.
+- .valid.unaligned specifies that the *device being modelled* supports
+  unaligned accesses; if false, unaligned accesses will invoke the
+  appropriate bus or CPU specific behaviour.
 - .impl.min_access_size, .impl.max_access_size define the access sizes
   (in bytes) supported by the *implementation*; other access sizes will be
   emulated using the ones available. For example a 4-byte write will be
@@ -306,5 +307,5 @@ various constraints can be supplied to control how these callbacks are called:
 - .impl.unaligned specifies that the *implementation* supports unaligned
   accesses; if false, unaligned accesses will be emulated by two aligned
   accesses.
-- .old_mmio can be used to ease porting from code using
+- .old_mmio eases the porting of code that was formerly using
   cpu_register_io_memory(). It should not be used in new code.
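The corrected .valid/.impl semantics are easiest to see in a concrete ops table. A minimal sketch follows, assuming a hypothetical device ("mydev" and the chosen sizes are invented for illustration; this is not code from this merge):

#include "exec/memory.h"

/* Hypothetical MMIO device: guests may issue aligned 2- or 4-byte
 * accesses; the callbacks are written for 4-byte accesses only, and
 * the memory core emulates the rest.
 */
static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0; /* a real model would return register contents here */
}

static void mydev_write(void *opaque, hwaddr addr, uint64_t val,
                        unsigned size)
{
    /* a real model would update register contents here */
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .valid = {
        .min_access_size = 2,   /* what the modelled device accepts */
        .max_access_size = 4,
        .unaligned = false,     /* unaligned guest accesses take the
                                 * bus/CPU specific path, per the text */
    },
    .impl = {
        .min_access_size = 4,   /* what the callbacks implement; 2-byte
                                 * valid accesses are emulated with these */
        .max_access_size = 4,
    },
};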
exec.c | 107
@@ -135,6 +135,7 @@ typedef struct PhysPageMap {
 struct AddressSpaceDispatch {
     struct rcu_head rcu;

+    MemoryRegionSection *mru_section;
     /* This is a multi-level map on the physical address space.
      * The bottom level has pointers to MemoryRegionSections.
      */
@@ -307,6 +308,17 @@ static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
     }
 }

+static inline bool section_covers_addr(const MemoryRegionSection *section,
+                                       hwaddr addr)
+{
+    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
+     * the section must cover the entire address space.
+     */
+    return section->size.hi ||
+           range_covers_byte(section->offset_within_address_space,
+                             section->size.lo, addr);
+}
+
 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                            Node *nodes, MemoryRegionSection *sections)
 {
@@ -322,9 +334,7 @@ static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
         lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
     }

-    if (sections[lp.ptr].size.hi ||
-        range_covers_byte(sections[lp.ptr].offset_within_address_space,
-                          sections[lp.ptr].size.lo, addr)) {
+    if (section_covers_addr(&sections[lp.ptr], addr)) {
         return &sections[lp.ptr];
     } else {
         return &sections[PHYS_SECTION_UNASSIGNED];
@@ -342,14 +352,25 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         hwaddr addr,
                                                         bool resolve_subpage)
 {
-    MemoryRegionSection *section;
+    MemoryRegionSection *section = atomic_read(&d->mru_section);
     subpage_t *subpage;
+    bool update;

-    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
+    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
+        section_covers_addr(section, addr)) {
+        update = false;
+    } else {
+        section = phys_page_find(d->phys_map, addr, d->map.nodes,
+                                 d->map.sections);
+        update = true;
+    }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
         section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
     }
+    if (update) {
+        atomic_set(&d->mru_section, section);
+    }
     return section;
 }

@@ -1554,7 +1575,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
     }
 }

-static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
+static void ram_block_add(RAMBlock *new_block, Error **errp)
 {
     RAMBlock *block;
     RAMBlock *last_block = NULL;
@@ -1573,7 +1594,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         if (err) {
             error_propagate(errp, err);
             qemu_mutex_unlock_ramlist();
-            return -1;
         }
     } else {
         new_block->host = phys_mem_alloc(new_block->max_length,
@@ -1583,7 +1603,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                          "cannot set up guest memory '%s'",
                          memory_region_name(new_block->mr));
             qemu_mutex_unlock_ramlist();
-            return -1;
         }
         memory_try_enable_merging(new_block->host, new_block->max_length);
     }
@@ -1631,22 +1650,19 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
             kvm_setup_guest_memory(new_block->host, new_block->max_length);
         }
     }
-
-    return new_block->offset;
 }

 #ifdef __linux__
-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                     bool share, const char *mem_path,
                                     Error **errp)
 {
     RAMBlock *new_block;
-    ram_addr_t addr;
     Error *local_err = NULL;

     if (xen_enabled()) {
         error_setg(errp, "-mem-path not supported with Xen");
-        return -1;
+        return NULL;
     }

     if (phys_mem_alloc != qemu_anon_ram_alloc) {
@@ -1657,7 +1673,7 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
          */
         error_setg(errp,
                    "-mem-path not supported with this accelerator");
-        return -1;
+        return NULL;
     }

     size = HOST_PAGE_ALIGN(size);
@@ -1670,29 +1686,28 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                      mem_path, errp);
     if (!new_block->host) {
         g_free(new_block);
-        return -1;
+        return NULL;
     }

-    addr = ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
-        return -1;
+        return NULL;
     }
-    return addr;
+    return new_block;
 }
 #endif

 static
-ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
+RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    void *host, bool resizeable,
                                    MemoryRegion *mr, Error **errp)
 {
     RAMBlock *new_block;
-    ram_addr_t addr;
     Error *local_err = NULL;

     size = HOST_PAGE_ALIGN(size);
@@ -1711,29 +1726,27 @@ ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
     if (resizeable) {
         new_block->flags |= RAM_RESIZEABLE;
     }
-    addr = ram_block_add(new_block, &local_err);
+    ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
         error_propagate(errp, local_err);
-        return -1;
+        return NULL;
     }
-
-    mr->ram_block = new_block;
-    return addr;
+    return new_block;
 }

-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                    MemoryRegion *mr, Error **errp)
 {
     return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
 }

-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
 {
     return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
 }

-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                      void (*resized)(const char*,
                                                      uint64_t length,
                                                      void *host),
@@ -1759,22 +1772,15 @@ static void reclaim_ramblock(RAMBlock *block)
     g_free(block);
 }

-void qemu_ram_free(ram_addr_t addr)
+void qemu_ram_free(RAMBlock *block)
 {
-    RAMBlock *block;
-
     qemu_mutex_lock_ramlist();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        if (addr == block->offset) {
-            QLIST_REMOVE_RCU(block, next);
-            ram_list.mru_block = NULL;
-            /* Write list before version */
-            smp_wmb();
-            ram_list.version++;
-            call_rcu(block, reclaim_ramblock, rcu);
-            break;
-        }
-    }
+    QLIST_REMOVE_RCU(block, next);
+    ram_list.mru_block = NULL;
+    /* Write list before version */
+    smp_wmb();
+    ram_list.version++;
+    call_rcu(block, reclaim_ramblock, rcu);
     qemu_mutex_unlock_ramlist();
 }

@@ -2707,7 +2713,8 @@ MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
             }
         } else {
             /* RAM case */
-            ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr + addr1);
+            ptr = qemu_get_ram_ptr(mr->ram_block,
+                                   memory_region_get_ram_addr(mr) + addr1);
             memcpy(buf, ptr, l);
         }

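The mru_section cache added above short-circuits the multi-level phys_map walk when successive lookups fall in the same MemoryRegionSection; atomic_read()/atomic_set() keep the cached pointer safe for concurrent readers under RCU. The covering test delegates to range_covers_byte() from include/qemu/range.h, whose behavior is sketched below (an editor's paraphrase of that header, not part of this diff; see the tree for the canonical definition):

/* True iff byte lies in [offset, offset + len), written so that
 * offset + len may wrap around 2^64 without overflowing.
 */
static inline int range_covers_byte(uint64_t offset, uint64_t len,
                                    uint64_t byte)
{
    return offset <= byte && byte - offset < len;
}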
hw/dma/i8257.c
@@ -342,6 +342,10 @@ static void i8257_channel_run(I8257State *d, int ichan)
             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
     r->now[COUNT] = n;
     ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
+    if (n == (r->base[COUNT] + 1) << ncont) {
+        ldebug("transfer done\n");
+        d->status |= (1 << ichan);
+    }
 }

 static void i8257_dma_run(void *opaque)
hw/misc/ivshmem.c
@@ -400,7 +400,7 @@ static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,

     memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                                s->ivshmem_size, ptr);
-    qemu_set_ram_fd(s->ivshmem.ram_addr, fd);
+    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem), fd);
     vmstate_register_ram(&s->ivshmem, DEVICE(s));
     memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

@@ -661,7 +661,8 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
     }
     memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                "ivshmem.bar2", s->ivshmem_size, map_ptr);
-    qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
+    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem),
+                    incoming_fd);
     vmstate_register_ram(&s->ivshmem, DEVICE(s));

     IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
@@ -996,8 +997,10 @@ static void pci_ivshmem_exit(PCIDevice *dev)
                          strerror(errno));
         }

-        if ((fd = qemu_get_ram_fd(s->ivshmem.ram_addr)) != -1)
+        fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
+        if (fd != -1) {
             close(fd);
+        }
     }

     vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
hw/scsi/scsi-bus.c
@@ -989,7 +989,6 @@ static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
         }
         /* fall through */
     case READ_10:
-    case RECOVER_BUFFERED_DATA:
     case READ_12:
     case READ_16:
         cmd->xfer *= dev->blocksize;
include/exec/memory.h
@@ -169,7 +169,6 @@ struct MemoryRegion {
     bool flush_coalesced_mmio;
     bool global_locking;
     uint8_t dirty_log_mask;
-    ram_addr_t ram_addr;
     RAMBlock *ram_block;
     Object *owner;
     const MemoryRegionIOMMUOps *iommu_ops;
@@ -978,14 +977,8 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
 /**
  * memory_region_get_ram_addr: Get the ram address associated with a memory
  *                             region
- *
- * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
- * code is being reworked.
  */
-static inline ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
-{
-    return mr->ram_addr;
-}
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
 /**
include/exec/ram_addr.h
@@ -94,21 +94,21 @@ ram_addr_t last_ram_offset(void);
 void qemu_mutex_lock_ramlist(void);
 void qemu_mutex_unlock_ramlist(void);

-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                     bool share, const char *mem_path,
                                     Error **errp);
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                    MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                      void (*resized)(const char*,
                                                      uint64_t length,
                                                      void *host),
                                      MemoryRegion *mr, Error **errp);
 int qemu_get_ram_fd(ram_addr_t addr);
 void qemu_set_ram_fd(ram_addr_t addr, int fd);
 void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
-void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_free(RAMBlock *block);

 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

kvm-all.c | 37
@@ -89,7 +89,7 @@ struct KVMState
 #ifdef KVM_CAP_IRQ_ROUTING
     struct kvm_irq_routing *irq_routes;
     int nr_allocated_irq_routes;
-    uint32_t *used_gsi_bitmap;
+    unsigned long *used_gsi_bitmap;
     unsigned int gsi_count;
     QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 #endif
@@ -366,7 +366,8 @@ static void kvm_log_stop(MemoryListener *listener,
 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                          unsigned long *bitmap)
 {
-    ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
+    ram_addr_t start = section->offset_within_region +
+                       memory_region_get_ram_addr(section->mr);
     ram_addr_t pages = int128_get64(section->size) / getpagesize();

     cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
@@ -950,12 +951,12 @@ typedef struct KVMMSIRoute {

 static void set_gsi(KVMState *s, unsigned int gsi)
 {
-    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
+    set_bit(gsi, s->used_gsi_bitmap);
 }

 static void clear_gsi(KVMState *s, unsigned int gsi)
 {
-    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
+    clear_bit(gsi, s->used_gsi_bitmap);
 }

 void kvm_init_irq_routing(KVMState *s)
@@ -964,17 +965,9 @@ void kvm_init_irq_routing(KVMState *s)

     gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
     if (gsi_count > 0) {
-        unsigned int gsi_bits, i;
-
         /* Round up so we can search ints using ffs */
-        gsi_bits = ALIGN(gsi_count, 32);
-        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
+        s->used_gsi_bitmap = bitmap_new(gsi_count);
         s->gsi_count = gsi_count;
-
-        /* Mark any over-allocated bits as already in use */
-        for (i = gsi_count; i < gsi_bits; i++) {
-            set_gsi(s, i);
-        }
     }

     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
@@ -1104,9 +1097,7 @@ static void kvm_flush_dynamic_msi_routes(KVMState *s)

 static int kvm_irqchip_get_virq(KVMState *s)
 {
-    uint32_t *word = s->used_gsi_bitmap;
-    int max_words = ALIGN(s->gsi_count, 32) / 32;
-    int i, zeroes;
+    int next_virq;

     /*
      * PIC and IOAPIC share the first 16 GSI numbers, thus the available
@@ -1119,16 +1110,12 @@ static int kvm_irqchip_get_virq(KVMState *s)
     }

     /* Return the lowest unused GSI in the bitmap */
-    for (i = 0; i < max_words; i++) {
-        zeroes = ctz32(~word[i]);
-        if (zeroes == 32) {
-            continue;
-        }
-
-        return zeroes + i * 32;
+    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
+    if (next_virq >= s->gsi_count) {
+        return -ENOSPC;
+    } else {
+        return next_virq;
     }
-    return -ENOSPC;
-
 }

 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
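The bitmap helpers adopted above come from qemu/bitmap.h and qemu/bitops.h. A self-contained sketch of the pattern now used for GSI tracking follows (the 24-entry size and the demo function are invented for illustration):

#include "qemu/osdep.h"
#include "qemu/bitmap.h"
#include "qemu/bitops.h"

static void gsi_bitmap_demo(void)
{
    long nbits = 24;                        /* stands in for gsi_count */
    unsigned long *map = bitmap_new(nbits); /* zero-filled allocation */

    set_bit(3, map);                        /* mark GSI 3 as in use */
    clear_bit(3, map);                      /* release it again */

    /* lowest unused GSI; returns >= nbits when the bitmap is full */
    unsigned long next = find_first_zero_bit(map, nbits);
    if (next >= (unsigned long)nbits) {
        /* no free GSI: kvm_irqchip_get_virq() maps this to -ENOSPC */
    }
    g_free(map);
}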
memory.c | 56
@@ -902,12 +902,12 @@ static void memory_region_destructor_none(MemoryRegion *mr)

 static void memory_region_destructor_ram(MemoryRegion *mr)
 {
-    qemu_ram_free(mr->ram_addr);
+    qemu_ram_free(mr->ram_block);
 }

 static void memory_region_destructor_rom_device(MemoryRegion *mr)
 {
-    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
+    qemu_ram_free(mr->ram_block);
 }

 static bool memory_region_need_escape(char c)
@@ -1038,7 +1038,6 @@ static void memory_region_initfn(Object *obj)
     ObjectProperty *op;

     mr->ops = &unassigned_mem_ops;
-    mr->ram_addr = RAM_ADDR_INVALID;
     mr->enabled = true;
     mr->romd_mode = true;
     mr->global_locking = true;
@@ -1274,7 +1273,7 @@ void memory_region_init_ram(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, mr, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }

@@ -1292,7 +1291,8 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
+    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
+                                              mr, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }

@@ -1309,7 +1309,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr,
     mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
-    mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
+    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
     mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
 }
 #endif
@@ -1328,7 +1328,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,

     /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
     assert(ptr != NULL);
-    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
+    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
 }

 void memory_region_set_skip_dump(MemoryRegion *mr)
@@ -1362,7 +1362,7 @@ void memory_region_init_rom_device(MemoryRegion *mr,
     mr->terminates = true;
     mr->rom_device = true;
     mr->destructor = memory_region_destructor_rom_device;
-    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
+    mr->ram_block = qemu_ram_alloc(size, mr, errp);
 }

 void memory_region_init_iommu(MemoryRegion *mr,
@@ -1527,24 +1527,26 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                              hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
+    assert(mr->ram_block);
+    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
+                                         size, client);
 }

 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                              hwaddr size)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size,
+    assert(mr->ram_block);
+    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
+                                        size,
                                         memory_region_get_dirty_log_mask(mr));
 }

 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                         hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr,
-                                                    size, client);
+    assert(mr->ram_block);
+    return cpu_physical_memory_test_and_clear_dirty(
+                memory_region_get_ram_addr(mr) + addr, size, client);
 }


@@ -1587,9 +1589,9 @@ void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                                hwaddr size, unsigned client)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size,
-                                             client);
+    assert(mr->ram_block);
+    cpu_physical_memory_test_and_clear_dirty(
+        memory_region_get_ram_addr(mr) + addr, size, client);
 }

 int memory_region_get_fd(MemoryRegion *mr)
@@ -1598,9 +1600,9 @@ int memory_region_get_fd(MemoryRegion *mr)
         return memory_region_get_fd(mr->alias);
     }

-    assert(mr->ram_addr != RAM_ADDR_INVALID);
+    assert(mr->ram_block);

-    return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
+    return qemu_get_ram_fd(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
 }

 void *memory_region_get_ram_ptr(MemoryRegion *mr)
@@ -1613,18 +1615,24 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
         offset += mr->alias_offset;
         mr = mr->alias;
     }
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
-    ptr = qemu_get_ram_ptr(mr->ram_block, mr->ram_addr & TARGET_PAGE_MASK);
+    assert(mr->ram_block);
+    ptr = qemu_get_ram_ptr(mr->ram_block,
+                           memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
     rcu_read_unlock();

     return ptr + offset;
 }

+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
+{
+    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
+}
+
 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
 {
-    assert(mr->ram_addr != RAM_ADDR_INVALID);
+    assert(mr->ram_block);

-    qemu_ram_resize(mr->ram_addr, newsize, errp);
+    qemu_ram_resize(memory_region_get_ram_addr(mr), newsize, errp);
 }

 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
qemu-options.hx
@@ -3276,7 +3276,7 @@ re-inject them.
 ETEXI

 DEF("icount", HAS_ARG, QEMU_OPTION_icount, \
-    "-icount [shift=N|auto][,align=on|off][,sleep=no,rr=record|replay,rrfile=<filename>]\n" \
+    "-icount [shift=N|auto][,align=on|off][,sleep=on|off,rr=record|replay,rrfile=<filename>]\n" \
     "                enable virtual instruction counter with 2^N clock ticks per\n" \
     "                instruction, enable aligning the host and virtual clocks\n" \
     "                or disable real time cpu sleeping\n", QEMU_ARCH_ALL)
@@ -3289,8 +3289,8 @@ then the virtual cpu speed will be automatically adjusted to keep virtual
 time within a few seconds of real time.

 When the virtual cpu is sleeping, the virtual time will advance at default
-speed unless @option{sleep=no} is specified.
-With @option{sleep=no}, the virtual time will jump to the next timer deadline
+speed unless @option{sleep=on|off} is specified.
+With @option{sleep=on|off}, the virtual time will jump to the next timer deadline
 instantly whenever the virtual cpu goes to sleep mode and will not advance
 if no timer is enabled. This behavior give deterministic execution times from
 the guest point of view.
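For the record, with the renamed option a deterministic run with a fixed shift and no real-time sleeping would be requested as, for example:

    qemu-system-i386 -icount shift=7,sleep=off disk.img

This invocation is an editor's illustration, not taken from the patch; note that, per the checks in cpus.c above, sleep=off cannot be combined with shift=auto or align=on.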
scripts/dump-guest-memory.py
@@ -352,7 +352,7 @@ def memory_region_get_ram_ptr(memory_region):
         return (memory_region_get_ram_ptr(memory_region["alias"].dereference())
                 + memory_region["alias_offset"])

-    return qemu_get_ram_ptr(memory_region["ram_addr"] & TARGET_PAGE_MASK)
+    return qemu_get_ram_ptr(memory_region["ram_block"]["offset"])


 def get_guest_phys_blocks():
@@ -56,7 +56,8 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
 #ifdef CONFIG_TRACE_LOG
     qemu_loglevel |= LOG_TRACE;
 #endif
-    if ((qemu_loglevel || is_daemonized()) && !qemu_logfile) {
+    if (!qemu_logfile &&
+        (is_daemonized() ? logfilename != NULL : qemu_loglevel)) {
         if (logfilename) {
             qemu_logfile = fopen(logfilename, log_append ? "a" : "w");
             if (!qemu_logfile) {
@@ -72,6 +73,7 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
             }
         } else {
             /* Default to stderr if no log file specified */
+            assert(!is_daemonized());
             qemu_logfile = stderr;
         }
         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
@@ -89,7 +91,8 @@ void do_qemu_set_log(int log_flags, bool use_own_buffers)
             log_append = 1;
         }
     }
-    if (!qemu_loglevel && !is_daemonized() && qemu_logfile) {
+    if (qemu_logfile &&
+        (is_daemonized() ? logfilename == NULL : !qemu_loglevel)) {
         qemu_log_close();
     }
 }
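Restated as a table, the logging policy after this change (an editor's paraphrase of the hunks above):

                      -D <file> given        -D absent
  not -daemonize      log to <file>          log to stderr, iff any log
                                             flags are enabled
  -daemonize          log to <file>          no logging at all; stderr is
                                             never used (hence the new
                                             assert(!is_daemonized()))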