- miscellaneous cleanups for TCG (Emilio) and NBD (Bogdan)
- next part in the thread-safe address_space_* saga: atomic access to
  the bounce buffer and the map_clients list, from Fam
- optional support for linking with tcmalloc, also from Fam
- reapplying Peter Crosthwaite's "Respect as_translate_internal length
  clamp" after fixing the SPARC fallout.
- build system fix from Wei Liu
- small acpi-build and ioport cleanup by myself

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQEcBAABCAAGBQJVQJd4AAoJEL/70l94x66DYFYH/3ifhqWZsd4dfJri0CGAHI4i
SpPmNeouc8W+F/3lwf6Inrh5NnTgd5QzoUBMQaWVkQKwUiWls8g2mXkT3jo0iDqT
/B40YXnZjNm20MixNaZmk9AsOF6OqPM8EMufau874k5zTlx3tCGAW1QD+I1N7WK7
DfsFsIUD1svo2prn55fSoitMG1TIVPnpcklb4YGJRbAacQYUDhr5KAIhT1quDR2R
93BvToyQmPqRQ4YKqnJLp8HAkL4FaJumfFZVvyh2cZvyaYGN/RVdi2Dw985dJDPX
/z4enE4GCAs4RDw3lZ1RDbiZDqpT2ibFgASg/arX3SxzqHirOGvMdkOjO99r9j4=
=aLjh
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

- miscellaneous cleanups for TCG (Emilio) and NBD (Bogdan)
- next part in the thread-safe address_space_* saga: atomic access to
  the bounce buffer and the map_clients list, from Fam
- optional support for linking with tcmalloc, also from Fam
- reapplying Peter Crosthwaite's "Respect as_translate_internal length
  clamp" after fixing the SPARC fallout.
- build system fix from Wei Liu
- small acpi-build and ioport cleanup by myself

# gpg: Signature made Wed Apr 29 09:34:00 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (22 commits)
  nbd/trivial: fix type cast for ioctl
  translate-all: use bitmap helpers for PageDesc's bitmap
  target-i386: disable LINT0 after reset
  Makefile.target: prepend $libs_softmmu to $LIBS
  milkymist: do not modify libs-softmmu
  configure: Add support for tcmalloc
  exec: Respect as_translate_internal length clamp
  ioport: reserve the whole range of an I/O port in the AddressSpace
  ioport: loosen assertions on emulation of 16-bit ports
  ioport: remove wrong comment
  ide: there is only one data port
  gus: clean up MemoryRegionPortio
  sb16: remove useless mixer_write_indexw
  sun4m: fix slavio sysctrl and led register sizes
  acpi-build: remove dependency from ram_addr.h
  memory: add memory_region_ram_resize
  dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
  exec: Notify cpu_register_map_client caller if the bounce buffer is available
  exec: Protect map_client_list with mutex
  linux-user, bsd-user: Remove two calls to cpu_exec_init_all
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 06feaacfb4
Makefile.target

@@ -134,7 +134,7 @@ obj-$(CONFIG_KVM) += kvm-all.o
 obj-y += memory.o savevm.o cputlb.o
 obj-y += memory_mapping.o
 obj-y += dump.o
-LIBS+=$(libs_softmmu)
+LIBS := $(libs_softmmu) $(LIBS)
 
 # xen support
 obj-$(CONFIG_XEN) += xen-common.o
bsd-user/main.c

@@ -905,7 +905,6 @@ int main(int argc, char **argv)
 #endif
     }
     tcg_exec_init(0);
-    cpu_exec_init_all();
     /* NOTE: we need to init the CPU at this stage to get
        qemu_host_page_size */
     cpu = cpu_init(cpu_model);
configure

@@ -336,6 +336,7 @@ libssh2=""
 vhdx=""
 quorum=""
 numa=""
+tcmalloc="no"
 
 # parse CC options first
 for opt do
@@ -1134,6 +1135,10 @@ for opt do
   ;;
   --enable-numa) numa="yes"
   ;;
+  --disable-tcmalloc) tcmalloc="no"
+  ;;
+  --enable-tcmalloc) tcmalloc="yes"
+  ;;
   *)
       echo "ERROR: unknown option $opt"
       echo "Try '$0 --help' for more information"
@@ -1407,6 +1412,8 @@ Advanced options (experts only):
   --enable-quorum          enable quorum block filter support
   --disable-numa           disable libnuma support
   --enable-numa            enable libnuma support
+  --disable-tcmalloc       disable tcmalloc support
+  --enable-tcmalloc        enable tcmalloc support
 
 NOTE: The object files are built at the place where configure is launched
 EOF
@@ -3330,6 +3337,22 @@ EOF
   fi
 fi
 
+##########################################
+# tcmalloc probe
+
+if test "$tcmalloc" = "yes" ; then
+  cat > $TMPC << EOF
+#include <stdlib.h>
+int main(void) { malloc(1); return 0; }
+EOF
+
+  if compile_prog "" "-ltcmalloc" ; then
+    LIBS="-ltcmalloc $LIBS"
+  else
+    feature_not_found "tcmalloc" "install gperftools devel"
+  fi
+fi
+
 ##########################################
 # signalfd probe
 signalfd="no"
@@ -4441,6 +4464,7 @@ echo "lzo support       $lzo"
 echo "snappy support    $snappy"
 echo "bzip2 support     $bzip2"
 echo "NUMA host support $numa"
+echo "tcmalloc support  $tcmalloc"
 
 if test "$sdl_too_old" = "yes"; then
 echo "-> Your SDL version is too old - please upgrade to have SDL support"
dma-helpers.c

@@ -93,14 +93,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -162,7 +154,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -184,6 +178,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
    }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
 
exec.c

@@ -380,7 +380,6 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
     IOMMUTLBEntry iotlb;
     MemoryRegionSection *section;
     MemoryRegion *mr;
-    hwaddr len = *plen;
 
     rcu_read_lock();
     for (;;) {
@@ -395,7 +394,7 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
         addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                 | (addr & iotlb.addr_mask));
-        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
+        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
         if (!(iotlb.perm & (1 << is_write))) {
             mr = &io_mem_unassigned;
             break;
@@ -406,10 +405,9 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
 
     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
-        len = MIN(page, len);
+        *plen = MIN(page, *plen);
     }
 
-    *plen = len;
     *xlat = addr;
     rcu_read_unlock();
     return mr;
@@ -429,15 +427,6 @@ address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
 }
 #endif
 
-void cpu_exec_init_all(void)
-{
-#if !defined(CONFIG_USER_ONLY)
-    qemu_mutex_init(&ram_list.mutex);
-    memory_map_init();
-    io_mem_init();
-#endif
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
 static int cpu_common_post_load(void *opaque, int version_id)
@@ -2518,48 +2507,79 @@ typedef struct {
     void *buffer;
     hwaddr addr;
     hwaddr len;
+    bool in_use;
 } BounceBuffer;
 
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
+QemuMutex map_client_list_lock;
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+static void cpu_unregister_map_client_do(MapClient *client)
 {
-    MapClient *client = g_malloc(sizeof(*client));
-
-    client->opaque = opaque;
-    client->callback = callback;
-    QLIST_INSERT_HEAD(&map_client_list, client, link);
-    return client;
-}
-
-static void cpu_unregister_map_client(void *_client)
-{
-    MapClient *client = (MapClient *)_client;
-
     QLIST_REMOVE(client, link);
     g_free(client);
 }
 
-static void cpu_notify_map_clients(void)
+static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
+void cpu_register_map_client(QEMUBH *bh)
+{
+    MapClient *client = g_malloc(sizeof(*client));
+
+    qemu_mutex_lock(&map_client_list_lock);
+    client->bh = bh;
+    QLIST_INSERT_HEAD(&map_client_list, client, link);
+    if (!atomic_read(&bounce.in_use)) {
+        cpu_notify_map_clients_locked();
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
+void cpu_exec_init_all(void)
+{
+    qemu_mutex_init(&ram_list.mutex);
+    memory_map_init();
+    io_mem_init();
+    qemu_mutex_init(&map_client_list_lock);
+}
+
+void cpu_unregister_map_client(QEMUBH *bh)
+{
+    MapClient *client;
+
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
+static void cpu_notify_map_clients(void)
+{
+    qemu_mutex_lock(&map_client_list_lock);
+    cpu_notify_map_clients_locked();
+    qemu_mutex_unlock(&map_client_list_lock);
+}
+
 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
 {
     MemoryRegion *mr;
@@ -2606,7 +2626,7 @@ void *address_space_map(AddressSpace *as,
     l = len;
     mr = address_space_translate(as, addr, &xlat, &l, is_write);
     if (!memory_access_is_direct(mr, is_write)) {
-        if (bounce.buffer) {
+        if (atomic_xchg(&bounce.in_use, true)) {
             return NULL;
         }
         /* Avoid unbounded allocations */
@@ -2678,6 +2698,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
         qemu_vfree(bounce.buffer);
         bounce.buffer = NULL;
         memory_region_unref(bounce.mr);
+        atomic_mb_set(&bounce.in_use, false);
         cpu_notify_map_clients();
     }
 
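A minimal sketch of how a caller uses the new QEMUBH-based map-client API shown in the exec.c and dma-helpers.c hunks above; the MyMapJob type and the my_map_job_* names are hypothetical, while address_space_map, aio_bh_new, qemu_get_aio_context, cpu_register_map_client, cpu_unregister_map_client and qemu_bh_delete are the interfaces appearing in this series:

/* Hedged sketch, assuming a hypothetical MyMapJob user of the new API. */
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "block/aio.h"

typedef struct MyMapJob {
    AddressSpace *as;
    hwaddr addr, len;
    bool is_write;
    QEMUBH *bh;
} MyMapJob;

static void my_map_job_run(void *opaque);

/* BH callback: the bounce buffer was released, so retry the mapping. */
static void my_map_job_retry(void *opaque)
{
    my_map_job_run(opaque);
}

static void my_map_job_run(void *opaque)
{
    MyMapJob *job = opaque;
    hwaddr len = job->len;
    void *buf = address_space_map(job->as, job->addr, &len, job->is_write);

    if (!buf) {
        /* Bounce buffer in use: queue a BH; exec.c schedules it from
         * cpu_notify_map_clients() once address_space_unmap() runs. */
        job->bh = aio_bh_new(qemu_get_aio_context(), my_map_job_retry, job);
        cpu_register_map_client(job->bh);
        return;
    }
    /* ... access buf, then address_space_unmap(job->as, buf, len, ...) ... */
}

static void my_map_job_cancel(MyMapJob *job)
{
    if (job->bh) {
        /* Unregister before deleting the BH, as dma_aio_cancel() now does. */
        cpu_unregister_map_client(job->bh);
        qemu_bh_delete(job->bh);
        job->bh = NULL;
    }
}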
hw/audio/gus.c

@@ -71,13 +71,6 @@ IO_READ_PROTO (gus_readb)
     return gus_read (&s->emu, nport, 1);
 }
 
-IO_READ_PROTO (gus_readw)
-{
-    GUSState *s = opaque;
-
-    return gus_read (&s->emu, nport, 2);
-}
-
 IO_WRITE_PROTO (gus_writeb)
 {
     GUSState *s = opaque;
@@ -85,13 +78,6 @@ IO_WRITE_PROTO (gus_writeb)
     gus_write (&s->emu, nport, 1, val);
 }
 
-IO_WRITE_PROTO (gus_writew)
-{
-    GUSState *s = opaque;
-
-    gus_write (&s->emu, nport, 2, val);
-}
-
 static int write_audio (GUSState *s, int samples)
 {
     int net = 0;
@@ -236,17 +222,13 @@ static const VMStateDescription vmstate_gus = {
 
 static const MemoryRegionPortio gus_portio_list1[] = {
     {0x000, 1, 1, .write = gus_writeb },
-    {0x000, 1, 2, .write = gus_writew },
     {0x006, 10, 1, .read = gus_readb, .write = gus_writeb },
-    {0x006, 10, 2, .read = gus_readw, .write = gus_writew },
     {0x100, 8, 1, .read = gus_readb, .write = gus_writeb },
-    {0x100, 8, 2, .read = gus_readw, .write = gus_writew },
     PORTIO_END_OF_LIST (),
 };
 
 static const MemoryRegionPortio gus_portio_list2[] = {
-    {0, 1, 1, .read = gus_readb },
-    {0, 1, 2, .read = gus_readw },
+    {0, 2, 1, .read = gus_readb },
     PORTIO_END_OF_LIST (),
 };
hw/audio/sb16.c

@@ -1121,12 +1121,6 @@ static IO_WRITE_PROTO (mixer_write_datab)
     s->mixer_regs[s->mixer_nreg] = val;
 }
 
-static IO_WRITE_PROTO (mixer_write_indexw)
-{
-    mixer_write_indexb (opaque, nport, val & 0xff);
-    mixer_write_datab (opaque, nport, (val >> 8) & 0xff);
-}
-
 static IO_READ_PROTO (mixer_read)
 {
     SB16State *s = opaque;
@@ -1345,7 +1339,6 @@ static const VMStateDescription vmstate_sb16 = {
 
 static const MemoryRegionPortio sb16_ioport_list[] = {
     {  4, 1, 1, .write = mixer_write_indexb },
-    {  4, 1, 2, .write = mixer_write_indexw },
     {  5, 1, 1, .read = mixer_read, .write = mixer_write_datab },
     {  6, 1, 1, .read = dsp_read, .write = dsp_write },
     { 10, 1, 1, .read = dsp_read },
hw/core/loader.c

@@ -835,12 +835,12 @@ err:
     return -1;
 }
 
-ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
+MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
                    size_t max_len, hwaddr addr, const char *fw_file_name,
                    FWCfgReadCallback fw_callback, void *callback_opaque)
 {
     Rom *rom;
-    ram_addr_t ret = RAM_ADDR_MAX;
+    MemoryRegion *mr = NULL;
 
     rom = g_malloc0(sizeof(*rom));
     rom->name = g_strdup(name);
@@ -858,7 +858,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
 
     if (rom_file_has_mr) {
         data = rom_set_mr(rom, OBJECT(fw_cfg), devpath);
-        ret = memory_region_get_ram_addr(rom->mr);
+        mr = rom->mr;
     } else {
         data = rom->data;
     }
@@ -867,7 +867,7 @@ ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
                                  fw_callback, callback_opaque,
                                  data, rom->datasize);
     }
-    return ret;
+    return mr;
 }
 
 /* This function is specific for elf program because we don't need to allocate
hw/display/Makefile.objs

@@ -21,7 +21,7 @@ common-obj-$(CONFIG_ZAURUS) += tc6393xb.o
 ifeq ($(CONFIG_MILKYMIST_TMU2),y)
 common-obj-y += milkymist-tmu2.o
 milkymist-tmu2.o-cflags := $(OPENGL_CFLAGS)
-libs_softmmu += $(OPENGL_LIBS)
+milkymist-tmu2.o-libs += $(OPENGL_LIBS)
 endif
 
 obj-$(CONFIG_OMAP) += omap_dss.o
hw/i386/acpi-build.c

@@ -58,7 +58,6 @@
 
 #include "qapi/qmp/qint.h"
 #include "qom/qom-qobject.h"
-#include "exec/ram_addr.h"
 
 /* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
  * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
@@ -1323,13 +1322,13 @@ static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
 typedef
 struct AcpiBuildState {
     /* Copy of table in RAM (for patching). */
-    ram_addr_t table_ram;
+    MemoryRegion *table_mr;
     /* Is table patched? */
     uint8_t patched;
     PcGuestInfo *guest_info;
     void *rsdp;
-    ram_addr_t rsdp_ram;
-    ram_addr_t linker_ram;
+    MemoryRegion *rsdp_mr;
+    MemoryRegion *linker_mr;
 } AcpiBuildState;
 
 static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
@@ -1513,15 +1512,15 @@ void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
     g_array_free(table_offsets, true);
 }
 
-static void acpi_ram_update(ram_addr_t ram, GArray *data)
+static void acpi_ram_update(MemoryRegion *mr, GArray *data)
 {
     uint32_t size = acpi_data_len(data);
 
     /* Make sure RAM size is correct - in case it got changed e.g. by migration */
-    qemu_ram_resize(ram, size, &error_abort);
+    memory_region_ram_resize(mr, size, &error_abort);
 
-    memcpy(qemu_get_ram_ptr(ram), data->data, size);
-    cpu_physical_memory_set_dirty_range_nocode(ram, size);
+    memcpy(memory_region_get_ram_ptr(mr), data->data, size);
+    memory_region_set_dirty(mr, 0, size);
 }
 
 static void acpi_build_update(void *build_opaque, uint32_t offset)
@@ -1539,15 +1538,15 @@ static void acpi_build_update(void *build_opaque, uint32_t offset)
 
     acpi_build(build_state->guest_info, &tables);
 
-    acpi_ram_update(build_state->table_ram, tables.table_data);
+    acpi_ram_update(build_state->table_mr, tables.table_data);
 
     if (build_state->rsdp) {
         memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp));
     } else {
-        acpi_ram_update(build_state->rsdp_ram, tables.rsdp);
+        acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
     }
 
-    acpi_ram_update(build_state->linker_ram, tables.linker);
+    acpi_ram_update(build_state->linker_mr, tables.linker);
     acpi_build_tables_cleanup(&tables, true);
 }
 
@@ -1557,8 +1556,9 @@ static void acpi_build_reset(void *build_opaque)
     build_state->patched = 0;
 }
 
-static ram_addr_t acpi_add_rom_blob(AcpiBuildState *build_state, GArray *blob,
-                                    const char *name, uint64_t max_size)
+static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
+                                       GArray *blob, const char *name,
+                                       uint64_t max_size)
 {
     return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
                         name, acpi_build_update, build_state);
@@ -1604,12 +1604,12 @@ void acpi_setup(PcGuestInfo *guest_info)
     acpi_build(build_state->guest_info, &tables);
 
     /* Now expose it all to Guest */
-    build_state->table_ram = acpi_add_rom_blob(build_state, tables.table_data,
+    build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
                                                ACPI_BUILD_TABLE_FILE,
                                                ACPI_BUILD_TABLE_MAX_SIZE);
-    assert(build_state->table_ram != RAM_ADDR_MAX);
+    assert(build_state->table_mr != NULL);
 
-    build_state->linker_ram =
+    build_state->linker_mr =
         acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0);
 
     fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
@@ -1627,10 +1627,10 @@ void acpi_setup(PcGuestInfo *guest_info)
         fw_cfg_add_file_callback(guest_info->fw_cfg, ACPI_BUILD_RSDP_FILE,
                                  acpi_build_update, build_state,
                                  build_state->rsdp, rsdp_size);
-        build_state->rsdp_ram = (ram_addr_t)-1;
+        build_state->rsdp_mr = NULL;
     } else {
         build_state->rsdp = NULL;
-        build_state->rsdp_ram = acpi_add_rom_blob(build_state, tables.rsdp,
+        build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
                                                   ACPI_BUILD_RSDP_FILE, 0);
     }
 
hw/ide/core.c

@@ -2436,8 +2436,8 @@ void ide_init2(IDEBus *bus, qemu_irq irq)
 
 static const MemoryRegionPortio ide_portio_list[] = {
     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
-    { 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew },
-    { 0, 4, 4, .read = ide_data_readl, .write = ide_data_writel },
+    { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
+    { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
     PORTIO_END_OF_LIST(),
 };
 
hw/intc/apic_common.c

@@ -243,15 +243,6 @@ static void apic_reset_common(DeviceState *dev)
     info->vapic_base_update(s);
 
     apic_init_reset(dev);
-
-    if (bsp) {
-        /*
-         * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization
-         * time typically by BIOS, so PIC interrupt can be delivered to the
-         * processor when local APIC is enabled.
-         */
-        s->lvt[APIC_LVT_LINT0] = 0x700;
-    }
 }
 
 /* This function is only used for old state version 1 and 2 */
hw/misc/slavio_misc.c

@@ -68,6 +68,7 @@ typedef struct APCState {
 } APCState;
 
 #define MISC_SIZE 1
+#define LED_SIZE 2
 #define SYSCTRL_SIZE 4
 
 #define AUX1_TC 0x02
@@ -452,13 +453,13 @@ static int slavio_misc_init1(SysBusDevice *sbd)
     /* 16 bit registers */
     /* ss600mp diag LEDs */
     memory_region_init_io(&s->led_iomem, OBJECT(s), &slavio_led_mem_ops, s,
-                          "leds", MISC_SIZE);
+                          "leds", LED_SIZE);
     sysbus_init_mmio(sbd, &s->led_iomem);
 
     /* 32 bit registers */
     /* System control */
     memory_region_init_io(&s->sysctrl_iomem, OBJECT(s), &slavio_sysctrl_mem_ops, s,
-                          "system-control", MISC_SIZE);
+                          "system-control", SYSCTRL_SIZE);
     sysbus_init_mmio(sbd, &s->sysctrl_iomem);
 
     /* AUX 1 (Misc System Functions) */
include/exec/cpu-common.h

@@ -82,7 +82,8 @@ void *cpu_physical_memory_map(hwaddr addr,
                               int is_write);
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                                int is_write, hwaddr access_len);
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+void cpu_register_map_client(QEMUBH *bh);
+void cpu_unregister_map_client(QEMUBH *bh);
 
 bool cpu_physical_memory_is_io(hwaddr phys_addr);
 
include/exec/memory.h

@@ -627,6 +627,18 @@ int memory_region_get_fd(MemoryRegion *mr);
  */
 void *memory_region_get_ram_ptr(MemoryRegion *mr);
 
+/* memory_region_ram_resize: Resize a RAM region.
+ *
+ * Only legal before guest might have detected the memory size: e.g. on
+ * incoming migration, or right after reset.
+ *
+ * @mr: a memory region created with @memory_region_init_resizeable_ram.
+ * @newsize: the new size the region
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
+                              Error **errp);
+
 /**
  * memory_region_set_log: Turn dirty logging on or off for a region.
  *
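A minimal sketch of the intended call pattern for the new helper, essentially what acpi_ram_update() in the acpi-build.c hunk above does; update_blob_region() and its parameters are hypothetical names, the memory API calls are the ones declared in this header:

#include "exec/memory.h"
#include "qapi/error.h"
#include <string.h>

/* Refresh a guest-visible blob held in a resizeable RAM region. */
static void update_blob_region(MemoryRegion *mr, const void *data, uint64_t size)
{
    /* Only legal while the guest cannot have observed the old size,
     * e.g. on incoming migration or right after reset. */
    memory_region_ram_resize(mr, size, &error_abort);

    memcpy(memory_region_get_ram_ptr(mr), data, size);
    memory_region_set_dirty(mr, 0, size);
}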
include/hw/loader.h

@@ -68,9 +68,11 @@ extern bool rom_file_has_mr;
 int rom_add_file(const char *file, const char *fw_dir,
                  hwaddr addr, int32_t bootindex,
                  bool option_rom);
-ram_addr_t rom_add_blob(const char *name, const void *blob, size_t len,
-                        size_t max_len, hwaddr addr, const char *fw_file_name,
-                        FWCfgReadCallback fw_callback, void *callback_opaque);
+MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
+                           size_t max_len, hwaddr addr,
+                           const char *fw_file_name,
+                           FWCfgReadCallback fw_callback,
+                           void *callback_opaque);
 int rom_add_elf_program(const char *name, void *data, size_t datasize,
                         size_t romsize, hwaddr addr);
 int rom_load_all(void);
ioport.c

@@ -191,9 +191,14 @@ static uint64_t portio_read(void *opaque, hwaddr addr, unsigned size)
         data = mrp->read(mrpio->portio_opaque, mrp->base + addr);
     } else if (size == 2) {
         mrp = find_portio(mrpio, addr, 1, false);
-        assert(mrp);
-        data = mrp->read(mrpio->portio_opaque, mrp->base + addr) |
-                (mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8);
+        if (mrp) {
+            data = mrp->read(mrpio->portio_opaque, mrp->base + addr);
+            if (addr + 1 < mrp->offset + mrp->len) {
+                data |= mrp->read(mrpio->portio_opaque, mrp->base + addr + 1) << 8;
+            } else {
+                data |= 0xff00;
+            }
+        }
     }
     return data;
 }
@@ -208,11 +213,14 @@ static void portio_write(void *opaque, hwaddr addr, uint64_t data,
         mrp->write(mrpio->portio_opaque, mrp->base + addr, data);
     } else if (size == 2) {
         mrp = find_portio(mrpio, addr, 1, true);
-        assert(mrp);
+        if (mrp) {
             mrp->write(mrpio->portio_opaque, mrp->base + addr, data & 0xff);
+            if (addr + 1 < mrp->offset + mrp->len) {
                 mrp->write(mrpio->portio_opaque, mrp->base + addr + 1, data >> 8);
+            }
+        }
     }
 }
 
 static const MemoryRegionOps portio_ops = {
     .read = portio_read,
@@ -243,10 +251,6 @@ static void portio_list_add_1(PortioList *piolist,
         mrpio->ports[i].base = start + off_low;
     }
 
-    /*
-     * Use an alias so that the callback is called with an absolute address,
-     * rather than an offset relative to to start + off_low.
-     */
     memory_region_init_io(&mrpio->mr, piolist->owner, &portio_ops, mrpio,
                           piolist->name, off_high - off_low);
     if (piolist->flush_coalesced_mmio) {
@@ -269,7 +273,7 @@ void portio_list_add(PortioList *piolist,
 
     /* Handle the first entry specially.  */
     off_last = off_low = pio_start->offset;
-    off_high = off_low + pio_start->len;
+    off_high = off_low + pio_start->len + pio_start->size - 1;
     count = 1;
 
     for (pio = pio_start + 1; pio->size != 0; pio++, count++) {
@@ -284,10 +288,10 @@ void portio_list_add(PortioList *piolist,
             /* ... and start collecting anew.  */
             pio_start = pio;
             off_low = off_last;
-            off_high = off_low + pio->len;
+            off_high = off_low + pio->len + pio_start->size - 1;
             count = 0;
         } else if (off_last + pio->len > off_high) {
-            off_high = off_last + pio->len;
+            off_high = off_last + pio->len + pio_start->size - 1;
         }
     }
 
linux-user/main.c

@@ -3934,7 +3934,6 @@ int main(int argc, char **argv, char **envp)
 #endif
     }
     tcg_exec_init(0);
-    cpu_exec_init_all();
     /* NOTE: we need to init the CPU at this stage to get
        qemu_host_page_size */
     cpu = cpu_init(cpu_model);
memory.c

@@ -1523,6 +1523,13 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr)
     return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
 }
 
+void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
+{
+    assert(mr->terminates);
+
+    qemu_ram_resize(mr->ram_addr, newsize, errp);
+}
+
 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
 {
     FlatView *view;
nbd.c

@@ -681,7 +681,7 @@ int nbd_init(int fd, int csock, uint32_t flags, off_t size)
 
     TRACE("Setting size to %zd block(s)", (size_t)(size / BDRV_SECTOR_SIZE));
 
-    if (ioctl(fd, NBD_SET_SIZE_BLOCKS, size / (size_t)BDRV_SECTOR_SIZE) < 0) {
+    if (ioctl(fd, NBD_SET_SIZE_BLOCKS, (size_t)(size / BDRV_SECTOR_SIZE)) < 0) {
         int serrno = errno;
         LOG("Failed setting size (in blocks)");
         return -serrno;
translate-all.c

@@ -59,6 +59,7 @@
 
 #include "exec/cputlb.h"
 #include "translate-all.h"
+#include "qemu/bitmap.h"
 #include "qemu/timer.h"
 
 //#define DEBUG_TB_INVALIDATE
@@ -79,7 +80,7 @@ typedef struct PageDesc {
     /* in order to optimize self modifying code, we count the number
        of lookups we do to a given page to use a bitmap */
     unsigned int code_write_count;
-    uint8_t *code_bitmap;
+    unsigned long *code_bitmap;
 #if defined(CONFIG_USER_ONLY)
     unsigned long flags;
 #endif
@@ -389,18 +390,6 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     void **lp;
     int i;
 
-#if defined(CONFIG_USER_ONLY)
-    /* We can't use g_malloc because it may recurse into a locked mutex. */
-# define ALLOC(P, SIZE)                                 \
-    do {                                                \
-        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
-                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
-    } while (0)
-#else
-# define ALLOC(P, SIZE) \
-    do { P = g_malloc0(SIZE); } while (0)
-#endif
-
     /* Level 1.  Always allocated.  */
     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
 
@@ -412,7 +401,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
         if (!alloc) {
             return NULL;
         }
-        ALLOC(p, sizeof(void *) * V_L2_SIZE);
+        p = g_new0(void *, V_L2_SIZE);
         *lp = p;
     }
 
@@ -424,12 +413,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
         if (!alloc) {
             return NULL;
         }
-        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
+        pd = g_new0(PageDesc, V_L2_SIZE);
         *lp = pd;
     }
 
-#undef ALLOC
-
     return pd + (index & (V_L2_SIZE - 1));
 }
 
@@ -978,39 +965,12 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
 }
 
-static inline void set_bits(uint8_t *tab, int start, int len)
-{
-    int end, mask, end1;
-
-    end = start + len;
-    tab += start >> 3;
-    mask = 0xff << (start & 7);
-    if ((start & ~7) == (end & ~7)) {
-        if (start < end) {
-            mask &= ~(0xff << (end & 7));
-            *tab |= mask;
-        }
-    } else {
-        *tab++ |= mask;
-        start = (start + 8) & ~7;
-        end1 = end & ~7;
-        while (start < end1) {
-            *tab++ = 0xff;
-            start += 8;
-        }
-        if (start < end) {
-            mask = ~(0xff << (end & 7));
-            *tab |= mask;
-        }
-    }
-}
-
 static void build_page_bitmap(PageDesc *p)
 {
     int n, tb_start, tb_end;
     TranslationBlock *tb;
 
-    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
+    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
 
     tb = p->first_tb;
     while (tb != NULL) {
@@ -1029,7 +989,7 @@ static void build_page_bitmap(PageDesc *p)
             tb_start = 0;
             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
         }
-        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
+        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
         tb = tb->page_next[n];
     }
 }
@@ -1219,7 +1179,6 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
 {
     PageDesc *p;
-    int offset, b;
 
 #if 0
     if (1) {
@@ -1235,8 +1194,11 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
         return;
     }
     if (p->code_bitmap) {
-        offset = start & ~TARGET_PAGE_MASK;
-        b = p->code_bitmap[offset >> 3] >> (offset & 7);
+        unsigned int nr;
+        unsigned long b;
+
+        nr = start & ~TARGET_PAGE_MASK;
+        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
         if (b & ((1 << len) - 1)) {
             goto do_invalidate;
         }
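The rewritten PageDesc code leans on QEMU's generic bitmap helpers instead of the hand-rolled set_bits(). A small sketch of the equivalence, using the same helpers the diff switches to; the 4096-bit size and the example offsets are illustrative only:

#include <stdbool.h>
#include "qemu/bitmap.h"
#include "qemu/bitops.h"

#define EXAMPLE_PAGE_SIZE 4096

/* bitmap_new() returns a zero-filled bitmap of the given number of bits,
 * replacing the old g_malloc0(TARGET_PAGE_SIZE / 8). */
static unsigned long *example_code_bitmap(void)
{
    unsigned long *bitmap = bitmap_new(EXAMPLE_PAGE_SIZE);

    /* bitmap_set() replaces set_bits(): mark bytes 0x100..0x17f of the
     * page as covered by translated code. */
    bitmap_set(bitmap, 0x100, 0x80);
    return bitmap;
}

/* Same word-extraction test as the new tb_invalidate_phys_page_fast(). */
static bool example_page_written(const unsigned long *bitmap,
                                 unsigned int offset, int len)
{
    unsigned long b = bitmap[BIT_WORD(offset)] >> (offset & (BITS_PER_LONG - 1));

    return b & ((1 << len) - 1);
}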