Various testing, tcg and plugin updates
- fix bug in gdbstub tests that leave hanging QEMUs
- tweak s390x travis test
- re-factor guest_base handling
- support "notes" in disassembler output
- include guest address notes in out_asm
- cleanup plugin headers and constify hwaddr
- update MAINTAINERS for cpus-common.c

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAl6+qegACgkQ+9DbCVqe
KkT2sQf+Kcypx3RzZXrMrqKKSWDOmyvEIjRwwyCTBgkjBE2vU7lVlkWAL5DkRxiN
MBPpR5zwlU1enRFUVhB//M1kj+lOLh/WeLvipE6FE5c45/onU1KNXo1LQnUHOIkT
/j9mMxrPL4beVhUH1PZyJNQo0sPHcB9mELLCUXenxBVv29ym/WZ90ORbNaB6lQE+
PSH99K3PFCFo/UIQA612dypfR130C2rikHd19/mfvAXYTuE4p52G83sutqB+3eg7
CiahqEIwGDV+g4pxN4FA1xopRjCVvUZahaVGRDY3gzCAZi4ug2/ROoZOta9jP6SR
n986kWycqJwn42X6yFPTzcEpz/84sg==
=GIEt
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-tcg-plugins-150520-2' into staging

Various testing, tcg and plugin updates

- fix bug in gdbstub tests that leave hanging QEMUs
- tweak s390x travis test
- re-factor guest_base handling
- support "notes" in disassembler output
- include guest address notes in out_asm
- cleanup plugin headers and constify hwaddr
- update MAINTAINERS for cpus-common.c

# gpg: Signature made Fri 15 May 2020 15:40:40 BST
# gpg: using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8 DF35 FBD0 DB09 5A9E 2A44

* remotes/stsquad/tags/pull-testing-tcg-plugins-150520-2:
  MAINTAINERS: update the orphaned cpus-common.c file
  qemu/qemu-plugin: Make qemu_plugin_hwaddr_is_io() hwaddr argument const
  qemu/plugin: Move !CONFIG_PLUGIN stubs altogether
  qemu/plugin: Trivial code movement
  translate-all: include guest address in out_asm output
  disas: add optional note support to cap_disas
  disas: include an optional note for the start of disassembly
  accel/tcg: don't disable exec_tb trace events
  accel/tcg: Relax va restrictions on 64-bit guests
  exec/cpu-all: Use bool for have_guest_base
  linux-user: completely re-write init_guest_space
  travis.yml: Improve the --disable-tcg test on s390x
  tests/guest-debug: catch hanging guests

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 66706192de
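For illustration only, before the diffs: the "notes" support threads an optional annotation string through the disassembler entry points. A minimal C sketch of the new calling convention follows; the disas() signature and the note text come from the hunks below, while the caller and buffer are hypothetical.

#include <stdio.h>

/* new signature from this series; note may be NULL for the old behaviour */
void disas(FILE *out, void *code, unsigned long size, const char *note);

static void dump_generated_code(void *host_buf, unsigned long nbytes)
{
    /* the note is printed after the first disassembled instruction only */
    disas(stderr, host_buf, nbytes, "[tb header & initial instruction]");
}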
--- a/.travis.yml
+++ b/.travis.yml
@@ -502,9 +502,10 @@ jobs:
           $(exit $BUILD_RC);
         fi

-    - name: "[s390x] GCC check (KVM)"
+    - name: "[s390x] Clang (disable-tcg)"
       arch: s390x
       dist: bionic
+      compiler: clang
       addons:
         apt_packages:
           - libaio-dev
@@ -528,21 +529,10 @@ jobs:
           - libusb-1.0-0-dev
           - libvdeplug-dev
           - libvte-2.91-dev
-          # Tests dependencies
-          - genisoimage
       env:
         - TEST_CMD="make check-unit"
-        - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools"
-      script:
-        - ( cd ${SRC_DIR} ; git submodule update --init roms/SLOF )
-        - BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
-        - |
-          if [ "$BUILD_RC" -eq 0 ] ; then
-              mv pc-bios/s390-ccw/*.img pc-bios/ ;
-              ${TEST_CMD} ;
-          else
-              $(exit $BUILD_RC);
-          fi
+        - CONFIG="--disable-containers --disable-tcg --enable-kvm
+                  --disable-tools --host-cc=clang --cxx=clang++"

   # Release builds
   # The make-release script expect a QEMU version, so our tag must start with a 'v'.
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -115,6 +115,7 @@ M: Richard Henderson <rth@twiddle.net>
 R: Paolo Bonzini <pbonzini@redhat.com>
 S: Maintained
 F: cpus.c
+F: cpus-common.c
 F: exec.c
 F: accel/tcg/
 F: accel/stubs/tcg-stub.c
--- a/accel/tcg/trace-events
+++ b/accel/tcg/trace-events
@@ -1,10 +1,10 @@
 # See docs/devel/tracing.txt for syntax documentation.

-# TCG related tracing (mostly disabled by default)
+# TCG related tracing
 # cpu-exec.c
-disable exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
-disable exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
-disable exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x"
+exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
+exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
+exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x"

 # translate-all.c
 translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -173,8 +173,13 @@ struct page_collection {
 #define TB_FOR_EACH_JMP(head_tb, tb, n) \
         TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)

-/* In system mode we want L1_MAP to be based on ram offsets,
-   while in user mode we want it to be based on virtual addresses.  */
+/*
+ * In system mode we want L1_MAP to be based on ram offsets,
+ * while in user mode we want it to be based on virtual addresses.
+ *
+ * TODO: For user mode, see the caveat re host vs guest virtual
+ * address spaces near GUEST_ADDR_MAX.
+ */
 #if !defined(CONFIG_USER_ONLY)
 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
@@ -182,7 +187,7 @@ struct page_collection {
 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
 #endif
 #else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
+# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
 #endif

 /* Size of the L2 (and L3, etc) page tables.  */
@@ -1789,14 +1794,43 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
         qemu_log_in_addr_range(tb->pc)) {
         FILE *logfile = qemu_log_lock();
+        int code_size, data_size = 0;
+        g_autoptr(GString) note = g_string_new("[tb header & initial instruction]");
+        size_t chunk_start = 0;
+        int insn = 0;
         qemu_log("OUT: [size=%d]\n", gen_code_size);
         if (tcg_ctx->data_gen_ptr) {
-            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
-            size_t data_size = gen_code_size - code_size;
-            size_t i;
-
-            log_disas(tb->tc.ptr, code_size);
+            code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
+            data_size = gen_code_size - code_size;
+        } else {
+            code_size = gen_code_size;
+        }

+        /* Dump header and the first instruction */
+        chunk_start = tcg_ctx->gen_insn_end_off[insn];
+        log_disas(tb->tc.ptr, chunk_start, note->str);
+
+        /*
+         * Dump each instruction chunk, wrapping up empty chunks into
+         * the next instruction. The whole array is offset so the
+         * first entry is the beginning of the 2nd instruction.
+         */
+        while (insn <= tb->icount && chunk_start < code_size) {
+            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
+            if (chunk_end > chunk_start) {
+                g_string_printf(note, "[guest addr: " TARGET_FMT_lx "]",
+                                tcg_ctx->gen_insn_data[insn][0]);
+                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start,
+                          note->str);
+                chunk_start = chunk_end;
+            }
+            insn++;
+        }
+
+        /* Finally dump any data we may have after the block */
+        if (data_size) {
+            int i;
+            qemu_log("  data: [size=%d]\n", data_size);
             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                 if (sizeof(tcg_target_ulong) == 8) {
                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
@@ -1808,8 +1842,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                               *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                 }
             }
-        } else {
-            log_disas(tb->tc.ptr, gen_code_size);
         }
         qemu_log("\n");
         qemu_log_flush();
@@ -2497,9 +2529,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     /* This function should never be called with addresses outside the
        guest address space.  If this assert fires, it probably indicates
        a missing call to h2g_valid.  */
-#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
-    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
-#endif
+    assert(end - 1 <= GUEST_ADDR_MAX);
     assert(start < end);
     assert_memory_lock();
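The out_asm loop above pairs two pieces of TCG bookkeeping: gen_insn_end_off[i] (the host-code offset at which guest instruction i's translation ends) and gen_insn_data[i][0] (that instruction's guest address). For illustration only, a self-contained sketch of the same chunk walk over hypothetical arrays; the names and values here are invented.

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-ins for tcg_ctx->gen_insn_end_off / gen_insn_data[i][0] */
static const size_t end_off[] = { 8, 8, 24, 40 };  /* equal neighbours: insn emitted no host code */
static const uint64_t guest_pc[] = { 0x400000, 0x400004, 0x400008, 0x40000c };

static void walk_chunks(size_t code_size, int icount)
{
    size_t chunk_start = end_off[0];   /* [0, end_off[0]) covers header + first insn */
    for (int insn = 1; insn <= icount && chunk_start < code_size; insn++) {
        size_t chunk_end = end_off[insn];
        if (chunk_end > chunk_start) {          /* fold empty chunks forward */
            printf("host [%zu, %zu) <- guest 0x%" PRIx64 "\n",
                   chunk_start, chunk_end, guest_pc[insn]);
            chunk_start = chunk_end;
        }
    }
}

For instance, walk_chunks(40, 3) prints one host-code range per guest instruction that actually emitted code, which is exactly the pairing the out_asm notes make visible.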
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -42,7 +42,7 @@
 int singlestep;
 unsigned long mmap_min_addr;
 unsigned long guest_base;
-int have_guest_base;
+bool have_guest_base;
 unsigned long reserved_va;

 static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
@@ -828,7 +828,7 @@ int main(int argc, char **argv)
             }
         } else if (!strcmp(r, "B")) {
             guest_base = strtol(argv[optind++], NULL, 0);
-            have_guest_base = 1;
+            have_guest_base = true;
         } else if (!strcmp(r, "drop-ld-preload")) {
             (void) envlist_unsetenv(envlist, "LD_PRELOAD");
         } else if (!strcmp(r, "bsd")) {
--- a/disas.c
+++ b/disas.c
@@ -260,7 +260,8 @@ static void cap_dump_insn_units(disassemble_info *info, cs_insn *insn,
     }
 }

-static void cap_dump_insn(disassemble_info *info, cs_insn *insn)
+static void cap_dump_insn(disassemble_info *info, cs_insn *insn,
+                          const char *note)
 {
     fprintf_function print = info->fprintf_func;
     int i, n, split;
@@ -281,7 +282,11 @@ static void cap_dump_insn(disassemble_info *info, cs_insn *insn)
     }

     /* Print the actual instruction. */
-    print(info->stream, "  %-8s %s\n", insn->mnemonic, insn->op_str);
+    print(info->stream, "  %-8s %s", insn->mnemonic, insn->op_str);
+    if (note) {
+        print(info->stream, "\t\t%s", note);
+    }
+    print(info->stream, "\n");

     /* Dump any remaining part of the insn on subsequent lines. */
     for (i = split; i < n; i += split) {
@@ -313,7 +318,7 @@ static bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size)
         size -= tsize;

         while (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
-            cap_dump_insn(info, insn);
+            cap_dump_insn(info, insn, NULL);
         }

         /* If the target memory is not consumed, go back for more... */
@@ -342,7 +347,8 @@ static bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size)
 }

 /* Disassemble SIZE bytes at CODE for the host. */
-static bool cap_disas_host(disassemble_info *info, void *code, size_t size)
+static bool cap_disas_host(disassemble_info *info, void *code, size_t size,
+                           const char *note)
 {
     csh handle;
     const uint8_t *cbuf;
@@ -358,7 +364,8 @@ static bool cap_disas_host(disassemble_info *info, void *code, size_t size)
     pc = (uintptr_t)code;

     while (cs_disasm_iter(handle, &cbuf, &size, &pc, insn)) {
-        cap_dump_insn(info, insn);
+        cap_dump_insn(info, insn, note);
+        note = NULL;
     }
     if (size != 0) {
         (*info->fprintf_func)(info->stream,
@@ -402,7 +409,7 @@ static bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
         csize += tsize;

         if (cs_disasm_iter(handle, &cbuf, &csize, &pc, insn)) {
-            cap_dump_insn(info, insn);
+            cap_dump_insn(info, insn, NULL);
             if (--count <= 0) {
                 break;
             }
@@ -416,7 +423,7 @@ static bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count)
 #endif /* !CONFIG_USER_ONLY */
 #else
 # define cap_disas_target(i, p, s)  false
-# define cap_disas_host(i, p, s)  false
+# define cap_disas_host(i, p, s, n)  false
 # define cap_disas_monitor(i, p, c)  false
 # define cap_disas_plugin(i, p, c)  false
 #endif /* CONFIG_CAPSTONE */
@@ -586,7 +593,7 @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
 }

 /* Disassemble this for me please... (debugging). */
-void disas(FILE *out, void *code, unsigned long size)
+void disas(FILE *out, void *code, unsigned long size, const char *note)
 {
     uintptr_t pc;
     int count;
@@ -664,7 +671,7 @@ void disas(FILE *out, void *code, unsigned long size)
     print_insn = print_insn_hppa;
 #endif

-    if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
+    if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size, note)) {
         return;
     }

@@ -674,10 +681,16 @@ void disas(FILE *out, void *code, unsigned long size)
     for (pc = (uintptr_t)code; size > 0; pc += count, size -= count) {
         fprintf(out, "0x%08" PRIxPTR ":  ", pc);
         count = print_insn(pc, &s.info);
-        fprintf(out, "\n");
-        if (count < 0)
-            break;
+        if (note) {
+            fprintf(out, "\t\t%s", note);
+            note = NULL;
+        }
+        fprintf(out, "\n");
+        if (count < 0) {
+            break;
+        }
     }
 }

 /* Look up symbol for debugging purpose.  Returns "" if unknown. */
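A condensed sketch of the note-threading pattern used by cap_disas_host() above: the annotation rides on the first instruction only, then is cleared so later instructions print bare. This assumes it lives in disas.c next to cap_dump_insn(); the capstone iterator call matches the hunks above.

static void dump_with_note(csh handle, const uint8_t *cbuf, size_t size,
                           uint64_t pc, cs_insn *insn,
                           disassemble_info *info, const char *note)
{
    while (cs_disasm_iter(handle, &cbuf, &size, &pc, insn)) {
        cap_dump_insn(info, insn, note);
        note = NULL;   /* later instructions print without the annotation */
    }
}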
--- a/include/disas/disas.h
+++ b/include/disas/disas.h
@@ -7,7 +7,7 @@
 #include "cpu.h"

 /* Disassemble this for me please... (debugging). */
-void disas(FILE *out, void *code, unsigned long size);
+void disas(FILE *out, void *code, unsigned long size, const char *note);
 void target_disas(FILE *out, CPUState *cpu, target_ulong code,
                   target_ulong size);
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -159,15 +159,30 @@ static inline void tswap64s(uint64_t *s)
  * This allows the guest address space to be offset to a convenient location.
  */
 extern unsigned long guest_base;
-extern int have_guest_base;
+extern bool have_guest_base;
 extern unsigned long reserved_va;

-#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
-#define GUEST_ADDR_MAX (~0ul)
+/*
+ * Limit the guest addresses as best we can.
+ *
+ * When not using -R reserved_va, we cannot really limit the guest
+ * to less address space than the host.  For 32-bit guests, this
+ * acts as a sanity check that we're not giving the guest an address
+ * that it cannot even represent.  For 64-bit guests... the address
+ * might not be what the real kernel would give, but it is at least
+ * representable in the guest.
+ *
+ * TODO: Improve address allocation to avoid this problem, and to
+ * avoid setting bits at the top of guest addresses that might need
+ * to be used for tags.
+ */
+#if MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32
+# define GUEST_ADDR_MAX_ UINT32_MAX
 #else
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \
-                        (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
+# define GUEST_ADDR_MAX_ (~0ul)
 #endif
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)

 #else

 #include "exec/hwaddr.h"
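For illustration, the new bound can be evaluated standalone; a sketch with hypothetical target parameters (MIN here behaves like QEMU's macro):

#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* hypothetical: a 32-bit guest ABI on any host */
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#define TARGET_ABI_BITS 32

#if MIN(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32
# define GUEST_ADDR_MAX_ UINT32_MAX   /* cap at what the guest can represent */
#else
# define GUEST_ADDR_MAX_ (~0ul)       /* 64-bit guest: allow the full host range */
#endif

With -R, reserved_va - 1 still takes precedence in either case.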
--- a/include/exec/log.h
+++ b/include/exec/log.h
@@ -56,13 +56,13 @@ static inline void log_target_disas(CPUState *cpu, target_ulong start,
     rcu_read_unlock();
 }

-static inline void log_disas(void *code, unsigned long size)
+static inline void log_disas(void *code, unsigned long size, const char *note)
 {
     QemuLogFile *logfile;
     rcu_read_lock();
     logfile = atomic_rcu_read(&qemu_logfile);
     if (logfile) {
-        disas(logfile->fd, code, size);
+        disas(logfile->fd, code, size, note);
     }
     rcu_read_unlock();
 }
--- a/include/qemu/plugin.h
+++ b/include/qemu/plugin.h
@@ -13,6 +13,22 @@
 #include "qemu/queue.h"
 #include "qemu/option.h"

+/*
+ * Events that plugins can subscribe to.
+ */
+enum qemu_plugin_event {
+    QEMU_PLUGIN_EV_VCPU_INIT,
+    QEMU_PLUGIN_EV_VCPU_EXIT,
+    QEMU_PLUGIN_EV_VCPU_TB_TRANS,
+    QEMU_PLUGIN_EV_VCPU_IDLE,
+    QEMU_PLUGIN_EV_VCPU_RESUME,
+    QEMU_PLUGIN_EV_VCPU_SYSCALL,
+    QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
+    QEMU_PLUGIN_EV_FLUSH,
+    QEMU_PLUGIN_EV_ATEXIT,
+    QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
+};
+
 /*
  * Option parsing/processing.
  * Note that we can load an arbitrary number of plugins.
@@ -30,38 +46,6 @@ static inline void qemu_plugin_add_opts(void)

 void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head);
 int qemu_plugin_load_list(QemuPluginList *head);
-#else /* !CONFIG_PLUGIN */
-static inline void qemu_plugin_add_opts(void)
-{ }
-
-static inline void qemu_plugin_opt_parse(const char *optarg,
-                                         QemuPluginList *head)
-{
-    error_report("plugin interface not enabled in this build");
-    exit(1);
-}
-
-static inline int qemu_plugin_load_list(QemuPluginList *head)
-{
-    return 0;
-}
-#endif /* !CONFIG_PLUGIN */
-
-/*
- * Events that plugins can subscribe to.
- */
-enum qemu_plugin_event {
-    QEMU_PLUGIN_EV_VCPU_INIT,
-    QEMU_PLUGIN_EV_VCPU_EXIT,
-    QEMU_PLUGIN_EV_VCPU_TB_TRANS,
-    QEMU_PLUGIN_EV_VCPU_IDLE,
-    QEMU_PLUGIN_EV_VCPU_RESUME,
-    QEMU_PLUGIN_EV_VCPU_SYSCALL,
-    QEMU_PLUGIN_EV_VCPU_SYSCALL_RET,
-    QEMU_PLUGIN_EV_FLUSH,
-    QEMU_PLUGIN_EV_ATEXIT,
-    QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */
-};

 union qemu_plugin_cb_sig {
     qemu_plugin_simple_cb_t simple;
@@ -182,8 +166,6 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb)
     return insn;
 }

-#ifdef CONFIG_PLUGIN
-
 void qemu_plugin_vcpu_init_hook(CPUState *cpu);
 void qemu_plugin_vcpu_exit_hook(CPUState *cpu);
 void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb);
@@ -207,6 +189,21 @@ void qemu_plugin_disable_mem_helpers(CPUState *cpu);

 #else /* !CONFIG_PLUGIN */

+static inline void qemu_plugin_add_opts(void)
+{ }
+
+static inline void qemu_plugin_opt_parse(const char *optarg,
+                                         QemuPluginList *head)
+{
+    error_report("plugin interface not enabled in this build");
+    exit(1);
+}
+
+static inline int qemu_plugin_load_list(QemuPluginList *head)
+{
+    return 0;
+}
+
 static inline void qemu_plugin_vcpu_init_hook(CPUState *cpu)
 { }
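The point of gathering the !CONFIG_PLUGIN stubs into one block is that call sites never need their own ifdefs. A minimal sketch of the pattern, using one hook from the header above (CONFIG_PLUGIN and CPUState are QEMU's own symbols):

#ifdef CONFIG_PLUGIN
void qemu_plugin_vcpu_init_hook(CPUState *cpu);
#else
static inline void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{ }
#endif

/* callers can then say, unconditionally:
 *     qemu_plugin_vcpu_init_hook(cpu);
 * and the no-op inline version compiles away when plugins are disabled. */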
--- a/include/qemu/qemu-plugin.h
+++ b/include/qemu/qemu-plugin.h
@@ -331,7 +331,7 @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
  * to return information about it. For non-IO accesses the device
  * offset will be into the appropriate block of RAM.
  */
-bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr);
+bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr);
 uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr);

 typedef void
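For illustration only: a sketch of a TCG plugin memory callback that exercises the constified accessor. qemu_plugin_get_hwaddr() and the callback signature come from the public plugin API shown above; the counter is hypothetical and left unsynchronized for brevity.

#include <stdint.h>
#include <qemu-plugin.h>

static uint64_t io_accesses;   /* hypothetical per-plugin counter */

static void vcpu_mem(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                     uint64_t vaddr, void *udata)
{
    struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);

    /* hw is NULL in user-mode; is_io distinguishes device access from RAM */
    if (hw && qemu_plugin_hwaddr_is_io(hw)) {
        io_accesses++;
    }
}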
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -11,6 +11,7 @@
 #include "qemu/queue.h"
 #include "qemu/guest-random.h"
 #include "qemu/units.h"
+#include "qemu/selfmap.h"

 #ifdef _ARCH_PPC64
 #undef ARCH_DLINFO
@@ -382,68 +383,30 @@ enum {

 /* The commpage only exists for 32 bit kernels */

-/* Return 1 if the proposed guest space is suitable for the guest.
- * Return 0 if the proposed guest space isn't suitable, but another
- * address space should be tried.
- * Return -1 if there is no way the proposed guest space can be
- * valid regardless of the base.
- * The guest code may leave a page mapped and populate it if the
- * address is suitable.
- */
-static int init_guest_commpage(unsigned long guest_base,
-                               unsigned long guest_size)
-{
-    unsigned long real_start, test_page_addr;
-
-    /* We need to check that we can force a fault on access to the
-     * commpage at 0xffff0fxx
-     */
-    test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
-
-    /* If the commpage lies within the already allocated guest space,
-     * then there is no way we can allocate it.
-     *
-     * You may be thinking that that this check is redundant because
-     * we already validated the guest size against MAX_RESERVED_VA;
-     * but if qemu_host_page_mask is unusually large, then
-     * test_page_addr may be lower.
-     */
-    if (test_page_addr >= guest_base
-        && test_page_addr < (guest_base + guest_size)) {
-        return -1;
-    }
-
-    /* Note it needs to be writeable to let us initialise it */
-    real_start = (unsigned long)
-                 mmap((void *)test_page_addr, qemu_host_page_size,
-                      PROT_READ | PROT_WRITE,
-                      MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
-    /* If we can't map it then try another address */
-    if (real_start == -1ul) {
-        return 0;
-    }
+#define ARM_COMMPAGE            (intptr_t)0xffff0f00u

-    if (real_start != test_page_addr) {
-        /* OS didn't put the page where we asked - unmap and reject */
-        munmap((void *)real_start, qemu_host_page_size);
-        return 0;
-    }
+static bool init_guest_commpage(void)
+{
+    void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
+    void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

-    /* Leave the page mapped
-     * Populate it (mmap should have left it all 0'd)
-     */
-
-    /* Kernel helper versions */
-    __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
+    if (addr == MAP_FAILED) {
+        perror("Allocating guest commpage");
+        exit(EXIT_FAILURE);
+    }
+    if (addr != want) {
+        return false;
+    }
+
+    /* Set kernel helper versions; rest of page is 0.  */
+    __put_user(5, (uint32_t *)g2h(0xffff0ffcu));

     /* Now it's populated make it RO */
-    if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
+    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
         perror("Protecting guest commpage");
-        exit(-1);
+        exit(EXIT_FAILURE);
     }

-    return 1; /* All good */
+    return true;
 }

 #define ELF_HWCAP get_elf_hwcap()
@@ -2075,240 +2038,268 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
     return sp;
 }

-unsigned long init_guest_space(unsigned long host_start,
-                               unsigned long host_size,
-                               unsigned long guest_start,
-                               bool fixed)
-{
-    /* In order to use host shmat, we must be able to honor SHMLBA.  */
-    unsigned long align = MAX(SHMLBA, qemu_host_page_size);
-    unsigned long current_start, aligned_start;
-    int flags;
-
-    assert(host_start || host_size);
-
-    /* If just a starting address is given, then just verify that
-     * address.  */
-    if (host_start && !host_size) {
-#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
-        if (init_guest_commpage(host_start, host_size) != 1) {
-            return (unsigned long)-1;
-        }
-#endif
-        return host_start;
-    }
-
-    /* Setup the initial flags and start address.  */
-    current_start = host_start & -align;
-    flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
-    if (fixed) {
-        flags |= MAP_FIXED;
-    }
-
-    /* Otherwise, a non-zero size region of memory needs to be mapped
-     * and validated.  */
-
-#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
-    /* On 32-bit ARM, we need to map not just the usable memory, but
-     * also the commpage.  Try to find a suitable place by allocating
-     * a big chunk for all of it.  If host_start, then the naive
-     * strategy probably does good enough.
-     */
-    if (!host_start) {
-        unsigned long guest_full_size, host_full_size, real_start;
-
-        guest_full_size =
-            (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
-        host_full_size = guest_full_size - guest_start;
-        real_start = (unsigned long)
-            mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
-        if (real_start == (unsigned long)-1) {
-            if (host_size < host_full_size - qemu_host_page_size) {
-                /* We failed to map a continous segment, but we're
-                 * allowed to have a gap between the usable memory and
-                 * the commpage where other things can be mapped.
-                 * This sparseness gives us more flexibility to find
-                 * an address range.
-                 */
-                goto naive;
-            }
-            return (unsigned long)-1;
-        }
-        munmap((void *)real_start, host_full_size);
-        if (real_start & (align - 1)) {
-            /* The same thing again, but with extra
-             * so that we can shift around alignment.
-             */
-            unsigned long real_size = host_full_size + qemu_host_page_size;
-            real_start = (unsigned long)
-                mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
-            if (real_start == (unsigned long)-1) {
-                if (host_size < host_full_size - qemu_host_page_size) {
-                    goto naive;
-                }
-                return (unsigned long)-1;
-            }
-            munmap((void *)real_start, real_size);
-            real_start = ROUND_UP(real_start, align);
-        }
-        current_start = real_start;
-    }
- naive:
-#endif
-
-    while (1) {
-        unsigned long real_start, real_size, aligned_size;
-        aligned_size = real_size = host_size;
-
-        /* Do not use mmap_find_vma here because that is limited to the
-         * guest address space.  We are going to make the
-         * guest address space fit whatever we're given.
-         */
-        real_start = (unsigned long)
-            mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0);
-        if (real_start == (unsigned long)-1) {
-            return (unsigned long)-1;
-        }
-
-        /* Check to see if the address is valid.  */
-        if (host_start && real_start != current_start) {
-            qemu_log_mask(CPU_LOG_PAGE, "invalid %lx && %lx != %lx\n",
-                          host_start, real_start, current_start);
-            goto try_again;
-        }
-
-        /* Ensure the address is properly aligned.  */
-        if (real_start & (align - 1)) {
-            /* Ideally, we adjust like
-             *
-             *    pages: [  ][  ][  ][  ][  ]
-             *      old:   [   real   ]
-             *             [ aligned  ]
-             *      new:   [     real    ]
-             *               [ aligned ]
-             *
-             * But if there is something else mapped right after it,
-             * then obviously it won't have room to grow, and the
-             * kernel will put the new larger real someplace else with
-             * unknown alignment (if we made it to here, then
-             * fixed=false).  Which is why we grow real by a full page
-             * size, instead of by part of one; so that even if we get
-             * moved, we can still guarantee alignment.  But this does
-             * mean that there is a padding of < 1 page both before
-             * and after the aligned range; the "after" could could
-             * cause problems for ARM emulation where it could butt in
-             * to where we need to put the commpage.
-             */
-            munmap((void *)real_start, host_size);
-            real_size = aligned_size + align;
-            real_start = (unsigned long)
-                mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
-            if (real_start == (unsigned long)-1) {
-                return (unsigned long)-1;
-            }
-            aligned_start = ROUND_UP(real_start, align);
-        } else {
-            aligned_start = real_start;
-        }
-
-#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
-        /* On 32-bit ARM, we need to also be able to map the commpage.  */
-        int valid = init_guest_commpage(aligned_start - guest_start,
-                                        aligned_size + guest_start);
-        if (valid == -1) {
-            munmap((void *)real_start, real_size);
-            return (unsigned long)-1;
-        } else if (valid == 0) {
-            goto try_again;
-        }
-#endif
-
-        /* If nothing has said `return -1` or `goto try_again` yet,
-         * then the address we have is good.
-         */
-        break;
-
-    try_again:
-        /* That address didn't work.  Unmap and try a different one.
-         * The address the host picked because is typically right at
-         * the top of the host address space and leaves the guest with
-         * no usable address space.  Resort to a linear search.  We
-         * already compensated for mmap_min_addr, so this should not
-         * happen often.  Probably means we got unlucky and host
-         * address space randomization put a shared library somewhere
-         * inconvenient.
-         *
-         * This is probably a good strategy if host_start, but is
-         * probably a bad strategy if not, which means we got here
-         * because of trouble with ARM commpage setup.
-         */
-        if (munmap((void *)real_start, real_size) != 0) {
-            error_report("%s: failed to unmap %lx:%lx (%s)", __func__,
-                         real_start, real_size, strerror(errno));
-            abort();
-        }
-        current_start += align;
-        if (host_start == current_start) {
-            /* Theoretically possible if host doesn't have any suitably
-             * aligned areas.  Normally the first mmap will fail.
-             */
-            return (unsigned long)-1;
-        }
-    }
-
-    qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
-
-    return aligned_start;
-}
-
-static void probe_guest_base(const char *image_name,
-                             abi_ulong loaddr, abi_ulong hiaddr)
-{
-    /* Probe for a suitable guest base address, if the user has not set
-     * it explicitly, and set guest_base appropriately.
-     * In case of error we will print a suitable message and exit.
-     */
-    const char *errmsg;
-    if (!have_guest_base && !reserved_va) {
-        unsigned long host_start, real_start, host_size;
-
-        /* Round addresses to page boundaries.  */
-        loaddr &= qemu_host_page_mask;
-        hiaddr = HOST_PAGE_ALIGN(hiaddr);
-
-        if (loaddr < mmap_min_addr) {
-            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
-        } else {
-            host_start = loaddr;
-            if (host_start != loaddr) {
-                errmsg = "Address overflow loading ELF binary";
-                goto exit_errmsg;
-            }
-        }
-        host_size = hiaddr - loaddr;
-
-        /* Setup the initial guest memory space with ranges gleaned from
-         * the ELF image that is being loaded.
-         */
-        real_start = init_guest_space(host_start, host_size, loaddr, false);
-        if (real_start == (unsigned long)-1) {
-            errmsg = "Unable to find space for application";
-            goto exit_errmsg;
-        }
-        guest_base = real_start - loaddr;
-
-        qemu_log_mask(CPU_LOG_PAGE, "Relocating guest address space from 0x"
-                      TARGET_ABI_FMT_lx " to 0x%lx\n",
-                      loaddr, real_start);
-    }
-    return;
-
-exit_errmsg:
-    fprintf(stderr, "%s: %s\n", image_name, errmsg);
-    exit(-1);
-}
+#ifndef ARM_COMMPAGE
+#define ARM_COMMPAGE 0
+#define init_guest_commpage() true
+#endif
+
+static void pgb_fail_in_use(const char *image_name)
+{
+    error_report("%s: requires virtual address space that is in use "
+                 "(omit the -B option or choose a different value)",
+                 image_name);
+    exit(EXIT_FAILURE);
+}
+
+static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
+                                abi_ulong guest_hiaddr, long align)
+{
+    const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
+    void *addr, *test;
+
+    if (!QEMU_IS_ALIGNED(guest_base, align)) {
+        fprintf(stderr, "Requested guest base 0x%lx does not satisfy "
+                "host minimum alignment (0x%lx)\n",
+                guest_base, align);
+        exit(EXIT_FAILURE);
+    }
+
+    /* Sanity check the guest binary. */
+    if (reserved_va) {
+        if (guest_hiaddr > reserved_va) {
+            error_report("%s: requires more than reserved virtual "
+                         "address space (0x%" PRIx64 " > 0x%lx)",
+                         image_name, (uint64_t)guest_hiaddr, reserved_va);
+            exit(EXIT_FAILURE);
+        }
+    } else {
+        if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
+            error_report("%s: requires more virtual address space "
+                         "than the host can provide (0x%" PRIx64 ")",
+                         image_name, (uint64_t)guest_hiaddr - guest_base);
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    /*
+     * Expand the allocation to the entire reserved_va.
+     * Exclude the mmap_min_addr hole.
+     */
+    if (reserved_va) {
+        guest_loaddr = (guest_base >= mmap_min_addr ? 0
+                        : mmap_min_addr - guest_base);
+        guest_hiaddr = reserved_va;
+    }
+
+    /* Reserve the address space for the binary, or reserved_va. */
+    test = g2h(guest_loaddr);
+    addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
+    if (test != addr) {
+        pgb_fail_in_use(image_name);
+    }
+}
+
+/* Return value for guest_base, or -1 if no hole found. */
+static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
+                               long align)
+{
+    GSList *maps, *iter;
+    uintptr_t this_start, this_end, next_start, brk;
+    intptr_t ret = -1;
+
+    assert(QEMU_IS_ALIGNED(guest_loaddr, align));
+
+    maps = read_self_maps();
+
+    /* Read brk after we've read the maps, which will malloc. */
+    brk = (uintptr_t)sbrk(0);
+
+    /* The first hole is before the first map entry. */
+    this_start = mmap_min_addr;
+
+    for (iter = maps; iter;
+         this_start = next_start, iter = g_slist_next(iter)) {
+        uintptr_t align_start, hole_size;
+
+        this_end = ((MapInfo *)iter->data)->start;
+        next_start = ((MapInfo *)iter->data)->end;
+        align_start = ROUND_UP(this_start, align);
+
+        /* Skip holes that are too small. */
+        if (align_start >= this_end) {
+            continue;
+        }
+        hole_size = this_end - align_start;
+        if (hole_size < guest_size) {
+            continue;
+        }
+
+        /* If this hole contains brk, give ourselves some room to grow. */
+        if (this_start <= brk && brk < this_end) {
+            hole_size -= guest_size;
+            if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) {
+                align_start += 1 * GiB;
+            } else if (hole_size >= 16 * MiB) {
+                align_start += 16 * MiB;
+            } else {
+                align_start = (this_end - guest_size) & -align;
+                if (align_start < this_start) {
+                    continue;
+                }
+            }
+        }
+
+        /* Record the lowest successful match. */
+        if (ret < 0) {
+            ret = align_start - guest_loaddr;
+        }
+        /* If this hole contains the identity map, select it. */
+        if (align_start <= guest_loaddr &&
+            guest_loaddr + guest_size <= this_end) {
+            ret = 0;
+        }
+        /* If this hole ends above the identity map, stop looking. */
+        if (this_end >= guest_loaddr) {
+            break;
+        }
+    }
+    free_self_maps(maps);
+
+    return ret;
+}
+
+static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
+                       abi_ulong orig_hiaddr, long align)
+{
+    uintptr_t loaddr = orig_loaddr;
+    uintptr_t hiaddr = orig_hiaddr;
+    uintptr_t addr;
+
+    if (hiaddr != orig_hiaddr) {
+        error_report("%s: requires virtual address space that the "
+                     "host cannot provide (0x%" PRIx64 ")",
+                     image_name, (uint64_t)orig_hiaddr);
+        exit(EXIT_FAILURE);
+    }
+
+    loaddr &= -align;
+    if (ARM_COMMPAGE) {
+        /*
+         * Extend the allocation to include the commpage.
+         * For a 64-bit host, this is just 4GiB; for a 32-bit host,
+         * the address arithmetic will wrap around, but the difference
+         * will produce the correct allocation size.
+         */
+        if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
+            hiaddr = (uintptr_t)4 << 30;
+        } else {
+            loaddr = ARM_COMMPAGE & -align;
+        }
+    }
+
+    addr = pgb_find_hole(loaddr, hiaddr - loaddr, align);
+    if (addr == -1) {
+        /*
+         * If ARM_COMMPAGE, there *might* be a non-consecutive allocation
+         * that can satisfy both.  But as the normal arm32 link base address
+         * is ~32k, and we extend down to include the commpage, making the
+         * overhead only ~96k, this is unlikely.
+         */
+        error_report("%s: Unable to allocate %#zx bytes of "
+                     "virtual address space", image_name,
+                     (size_t)(hiaddr - loaddr));
+        exit(EXIT_FAILURE);
+    }
+
+    guest_base = addr;
+}
+
+static void pgb_dynamic(const char *image_name, long align)
+{
+    /*
+     * The executable is dynamic and does not require a fixed address.
+     * All we need is a commpage that satisfies align.
+     * If we do not need a commpage, leave guest_base == 0.
+     */
+    if (ARM_COMMPAGE) {
+        uintptr_t addr, commpage;
+
+        /* 64-bit hosts should have used reserved_va. */
+        assert(sizeof(uintptr_t) == 4);
+
+        /*
+         * By putting the commpage at the first hole, that puts guest_base
+         * just above that, and maximises the positive guest addresses.
+         */
+        commpage = ARM_COMMPAGE & -align;
+        addr = pgb_find_hole(commpage, -commpage, align);
+        assert(addr != -1);
+        guest_base = addr;
+    }
+}
+
+static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
+                            abi_ulong guest_hiaddr, long align)
+{
+    const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
+    void *addr, *test;
+
+    if (guest_hiaddr > reserved_va) {
+        error_report("%s: requires more than reserved virtual "
+                     "address space (0x%" PRIx64 " > 0x%lx)",
+                     image_name, (uint64_t)guest_hiaddr, reserved_va);
+        exit(EXIT_FAILURE);
+    }
+
+    /* Widen the "image" to the entire reserved address space. */
+    pgb_static(image_name, 0, reserved_va, align);
+
+    /* Reserve the memory on the host. */
+    assert(guest_base != 0);
+    test = g2h(0);
+    addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+    if (addr == MAP_FAILED) {
+        error_report("Unable to reserve 0x%lx bytes of virtual address "
+                     "space for use as guest address space (check your "
+                     "virtual memory ulimit setting or reserve less "
+                     "using -R option)", reserved_va);
+        exit(EXIT_FAILURE);
+    }
+    assert(addr == test);
+}
+
+void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
+                      abi_ulong guest_hiaddr)
+{
+    /* In order to use host shmat, we must be able to honor SHMLBA. */
+    uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
+
+    if (have_guest_base) {
+        pgb_have_guest_base(image_name, guest_loaddr, guest_hiaddr, align);
+    } else if (reserved_va) {
+        pgb_reserved_va(image_name, guest_loaddr, guest_hiaddr, align);
+    } else if (guest_loaddr) {
+        pgb_static(image_name, guest_loaddr, guest_hiaddr, align);
+    } else {
+        pgb_dynamic(image_name, align);
+    }
+
+    /* Reserve and initialize the commpage. */
+    if (!init_guest_commpage()) {
+        /*
+         * With have_guest_base, the user has selected the address and
+         * we are trying to work with that.  Otherwise, we have selected
+         * free space and init_guest_commpage must succeeded.
+         */
+        assert(have_guest_base);
+        pgb_fail_in_use(image_name);
+    }
+
+    assert(QEMU_IS_ALIGNED(guest_base, align));
+    qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space "
+                  "@ 0x%" PRIx64 "\n", (uint64_t)guest_base);
+}

 /* Load an ELF image into the address space.

    IMAGE_NAME is the filename of the image, to use in error messages.
@@ -2399,6 +2390,12 @@ static void load_elf_image(const char *image_name, int image_fd,
          * MMAP_MIN_ADDR or the QEMU application itself.
          */
         probe_guest_base(image_name, loaddr, hiaddr);
+    } else {
+        /*
+         * The binary is dynamic, but we still need to
+         * select guest_base.  In this case we pass a size.
+         */
+        probe_guest_base(image_name, 0, hiaddr - loaddr);
     }
 }
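For illustration: how a loader is now expected to drive the interface (mirroring the load_elf_image() hunk above). This sketch assumes it sits in linux-user code where abi_ulong is defined; the addresses are placeholders.

static void place_image(const char *image_name)
{
    abi_ulong loaddr = 0x08048000;   /* hypothetical lowest fixed vaddr */
    abi_ulong hiaddr = 0x08060000;   /* hypothetical highest fixed vaddr */

    /* exits with a diagnostic on failure instead of returning -1 */
    probe_guest_base(image_name, loaddr, hiaddr);

    /* afterwards, target_mmap(loaddr, hiaddr - loaddr, ...) can expect
     * to succeed, per the new contract documented in linux-user/qemu.h */
}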
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -441,6 +441,12 @@ static int load_flat_file(struct linux_binprm * bprm,
     indx_len = MAX_SHARED_LIBS * sizeof(abi_ulong);
     indx_len = (indx_len + 15) & ~(abi_ulong)15;

+    /*
+     * Alloate the address space.
+     */
+    probe_guest_base(bprm->filename, 0,
+                     text_len + data_len + extra + indx_len);
+
     /*
      * there are a couple of cases here,  the separate code/data
      * case,  and then the fully copied to RAM case which lumps
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -24,6 +24,7 @@
 #include "qemu-version.h"
 #include <sys/syscall.h>
 #include <sys/resource.h>
+#include <sys/shm.h>

 #include "qapi/error.h"
 #include "qemu.h"
@@ -58,7 +59,7 @@ static const char *cpu_type;
 static const char *seed_optarg;
 unsigned long mmap_min_addr;
 unsigned long guest_base;
-int have_guest_base;
+bool have_guest_base;

 /*
  * Used to implement backwards-compatibility for the `-strace`, and
@@ -333,7 +334,7 @@ static void handle_arg_cpu(const char *arg)
 static void handle_arg_guest_base(const char *arg)
 {
     guest_base = strtol(arg, NULL, 0);
-    have_guest_base = 1;
+    have_guest_base = true;
 }

 static void handle_arg_reserved_va(const char *arg)
@@ -747,28 +748,6 @@ int main(int argc, char **argv, char **envp)
     target_environ = envlist_to_environ(envlist, NULL);
     envlist_free(envlist);

-    /*
-     * Now that page sizes are configured in tcg_exec_init() we can do
-     * proper page alignment for guest_base.
-     */
-    guest_base = HOST_PAGE_ALIGN(guest_base);
-
-    if (reserved_va || have_guest_base) {
-        guest_base = init_guest_space(guest_base, reserved_va, 0,
-                                      have_guest_base);
-        if (guest_base == (unsigned long)-1) {
-            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
-                    "space for use as guest address space (check your virtual "
-                    "memory ulimit setting or reserve less using -R option)\n",
-                    reserved_va);
-            exit(EXIT_FAILURE);
-        }
-
-        if (reserved_va) {
-            mmap_next_start = reserved_va;
-        }
-    }
-
     /*
      * Read in mmap_min_addr kernel parameter. This value is used
      * When loading the ELF image to determine whether guest_base
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -219,18 +219,27 @@ void init_qemu_uname_release(void);
 void fork_start(void);
 void fork_end(int child);

-/* Creates the initial guest address space in the host memory space using
- * the given host start address hint and size.  The guest_start parameter
- * specifies the start address of the guest space.  guest_base will be the
- * difference between the host start address computed by this function and
- * guest_start.  If fixed is specified, then the mapped address space must
- * start at host_start.  The real start address of the mapped memory space is
- * returned or -1 if there was an error.
+/**
+ * probe_guest_base:
+ * @image_name: the executable being loaded
+ * @loaddr: the lowest fixed address in the executable
+ * @hiaddr: the highest fixed address in the executable
+ *
+ * Creates the initial guest address space in the host memory space.
+ *
+ * If @loaddr == 0, then no address in the executable is fixed,
+ * i.e. it is fully relocatable.  In that case @hiaddr is the size
+ * of the executable.
+ *
+ * This function will not return if a valid value for guest_base
+ * cannot be chosen.  On return, the executable loader can expect
+ *
+ *     target_mmap(loaddr, hiaddr - loaddr, ...)
+ *
+ * to succeed.
  */
-unsigned long init_guest_space(unsigned long host_start,
-                               unsigned long host_size,
-                               unsigned long guest_start,
-                               bool fixed);
+void probe_guest_base(const char *image_name,
+                      abi_ulong loaddr, abi_ulong hiaddr);

 #include "qemu/log.h"
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -275,10 +275,10 @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
 }
 #endif

-bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr)
+bool qemu_plugin_hwaddr_is_io(const struct qemu_plugin_hwaddr *haddr)
 {
 #ifdef CONFIG_SOFTMMU
-    return hwaddr->is_io;
+    return haddr->is_io;
 #else
     return false;
 #endif
--- a/target/alpha/cpu-param.h
+++ b/target/alpha/cpu-param.h
@@ -10,22 +10,11 @@

 #define TARGET_LONG_BITS 64
 #define TARGET_PAGE_BITS 13
-#ifdef CONFIG_USER_ONLY
-/*
- * ??? The kernel likes to give addresses in high memory.  If the host has
- * more virtual address space than the guest, this can lead to impossible
- * allocations.  Honor the long-standing assumption that only kernel addrs
- * are negative, but otherwise allow allocations anywhere.  This could lead
- * to tricky emulation problems for programs doing tagged addressing, but
- * that's far fewer than encounter the impossible allocation problem.
- */
-#define TARGET_PHYS_ADDR_SPACE_BITS 63
-#define TARGET_VIRT_ADDR_SPACE_BITS 63
-#else

 /* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
 #define TARGET_PHYS_ADDR_SPACE_BITS 44
 #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
-#endif

 #define NB_MMU_MODES 3

 #endif
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1092,7 +1092,7 @@ void tcg_prologue_init(TCGContext *s)
         size_t data_size = prologue_size - code_size;
         size_t i;

-        log_disas(buf0, code_size);
+        log_disas(buf0, code_size, NULL);

         for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
             if (sizeof(tcg_target_ulong) == 8) {
@@ -1106,7 +1106,7 @@ void tcg_prologue_init(TCGContext *s)
             }
         }
     } else {
-        log_disas(buf0, prologue_size);
+        log_disas(buf0, prologue_size, NULL);
     }
     qemu_log("\n");
     qemu_log_flush();
--- a/tests/guest-debug/run-test.py
+++ b/tests/guest-debug/run-test.py
@@ -80,4 +80,10 @@ if __name__ == '__main__':
         print("GDB crashed? SKIPPING")
         exit(0)

+    try:
+        inferior.wait(2)
+    except subprocess.TimeoutExpired:
+        print("GDB never connected? Killed guest")
+        inferior.kill()
+
     exit(result)