testing and gdbstub updates:

- enable ccache for gitlab builds
- fix various test info leakages for non V=1
- update style to allow loop vars
- bump FreeBSD to v13.2
- clean-up gdbstub tests
- various gdbstub doc and refactorings

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmTvS2AACgkQ+9DbCVqe
KkRiRwgAhsinp2/KgnvkD0n6deQy/JWg9MfYIvvZacKEakIfQvCDoJ752AUZzUTw
ggQ+W2KuaoHTzwG+AOMLdzulkmspQ8xeFuD2aIpFjRMnZrO9jN2T4L0vcGLAd95c
9QLqPeH8xRdhuK28+ILuYzKOKBcefQ44ufMLpxrS2iNITEsSg/Tw3MU91hbct49g
3OR4bD1ueG5Ib/lXp8V/4GnRmfLdnp3k0i/6OHriq7Mpz4Lia67WblVsPEple66U
n7JCo2sI5/m+6p2tvKs7rH60xc8s1Za3kbK4ggEq3LVRfzVOordZqO+1ep6wklTY
6nP9Ry9nZG3gqCmcNXfhoofm0vHaZA==
=Km9m
-----END PGP SIGNATURE-----

Merge tag 'pull-maintainer-ominbus-300823-1' of https://gitlab.com/stsquad/qemu into staging

testing and gdbstub updates:

- enable ccache for gitlab builds
- fix various test info leakages for non V=1
- update style to allow loop vars
- bump FreeBSD to v13.2
- clean-up gdbstub tests
- various gdbstub doc and refactorings

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmTvS2AACgkQ+9DbCVqe
# KkRiRwgAhsinp2/KgnvkD0n6deQy/JWg9MfYIvvZacKEakIfQvCDoJ752AUZzUTw
# ggQ+W2KuaoHTzwG+AOMLdzulkmspQ8xeFuD2aIpFjRMnZrO9jN2T4L0vcGLAd95c
# 9QLqPeH8xRdhuK28+ILuYzKOKBcefQ44ufMLpxrS2iNITEsSg/Tw3MU91hbct49g
# 3OR4bD1ueG5Ib/lXp8V/4GnRmfLdnp3k0i/6OHriq7Mpz4Lia67WblVsPEple66U
# n7JCo2sI5/m+6p2tvKs7rH60xc8s1Za3kbK4ggEq3LVRfzVOordZqO+1ep6wklTY
# 6nP9Ry9nZG3gqCmcNXfhoofm0vHaZA==
# =Km9m
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 30 Aug 2023 10:00:00 EDT
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8 DF35 FBD0 DB09 5A9E 2A44

* tag 'pull-maintainer-ominbus-300823-1' of https://gitlab.com/stsquad/qemu:
  gdbstub: move comment for gdb_register_coprocessor
  gdbstub: replace global gdb_has_xml with a function
  gdbstub: refactor get_feature_xml
  gdbstub: remove unused user_ctx field
  gdbstub: fixes cases where wrong threads were reported to GDB on SIGINT
  tests/tcg: clean-up gdb confirm/pagination settings
  tests: remove test-gdbstub.py
  .gitlab-ci.d/cirrus.yml: Update FreeBSD to v13.2
  docs/style: permit inline loop variables
  tests/tcg: remove quoting for info output
  tests/docker: cleanup non-verbose output
  gitlab: enable ccache for many build jobs

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit db1a88a5ac
@@ -2,11 +2,21 @@
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  cache:
    paths:
      - ccache
    key: "$CI_JOB_NAME"
    when: always
  before_script:
    - JOBS=$(expr $(nproc) + 1)
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
    - mkdir build
    - cd build
    - ccache --zero-stats
    - ../configure --enable-werror --disable-docs --enable-fdt=system
        ${TARGETS:+--target-list="$TARGETS"}
        $CONFIGURE_ARGS ||
@@ -20,6 +30,7 @@
      then
        make -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - ccache --show-stats

# We jump some hoops in common_test_job_template to avoid
# rebuilding all the object files we skip in the artifacts
@@ -50,7 +50,7 @@ x64-freebsd-13-build:
    NAME: freebsd-13
    CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
    CIRRUS_VM_IMAGE_SELECTOR: image_family
    CIRRUS_VM_IMAGE_NAME: freebsd-13-1
    CIRRUS_VM_IMAGE_NAME: freebsd-13-2
    CIRRUS_VM_CPUS: 8
    CIRRUS_VM_RAM: 8G
    UPDATE_COMMAND: pkg update; pkg upgrade -y
@ -2,10 +2,20 @@
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache
|
||||
key: "$CI_JOB_NAME"
|
||||
when: always
|
||||
timeout: 80m
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ccache --zero-stats
|
||||
- ../configure --enable-werror --disable-docs --enable-fdt=system
|
||||
--disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
|
||||
--target-list-exclude="arm-softmmu cris-softmmu
|
||||
@ -18,6 +28,7 @@
|
||||
version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
|
||||
mv -v qemu-setup*.exe qemu-setup-${version}.exe;
|
||||
fi
|
||||
- ccache --show-stats
|
||||
|
||||
# Job to cross-build specific accelerators.
|
||||
#
|
||||
@ -29,7 +40,15 @@
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
timeout: 30m
|
||||
cache:
|
||||
paths:
|
||||
- ccache/
|
||||
key: "$CI_JOB_NAME"
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
@ -40,7 +59,14 @@
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache/
|
||||
key: "$CI_JOB_NAME"
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
|
||||
|
@ -5,13 +5,14 @@
|
||||
- windows
|
||||
- windows-1809
|
||||
cache:
|
||||
key: "${CI_JOB_NAME}-cache"
|
||||
key: "$CI_JOB_NAME"
|
||||
paths:
|
||||
- msys64/var/cache
|
||||
- ccache
|
||||
when: always
|
||||
needs: []
|
||||
stage: build
|
||||
timeout: 80m
|
||||
timeout: 100m
|
||||
variables:
|
||||
# This feature doesn't (currently) work with PowerShell, it stops
|
||||
# the echo'ing of commands being run and doesn't show any timing
|
||||
@ -72,6 +73,7 @@
|
||||
bison diffutils flex
|
||||
git grep make sed
|
||||
$MINGW_TARGET-capstone
|
||||
$MINGW_TARGET-ccache
|
||||
$MINGW_TARGET-curl
|
||||
$MINGW_TARGET-cyrus-sasl
|
||||
$MINGW_TARGET-dtc
|
||||
@ -101,11 +103,18 @@
|
||||
- Write-Output "Running build at $(Get-Date -Format u)"
|
||||
- $env:CHERE_INVOKING = 'yes' # Preserve the current working directory
|
||||
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
|
||||
- $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
|
||||
- $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
|
||||
- $env:CCACHE_MAXSIZE = "500M"
|
||||
- $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
|
||||
- $env:CC = "ccache gcc"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
|
||||
- ..\msys64\usr\bin\bash -lc "../configure --enable-fdt=system $CONFIGURE_ARGS"
|
||||
- ..\msys64\usr\bin\bash -lc "make"
|
||||
- ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
|
||||
- ..\msys64\usr\bin\bash -lc "ccache --show-stats"
|
||||
- Write-Output "Finished build at $(Get-Date -Format u)"
|
||||
|
||||
msys2-64bit:
|
||||
|
@@ -188,3 +188,10 @@ If you've got access to a CentOS Stream 8 x86_64 host that can be
used as a gitlab-CI runner, you can set this variable to enable the
tests that require this kind of host. The runner should be tagged with
both "centos_stream_8" and "x86_64".

CCACHE_DISABLE
~~~~~~~~~~~~~~
The jobs are configured to use "ccache" by default since this typically
reduces compilation time, at the cost of increased storage. If the
use of "ccache" is suspected to be hurting the overall job execution
time, set the "CCACHE_DISABLE=1" env variable to disable it.
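As an illustration of the knob documented above, here is a minimal, hypothetical sketch of how a pipeline might set it; the variable name comes from the doc section, while placing it in a top-level ``variables:`` block of a fork's .gitlab-ci.yml (rather than through the CI/CD settings UI) is an assumption:

.. code-block:: yaml

   # Hypothetical override for a fork's .gitlab-ci.yml: with CCACHE_DISABLE
   # set, ccache simply invokes the real compiler, so the per-job cache is
   # neither consulted nor updated.
   variables:
     CCACHE_DISABLE: "1"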
@@ -204,7 +204,14 @@ Declarations

Mixed declarations (interleaving statements and declarations within
blocks) are generally not allowed; declarations should be at the beginning
of blocks.
of blocks. To avoid accidental re-use it is permissible to declare
loop variables inside for loops:

.. code-block:: c

    for (int i = 0; i < ARRAY_SIZE(thing); i++) {
        /* do something loopy */
    }

Every now and then, an exception is made for declarations inside a
#ifdef or #ifndef block: if the code looks nicer, such declarations can
@ -75,8 +75,6 @@ void gdb_init_gdbserver_state(void)
|
||||
gdbserver_state.sstep_flags &= gdbserver_state.supported_sstep_flags;
|
||||
}
|
||||
|
||||
bool gdb_has_xml;
|
||||
|
||||
/* writes 2*len+1 bytes in buf */
|
||||
void gdb_memtohex(GString *buf, const uint8_t *mem, int len)
|
||||
{
|
||||
@ -351,67 +349,75 @@ static CPUState *gdb_get_cpu(uint32_t pid, uint32_t tid)
|
||||
}
|
||||
}
|
||||
|
||||
bool gdb_has_xml(void)
|
||||
{
|
||||
return !!gdb_get_cpu_process(gdbserver_state.g_cpu)->target_xml;
|
||||
}
|
||||
|
||||
static const char *get_feature_xml(const char *p, const char **newp,
|
||||
GDBProcess *process)
|
||||
{
|
||||
size_t len;
|
||||
int i;
|
||||
const char *name;
|
||||
CPUState *cpu = gdb_get_first_cpu_in_process(process);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
size_t len;
|
||||
|
||||
len = 0;
|
||||
while (p[len] && p[len] != ':')
|
||||
len++;
|
||||
*newp = p + len;
|
||||
/*
|
||||
* qXfer:features:read:ANNEX:OFFSET,LENGTH'
|
||||
* ^p ^newp
|
||||
*/
|
||||
char *term = strchr(p, ':');
|
||||
*newp = term + 1;
|
||||
len = term - p;
|
||||
|
||||
name = NULL;
|
||||
/* Is it the main target xml? */
|
||||
if (strncmp(p, "target.xml", len) == 0) {
|
||||
char *buf = process->target_xml;
|
||||
const size_t buf_sz = sizeof(process->target_xml);
|
||||
|
||||
/* Generate the XML description for this CPU. */
|
||||
if (!buf[0]) {
|
||||
if (!process->target_xml) {
|
||||
GDBRegisterState *r;
|
||||
GString *xml = g_string_new("<?xml version=\"1.0\"?>");
|
||||
|
||||
g_string_append(xml,
|
||||
"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
|
||||
"<target>");
|
||||
|
||||
pstrcat(buf, buf_sz,
|
||||
"<?xml version=\"1.0\"?>"
|
||||
"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
|
||||
"<target>");
|
||||
if (cc->gdb_arch_name) {
|
||||
gchar *arch = cc->gdb_arch_name(cpu);
|
||||
pstrcat(buf, buf_sz, "<architecture>");
|
||||
pstrcat(buf, buf_sz, arch);
|
||||
pstrcat(buf, buf_sz, "</architecture>");
|
||||
g_free(arch);
|
||||
g_autofree gchar *arch = cc->gdb_arch_name(cpu);
|
||||
g_string_append_printf(xml,
|
||||
"<architecture>%s</architecture>",
|
||||
arch);
|
||||
}
|
||||
pstrcat(buf, buf_sz, "<xi:include href=\"");
|
||||
pstrcat(buf, buf_sz, cc->gdb_core_xml_file);
|
||||
pstrcat(buf, buf_sz, "\"/>");
|
||||
g_string_append(xml, "<xi:include href=\"");
|
||||
g_string_append(xml, cc->gdb_core_xml_file);
|
||||
g_string_append(xml, "\"/>");
|
||||
for (r = cpu->gdb_regs; r; r = r->next) {
|
||||
pstrcat(buf, buf_sz, "<xi:include href=\"");
|
||||
pstrcat(buf, buf_sz, r->xml);
|
||||
pstrcat(buf, buf_sz, "\"/>");
|
||||
g_string_append(xml, "<xi:include href=\"");
|
||||
g_string_append(xml, r->xml);
|
||||
g_string_append(xml, "\"/>");
|
||||
}
|
||||
pstrcat(buf, buf_sz, "</target>");
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
if (cc->gdb_get_dynamic_xml) {
|
||||
char *xmlname = g_strndup(p, len);
|
||||
const char *xml = cc->gdb_get_dynamic_xml(cpu, xmlname);
|
||||
g_string_append(xml, "</target>");
|
||||
|
||||
g_free(xmlname);
|
||||
process->target_xml = g_string_free(xml, false);
|
||||
return process->target_xml;
|
||||
}
|
||||
}
|
||||
/* Is it dynamically generated by the target? */
|
||||
if (cc->gdb_get_dynamic_xml) {
|
||||
g_autofree char *xmlname = g_strndup(p, len);
|
||||
const char *xml = cc->gdb_get_dynamic_xml(cpu, xmlname);
|
||||
if (xml) {
|
||||
return xml;
|
||||
}
|
||||
}
|
||||
for (i = 0; ; i++) {
|
||||
name = xml_builtin[i][0];
|
||||
if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
|
||||
break;
|
||||
/* Is it one of the encoded gdb-xml/ files? */
|
||||
for (int i = 0; xml_builtin[i][0]; i++) {
|
||||
const char *name = xml_builtin[i][0];
|
||||
if ((strncmp(name, p, len) == 0) &&
|
||||
strlen(name) == len) {
|
||||
return xml_builtin[i][1];
|
||||
}
|
||||
}
|
||||
return name ? xml_builtin[i][1] : NULL;
|
||||
|
||||
/* failed */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
|
||||
@ -450,12 +456,6 @@ static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Register a supplemental set of CPU registers. If g_pos is nonzero it
|
||||
specifies the first register number and these registers are included in
|
||||
a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
|
||||
gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
|
||||
*/
|
||||
|
||||
void gdb_register_coprocessor(CPUState *cpu,
|
||||
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
|
||||
int num_regs, const char *xml, int g_pos)
|
||||
@ -597,6 +597,15 @@ static int gdb_handle_vcont(const char *p)
|
||||
* or incorrect parameters passed.
|
||||
*/
|
||||
res = 0;
|
||||
|
||||
/*
|
||||
* target_count and last_target keep track of how many CPUs we are going to
|
||||
* step or resume, and a pointer to the state structure of one of them,
|
||||
* respectively
|
||||
*/
|
||||
int target_count = 0;
|
||||
CPUState *last_target = NULL;
|
||||
|
||||
while (*p) {
|
||||
if (*p++ != ';') {
|
||||
return -ENOTSUP;
|
||||
@ -637,6 +646,9 @@ static int gdb_handle_vcont(const char *p)
|
||||
while (cpu) {
|
||||
if (newstates[cpu->cpu_index] == 1) {
|
||||
newstates[cpu->cpu_index] = cur_action;
|
||||
|
||||
target_count++;
|
||||
last_target = cpu;
|
||||
}
|
||||
|
||||
cpu = gdb_next_attached_cpu(cpu);
|
||||
@ -654,6 +666,9 @@ static int gdb_handle_vcont(const char *p)
|
||||
while (cpu) {
|
||||
if (newstates[cpu->cpu_index] == 1) {
|
||||
newstates[cpu->cpu_index] = cur_action;
|
||||
|
||||
target_count++;
|
||||
last_target = cpu;
|
||||
}
|
||||
|
||||
cpu = gdb_next_cpu_in_process(cpu);
|
||||
@ -671,11 +686,25 @@ static int gdb_handle_vcont(const char *p)
|
||||
/* only use if no previous match occurred */
|
||||
if (newstates[cpu->cpu_index] == 1) {
|
||||
newstates[cpu->cpu_index] = cur_action;
|
||||
|
||||
target_count++;
|
||||
last_target = cpu;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* if we're about to resume a specific set of CPUs/threads, make it so that
|
||||
* in case execution gets interrupted, we can send GDB a stop reply with a
|
||||
* correct value. it doesn't really matter which CPU we tell GDB the signal
|
||||
* happened in (VM pauses stop all of them anyway), so long as it is one of
|
||||
* the ones we resumed/single stepped here.
|
||||
*/
|
||||
if (target_count > 0) {
|
||||
gdbserver_state.c_cpu = last_target;
|
||||
}
|
||||
|
||||
gdbserver_state.signal = signal;
|
||||
gdb_continue_partial(newstates);
|
||||
return res;
|
||||
@ -807,7 +836,7 @@ static inline int startswith(const char *string, const char *pattern)
|
||||
return !strncmp(string, pattern, strlen(pattern));
|
||||
}
|
||||
|
||||
static int process_string_cmd(void *user_ctx, const char *data,
|
||||
static int process_string_cmd(const char *data,
|
||||
const GdbCmdParseEntry *cmds, int num_cmds)
|
||||
{
|
||||
int i;
|
||||
@ -834,7 +863,7 @@ static int process_string_cmd(void *user_ctx, const char *data,
|
||||
}
|
||||
|
||||
gdbserver_state.allow_stop_reply = cmd->allow_stop_reply;
|
||||
cmd->handler(params, user_ctx);
|
||||
cmd->handler(params, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -852,7 +881,7 @@ static void run_cmd_parser(const char *data, const GdbCmdParseEntry *cmd)
|
||||
|
||||
/* In case there was an error during the command parsing we must
|
||||
* send a NULL packet to indicate the command is not supported */
|
||||
if (process_string_cmd(NULL, data, cmd, 1)) {
|
||||
if (process_string_cmd(data, cmd, 1)) {
|
||||
gdb_put_packet("");
|
||||
}
|
||||
}
|
||||
@ -1052,7 +1081,7 @@ static void handle_set_reg(GArray *params, void *user_ctx)
|
||||
{
|
||||
int reg_size;
|
||||
|
||||
if (!gdb_has_xml) {
|
||||
if (!gdb_get_cpu_process(gdbserver_state.g_cpu)->target_xml) {
|
||||
gdb_put_packet("");
|
||||
return;
|
||||
}
|
||||
@ -1073,7 +1102,7 @@ static void handle_get_reg(GArray *params, void *user_ctx)
|
||||
{
|
||||
int reg_size;
|
||||
|
||||
if (!gdb_has_xml) {
|
||||
if (!gdb_get_cpu_process(gdbserver_state.g_cpu)->target_xml) {
|
||||
gdb_put_packet("");
|
||||
return;
|
||||
}
|
||||
@ -1365,7 +1394,7 @@ static void handle_v_commands(GArray *params, void *user_ctx)
|
||||
return;
|
||||
}
|
||||
|
||||
if (process_string_cmd(NULL, get_param(params, 0)->data,
|
||||
if (process_string_cmd(get_param(params, 0)->data,
|
||||
gdb_v_commands_table,
|
||||
ARRAY_SIZE(gdb_v_commands_table))) {
|
||||
gdb_put_packet("");
|
||||
@ -1540,7 +1569,6 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx)
|
||||
return;
|
||||
}
|
||||
|
||||
gdb_has_xml = true;
|
||||
p = get_param(params, 0)->data;
|
||||
xml = get_feature_xml(p, &p, process);
|
||||
if (!xml) {
|
||||
@ -1709,13 +1737,13 @@ static void handle_gen_query(GArray *params, void *user_ctx)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!process_string_cmd(NULL, get_param(params, 0)->data,
|
||||
if (!process_string_cmd(get_param(params, 0)->data,
|
||||
gdb_gen_query_set_common_table,
|
||||
ARRAY_SIZE(gdb_gen_query_set_common_table))) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (process_string_cmd(NULL, get_param(params, 0)->data,
|
||||
if (process_string_cmd(get_param(params, 0)->data,
|
||||
gdb_gen_query_table,
|
||||
ARRAY_SIZE(gdb_gen_query_table))) {
|
||||
gdb_put_packet("");
|
||||
@ -1728,13 +1756,13 @@ static void handle_gen_set(GArray *params, void *user_ctx)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!process_string_cmd(NULL, get_param(params, 0)->data,
|
||||
if (!process_string_cmd(get_param(params, 0)->data,
|
||||
gdb_gen_query_set_common_table,
|
||||
ARRAY_SIZE(gdb_gen_query_set_common_table))) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (process_string_cmd(NULL, get_param(params, 0)->data,
|
||||
if (process_string_cmd(get_param(params, 0)->data,
|
||||
gdb_gen_set_table,
|
||||
ARRAY_SIZE(gdb_gen_set_table))) {
|
||||
gdb_put_packet("");
|
||||
@ -2216,6 +2244,6 @@ void gdb_create_default_process(GDBState *s)
|
||||
process = &s->processes[s->process_num - 1];
|
||||
process->pid = pid;
|
||||
process->attached = false;
|
||||
process->target_xml[0] = '\0';
|
||||
process->target_xml = NULL;
|
||||
}
|
||||
|
||||
|
@ -33,7 +33,8 @@ typedef struct GDBProcess {
|
||||
uint32_t pid;
|
||||
bool attached;
|
||||
|
||||
char target_xml[1024];
|
||||
/* If gdb sends qXfer:features:read:target.xml this will be populated */
|
||||
char *target_xml;
|
||||
} GDBProcess;
|
||||
|
||||
enum RSState {
|
||||
|
@ -97,7 +97,6 @@ static void gdb_chr_event(void *opaque, QEMUChrEvent event)
|
||||
|
||||
vm_stop(RUN_STATE_PAUSED);
|
||||
replay_gdb_attached();
|
||||
gdb_has_xml = false;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -198,7 +198,6 @@ static void gdb_accept_init(int fd)
|
||||
gdbserver_state.c_cpu = gdb_first_attached_cpu();
|
||||
gdbserver_state.g_cpu = gdbserver_state.c_cpu;
|
||||
gdbserver_user_state.fd = fd;
|
||||
gdb_has_xml = false;
|
||||
}
|
||||
|
||||
static bool gdb_accept_socket(int gdb_fd)
|
||||
|
@@ -14,6 +14,16 @@
/* Get or set a register. Returns the size of the register. */
typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);

/**
 * gdb_register_coprocessor() - register a supplemental set of registers
 * @cpu - the CPU associated with registers
 * @get_reg - get function (gdb reading)
 * @set_reg - set function (gdb modifying)
 * @num_regs - number of registers in set
 * @xml - xml name of set
 * @gpos - non-zero to append to "general" register set at @gpos
 */
void gdb_register_coprocessor(CPUState *cpu,
                              gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
                              int num_regs, const char *xml, int g_pos);
@@ -31,12 +41,12 @@ int gdbserver_start(const char *port_or_device);
void gdb_set_stop_cpu(CPUState *cpu);

/**
 * gdb_has_xml:
 * This is an ugly hack to cope with both new and old gdb.
 * If gdb sends qXfer:features:read then assume we're talking to a newish
 * gdb that understands target descriptions.
 * gdb_has_xml() - report if gdb supports modern target descriptions
 *
 * This will report true if the gdb negotiated qXfer:features:read
 * target descriptions.
 */
extern bool gdb_has_xml;
bool gdb_has_xml(void);

/* in gdbstub-xml.c, generated by scripts/feature_to_c.sh */
extern const char *const xml_builtin[][2];
@ -48,7 +48,7 @@ int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
|
||||
}
|
||||
if (n < 24) {
|
||||
/* FPA registers. */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return gdb_get_zeroes(mem_buf, 12);
|
||||
@ -56,7 +56,7 @@ int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
|
||||
switch (n) {
|
||||
case 24:
|
||||
/* FPA status register. */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return gdb_get_reg32(mem_buf, 0);
|
||||
@ -102,7 +102,7 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
||||
}
|
||||
if (n < 24) { /* 16-23 */
|
||||
/* FPA registers (ignored). */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return 12;
|
||||
@ -110,7 +110,7 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
||||
switch (n) {
|
||||
case 24:
|
||||
/* FPA status register (ignored). */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return 4;
|
||||
|
@ -56,7 +56,7 @@ static int ppc_gdb_register_len(int n)
|
||||
return sizeof(target_ulong);
|
||||
case 32 ... 63:
|
||||
/* fprs */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return 8;
|
||||
@ -76,7 +76,7 @@ static int ppc_gdb_register_len(int n)
|
||||
return sizeof(target_ulong);
|
||||
case 70:
|
||||
/* fpscr */
|
||||
if (gdb_has_xml) {
|
||||
if (gdb_has_xml()) {
|
||||
return 0;
|
||||
}
|
||||
return sizeof(target_ulong);
|
||||
|
@ -46,9 +46,9 @@ docker-image-%: $(DOCKER_FILES_DIR)/%.docker
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
$(if $(NOUSER),, \
|
||||
--build-arg USER=$(USER) \
|
||||
--build-arg UID=$(UID)) \
|
||||
-t qemu/$* - < $<, \
|
||||
"BUILD", $1)
|
||||
--build-arg UID=$(UID)) \
|
||||
-t qemu/$* - < $< $(if $V,,> /dev/null),\
|
||||
"BUILD", $*)
|
||||
|
||||
# Special rule for debootstraped binfmt linux-user images
|
||||
docker-binfmt-image-debian-%: $(DOCKER_FILES_DIR)/debian-bootstrap.docker
|
||||
|
@ -15,6 +15,7 @@ RUN apt-get update && \
|
||||
# Install common build utilities
|
||||
apt-get install -y --no-install-recommends \
|
||||
curl \
|
||||
ccache \
|
||||
xz-utils \
|
||||
ca-certificates \
|
||||
bison \
|
||||
@ -27,7 +28,12 @@ RUN apt-get update && \
|
||||
python3-wheel && \
|
||||
# Install QEMU build deps for use in CI
|
||||
DEBIAN_FRONTEND=noninteractive eatmydata \
|
||||
apt build-dep -yy --arch-only qemu
|
||||
apt build-dep -yy --arch-only qemu && \
|
||||
mkdir -p /usr/libexec/ccache-wrappers && \
|
||||
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/c++ && \
|
||||
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/cc && \
|
||||
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/g++ && \
|
||||
ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc
|
||||
|
||||
RUN /usr/bin/pip3 install tomli
|
||||
|
||||
@ -35,6 +41,7 @@ ENV TOOLCHAIN_INSTALL /opt
|
||||
ENV TOOLCHAIN_RELEASE 16.0.0
|
||||
ENV TOOLCHAIN_BASENAME "clang+llvm-${TOOLCHAIN_RELEASE}-cross-hexagon-unknown-linux-musl"
|
||||
ENV TOOLCHAIN_URL https://codelinaro.jfrog.io/artifactory/codelinaro-toolchain-for-hexagon/v${TOOLCHAIN_RELEASE}/${TOOLCHAIN_BASENAME}.tar.xz
|
||||
ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers"
|
||||
|
||||
RUN curl -#SL "$TOOLCHAIN_URL" | tar -xJC "$TOOLCHAIN_INSTALL"
|
||||
ENV PATH $PATH:${TOOLCHAIN_INSTALL}/${TOOLCHAIN_BASENAME}/x86_64-linux-gnu/bin
|
||||
|
@@ -83,6 +83,8 @@ if __name__ == '__main__':
        gdb_cmd += " %s" % (args.gdb_args)
    # run quietly and ignore .gdbinit
    gdb_cmd += " -q -n -batch"
    # disable pagination
    gdb_cmd += " -ex 'set pagination off'"
    # disable prompts in case of crash
    gdb_cmd += " -ex 'set confirm off'"
    # connect to remote
@ -1,177 +0,0 @@
|
||||
#
|
||||
# This script needs to be run on startup
|
||||
# qemu -kernel ${KERNEL} -s -S
|
||||
# and then:
|
||||
# gdb ${KERNEL}.vmlinux -x ${QEMU_SRC}/tests/guest-debug/test-gdbstub.py
|
||||
|
||||
import gdb
|
||||
|
||||
failcount = 0
|
||||
|
||||
|
||||
def report(cond, msg):
|
||||
"Report success/fail of test"
|
||||
if cond:
|
||||
print ("PASS: %s" % (msg))
|
||||
else:
|
||||
print ("FAIL: %s" % (msg))
|
||||
global failcount
|
||||
failcount += 1
|
||||
|
||||
|
||||
def check_step():
|
||||
"Step an instruction, check it moved."
|
||||
start_pc = gdb.parse_and_eval('$pc')
|
||||
gdb.execute("si")
|
||||
end_pc = gdb.parse_and_eval('$pc')
|
||||
|
||||
return not (start_pc == end_pc)
|
||||
|
||||
|
||||
def check_break(sym_name):
|
||||
"Setup breakpoint, continue and check we stopped."
|
||||
sym, ok = gdb.lookup_symbol(sym_name)
|
||||
bp = gdb.Breakpoint(sym_name)
|
||||
|
||||
gdb.execute("c")
|
||||
|
||||
# hopefully we came back
|
||||
end_pc = gdb.parse_and_eval('$pc')
|
||||
print ("%s == %s %d" % (end_pc, sym.value(), bp.hit_count))
|
||||
bp.delete()
|
||||
|
||||
# can we test we hit bp?
|
||||
return end_pc == sym.value()
|
||||
|
||||
|
||||
# We need to do hbreak manually as the python interface doesn't export it
|
||||
def check_hbreak(sym_name):
|
||||
"Setup hardware breakpoint, continue and check we stopped."
|
||||
sym, ok = gdb.lookup_symbol(sym_name)
|
||||
gdb.execute("hbreak %s" % (sym_name))
|
||||
gdb.execute("c")
|
||||
|
||||
# hopefully we came back
|
||||
end_pc = gdb.parse_and_eval('$pc')
|
||||
print ("%s == %s" % (end_pc, sym.value()))
|
||||
|
||||
if end_pc == sym.value():
|
||||
gdb.execute("d 1")
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
class WatchPoint(gdb.Breakpoint):
|
||||
|
||||
def get_wpstr(self, sym_name):
|
||||
"Setup sym and wp_str for given symbol."
|
||||
self.sym, ok = gdb.lookup_symbol(sym_name)
|
||||
wp_addr = gdb.parse_and_eval(sym_name).address
|
||||
self.wp_str = '*(%(type)s)(&%(address)s)' % dict(
|
||||
type = wp_addr.type, address = sym_name)
|
||||
|
||||
return(self.wp_str)
|
||||
|
||||
def __init__(self, sym_name, type):
|
||||
wp_str = self.get_wpstr(sym_name)
|
||||
super(WatchPoint, self).__init__(wp_str, gdb.BP_WATCHPOINT, type)
|
||||
|
||||
def stop(self):
|
||||
end_pc = gdb.parse_and_eval('$pc')
|
||||
print ("HIT WP @ %s" % (end_pc))
|
||||
return True
|
||||
|
||||
|
||||
def do_one_watch(sym, wtype, text):
|
||||
|
||||
wp = WatchPoint(sym, wtype)
|
||||
gdb.execute("c")
|
||||
report_str = "%s for %s (%s)" % (text, sym, wp.sym.value())
|
||||
|
||||
if wp.hit_count > 0:
|
||||
report(True, report_str)
|
||||
wp.delete()
|
||||
else:
|
||||
report(False, report_str)
|
||||
|
||||
|
||||
def check_watches(sym_name):
|
||||
"Watch a symbol for any access."
|
||||
|
||||
# Should hit for any read
|
||||
do_one_watch(sym_name, gdb.WP_ACCESS, "awatch")
|
||||
|
||||
# Again should hit for reads
|
||||
do_one_watch(sym_name, gdb.WP_READ, "rwatch")
|
||||
|
||||
# Finally when it is written
|
||||
do_one_watch(sym_name, gdb.WP_WRITE, "watch")
|
||||
|
||||
|
||||
class CatchBreakpoint(gdb.Breakpoint):
|
||||
def __init__(self, sym_name):
|
||||
super(CatchBreakpoint, self).__init__(sym_name)
|
||||
self.sym, ok = gdb.lookup_symbol(sym_name)
|
||||
|
||||
def stop(self):
|
||||
end_pc = gdb.parse_and_eval('$pc')
|
||||
print ("CB: %s == %s" % (end_pc, self.sym.value()))
|
||||
if end_pc == self.sym.value():
|
||||
report(False, "Hit final catchpoint")
|
||||
|
||||
|
||||
def run_test():
|
||||
"Run through the tests one by one"
|
||||
|
||||
print ("Checking we can step the first few instructions")
|
||||
step_ok = 0
|
||||
for i in range(3):
|
||||
if check_step():
|
||||
step_ok += 1
|
||||
|
||||
report(step_ok == 3, "single step in boot code")
|
||||
|
||||
print ("Checking HW breakpoint works")
|
||||
break_ok = check_hbreak("kernel_init")
|
||||
report(break_ok, "hbreak @ kernel_init")
|
||||
|
||||
# Can't set this up until we are in the kernel proper
|
||||
# if we make it to run_init_process we've over-run and
|
||||
# one of the tests failed
|
||||
print ("Setup catch-all for run_init_process")
|
||||
cbp = CatchBreakpoint("run_init_process")
|
||||
cpb2 = CatchBreakpoint("try_to_run_init_process")
|
||||
|
||||
print ("Checking Normal breakpoint works")
|
||||
break_ok = check_break("wait_for_completion")
|
||||
report(break_ok, "break @ wait_for_completion")
|
||||
|
||||
print ("Checking watchpoint works")
|
||||
check_watches("system_state")
|
||||
|
||||
#
|
||||
# This runs as the script it sourced (via -x)
|
||||
#
|
||||
|
||||
try:
|
||||
print ("Connecting to remote")
|
||||
gdb.execute("target remote localhost:1234")
|
||||
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
|
||||
except:
|
||||
print ("GDB Exception: %s" % (sys.exc_info()[0]))
|
||||
failcount += 1
|
||||
import code
|
||||
code.InteractiveConsole(locals=globals()).interact()
|
||||
raise
|
||||
|
||||
# Finally kill the inferior and exit gdb with a count of failures
|
||||
gdb.execute("kill")
|
||||
exit(failcount)
|
@@ -14,7 +14,7 @@ AARCH64_TESTS=fcvt pcalign-a64 lse2-fault
fcvt: LDFLAGS+=-lm

run-fcvt: fcvt
	$(call run-test,$<,$(QEMU) $<, "$< on $(TARGET_NAME)")
	$(call run-test,$<,$(QEMU) $<)
	$(call diff-out,$<,$(AARCH64_SRC)/fcvt.ref)

config-cc.mak: Makefile
@ -76,9 +76,6 @@ except (gdb.error, AttributeError):
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except:
|
||||
|
@ -66,9 +66,6 @@ except (gdb.error, AttributeError):
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except:
|
||||
|
tests/tcg/multiarch/gdbstub/interrupt.py (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
from __future__ import print_function
|
||||
#
|
||||
# Test some of the softmmu debug features with the multiarch memory
|
||||
# test. It is a port of the original vmlinux focused test case but
|
||||
# using the "memory" test instead.
|
||||
#
|
||||
# This is launched via tests/guest-debug/run-test.py
|
||||
#
|
||||
|
||||
import gdb
|
||||
import sys
|
||||
|
||||
failcount = 0
|
||||
|
||||
|
||||
def report(cond, msg):
|
||||
"Report success/fail of test"
|
||||
if cond:
|
||||
print("PASS: %s" % (msg))
|
||||
else:
|
||||
print("FAIL: %s" % (msg))
|
||||
global failcount
|
||||
failcount += 1
|
||||
|
||||
|
||||
def check_interrupt(thread):
|
||||
"""
|
||||
Check that, if thread is resumed, we go back to the same thread when the
|
||||
program gets interrupted.
|
||||
"""
|
||||
|
||||
# Switch to the thread we're going to be running the test in.
|
||||
print("thread ", thread.num)
|
||||
gdb.execute("thr %d" % thread.num)
|
||||
|
||||
# Enter the loop() function on this thread.
|
||||
#
|
||||
# While there are cleaner ways to do this, we want to minimize the number of
|
||||
# side effects on the gdbstub's internal state, since those may mask bugs.
|
||||
# Ideally, there should be no difference between what we're doing here and
|
||||
# the program reaching the loop() function on its own.
|
||||
#
|
||||
# For this to be safe, we only need the prologue of loop() to not have
|
||||
# instructions that may have problems with what we're doing here. We don't
|
||||
# have to worry about anything else, as this function never returns.
|
||||
gdb.execute("set $pc = loop")
|
||||
|
||||
# Continue and then interrupt the task.
|
||||
gdb.post_event(lambda: gdb.execute("interrupt"))
|
||||
gdb.execute("c")
|
||||
|
||||
# Check whether the thread we're in after the interruption is the same we
|
||||
# ran continue from.
|
||||
return (thread.num == gdb.selected_thread().num)
|
||||
|
||||
|
||||
def run_test():
|
||||
"""
|
||||
Test if interrupting the code always lands us on the same thread when
|
||||
running with scheduler-lock enabled.
|
||||
"""
|
||||
|
||||
gdb.execute("set scheduler-locking on")
|
||||
for thread in gdb.selected_inferior().threads():
|
||||
report(check_interrupt(thread),
|
||||
"thread %d resumes correctly on interrupt" % thread.num)
|
||||
|
||||
|
||||
#
|
||||
# This runs as the script it sourced (via -x, via run-test.py)
|
||||
#
|
||||
try:
|
||||
inferior = gdb.selected_inferior()
|
||||
arch = inferior.architecture()
|
||||
print("ATTACHED: %s" % arch.name())
|
||||
except (gdb.error, AttributeError):
|
||||
print("SKIPPING (not connected)", file=sys.stderr)
|
||||
exit(0)
|
||||
|
||||
if gdb.parse_and_eval('$pc') == 0:
|
||||
print("SKIP: PC not set")
|
||||
exit(0)
|
||||
if len(gdb.selected_inferior().threads()) == 1:
|
||||
print("SKIP: set to run on a single thread")
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
print("GDB Exception: %s" % (sys.exc_info()[0]))
|
||||
failcount += 1
|
||||
pass
|
||||
|
||||
# Finally kill the inferior and exit gdb with a count of failures
|
||||
gdb.execute("kill")
|
||||
exit(failcount)
|
@ -115,9 +115,6 @@ if gdb.parse_and_eval('$pc') == 0:
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
|
@ -73,10 +73,6 @@ if gdb.parse_and_eval('$pc') == 0:
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
|
@ -51,10 +51,6 @@ def main():
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except gdb.error:
|
||||
|
@ -42,10 +42,6 @@ if gdb.parse_and_eval('$pc') == 0:
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
|
@ -45,10 +45,6 @@ if gdb.parse_and_eval('$pc') == 0:
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
|
@ -27,7 +27,15 @@ run-gdbstub-memory: memory
|
||||
"-monitor none -display none -chardev file$(COMMA)path=$<.out$(COMMA)id=output $(QEMU_OPTS)" \
|
||||
--bin $< --test $(MULTIARCH_SRC)/gdbstub/memory.py, \
|
||||
softmmu gdbstub support)
|
||||
|
||||
run-gdbstub-interrupt: interrupt
|
||||
$(call run-test, $@, $(GDB_SCRIPT) \
|
||||
--gdb $(HAVE_GDB_BIN) \
|
||||
--qemu $(QEMU) \
|
||||
--output $<.gdb.out \
|
||||
--qargs \
|
||||
"-smp 2 -monitor none -display none -chardev file$(COMMA)path=$<.out$(COMMA)id=output $(QEMU_OPTS)" \
|
||||
--bin $< --test $(MULTIARCH_SRC)/gdbstub/interrupt.py, \
|
||||
softmmu gdbstub support)
|
||||
run-gdbstub-untimely-packet: hello
|
||||
$(call run-test, $@, $(GDB_SCRIPT) \
|
||||
--gdb $(HAVE_GDB_BIN) \
|
||||
@ -37,10 +45,10 @@ run-gdbstub-untimely-packet: hello
|
||||
--qemu $(QEMU) \
|
||||
--bin $< --qargs \
|
||||
"-monitor none -display none -chardev file$(COMMA)path=untimely-packet.out$(COMMA)id=output $(QEMU_OPTS)", \
|
||||
"softmmu gdbstub untimely packets")
|
||||
softmmu gdbstub untimely packets)
|
||||
$(call quiet-command, \
|
||||
(! grep -Fq 'Packet instead of Ack, ignoring it' untimely-packet.gdb.err), \
|
||||
"GREP", "file untimely-packet.gdb.err")
|
||||
"GREP", file untimely-packet.gdb.err)
|
||||
else
|
||||
run-gdbstub-%:
|
||||
$(call skip-test, "gdbstub test $*", "no guest arch support")
|
||||
@ -50,4 +58,4 @@ run-gdbstub-%:
|
||||
$(call skip-test, "gdbstub test $*", "need working gdb")
|
||||
endif
|
||||
|
||||
MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-untimely-packet
|
||||
MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt run-gdbstub-untimely-packet
|
||||
|
tests/tcg/multiarch/system/interrupt.c (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
 * External interruption test. This test is structured in such a way that it
 * passes the cases that require it to exit, but we can make it enter an
 * infinite loop from GDB.
 *
 * We don't have the benefit of libc, just builtin C primitives and
 * whatever is in minilib.
 */

#include <minilib.h>

void loop(void)
{
    do {
        /*
         * Loop forever. Just make sure the condition is always a constant
         * expression, so that this loop is not UB, as per the C
         * standard.
         */
    } while (1);
}

int main(void)
{
    return 0;
}
@ -61,10 +61,6 @@ if gdb.parse_and_eval("$pc") == 0:
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except (gdb.error):
|
||||
|
@ -49,10 +49,6 @@ def main():
|
||||
exit(0)
|
||||
|
||||
try:
|
||||
# These are not very useful in scripts
|
||||
gdb.execute("set pagination off")
|
||||
gdb.execute("set confirm off")
|
||||
|
||||
# Run the actual tests
|
||||
run_test()
|
||||
except gdb.error:
|
||||
|