Merge tag 'pull-maintainer-updates-280224-1' of https://gitlab.com/stsquad/qemu into staging

Testing, gdbstub and plugin updates:

  - fix some test/tcg license headers to GPLv2+
  - bump up check-tcg timeout to 120s
  - avoid re-building VM images too often
  - update OpenBSD to 7.4
  - use GDBFeature to build gdbstub XML
  - unify plugin vcpu count under qemu_plugin_num_vcpus
  - avoid spurious idle/resume callbacks on new vCPUs
  - ensure nios2-linux-user processes async work
  - call vcpu_init plugin callback through async work
  - define plugin helpers for when registers are being read
  - add plugin API for reading register values
  - add support for register tracking to execlog
  - update plugin docs with assumptions
  - mention plugins can trigger tb_flush in mttcg design doc

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmXfAv0ACgkQ+9DbCVqe
# KkQyogf/X6T5lWsdZGb22FOYzaTLf5gfCPXArIVN+GsjSae3dU6qy/qVM1VRJQPw
# mH8kvMY7QO5V9M2tL33WtZZg6hqWypXYU+Hit6sMmveKYMKS9ESEX28x3yybgt8Y
# fyDywNODX7bs8Wb6NQjVkZvTmM2llrHEtQXPffaXaPyxOAzlGTV9Mf3Sop9rk4nG
# 8IchzLmOOQ7XnVst/KRyq+29oOYsbyUtj13tNeWBZ5iXFDT6Q/nGwPQ12U2Ztn9N
# FZvyzGG707dFaEDxIr4pl7n+lHJto29LMlSXlocANwG6wFNP3nfkSw/dXw3nkZZK
# pOfrQKvnnunJKBd7495LYZxTDe505Q==
# =/k97
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 28 Feb 2024 09:55:09 GMT
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8  DF35 FBD0 DB09 5A9E 2A44

* tag 'pull-maintainer-updates-280224-1' of https://gitlab.com/stsquad/qemu: (29 commits)
  docs/devel: plugins can trigger a tb flush
  docs/devel: document some plugin assumptions
  docs/devel: lift example and plugin API sections up
  contrib/plugins: extend execlog to track register changes
  contrib/plugins: fix imatch
  tests/tcg: expand insn test case to exercise register API
  plugins: add an API to read registers
  plugins: create CPUPluginState and migrate plugin_mask
  gdbstub: expose api to find registers
  plugins: Use different helpers when reading registers
  cpu: call plugin init hook asynchronously
  linux-user: ensure nios2 processes queued work
  plugins: fix order of init/idle/resume callback
  plugins: add qemu_plugin_num_vcpus function
  plugins: remove previous n_vcpus functions from API
  gdbstub: Add members to identify registers to GDBFeature
  hw/core/cpu: Remove gdb_get_dynamic_xml member
  gdbstub: Infer number of core registers from XML
  gdbstub: Simplify XML lookup
  gdbstub: Change gdb_get_reg_cb and gdb_set_reg_cb
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
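
As a rough illustration of the plugin-facing pieces this series adds (qemu_plugin_get_registers(), qemu_plugin_read_register() and qemu_plugin_num_vcpus()), a minimal plugin could look like the sketch below. This is not code from the series; the callback name and output format are invented for illustration:

    #include <glib.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    /* runs in vCPU context once the vCPU has started executing code */
    static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        g_autoptr(GArray) regs = qemu_plugin_get_registers();
        g_autoptr(GString) report = g_string_new(NULL);

        g_string_printf(report, "vcpu %u of %d started, %u registers\n",
                        vcpu_index, qemu_plugin_num_vcpus(), regs->len);

        for (guint i = 0; i < regs->len; i++) {
            qemu_plugin_reg_descriptor *rd =
                &g_array_index(regs, qemu_plugin_reg_descriptor, i);
            g_string_append_printf(report, "  %s (%s)\n", rd->name,
                                   rd->feature ? rd->feature : "-");
        }
        qemu_plugin_outs(report->str);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
        return 0;
    }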
Committed by Peter Maydell on 2024-02-28 14:23:07 +00:00 as commit d316f1b146 (65 changed files with 1237 additions and 661 deletions).


@ -43,6 +43,7 @@
* CPU's index into a TCG temp, since the first callback did it already.
*/
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
@ -79,6 +80,7 @@ enum plugin_gen_from {
enum plugin_gen_cb {
PLUGIN_GEN_CB_UDATA,
PLUGIN_GEN_CB_UDATA_R,
PLUGIN_GEN_CB_INLINE,
PLUGIN_GEN_CB_MEM,
PLUGIN_GEN_ENABLE_MEM_HELPER,
@ -90,7 +92,10 @@ enum plugin_gen_cb {
* These helpers are stubs that get dynamically switched out for calls
* direct to the plugin if they are subscribed to.
*/
void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
{ }
void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
{ }
void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
@ -98,7 +103,7 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
static void gen_empty_udata_cb(void)
static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
@ -106,12 +111,22 @@ static void gen_empty_udata_cb(void)
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
gen_helper(cpu_index, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb_no_wg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
}
static void gen_empty_udata_cb_no_rwg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
}
/*
* For now we only support addi_i64.
* When we support more ops, we can generate one empty inline cb for each.
@ -192,7 +207,8 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
gen_empty_mem_helper);
/* fall through */
case PLUGIN_GEN_FROM_TB:
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
break;
default:
@ -588,6 +604,12 @@ static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
}
static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op)
{
inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op);
}
static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op)
{
@ -602,6 +624,14 @@ static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}
static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
}
static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
@ -721,6 +751,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_tb_udata(plugin_tb, op);
break;
case PLUGIN_GEN_CB_UDATA_R:
plugin_gen_tb_udata_r(plugin_tb, op);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_tb_inline(plugin_tb, op);
break;
@ -737,6 +770,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_UDATA_R:
plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(plugin_tb, op, insn_idx);
break;
@ -796,7 +832,7 @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
{
bool ret = false;
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) {
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
int i;


@ -1,4 +1,5 @@
#ifdef CONFIG_PLUGIN
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_wg, TCG_CALL_NO_WG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_rwg, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
#endif


@ -767,7 +767,7 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
policy = LRU;
cores = sys ? qemu_plugin_n_vcpus() : 1;
cores = sys ? info->system.smp_vcpus : 1;
for (i = 0; i < argc; i++) {
char *opt = argv[i];


@ -1,7 +1,7 @@
/*
* Copyright (C) 2021, Alexandre Iooss <erdnaxe@crans.org>
*
* Log instruction execution with memory access.
* Log instruction execution with memory access and register changes
*
* License: GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
@ -15,29 +15,40 @@
#include <qemu-plugin.h>
typedef struct {
struct qemu_plugin_register *handle;
GByteArray *last;
GByteArray *new;
const char *name;
} Register;
typedef struct CPU {
/* Store last executed instruction on each vCPU as a GString */
GString *last_exec;
/* Ptr array of Register */
GPtrArray *registers;
} CPU;
QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
/* Store last executed instruction on each vCPU as a GString */
static GPtrArray *last_exec;
static GArray *cpus;
static GRWLock expand_array_lock;
static GPtrArray *imatches;
static GArray *amatches;
static GPtrArray *rmatches;
static bool disas_assist;
static GMutex add_reg_name_lock;
static GPtrArray *all_reg_names;
/*
* Expand last_exec array.
*
* As we could have multiple threads trying to do this we need to
* serialise the expansion under a lock.
*/
static void expand_last_exec(int cpu_index)
static CPU *get_cpu(int vcpu_index)
{
g_rw_lock_writer_lock(&expand_array_lock);
while (cpu_index >= last_exec->len) {
GString *s = g_string_new(NULL);
g_ptr_array_add(last_exec, s);
}
g_rw_lock_writer_unlock(&expand_array_lock);
CPU *c;
g_rw_lock_reader_lock(&expand_array_lock);
c = &g_array_index(cpus, CPU, vcpu_index);
g_rw_lock_reader_unlock(&expand_array_lock);
return c;
}
/**
@ -46,13 +57,10 @@ static void expand_last_exec(int cpu_index)
static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
uint64_t vaddr, void *udata)
{
GString *s;
CPU *c = get_cpu(cpu_index);
GString *s = c->last_exec;
/* Find vCPU in array */
g_rw_lock_reader_lock(&expand_array_lock);
g_assert(cpu_index < last_exec->len);
s = g_ptr_array_index(last_exec, cpu_index);
g_rw_lock_reader_unlock(&expand_array_lock);
/* Indicate type of memory access */
if (qemu_plugin_mem_is_store(info)) {
@ -73,32 +81,91 @@ static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
}
/**
* Log instruction execution
* Log instruction execution, outputting the last one.
*
* vcpu_insn_exec() is a copy and paste of vcpu_insn_exec_with_regs()
* without the checking of register values when we've attempted to
* optimise with disas_assist.
*/
static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
static void insn_check_regs(CPU *cpu)
{
GString *s;
for (int n = 0; n < cpu->registers->len; n++) {
Register *reg = cpu->registers->pdata[n];
int sz;
/* Find or create vCPU in array */
g_rw_lock_reader_lock(&expand_array_lock);
if (cpu_index >= last_exec->len) {
g_rw_lock_reader_unlock(&expand_array_lock);
expand_last_exec(cpu_index);
g_rw_lock_reader_lock(&expand_array_lock);
g_byte_array_set_size(reg->new, 0);
sz = qemu_plugin_read_register(reg->handle, reg->new);
g_assert(sz == reg->last->len);
if (memcmp(reg->last->data, reg->new->data, sz)) {
GByteArray *temp = reg->last;
g_string_append_printf(cpu->last_exec, ", %s -> 0x", reg->name);
/* TODO: handle BE properly */
for (int i = sz - 1; i >= 0; i--) {
g_string_append_printf(cpu->last_exec, "%02x",
reg->new->data[i]);
}
reg->last = reg->new;
reg->new = temp;
}
}
s = g_ptr_array_index(last_exec, cpu_index);
g_rw_lock_reader_unlock(&expand_array_lock);
}
/* Log last instruction while checking registers */
static void vcpu_insn_exec_with_regs(unsigned int cpu_index, void *udata)
{
CPU *cpu = get_cpu(cpu_index);
/* Print previous instruction in cache */
if (s->len) {
qemu_plugin_outs(s->str);
if (cpu->last_exec->len) {
if (cpu->registers) {
insn_check_regs(cpu);
}
qemu_plugin_outs(cpu->last_exec->str);
qemu_plugin_outs("\n");
}
/* Store new instruction in cache */
/* vcpu_mem will add memory access information to last_exec */
g_string_printf(s, "%u, ", cpu_index);
g_string_append(s, (char *)udata);
g_string_printf(cpu->last_exec, "%u, ", cpu_index);
g_string_append(cpu->last_exec, (char *)udata);
}
/* Log last instruction while checking registers, ignore next */
static void vcpu_insn_exec_only_regs(unsigned int cpu_index, void *udata)
{
CPU *cpu = get_cpu(cpu_index);
/* Print previous instruction in cache */
if (cpu->last_exec->len) {
if (cpu->registers) {
insn_check_regs(cpu);
}
qemu_plugin_outs(cpu->last_exec->str);
qemu_plugin_outs("\n");
}
/* reset */
cpu->last_exec->len = 0;
}
/* Log last instruction without checking regs, setup next */
static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
{
CPU *cpu = get_cpu(cpu_index);
/* Print previous instruction in cache */
if (cpu->last_exec->len) {
qemu_plugin_outs(cpu->last_exec->str);
qemu_plugin_outs("\n");
}
/* Store new instruction in cache */
/* vcpu_mem will add memory access information to last_exec */
g_string_printf(cpu->last_exec, "%u, ", cpu_index);
g_string_append(cpu->last_exec, (char *)udata);
}
/**
@ -111,6 +178,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
struct qemu_plugin_insn *insn;
bool skip = (imatches || amatches);
bool check_regs_this = rmatches;
bool check_regs_next = false;
size_t n = qemu_plugin_tb_n_insns(tb);
for (size_t i = 0; i < n; i++) {
@ -131,7 +200,8 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
/*
* If we are filtering we better check out if we have any
* hits. The skip "latches" so we can track memory accesses
* after the instruction we care about.
* after the instruction we care about. Also enable register
* checking on the next instruction.
*/
if (skip && imatches) {
int j;
@ -139,6 +209,7 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
char *m = g_ptr_array_index(imatches, j);
if (g_str_has_prefix(insn_disas, m)) {
skip = false;
check_regs_next = rmatches;
}
}
}
@ -153,8 +224,39 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
}
}
/*
* Check the disassembly to see if a register we care about
* will be affected by this instruction. This relies on the
* disassembler doing something sensible for the registers we
* care about.
*/
if (disas_assist && rmatches) {
check_regs_next = false;
gchar *args = g_strstr_len(insn_disas, -1, " ");
for (int n = 0; n < all_reg_names->len; n++) {
gchar *reg = g_ptr_array_index(all_reg_names, n);
if (g_strrstr(args, reg)) {
check_regs_next = true;
skip = false;
}
}
}
/*
* We now have 3 choices:
*
* - Log insn
* - Log insn while checking registers
* - Don't log this insn but check if last insn changed registers
*/
if (skip) {
g_free(insn_disas);
if (check_regs_this) {
qemu_plugin_register_vcpu_insn_exec_cb(insn,
vcpu_insn_exec_only_regs,
QEMU_PLUGIN_CB_R_REGS,
NULL);
}
} else {
uint32_t insn_opcode;
insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
@ -167,30 +269,124 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
QEMU_PLUGIN_MEM_RW, NULL);
/* Register callback on instruction */
qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
QEMU_PLUGIN_CB_NO_REGS, output);
if (check_regs_this) {
qemu_plugin_register_vcpu_insn_exec_cb(
insn, vcpu_insn_exec_with_regs,
QEMU_PLUGIN_CB_R_REGS,
output);
} else {
qemu_plugin_register_vcpu_insn_exec_cb(
insn, vcpu_insn_exec,
QEMU_PLUGIN_CB_NO_REGS,
output);
}
/* reset skip */
skip = (imatches || amatches);
}
/* set regs for next */
if (disas_assist && rmatches) {
check_regs_this = check_regs_next;
}
g_free(insn_disas);
}
}
static Register *init_vcpu_register(qemu_plugin_reg_descriptor *desc)
{
Register *reg = g_new0(Register, 1);
g_autofree gchar *lower = g_utf8_strdown(desc->name, -1);
int r;
reg->handle = desc->handle;
reg->name = g_intern_string(lower);
reg->last = g_byte_array_new();
reg->new = g_byte_array_new();
/* read the initial value */
r = qemu_plugin_read_register(reg->handle, reg->last);
g_assert(r > 0);
return reg;
}
static GPtrArray *registers_init(int vcpu_index)
{
g_autoptr(GPtrArray) registers = g_ptr_array_new();
g_autoptr(GArray) reg_list = qemu_plugin_get_registers();
if (rmatches && reg_list->len) {
/*
* Go through each register in the complete list and
* see if we want to track it.
*/
for (int r = 0; r < reg_list->len; r++) {
qemu_plugin_reg_descriptor *rd = &g_array_index(
reg_list, qemu_plugin_reg_descriptor, r);
for (int p = 0; p < rmatches->len; p++) {
g_autoptr(GPatternSpec) pat = g_pattern_spec_new(rmatches->pdata[p]);
g_autofree gchar *rd_lower = g_utf8_strdown(rd->name, -1);
if (g_pattern_match_string(pat, rd->name) ||
g_pattern_match_string(pat, rd_lower)) {
Register *reg = init_vcpu_register(rd);
g_ptr_array_add(registers, reg);
/* we need a list of regnames at TB translation time */
if (disas_assist) {
g_mutex_lock(&add_reg_name_lock);
if (!g_ptr_array_find(all_reg_names, reg->name, NULL)) {
g_ptr_array_add(all_reg_names, reg->name);
}
g_mutex_unlock(&add_reg_name_lock);
}
}
}
}
}
return registers->len ? g_steal_pointer(&registers) : NULL;
}
/*
* Initialise a new vcpu/thread with:
* - last_exec tracking data
* - list of tracked registers
* - initial value of registers
*
* As we could have multiple threads trying to do this we need to
* serialise the expansion under a lock.
*/
static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
{
CPU *c;
g_rw_lock_writer_lock(&expand_array_lock);
if (vcpu_index >= cpus->len) {
g_array_set_size(cpus, vcpu_index + 1);
}
g_rw_lock_writer_unlock(&expand_array_lock);
c = get_cpu(vcpu_index);
c->last_exec = g_string_new(NULL);
c->registers = registers_init(vcpu_index);
}
/**
* On plugin exit, print last instruction in cache
*/
static void plugin_exit(qemu_plugin_id_t id, void *p)
{
guint i;
GString *s;
for (i = 0; i < last_exec->len; i++) {
s = g_ptr_array_index(last_exec, i);
if (s->str) {
qemu_plugin_outs(s->str);
g_rw_lock_reader_lock(&expand_array_lock);
for (i = 0; i < cpus->len; i++) {
CPU *c = get_cpu(i);
if (c->last_exec && c->last_exec->str) {
qemu_plugin_outs(c->last_exec->str);
qemu_plugin_outs("\n");
}
}
g_rw_lock_reader_unlock(&expand_array_lock);
}
/* Add a match to the array of matches */
@ -199,7 +395,7 @@ static void parse_insn_match(char *match)
if (!imatches) {
imatches = g_ptr_array_new();
}
g_ptr_array_add(imatches, match);
g_ptr_array_add(imatches, g_strdup(match));
}
static void parse_vaddr_match(char *match)
@ -212,6 +408,18 @@ static void parse_vaddr_match(char *match)
g_array_append_val(amatches, v);
}
/*
* We have to wait until vCPUs are started before we can check whether the
* patterns find anything.
*/
static void add_regpat(char *regpat)
{
if (!rmatches) {
rmatches = g_ptr_array_new();
}
g_ptr_array_add(rmatches, g_strdup(regpat));
}
/**
* Install the plugin
*/
@ -223,11 +431,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
* Initialize dynamic array to cache vCPU instruction. In user mode
* we don't know the size before emulation.
*/
if (info->system_emulation) {
last_exec = g_ptr_array_sized_new(info->system.max_vcpus);
} else {
last_exec = g_ptr_array_new();
}
cpus = g_array_sized_new(true, true, sizeof(CPU),
info->system_emulation ? info->system.max_vcpus : 1);
for (int i = 0; i < argc; i++) {
char *opt = argv[i];
@ -236,13 +441,22 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
parse_insn_match(tokens[1]);
} else if (g_strcmp0(tokens[0], "afilter") == 0) {
parse_vaddr_match(tokens[1]);
} else if (g_strcmp0(tokens[0], "reg") == 0) {
add_regpat(tokens[1]);
} else if (g_strcmp0(tokens[0], "rdisas") == 0) {
if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &disas_assist)) {
fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
return -1;
}
all_reg_names = g_ptr_array_new();
} else {
fprintf(stderr, "option parsing failed: %s\n", opt);
return -1;
}
}
/* Register translation block and exit callbacks */
/* Register init, translation block and exit callbacks */
qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);


@ -109,6 +109,7 @@ including:
- debugging operations (breakpoint insertion/removal)
- some CPU helper functions
- linux-user spawning its first thread
- operations related to TCG Plugins
This is done with the async_safe_run_on_cpu() mechanism to ensure all
vCPUs are quiescent when changes are being made to shared global


@ -112,6 +112,55 @@ details are opaque to plugins. The plugin is able to query select
details of instructions and system configuration only through the
exported *qemu_plugin* functions.
However the following assumptions can be made:
Translation Blocks
++++++++++++++++++
All code will go through a translation phase although not all
translations will necessarily be executed. You need to instrument
actual executions to track what is happening.
It is quite normal to see the same address translated multiple times.
If you want to track the code in system emulation you should examine
the underlying physical address (``qemu_plugin_insn_haddr``) to take
into account the effects of virtual memory although if the system does
paging this will change too.
Not all instructions in a block will always execute, so if it's
important to track individual instruction execution you need to
instrument them directly. However asynchronous interrupts will not
change control flow mid-block.
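
As a sketch of what instrumenting actual execution looks like (the
callback names are illustrative, not part of the API)::

    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
    {
        /* runs every time this instruction is actually executed */
    }

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t n = qemu_plugin_tb_n_insns(tb);
        for (size_t i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                                   QEMU_PLUGIN_CB_NO_REGS,
                                                   NULL);
        }
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        return 0;
    }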
Instructions
++++++++++++
Instruction instrumentation runs before the instruction executes. You
can be sure the instruction will be dispatched, but you can't
be sure it will complete. Generally this will be because of a
synchronous exception (e.g. SIGILL) triggered by the instruction
attempting to execute. If you want to be sure you will need to
instrument the next instruction as well. See the ``execlog.c`` plugin
for examples of how to track this and finalise details after execution.
Memory Accesses
+++++++++++++++
Memory callbacks are called after a successful load or store.
Unsuccessful operations (i.e. faults) will not be visible to memory
instrumentation although the execution side effects can be observed
(e.g. entering an exception handler).
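
For instance, a callback registered per instruction at translation time
(reusing the shape of the sketch above; ``vcpu_mem`` is an illustrative
name) only ever fires for accesses that completed::

    static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                         uint64_t vaddr, void *udata)
    {
        /* only successful accesses arrive here, faulting ones never do */
        if (qemu_plugin_mem_is_store(info)) {
            /* ... count or log the store at vaddr ... */
        }
    }

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             QEMU_PLUGIN_MEM_RW, NULL);
        }
    }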
System Idle and Resume States
+++++++++++++++++++++++++++++
The ``qemu_plugin_register_vcpu_idle_cb`` and
``qemu_plugin_register_vcpu_resume_cb`` functions can be used to track
when CPUs go into and return from sleep states when waiting for
external I/O. Be aware though that these may occur less frequently
than in real HW due to the inefficiencies of emulation giving less
chance for the CPU to idle.
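
A sketch of hooking these events (the callback names are illustrative,
and the registrations belong in ``qemu_plugin_install()``)::

    static void vcpu_idle(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        /* the vCPU is about to wait for an external event */
    }

    static void vcpu_resume(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        /* the vCPU is running again */
    }

    /* in qemu_plugin_install(): */
    qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle);
    qemu_plugin_register_vcpu_resume_cb(id, vcpu_resume);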
Internals
---------
@ -143,7 +192,7 @@ requested. The plugin isn't completely uninstalled until the safe work
has executed while all vCPUs are quiescent.
Example Plugins
---------------
===============
There are a number of plugins included with QEMU and you are
encouraged to contribute your own plugins upstream. There is a
@ -497,6 +546,22 @@ arguments if required::
$ qemu-system-arm $(QEMU_ARGS) \
-plugin ./contrib/plugins/libexeclog.so,ifilter=st1w,afilter=0x40001808 -d plugin
This plugin can also dump registers when they change value. Specify the name of the
registers with multiple ``reg`` options. You can also use glob style matching if you wish::
$ qemu-system-arm $(QEMU_ARGS) \
-plugin ./contrib/plugins/libexeclog.so,reg=\*_el2,reg=sp -d plugin
Be aware that each additional register to check will slow down
execution quite considerably. You can optimise the number of register
checks done by using the ``rdisas`` option. This will only instrument
instructions that mention the registers in question in the
disassembly. This is not foolproof as some instructions implicitly
change registers. You can use the ``ifilter`` option to catch these cases::
$ qemu-system-arm $(QEMU_ARGS) \
-plugin ./contrib/plugins/libexeclog.so,ifilter=msr,ifilter=blr,reg=x30,reg=\*_el1,rdisas=on
- contrib/plugins/cache.c
Cache modelling plugin that measures the performance of a given L1 cache
@ -575,12 +640,11 @@ The plugin has a number of arguments, all of them are optional:
configuration arguments implies ``l2=on``.
(default: N = 2097152 (2MB), B = 64, A = 16)
API
---
Plugin API
==========
The following API is generated from the inline documentation in
``include/qemu/qemu-plugin.h``. Please ensure any updates to the API
include the full kernel-doc annotations.
.. kernel-doc:: include/qemu/qemu-plugin.h


@ -47,10 +47,9 @@
typedef struct GDBRegisterState {
int base_reg;
int num_regs;
gdb_get_reg_cb get_reg;
gdb_set_reg_cb set_reg;
const char *xml;
const GDBFeature *feature;
} GDBRegisterState;
GDBState gdbserver_state;
@ -353,6 +352,7 @@ static const char *get_feature_xml(const char *p, const char **newp,
{
CPUState *cpu = gdb_get_first_cpu_in_process(process);
CPUClass *cc = CPU_GET_CLASS(cpu);
GDBRegisterState *r;
size_t len;
/*
@ -366,7 +366,6 @@ static const char *get_feature_xml(const char *p, const char **newp,
/* Is it the main target xml? */
if (strncmp(p, "target.xml", len) == 0) {
if (!process->target_xml) {
GDBRegisterState *r;
g_autoptr(GPtrArray) xml = g_ptr_array_new_with_free_func(g_free);
g_ptr_array_add(
@ -381,18 +380,12 @@ static const char *get_feature_xml(const char *p, const char **newp,
g_markup_printf_escaped("<architecture>%s</architecture>",
cc->gdb_arch_name(cpu)));
}
g_ptr_array_add(
xml,
g_markup_printf_escaped("<xi:include href=\"%s\"/>",
cc->gdb_core_xml_file));
if (cpu->gdb_regs) {
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
g_ptr_array_add(
xml,
g_markup_printf_escaped("<xi:include href=\"%s\"/>",
r->xml));
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
g_ptr_array_add(
xml,
g_markup_printf_escaped("<xi:include href=\"%s\"/>",
r->feature->xmlname));
}
g_ptr_array_add(xml, g_strdup("</target>"));
g_ptr_array_add(xml, NULL);
@ -401,20 +394,11 @@ static const char *get_feature_xml(const char *p, const char **newp,
}
return process->target_xml;
}
/* Is it dynamically generated by the target? */
if (cc->gdb_get_dynamic_xml) {
g_autofree char *xmlname = g_strndup(p, len);
const char *xml = cc->gdb_get_dynamic_xml(cpu, xmlname);
if (xml) {
return xml;
}
}
/* Is it one of the encoded gdb-xml/ files? */
for (int i = 0; gdb_static_features[i].xmlname; i++) {
const char *name = gdb_static_features[i].xmlname;
if ((strncmp(name, p, len) == 0) &&
strlen(name) == len) {
return gdb_static_features[i].xml;
/* Is it one of the features? */
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (strncmp(p, r->feature->xmlname, len) == 0) {
return r->feature->xml;
}
}
@ -435,9 +419,10 @@ void gdb_feature_builder_init(GDBFeatureBuilder *builder, GDBFeature *feature,
builder->feature = feature;
builder->xml = g_ptr_array_new();
g_ptr_array_add(builder->xml, header);
builder->regs = g_ptr_array_new();
builder->base_reg = base_reg;
feature->xmlname = xmlname;
feature->num_regs = 0;
feature->name = name;
}
void gdb_feature_builder_append_tag(const GDBFeatureBuilder *builder,
@ -456,10 +441,12 @@ void gdb_feature_builder_append_reg(const GDBFeatureBuilder *builder,
const char *type,
const char *group)
{
if (builder->feature->num_regs < regnum) {
builder->feature->num_regs = regnum;
if (builder->regs->len <= regnum) {
g_ptr_array_set_size(builder->regs, regnum + 1);
}
builder->regs->pdata[regnum] = (gpointer *)name;
if (group) {
gdb_feature_builder_append_tag(
builder,
@ -485,6 +472,9 @@ void gdb_feature_builder_end(const GDBFeatureBuilder *builder)
}
g_ptr_array_free(builder->xml, TRUE);
builder->feature->num_regs = builder->regs->len;
builder->feature->regs = (void *)g_ptr_array_free(builder->regs, FALSE);
}
const GDBFeature *gdb_find_static_feature(const char *xmlname)
@ -500,22 +490,44 @@ const GDBFeature *gdb_find_static_feature(const char *xmlname)
g_assert_not_reached();
}
static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
GArray *gdb_get_register_list(CPUState *cpu)
{
GArray *results = g_array_new(true, true, sizeof(GDBRegDesc));
/* registers are only available once the CPU is initialised */
if (!cpu->gdb_regs) {
return results;
}
for (int f = 0; f < cpu->gdb_regs->len; f++) {
GDBRegisterState *r = &g_array_index(cpu->gdb_regs, GDBRegisterState, f);
for (int i = 0; i < r->feature->num_regs; i++) {
const char *name = r->feature->regs[i];
GDBRegDesc desc = {
r->base_reg + i,
name,
r->feature->name
};
g_array_append_val(results, desc);
}
}
return results;
}
int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu_env(cpu);
GDBRegisterState *r;
if (reg < cc->gdb_num_core_regs) {
return cc->gdb_read_register(cpu, buf, reg);
}
if (cpu->gdb_regs) {
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
return r->get_reg(env, buf, reg - r->base_reg);
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (r->base_reg <= reg && reg < r->base_reg + r->feature->num_regs) {
return r->get_reg(cpu, buf, reg - r->base_reg);
}
}
return 0;
@ -524,58 +536,79 @@ static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu_env(cpu);
GDBRegisterState *r;
if (reg < cc->gdb_num_core_regs) {
return cc->gdb_write_register(cpu, mem_buf, reg);
}
if (cpu->gdb_regs) {
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
return r->set_reg(env, mem_buf, reg - r->base_reg);
}
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (r->base_reg <= reg && reg < r->base_reg + r->feature->num_regs) {
return r->set_reg(cpu, mem_buf, reg - r->base_reg);
}
}
return 0;
}
static void gdb_register_feature(CPUState *cpu, int base_reg,
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
const GDBFeature *feature)
{
GDBRegisterState s = {
.base_reg = base_reg,
.get_reg = get_reg,
.set_reg = set_reg,
.feature = feature
};
g_array_append_val(cpu->gdb_regs, s);
}
void gdb_init_cpu(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
const GDBFeature *feature;
cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState));
if (cc->gdb_core_xml_file) {
feature = gdb_find_static_feature(cc->gdb_core_xml_file);
gdb_register_feature(cpu, 0,
cc->gdb_read_register, cc->gdb_write_register,
feature);
cpu->gdb_num_regs = cpu->gdb_num_g_regs = feature->num_regs;
}
if (cc->gdb_num_core_regs) {
cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
}
}
void gdb_register_coprocessor(CPUState *cpu,
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
int num_regs, const char *xml, int g_pos)
const GDBFeature *feature, int g_pos)
{
GDBRegisterState *s;
guint i;
int base_reg = cpu->gdb_num_regs;
if (cpu->gdb_regs) {
for (i = 0; i < cpu->gdb_regs->len; i++) {
/* Check for duplicates. */
s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (strcmp(s->xml, xml) == 0) {
return;
}
for (i = 0; i < cpu->gdb_regs->len; i++) {
/* Check for duplicates. */
s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
if (s->feature == feature) {
return;
}
} else {
cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState));
i = 0;
}
g_array_set_size(cpu->gdb_regs, i + 1);
s = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
s->base_reg = cpu->gdb_num_regs;
s->num_regs = num_regs;
s->get_reg = get_reg;
s->set_reg = set_reg;
s->xml = xml;
gdb_register_feature(cpu, base_reg, get_reg, set_reg, feature);
/* Add to end of list. */
cpu->gdb_num_regs += num_regs;
cpu->gdb_num_regs += feature->num_regs;
if (g_pos) {
if (g_pos != s->base_reg) {
if (g_pos != base_reg) {
error_report("Error: Bad gdb register numbering for '%s', "
"expected %d got %d", xml, g_pos, s->base_reg);
"expected %d got %d", feature->xml, g_pos, base_reg);
} else {
cpu->gdb_num_g_regs = cpu->gdb_num_regs;
}
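
To show the shape of the new target-side interfaces, here is a hedged sketch of building a dynamic feature with GDBFeatureBuilder and handing it to the new gdb_register_coprocessor() signature. Everything prefixed demo_ is invented for illustration; real users are the target/arm changes shown later in this series:

    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "exec/gdbstub.h"
    #include "gdbstub/helpers.h"

    static GDBFeature demo_feature;

    static int demo_get_reg(CPUState *cs, GByteArray *buf, int reg)
    {
        /* append the register value to buf and return its size */
        return gdb_get_reg32(buf, 0);
    }

    static int demo_set_reg(CPUState *cs, uint8_t *buf, int reg)
    {
        /* a real target would decode buf into its own state here */
        return 4;
    }

    static void demo_register_regs(CPUState *cs)
    {
        GDBFeatureBuilder builder;

        gdb_feature_builder_init(&builder, &demo_feature,
                                 "org.example.demo", "demo.xml",
                                 cs->gdb_num_regs);
        gdb_feature_builder_append_reg(&builder, "demo_reg0", 32, 0,
                                       "int", "demo");
        gdb_feature_builder_end(&builder);

        gdb_register_coprocessor(cs, demo_get_reg, demo_set_reg,
                                 &demo_feature, 0);
    }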


@ -27,6 +27,7 @@
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "exec/cpu-common.h"
#include "exec/gdbstub.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/tcg.h"
@ -193,6 +194,13 @@ static void cpu_common_parse_features(const char *typename, char *features,
}
}
#ifdef CONFIG_PLUGIN
static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused)
{
qemu_plugin_vcpu_init_hook(cpu);
}
#endif
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cpu = CPU(dev);
@ -216,10 +224,13 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp)
cpu_resume(cpu);
}
/* Plugin initialization must wait until the cpu is fully realized. */
/* Plugin initialization must wait until the cpu starts executing code */
#ifdef CONFIG_PLUGIN
if (tcg_enabled()) {
qemu_plugin_vcpu_init_hook(cpu);
cpu->plugin_state = qemu_plugin_create_vcpu_state();
async_run_on_cpu(cpu, qemu_plugin_vcpu_init__async, RUN_ON_CPU_NULL);
}
#endif
/* NOTE: latest generic point where the cpu is fully realized */
}
@ -240,11 +251,10 @@ static void cpu_common_unrealizefn(DeviceState *dev)
static void cpu_common_initfn(Object *obj)
{
CPUState *cpu = CPU(obj);
CPUClass *cc = CPU_GET_CLASS(obj);
gdb_init_cpu(cpu);
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
/* user-mode doesn't have configurable SMP topology */
/* the default value is changed by qemu_init_vcpu() for system-mode */
cpu->nr_cores = 1;
@ -264,6 +274,7 @@ static void cpu_common_finalize(Object *obj)
{
CPUState *cpu = CPU(obj);
g_array_free(cpu->gdb_regs, TRUE);
qemu_lockcnt_destroy(&cpu->in_ioctl_lock);
qemu_mutex_destroy(&cpu->work_mutex);
}


@ -13,19 +13,28 @@
typedef struct GDBFeature {
const char *xmlname;
const char *xml;
const char *name;
const char * const *regs;
int num_regs;
} GDBFeature;
typedef struct GDBFeatureBuilder {
GDBFeature *feature;
GPtrArray *xml;
GPtrArray *regs;
int base_reg;
} GDBFeatureBuilder;
/* Get or set a register. Returns the size of the register. */
typedef int (*gdb_get_reg_cb)(CPUArchState *env, GByteArray *buf, int reg);
typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
typedef int (*gdb_get_reg_cb)(CPUState *cpu, GByteArray *buf, int reg);
typedef int (*gdb_set_reg_cb)(CPUState *cpu, uint8_t *buf, int reg);
/**
* gdb_init_cpu(): Initialize the CPU for gdbstub.
* @cpu: The CPU to be initialized.
*/
void gdb_init_cpu(CPUState *cpu);
/**
* gdb_register_coprocessor() - register a supplemental set of registers
@ -38,7 +47,7 @@ typedef int (*gdb_set_reg_cb)(CPUArchState *env, uint8_t *buf, int reg);
*/
void gdb_register_coprocessor(CPUState *cpu,
gdb_get_reg_cb get_reg, gdb_set_reg_cb set_reg,
int num_regs, const char *xml, int g_pos);
const GDBFeature *feature, int g_pos);
/**
* gdbserver_start: start the gdb server
@ -102,6 +111,34 @@ void gdb_feature_builder_end(const GDBFeatureBuilder *builder);
*/
const GDBFeature *gdb_find_static_feature(const char *xmlname);
/**
* gdb_read_register() - Read a register associated with a CPU.
* @cpu: The CPU associated with the register.
* @buf: The buffer that the read register will be appended to.
* @reg: The register's number returned by gdb_find_feature_register().
*
* Return: The number of read bytes.
*/
int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
/**
* typedef GDBRegDesc - a register description from gdbstub
*/
typedef struct {
int gdb_reg;
const char *name;
const char *feature_name;
} GDBRegDesc;
/**
* gdb_get_register_list() - Return list of all registers for CPU
* @cpu: The CPU being searched
*
* Returns a GArray of GDBRegDesc; the caller frees the array but not
* the const strings.
*/
GArray *gdb_get_register_list(CPUState *cpu);
void gdb_set_stop_cpu(CPUState *cpu);
/* in gdbstub-xml.c, generated by scripts/feature_to_c.py */


@ -31,7 +31,6 @@
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/plugin-event.h"
#include "qom/object.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
@ -126,15 +125,13 @@ struct SysemuCPUOps;
* @gdb_adjust_breakpoint: Callback for adjusting the address of a
* breakpoint. Used by AVR to handle a gdb mis-feature with
* its Harvard architecture split code and data.
* @gdb_num_core_regs: Number of core registers accessible to GDB.
* @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer
* from @gdb_core_xml_file.
* @gdb_core_xml_file: File name for core registers GDB XML description.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
* before the insn which triggers a watchpoint rather than after it.
* @gdb_arch_name: Optional callback that returns the architecture name known
* to GDB. The caller must free the returned string with g_free.
* @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
* gdb stub. Returns a pointer to the XML contents for the specified XML file
* or NULL if the CPU doesn't have a dynamically generated content for it.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
@ -166,7 +163,6 @@ struct CPUClass {
const char *gdb_core_xml_file;
const gchar * (*gdb_arch_name)(CPUState *cpu);
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
@ -437,7 +433,8 @@ struct qemu_work_item;
* @kvm_fd: vCPU file descriptor for KVM.
* @work_mutex: Lock to prevent multiple access to @work_list.
* @work_list: List of pending asynchronous work.
* @plugin_mask: Plugin event bitmap. Modified only via async work.
* @plugin_mem_cbs: active plugin memory callbacks
* @plugin_state: per-CPU plugin state
* @ignore_memory_transaction_failures: Cached copy of the MachineState
* flag of the same name: allows the board to suppress calling of the
* CPU do_transaction_failed hook function.
@ -529,10 +526,13 @@ struct CPUState {
/* Use by accel-block: CPU is executing an ioctl() */
QemuLockCnt in_ioctl_lock;
DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);
#ifdef CONFIG_PLUGIN
/*
* The callback pointer stays in the main CPUState as it is
* accessed via TCG (see gen_empty_mem_helper).
*/
GArray *plugin_mem_cbs;
CPUPluginState *plugin_state;
#endif
/* TODO Move common fields from CPUArchState here. */


@ -73,6 +73,7 @@ enum plugin_dyn_cb_type {
enum plugin_dyn_cb_subtype {
PLUGIN_CB_REGULAR,
PLUGIN_CB_REGULAR_R,
PLUGIN_CB_INLINE,
PLUGIN_N_CB_SUBTYPES,
};
@ -185,6 +186,19 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
return insn;
}
/**
* struct CPUPluginState - per-CPU state for plugins
* @event_mask: plugin event bitmap. Modified only via async work.
*/
struct CPUPluginState {
DECLARE_BITMAP(event_mask, QEMU_PLUGIN_EV_MAX);
};
/**
* qemu_plugin_create_vcpu_state: allocate plugin state
*/
CPUPluginState *qemu_plugin_create_vcpu_state(void);
void qemu_plugin_vcpu_init_hook(CPUState *cpu);
void qemu_plugin_vcpu_exit_hook(CPUState *cpu);
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb);


@ -11,6 +11,7 @@
#ifndef QEMU_QEMU_PLUGIN_H
#define QEMU_QEMU_PLUGIN_H
#include <glib.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
@ -50,11 +51,13 @@ typedef uint64_t qemu_plugin_id_t;
*
* The plugins export the API they were built against by exposing the
* symbol qemu_plugin_version which can be checked.
*
* version 2: removed qemu_plugin_n_vcpus and qemu_plugin_n_max_vcpus
*/
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
#define QEMU_PLUGIN_VERSION 1
#define QEMU_PLUGIN_VERSION 2
/**
* struct qemu_info_t - system information for plugins
@ -227,8 +230,8 @@ struct qemu_plugin_insn;
* @QEMU_PLUGIN_CB_R_REGS: callback reads the CPU's regs
* @QEMU_PLUGIN_CB_RW_REGS: callback reads and writes the CPU's regs
*
* Note: currently unused, plugins cannot read or change system
* register state.
* Note: currently QEMU_PLUGIN_CB_RW_REGS is unused; plugins cannot change
* system register state.
*/
enum qemu_plugin_cb_flags {
QEMU_PLUGIN_CB_NO_REGS,
@ -643,11 +646,8 @@ QEMU_PLUGIN_API
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
qemu_plugin_udata_cb_t cb, void *userdata);
/* returns -1 in user-mode */
int qemu_plugin_n_vcpus(void);
/* returns -1 in user-mode */
int qemu_plugin_n_max_vcpus(void);
/* returns how many vcpus were started at this point */
int qemu_plugin_num_vcpus(void);
/**
* qemu_plugin_outs() - output string via QEMU's logging system
@ -708,4 +708,49 @@ uint64_t qemu_plugin_end_code(void);
QEMU_PLUGIN_API
uint64_t qemu_plugin_entry_code(void);
/** struct qemu_plugin_register - Opaque handle for register access */
struct qemu_plugin_register;
/**
* typedef qemu_plugin_reg_descriptor - register descriptions
*
* @handle: opaque handle for retrieving value with qemu_plugin_read_register
* @name: register name
* @feature: optional feature descriptor, can be NULL
*/
typedef struct {
struct qemu_plugin_register *handle;
const char *name;
const char *feature;
} qemu_plugin_reg_descriptor;
/**
* qemu_plugin_get_registers() - return register list for current vCPU
*
* Returns a potentially empty GArray of qemu_plugin_reg_descriptor.
* Caller frees the array (but not the const strings).
*
* Should be used from a qemu_plugin_register_vcpu_init_cb() callback
* after the vCPU is initialised, i.e. in the vCPU context.
*/
QEMU_PLUGIN_API
GArray *qemu_plugin_get_registers(void);
/**
* qemu_plugin_read_register() - read register for current vCPU
*
* @handle: a @qemu_plugin_reg_handle handle
* @buf: A GByteArray for the data owned by the plugin
*
* This function is only available in a context where register read access is
* explicitly requested via the QEMU_PLUGIN_CB_R_REGS flag.
*
* Returns the size of the read register. The content of @buf is in target byte
* order. On failure returns -1.
*/
QEMU_PLUGIN_API
int qemu_plugin_read_register(struct qemu_plugin_register *handle,
GByteArray *buf);
#endif /* QEMU_QEMU_PLUGIN_H */
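
Putting the two new calls together, a hedged sketch of a plugin that caches a handle at vCPU init and reads it from an exec callback registered with QEMU_PLUGIN_CB_R_REGS (the register name "sp" and all function names are illustrative):

    #include <glib.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    /* real plugins keep handles per vCPU, as execlog does */
    static struct qemu_plugin_register *reg_handle;

    static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
    {
        g_autoptr(GArray) regs = qemu_plugin_get_registers();

        for (guint i = 0; i < regs->len; i++) {
            qemu_plugin_reg_descriptor *rd =
                &g_array_index(regs, qemu_plugin_reg_descriptor, i);
            if (g_strcmp0(rd->name, "sp") == 0) {
                reg_handle = rd->handle;
            }
        }
    }

    static void insn_exec(unsigned int cpu_index, void *udata)
    {
        if (reg_handle) {
            g_autoptr(GByteArray) buf = g_byte_array_new();
            int sz = qemu_plugin_read_register(reg_handle, buf);
            /* buf now holds sz bytes in target byte order */
            g_assert(sz == buf->len);
        }
    }

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        for (size_t i = 0; i < qemu_plugin_tb_n_insns(tb); i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
            /* R_REGS is what makes the read legal in insn_exec() */
            qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
                                                   QEMU_PLUGIN_CB_R_REGS,
                                                   NULL);
        }
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        return 0;
    }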


@ -42,6 +42,7 @@ typedef struct CompatProperty CompatProperty;
typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUAddressSpace CPUAddressSpace;
typedef struct CPUArchState CPUArchState;
typedef struct CPUPluginState CPUPluginState;
typedef struct CpuInfoFast CpuInfoFast;
typedef struct CPUJumpCache CPUJumpCache;
typedef struct CPUState CPUState;


@ -32,6 +32,7 @@ void cpu_loop(CPUNios2State *env)
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
process_queued_cpu_work(cs);
switch (trapnr) {
case EXCP_INTERRUPT:


@ -8,6 +8,7 @@
*
* qemu_plugin_tb
* qemu_plugin_insn
* qemu_plugin_register
*
* Which can then be passed back into the API to do additional things.
* As such all the public functions in here are exported in
@ -35,10 +36,12 @@
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "tcg/tcg.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "disas/disas.h"
#include "plugin.h"
@ -89,7 +92,11 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
void *udata)
{
if (!tb->mem_only) {
plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
int index = flags == QEMU_PLUGIN_CB_R_REGS ||
flags == QEMU_PLUGIN_CB_RW_REGS ?
PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
plugin_register_dyn_cb__udata(&tb->cbs[index],
cb, flags, udata);
}
}
@ -109,7 +116,11 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *udata)
{
if (!insn->mem_only) {
plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR],
int index = flags == QEMU_PLUGIN_CB_R_REGS ||
flags == QEMU_PLUGIN_CB_RW_REGS ?
PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][index],
cb, flags, udata);
}
}
@ -342,34 +353,9 @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
#endif
}
/*
* Queries to the number and potential maximum number of vCPUs there
* will be. This helps the plugin dimension per-vcpu arrays.
*/
#ifndef CONFIG_USER_ONLY
static MachineState * get_ms(void)
int qemu_plugin_num_vcpus(void)
{
return MACHINE(qdev_get_machine());
}
#endif
int qemu_plugin_n_vcpus(void)
{
#ifdef CONFIG_USER_ONLY
return -1;
#else
return get_ms()->smp.cpus;
#endif
}
int qemu_plugin_n_max_vcpus(void)
{
#ifdef CONFIG_USER_ONLY
return -1;
#else
return get_ms()->smp.max_cpus;
#endif
return plugin_num_vcpus();
}
/*
@ -427,3 +413,55 @@ uint64_t qemu_plugin_entry_code(void)
#endif
return entry;
}
/*
* Create register handles.
*
* We need to create a handle for each register so the plugin
* infrastructure can call gdbstub to read a register. They are
* currently just a pointer encapsulation of the gdb_reg but in
* future may hold internal plugin state so it's important plugin
* authors are not tempted to treat them as numbers.
*
* We also construct a result array with those handles and some
* ancillary data the plugin might find useful.
*/
static GArray *create_register_handles(GArray *gdbstub_regs)
{
GArray *find_data = g_array_new(true, true,
sizeof(qemu_plugin_reg_descriptor));
for (int i = 0; i < gdbstub_regs->len; i++) {
GDBRegDesc *grd = &g_array_index(gdbstub_regs, GDBRegDesc, i);
qemu_plugin_reg_descriptor desc;
/* skip "un-named" regs */
if (!grd->name) {
continue;
}
/* Create a record for the plugin */
desc.handle = GINT_TO_POINTER(grd->gdb_reg);
desc.name = g_intern_string(grd->name);
desc.feature = g_intern_string(grd->feature_name);
g_array_append_val(find_data, desc);
}
return find_data;
}
GArray *qemu_plugin_get_registers(void)
{
g_assert(current_cpu);
g_autoptr(GArray) regs = gdb_get_register_list(current_cpu);
return create_register_handles(regs);
}
int qemu_plugin_read_register(struct qemu_plugin_register *reg, GByteArray *buf)
{
g_assert(current_cpu);
return gdb_read_register(current_cpu, buf, GPOINTER_TO_INT(reg));
}


@ -17,6 +17,7 @@
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
@ -53,7 +54,8 @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
bitmap_copy(cpu->plugin_state->event_mask,
&data.host_ulong, QEMU_PLUGIN_EV_MAX);
tcg_flush_jmp_cache(cpu);
}
@ -208,11 +210,17 @@ plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
do_plugin_register_cb(id, ev, func, udata);
}
CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
return g_new0(CPUPluginState, 1);
}
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
bool success;
qemu_rec_mutex_lock(&plugin.lock);
plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
&cpu->cpu_index);
@ -355,7 +363,7 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
struct qemu_plugin_cb *cb, *next;
enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;
if (!test_bit(ev, cpu->plugin_mask)) {
if (!test_bit(ev, cpu->plugin_state->event_mask)) {
return;
}
@ -377,7 +385,7 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
struct qemu_plugin_cb *cb, *next;
enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;
if (!test_bit(ev, cpu->plugin_mask)) {
if (!test_bit(ev, cpu->plugin_state->event_mask)) {
return;
}
@ -390,12 +398,17 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
/* idle and resume cb may be called before init, ignore in this case */
if (cpu->cpu_index < plugin.num_vcpus) {
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}
}
void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
if (cpu->cpu_index < plugin.num_vcpus) {
plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}
}
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
@ -570,3 +583,8 @@ static void __attribute__((__constructor__)) plugin_init(void)
QHT_MODE_AUTO_RESIZE);
atexit(qemu_plugin_atexit_cb);
}
int plugin_num_vcpus(void)
{
return plugin.num_vcpus;
}


@ -15,7 +15,7 @@
#include <gmodule.h>
#include "qemu/qht.h"
#define QEMU_PLUGIN_MIN_VERSION 0
#define QEMU_PLUGIN_MIN_VERSION 2
/* global state */
struct qemu_plugin_state {
@ -44,6 +44,8 @@ struct qemu_plugin_state {
* the code cache is flushed.
*/
struct qht dyn_cb_arr_ht;
/* How many vcpus were started */
int num_vcpus;
};
@ -97,4 +99,6 @@ void plugin_register_vcpu_mem_cb(GArray **arr,
void exec_inline_op(struct qemu_plugin_dyn_cb *cb);
int plugin_num_vcpus(void);
#endif /* PLUGIN_H */


@ -3,6 +3,7 @@
qemu_plugin_end_code;
qemu_plugin_entry_code;
qemu_plugin_get_hwaddr;
qemu_plugin_get_registers;
qemu_plugin_hwaddr_device_name;
qemu_plugin_hwaddr_is_io;
qemu_plugin_hwaddr_phys_addr;
@ -16,10 +17,10 @@
qemu_plugin_mem_is_sign_extended;
qemu_plugin_mem_is_store;
qemu_plugin_mem_size_shift;
qemu_plugin_n_max_vcpus;
qemu_plugin_n_vcpus;
qemu_plugin_num_vcpus;
qemu_plugin_outs;
qemu_plugin_path_to_binary;
qemu_plugin_read_register;
qemu_plugin_register_atexit_cb;
qemu_plugin_register_flush_cb;
qemu_plugin_register_vcpu_exit_cb;


@ -50,7 +50,9 @@ for input in sys.argv[1:]:
sys.stderr.write(f'unexpected start tag: {element.tag}\n')
exit(1)
feature_name = element.attrib['name']
regnum = 0
regnames = []
regnums = []
tags = ['feature']
for event, element in events:
@ -67,6 +69,7 @@ for input in sys.argv[1:]:
if 'regnum' in element.attrib:
regnum = int(element.attrib['regnum'])
regnames.append(element.attrib['name'])
regnums.append(regnum)
regnum += 1
@ -85,6 +88,15 @@ for input in sys.argv[1:]:
writeliteral(8, bytes(os.path.basename(input), 'utf-8'))
sys.stdout.write(',\n')
writeliteral(8, read)
sys.stdout.write(f',\n {num_regs},\n }},\n')
sys.stdout.write(',\n')
writeliteral(8, bytes(feature_name, 'utf-8'))
sys.stdout.write(',\n (const char * const []) {\n')
for index, regname in enumerate(regnames):
sys.stdout.write(f' [{regnums[index] - base_reg}] =\n')
writeliteral(16, bytes(regname, 'utf-8'))
sys.stdout.write(',\n')
sys.stdout.write(f' }},\n {num_regs},\n }},\n')
sys.stdout.write(' { NULL }\n};\n')
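
Under this change an entry in the generated gdbstub-xml.c gains the feature name and a regnum-indexed array of register names next to the XML blob. Roughly, and purely as an illustration (the values shown are not actual generated output):

    const GDBFeature gdb_static_features[] = {
        {
            "aarch64-core.xml",
            "<?xml version=\"1.0\"?> ... ",     /* escaped XML body */
            "org.gnu.gdb.aarch64.core",
            (const char * const []) {
                [0] = "x0",
                [1] = "x1",
                /* ... one entry per regnum, relative to the base reg */
            },
            34,
        },
        { NULL }
    };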


@ -2515,9 +2515,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &arm_sysemu_ops;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_arch_name = arm_gdb_arch_name;
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;


@ -25,6 +25,7 @@
#include "hw/registerfields.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "exec/gdbstub.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
@ -117,23 +118,21 @@
*/
/**
* DynamicGDBXMLInfo:
* @desc: Contains the XML descriptions.
* @num: Number of the registers in this XML seen by GDB.
* DynamicGDBFeatureInfo:
* @desc: Contains the feature descriptions.
* @data: A union with data specific to the set of registers
* @cpregs_keys: Array that contains the corresponding Key of
* a given cpreg with the same order of the cpreg
* in the XML description.
*/
typedef struct DynamicGDBXMLInfo {
char *desc;
int num;
typedef struct DynamicGDBFeatureInfo {
GDBFeature desc;
union {
struct {
uint32_t *keys;
} cpregs;
} data;
} DynamicGDBXMLInfo;
} DynamicGDBFeatureInfo;
/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
@ -855,10 +854,10 @@ struct ArchCPU {
uint64_t *cpreg_vmstate_values;
int32_t cpreg_vmstate_array_len;
DynamicGDBXMLInfo dyn_sysreg_xml;
DynamicGDBXMLInfo dyn_svereg_xml;
DynamicGDBXMLInfo dyn_m_systemreg_xml;
DynamicGDBXMLInfo dyn_m_secextreg_xml;
DynamicGDBFeatureInfo dyn_sysreg_feature;
DynamicGDBFeatureInfo dyn_svereg_feature;
DynamicGDBFeatureInfo dyn_m_systemreg_feature;
DynamicGDBFeatureInfo dyn_m_secextreg_feature;
/* Timers used by the generic (architected) timer */
QEMUTimer *gt_timer[NUM_GTIMERS];
@ -1160,12 +1159,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
/* Returns the dynamically generated XML for the gdb stub.
* Returns a pointer to the XML contents for the specified XML file or NULL
* if the XML name doesn't match the predefined one.
*/
const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,


@ -793,7 +793,6 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;
cc->gdb_core_xml_file = "aarch64-core.xml";
cc->gdb_arch_name = aarch64_gdb_arch_name;


@ -26,11 +26,11 @@
#include "cpu-features.h"
#include "cpregs.h"
typedef struct RegisterSysregXmlParam {
typedef struct RegisterSysregFeatureParam {
CPUState *cs;
GString *s;
GDBFeatureBuilder builder;
int n;
} RegisterSysregXmlParam;
} RegisterSysregFeatureParam;
/* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
whatever the target description contains. Due to a historical mishap
@ -106,9 +106,10 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 0;
}
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
static int vfp_gdb_get_reg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
/* VFP data registers are always little-endian. */
@ -130,9 +131,10 @@ static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
static int vfp_gdb_set_reg(CPUState *cs, uint8_t *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
if (reg < nregs) {
@ -156,8 +158,11 @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
static int vfp_gdb_get_sysreg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0:
return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
@ -167,8 +172,11 @@ static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
return 0;
}
static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
static int vfp_gdb_set_sysreg(CPUState *cs, uint8_t *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0:
env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
@ -180,8 +188,11 @@ static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
static int mve_gdb_get_reg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0:
return gdb_get_reg32(buf, env->v7m.vpr);
@ -190,8 +201,11 @@ static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
}
}
static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
static int mve_gdb_set_reg(CPUState *cs, uint8_t *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0:
env->v7m.vpr = ldl_p(buf);
@ -210,13 +224,14 @@ static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
* We return the number of bytes copied
*/
static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
static int arm_gdb_get_sysreg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
const ARMCPRegInfo *ri;
uint32_t key;
key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
key = cpu->dyn_sysreg_feature.data.cpregs.keys[reg];
ri = get_arm_cp_reginfo(cpu->cp_regs, key);
if (ri) {
if (cpreg_field_is_64bit(ri)) {
@ -228,39 +243,37 @@ static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
return 0;
}
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
static int arm_gdb_set_sysreg(CPUState *cs, uint8_t *buf, int reg)
{
return 0;
}
static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml,
static void arm_gen_one_feature_sysreg(GDBFeatureBuilder *builder,
DynamicGDBFeatureInfo *dyn_feature,
ARMCPRegInfo *ri, uint32_t ri_key,
int bitsize, int regnum)
int bitsize, int n)
{
g_string_append_printf(s, "<reg name=\"%s\"", ri->name);
g_string_append_printf(s, " bitsize=\"%d\"", bitsize);
g_string_append_printf(s, " regnum=\"%d\"", regnum);
g_string_append_printf(s, " group=\"cp_regs\"/>");
dyn_xml->data.cpregs.keys[dyn_xml->num] = ri_key;
dyn_xml->num++;
gdb_feature_builder_append_reg(builder, ri->name, bitsize, n,
"int", "cp_regs");
dyn_feature->data.cpregs.keys[n] = ri_key;
}
static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
gpointer p)
static void arm_register_sysreg_for_feature(gpointer key, gpointer value,
gpointer p)
{
uint32_t ri_key = (uintptr_t)key;
ARMCPRegInfo *ri = value;
RegisterSysregXmlParam *param = (RegisterSysregXmlParam *)p;
GString *s = param->s;
RegisterSysregFeatureParam *param = p;
ARMCPU *cpu = ARM_CPU(param->cs);
CPUARMState *env = &cpu->env;
DynamicGDBXMLInfo *dyn_xml = &cpu->dyn_sysreg_xml;
DynamicGDBFeatureInfo *dyn_feature = &cpu->dyn_sysreg_feature;
if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_NO_GDB))) {
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
if (ri->state == ARM_CP_STATE_AA64) {
arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 64,
param->n++);
arm_gen_one_feature_sysreg(&param->builder, dyn_feature,
ri, ri_key, 64, param->n++);
}
} else {
if (ri->state == ARM_CP_STATE_AA32) {
@ -269,32 +282,32 @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
return;
}
if (ri->type & ARM_CP_64BIT) {
arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 64,
param->n++);
arm_gen_one_feature_sysreg(&param->builder, dyn_feature,
ri, ri_key, 64, param->n++);
} else {
arm_gen_one_xml_sysreg_tag(s , dyn_xml, ri, ri_key, 32,
param->n++);
arm_gen_one_feature_sysreg(&param->builder, dyn_feature,
ri, ri_key, 32, param->n++);
}
}
}
}
}
static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
static GDBFeature *arm_gen_dynamic_sysreg_feature(CPUState *cs, int base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
RegisterSysregXmlParam param = {cs, s, base_reg};
RegisterSysregFeatureParam param = {cs};
gsize num_regs = g_hash_table_size(cpu->cp_regs);
cpu->dyn_sysreg_xml.num = 0;
cpu->dyn_sysreg_xml.data.cpregs.keys = g_new(uint32_t, g_hash_table_size(cpu->cp_regs));
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.qemu.gdb.arm.sys.regs\">");
g_hash_table_foreach(cpu->cp_regs, arm_register_sysreg_for_xml, &param);
g_string_append_printf(s, "</feature>");
cpu->dyn_sysreg_xml.desc = g_string_free(s, false);
return cpu->dyn_sysreg_xml.num;
gdb_feature_builder_init(&param.builder,
&cpu->dyn_sysreg_feature.desc,
"org.qemu.gdb.arm.sys.regs",
"system-registers.xml",
base_reg);
cpu->dyn_sysreg_feature.data.cpregs.keys = g_new(uint32_t, num_regs);
g_hash_table_foreach(cpu->cp_regs, arm_register_sysreg_for_feature, &param);
gdb_feature_builder_end(&param.builder);
return &cpu->dyn_sysreg_feature.desc;
}
#ifdef CONFIG_TCG
@ -369,8 +382,11 @@ static int m_sysreg_get(CPUARMState *env, GByteArray *buf,
return gdb_get_reg32(buf, *ptr);
}
static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg)
static int arm_gdb_get_m_systemreg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
/*
* Here, we emulate MRS instruction, where CONTROL has a mix of
* banked and non-banked bits.
@ -381,36 +397,34 @@ static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg)
return m_sysreg_get(env, buf, reg, env->v7m.secure);
}
static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg)
static int arm_gdb_set_m_systemreg(CPUState *cs, uint8_t *buf, int reg)
{
return 0; /* TODO */
}
static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg)
static GDBFeature *arm_gen_dynamic_m_systemreg_feature(CPUState *cs,
int base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
GString *s = g_string_new(NULL);
int base_reg = orig_base_reg;
GDBFeatureBuilder builder;
int reg = 0;
int i;
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.m-system\">\n");
gdb_feature_builder_init(&builder, &cpu->dyn_m_systemreg_feature.desc,
"org.gnu.gdb.arm.m-system", "arm-m-system.xml",
base_reg);
for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
if (arm_feature(env, m_sysreg_def[i].feature)) {
g_string_append_printf(s,
"<reg name=\"%s\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
gdb_feature_builder_append_reg(&builder, m_sysreg_def[i].name, 32,
reg++, "int", NULL);
}
}
g_string_append_printf(s, "</feature>");
cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false);
cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg;
gdb_feature_builder_end(&builder);
return cpu->dyn_m_systemreg_xml.num;
return &cpu->dyn_m_systemreg_feature.desc;
}
#ifndef CONFIG_USER_ONLY
@ -418,63 +432,48 @@ static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg)
* For user-only, we see the non-secure registers via m_systemreg above.
* For secext, encode the non-secure view as even and secure view as odd.
*/
static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg)
static int arm_gdb_get_m_secextreg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
return m_sysreg_get(env, buf, reg >> 1, reg & 1);
}
static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg)
static int arm_gdb_set_m_secextreg(CPUState *cs, uint8_t *buf, int reg)
{
return 0; /* TODO */
}
static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg)
static GDBFeature *arm_gen_dynamic_m_secextreg_feature(CPUState *cs,
int base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
int base_reg = orig_base_reg;
GDBFeatureBuilder builder;
char *name;
int reg = 0;
int i;
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.secext\">\n");
gdb_feature_builder_init(&builder, &cpu->dyn_m_secextreg_feature.desc,
"org.gnu.gdb.arm.secext", "arm-m-secext.xml",
base_reg);
for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) {
g_string_append_printf(s,
"<reg name=\"%s_ns\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
g_string_append_printf(s,
"<reg name=\"%s_s\" bitsize=\"32\" regnum=\"%d\"/>\n",
m_sysreg_def[i].name, base_reg++);
name = g_strconcat(m_sysreg_def[i].name, "_ns", NULL);
gdb_feature_builder_append_reg(&builder, name, 32, reg++,
"int", NULL);
name = g_strconcat(m_sysreg_def[i].name, "_s", NULL);
gdb_feature_builder_append_reg(&builder, name, 32, reg++,
"int", NULL);
}
g_string_append_printf(s, "</feature>");
cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false);
cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg;
gdb_feature_builder_end(&builder);
return cpu->dyn_m_secextreg_xml.num;
return &cpu->dyn_m_secextreg_feature.desc;
}
#endif
#endif /* CONFIG_TCG */
const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
ARMCPU *cpu = ARM_CPU(cs);
if (strcmp(xmlname, "system-registers.xml") == 0) {
return cpu->dyn_sysreg_xml.desc;
} else if (strcmp(xmlname, "sve-registers.xml") == 0) {
return cpu->dyn_svereg_xml.desc;
} else if (strcmp(xmlname, "arm-m-system.xml") == 0) {
return cpu->dyn_m_systemreg_xml.desc;
#ifndef CONFIG_USER_ONLY
} else if (strcmp(xmlname, "arm-m-secext.xml") == 0) {
return cpu->dyn_m_secextreg_xml.desc;
#endif
}
return NULL;
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
CPUState *cs = CPU(cpu);
@ -487,14 +486,14 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
*/
#ifdef TARGET_AARCH64
if (isar_feature_aa64_sve(&cpu->isar)) {
int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs);
GDBFeature *feature = arm_gen_dynamic_svereg_feature(cs, cs->gdb_num_regs);
gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg,
aarch64_gdb_set_sve_reg, nreg,
"sve-registers.xml", 0);
aarch64_gdb_set_sve_reg, feature, 0);
} else {
gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg,
aarch64_gdb_set_fpu_reg,
34, "aarch64-fpu.xml", 0);
gdb_find_static_feature("aarch64-fpu.xml"),
0);
}
/*
* Note that we report pauth information via the feature name
@ -505,19 +504,22 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
if (isar_feature_aa64_pauth(&cpu->isar)) {
gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg,
aarch64_gdb_set_pauth_reg,
4, "aarch64-pauth.xml", 0);
gdb_find_static_feature("aarch64-pauth.xml"),
0);
}
#endif
} else {
if (arm_feature(env, ARM_FEATURE_NEON)) {
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
49, "arm-neon.xml", 0);
gdb_find_static_feature("arm-neon.xml"),
0);
} else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
33, "arm-vfp3.xml", 0);
gdb_find_static_feature("arm-vfp3.xml"),
0);
} else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
17, "arm-vfp.xml", 0);
gdb_find_static_feature("arm-vfp.xml"), 0);
}
if (!arm_feature(env, ARM_FEATURE_M)) {
/*
@ -525,29 +527,29 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
* expose to gdb.
*/
gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg,
2, "arm-vfp-sysregs.xml", 0);
gdb_find_static_feature("arm-vfp-sysregs.xml"),
0);
}
}
if (cpu_isar_feature(aa32_mve, cpu) && tcg_enabled()) {
gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg,
1, "arm-m-profile-mve.xml", 0);
gdb_find_static_feature("arm-m-profile-mve.xml"),
0);
}
gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
"system-registers.xml", 0);
arm_gen_dynamic_sysreg_feature(cs, cs->gdb_num_regs),
0);
#ifdef CONFIG_TCG
if (arm_feature(env, ARM_FEATURE_M) && tcg_enabled()) {
gdb_register_coprocessor(cs,
arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg,
arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs),
"arm-m-system.xml", 0);
arm_gen_dynamic_m_systemreg_feature(cs, cs->gdb_num_regs), 0);
#ifndef CONFIG_USER_ONLY
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
gdb_register_coprocessor(cs,
arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg,
arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs),
"arm-m-secext.xml", 0);
arm_gen_dynamic_m_secextreg_feature(cs, cs->gdb_num_regs), 0);
}
#endif
}
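
Taken together, the hunks above replace the hand-assembled XML strings with the GDBFeatureBuilder API and pass a GDBFeature (instead of a register count plus an XML file name) to gdb_register_coprocessor(), whose get/set callbacks now receive a CPUState. The stand-alone sketch below is not part of the patch; it only restates that pattern with invented names (the demo_* identifiers, "org.qemu.demo.regs", "demo-regs.xml" and the two register names are all hypothetical) to show how a target would wire up one dynamic feature under the new API.

#include "qemu/osdep.h"
#include "exec/gdbstub.h"

static int demo_gdb_get_reg(CPUState *cs, GByteArray *buf, int n)
{
    /* hypothetical read: return the number of bytes appended for register n */
    return gdb_get_reg32(buf, 0);
}

static int demo_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
    /* hypothetical write: report that 4 bytes were consumed for register n */
    return 4;
}

static GDBFeature *demo_gen_dynamic_feature(CPUState *cs, int base_reg)
{
    static GDBFeature feature;  /* real targets keep this in the CPU or class */
    GDBFeatureBuilder builder;

    gdb_feature_builder_init(&builder, &feature,
                             "org.qemu.demo.regs", "demo-regs.xml", base_reg);
    gdb_feature_builder_append_reg(&builder, "demo_ctrl", 32, 0, "int", NULL);
    gdb_feature_builder_append_reg(&builder, "demo_status", 32, 1,
                                   "int", "system");
    gdb_feature_builder_end(&builder);

    return &feature;
}

static void demo_cpu_register_gdb_regs(CPUState *cs)
{
    /* the feature now carries both the register count and the XML */
    gdb_register_coprocessor(cs, demo_gdb_get_reg, demo_gdb_set_reg,
                             demo_gen_dynamic_feature(cs, cs->gdb_num_regs),
                             0);
}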

View File

@ -72,8 +72,11 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 0;
}
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg)
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0 ... 31:
{
@ -92,8 +95,11 @@ int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg)
}
}
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg)
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0 ... 31:
/* 128 bit FP register */
@ -116,9 +122,10 @@ int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg)
}
}
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg)
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
/* The first 32 registers are the zregs */
@ -164,9 +171,10 @@ int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg)
return 0;
}
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg)
{
ARMCPU *cpu = env_archcpu(env);
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
/* The first 32 registers are the zregs */
switch (reg) {
@ -210,8 +218,11 @@ int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg)
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
switch (reg) {
case 0: /* pauth_dmask */
case 1: /* pauth_cmask */
@ -241,13 +252,13 @@ int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg)
}
}
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg)
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg)
{
/* All pseudo registers are read-only. */
return 0;
}
static void output_vector_union_type(GString *s, int reg_width,
static void output_vector_union_type(GDBFeatureBuilder *builder, int reg_width,
const char *name)
{
struct TypeSize {
@ -282,10 +293,10 @@ static void output_vector_union_type(GString *s, int reg_width,
/* First define types and totals in a whole VL */
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
g_string_append_printf(s,
"<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
name, vec_lanes[i].sz, vec_lanes[i].suffix,
vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
gdb_feature_builder_append_tag(
builder, "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>",
name, vec_lanes[i].sz, vec_lanes[i].suffix,
vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size);
}
/*
@ -296,86 +307,77 @@ static void output_vector_union_type(GString *s, int reg_width,
for (i = 0; i < ARRAY_SIZE(suf); i++) {
int bits = 8 << i;
g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]);
gdb_feature_builder_append_tag(builder, "<union id=\"%sn%c\">",
name, suf[i]);
for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
if (vec_lanes[j].size == bits) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%s%c%c\"/>",
vec_lanes[j].suffix, name,
vec_lanes[j].sz, vec_lanes[j].suffix);
gdb_feature_builder_append_tag(
builder, "<field name=\"%c\" type=\"%s%c%c\"/>",
vec_lanes[j].suffix, name,
vec_lanes[j].sz, vec_lanes[j].suffix);
}
}
g_string_append(s, "</union>");
gdb_feature_builder_append_tag(builder, "</union>");
}
/* And now the final union of unions */
g_string_append_printf(s, "<union id=\"%s\">", name);
gdb_feature_builder_append_tag(builder, "<union id=\"%s\">", name);
for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>",
suf[i], name, suf[i]);
gdb_feature_builder_append_tag(builder,
"<field name=\"%c\" type=\"%sn%c\"/>",
suf[i], name, suf[i]);
}
g_string_append(s, "</union>");
gdb_feature_builder_append_tag(builder, "</union>");
}
int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg)
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg)
{
ARMCPU *cpu = ARM_CPU(cs);
GString *s = g_string_new(NULL);
DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
int reg_width = cpu->sve_max_vq * 128;
int pred_width = cpu->sve_max_vq * 16;
int base_reg = orig_base_reg;
GDBFeatureBuilder builder;
char *name;
int reg = 0;
int i;
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
gdb_feature_builder_init(&builder, &cpu->dyn_svereg_feature.desc,
"org.gnu.gdb.aarch64.sve", "sve-registers.xml",
base_reg);
/* Create the vector union type. */
output_vector_union_type(s, reg_width, "svev");
output_vector_union_type(&builder, reg_width, "svev");
/* Create the predicate vector type. */
g_string_append_printf(s,
"<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
pred_width / 8);
gdb_feature_builder_append_tag(
&builder, "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
pred_width / 8);
/* Define the vector registers. */
for (i = 0; i < 32; i++) {
g_string_append_printf(s,
"<reg name=\"z%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svev\"/>",
i, reg_width, base_reg++);
name = g_strdup_printf("z%d", i);
gdb_feature_builder_append_reg(&builder, name, reg_width, reg++,
"svev", NULL);
}
/* fpscr & status registers */
g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);
g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
" regnum=\"%d\" group=\"float\""
" type=\"int\"/>", base_reg++);
gdb_feature_builder_append_reg(&builder, "fpsr", 32, reg++,
"int", "float");
gdb_feature_builder_append_reg(&builder, "fpcr", 32, reg++,
"int", "float");
/* Define the predicate registers. */
for (i = 0; i < 16; i++) {
g_string_append_printf(s,
"<reg name=\"p%d\" bitsize=\"%d\""
" regnum=\"%d\" type=\"svep\"/>",
i, pred_width, base_reg++);
name = g_strdup_printf("p%d", i);
gdb_feature_builder_append_reg(&builder, name, pred_width, reg++,
"svep", NULL);
}
g_string_append_printf(s,
"<reg name=\"ffr\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"svep\"/>",
pred_width, base_reg++);
gdb_feature_builder_append_reg(&builder, "ffr", pred_width, reg++,
"svep", "vector");
/* Define the vector length pseudo-register. */
g_string_append_printf(s,
"<reg name=\"vg\" bitsize=\"64\""
" regnum=\"%d\" type=\"int\"/>",
base_reg++);
gdb_feature_builder_append_reg(&builder, "vg", 64, reg++, "int", NULL);
g_string_append_printf(s, "</feature>");
gdb_feature_builder_end(&builder);
info->desc = g_string_free(s, false);
info->num = base_reg - orig_base_reg;
return info->num;
return &cpu->dyn_svereg_feature.desc;
}

View File

@ -1451,13 +1451,13 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
}
#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);

View File

@ -251,7 +251,6 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
cc->gdb_adjust_breakpoint = avr_cpu_gdb_adjust_breakpoint;
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "avr-cpu.xml";
cc->tcg_ops = &avr_tcg_ops;
}

View File

@ -319,8 +319,7 @@ static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
gdb_register_coprocessor(cs, hexagon_hvx_gdb_read_register,
hexagon_hvx_gdb_write_register,
NUM_VREGS + NUM_QREGS,
"hexagon-hvx.xml", 0);
gdb_find_static_feature("hexagon-hvx.xml"), 0);
qemu_init_vcpu(cs);
cpu_reset(cs);
@ -363,7 +362,6 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
cc->get_pc = hexagon_cpu_get_pc;
cc->gdb_read_register = hexagon_gdb_read_register;
cc->gdb_write_register = hexagon_gdb_write_register;
cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS;
cc->gdb_stop_before_watchpoint = true;
cc->gdb_core_xml_file = "hexagon-core.xml";
cc->disas_set_info = hexagon_cpu_disas_set_info;

View File

@ -81,8 +81,11 @@ static int gdb_get_qreg(CPUHexagonState *env, GByteArray *mem_buf, int n)
return total;
}
int hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n)
int hexagon_hvx_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
HexagonCPU *cpu = HEXAGON_CPU(cs);
CPUHexagonState *env = &cpu->env;
if (n < NUM_VREGS) {
return gdb_get_vreg(env, mem_buf, n);
}
@ -115,8 +118,11 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n)
return MAX_VEC_SIZE_BYTES / 8;
}
int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n)
int hexagon_hvx_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
HexagonCPU *cpu = HEXAGON_CPU(cs);
CPUHexagonState *env = &cpu->env;
if (n < NUM_VREGS) {
return gdb_put_vreg(env, mem_buf, n);
}

View File

@ -33,8 +33,8 @@
int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int hexagon_hvx_gdb_read_register(CPUHexagonState *env, GByteArray *mem_buf, int n);
int hexagon_hvx_gdb_write_register(CPUHexagonState *env, uint8_t *mem_buf, int n);
int hexagon_hvx_gdb_read_register(CPUState *env, GByteArray *mem_buf, int n);
int hexagon_hvx_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n);
void hexagon_debug_vreg(CPUHexagonState *env, int regnum);
void hexagon_debug_qreg(CPUHexagonState *env, int regnum);

View File

@ -7990,10 +7990,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
cc->gdb_core_xml_file = "i386-64bit.xml";
cc->gdb_num_core_regs = 66;
#else
cc->gdb_core_xml_file = "i386-32bit.xml";
cc->gdb_num_core_regs = 50;
#endif
cc->disas_set_info = x86_disas_set_info;

View File

@ -815,7 +815,6 @@ static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
CPUClass *cc = CPU_CLASS(c);
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "loongarch-base32.xml";
cc->gdb_arch_name = loongarch32_gdb_arch_name;
}
@ -829,7 +828,6 @@ static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
CPUClass *cc = CPU_CLASS(c);
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "loongarch-base64.xml";
cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

View File

@ -84,9 +84,11 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return length;
}
static int loongarch_gdb_get_fpu(CPULoongArchState *env,
GByteArray *mem_buf, int n)
static int loongarch_gdb_get_fpu(CPUState *cs, GByteArray *mem_buf, int n)
{
LoongArchCPU *cpu = LOONGARCH_CPU(cs);
CPULoongArchState *env = &cpu->env;
if (0 <= n && n < 32) {
return gdb_get_reg64(mem_buf, env->fpr[n].vreg.D(0));
} else if (32 <= n && n < 40) {
@ -97,9 +99,10 @@ static int loongarch_gdb_get_fpu(CPULoongArchState *env,
return 0;
}
static int loongarch_gdb_set_fpu(CPULoongArchState *env,
uint8_t *mem_buf, int n)
static int loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n)
{
LoongArchCPU *cpu = LOONGARCH_CPU(cs);
CPULoongArchState *env = &cpu->env;
int length = 0;
if (0 <= n && n < 32) {
@ -118,5 +121,5 @@ static int loongarch_gdb_set_fpu(CPULoongArchState *env,
void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs)
{
gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu,
41, "loongarch-fpu.xml", 0);
gdb_find_static_feature("loongarch-fpu.xml"), 0);
}

View File

@ -570,7 +570,6 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;
cc->gdb_num_core_regs = 18;
cc->tcg_ops = &m68k_tcg_ops;
}

View File

@ -29,8 +29,11 @@
#define SIGNBIT (1u << 31)
static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
if (n < 8) {
float_status s;
return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s));
@ -46,8 +49,11 @@ static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
return 0;
}
static int cf_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
if (n < 8) {
float_status s;
env->fregs[n].d = float64_to_floatx80(ldq_p(mem_buf), &s);
@ -66,8 +72,11 @@ static int cf_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
return 0;
}
static int m68k_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
static int m68k_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
if (n < 8) {
int len = gdb_get_reg16(mem_buf, env->fregs[n].l.upper);
len += gdb_get_reg16(mem_buf, 0);
@ -85,8 +94,11 @@ static int m68k_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
return 0;
}
static int m68k_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
static int m68k_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
if (n < 8) {
env->fregs[n].l.upper = lduw_be_p(mem_buf);
env->fregs[n].l.lower = ldq_be_p(mem_buf + 4);
@ -112,10 +124,10 @@ void m68k_cpu_init_gdb(M68kCPU *cpu)
if (m68k_feature(env, M68K_FEATURE_CF_FPU)) {
gdb_register_coprocessor(cs, cf_fpu_gdb_get_reg, cf_fpu_gdb_set_reg,
11, "cf-fp.xml", 18);
gdb_find_static_feature("cf-fp.xml"), 18);
} else if (m68k_feature(env, M68K_FEATURE_FPU)) {
gdb_register_coprocessor(cs, m68k_fpu_gdb_get_reg,
m68k_fpu_gdb_set_reg, 11, "m68k-fp.xml", 18);
gdb_register_coprocessor(cs, m68k_fpu_gdb_get_reg, m68k_fpu_gdb_set_reg,
gdb_find_static_feature("m68k-fp.xml"), 18);
}
/* TODO: Add [E]MAC registers. */
}

View File

@ -313,8 +313,9 @@ static void mb_cpu_initfn(Object *obj)
CPUMBState *env = &cpu->env;
gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect,
mb_cpu_gdb_write_stack_protect, 2,
"microblaze-stack-protect.xml", 0);
mb_cpu_gdb_write_stack_protect,
gdb_find_static_feature("microblaze-stack-protect.xml"),
0);
set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
@ -443,7 +444,6 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->sysemu_ops = &mb_sysemu_ops;
#endif
device_class_set_props(dc, mb_properties);
cc->gdb_num_core_regs = 32 + 25;
cc->gdb_core_xml_file = "microblaze-core.xml";
cc->disas_set_info = mb_disas_set_info;

View File

@ -381,8 +381,8 @@ G_NORETURN void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int mb_cpu_gdb_read_stack_protect(CPUArchState *cpu, GByteArray *buf, int reg);
int mb_cpu_gdb_write_stack_protect(CPUArchState *cpu, uint8_t *buf, int reg);
int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *buf, int reg);
int mb_cpu_gdb_write_stack_protect(CPUState *cs, uint8_t *buf, int reg);
static inline uint32_t mb_cpu_read_msr(const CPUMBState *env)
{

View File

@ -49,14 +49,9 @@ enum {
int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUClass *cc = CPU_GET_CLASS(cs);
CPUMBState *env = &cpu->env;
uint32_t val;
if (n > cc->gdb_num_core_regs) {
return 0;
}
switch (n) {
case 1 ... 31:
val = env->regs[n];
@ -94,8 +89,10 @@ int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, val);
}
int mb_cpu_gdb_read_stack_protect(CPUMBState *env, GByteArray *mem_buf, int n)
int mb_cpu_gdb_read_stack_protect(CPUState *cs, GByteArray *mem_buf, int n)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
uint32_t val;
switch (n) {
@ -153,8 +150,11 @@ int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
}
int mb_cpu_gdb_write_stack_protect(CPUMBState *env, uint8_t *mem_buf, int n)
int mb_cpu_gdb_write_stack_protect(CPUState *cs, uint8_t *mem_buf, int n)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
switch (n) {
case GDB_SP_SHL:
env->slr = ldl_p(mem_buf);

View File

@ -20,6 +20,7 @@
#ifndef QEMU_PPC_CPU_QOM_H
#define QEMU_PPC_CPU_QOM_H
#include "exec/gdbstub.h"
#include "hw/core/cpu.h"
#ifdef TARGET_PPC64

View File

@ -1492,8 +1492,7 @@ struct PowerPCCPUClass {
int bfd_mach;
uint32_t l1_dcache_size, l1_icache_size;
#ifndef CONFIG_USER_ONLY
unsigned int gdb_num_sprs;
const char *gdb_spr_xml;
GDBFeature gdb_spr;
#endif
const PPCHash64Options *hash64_opts;
struct ppc_radix_page_info *radix_page_info;
@ -1546,8 +1545,6 @@ int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
#ifndef CONFIG_USER_ONLY
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu);
const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name);
#endif
int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s);

View File

@ -6682,10 +6682,6 @@ static void init_ppc_proc(PowerPCCPU *cpu)
/* PowerPC implementation specific initialisations (SPRs, timers, ...) */
(*pcc->init_proc)(env);
#if !defined(CONFIG_USER_ONLY)
ppc_gdb_gen_spr_xml(cpu);
#endif
/* MSR bits & flags consistency checks */
if (env->msr_mask & (1 << 25)) {
switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
@ -7389,9 +7385,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->gdb_num_core_regs = 71;
#ifndef CONFIG_USER_ONLY
cc->gdb_get_dynamic_xml = ppc_gdb_get_dynamic_xml;
#endif
#ifdef USE_APPLE_GDB
cc->gdb_read_register = ppc_cpu_gdb_read_register_apple;
cc->gdb_write_register = ppc_cpu_gdb_write_register_apple;

View File

@ -300,15 +300,23 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
}
#ifndef CONFIG_USER_ONLY
void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
static void gdb_gen_spr_feature(CPUState *cs)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
GString *xml;
char *spr_name;
GDBFeatureBuilder builder;
unsigned int num_regs = 0;
int i;
if (pcc->gdb_spr.xml) {
return;
}
gdb_feature_builder_init(&builder, &pcc->gdb_spr,
"org.qemu.power.spr", "power-spr.xml",
cs->gdb_num_regs);
for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
ppc_spr_t *spr = &env->spr_cb[i];
@ -326,45 +334,13 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
*/
spr->gdb_id = num_regs;
num_regs++;
gdb_feature_builder_append_reg(&builder, g_ascii_strdown(spr->name, -1),
TARGET_LONG_BITS, num_regs,
"int", "spr");
}
if (pcc->gdb_spr_xml) {
return;
}
xml = g_string_new("<?xml version=\"1.0\"?>");
g_string_append(xml, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append(xml, "<feature name=\"org.qemu.power.spr\">");
for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
ppc_spr_t *spr = &env->spr_cb[i];
if (!spr->name) {
continue;
}
spr_name = g_ascii_strdown(spr->name, -1);
g_string_append_printf(xml, "<reg name=\"%s\"", spr_name);
g_free(spr_name);
g_string_append_printf(xml, " bitsize=\"%d\"", TARGET_LONG_BITS);
g_string_append(xml, " group=\"spr\"/>");
}
g_string_append(xml, "</feature>");
pcc->gdb_num_sprs = num_regs;
pcc->gdb_spr_xml = g_string_free(xml, false);
}
const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
if (strcmp(xml_name, "power-spr.xml") == 0) {
return pcc->gdb_spr_xml;
}
return NULL;
gdb_feature_builder_end(&builder);
}
#endif
@ -383,8 +359,10 @@ static int gdb_find_spr_idx(CPUPPCState *env, int n)
return -1;
}
static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n)
static int gdb_get_spr_reg(CPUState *cs, GByteArray *buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int reg;
int len;
@ -424,8 +402,10 @@ static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n)
return len;
}
static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_spr_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int reg;
int len;
@ -453,8 +433,10 @@ static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
}
#endif
static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n)
static int gdb_get_float_reg(CPUState *cs, GByteArray *buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
uint8_t *mem_buf;
if (n < 32) {
gdb_get_reg64(buf, *cpu_fpr_ptr(env, n));
@ -471,8 +453,11 @@ static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_float_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
ppc_maybe_bswap_register(env, mem_buf, 8);
*cpu_fpr_ptr(env, n) = ldq_p(mem_buf);
@ -486,8 +471,10 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
static int gdb_get_avr_reg(CPUState *cs, GByteArray *buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
uint8_t *mem_buf;
if (n < 32) {
@ -512,8 +499,11 @@ static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_avr_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
ppc_avr_t *avr = cpu_avr_ptr(env, n);
ppc_maybe_bswap_register(env, mem_buf, 16);
@ -534,8 +524,11 @@ static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n)
static int gdb_get_spe_reg(CPUState *cs, GByteArray *buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
#if defined(TARGET_PPC64)
gdb_get_reg32(buf, env->gpr[n] >> 32);
@ -558,8 +551,11 @@ static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_spe_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
#if defined(TARGET_PPC64)
target_ulong lo = (uint32_t)env->gpr[n];
@ -587,8 +583,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n)
static int gdb_get_vsx_reg(CPUState *cs, GByteArray *buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n));
ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8);
@ -597,8 +596,11 @@ static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_vsx_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
if (n < 32) {
ppc_maybe_bswap_register(env, mem_buf, 8);
*cpu_vsrl_ptr(env, n) = ldq_p(mem_buf);
@ -620,22 +622,24 @@ void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *pcc)
{
if (pcc->insns_flags & PPC_FLOAT) {
gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
33, "power-fpu.xml", 0);
gdb_find_static_feature("power-fpu.xml"), 0);
}
if (pcc->insns_flags & PPC_ALTIVEC) {
gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
34, "power-altivec.xml", 0);
gdb_find_static_feature("power-altivec.xml"),
0);
}
if (pcc->insns_flags & PPC_SPE) {
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
34, "power-spe.xml", 0);
gdb_find_static_feature("power-spe.xml"), 0);
}
if (pcc->insns_flags2 & PPC2_VSX) {
gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
32, "power-vsx.xml", 0);
gdb_find_static_feature("power-vsx.xml"), 0);
}
#ifndef CONFIG_USER_ONLY
gdb_gen_spr_feature(cs);
gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg,
pcc->gdb_num_sprs, "power-spr.xml", 0);
&pcc->gdb_spr, 0);
#endif
}

View File

@ -2300,19 +2300,6 @@ static const gchar *riscv_gdb_arch_name(CPUState *cs)
}
}
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
RISCVCPU *cpu = RISCV_CPU(cs);
if (strcmp(xmlname, "riscv-csr.xml") == 0) {
return cpu->dyn_csr_xml;
} else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
return cpu->dyn_vreg_xml;
}
return NULL;
}
#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
@ -2352,7 +2339,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
cc->get_pc = riscv_cpu_get_pc;
cc->gdb_read_register = riscv_cpu_gdb_read_register;
cc->gdb_write_register = riscv_cpu_gdb_write_register;
cc->gdb_num_core_regs = 33;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
@ -2360,7 +2346,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
cc->get_arch_id = riscv_get_arch_id;
#endif
cc->gdb_arch_name = riscv_gdb_arch_name;
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
device_class_set_props(dc, riscv_cpu_properties);
}

View File

@ -24,6 +24,7 @@
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-defs.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
@ -445,8 +446,8 @@ struct ArchCPU {
CPURISCVState env;
char *dyn_csr_xml;
char *dyn_vreg_xml;
GDBFeature dyn_csr_feature;
GDBFeature dyn_vreg_feature;
/* Configuration Settings */
RISCVCPUConfig cfg;

View File

@ -108,8 +108,11 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return length;
}
static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n)
static int riscv_gdb_get_fpu(CPUState *cs, GByteArray *buf, int n)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (n < 32) {
if (env->misa_ext & RVD) {
return gdb_get_reg64(buf, env->fpr[n]);
@ -121,8 +124,11 @@ static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n)
return 0;
}
static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
static int riscv_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (n < 32) {
env->fpr[n] = ldq_p(mem_buf); /* always 64-bit */
return sizeof(uint64_t);
@ -130,9 +136,11 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
return 0;
}
static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
static int riscv_gdb_get_vector(CPUState *cs, GByteArray *buf, int n)
{
uint16_t vlenb = riscv_cpu_cfg(env)->vlenb;
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
uint16_t vlenb = cpu->cfg.vlenb;
if (n < 32) {
int i;
int cnt = 0;
@ -146,9 +154,11 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n)
return 0;
}
static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n)
static int riscv_gdb_set_vector(CPUState *cs, uint8_t *mem_buf, int n)
{
uint16_t vlenb = riscv_cpu_cfg(env)->vlenb;
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
uint16_t vlenb = cpu->cfg.vlenb;
if (n < 32) {
int i;
for (i = 0; i < vlenb; i += 8) {
@ -160,8 +170,11 @@ static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n)
return 0;
}
static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n)
static int riscv_gdb_get_csr(CPUState *cs, GByteArray *buf, int n)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (n < CSR_TABLE_SIZE) {
target_ulong val = 0;
int result;
@ -174,8 +187,11 @@ static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n)
return 0;
}
static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n)
static int riscv_gdb_set_csr(CPUState *cs, uint8_t *mem_buf, int n)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (n < CSR_TABLE_SIZE) {
target_ulong val = ldtul_p(mem_buf);
int result;
@ -188,25 +204,31 @@ static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n)
return 0;
}
static int riscv_gdb_get_virtual(CPURISCVState *cs, GByteArray *buf, int n)
static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
{
if (n == 0) {
#ifdef CONFIG_USER_ONLY
return gdb_get_regl(buf, 0);
#else
return gdb_get_regl(buf, cs->priv);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
return gdb_get_regl(buf, env->priv);
#endif
}
return 0;
}
static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
{
if (n == 0) {
#ifndef CONFIG_USER_ONLY
cs->priv = ldtul_p(mem_buf) & 0x3;
if (cs->priv == PRV_RESERVED) {
cs->priv = PRV_S;
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
env->priv = ldtul_p(mem_buf) & 0x3;
if (env->priv == PRV_RESERVED) {
env->priv = PRV_S;
}
#endif
return sizeof(target_ulong);
@ -214,14 +236,15 @@ static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
return 0;
}
static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
static GDBFeature *riscv_gen_dynamic_csr_feature(CPUState *cs, int base_reg)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cs);
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
GString *s = g_string_new(NULL);
GDBFeatureBuilder builder;
riscv_csr_predicate_fn predicate;
int bitsize = riscv_cpu_max_xlen(mcc);
const char *name;
int i;
#if !defined(CONFIG_USER_ONLY)
@ -233,9 +256,9 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
bitsize = 64;
}
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">");
gdb_feature_builder_init(&builder, &cpu->dyn_csr_feature,
"org.gnu.gdb.riscv.csr", "riscv-csr.xml",
base_reg);
for (i = 0; i < CSR_TABLE_SIZE; i++) {
if (env->priv_ver < csr_ops[i].min_priv_ver) {
@ -243,72 +266,62 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
}
predicate = csr_ops[i].predicate;
if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) {
if (csr_ops[i].name) {
g_string_append_printf(s, "<reg name=\"%s\"", csr_ops[i].name);
} else {
g_string_append_printf(s, "<reg name=\"csr%03x\"", i);
name = csr_ops[i].name;
if (!name) {
name = g_strdup_printf("csr%03x", i);
}
g_string_append_printf(s, " bitsize=\"%d\"", bitsize);
g_string_append_printf(s, " regnum=\"%d\"/>", base_reg + i);
gdb_feature_builder_append_reg(&builder, name, bitsize, i,
"int", NULL);
}
}
g_string_append_printf(s, "</feature>");
cpu->dyn_csr_xml = g_string_free(s, false);
gdb_feature_builder_end(&builder);
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
#endif
return CSR_TABLE_SIZE;
return &cpu->dyn_csr_feature;
}
static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg)
static GDBFeature *ricsv_gen_dynamic_vector_feature(CPUState *cs, int base_reg)
{
RISCVCPU *cpu = RISCV_CPU(cs);
GString *s = g_string_new(NULL);
g_autoptr(GString) ts = g_string_new("");
int reg_width = cpu->cfg.vlenb << 3;
int num_regs = 0;
int reg_width = cpu->cfg.vlenb;
GDBFeatureBuilder builder;
int i;
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.vector\">");
gdb_feature_builder_init(&builder, &cpu->dyn_vreg_feature,
"org.gnu.gdb.riscv.vector", "riscv-vector.xml",
base_reg);
/* First define types and totals in a whole VL */
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
int count = reg_width / vec_lanes[i].size;
g_string_printf(ts, "%s", vec_lanes[i].id);
g_string_append_printf(s,
"<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
ts->str, vec_lanes[i].gdb_type, count);
gdb_feature_builder_append_tag(
&builder, "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
vec_lanes[i].id, vec_lanes[i].gdb_type, count);
}
/* Define unions */
g_string_append_printf(s, "<union id=\"riscv_vector\">");
gdb_feature_builder_append_tag(&builder, "<union id=\"riscv_vector\">");
for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
g_string_append_printf(s, "<field name=\"%c\" type=\"%s\"/>",
vec_lanes[i].suffix,
vec_lanes[i].id);
gdb_feature_builder_append_tag(&builder,
"<field name=\"%c\" type=\"%s\"/>",
vec_lanes[i].suffix, vec_lanes[i].id);
}
g_string_append(s, "</union>");
gdb_feature_builder_append_tag(&builder, "</union>");
/* Define vector registers */
for (i = 0; i < 32; i++) {
g_string_append_printf(s,
"<reg name=\"v%d\" bitsize=\"%d\""
" regnum=\"%d\" group=\"vector\""
" type=\"riscv_vector\"/>",
i, reg_width, base_reg++);
num_regs++;
gdb_feature_builder_append_reg(&builder, g_strdup_printf("v%d", i),
reg_width, i, "riscv_vector", "vector");
}
g_string_append_printf(s, "</feature>");
gdb_feature_builder_end(&builder);
cpu->dyn_vreg_xml = g_string_free(s, false);
return num_regs;
return &cpu->dyn_vreg_feature;
}
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
@ -318,38 +331,40 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
CPURISCVState *env = &cpu->env;
if (env->misa_ext & RVD) {
gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
32, "riscv-64bit-fpu.xml", 0);
gdb_find_static_feature("riscv-64bit-fpu.xml"),
0);
} else if (env->misa_ext & RVF) {
gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
32, "riscv-32bit-fpu.xml", 0);
gdb_find_static_feature("riscv-32bit-fpu.xml"),
0);
}
if (env->misa_ext & RVV) {
int base_reg = cs->gdb_num_regs;
gdb_register_coprocessor(cs, riscv_gdb_get_vector,
riscv_gdb_set_vector,
ricsv_gen_dynamic_vector_xml(cs, base_reg),
"riscv-vector.xml", 0);
ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs),
0);
}
switch (mcc->misa_mxl_max) {
case MXL_RV32:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
1, "riscv-32bit-virtual.xml", 0);
gdb_find_static_feature("riscv-32bit-virtual.xml"),
0);
break;
case MXL_RV64:
case MXL_RV128:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
1, "riscv-64bit-virtual.xml", 0);
gdb_find_static_feature("riscv-64bit-virtual.xml"),
0);
break;
default:
g_assert_not_reached();
}
if (cpu->cfg.ext_zicsr) {
int base_reg = cs->gdb_num_regs;
gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
riscv_gen_dynamic_csr_xml(cs, base_reg),
"riscv-csr.xml", 0);
riscv_gen_dynamic_csr_feature(cs, cs->gdb_num_regs),
0);
}
}

View File

@ -221,7 +221,6 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
cc->gdb_write_register = rx_cpu_gdb_write_register;
cc->disas_set_info = rx_cpu_disas_set_info;
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "rx-core.xml";
cc->tcg_ops = &rx_tcg_ops;
}

View File

@ -368,7 +368,6 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
s390_cpu_class_init_sysemu(cc);
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cc->gdb_core_xml_file = "s390x-core64.xml";
cc->gdb_arch_name = s390_gdb_arch_name;

View File

@ -491,8 +491,6 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
#define S390_R13_REGNUM 15
#define S390_R14_REGNUM 16
#define S390_R15_REGNUM 17
/* Total Core Registers. */
#define S390_NUM_CORE_REGS 18
static inline void setcc(S390CPU *cpu, uint64_t cc)
{

View File

@ -67,11 +67,12 @@ int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
/* the values represent the positions in s390-acr.xml */
#define S390_A0_REGNUM 0
#define S390_A15_REGNUM 15
/* total number of registers in s390-acr.xml */
#define S390_NUM_AC_REGS 16
static int cpu_read_ac_reg(CPUS390XState *env, GByteArray *buf, int n)
static int cpu_read_ac_reg(CPUState *cs, GByteArray *buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_A0_REGNUM ... S390_A15_REGNUM:
return gdb_get_reg32(buf, env->aregs[n]);
@ -80,8 +81,11 @@ static int cpu_read_ac_reg(CPUS390XState *env, GByteArray *buf, int n)
}
}
static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_ac_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_A0_REGNUM ... S390_A15_REGNUM:
env->aregs[n] = ldl_p(mem_buf);
@ -96,11 +100,12 @@ static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_FPC_REGNUM 0
#define S390_F0_REGNUM 1
#define S390_F15_REGNUM 16
/* total number of registers in s390-fpr.xml */
#define S390_NUM_FP_REGS 17
static int cpu_read_fp_reg(CPUS390XState *env, GByteArray *buf, int n)
static int cpu_read_fp_reg(CPUState *cs, GByteArray *buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_FPC_REGNUM:
return gdb_get_reg32(buf, env->fpc);
@ -111,8 +116,11 @@ static int cpu_read_fp_reg(CPUS390XState *env, GByteArray *buf, int n)
}
}
static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_fp_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_FPC_REGNUM:
env->fpc = ldl_p(mem_buf);
@ -130,11 +138,11 @@ static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_V15L_REGNUM 15
#define S390_V16_REGNUM 16
#define S390_V31_REGNUM 31
/* total number of registers in s390-vx.xml */
#define S390_NUM_VREGS 32
static int cpu_read_vreg(CPUS390XState *env, GByteArray *buf, int n)
static int cpu_read_vreg(CPUState *cs, GByteArray *buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
int ret;
switch (n) {
@ -152,8 +160,11 @@ static int cpu_read_vreg(CPUS390XState *env, GByteArray *buf, int n)
return ret;
}
static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_vreg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_V0L_REGNUM ... S390_V15L_REGNUM:
env->vregs[n][1] = ldtul_p(mem_buf + 8);
@ -170,12 +181,13 @@ static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
/* the values represent the positions in s390-cr.xml */
#define S390_C0_REGNUM 0
#define S390_C15_REGNUM 15
/* total number of registers in s390-cr.xml */
#define S390_NUM_C_REGS 16
#ifndef CONFIG_USER_ONLY
static int cpu_read_c_reg(CPUS390XState *env, GByteArray *buf, int n)
static int cpu_read_c_reg(CPUState *cs, GByteArray *buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_C0_REGNUM ... S390_C15_REGNUM:
return gdb_get_regl(buf, env->cregs[n]);
@ -184,8 +196,11 @@ static int cpu_read_c_reg(CPUS390XState *env, GByteArray *buf, int n)
}
}
static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_c_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_C0_REGNUM ... S390_C15_REGNUM:
env->cregs[n] = ldtul_p(mem_buf);
@ -204,11 +219,12 @@ static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_VIRT_CPUTM_REGNUM 1
#define S390_VIRT_BEA_REGNUM 2
#define S390_VIRT_PREFIX_REGNUM 3
/* total number of registers in s390-virt.xml */
#define S390_NUM_VIRT_REGS 4
static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
static int cpu_read_virt_reg(CPUState *cs, GByteArray *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_VIRT_CKC_REGNUM:
return gdb_get_regl(mem_buf, env->ckc);
@ -223,24 +239,27 @@ static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
}
}
static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_virt_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_VIRT_CKC_REGNUM:
env->ckc = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_CPUTM_REGNUM:
env->cputm = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_BEA_REGNUM:
env->gbea = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
cpu_synchronize_post_init(cs);
return 8;
case S390_VIRT_PREFIX_REGNUM:
env->psa = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
cpu_synchronize_post_init(cs);
return 8;
default:
return 0;
@ -252,11 +271,12 @@ static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_VIRT_KVM_PFT_REGNUM 1
#define S390_VIRT_KVM_PFS_REGNUM 2
#define S390_VIRT_KVM_PFC_REGNUM 3
/* total number of registers in s390-virt-kvm.xml */
#define S390_NUM_VIRT_KVM_REGS 4
static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
static int cpu_read_virt_kvm_reg(CPUState *cs, GByteArray *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_VIRT_KVM_PP_REGNUM:
return gdb_get_regl(mem_buf, env->pp);
@ -271,8 +291,11 @@ static int cpu_read_virt_kvm_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
}
}
static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_virt_kvm_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
switch (n) {
case S390_VIRT_KVM_PP_REGNUM:
env->pp = ldtul_p(mem_buf);
@ -301,16 +324,20 @@ static int cpu_write_virt_kvm_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
#define S390_GS_GSD_REGNUM 1
#define S390_GS_GSSM_REGNUM 2
#define S390_GS_GSEPLA_REGNUM 3
/* total number of registers in s390-gs.xml */
#define S390_NUM_GS_REGS 4
static int cpu_read_gs_reg(CPUS390XState *env, GByteArray *buf, int n)
static int cpu_read_gs_reg(CPUState *cs, GByteArray *buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
return gdb_get_regl(buf, env->gscb[n]);
}
static int cpu_write_gs_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
static int cpu_write_gs_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
env->gscb[n] = ldtul_p(mem_buf);
cpu_synchronize_post_init(env_cpu(env));
return 8;
@ -320,33 +347,33 @@ void s390_cpu_gdb_init(CPUState *cs)
{
gdb_register_coprocessor(cs, cpu_read_ac_reg,
cpu_write_ac_reg,
S390_NUM_AC_REGS, "s390-acr.xml", 0);
gdb_find_static_feature("s390-acr.xml"), 0);
gdb_register_coprocessor(cs, cpu_read_fp_reg,
cpu_write_fp_reg,
S390_NUM_FP_REGS, "s390-fpr.xml", 0);
gdb_find_static_feature("s390-fpr.xml"), 0);
gdb_register_coprocessor(cs, cpu_read_vreg,
cpu_write_vreg,
S390_NUM_VREGS, "s390-vx.xml", 0);
gdb_find_static_feature("s390-vx.xml"), 0);
gdb_register_coprocessor(cs, cpu_read_gs_reg,
cpu_write_gs_reg,
S390_NUM_GS_REGS, "s390-gs.xml", 0);
gdb_find_static_feature("s390-gs.xml"), 0);
#ifndef CONFIG_USER_ONLY
gdb_register_coprocessor(cs, cpu_read_c_reg,
cpu_write_c_reg,
S390_NUM_C_REGS, "s390-cr.xml", 0);
gdb_find_static_feature("s390-cr.xml"), 0);
gdb_register_coprocessor(cs, cpu_read_virt_reg,
cpu_write_virt_reg,
S390_NUM_VIRT_REGS, "s390-virt.xml", 0);
gdb_find_static_feature("s390-virt.xml"), 0);
if (kvm_enabled()) {
gdb_register_coprocessor(cs, cpu_read_virt_kvm_reg,
cpu_write_virt_kvm_reg,
S390_NUM_VIRT_KVM_REGS, "s390-virt-kvm.xml",
gdb_find_static_feature("s390-virt-kvm.xml"),
0);
}
#endif

@@ -46,6 +46,25 @@ typedef struct {
char *disas;
} Instruction;
/*
* Initialise a new vcpu by reading the register list
*/
static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index)
{
g_autoptr(GArray) reg_list = qemu_plugin_get_registers();
g_autoptr(GByteArray) reg_value = g_byte_array_new();
if (reg_list) {
for (int i = 0; i < reg_list->len; i++) {
qemu_plugin_reg_descriptor *rd = &g_array_index(
reg_list, qemu_plugin_reg_descriptor, i);
int count = qemu_plugin_read_register(rd->handle, reg_value);
g_assert(count > 0);
}
}
}
static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
{
unsigned int i = cpu_index % MAX_CPUS;
@@ -212,6 +231,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
sizes = g_array_new(true, true, sizeof(unsigned long));
}
/* Register init, translation block and exit callbacks */
qemu_plugin_register_vcpu_init_cb(id, vcpu_init);
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
return 0;
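The vcpu_init hook above simply walks the descriptor list and reads every register to exercise the new API. A plugin that only cares about one register could instead resolve it by name once; a minimal sketch under the assumption that qemu_plugin_reg_descriptor exposes name and handle fields (find_register is a hypothetical helper, not part of this change):

/* Hypothetical helper: resolve a single register by name so later
 * callbacks can read just that register. Must run in a vCPU context,
 * e.g. from a vcpu_init callback. */
static struct qemu_plugin_register *find_register(const char *name)
{
    g_autoptr(GArray) regs = qemu_plugin_get_registers();

    for (guint i = 0; regs && i < regs->len; i++) {
        qemu_plugin_reg_descriptor *rd =
            &g_array_index(regs, qemu_plugin_reg_descriptor, i);
        if (g_str_equal(rd->name, name)) {
            return rd->handle;
        }
    }
    return NULL;
}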

@@ -93,12 +93,9 @@ QEMU_OPTS=
# If TCG debugging, or TCI is enabled things are a lot slower
# ??? Makefile no longer has any indication that TCI is enabled,
# but for the record:
# 15s original default
# 60s with --enable-debug
# 90s with --enable-tcg-interpreter
TIMEOUT=90
# so we have to set our timeout for that. The current worst case
# offender is the system memory test running under TCI.
TIMEOUT=120
ifeq ($(filter %-softmmu, $(TARGET)),)
# The order we include is important. We include multiarch first and

@@ -1,10 +1,10 @@
/*
* Semihosting Tests - AArch64 helper
*
* Copyright (c) 2019
* Copyright (c) 2019, 2024
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
uintptr_t __semi_call(uintptr_t type, uintptr_t arg0)

@@ -1,10 +1,10 @@
/*
* Semihosting Tests - ARM Helper
*
* Copyright (c) 2019
* Copyright (c) 2019, 2024
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
uintptr_t __semi_call(uintptr_t type, uintptr_t arg0)

@@ -2,12 +2,12 @@
* i386 boot code, based on qemu-bmibug.
*
* Copyright 2019 Doug Gale
* Copyright 2019 Linaro
* Copyright 2019, 2024 Linaro
*
* This work is licensed under the terms of the GNU GPL, version 3 or later.
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
.section .head

@@ -1,10 +1,10 @@
/*
* linux-user semihosting console
*
* Copyright (c) 2019
* Copyright (c) 2024
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#define SYS_READC 0x07

@@ -1,10 +1,10 @@
/*
* linux-user semihosting checks
*
* Copyright (c) 2019
* Copyright (c) 2019, 2024
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#define SYS_WRITE0 0x04

@@ -1,9 +1,9 @@
/*
* Floating Point Convert Doubles to Various
*
* Copyright (c) 2019 Linaro
* Copyright (c) 2019, 2024 Linaro
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <stdio.h>

@@ -1,9 +1,9 @@
/*
* Floating Point Convert Single to Various
*
* Copyright (c) 2019 Linaro
* Copyright (c) 2019, 2024 Linaro
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <stdio.h>

@@ -1,9 +1,9 @@
/*
* Common Float Helpers
*
* Copyright (c) 2019 Linaro
* Copyright (c) 2019, 2024 Linaro
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <inttypes.h>

@@ -1,9 +1,9 @@
/*
* Fused Multiply Add (Single)
*
* Copyright (c) 2019 Linaro
* Copyright (c) 2019, 2024 Linaro
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <stdio.h>

@@ -5,9 +5,9 @@
* floating point constants useful for exercising the edge cases in
* floating point tests.
*
* Copyright (c) 2019 Linaro
* Copyright (c) 2019, 2024 Linaro
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/* we want additional float type definitions */

@@ -1,10 +1,10 @@
/*
* Semihosting Tests - RiscV64 Helper
*
* Copyright (c) 2021
* Copyright (c) 2021, 2024
* Written by Alex Bennée <alex.bennee@linaro.org>
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
uintptr_t __semi_call(uintptr_t type, uintptr_t arg0)

@@ -1,16 +1,16 @@
/*
* x86_64 boot and support code
*
* Copyright 2019 Linaro
* Copyright 2019, 2024 Linaro
*
* This work is licensed under the terms of the GNU GPL, version 3 or later.
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
* Unlike the i386 version we instead use Xen's PVHVM booting header
* which should drop us automatically into 32 bit mode ready to go. I've
* nabbed bits of the Linux kernel setup to achieve this.
*
* SPDX-License-Identifier: GPL-3.0-or-later
* SPDX-License-Identifier: GPL-2.0-or-later
*/
.section .head

@@ -102,7 +102,7 @@ $(IMAGES_DIR)/%.img: $(SRC_PATH)/tests/vm/% \
$(if $(LOG_CONSOLE),--log-console) \
--source-path $(SRC_PATH) \
--image "$@" \
--force \
$(if $(filter-out check-venv, $?), --force) \
--build-image $@, \
" VM-IMAGE $*")

@@ -646,9 +646,9 @@ def main(vmcls, config=None):
vm = vmcls(args, config=config)
if args.build_image:
if os.path.exists(args.image) and not args.force:
sys.stderr.writelines(["Image file exists: %s\n" % args.image,
sys.stderr.writelines(["Image file exists, skipping build: %s\n" % args.image,
"Use --force option to overwrite\n"])
return 1
return 0
return vm.build_image(args.image)
if args.build_qemu:
vm.add_source_dir(args.build_qemu)

@@ -22,8 +22,8 @@ class OpenBSDVM(basevm.BaseVM):
name = "openbsd"
arch = "x86_64"
link = "https://cdn.openbsd.org/pub/OpenBSD/7.2/amd64/install72.iso"
csum = "0369ef40a3329efcb978c578c7fdc7bda71e502aecec930a74b44160928c91d3"
link = "https://cdn.openbsd.org/pub/OpenBSD/7.4/amd64/install74.iso"
csum = "a1001736ed9fe2307965b5fcdb426ae11f9b80d26eb21e404a705144a0a224a0"
size = "20G"
pkgs = [
# tools
@@ -99,10 +99,10 @@ class OpenBSDVM(basevm.BaseVM):
self.console_wait_send("(I)nstall", "i\n")
self.console_wait_send("Terminal type", "xterm\n")
self.console_wait_send("System hostname", "openbsd\n")
self.console_wait_send("Which network interface", "vio0\n")
self.console_wait_send("Network interface to configure", "vio0\n")
self.console_wait_send("IPv4 address", "autoconf\n")
self.console_wait_send("IPv6 address", "none\n")
self.console_wait_send("Which network interface", "done\n")
self.console_wait_send("Network interface to configure", "done\n")
self.console_wait("Password for root account")
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Password for root account")
@@ -124,6 +124,7 @@ class OpenBSDVM(basevm.BaseVM):
self.console_wait_send("Allow root ssh login", "yes\n")
self.console_wait_send("timezone", "UTC\n")
self.console_wait_send("root disk", "\n")
self.console_wait_send("Encrypt the root disk with a passphrase", "no\n")
self.console_wait_send("(W)hole disk", "\n")
self.console_wait_send("(A)uto layout", "c\n")