2003-03-07 02:23:54 +03:00
|
|
|
/*
|
2012-02-06 10:02:55 +04:00
|
|
|
* emulator main execution loop
|
2007-09-17 01:08:06 +04:00
|
|
|
*
|
2005-04-07 00:47:48 +04:00
|
|
|
* Copyright (c) 2003-2005 Fabrice Bellard
|
2003-03-07 02:23:54 +03:00
|
|
|
*
|
2003-03-23 23:17:16 +03:00
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2019-01-23 17:08:56 +03:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2003-03-07 02:23:54 +03:00
|
|
|
*
|
2003-03-23 23:17:16 +03:00
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
2003-03-07 02:23:54 +03:00
|
|
|
*
|
2003-03-23 23:17:16 +03:00
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2009-07-17 00:47:01 +04:00
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
2003-03-07 02:23:54 +03:00
|
|
|
*/
|
2019-05-23 17:35:08 +03:00
|
|
|
|
2016-01-26 21:16:56 +03:00
|
|
|
#include "qemu/osdep.h"
|
2020-08-19 14:17:19 +03:00
|
|
|
#include "qemu/qemu-print.h"
|
2021-09-08 12:35:43 +03:00
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "qapi/type-helpers.h"
|
2021-02-04 19:39:23 +03:00
|
|
|
#include "hw/core/tcg-cpu-ops.h"
|
2017-06-02 09:06:44 +03:00
|
|
|
#include "trace.h"
|
2012-10-24 13:12:21 +04:00
|
|
|
#include "disas/disas.h"
|
2016-03-15 15:18:37 +03:00
|
|
|
#include "exec/exec-all.h"
|
2020-01-01 14:23:00 +03:00
|
|
|
#include "tcg/tcg.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/atomic.h"
|
2015-01-21 14:09:14 +03:00
|
|
|
#include "qemu/rcu.h"
|
2016-01-07 16:55:28 +03:00
|
|
|
#include "exec/log.h"
|
tcg: drop global lock during TCG code execution
This finally allows TCG to benefit from the iothread introduction: Drop
the global mutex while running pure TCG CPU code. Reacquire the lock
when entering MMIO or PIO emulation, or when leaving the TCG loop.
We have to revert a few optimization for the current TCG threading
model, namely kicking the TCG thread in qemu_mutex_lock_iothread and not
kicking it in qemu_cpu_kick. We also need to disable RAM block
reordering until we have a more efficient locking mechanism at hand.
Still, a Linux x86 UP guest and my Musicpal ARM model boot fine here.
These numbers demonstrate where we gain something:
20338 jan 20 0 331m 75m 6904 R 99 0.9 0:50.95 qemu-system-arm
20337 jan 20 0 331m 75m 6904 S 20 0.9 0:26.50 qemu-system-arm
The guest CPU was fully loaded, but the iothread could still run mostly
independent on a second core. Without the patch we don't get beyond
32206 jan 20 0 330m 73m 7036 R 82 0.9 1:06.00 qemu-system-arm
32204 jan 20 0 330m 73m 7036 S 21 0.9 0:17.03 qemu-system-arm
We don't benefit significantly, though, when the guest is not fully
loading a host CPU.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Message-Id: <1439220437-23957-10-git-send-email-fred.konrad@greensocs.com>
[FK: Rebase, fix qemu_devices_reset deadlock, rm address_space_* mutex]
Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
[EGC: fixed iothread lock for cpu-exec IRQ handling]
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: -smp single-threaded fix, clean commit msg, BQL fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
[PM: target-arm changes]
Acked-by: Peter Maydell <peter.maydell@linaro.org>
2017-02-23 21:29:11 +03:00
|
|
|
#include "qemu/main-loop.h"
|
2015-09-17 19:23:31 +03:00
|
|
|
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
|
|
|
|
#include "hw/i386/apic.h"
|
|
|
|
#endif
|
2017-03-03 14:01:16 +03:00
|
|
|
#include "sysemu/cpus.h"
|
2020-08-19 14:17:19 +03:00
|
|
|
#include "exec/cpu-all.h"
|
|
|
|
#include "sysemu/cpu-timers.h"
|
2022-12-19 20:09:43 +03:00
|
|
|
#include "exec/replay-core.h"
|
2021-09-08 12:35:43 +03:00
|
|
|
#include "sysemu/tcg.h"
|
2023-06-11 11:58:22 +03:00
|
|
|
#include "exec/helper-proto-common.h"
|
2022-08-15 23:13:05 +03:00
|
|
|
#include "tb-jmp-cache.h"
|
2021-05-24 20:04:53 +03:00
|
|
|
#include "tb-hash.h"
|
|
|
|
#include "tb-context.h"
|
2023-09-14 21:57:15 +03:00
|
|
|
#include "internal-common.h"
|
2023-09-14 21:57:14 +03:00
|
|
|
#include "internal-target.h"
|
2014-07-25 13:56:31 +04:00
|
|
|
|
|
|
|
/* -icount align implementation. */
|
|
|
|
|
|
|
|
/*
 * Book-keeping for -icount align: tracks how far the guest clock has
 * drifted from host real time so execution can be throttled to match.
 */
typedef struct SyncClocks {
    int64_t diff_clk;         /* guest-vs-host clock difference, in ns */
    int64_t last_cpu_icount;  /* icount value at the previous alignment */
    int64_t realtime_clock;   /* host QEMU_CLOCK_VIRTUAL_RT snapshot */
} SyncClocks;
|
|
|
|
|
|
|
|
#if !defined(CONFIG_USER_ONLY)
|
|
|
|
/* Allow the guest to have a max 3ms advance.
|
|
|
|
* The difference between the 2 clocks could therefore
|
|
|
|
* oscillate around 0.
|
|
|
|
*/
|
|
|
|
#define VM_CLOCK_ADVANCE 3000000
|
2014-07-25 13:56:32 +04:00
|
|
|
#define THRESHOLD_REDUCE 1.5
|
|
|
|
#define MAX_DELAY_PRINT_RATE 2000000000LL
|
|
|
|
#define MAX_NB_PRINTS 100
|
2014-07-25 13:56:31 +04:00
|
|
|
|
2022-12-19 20:09:40 +03:00
|
|
|
int64_t max_delay;
|
|
|
|
int64_t max_advance;
|
2020-08-19 14:17:19 +03:00
|
|
|
|
2019-03-29 00:54:23 +03:00
|
|
|
/*
 * Re-align guest virtual time with host real time when -icount align
 * is enabled.  If the guest has run ahead by more than VM_CLOCK_ADVANCE
 * nanoseconds, sleep the host thread until the clocks agree again.
 */
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t insn_count;

    if (!icount_align_option) {
        return;
    }

    /* Account for instructions executed since the last alignment. */
    insn_count = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - insn_count);
    sc->last_cpu_icount = insn_count;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;

        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* Interrupted: carry the unslept remainder forward. */
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
|
|
|
|
|
2014-07-25 13:56:32 +04:00
|
|
|
static void print_delay(const SyncClocks *sc)
|
|
|
|
{
|
|
|
|
static float threshold_delay;
|
|
|
|
static int64_t last_realtime_clock;
|
|
|
|
static int nb_prints;
|
|
|
|
|
|
|
|
if (icount_align_option &&
|
|
|
|
sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
|
|
|
|
nb_prints < MAX_NB_PRINTS) {
|
|
|
|
if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
|
|
|
|
(-sc->diff_clk / (float)1000000000LL <
|
|
|
|
(threshold_delay - THRESHOLD_REDUCE))) {
|
|
|
|
threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
|
2020-08-19 14:17:19 +03:00
|
|
|
qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
|
|
|
|
threshold_delay - 1,
|
|
|
|
threshold_delay);
|
2014-07-25 13:56:32 +04:00
|
|
|
nb_prints++;
|
|
|
|
last_realtime_clock = sc->realtime_clock;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-29 00:54:23 +03:00
|
|
|
/*
 * Initialise the clock-alignment state for this execution slice and
 * update the global max_delay/max_advance statistics.
 */
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }

    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;

    /* Track the worst drift seen in either direction. */
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /*
     * Print every 2s max if the guest is late.  We limit the number
     * of printed messages to MAX_NB_PRINTS (currently 100).
     */
    print_delay(sc);
}
|
|
|
|
#else
|
|
|
|
/* User-only build: no icount alignment, nothing to do. */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}
|
|
|
|
|
|
|
|
/* User-only build: no icount alignment, nothing to initialise. */
static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
|
|
|
|
#endif /* CONFIG USER ONLY */
|
2003-03-07 02:23:54 +03:00
|
|
|
|
2021-07-18 01:18:40 +03:00
|
|
|
/*
 * Compute the compile flags for the next TB on this vCPU, starting
 * from the per-CPU baseline and layering on debug/logging modes.
 */
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}
|
|
|
|
|
2022-08-16 21:53:18 +03:00
|
|
|
struct tb_desc {
|
2023-06-21 16:56:27 +03:00
|
|
|
vaddr pc;
|
|
|
|
uint64_t cs_base;
|
2022-08-16 21:53:18 +03:00
|
|
|
CPUArchState *env;
|
2022-08-15 23:00:57 +03:00
|
|
|
tb_page_addr_t page_addr0;
|
2022-08-16 21:53:18 +03:00
|
|
|
uint32_t flags;
|
|
|
|
uint32_t cflags;
|
|
|
|
};
|
|
|
|
|
|
|
|
static bool tb_lookup_cmp(const void *p, const void *d)
|
|
|
|
{
|
|
|
|
const TranslationBlock *tb = p;
|
|
|
|
const struct tb_desc *desc = d;
|
|
|
|
|
2023-02-27 16:51:47 +03:00
|
|
|
if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
|
2022-09-20 14:21:40 +03:00
|
|
|
tb_page_addr0(tb) == desc->page_addr0 &&
|
2022-08-16 21:53:18 +03:00
|
|
|
tb->cs_base == desc->cs_base &&
|
|
|
|
tb->flags == desc->flags &&
|
|
|
|
tb_cflags(tb) == desc->cflags) {
|
|
|
|
/* check next page if needed */
|
2022-09-20 14:21:40 +03:00
|
|
|
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
|
|
|
|
if (tb_phys_page1 == -1) {
|
2022-08-16 21:53:18 +03:00
|
|
|
return true;
|
|
|
|
} else {
|
2022-08-15 23:00:57 +03:00
|
|
|
tb_page_addr_t phys_page1;
|
2023-06-21 16:56:27 +03:00
|
|
|
vaddr virt_page1;
|
2022-08-16 21:53:18 +03:00
|
|
|
|
2022-08-23 04:50:46 +03:00
|
|
|
/*
|
|
|
|
* We know that the first page matched, and an otherwise valid TB
|
|
|
|
* encountered an incomplete instruction at the end of that page,
|
|
|
|
* therefore we know that generating a new TB from the current PC
|
|
|
|
* must also require reading from the next page -- even if the
|
|
|
|
* second pages do not match, and therefore the resulting insn
|
|
|
|
* is different for the new TB. Therefore any exception raised
|
|
|
|
* here by the faulting lookup is not premature.
|
|
|
|
*/
|
2022-08-15 23:00:57 +03:00
|
|
|
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
|
|
|
|
phys_page1 = get_page_addr_code(desc->env, virt_page1);
|
2022-09-20 14:21:40 +03:00
|
|
|
if (tb_phys_page1 == phys_page1) {
|
2022-08-16 21:53:18 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-06-21 16:56:27 +03:00
|
|
|
/*
 * Slow-path TB lookup in the global qht hash table.
 * Returns NULL if the pc is not currently mapped executable
 * (get_page_addr_code faulted) or no matching TB exists.
 */
static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    struct tb_desc desc;
    tb_page_addr_t phys_pc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;

    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;

    /* CF_PCREL TBs hash with pc == 0 since they carry no absolute pc. */
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
|
|
|
|
|
2021-06-29 22:31:19 +03:00
|
|
|
/* Might cause an exception, so have a longjmp destination ready */
|
2023-06-21 16:56:27 +03:00
|
|
|
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    /* Fast path: per-CPU jump cache. */
    tb = qatomic_read(&jc->array[hash].tb);
    if (likely(tb &&
               jc->array[hash].pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb_cflags(tb) == cflags)) {
        goto hit;
    }

    /* Slow path: global hash table; may raise via get_page_addr_code. */
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }

    /* Refill the jump cache for next time. */
    jc->array[hash].pc = pc;
    qatomic_set(&jc->array[hash].tb, tb);

hit:
    /*
     * As long as tb is not NULL, the contents are consistent.  Therefore,
     * the virtual PC has to match for non-CF_PCREL translations.
     */
    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
    return tb;
}
|
|
|
|
|
2023-06-21 16:56:27 +03:00
|
|
|
static void log_cpu_exec(vaddr pc, CPUState *cpu,
|
2022-08-15 23:16:06 +03:00
|
|
|
const TranslationBlock *tb)
|
2021-06-29 23:17:18 +03:00
|
|
|
{
|
2022-08-15 23:16:06 +03:00
|
|
|
if (qemu_log_in_addr_range(pc)) {
|
2021-06-29 23:17:18 +03:00
|
|
|
qemu_log_mask(CPU_LOG_EXEC,
|
2023-04-02 01:28:18 +03:00
|
|
|
"Trace %d: %p [%08" PRIx64
|
2023-07-17 13:05:08 +03:00
|
|
|
"/%016" VADDR_PRIx "/%08x/%08x] %s\n",
|
2021-06-30 18:31:46 +03:00
|
|
|
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
|
|
|
|
tb->flags, tb->cflags, lookup_symbol(pc));
|
2021-06-29 23:17:18 +03:00
|
|
|
|
|
|
|
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
|
2022-04-17 21:29:47 +03:00
|
|
|
FILE *logfile = qemu_log_trylock();
|
2022-04-17 21:29:49 +03:00
|
|
|
if (logfile) {
|
|
|
|
int flags = 0;
|
2021-06-29 23:17:18 +03:00
|
|
|
|
2022-04-17 21:29:49 +03:00
|
|
|
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
|
|
|
|
flags |= CPU_DUMP_FPU;
|
|
|
|
}
|
2021-06-29 23:17:18 +03:00
|
|
|
#if defined(TARGET_I386)
|
2022-04-17 21:29:49 +03:00
|
|
|
flags |= CPU_DUMP_CCOP;
|
2021-06-29 23:17:18 +03:00
|
|
|
#endif
|
2023-04-10 15:44:50 +03:00
|
|
|
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
|
|
|
|
flags |= CPU_DUMP_VPU;
|
|
|
|
}
|
2022-04-17 21:29:54 +03:00
|
|
|
cpu_dump_state(cpu, logfile, flags);
|
2022-04-17 21:29:49 +03:00
|
|
|
qemu_log_unlock(logfile);
|
|
|
|
}
|
2021-06-29 23:17:18 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-21 16:56:27 +03:00
|
|
|
/*
 * Scan the breakpoint list for @pc.  Returns true (with
 * cpu->exception_index set to EXCP_DEBUG) when a breakpoint fires;
 * otherwise may tighten *cflags to single-step through a page that
 * contains a breakpoint elsewhere.
 */
static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}
|
|
|
|
|
2023-06-21 16:56:27 +03:00
|
|
|
/*
 * Fast wrapper: only take the slow path when the vCPU actually has
 * breakpoints installed (the common case has none).
 */
static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
        return false;
    }
    return check_for_breakpoints_slow(cpu, pc, cflags);
}
|
|
|
|
|
2021-06-29 22:28:29 +03:00
|
|
|
/**
|
|
|
|
* helper_lookup_tb_ptr: quick check for next tb
|
|
|
|
* @env: current cpu state
|
|
|
|
*
|
|
|
|
* Look for an existing TB matching the current cpu state.
|
|
|
|
* If found, return the code pointer. If not found, return
|
|
|
|
* the tcg epilogue so that we return into cpu_tb_exec.
|
|
|
|
*/
|
|
|
|
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
|
|
|
|
{
|
|
|
|
CPUState *cpu = env_cpu(env);
|
|
|
|
TranslationBlock *tb;
|
2023-06-21 16:56:24 +03:00
|
|
|
vaddr pc;
|
|
|
|
uint64_t cs_base;
|
2021-07-19 22:03:21 +03:00
|
|
|
uint32_t flags, cflags;
|
2021-06-29 22:28:29 +03:00
|
|
|
|
|
|
|
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
|
|
|
|
|
2021-07-19 22:03:21 +03:00
|
|
|
cflags = curr_cflags(cpu);
|
|
|
|
if (check_for_breakpoints(cpu, pc, &cflags)) {
|
|
|
|
cpu_loop_exit(cpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
|
2021-06-29 22:28:29 +03:00
|
|
|
if (tb == NULL) {
|
|
|
|
return tcg_code_gen_epilogue;
|
|
|
|
}
|
2021-06-29 23:17:18 +03:00
|
|
|
|
2022-08-15 23:16:06 +03:00
|
|
|
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
|
|
|
|
log_cpu_exec(pc, cpu, tb);
|
|
|
|
}
|
2021-06-29 23:17:18 +03:00
|
|
|
|
2021-06-29 22:28:29 +03:00
|
|
|
return tb->tc.ptr;
|
|
|
|
}
|
|
|
|
|
2013-02-22 22:10:02 +04:00
|
|
|
/* Execute a TB, and fix up the CPU state afterwards if necessary */
|
cfi: Initial support for cfi-icall in QEMU
LLVM/Clang, supports runtime checks for forward-edge Control-Flow
Integrity (CFI).
CFI on indirect function calls (cfi-icall) ensures that, in indirect
function calls, the function called is of the right signature for the
pointer type defined at compile time.
For this check to work, the code must always respect the function
signature when using function pointer, the function must be defined
at compile time, and be compiled with link-time optimization.
This rules out, for example, shared libraries that are dynamically loaded
(given that functions are not known at compile time), and code that is
dynamically generated at run-time.
This patch:
1) Introduces the CONFIG_CFI flag to support cfi in QEMU
2) Introduces a decorator to allow the definition of "sensitive"
functions, where a non-instrumented function may be called at runtime
through a pointer. The decorator will take care of disabling cfi-icall
checks on such functions, when cfi is enabled.
3) Marks functions currently in QEMU that exhibit such behavior,
in particular:
- The function in TCG that calls pre-compiled TBs
- The function in TCI that interprets instructions
- Functions in the plugin infrastructures that jump to callbacks
- Functions in util that directly call a signal handler
Signed-off-by: Daniele Buono <dbuono@linux.vnet.ibm.com>
Acked-by: Alex Bennée <alex.bennee@linaro.org
Message-Id: <20201204230615.2392-3-dbuono@linux.vnet.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-12-05 02:06:12 +03:00
|
|
|
/*
|
|
|
|
* Disable CFI checks.
|
|
|
|
* TCG creates binary blobs at runtime, with the transformed code.
|
|
|
|
* A TB is a blob of binary code, created at runtime and called with an
|
|
|
|
* indirect function call. Since such function did not exist at compile time,
|
|
|
|
* the CFI runtime has no way to verify its signature and would fail.
|
|
|
|
* TCG is not considered a security-sensitive part of QEMU so this does not
|
|
|
|
* affect the impact of CFI in environment with high security requirements
|
|
|
|
*/
|
2020-10-29 23:18:12 +03:00
|
|
|
static inline TranslationBlock * QEMU_DISABLE_CFI
|
|
|
|
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
|
2013-02-22 22:10:02 +04:00
|
|
|
{
|
2023-09-14 03:22:49 +03:00
|
|
|
CPUArchState *env = cpu_env(cpu);
|
2016-04-21 15:58:23 +03:00
|
|
|
uintptr_t ret;
|
|
|
|
TranslationBlock *last_tb;
|
2020-10-28 22:05:44 +03:00
|
|
|
const void *tb_ptr = itb->tc.ptr;
|
2016-03-15 17:30:19 +03:00
|
|
|
|
2022-08-15 23:16:06 +03:00
|
|
|
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
|
|
|
|
log_cpu_exec(log_pc(cpu, itb), cpu, itb);
|
|
|
|
}
|
2013-11-06 11:29:39 +04:00
|
|
|
|
2021-01-13 06:28:07 +03:00
|
|
|
qemu_thread_jit_execute();
|
2016-04-21 15:58:23 +03:00
|
|
|
ret = tcg_qemu_tb_exec(env, tb_ptr);
|
2023-09-16 01:41:39 +03:00
|
|
|
cpu->neg.can_do_io = true;
|
2023-03-15 20:43:10 +03:00
|
|
|
qemu_plugin_disable_mem_helpers(cpu);
|
2020-10-29 23:18:12 +03:00
|
|
|
/*
|
|
|
|
* TODO: Delay swapping back to the read-write region of the TB
|
|
|
|
* until we actually need to modify the TB. The read-only copy,
|
|
|
|
* coming from the rx region, shares the same host TLB entry as
|
|
|
|
* the code that executed the exit_tb opcode that arrived here.
|
|
|
|
* If we insist on touching both the RX and the RW pages, we
|
|
|
|
* double the host TLB pressure.
|
|
|
|
*/
|
|
|
|
last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
|
|
|
|
*tb_exit = ret & TB_EXIT_MASK;
|
|
|
|
|
|
|
|
trace_exec_tb_exit(last_tb, *tb_exit);
|
2014-08-01 20:08:57 +04:00
|
|
|
|
2020-10-29 23:18:12 +03:00
|
|
|
if (*tb_exit > TB_EXIT_IDX1) {
|
2013-02-22 22:10:02 +04:00
|
|
|
/* We didn't start executing this TB (eg because the instruction
|
|
|
|
* counter hit zero); we must restore the guest PC to the address
|
|
|
|
* of the start of the TB.
|
|
|
|
*/
|
2013-06-28 21:31:32 +04:00
|
|
|
CPUClass *cc = CPU_GET_CLASS(cpu);
|
2022-08-15 23:16:06 +03:00
|
|
|
|
2021-02-04 19:39:23 +03:00
|
|
|
if (cc->tcg_ops->synchronize_from_tb) {
|
|
|
|
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
|
2013-06-28 21:31:32 +04:00
|
|
|
} else {
|
2023-02-27 16:51:39 +03:00
|
|
|
tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
|
2013-06-28 21:31:32 +04:00
|
|
|
assert(cc->set_pc);
|
2023-02-27 16:51:47 +03:00
|
|
|
cc->set_pc(cpu, last_tb->pc);
|
2022-08-15 23:16:06 +03:00
|
|
|
}
|
|
|
|
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
|
2023-06-21 16:56:27 +03:00
|
|
|
vaddr pc = log_pc(cpu, last_tb);
|
2022-08-15 23:16:06 +03:00
|
|
|
if (qemu_log_in_addr_range(pc)) {
|
2023-07-17 13:05:08 +03:00
|
|
|
qemu_log("Stopped execution of TB chain before %p [%016"
|
2023-06-21 16:56:27 +03:00
|
|
|
VADDR_PRIx "] %s\n",
|
2022-08-15 23:16:06 +03:00
|
|
|
last_tb->tc.ptr, pc, lookup_symbol(pc));
|
|
|
|
}
|
2013-06-28 21:31:32 +04:00
|
|
|
}
|
2013-02-22 22:10:02 +04:00
|
|
|
}
|
2021-07-19 04:12:12 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If gdb single-step, and we haven't raised another exception,
|
|
|
|
* raise a debug exception. Single-step with another exception
|
|
|
|
* is handled in cpu_handle_exception.
|
|
|
|
*/
|
|
|
|
if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
|
|
|
|
cpu->exception_index = EXCP_DEBUG;
|
|
|
|
cpu_loop_exit(cpu);
|
|
|
|
}
|
|
|
|
|
2020-10-29 23:18:12 +03:00
|
|
|
return last_tb;
|
2013-02-22 22:10:02 +04:00
|
|
|
}
|
|
|
|
|
2008-06-29 05:03:05 +04:00
|
|
|
|
2020-12-12 18:55:16 +03:00
|
|
|
/* Invoke the target's optional hook on entry to the execution loop. */
static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}
|
|
|
|
|
|
|
|
/* Invoke the target's optional hook on exit from the execution loop. */
static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}
|
|
|
|
|
2023-07-06 10:45:13 +03:00
|
|
|
/*
 * Release every lock that may still be held after a siglongjmp back
 * into the execution loop (mmap lock, translation page locks, BQL).
 */
static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather that just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (bql_locked()) {
        bql_unlock();
    }
    assert_no_pages_locked();
}
|
|
|
|
|
2020-12-12 18:55:16 +03:00
|
|
|
/*
 * Execute exactly one instruction for @cpu inside an exclusive region,
 * as a fallback for atomic operations the TCG backend cannot handle
 * concurrently.  The TB is generated with CF_PARALLEL cleared and an
 * instruction count of 1; a longjmp out of codegen or execution lands
 * in the sigsetjmp else-branch for cleanup.  The exclusive region is
 * entered before codegen and always exited before returning.
 */
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        /* Longjmp from codegen or execution: release locks and the BQL. */
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}
|
|
|
|
|
2017-08-01 08:02:31 +03:00
|
|
|
/*
 * Redirect outgoing jump slot @n of @tb to @addr: store the new
 * destination in jmp_target_addr[n], then patch the jump instruction
 * itself in the generated code.
 */
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Work on the rx (executable) view of the structure: the code runs
     * from there, and tb_target_set_jmp_target may emit a pc-relative
     * displacement to jmp_target_addr[n].
     */
    const TranslationBlock *rx_tb = tcg_splitwx_to_rx(tb);
    uintptr_t rx_insn = (uintptr_t)tb->tc.ptr + tb->jmp_insn_offset[n];
    uintptr_t rw_insn = rx_insn - tcg_splitwx_diff;

    /* Publish the destination before patching the jump instruction. */
    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(rx_tb, n, rx_insn, rw_insn);
}
|
|
|
|
|
|
|
|
/*
 * Link outgoing jump slot @n of @tb directly to @tb_next, so execution
 * can chain between the two TBs without returning to the main loop.
 * The destination's jmp_lock protects its incoming-jump list and its
 * cflags; the jmp_dest[n] slot is claimed with a cmpxchg so only one
 * thread patches the jump.  Linking is skipped if @tb_next has been
 * invalidated or the slot is already claimed.
 */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list; low bits of the link encode the slot index n */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
|
|
|
|
|
2016-05-11 13:21:47 +03:00
|
|
|
/*
 * Decide whether a halted CPU should remain halted.  Returns true when
 * the CPU is halted and has no work, meaning the caller should exit the
 * execution loop; otherwise clears the halted state (if set) and
 * returns false.  A no-op returning false in user-only mode.
 */
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (!cpu->halted) {
        return false;
    }
#if defined(TARGET_I386)
    /* Fold a pending APIC poll into a real interrupt before checking work. */
    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        X86CPU *x86_cpu = X86_CPU(cpu);

        bql_lock();
        apic_poll_irq(x86_cpu->apic_state);
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        bql_unlock();
    }
#endif /* TARGET_I386 */
    /* Nothing to do: stay halted and tell the caller to stop executing. */
    if (!cpu_has_work(cpu)) {
        return true;
    }
    /* Work has arrived; wake the CPU. */
    cpu->halted = 0;
#endif /* !CONFIG_USER_ONLY */
    return false;
}
|
|
|
|
|
2016-05-11 13:21:48 +03:00
|
|
|
/*
 * Post-process an EXCP_DEBUG: clear stale watchpoint hit flags (unless
 * a watchpoint hit is currently being reported) and invoke the target's
 * optional debug exception hook.
 */
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (!cpu->watchpoint_hit) {
        CPUWatchpoint *watch;

        /* Reset per-watchpoint hit flags so the next trigger is clean. */
        QTAILQ_FOREACH(watch, &cpu->watchpoints, entry) {
            watch->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    /* Let the target perform any architecture-specific handling. */
    if (klass->tcg_ops->debug_excp_handler) {
        klass->tcg_ops->debug_excp_handler(cpu);
    }
}
|
|
|
|
|
2016-05-11 13:21:48 +03:00
|
|
|
/*
 * Handle a pending exception on @cpu, if any.  Returns true when the
 * caller should leave the execution loop with *ret set to the exit
 * code; returns false to continue executing (possibly after arming a
 * one-insn TB via cflags_next_tb to replay a logged exception).
 * Exception indices >= EXCP_INTERRUPT are loop-exit requests; smaller
 * ones are real guest exceptions dispatched to the target hooks.
 */
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        /* replay_exception() decides if the logged exception fires now. */
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            /* The target interrupt hook runs under the BQL. */
            bql_lock();
            cc->tcg_ops->do_interrupt(cpu);
            bql_unlock();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}
|
|
|
|
|
2021-09-11 19:54:33 +03:00
|
|
|
#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes, so report false for it on i386; every other
 * interrupt (and every interrupt on other targets) must be logged.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#ifdef TARGET_I386
    return (interrupt_request & CPU_INTERRUPT_POLL) == 0;
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */
|
2020-10-03 20:12:51 +03:00
|
|
|
|
2024-01-24 13:16:34 +03:00
|
|
|
/*
 * Return true when the icount instruction budget is exhausted and the
 * CPU must exit to the main loop to refill it.  Always false when
 * icount is disabled, or when the next TB runs with custom cflags that
 * omit CF_USE_ICOUNT (such a TB does not consume the budget).
 */
static inline bool icount_exit_request(CPUState *cpu)
{
    if (!icount_enabled()) {
        return false;
    }
    if (cpu->cflags_next_tb == -1 || (cpu->cflags_next_tb & CF_USE_ICOUNT)) {
        /* Budget is spent when both the decrementer and extra pool are 0. */
        return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
    }
    return false;
}
|
|
|
|
|
2017-01-27 12:57:18 +03:00
|
|
|
/*
 * Process pending interrupt and exit requests for @cpu.  Returns true
 * when the caller must leave the execution loop (exception_index set
 * accordingly); returns false to keep executing TBs.  *last_tb is
 * cleared whenever program flow changed so no stale TB chaining occurs.
 * Interrupt-request handling runs under the BQL; the icount_decr high
 * half is cleared with a full barrier before reading the request flags,
 * matching the smp_wmb in cpu_exit().
 */
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        bql_lock();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            bql_unlock();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            bql_unlock();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            bql_unlock();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit. */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    bql_unlock();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        bql_unlock();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
|
|
|
|
|
2016-05-11 13:21:50 +03:00
|
|
|
/*
 * Execute one translation block and handle the icount bookkeeping
 * that may be required afterwards.
 *
 * On a normal exit, *last_tb is set to the block just executed so the
 * caller can try to chain it to the next one.  If execution was
 * interrupted (TB_EXIT_REQUESTED), *last_tb is cleared so no direct
 * jump gets patched in.
 */
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    vaddr pc, TranslationBlock **last_tb,
                                    int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    /* Exit was requested: prevent chaining from the interrupted block. */
    *last_tb = NULL;
    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution.  The decrementer is
     * 16 bits wide, so cap the refill at 0xffff and keep the remainder
     * of the budget in icount_extra. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        /* Request an exact-length TB for the next execution. */
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}
|
|
|
|
|
2003-03-07 02:23:54 +03:00
|
|
|
/* main execution loop */
|
|
|
|
|
2023-01-07 21:12:51 +03:00
|
|
|
/*
 * Core execution loop: repeatedly look up (or generate) the translation
 * block for the current CPU state, chain it to the previous one when
 * possible, and execute it.
 *
 * The outer loop runs until cpu_handle_exception() signals that the loop
 * should exit, at which point it has stored the exit code in @ret.  The
 * inner loop runs until an interrupt or exit request is pending.
 *
 * Marked noinline so the sigsetjmp frame in cpu_exec_setjmp() stays
 * small and separate from this large function.
 */
static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                /* One-shot request: consume it. */
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                /* No cached TB: translate one under the mmap lock. */
                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                /* Publish pc before tb so concurrent readers see a
                 * consistent pair. */
                jc->array[h].pc = pc;
                qatomic_set(&jc->array[h].tb, tb);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}
|
|
|
|
|
|
|
|
/*
 * Establish the sigsetjmp landing pad used by cpu_loop_exit() and
 * friends, then run the execution loop.
 *
 * A siglongjmp back to cpu->jmp_env (non-zero sigsetjmp return) lands
 * here; after cleaning up any state left by the aborted execution we
 * simply re-enter cpu_exec_loop().
 */
static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        cpu_exec_longjmp_cleanup(cpu);
    }

    return cpu_exec_loop(cpu, sc);
}
|
|
|
|
|
|
|
|
/*
 * Top-level entry point for TCG execution of one vCPU.
 *
 * Returns the exception code that caused the loop to exit, or
 * EXCP_HALTED when the CPU is (and stays) halted.
 */
int cpu_exec(CPUState *cpu)
{
    SyncClocks clocks = { 0 };
    int rc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    /* Hold the RCU read lock for the whole execution run. */
    RCU_READ_LOCK_GUARD();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&clocks, cpu);

    rc = cpu_exec_setjmp(cpu, &clocks);

    cpu_exec_exit(cpu);
    return rc;
}
|
2020-08-19 14:17:19 +03:00
|
|
|
|
2023-10-03 15:30:24 +03:00
|
|
|
/*
 * Per-vCPU TCG realize hook: perform one-time backend initialization on
 * the first call, then set up this CPU's jump cache, TLB and (in system
 * mode) IOMMU notifier list.  Always succeeds.
 */
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    /* Shared across all vCPUs; the target is initialized exactly once. */
    static bool target_initialized;
    CPUClass *cpu_class = CPU_GET_CLASS(cpu);

    if (!target_initialized) {
        cpu_class->tcg_ops->initialize();
        target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */

    return true;
}
|
|
|
|
|
|
|
|
/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    /* Freed via RCU: readers may still hold a reference to the cache. */
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}
|