/*
 * QEMU System Emulator, accelerator interfaces
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "exec/replay-core.h"
#include "sysemu/cpu-timers.h"
#include "tcg/startup.h"
#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "internal-target.h"

struct TCGState {
    AccelState parent_obj;

    bool mttcg_enabled;
    bool one_insn_per_tb;
    int splitwx_enabled;
    unsigned long tb_size;
};
typedef struct TCGState TCGState;

#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
                         TYPE_TCG_ACCEL)

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise, once a
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * it can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there is one remaining limitation to check:
 *   - The guest can't be oversized (e.g. a 64-bit guest on a 32-bit host)
 */
static bool default_mttcg_enabled(void)
{
    if (icount_enabled() || TCG_OVERSIZED_GUEST) {
        return false;
    }
#ifdef TARGET_SUPPORTS_MTTCG
# ifndef TCG_GUEST_DEFAULT_MO
#  error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
# endif
    return true;
#else
    return false;
#endif
}
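
/*
 * Instance init: choose the property defaults before any user-supplied
 * -accel options are applied to the object.
 */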
static void tcg_accel_instance_init(Object *obj)
{
    TCGState *s = TCG_STATE(obj);

    s->mttcg_enabled = default_mttcg_enabled();

    /* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
    s->splitwx_enabled = -1;
#else
    s->splitwx_enabled = 0;
#endif
}
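
/*
 * Globals mirrored from the accelerator properties; the rest of the TCG
 * code reads these rather than looking up the TCGState instance.
 */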
bool mttcg_enabled;
bool one_insn_per_tb;
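
/*
 * AccelClass::init_machine hook: runs once the machine and -accel options
 * are known, and brings up the translator (code buffer, TB hash table,
 * and, for system emulation, the prologue).
 */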
static int tcg_init_machine(MachineState *ms)
{
    TCGState *s = TCG_STATE(current_accel());
#ifdef CONFIG_USER_ONLY
    unsigned max_cpus = 1;
#else
    unsigned max_cpus = ms->smp.max_cpus;
#endif

    tcg_allowed = true;
    mttcg_enabled = s->mttcg_enabled;

    page_init();
    tb_htable_init();
    tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);

#if defined(CONFIG_SOFTMMU)
    /*
     * There's no guest base to take into account, so go ahead and
     * initialize the prologue now.
     */
    tcg_prologue_init();
#endif

    return 0;
}
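
/*
 * "thread" property: selects the single-threaded round-robin TCG loop
 * ("single") or one host thread per guest vCPU ("multi", i.e. MTTCG).
 */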
static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    return g_strdup(s->mttcg_enabled ? "multi" : "single");
}

static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);

    if (strcmp(value, "multi") == 0) {
        if (TCG_OVERSIZED_GUEST) {
            error_setg(errp, "No MTTCG when guest word size > host's");
        } else if (icount_enabled()) {
            error_setg(errp, "No MTTCG when icount is enabled");
        } else {
#ifndef TARGET_SUPPORTS_MTTCG
            warn_report("Guest not yet converted to MTTCG - "
                        "you may get unexpected results");
#endif
            s->mttcg_enabled = true;
        }
    } else if (strcmp(value, "single") == 0) {
        s->mttcg_enabled = false;
    } else {
        error_setg(errp, "Invalid 'thread' setting %s", value);
    }
}
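
/*
 * "tb-size" property: size of the translation block cache in MiB; leaving
 * it at 0 lets TCG pick its built-in default.
 */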
static void tcg_get_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value = s->tb_size;

    visit_type_uint32(v, name, &value, errp);
}

static void tcg_set_tb_size(Object *obj, Visitor *v,
                            const char *name, void *opaque,
                            Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->tb_size = value;
}
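
/*
 * "split-wx" property: when enabled, the JIT code buffer is mapped twice,
 * once writable (RW) and once executable (RX), instead of a single
 * writable+executable region.
 */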
static bool tcg_get_splitwx(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->splitwx_enabled;
}

static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->splitwx_enabled = value;
}
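
/*
 * "one-insn-per-tb" property: limit each translation block to a single
 * guest instruction, mainly useful for debugging and fine-grained
 * execution tracing.
 */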
static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    return s->one_insn_per_tb;
}

static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
{
    TCGState *s = TCG_STATE(obj);
    s->one_insn_per_tb = value;
    /* Set the global also: this changes the behaviour */
    qatomic_set(&one_insn_per_tb, value);
}
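
/* Report which single-step suppression flags the gdbstub may offer. */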
static int tcg_gdbstub_supported_sstep_flags(void)
{
    /*
     * In replay mode all events come from the log and can't be suppressed;
     * otherwise we would break determinism. However, as those events are
     * tied to the number of executed instructions, we won't see them occur
     * on every single step.
     */
    if (replay_mode != REPLAY_MODE_NONE) {
        return SSTEP_ENABLE;
    } else {
        return SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER;
    }
}
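
/*
 * Class init: wire up the AccelClass hooks and expose the QOM properties.
 * Illustrative usage (property names are real, the values are examples):
 *     -accel tcg,thread=single,tb-size=256,split-wx=on,one-insn-per-tb=on
 */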
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "tcg";
    ac->init_machine = tcg_init_machine;
    ac->cpu_common_realize = tcg_exec_realizefn;
    ac->cpu_common_unrealize = tcg_exec_unrealizefn;
    ac->allowed = &tcg_allowed;
    ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;

    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    object_class_property_add(oc, "tb-size", "int",
        tcg_get_tb_size, tcg_set_tb_size,
        NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");

    object_class_property_add_bool(oc, "split-wx",
        tcg_get_splitwx, tcg_set_splitwx);
    object_class_property_set_description(oc, "split-wx",
        "Map jit pages into separate RW and RX regions");

    object_class_property_add_bool(oc, "one-insn-per-tb",
                                   tcg_get_one_insn_per_tb,
                                   tcg_set_one_insn_per_tb);
    object_class_property_set_description(oc, "one-insn-per-tb",
        "Only put one guest insn in each translation block");
}
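
/* QOM glue: the "tcg" accelerator type, registered at startup via type_init(). */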
static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};
module_obj(TYPE_TCG_ACCEL);

static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

type_init(register_accel_types);