2017-06-02 09:06:43 +03:00
|
|
|
/*
|
|
|
|
* QEMU System Emulator, accelerator interfaces
|
|
|
|
*
|
|
|
|
* Copyright (c) 2003-2008 Fabrice Bellard
|
|
|
|
* Copyright (c) 2014 Red Hat Inc.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "sysemu/accel.h"
|
2019-05-23 17:35:05 +03:00
|
|
|
#include "sysemu/tcg.h"
|
2017-06-02 09:06:43 +03:00
|
|
|
#include "qom/object.h"
|
2019-03-29 00:54:23 +03:00
|
|
|
#include "cpu.h"
|
2017-07-04 16:57:28 +03:00
|
|
|
#include "sysemu/cpus.h"
|
|
|
|
#include "qemu/main-loop.h"
|
2019-11-14 12:40:27 +03:00
|
|
|
#include "tcg/tcg.h"
|
2020-01-21 14:03:49 +03:00
|
|
|
#include "qapi/error.h"
|
|
|
|
#include "qemu/error-report.h"
|
|
|
|
#include "hw/boards.h"
|
2019-11-13 17:16:44 +03:00
|
|
|
#include "qapi/qapi-builtin-visit.h"
|
2019-11-13 12:36:01 +03:00
|
|
|
|
2020-09-03 23:43:22 +03:00
|
|
|
/*
 * Per-instance state of the TCG accelerator QOM object.
 * One instance exists per machine; it is configured via the
 * "thread" and "tb-size" accelerator properties below.
 */
struct TCGState {
    AccelState parent_obj;          /* must be first: QOM parent object */

    bool mttcg_enabled;             /* true = one vCPU thread per guest CPU */
    unsigned long tb_size;          /* translation-block cache size in MiB;
                                     * 0 means "use the default" */
};
typedef struct TCGState TCGState;
|
2019-11-13 12:36:01 +03:00
|
|
|
|
|
|
|
/* QOM type name of the TCG accelerator ("tcg-accel"). */
#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg")

/* Checked downcast from a QOM object to TCGState. */
#define TCG_STATE(obj) \
        OBJECT_CHECK(TCGState, (obj), TYPE_TCG_ACCEL)
|
2017-06-02 09:06:43 +03:00
|
|
|
|
2017-07-04 16:57:28 +03:00
|
|
|
/* mask must never be zero, except for A20 change call */
/*
 * Raise the interrupt(s) in @mask on @cpu.  Installed as
 * cpu_interrupt_handler by tcg_init(), so this runs for every
 * cpu_interrupt() call while TCG is the active accelerator.
 * Caller must hold the iothread (BQL) lock.
 */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;
    g_assert(qemu_mutex_iothread_locked());

    /* Remember the previous pending set so we can detect newly raised bits. */
    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        /*
         * Raised on our own vCPU thread: setting icount_decr.u16.high to -1
         * makes the execution loop leave the current translation block so the
         * interrupt is noticed promptly.
         */
        atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
        if (use_icount &&
            !cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            /*
             * With icount, raising a *new* interrupt bit outside of an I/O
             * instruction would make execution non-deterministic; treat it
             * as a fatal guest-CPU error.
             */
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    }
}
|
|
|
|
|
2019-11-14 12:40:27 +03:00
|
|
|
/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG.  Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check:
 *   - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 *   - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

/*
 * Return true when the host's default memory order is at least as
 * strong as the guest's, i.e. the guest requires no ordering bit
 * that the host does not provide.
 */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    /* Any guest-required bit missing from the host makes this false. */
    return !(TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO);
#else
    /* Without both declarations we cannot prove compatibility. */
    return false;
#endif
}
|
|
|
|
|
|
|
|
static bool default_mttcg_enabled(void)
|
|
|
|
{
|
|
|
|
if (use_icount || TCG_OVERSIZED_GUEST) {
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
#ifdef TARGET_SUPPORTS_MTTCG
|
|
|
|
return check_tcg_memory_orders_compatible();
|
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * QOM instance_init hook: seed the threading mode with the
 * computed default; the "thread" property may override it later.
 */
static void tcg_accel_instance_init(Object *obj)
{
    TCGState *tcg = TCG_STATE(obj);

    tcg->mttcg_enabled = default_mttcg_enabled();
}
|
|
|
|
|
2017-06-02 09:06:43 +03:00
|
|
|
/*
 * AccelClass::init_machine hook for TCG.  Sizes and initializes the
 * translation cache, installs the TCG interrupt handler, and publishes
 * the chosen threading mode.  Always succeeds.
 */
static int tcg_init(MachineState *ms)
{
    TCGState *tcg = TCG_STATE(current_accel());
    unsigned long cache_bytes = tcg->tb_size * 1024 * 1024; /* MiB -> bytes */

    tcg_exec_init(cache_bytes);
    cpu_interrupt_handler = tcg_handle_interrupt;
    mttcg_enabled = tcg->mttcg_enabled;
    return 0;
}
|
|
|
|
|
2019-11-13 12:36:01 +03:00
|
|
|
/*
 * Getter for the "thread" property.  Returns a newly allocated
 * string ("multi" or "single"); the caller owns and frees it.
 */
static char *tcg_get_thread(Object *obj, Error **errp)
{
    TCGState *tcg = TCG_STATE(obj);
    const char *mode = tcg->mttcg_enabled ? "multi" : "single";

    return g_strdup(mode);
}
|
|
|
|
|
|
|
|
/*
 * Setter for the "thread" property.  Accepts "single" or "multi";
 * anything else, or requesting "multi" in an unsupported
 * configuration (oversized guest, icount), sets @errp.
 */
static void tcg_set_thread(Object *obj, const char *value, Error **errp)
{
    TCGState *tcg = TCG_STATE(obj);

    if (strcmp(value, "single") == 0) {
        tcg->mttcg_enabled = false;
        return;
    }
    if (strcmp(value, "multi") != 0) {
        error_setg(errp, "Invalid 'thread' setting %s", value);
        return;
    }

    /* "multi" requested: reject configurations MTTCG cannot support. */
    if (TCG_OVERSIZED_GUEST) {
        error_setg(errp, "No MTTCG when guest word size > hosts");
        return;
    }
    if (use_icount) {
        error_setg(errp, "No MTTCG when icount is enabled");
        return;
    }

#ifndef TARGET_SUPPORTS_MTTCG
    warn_report("Guest not yet converted to MTTCG - "
                "you may get unexpected results");
#endif
    if (!check_tcg_memory_orders_compatible()) {
        /* Allowed, but warn: weaker host ordering may misbehave. */
        warn_report("Guest expects a stronger memory ordering "
                    "than the host provides");
        error_printf("This may cause strange/hard to debug errors\n");
    }
    tcg->mttcg_enabled = true;
}
|
|
|
|
|
2019-11-13 17:16:44 +03:00
|
|
|
static void tcg_get_tb_size(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
TCGState *s = TCG_STATE(obj);
|
|
|
|
uint32_t value = s->tb_size;
|
|
|
|
|
|
|
|
visit_type_uint32(v, name, &value, errp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_set_tb_size(Object *obj, Visitor *v,
|
|
|
|
const char *name, void *opaque,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
TCGState *s = TCG_STATE(obj);
|
|
|
|
uint32_t value;
|
|
|
|
|
error: Eliminate error_propagate() with Coccinelle, part 1
When all we do with an Error we receive into a local variable is
propagating to somewhere else, we can just as well receive it there
right away. Convert
if (!foo(..., &err)) {
...
error_propagate(errp, err);
...
return ...
}
to
if (!foo(..., errp)) {
...
...
return ...
}
where nothing else needs @err. Coccinelle script:
@rule1 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
binary operator op;
constant c1, c2;
symbol false;
@@
if (
(
- fun(args, &err, args2)
+ fun(args, errp, args2)
|
- !fun(args, &err, args2)
+ !fun(args, errp, args2)
|
- fun(args, &err, args2) op c1
+ fun(args, errp, args2) op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
)
}
@rule2 forall@
identifier fun, err, errp, lbl;
expression list args, args2;
expression var;
binary operator op;
constant c1, c2;
symbol false;
@@
- var = fun(args, &err, args2);
+ var = fun(args, errp, args2);
... when != err
if (
(
var
|
!var
|
var op c1
)
)
{
... when != err
when != lbl:
when strict
- error_propagate(errp, err);
... when != err
(
return;
|
return c2;
|
return false;
|
return var;
)
}
@depends on rule1 || rule2@
identifier err;
@@
- Error *err = NULL;
... when != err
Not exactly elegant, I'm afraid.
The "when != lbl:" is necessary to avoid transforming
if (fun(args, &err)) {
goto out
}
...
out:
error_propagate(errp, err);
even though other paths to label out still need the error_propagate().
For an actual example, see sclp_realize().
Without the "when strict", Coccinelle transforms vfio_msix_setup(),
incorrectly. I don't know what exactly "when strict" does, only that
it helps here.
The match of return is narrower than what I want, but I can't figure
out how to express "return where the operand doesn't use @err". For
an example where it's too narrow, see vfio_intx_enable().
Silently fails to convert hw/arm/armsse.c, because Coccinelle gets
confused by ARMSSE being used both as typedef and function-like macro
there. Converted manually.
Line breaks tidied up manually. One nested declaration of @local_err
deleted manually. Preexisting unwanted blank line dropped in
hw/riscv/sifive_e.c.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200707160613.848843-35-armbru@redhat.com>
2020-07-07 19:06:02 +03:00
|
|
|
if (!visit_type_uint32(v, name, &value, errp)) {
|
2019-11-13 17:16:44 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
s->tb_size = value;
|
|
|
|
}
|
|
|
|
|
2017-06-02 09:06:43 +03:00
|
|
|
/*
 * QOM class_init hook: wire up the AccelClass callbacks and register
 * the user-configurable "thread" and "tb-size" class properties.
 */
static void tcg_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *acc = ACCEL_CLASS(oc);

    acc->name = "tcg";
    acc->init_machine = tcg_init;
    acc->allowed = &tcg_allowed;

    /* -accel tcg,thread=single|multi */
    object_class_property_add_str(oc, "thread",
                                  tcg_get_thread,
                                  tcg_set_thread);

    /* -accel tcg,tb-size=<MiB> */
    object_class_property_add(oc, "tb-size", "int",
        tcg_get_tb_size, tcg_set_tb_size,
        NULL, NULL);
    object_class_property_set_description(oc, "tb-size",
        "TCG translation block cache size");
}
|
2017-06-02 09:06:43 +03:00
|
|
|
|
|
|
|
/* QOM type registration record for the TCG accelerator. */
static const TypeInfo tcg_accel_type = {
    .name = TYPE_TCG_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = tcg_accel_instance_init,
    .class_init = tcg_accel_class_init,
    .instance_size = sizeof(TCGState),
};
|
|
|
|
|
|
|
|
/* Register the TCG accelerator type with the QOM type system. */
static void register_accel_types(void)
{
    type_register_static(&tcg_accel_type);
}

/* Run the registration at module-init time. */
type_init(register_accel_types);
|