/* qemu/qom/cpu.c */

/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qom/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "exec/log.h"
#include "exec/cpu-common.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"
CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }

    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}
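
/*
 * Illustrative usage, not from this file: board code typically creates its
 * vCPUs from the machine's configured CPU type, e.g.
 *
 *     CPUState *cpu = cpu_create(machine->cpu_type);
 *
 * cpu_create() exits QEMU on a realize failure, so callers need no error
 * handling of their own.
 */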
CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));

    object_property_set_bool(OBJECT(cpu), true, "realized", &err);
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
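
/*
 * cpu_exit() can be called from any thread. The write barrier orders the
 * exit_request store before the icount_decr store that translated code
 * polls: storing -1 in icount_decr.u16.high makes the per-TB decrementer
 * test go negative, so a TCG vCPU leaves the execution loop at the next
 * translation-block boundary and is then guaranteed to see exit_request.
 */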
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    atomic_set(&cpu->icount_decr.u16.high, -1);
}

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}

bool target_words_bigendian(void);
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}

GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}
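
/*
 * cpu_dump_state() backs the monitor's "info registers" command: it pulls
 * current register state from the accelerator (e.g. KVM) first, then lets
 * the target-specific hook format it.
 */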
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, cpu_fprintf, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, f, cpu_fprintf, flags);
    }
}

void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}

static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    cpu->icount_decr.u32 = 0;
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);
        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}
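
/*
 * Resolves a user-supplied "-cpu" model name to a QOM class; illustratively
 * (on an x86-64 target, not from this file), "qemu64" resolves to the class
 * registered as "qemu64-x86_64-cpu".
 */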
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    assert(cpu_model && cc->class_by_name);

    return cc->class_by_name(cpu_model);
}
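
/*
 * Illustrative example, not from this file: for "-cpu foo,bar=on,baz=2" the
 * target parser hands this function the string "bar=on,baz=2", and each
 * key=value pair is registered as a global property, just as if the user
 * had passed "-global <typename>.bar=on -global <typename>.baz=2".
 */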
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            prop->errp = &error_fatal;
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);

    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}
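
/*
 * QOM note: instance_init runs from object_new(), before any properties
 * are set and before realize, so only self-contained defaults belong
 * here; anything that depends on properties or the machine goes in
 * realize.
 */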
static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}
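
/*
 * cpu_interrupt() dispatches through the cpu_interrupt_handler pointer
 * below, so the generic behaviour (set the request bits, kick the vCPU if
 * the interrupt is raised from another thread) can be replaced at startup
 * by target or accelerator code that needs to do more.
 */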
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
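
/*
 * cpu_class_init() below points dc->props at cpu_common_props, whose
 * definition did not survive in this capture. A reconstruction sketch,
 * consistent with QEMU code of this vintage (exact contents may differ):
 */
static Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU objects so users can wire
     * up their memory; the default, if no link is set up, is the system
     * address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};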

static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}
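
/*
 * Illustrative sketch, not from this file: each target derives its CPU
 * type from TYPE_CPU with its own TypeInfo, roughly along these lines:
 *
 *     static const TypeInfo arm_cpu_type_info = {
 *         .name = TYPE_ARM_CPU,
 *         .parent = TYPE_CPU,
 *         .instance_size = sizeof(ARMCPU),
 *         .instance_init = arm_cpu_initfn,
 *         .class_size = sizeof(ARMCPUClass),
 *         .class_init = arm_cpu_class_init,
 *     };
 */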
static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)