Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* more of Peter Crosthwaite's multiarch preparation patches
* unlocked MMIO support in KVM
* support for compilation with ICC

# gpg: Signature made Mon Jul  6 13:59:20 2015 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  exec: skip MMIO regions correctly in cpu_physical_memory_write_rom_internal
  Stop including qemu-common.h in memory.h
  kvm: Switch to unlocked MMIO
  acpi: mark PMTIMER as unlocked
  kvm: Switch to unlocked PIO
  kvm: First step to push iothread lock out of inner run loop
  memory: let address_space_rw/ld*/st* run outside the BQL
  exec: pull qemu_flush_coalesced_mmio_buffer() into address_space_rw/ld*/st*
  memory: Add global-locking property to memory regions
  main-loop: introduce qemu_mutex_iothread_locked
  main-loop: use qemu_mutex_lock_iothread consistently
  Fix irq route entries exceeding KVM_MAX_IRQ_ROUTES
  cpu-defs: Move out TB_JMP defines
  include/exec: Move tb hash functions out
  include/exec: Move standard exceptions to cpu-all.h
  cpu-defs: Move CPU_TEMP_BUF_NLONGS to tcg
  memory_mapping: Rework cpu related includes
  cutils: allow compilation with icc
  qemu-common: add VEC_OR macro

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 7edd8e4660 (Peter Maydell, 2015-07-06 14:03:44 +01:00)
26 changed files with 299 additions and 73 deletions

@ -27,6 +27,7 @@
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
/* -icount align implementation. */

cpus.c

@ -954,7 +954,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
CPUState *cpu = arg;
int r;
qemu_mutex_lock(&qemu_global_mutex);
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
@ -1034,10 +1034,10 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
qemu_mutex_lock_iothread();
qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
qemu_mutex_lock(&qemu_global_mutex);
CPU_FOREACH(cpu) {
cpu->thread_id = qemu_get_thread_id();
cpu->created = true;
@ -1146,10 +1146,21 @@ bool qemu_in_vcpu_thread(void)
return current_cpu && qemu_cpu_is_self(current_cpu);
}
static __thread bool iothread_locked = false;
bool qemu_mutex_iothread_locked(void)
{
return iothread_locked;
}
void qemu_mutex_lock_iothread(void)
{
atomic_inc(&iothread_requesting_mutex);
if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
/* In the simple case there is no need to bump the VCPU thread out of
* TCG code execution.
*/
if (!tcg_enabled() || qemu_in_vcpu_thread() ||
!first_cpu || !first_cpu->thread) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
} else {
@ -1160,10 +1171,12 @@ void qemu_mutex_lock_iothread(void)
atomic_dec(&iothread_requesting_mutex);
qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
iothread_locked = true;
}
void qemu_mutex_unlock_iothread(void)
{
iothread_locked = false;
qemu_mutex_unlock(&qemu_global_mutex);
}
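
The pair qemu_mutex_iothread_locked() / iothread_locked gives code that may run with or without the BQL a cheap way to take the lock conditionally. A minimal sketch of the caller pattern, mirroring what prepare_mmio_access() in exec.c below does (the helper name is illustrative, not part of the patch):

/* Sketch only; assumes "qemu/main-loop.h" for the lock primitives. */
static void with_iothread_lock(void (*fn)(void *), void *opaque)
{
    bool release_lock = false;

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();    /* caller did not hold the BQL */
        release_lock = true;
    }
    fn(opaque);                        /* runs under the BQL either way */
    if (release_lock) {
        qemu_mutex_unlock_iothread();  /* restore the caller's lock state */
    }
}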

exec.c

@ -48,6 +48,7 @@
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"
@ -352,6 +353,18 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
*xlat = addr + section->offset_within_region;
mr = section->mr;
/* MMIO registers can be expected to perform full-width accesses based only
* on their address, without considering adjacent registers that could
* decode to completely different MemoryRegions. When such registers
* exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
* regions overlap wildly. For this reason we cannot clamp the accesses
* here.
*
* If the length is small (as is the case for address_space_ldl/stl),
* everything works fine. If the incoming length is large, however,
* the caller really has to do the clamping through memory_access_size.
*/
if (memory_region_is_ram(mr)) {
diff = int128_sub(section->size, int128_make64(addr));
*plen = int128_get64(int128_min(diff, int128_make64(*plen)));
@ -2316,6 +2329,29 @@ static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
return l;
}
static bool prepare_mmio_access(MemoryRegion *mr)
{
bool unlocked = !qemu_mutex_iothread_locked();
bool release_lock = false;
if (unlocked && mr->global_locking) {
qemu_mutex_lock_iothread();
unlocked = false;
release_lock = true;
}
if (mr->flush_coalesced_mmio) {
if (unlocked) {
qemu_mutex_lock_iothread();
}
qemu_flush_coalesced_mmio_buffer();
if (unlocked) {
qemu_mutex_unlock_iothread();
}
}
return release_lock;
}
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
uint8_t *buf, int len, bool is_write)
{
@ -2325,6 +2361,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
hwaddr addr1;
MemoryRegion *mr;
MemTxResult result = MEMTX_OK;
bool release_lock = false;
rcu_read_lock();
while (len > 0) {
@ -2333,6 +2370,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
if (is_write) {
if (!memory_access_is_direct(mr, is_write)) {
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
/* XXX: could force current_cpu to NULL to avoid
potential bugs */
@ -2374,6 +2412,7 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
} else {
if (!memory_access_is_direct(mr, is_write)) {
/* I/O case */
release_lock |= prepare_mmio_access(mr);
l = memory_access_size(mr, l, addr1);
switch (l) {
case 8:
@ -2409,6 +2448,12 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
memcpy(buf, ptr, l);
}
}
if (release_lock) {
qemu_mutex_unlock_iothread();
release_lock = false;
}
len -= l;
buf += l;
addr += l;
@ -2458,7 +2503,7 @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
if (!(memory_region_is_ram(mr) ||
memory_region_is_romd(mr))) {
/* do nothing */
l = memory_access_size(mr, l, addr1);
} else {
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
@ -2735,10 +2780,13 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, false);
if (l < 4 || !memory_access_is_direct(mr, false)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
@ -2771,6 +2819,9 @@ static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
return val;
}
@ -2823,11 +2874,14 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
hwaddr l = 8;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 8 || !memory_access_is_direct(mr, false)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
@ -2860,6 +2914,9 @@ static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
return val;
}
@ -2932,11 +2989,14 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
hwaddr l = 2;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
false);
if (l < 2 || !memory_access_is_direct(mr, false)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
@ -2969,6 +3029,9 @@ static inline uint32_t address_space_lduw_internal(AddressSpace *as,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
return val;
}
@ -3021,11 +3084,14 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
hwaddr addr1;
MemTxResult r;
uint8_t dirty_log_mask;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
} else {
addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
@ -3040,6 +3106,9 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
}
@ -3060,11 +3129,14 @@ static inline void address_space_stl_internal(AddressSpace *as,
hwaddr l = 4;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l,
true);
if (l < 4 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
@ -3096,6 +3168,9 @@ static inline void address_space_stl_internal(AddressSpace *as,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
}
@ -3165,10 +3240,13 @@ static inline void address_space_stw_internal(AddressSpace *as,
hwaddr l = 2;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
rcu_read_lock();
mr = address_space_translate(as, addr, &addr1, &l, true);
if (l < 2 || !memory_access_is_direct(mr, true)) {
release_lock |= prepare_mmio_access(mr);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
@ -3200,6 +3278,9 @@ static inline void address_space_stw_internal(AddressSpace *as,
if (result) {
*result = r;
}
if (release_lock) {
qemu_mutex_unlock_iothread();
}
rcu_read_unlock();
}
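
Once a region is marked with memory_region_clear_global_locking(), prepare_mmio_access() dispatches to it without taking the BQL, so the device model must serialize itself. A hedged sketch of what such a handler could look like (MyDevState and its fields are made up for illustration):

/* Illustrative BQL-free MMIO read handler guarding its own state. */
static uint64_t mydev_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    MyDevState *s = opaque;        /* hypothetical device state struct */
    uint64_t val;

    qemu_mutex_lock(&s->lock);     /* device-local QemuMutex, not the BQL */
    val = s->regs[addr >> 2];
    qemu_mutex_unlock(&s->lock);
    return val;
}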


@ -528,6 +528,7 @@ void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar);
memory_region_init_io(&ar->tmr.io, memory_region_owner(parent),
&acpi_pm_tmr_ops, ar, "acpi-tmr", 4);
memory_region_clear_global_locking(&ar->tmr.io);
memory_region_add_subregion(parent, 8, &ar->tmr.io);
}


@ -26,6 +26,12 @@
#include "qom/cpu.h"
#include "qemu/rcu.h"
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
/* some important defines:
*
* WORDS_ALIGNED : if defined, the host cpu can only make word aligned


@ -13,6 +13,8 @@
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/fprintf-fn.h"
#include "qemu/typedefs.h"
/**
* CPUListState:


@ -56,20 +56,6 @@ typedef uint64_t target_ulong;
#error TARGET_LONG_SIZE undefined
#endif
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
#if !defined(CONFIG_USER_ONLY)
/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8
@ -161,7 +147,6 @@ typedef struct CPUIOTLBEntry {
#endif
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
/* soft mmu support */ \
CPU_COMMON_TLB \


@ -195,26 +195,6 @@ struct TBContext {
int tb_invalidated_flag;
};
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
}
static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);


@ -23,7 +23,6 @@
#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
@ -180,6 +179,7 @@ struct MemoryRegion {
bool rom_device;
bool warning_printed; /* For reservations */
bool flush_coalesced_mmio;
bool global_locking;
MemoryRegion *alias;
hwaddr alias_offset;
int32_t priority;
@ -824,6 +824,31 @@ void memory_region_set_flush_coalesced(MemoryRegion *mr);
*/
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
* memory_region_set_global_locking: Declares that access processing requires
* QEMU's global lock.
*
* When this is invoked, accesses to the memory region will be processed while
* holding the global lock of QEMU. This is the default behavior of memory
* regions.
*
* @mr: the memory region to be updated.
*/
void memory_region_set_global_locking(MemoryRegion *mr);
/**
* memory_region_clear_global_locking: Declares that access processing does
* not depend on the QEMU global lock.
*
* By clearing this property, accesses to the memory region will be processed
* outside of QEMU's global lock (unless the lock is already held when the
* access request is issued). In this case, the device model implementing the
* access handlers is responsible for its own synchronization.
*
* @mr: the memory region to be updated.
*/
void memory_region_clear_global_locking(MemoryRegion *mr);
/**
* memory_region_add_eventfd: Request an eventfd to be triggered when a word
* is written to a location.
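
Opting a region out of BQL protection is then a one-liner at init time, as the acpi-tmr hunk earlier in this commit shows. A sketch with hypothetical names:

/* Sketch: mark a hypothetical device's MMIO region as BQL-free. */
memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s, "mydev", 0x100);
memory_region_clear_global_locking(&s->iomem);  /* handlers run unlocked */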

include/exec/tb-hash.h (new file)

@ -0,0 +1,51 @@
/*
* internal execution defines for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef EXEC_TB_HASH
#define EXEC_TB_HASH
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
}
static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
#endif
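
These helpers feed the TB lookup fast path. A simplified probe of the per-CPU jump cache, assuming the usual CPUState::tb_jmp_cache array (a sketch, not the verbatim cpu-exec.c code):

/* Sketch: probe the jump cache by hashed guest PC. */
unsigned int hash = tb_jmp_cache_hash_func(pc);
TranslationBlock *tb = cpu->tb_jmp_cache[hash];
if (tb == NULL || tb->pc != pc) {
    /* miss: do the full lookup, then refill cpu->tb_jmp_cache[hash] */
}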


@ -14,6 +14,7 @@
#include "exec/memory.h"
#include "hw/irq.h"
#include "qemu/notify.h"
#include "cpu.h"
/* armv7m.c */
qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq,


@ -15,6 +15,7 @@
#include "qemu/compiler.h"
#include "config-host.h"
#include "qemu/typedefs.h"
#include "qemu/fprintf-fn.h"
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
#define WORDS_ALIGNED
@ -85,9 +86,6 @@
# error Unknown pointer size
#endif
typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
#ifdef _WIN32
#define fsync _commit
#if !defined(lseek)
@ -455,6 +453,7 @@ void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size);
#define VECTYPE __vector unsigned char
#define SPLAT(p) vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
#define VEC_OR(v1, v2) ((v1) | (v2))
/* altivec.h may redefine the bool macro as vector type.
* Reset it to POSIX semantics. */
#define bool _Bool
@ -463,10 +462,12 @@ void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size);
#define VECTYPE __m128i
#define SPLAT(p) _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#define VEC_OR(v1, v2) (_mm_or_si128(v1, v2))
#else
#define VECTYPE unsigned long
#define SPLAT(p) (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#define VEC_OR(v1, v2) ((v1) | (v2))
#endif
#define BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR 8
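
The motivation, as the cutils patch at the bottom of this commit suggests, is that ICC did not accept GCC's '|' operator on vector types, while the intrinsic form works everywhere. A sketch of the portable idiom (p and zero assumed to be a VECTYPE pointer and a zeroed VECTYPE, as in buffer_find_nonzero_offset()):

VECTYPE combined = VEC_OR(p[0], p[1]);  /* _mm_or_si128() on SSE2 builds */
if (!ALL_EQ(combined, zero)) {
    /* a nonzero byte is present in this pair of lanes */
}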

include/qemu/fprintf-fn.h (new file)

@ -0,0 +1,17 @@
/*
* Typedef for fprintf-alike function pointers.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef QEMU_FPRINTF_FN_H
#define QEMU_FPRINTF_FN_H 1
#include "qemu/compiler.h"
#include <stdio.h>
typedef int (*fprintf_function)(FILE *f, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
#endif


@ -222,6 +222,16 @@ void qemu_set_fd_handler(int fd,
int qemu_add_child_watch(pid_t pid);
#endif
/**
* qemu_mutex_iothread_locked: Return lock status of the main loop mutex.
*
* The main loop mutex is the coarsest lock in QEMU, and as such it
* must always be taken outside other locks. This function helps
* functions take different paths depending on whether the current
* thread is running within the main loop mutex.
*/
bool qemu_mutex_iothread_locked(void);
/**
* qemu_mutex_lock_iothread: Lock the main loop mutex.
*


@ -1099,9 +1099,17 @@ static int kvm_irqchip_get_virq(KVMState *s)
uint32_t *word = s->used_gsi_bitmap;
int max_words = ALIGN(s->gsi_count, 32) / 32;
int i, zeroes;
bool retry = true;
again:
/*
* PIC and IOAPIC share the first 16 GSI numbers, thus the available
* GSI numbers are more than the number of IRQ routes. Allocating a GSI
* number can succeed even though a new route entry cannot be added.
* When this happens, flush dynamic MSI entries to free IRQ route entries.
*/
if (!s->direct_msi && s->irq_routes->nr == s->gsi_count) {
kvm_flush_dynamic_msi_routes(s);
}
/* Return the lowest unused GSI in the bitmap */
for (i = 0; i < max_words; i++) {
zeroes = ctz32(~word[i]);
@ -1111,11 +1119,6 @@ again:
return zeroes + i * 32;
}
if (!s->direct_msi && retry) {
retry = false;
kvm_flush_dynamic_msi_routes(s);
goto again;
}
return -ENOSPC;
}
@ -1752,6 +1755,8 @@ int kvm_cpu_exec(CPUState *cpu)
return EXCP_HLT;
}
qemu_mutex_unlock_iothread();
do {
MemTxAttrs attrs;
@ -1770,11 +1775,9 @@ int kvm_cpu_exec(CPUState *cpu)
*/
qemu_cpu_kick_self();
}
qemu_mutex_unlock_iothread();
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
qemu_mutex_lock_iothread();
attrs = kvm_arch_post_run(cpu, run);
if (run_ret < 0) {
@ -1801,6 +1804,7 @@ int kvm_cpu_exec(CPUState *cpu)
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
/* Called outside BQL */
kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
@ -1810,6 +1814,7 @@ int kvm_cpu_exec(CPUState *cpu)
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
/* Called outside BQL */
address_space_rw(&address_space_memory,
run->mmio.phys_addr, attrs,
run->mmio.data,
@ -1857,6 +1862,8 @@ int kvm_cpu_exec(CPUState *cpu)
}
} while (ret == 0);
qemu_mutex_lock_iothread();
if (ret < 0) {
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
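
The net effect on kvm_cpu_exec() is that the BQL is now dropped for the whole inner loop rather than only around the KVM_RUN ioctl. A simplified shape, not the verbatim code (handle_exit is a hypothetical stand-in for the exit-reason switch):

qemu_mutex_unlock_iothread();       /* drop the BQL for the whole loop */
do {
    run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
    /* PIO/MMIO exits run without the BQL; address_space_rw() takes it
     * on demand via prepare_mmio_access() */
    ret = handle_exit(cpu, run);
} while (ret == 0);
qemu_mutex_lock_iothread();         /* retaken before returning */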


@ -396,9 +396,6 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
{
uint64_t tmp;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
tmp = mr->ops->read(mr->opaque, addr, size);
trace_memory_region_ops_read(mr, addr, tmp, size);
*value |= (tmp & mask) << shift;
@ -416,9 +413,6 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
uint64_t tmp = 0;
MemTxResult r;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
trace_memory_region_ops_read(mr, addr, tmp, size);
*value |= (tmp & mask) << shift;
@ -451,9 +445,6 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
{
uint64_t tmp;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
tmp = (*value >> shift) & mask;
trace_memory_region_ops_write(mr, addr, tmp, size);
mr->ops->write(mr->opaque, addr, tmp, size);
@ -470,9 +461,6 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
{
uint64_t tmp;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
tmp = (*value >> shift) & mask;
trace_memory_region_ops_write(mr, addr, tmp, size);
return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
@ -1012,6 +1000,7 @@ static void memory_region_initfn(Object *obj)
mr->ram_addr = RAM_ADDR_INVALID;
mr->enabled = true;
mr->romd_mode = true;
mr->global_locking = true;
mr->destructor = memory_region_destructor_none;
QTAILQ_INIT(&mr->subregions);
QTAILQ_INIT(&mr->coalesced);
@ -1646,6 +1635,16 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr)
}
}
void memory_region_set_global_locking(MemoryRegion *mr)
{
mr->global_locking = true;
}
void memory_region_clear_global_locking(MemoryRegion *mr)
{
mr->global_locking = false;
}
void memory_region_add_eventfd(MemoryRegion *mr,
hwaddr addr,
unsigned size,


@ -13,8 +13,8 @@
#include <glib.h>
#include "qemu-common.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"


@ -1,6 +1,11 @@
#include "qemu-common.h"
#include "qemu/main-loop.h"
bool qemu_mutex_iothread_locked(void)
{
return true;
}
void qemu_mutex_lock_iothread(void)
{
}


@ -2191,7 +2191,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
/* Inject NMI */
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
qemu_mutex_lock_iothread();
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
qemu_mutex_unlock_iothread();
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
@ -2200,6 +2203,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
if (!kvm_irqchip_in_kernel()) {
qemu_mutex_lock_iothread();
}
/* Force the VCPU out of its inner loop to process any INIT requests
* or (for userspace APIC, but it is cheap to combine the checks here)
* pending TPR access reports.
@ -2243,6 +2250,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
DPRINTF("setting tpr\n");
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
qemu_mutex_unlock_iothread();
}
}
@ -2256,8 +2265,17 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
} else {
env->eflags &= ~IF_MASK;
}
/* We need to protect the apic state against concurrent accesses from
* different threads in case the userspace irqchip is used. */
if (!kvm_irqchip_in_kernel()) {
qemu_mutex_lock_iothread();
}
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
if (!kvm_irqchip_in_kernel()) {
qemu_mutex_unlock_iothread();
}
return cpu_get_mem_attrs(env);
}
@ -2550,13 +2568,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
qemu_mutex_lock_iothread();
ret = kvm_handle_halt(cpu);
qemu_mutex_unlock_iothread();
break;
case KVM_EXIT_SET_TPR:
ret = 0;
break;
case KVM_EXIT_TPR_ACCESS:
qemu_mutex_lock_iothread();
ret = kvm_handle_tpr_access(cpu);
qemu_mutex_unlock_iothread();
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
@ -2582,7 +2604,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
qemu_mutex_lock_iothread();
ret = kvm_handle_debug(cpu, &run->debug.arch);
qemu_mutex_unlock_iothread();
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);


@ -99,6 +99,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
int r;
struct kvm_mips_interrupt intr;
qemu_mutex_lock_iothread();
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_io_interrupts_pending(cpu)) {
intr.cpu = -1;
@ -109,6 +111,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
__func__, cs->cpu_index, intr.irq);
}
}
qemu_mutex_unlock_iothread();
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)


@ -1242,6 +1242,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
int r;
unsigned irq;
qemu_mutex_lock_iothread();
/* PowerPC QEMU tracks the various core input pins (interrupt, critical
* interrupt, reset, etc) in PPC-specific env->irq_input_state. */
if (!cap_interrupt_level &&
@ -1269,6 +1271,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
/* We don't know if there are more interrupts pending after this. However,
* the guest will return to userspace in the course of handling this one
* anyways, so we will get a chance to deliver the rest. */
qemu_mutex_unlock_iothread();
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
@ -1570,6 +1574,8 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
CPUPPCState *env = &cpu->env;
int ret;
qemu_mutex_lock_iothread();
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
@ -1620,6 +1626,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
}
qemu_mutex_unlock_iothread();
return ret;
}


@ -2007,6 +2007,8 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
S390CPU *cpu = S390_CPU(cs);
int ret = 0;
qemu_mutex_lock_iothread();
switch (run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
ret = handle_intercept(cpu);
@ -2027,6 +2029,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
break;
}
qemu_mutex_unlock_iothread();
if (ret == 0) {
ret = EXCP_INTERRUPT;


@ -17,8 +17,8 @@
#include "qemu/error-report.h"
#include "exec/address-spaces.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/kvm.h"
/* #define DEBUG_S390 */
/* #define DEBUG_S390_PTE */


@ -29,6 +29,8 @@
#include "qemu/bitops.h"
#include "tcg-target.h"
#define CPU_TEMP_BUF_NLONGS 128
/* Default target word size to pointer size. */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX


@ -58,6 +58,7 @@
#endif
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"


@ -207,13 +207,13 @@ size_t buffer_find_nonzero_offset(const void *buf, size_t len)
for (i = BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR;
i < len / sizeof(VECTYPE);
i += BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR) {
VECTYPE tmp0 = p[i + 0] | p[i + 1];
VECTYPE tmp1 = p[i + 2] | p[i + 3];
VECTYPE tmp2 = p[i + 4] | p[i + 5];
VECTYPE tmp3 = p[i + 6] | p[i + 7];
VECTYPE tmp01 = tmp0 | tmp1;
VECTYPE tmp23 = tmp2 | tmp3;
if (!ALL_EQ(tmp01 | tmp23, zero)) {
VECTYPE tmp0 = VEC_OR(p[i + 0], p[i + 1]);
VECTYPE tmp1 = VEC_OR(p[i + 2], p[i + 3]);
VECTYPE tmp2 = VEC_OR(p[i + 4], p[i + 5]);
VECTYPE tmp3 = VEC_OR(p[i + 6], p[i + 7]);
VECTYPE tmp01 = VEC_OR(tmp0, tmp1);
VECTYPE tmp23 = VEC_OR(tmp2, tmp3);
if (!ALL_EQ(VEC_OR(tmp01, tmp23), zero)) {
break;
}
}