/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "qemu/host-utils.h"
#include "qemu/thread.h"
#ifdef CONFIG_TCG
#include "tcg-target.h"
#endif
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "hw/core/cpu.h"

#include "cpu-param.h"

#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
#ifndef NB_MMU_MODES
# error NB_MMU_MODES must be defined in cpu-param.h
#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_VIRT_ADDR_SPACE_BITS
# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_PAGE_BITS
# ifdef TARGET_PAGE_BITS_VARY
#  ifndef TARGET_PAGE_BITS_MIN
#   error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
#  endif
# else
#  error TARGET_PAGE_BITS must be defined in cpu-param.h
# endif
#endif
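
/*
 * Illustrative only: a hypothetical 64-bit target would satisfy the
 * checks above by providing definitions along these lines in its
 * cpu-param.h (the values here are examples, not taken from any real
 * target):
 *
 *   #define TARGET_LONG_BITS            64
 *   #define TARGET_PAGE_BITS            12
 *   #define TARGET_PHYS_ADDR_SPACE_BITS 48
 *   #define TARGET_VIRT_ADDR_SPACE_BITS 48
 *   #define NB_MMU_MODES                4
 */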

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif
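
/*
 * Illustrative use (not a real call site): the format macros keep
 * diagnostic output portable across 32-bit and 64-bit targets, e.g.
 *
 *   qemu_log("pc=" TARGET_FMT_lx "\n", pc);
 *
 * where "pc" stands for any target_ulong value; the macro expands to a
 * 32-bit or 64-bit hexadecimal conversion as appropriate.
 */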

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)

/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8
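
/*
 * Background retained from the commit that introduced the victim TLB:
 * it is a small, fully associative cache holding entries evicted from
 * the direct-mapped main TLB.  On a main-TLB miss it is searched before
 * falling back to a full page table walk, trading a short linear search
 * for fewer expensive refills.
 */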

#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif
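
/*
 * Sizing sketch: an entry holds three guest addresses plus one host
 * addend (see CPUTLBEntry below).  With a 32-bit host and guest that is
 * 4 * 4 = 16 bytes, i.e. 1 << 4; every other combination is padded up
 * to 32 bytes, i.e. 1 << 5, so a TLB index can be converted to a byte
 * offset with a single shift.
 */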

#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8

# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
#  define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
 * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
 * 2**34 == 16G of address space. This is roughly what one would expect a
 * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
 * Skylake's Level-2 STLB has 16 1G entries.
 * Also, make sure we do not size the TLB past the guest's address space.
 */
#  ifdef TARGET_PAGE_BITS_VARY
#   define CPU_TLB_DYN_MAX_BITS \
     MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  else
#   define CPU_TLB_DYN_MAX_BITS \
     MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  endif
# endif
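
/*
 * Note retained from the history of this file: CPU_TLB_DYN_MAX_BITS is
 * used both where a compile-time constant is required and at run time.
 * MIN_CONST() remains usable in constant expressions (at the cost of
 * evaluating its arguments more than once), while the runtime MIN() is
 * the only option when TARGET_PAGE_BITS_VARY makes TARGET_PAGE_BITS a
 * non-constant.
 */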

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
    */
    union {
        struct {
            target_ulong addr_read;
            target_ulong addr_write;
            target_ulong addr_code;
            /* Addend to virtual address to get host address.  IO accesses
               use the corresponding iotlb value.  */
            uintptr_t addend;
        };
        /* padding to get a power of two size */
        uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
    };
} CPUTLBEntry;

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
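
/*
 * Sketch of how the addend is consumed (variable names illustrative):
 * for a RAM-backed page that hit in the TLB, the host address is
 *
 *   void *host = (void *)((uintptr_t)vaddr + entry->addend);
 *
 * whereas I/O accesses ignore the addend and go through the matching
 * CPUIOTLBEntry below instead.
 */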

/* The IOTLB is not accessed directly inline by generated TCG code,
 * so the CPUIOTLBEntry layout is not as critical as that of the
 * CPUTLBEntry. (This is also why we don't want to combine the two
 * structs into one.)
 */
typedef struct CPUIOTLBEntry {
    /*
     * @addr contains:
     *  - in the lower TARGET_PAGE_BITS, a physical section number
     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
     *    must be added to the virtual address to obtain:
     *     + the ram_addr_t of the target RAM (if the physical section
     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
     *     + the offset within the target MemoryRegion (otherwise)
     */
    hwaddr addr;
    MemTxAttrs attrs;
} CPUIOTLBEntry;
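
/*
 * Sketch of the split described above (variable names illustrative):
 *
 *   unsigned section = iotlbentry->addr & ~TARGET_PAGE_MASK;
 *   hwaddr mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + vaddr;
 */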

/*
 * Data elements that are per MMU mode, minus the bits accessed by
 * the TCG fast path.
 */
typedef struct CPUTLBDesc {
    /*
     * Describe a region covering all of the large pages allocated
     * into the tlb.  When any page within this region is flushed,
     * we must flush the entire tlb.  The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
    target_ulong large_page_addr;
    target_ulong large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    size_t n_used_entries;
    /* The next index to use in the tlb victim table.  */
    size_t vindex;
    /* The tlb victim table, in two parts.  */
    CPUTLBEntry vtable[CPU_VTLB_SIZE];
    CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
    /* The iotlb.  */
    CPUIOTLBEntry *iotlb;
} CPUTLBDesc;

/*
 * Data elements that are per MMU mode, accessed by the fast path.
 * The structure is aligned to aid loading the pair with one insn.
 */
typedef struct CPUTLBDescFast {
    /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
    uintptr_t mask;
    /* The array of tlb entries itself. */
    CPUTLBEntry *table;
} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
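
/*
 * Sketch of the lookup implied by the comment on @mask (variable names
 * illustrative): since mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS,
 * the entry for a guest address can be selected as
 *
 *   uintptr_t idx = (addr >> TARGET_PAGE_BITS)
 *                   & (fast->mask >> CPU_TLB_ENTRY_BITS);
 *   CPUTLBEntry *entry = &fast->table[idx];
 */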

/*
 * Data elements that are shared between all MMU modes.
 */
typedef struct CPUTLBCommon {
    /* Serialize updates to f.table and d.vtable, and others as noted. */
    QemuSpin lock;
    /*
     * Within dirty, for each bit N, modifications have been made to
     * mmu_idx N since the last time that mmu_idx was flushed.
     * Protected by tlb_c.lock.
     */
    uint16_t dirty;
    /*
     * Statistics.  These are not lock protected, but are read and
     * written atomically.  This allows the monitor to print a snapshot
     * of the stats without interfering with the cpu.
     */
    size_t full_flush_count;
    size_t part_flush_count;
    size_t elide_flush_count;
} CPUTLBCommon;
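
/*
 * Note retained from the commit that introduced the lock: all updaters
 * of the tlb tables and the victim cache take it, while readers that
 * cannot (e.g. another vCPU performing an invalidation concurrently
 * with this vCPU's accesses) must read the .addr_write field with
 * atomic operations, since it can be written from other threads.
 */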

/*
 * The entire softmmu tlb, for all MMU modes.
 * The meaning of each of the MMU modes is defined in the target code.
 * Since this is placed within CPUNegativeOffsetState, the smallest
 * negative offsets are at the end of the struct.
 */
typedef struct CPUTLB {
    CPUTLBCommon c;
    CPUTLBDesc d[NB_MMU_MODES];
    CPUTLBDescFast f[NB_MMU_MODES];
} CPUTLB;

/* This will be used by TCG backends to compute offsets.  */
#define TLB_MASK_TABLE_OFS(IDX) \
    ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
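
/*
 * Because "neg" is placed immediately before "env" in ArchCPU (see
 * CPUNegativeOffsetState below), this offset is a small negative
 * number that backends can encode as a short displacement from the
 * env pointer.
 */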

#else

typedef struct CPUTLB { } CPUTLB;

#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */

/*
 * This structure must be placed in ArchCPU immediately
 * before CPUArchState, as a field named "neg".
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
    IcountDecr icount_decr;
} CPUNegativeOffsetState;
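
/*
 * Illustrative layout (only the "neg"/"env" ordering is mandated by the
 * comment above; the other members are examples):
 *
 *   struct ArchCPU {
 *       CPUState parent_obj;
 *       ...
 *       CPUNegativeOffsetState neg;
 *       CPUArchState env;
 *   };
 */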

#endif /* CPU_DEFS_H */