accel/tcg: Introduce tb_{set_}page_addr{0,1}
This data structure will be replaced for user-only: add accessors.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 4c88475c9f
commit 28905cfbd5
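The one-line rationale above is the whole point of the patch: once every caller goes through an accessor, the backing page_addr[] array can later be swapped for a different user-only representation by changing only the accessor bodies, not the many call sites touched in the diff below. As a minimal stand-alone illustration of that pattern (not QEMU code: the tb_page_addr_t typedef, the stripped-down TranslationBlock and the main() caller are invented stand-ins; only the four accessors mirror the ones added at the end of this diff):

    #include <inttypes.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real QEMU types carry many more fields. */
    typedef uintptr_t tb_page_addr_t;

    typedef struct TranslationBlock {
        /*
         * First and (optional) second guest page covered by the TB;
         * page_addr[1] == -1 means the TB does not cross a page boundary.
         */
        tb_page_addr_t page_addr[2];
    } TranslationBlock;

    /* The four accessors, as introduced by this patch. */
    static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
    {
        return tb->page_addr[0];
    }

    static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
    {
        return tb->page_addr[1];
    }

    static inline void tb_set_page_addr0(TranslationBlock *tb, tb_page_addr_t addr)
    {
        tb->page_addr[0] = addr;
    }

    static inline void tb_set_page_addr1(TranslationBlock *tb, tb_page_addr_t addr)
    {
        tb->page_addr[1] = addr;
    }

    int main(void)
    {
        TranslationBlock tb;

        /* What tb_gen_code() now does instead of assigning page_addr[] directly. */
        tb_set_page_addr0(&tb, 0x1000);
        tb_set_page_addr1(&tb, -1);

        /* What tb_lookup_cmp() and friends now do instead of reading the array. */
        if (tb_page_addr1(&tb) == (tb_page_addr_t)-1) {
            printf("TB at 0x%" PRIxPTR " does not span two pages\n",
                   (uintptr_t)tb_page_addr0(&tb));
        }
        return 0;
    }

With every access funneled through these four inline helpers, the anticipated user-only rework only has to replace the accessor bodies and the field they touch; the call sites converted below stay unchanged.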
@@ -187,13 +187,14 @@ static bool tb_lookup_cmp(const void *p, const void *d)
     const struct tb_desc *desc = d;
 
     if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
-        tb->page_addr[0] == desc->page_addr0 &&
+        tb_page_addr0(tb) == desc->page_addr0 &&
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
         tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
         tb_cflags(tb) == desc->cflags) {
         /* check next page if needed */
-        if (tb->page_addr[1] == -1) {
+        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
+        if (tb_phys_page1 == -1) {
             return true;
         } else {
             tb_page_addr_t phys_page1;
@@ -210,7 +211,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
              */
             virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
             phys_page1 = get_page_addr_code(desc->env, virt_page1);
-            if (tb->page_addr[1] == phys_page1) {
+            if (tb_phys_page1 == phys_page1) {
                 return true;
             }
         }
@@ -1019,7 +1020,7 @@ int cpu_exec(CPUState *cpu)
              * direct jump to a TB spanning two pages because the mapping
              * for the second page can change.
              */
-            if (tb->page_addr[1] != -1) {
+            if (tb_page_addr1(tb) != -1) {
                 last_tb = NULL;
             }
 #endif

@@ -44,8 +44,8 @@ static bool tb_cmp(const void *ap, const void *bp)
             a->flags == b->flags &&
             (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
             a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
-            a->page_addr[0] == b->page_addr[0] &&
-            a->page_addr[1] == b->page_addr[1]);
+            tb_page_addr0(a) == tb_page_addr0(b) &&
+            tb_page_addr1(a) == tb_page_addr1(b));
 }
 
 void tb_htable_init(void)
@@ -273,7 +273,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     qemu_spin_unlock(&tb->jmp_lock);
 
     /* remove the TB from the hash list */
-    phys_pc = tb->page_addr[0];
+    phys_pc = tb_page_addr0(tb);
     h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                      tb->flags, orig_cflags, tb->trace_vcpu_dstate);
     if (!qht_remove(&tb_ctx.htable, tb, h)) {
@@ -282,10 +282,11 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
 
     /* remove the TB from the page list */
     if (rm_from_page_list) {
-        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+        p = page_find(phys_pc >> TARGET_PAGE_BITS);
         tb_page_remove(p, tb);
-        if (tb->page_addr[1] != -1) {
-            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+        phys_pc = tb_page_addr1(tb);
+        if (phys_pc != -1) {
+            p = page_find(phys_pc >> TARGET_PAGE_BITS);
             tb_page_remove(p, tb);
         }
     }
@@ -358,16 +359,16 @@ static inline void page_unlock_tb(const TranslationBlock *tb) { }
 /* lock the page(s) of a TB in the correct acquisition order */
 static void page_lock_tb(const TranslationBlock *tb)
 {
-    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
+    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
 }
 
 static void page_unlock_tb(const TranslationBlock *tb)
 {
-    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
 
     page_unlock(p1);
-    if (unlikely(tb->page_addr[1] != -1)) {
-        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+    if (unlikely(tb_page_addr1(tb) != -1)) {
+        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
 
         if (p2 != p1) {
             page_unlock(p2);
@@ -382,7 +383,7 @@ static void page_unlock_tb(const TranslationBlock *tb)
  */
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
-    if (page_addr == -1 && tb->page_addr[0] != -1) {
+    if (page_addr == -1 && tb_page_addr0(tb) != -1) {
         page_lock_tb(tb);
         do_tb_phys_invalidate(tb, true);
         page_unlock_tb(tb);
@@ -516,11 +517,11 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         if (n == 0) {
             /* NOTE: tb_end may be after the end of the page, but
                it is not a problem */
-            tb_start = tb->page_addr[0];
+            tb_start = tb_page_addr0(tb);
             tb_end = tb_start + tb->size;
         } else {
-            tb_start = tb->page_addr[1];
-            tb_end = tb_start + ((tb->page_addr[0] + tb->size)
+            tb_start = tb_page_addr1(tb);
+            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
                                  & ~TARGET_PAGE_MASK);
         }
         if (!(tb_end <= start || tb_start >= end)) {

@@ -698,9 +698,9 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
         }
         assert_page_locked(pd);
         PAGE_FOR_EACH_TB(pd, tb, n) {
-            if (page_trylock_add(set, tb->page_addr[0]) ||
-                (tb->page_addr[1] != -1 &&
-                 page_trylock_add(set, tb->page_addr[1]))) {
+            if (page_trylock_add(set, tb_page_addr0(tb)) ||
+                (tb_page_addr1(tb) != -1 &&
+                 page_trylock_add(set, tb_page_addr1(tb)))) {
                 /* drop all locks, and reacquire in order */
                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
                 goto retry;
@@ -771,8 +771,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->flags = flags;
     tb->cflags = cflags;
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
-    tb->page_addr[0] = phys_pc;
-    tb->page_addr[1] = -1;
+    tb_set_page_addr0(tb, phys_pc);
+    tb_set_page_addr1(tb, -1);
     tcg_ctx->tb_cflags = cflags;
  tb_overflow:
 
@@ -970,7 +970,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * a temporary one-insn TB, and we have nothing left to do. Return early
      * before attempting to link to other TBs or add to the lookup table.
      */
-    if (tb->page_addr[0] == -1) {
+    if (tb_page_addr0(tb) == -1) {
         return tb;
     }
 
@@ -985,7 +985,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * No explicit memory barrier is required -- tb_link_page() makes the
      * TB visible in a consistent state.
      */
-    existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
+    existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
     /* if the TB already exists, discard what we just translated */
     if (unlikely(existing_tb != tb)) {
         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@@ -1140,7 +1140,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
     if (tb->size > tst->max_target_size) {
         tst->max_target_size = tb->size;
     }
-    if (tb->page_addr[1] != -1) {
+    if (tb_page_addr1(tb) != -1) {
         tst->cross_page++;
     }
     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {

@@ -157,7 +157,7 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
     tb = db->tb;
 
     /* Use slow path if first page is MMIO. */
-    if (unlikely(tb->page_addr[0] == -1)) {
+    if (unlikely(tb_page_addr0(tb) == -1)) {
         return NULL;
     }
 
@@ -169,13 +169,14 @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
         host = db->host_addr[1];
         base = TARGET_PAGE_ALIGN(db->pc_first);
         if (host == NULL) {
-            tb->page_addr[1] =
+            tb_page_addr_t phys_page =
                 get_page_addr_code_hostp(env, base, &db->host_addr[1]);
+            /* We cannot handle MMIO as second page. */
+            assert(phys_page != -1);
+            tb_set_page_addr1(tb, phys_page);
 #ifdef CONFIG_USER_ONLY
             page_protect(end);
 #endif
-            /* We cannot handle MMIO as second page. */
-            assert(tb->page_addr[1] != -1);
             host = db->host_addr[1];
         }
 

@@ -610,6 +610,28 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
     return qatomic_read(&tb->cflags);
 }
 
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
+{
+    return tb->page_addr[0];
+}
+
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
+{
+    return tb->page_addr[1];
+}
+
+static inline void tb_set_page_addr0(TranslationBlock *tb,
+                                     tb_page_addr_t addr)
+{
+    tb->page_addr[0] = addr;
+}
+
+static inline void tb_set_page_addr1(TranslationBlock *tb,
+                                     tb_page_addr_t addr)
+{
+    tb->page_addr[1] = addr;
+}
+
 /* current cflags for hashing/comparison */
 uint32_t curr_cflags(CPUState *cpu);