tcg/loongarch64: Introduce prepare_host_addr

Merge tcg_out_tlb_load, add_qemu_ldst_label, tcg_out_test_alignment,
tcg_out_zext_addr_if_32_bit, and some code that lived in both
tcg_out_qemu_ld and tcg_out_qemu_st into one function that returns
HostAddress and TCGLabelQemuLdst structures.
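
In outline, both tcg_out_qemu_ld and tcg_out_qemu_st now share the same
shape. A rough sketch of the new caller pattern (fragment only; the full
code is in the diff below; data_reg, addr_reg, oi and data_type are the
usual qemu_ld/st parameters, and is_ld is true for loads, false for
stores):

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
    /* emit the access via h.base/h.index, e.g. tcg_out_qemu_ld_indexed() */
    if (ldst) {
        /* slow path required: finish the label for the code emitted above */
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }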

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-04-23 17:02:44 +01:00
parent 7131d3cf72
commit e63eed328f

@@ -818,81 +818,12 @@ static void * const qemu_st_helpers[4] = {
     [MO_64] = helper_le_stq_mmu,
 };
 
-/* We expect to use a 12-bit negative offset from ENV. */
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
-
 static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
 {
     tcg_out_opc_b(s, 0);
     return reloc_br_sd10k16(s->code_ptr - 1, target);
 }
 
-/*
- * Emits common code for TLB addend lookup, that eventually loads the
- * addend in TCG_REG_TMP2.
- */
-static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
-                             tcg_insn_unit **label_ptr, bool is_load)
-{
-    MemOp opc = get_memop(oi);
-    unsigned s_bits = opc & MO_SIZE;
-    unsigned a_bits = get_alignment_bits(opc);
-    tcg_target_long compare_mask;
-    int mem_index = get_mmuidx(oi);
-    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
-    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
-    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
-
-    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
-                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
-    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
-    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
-
-    /* Load the tlb comparator and the addend. */
-    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
-               is_load ? offsetof(CPUTLBEntry, addr_read)
-                       : offsetof(CPUTLBEntry, addr_write));
-    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
-               offsetof(CPUTLBEntry, addend));
-
-    /* We don't support unaligned accesses. */
-    if (a_bits < s_bits) {
-        a_bits = s_bits;
-    }
-    /* Clear the non-page, non-alignment bits from the address. */
-    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
-    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
-    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
-
-    /* Compare masked address with the TLB entry. */
-    label_ptr[0] = s->code_ptr;
-    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
-
-    /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
-}
-
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
-                                TCGType type,
-                                TCGReg datalo, TCGReg addrlo,
-                                void *raddr, tcg_insn_unit **label_ptr)
-{
-    TCGLabelQemuLdst *label = new_ldst_label(s);
-
-    label->is_ld = is_ld;
-    label->oi = oi;
-    label->type = type;
-    label->datalo_reg = datalo;
-    label->datahi_reg = 0; /* unused */
-    label->addrlo_reg = addrlo;
-    label->addrhi_reg = 0; /* unused */
-    label->raddr = tcg_splitwx_to_rx(raddr);
-    label->label_ptr[0] = label_ptr[0];
-}
-
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     MemOpIdx oi = l->oi;
@@ -941,33 +872,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_goto(s, l->raddr);
 }
 #else
-
-/*
- * Alignment helpers for user-mode emulation
- */
-
-static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
-                                   unsigned a_bits)
-{
-    TCGLabelQemuLdst *l = new_ldst_label(s);
-
-    l->is_ld = is_ld;
-    l->addrlo_reg = addr_reg;
-
-    /*
-     * Without micro-architecture details, we don't know which of bstrpick or
-     * andi is faster, so use bstrpick as it's not constrained by imm field
-     * width. (Not to say alignments >= 2^12 are going to happen any time
-     * soon, though)
-     */
-    tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
-
-    l->label_ptr[0] = s->code_ptr;
-    tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
-
-    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
-}
-
 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
 {
     /* resolve label address */
@@ -997,27 +901,102 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 #endif /* CONFIG_SOFTMMU */
 
-/*
- * `ext32u` the address register into the temp register given,
- * if target is 32-bit, no-op otherwise.
- *
- * Returns the address register ready for use with TLB addend.
- */
-static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
-                                          TCGReg addr, TCGReg tmp)
-{
-    if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, tmp, addr);
-        return tmp;
-    }
-    return addr;
-}
-
 typedef struct {
     TCGReg base;
     TCGReg index;
 } HostAddress;
 
+/*
+ * For softmmu, perform the TLB load and compare.
+ * For useronly, perform any required alignment tests.
+ * In both cases, return a TCGLabelQemuLdst structure if the slow path
+ * is required and fill in @h with the host address for the fast path.
+ */
+static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
+                                           TCGReg addr_reg, MemOpIdx oi,
+                                           bool is_ld)
+{
+    TCGLabelQemuLdst *ldst = NULL;
+    MemOp opc = get_memop(oi);
+    unsigned a_bits = get_alignment_bits(opc);
+
+#ifdef CONFIG_SOFTMMU
+    unsigned s_bits = opc & MO_SIZE;
+    int mem_index = get_mmuidx(oi);
+    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
+    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+    tcg_target_long compare_mask;
+
+    ldst = new_ldst_label(s);
+    ldst->is_ld = is_ld;
+    ldst->oi = oi;
+    ldst->addrlo_reg = addr_reg;
+
+    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
+                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+    /* Load the tlb comparator and the addend. */
+    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
+               is_ld ? offsetof(CPUTLBEntry, addr_read)
+                     : offsetof(CPUTLBEntry, addr_write));
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+               offsetof(CPUTLBEntry, addend));
+
+    /* We don't support unaligned accesses. */
+    if (a_bits < s_bits) {
+        a_bits = s_bits;
+    }
+    /* Clear the non-page, non-alignment bits from the address. */
+    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
+    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
+    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
+
+    /* Compare masked address with the TLB entry. */
+    ldst->label_ptr[0] = s->code_ptr;
+    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
+
+    h->index = TCG_REG_TMP2;
+#else
+    if (a_bits) {
+        ldst = new_ldst_label(s);
+
+        ldst->is_ld = is_ld;
+        ldst->oi = oi;
+        ldst->addrlo_reg = addr_reg;
+
+        /*
+         * Without micro-architecture details, we don't know which of
+         * bstrpick or andi is faster, so use bstrpick as it's not
+         * constrained by imm field width. Not to say alignments >= 2^12
+         * are going to happen any time soon.
+         */
+        tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
+
+        ldst->label_ptr[0] = s->code_ptr;
+        tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+    }
+
+    h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+#endif
+
+    if (TARGET_LONG_BITS == 32) {
+        h->base = TCG_REG_TMP0;
+        tcg_out_ext32u(s, h->base, addr_reg);
+    } else {
+        h->base = addr_reg;
+    }
+
+    return ldst;
+}
+
 static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                     TCGReg rd, HostAddress h)
 {
@@ -1057,29 +1036,17 @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
-    MemOp opc = get_memop(oi);
+    TCGLabelQemuLdst *ldst;
     HostAddress h;
 
-#ifdef CONFIG_SOFTMMU
-    tcg_insn_unit *label_ptr[1];
-
-    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
-    h.index = TCG_REG_TMP2;
-#else
-    unsigned a_bits = get_alignment_bits(opc);
-
-    if (a_bits) {
-        tcg_out_test_alignment(s, true, addr_reg, a_bits);
-    }
-    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-#endif
+    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
+    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
 
-    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
-    tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);
-
-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
-                        s->code_ptr, label_ptr);
-#endif
+    if (ldst) {
+        ldst->type = data_type;
+        ldst->datalo_reg = data_reg;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
 }
 
 static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
@@ -1109,29 +1076,17 @@ static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
-    MemOp opc = get_memop(oi);
+    TCGLabelQemuLdst *ldst;
     HostAddress h;
 
-#ifdef CONFIG_SOFTMMU
-    tcg_insn_unit *label_ptr[1];
-
-    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
-    h.index = TCG_REG_TMP2;
-#else
-    unsigned a_bits = get_alignment_bits(opc);
-
-    if (a_bits) {
-        tcg_out_test_alignment(s, false, addr_reg, a_bits);
-    }
-    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-#endif
+    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
+    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);
 
-    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
-    tcg_out_qemu_st_indexed(s, opc, data_reg, h);
-
-#ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
-                        s->code_ptr, label_ptr);
-#endif
+    if (ldst) {
+        ldst->type = data_type;
+        ldst->datalo_reg = data_reg;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
 }
 
 /*