6f703a4841
In structure CPUArchState, some struct elements are only used in TCG mode and are unused in KVM mode. The macro CONFIG_TCG is added so that they are compiled out in KVM-only builds, and the C code that uses these structure elements is adjusted in the same way. When a VM runs in KVM mode, the TLB entries are not used and do not need to be migrated; they are only useful in TCG mode.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20240506011912.2108842-1-maobibo@loongson.cn>
Signed-off-by: Song Gao <gaosong@loongson.cn>
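A minimal sketch of the shape of this change (an editor's illustration, not the exact upstream diff; the real field list lives in target/loongarch/cpu.h), using the env->tlb array that the file below accesses:

typedef struct CPUArchState {
    /* ... fields shared by TCG and KVM ... */
#ifdef CONFIG_TCG
    /*
     * Software TLB: only consulted by the TCG softmmu path, so it can
     * be compiled out, and skipped for migration, in KVM-only builds.
     */
    LoongArchTLB tlb[LOONGARCH_TLB_MAX];
#endif
    /* ... */
} CPULoongArchState;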
240 lines
7.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-csr.h"

#ifdef CONFIG_TCG
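/*
 * Editor's note (comment not in the original source): translate
 * @address through the TLB entry at @index. The entry holds an
 * even/odd page pair; the matching half is selected by the bit just
 * above the page-size bits, then the V, NX, NR, PLV and D bits are
 * checked before the physical address and protection are filled in.
 */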
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }

    /* Remove sw bit between bit12 -- bit PS */
    tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1));

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

/*
 * One tlb entry holds an adjacent odd/even pair; the vpn is the
 * content of the virtual page number divided by 2. So the
 * compare vpn is bit[47:15] for a 16KiB page, while the vppn
 * field in the tlb entry contains bit[47:13], so it needs adjusting.
 * virt_vpn = vaddr[47:13]
 */
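/*
 * Editor's note (comment not in the original source), worked example
 * for a 16KiB STLB page size (stlb_ps = 14):
 *   vpn           = vaddr >> (14 + 1)              (vaddr[47:15])
 *   stored VPPN   = vaddr[47:13]
 *   compare_shift = 14 + 1 - R_TLB_MISC_VPPN_SHIFT
 * so the stored VPPN is shifted right by compare_shift before being
 * compared against vpn.
 */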
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

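    /*
     * Editor's note (comment not in the original source): the STLB is
     * organized as 256 sets of 8 ways (LOONGARCH_STLB entries in all),
     * so way i of set stlb_idx lives at env->tlb[i * 256 + stlb_idx].
     */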
    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}

static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}
#else
/* Under KVM the software TLB above is not modelled, so never match. */
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    return TLBRET_NOMATCH;
}
#endif
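
/*
 * Editor's note (comment not in the original source): translate a
 * direct-map-window hit. LA64 simply keeps the low virtual-address
 * bits, while LA32 substitutes the window's PSEG field for the VSEG
 * bits.
 */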
static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}

int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA */
    if (da & !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
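    /*
     * Editor's note (comment not in the original source): plv is a
     * one-hot privilege mask (bit 0 for kernel, bit
     * R_CSR_DMW_PLV3_SHIFT for user), so "plv & env->CSR_DMW[i]" below
     * tests whether window i is enabled for the current privilege
     * level.
     */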
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}

hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false)) != 0) {
        return -1;
    }
    return phys_addr;
}