MIPS patches 2017-07-21
Changes:
* Add Enhanced Virtual Addressing (EVA) support

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.5 (GNU/Linux)

iQIVAwUAWXFmCyI464bV95fCAQJHpA//QvuBgHmN2z7Idp27IW84K0WL1IIz52hA
K98ZS/31CAHRNKN/4TqpZyFERYvmAvOrMTR3TEJISrl8M28dXNEaANjRu+RNJ+nk
Q410VG3Hr//C9GHVqMQ5SRMY8MgGGnBFpkwSW7O1Qn1cQiEB1PvFV5wZVpCEgJoO
x2KZvzMJNSYsWrmvFc79CvE0m/K5frO4L/XMKoSdu51cVy4zQdI5NS/G6JugTN1j
1p5x724Ic6duSlUZD91EvwUZEk88aeXGCaauMgiHYCkWNbY6he39GKRmRnhGYcIa
9olFHW1axKaE1F+1J99eggz3XDLNfr4zsiBnmzMmi+ajJDAVO1vWcQWLUXH7cd1y
ToxOnKel83EdDFfl+yaAGH4Ig/RRqUFaB7Qq3QOjEHlM5sCJ8vsLUuplCyBIdV6d
/BaS0v99FNt4qSSYpwRd/cPbPmYl0DbQatfcgsX2g4WSH3SyjZBg3L7NyFWmOAAg
fscLFafA5ic6XHmSKwsqmzPbOJcUXYrNnxLHw+Smg60hiqoWgF+4j34lDLEMsY1T
rIptlpU4GeXs05d+Q/ABEH7wcPJHPjoWTEkPzaFjXjuL+c9ZnQqnrI4j9gXsqY6u
Km8SoTPfqdwj5AW7KS4RrNz6g2f+GnXS4t8p9JK2krciH+gPVVBQEmf4t3qQAwPC
y2uVsoHtC2E=
=qlJH
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/yongbok/tags/mips-20170721' into staging

MIPS patches 2017-07-21

Changes:
* Add Enhanced Virtual Addressing (EVA) support

# gpg: Signature made Fri 21 Jul 2017 03:25:15 BST
# gpg:                using RSA key 0x2238EB86D5F797C2
# gpg: Good signature from "Yongbok Kim <yongbok.kim@imgtec.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 8600 4CF5 3415 A5D9 4CFA 2B5C 2238 EB86 D5F7 97C2

* remotes/yongbok/tags/mips-20170721:
  target/mips: Enable CP0_EBase.WG on MIPS64 CPUs
  target/mips: Add EVA support to P5600
  target/mips: Implement segmentation control
  target/mips: Add segmentation control registers
  target/mips: Add an MMU mode for ERL
  target/mips: Abstract mmu_idx from hflags
  target/mips: Check memory permissions with mem_idx
  target/mips: Decode microMIPS EVA load & store instructions
  target/mips: Decode MIPS32 EVA load & store instructions
  target/mips: Prepare loads/stores for EVA
  target/mips: Add CP0_Ebase.WG (write gate) support
  target/mips: Weaken TLB flush on UX,SX,KX,ASID changes
  target/mips: Fix TLBWI shadow flush for EHINV,XI,RI
  target/mips: Fix MIPS64 MFC0 UserLocal on BE host

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ff9b547502
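The series below does two related things: it gives Status.ERL its own MMU translation regime (a fourth MMU mode next to kernel, supervisor and user), and it adds programmable segmentation control (CP0 SegCtl0..SegCtl2), which EVA builds on. As a rough orientation before the diff, here is a minimal, self-contained sketch of the new hflags-to-MMU-index mapping; the flag values are illustrative stand-ins rather than the real MIPS_HFLAG_* constants from target/mips/cpu.h.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MIPS_HFLAG_* constants in cpu.h. */
#define HFLAG_KSU 0x3u        /* 0 = kernel, 1 = supervisor, 2 = user */
#define HFLAG_ERL (1u << 28)  /* CP0 Status.ERL mirrored into hflags */

/* ERL gets its own MMU index (3), so while the error level is set the
 * low part of kuseg behaves as an unmapped window regardless of the
 * KSU mode bits. */
static int hflags_to_mmu_index(uint32_t hflags)
{
    if (hflags & HFLAG_ERL) {
        return 3;                        /* the new _error MMU mode */
    }
    return (int)(hflags & HFLAG_KSU);    /* 0..2, as before the series */
}

int main(void)
{
    printf("user:     %d\n", hflags_to_mmu_index(0x2u));
    printf("user+ERL: %d\n", hflags_to_mmu_index(0x2u | HFLAG_ERL));
    return 0;
}
```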
@@ -134,7 +134,7 @@ struct CPUMIPSFPUContext {
 #define FP_UNIMPLEMENTED 32
 };

-#define NB_MMU_MODES 3
+#define NB_MMU_MODES 4
 #define TARGET_INSN_START_EXTRA_WORDS 2

 typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
@@ -306,6 +306,36 @@ struct CPUMIPSState {
 #define CP0PG_XIE 30
 #define CP0PG_ELPA 29
 #define CP0PG_IEC 27
+    target_ulong CP0_SegCtl0;
+    target_ulong CP0_SegCtl1;
+    target_ulong CP0_SegCtl2;
+#define CP0SC_PA 9
+#define CP0SC_PA_MASK (0x7FULL << CP0SC_PA)
+#define CP0SC_PA_1GMASK (0x7EULL << CP0SC_PA)
+#define CP0SC_AM 4
+#define CP0SC_AM_MASK (0x7ULL << CP0SC_AM)
+#define CP0SC_AM_UK 0ULL
+#define CP0SC_AM_MK 1ULL
+#define CP0SC_AM_MSK 2ULL
+#define CP0SC_AM_MUSK 3ULL
+#define CP0SC_AM_MUSUK 4ULL
+#define CP0SC_AM_USK 5ULL
+#define CP0SC_AM_UUSK 7ULL
+#define CP0SC_EU 3
+#define CP0SC_EU_MASK (1ULL << CP0SC_EU)
+#define CP0SC_C 0
+#define CP0SC_C_MASK (0x7ULL << CP0SC_C)
+#define CP0SC_MASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
+                    CP0SC_PA_MASK)
+#define CP0SC_1GMASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
+                      CP0SC_PA_1GMASK)
+#define CP0SC0_MASK (CP0SC_MASK | (CP0SC_MASK << 16))
+#define CP0SC1_XAM 59
+#define CP0SC1_XAM_MASK (0x7ULL << CP0SC1_XAM)
+#define CP0SC1_MASK (CP0SC_MASK | (CP0SC_MASK << 16) | CP0SC1_XAM_MASK)
+#define CP0SC2_XR 56
+#define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR)
+#define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK)
     int32_t CP0_Wired;
     int32_t CP0_SRSConf0_rw_bitmask;
     int32_t CP0_SRSConf0;
@@ -399,7 +429,9 @@ struct CPUMIPSState {
 #define CP0Ca_EC 2
     target_ulong CP0_EPC;
     int32_t CP0_PRid;
-    int32_t CP0_EBase;
+    target_ulong CP0_EBase;
+    target_ulong CP0_EBaseWG_rw_bitmask;
+#define CP0EBase_WG 11
     target_ulong CP0_CMGCRBase;
     int32_t CP0_Config0;
 #define CP0C0_M 31
@@ -447,6 +479,7 @@ struct CPUMIPSState {
 #define CP0C3_MSAP 28
 #define CP0C3_BP 27
 #define CP0C3_BI 26
+#define CP0C3_SC 25
 #define CP0C3_IPLW 21
 #define CP0C3_MMAR 18
 #define CP0C3_MCU 17
@@ -548,7 +581,7 @@ struct CPUMIPSState {
 #define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
     uint32_t hflags; /* CPU State */
 /* TMASK defines different execution modes */
-#define MIPS_HFLAG_TMASK 0xF5807FF
+#define MIPS_HFLAG_TMASK 0x1F5807FF
 #define MIPS_HFLAG_MODE 0x00007 /* execution modes */
     /* The KSU flags must be the lowest bits in hflags. The flag order
        must be the same as defined for CP0 Status. This allows to use
@@ -598,6 +631,7 @@ struct CPUMIPSState {
 #define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */
 #define MIPS_HFLAG_ELPA 0x4000000
 #define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr. operates on ITC tag */
+#define MIPS_HFLAG_ERL 0x10000000 /* error level flag */
     target_ulong btarget;        /* Jump / branch target               */
     target_ulong bcond;          /* Branch condition (if needed)       */

@@ -695,10 +729,21 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _super
 #define MMU_MODE2_SUFFIX _user
+#define MMU_MODE3_SUFFIX _error
 #define MMU_USER_IDX 2

+static inline int hflags_mmu_index(uint32_t hflags)
+{
+    if (hflags & MIPS_HFLAG_ERL) {
+        return 3; /* ERL */
+    } else {
+        return hflags & MIPS_HFLAG_KSU;
+    }
+}
+
 static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
 {
-    return env->hflags & MIPS_HFLAG_KSU;
+    return hflags_mmu_index(env->hflags);
 }

 static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
@@ -962,7 +1007,10 @@ static inline void compute_hflags(CPUMIPSState *env)
                      MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                      MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
                      MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
-                     MIPS_HFLAG_ELPA);
+                     MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
+    if (env->CP0_Status & (1 << CP0St_ERL)) {
+        env->hflags |= MIPS_HFLAG_ERL;
+    }
     if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
         !(env->CP0_Status & (1 << CP0St_ERL)) &&
         !(env->hflags & MIPS_HFLAG_DM)) {
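Each of the new CP0_SegCtl registers packs two 16-bit segment configurations, and each configuration holds a physical address field (PA), an access control mode (AM), an error-condition behaviour bit (EU) and a cache coherency attribute (C) at the bit positions defined above. The helper code later in the diff extracts these fields before deciding whether a segment is TLB mapped or a fixed window; below is a minimal sketch of that decoding, assuming the CP0SC_* values above are copied into a standalone file.

```c
#include <stdint.h>
#include <stdio.h>

/* Field positions/masks copied from the CP0SC_* definitions above. */
#define CP0SC_PA      9
#define CP0SC_PA_MASK (0x7FULL << CP0SC_PA)
#define CP0SC_AM      4
#define CP0SC_AM_MASK (0x7ULL << CP0SC_AM)
#define CP0SC_EU      3
#define CP0SC_C       0
#define CP0SC_C_MASK  (0x7ULL << CP0SC_C)

int main(void)
{
    /* Example value: kseg0-like segment, AM = UK (0), cacheable (C = 3). */
    uint16_t segctl = (0 << CP0SC_PA) | (0 << CP0SC_AM) | (3 << CP0SC_C);

    unsigned am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;   /* access mode */
    unsigned eu = (segctl >> CP0SC_EU) & 1;               /* ERL behaviour */
    unsigned c  = (segctl & CP0SC_C_MASK) >> CP0SC_C;     /* cache attribute */
    /* The PA field supplies high physical address bits, hence the << 20. */
    uint64_t pa = ((uint64_t)segctl & CP0SC_PA_MASK) << 20;

    printf("am=%u eu=%u c=%u pa=0x%llx\n", am, eu, c, (unsigned long long)pa);
    return 0;
}
```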
@ -107,15 +107,107 @@ int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
|
||||
return TLBRET_NOMATCH;
|
||||
}
|
||||
|
||||
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
|
||||
{
|
||||
/*
|
||||
* Interpret access control mode and mmu_idx.
|
||||
* AdE? TLB?
|
||||
* AM K S U E K S U E
|
||||
* UK 0 0 1 1 0 0 - - 0
|
||||
* MK 1 0 1 1 0 1 - - !eu
|
||||
* MSK 2 0 0 1 0 1 1 - !eu
|
||||
* MUSK 3 0 0 0 0 1 1 1 !eu
|
||||
* MUSUK 4 0 0 0 0 0 1 1 0
|
||||
* USK 5 0 0 1 0 0 0 - 0
|
||||
* - 6 - - - - - - - -
|
||||
* UUSK 7 0 0 0 0 0 0 0 0
|
||||
*/
|
||||
int32_t adetlb_mask;
|
||||
|
||||
switch (mmu_idx) {
|
||||
case 3 /* ERL */:
|
||||
/* If EU is set, always unmapped */
|
||||
if (eu) {
|
||||
return 0;
|
||||
}
|
||||
/* fall through */
|
||||
case MIPS_HFLAG_KM:
|
||||
/* Never AdE, TLB mapped if AM={1,2,3} */
|
||||
adetlb_mask = 0x70000000;
|
||||
goto check_tlb;
|
||||
|
||||
case MIPS_HFLAG_SM:
|
||||
/* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
|
||||
adetlb_mask = 0xc0380000;
|
||||
goto check_ade;
|
||||
|
||||
case MIPS_HFLAG_UM:
|
||||
/* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
|
||||
adetlb_mask = 0xe4180000;
|
||||
/* fall through */
|
||||
check_ade:
|
||||
/* does this AM cause AdE in current execution mode */
|
||||
if ((adetlb_mask << am) < 0) {
|
||||
return TLBRET_BADADDR;
|
||||
}
|
||||
adetlb_mask <<= 8;
|
||||
/* fall through */
|
||||
check_tlb:
|
||||
/* is this AM mapped in current execution mode */
|
||||
return ((adetlb_mask << am) < 0);
|
||||
default:
|
||||
assert(0);
|
||||
return TLBRET_BADADDR;
|
||||
};
|
||||
}
|
||||
|
||||
static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
|
||||
int *prot, target_ulong real_address,
|
||||
int rw, int access_type, int mmu_idx,
|
||||
unsigned int am, bool eu,
|
||||
target_ulong segmask,
|
||||
hwaddr physical_base)
|
||||
{
|
||||
int mapped = is_seg_am_mapped(am, eu, mmu_idx);
|
||||
|
||||
if (mapped < 0) {
|
||||
/* is_seg_am_mapped can report TLBRET_BADADDR */
|
||||
return mapped;
|
||||
} else if (mapped) {
|
||||
/* The segment is TLB mapped */
|
||||
return env->tlb->map_address(env, physical, prot, real_address, rw,
|
||||
access_type);
|
||||
} else {
|
||||
/* The segment is unmapped */
|
||||
*physical = physical_base | (real_address & segmask);
|
||||
*prot = PAGE_READ | PAGE_WRITE;
|
||||
return TLBRET_MATCH;
|
||||
}
|
||||
}
|
||||
|
||||
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
|
||||
int *prot, target_ulong real_address,
|
||||
int rw, int access_type, int mmu_idx,
|
||||
uint16_t segctl, target_ulong segmask)
|
||||
{
|
||||
unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
|
||||
bool eu = (segctl >> CP0SC_EU) & 1;
|
||||
hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
|
||||
|
||||
return get_seg_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx, am, eu, segmask,
|
||||
pa & ~(hwaddr)segmask);
|
||||
}
|
||||
|
||||
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
|
||||
int *prot, target_ulong real_address,
|
||||
int rw, int access_type)
|
||||
int rw, int access_type, int mmu_idx)
|
||||
{
|
||||
/* User mode can only access useg/xuseg */
|
||||
int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM;
|
||||
int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM;
|
||||
int kernel_mode = !user_mode && !supervisor_mode;
|
||||
#if defined(TARGET_MIPS64)
|
||||
int user_mode = mmu_idx == MIPS_HFLAG_UM;
|
||||
int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
|
||||
int kernel_mode = !user_mode && !supervisor_mode;
|
||||
int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
|
||||
int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
|
||||
int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
|
||||
@ -148,12 +240,16 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
|
||||
|
||||
if (address <= USEG_LIMIT) {
|
||||
/* useg */
|
||||
if (env->CP0_Status & (1 << CP0St_ERL)) {
|
||||
*physical = address & 0xFFFFFFFF;
|
||||
*prot = PAGE_READ | PAGE_WRITE;
|
||||
uint16_t segctl;
|
||||
|
||||
if (address >= 0x40000000UL) {
|
||||
segctl = env->CP0_SegCtl2;
|
||||
} else {
|
||||
ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
|
||||
segctl = env->CP0_SegCtl2 >> 16;
|
||||
}
|
||||
ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx, segctl,
|
||||
0x3FFFFFFF);
|
||||
#if defined(TARGET_MIPS64)
|
||||
} else if (address < 0x4000000000000000ULL) {
|
||||
/* xuseg */
|
||||
@ -172,10 +268,33 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
|
||||
}
|
||||
} else if (address < 0xC000000000000000ULL) {
|
||||
/* xkphys */
|
||||
if (kernel_mode && KX &&
|
||||
(address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
|
||||
*physical = address & env->PAMask;
|
||||
*prot = PAGE_READ | PAGE_WRITE;
|
||||
if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
|
||||
/* KX/SX/UX bit to check for each xkphys EVA access mode */
|
||||
static const uint8_t am_ksux[8] = {
|
||||
[CP0SC_AM_UK] = (1u << CP0St_KX),
|
||||
[CP0SC_AM_MK] = (1u << CP0St_KX),
|
||||
[CP0SC_AM_MSK] = (1u << CP0St_SX),
|
||||
[CP0SC_AM_MUSK] = (1u << CP0St_UX),
|
||||
[CP0SC_AM_MUSUK] = (1u << CP0St_UX),
|
||||
[CP0SC_AM_USK] = (1u << CP0St_SX),
|
||||
[6] = (1u << CP0St_KX),
|
||||
[CP0SC_AM_UUSK] = (1u << CP0St_UX),
|
||||
};
|
||||
unsigned int am = CP0SC_AM_UK;
|
||||
unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;
|
||||
|
||||
if (xr & (1 << ((address >> 59) & 0x7))) {
|
||||
am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
|
||||
}
|
||||
/* Does CP0_Status.KX/SX/UX permit the access mode (am) */
|
||||
if (env->CP0_Status & am_ksux[am]) {
|
||||
ret = get_seg_physical_address(env, physical, prot,
|
||||
real_address, rw, access_type,
|
||||
mmu_idx, am, false, env->PAMask,
|
||||
0);
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
@ -190,35 +309,25 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
|
||||
#endif
|
||||
} else if (address < (int32_t)KSEG1_BASE) {
|
||||
/* kseg0 */
|
||||
if (kernel_mode) {
|
||||
*physical = address - (int32_t)KSEG0_BASE;
|
||||
*prot = PAGE_READ | PAGE_WRITE;
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx,
|
||||
env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
|
||||
} else if (address < (int32_t)KSEG2_BASE) {
|
||||
/* kseg1 */
|
||||
if (kernel_mode) {
|
||||
*physical = address - (int32_t)KSEG1_BASE;
|
||||
*prot = PAGE_READ | PAGE_WRITE;
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx,
|
||||
env->CP0_SegCtl1, 0x1FFFFFFF);
|
||||
} else if (address < (int32_t)KSEG3_BASE) {
|
||||
/* sseg (kseg2) */
|
||||
if (supervisor_mode || kernel_mode) {
|
||||
ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx,
|
||||
env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
|
||||
} else {
|
||||
/* kseg3 */
|
||||
/* XXX: debug segment is not emulated */
|
||||
if (kernel_mode) {
|
||||
ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
|
||||
} else {
|
||||
ret = TLBRET_BADADDR;
|
||||
}
|
||||
ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
|
||||
access_type, mmu_idx,
|
||||
env->CP0_SegCtl0, 0x1FFFFFFF);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -290,7 +399,7 @@ void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
|
||||
#if defined(TARGET_MIPS64)
|
||||
if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
|
||||
/* Access to at least one of the 64-bit segments has been disabled */
|
||||
cpu_mips_tlb_flush(env);
|
||||
tlb_flush(CPU(mips_env_get_cpu(env)));
|
||||
}
|
||||
#endif
|
||||
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
|
||||
@ -413,11 +522,12 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
|
||||
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
||||
{
|
||||
MIPSCPU *cpu = MIPS_CPU(cs);
|
||||
CPUMIPSState *env = &cpu->env;
|
||||
hwaddr phys_addr;
|
||||
int prot;
|
||||
|
||||
if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0,
|
||||
ACCESS_INT) != 0) {
|
||||
if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
|
||||
cpu_mmu_index(env, false)) != 0) {
|
||||
return -1;
|
||||
}
|
||||
return phys_addr;
|
||||
@ -449,7 +559,7 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
|
||||
correctly */
|
||||
access_type = ACCESS_INT;
|
||||
ret = get_physical_address(env, &physical, &prot,
|
||||
address, rw, access_type);
|
||||
address, rw, access_type, mmu_idx);
|
||||
switch (ret) {
|
||||
case TLBRET_MATCH:
|
||||
qemu_log_mask(CPU_LOG_MMU,
|
||||
@ -487,8 +597,8 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int r
|
||||
|
||||
/* data access */
|
||||
access_type = ACCESS_INT;
|
||||
ret = get_physical_address(env, &physical, &prot,
|
||||
address, rw, access_type);
|
||||
ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
|
||||
cpu_mmu_index(env, false));
|
||||
if (ret != TLBRET_MATCH) {
|
||||
raise_mmu_exception(env, address, rw, ret);
|
||||
return -1LL;
|
||||
@ -721,15 +831,17 @@ void mips_cpu_do_interrupt(CPUState *cs)
|
||||
#if defined(TARGET_MIPS64)
|
||||
int R = env->CP0_BadVAddr >> 62;
|
||||
int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
|
||||
int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
|
||||
int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
|
||||
|
||||
if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
|
||||
(!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
|
||||
if ((R != 0 || UX) && (R != 3 || KX) &&
|
||||
(!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
|
||||
offset = 0x080;
|
||||
else
|
||||
} else {
|
||||
#endif
|
||||
offset = 0x000;
|
||||
#if defined(TARGET_MIPS64)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
goto set_EPC;
|
||||
case EXCP_TLBS:
|
||||
@ -740,15 +852,17 @@ void mips_cpu_do_interrupt(CPUState *cs)
|
||||
#if defined(TARGET_MIPS64)
|
||||
int R = env->CP0_BadVAddr >> 62;
|
||||
int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
|
||||
int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
|
||||
int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
|
||||
|
||||
if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
|
||||
(!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
|
||||
if ((R != 0 || UX) && (R != 3 || KX) &&
|
||||
(!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
|
||||
offset = 0x080;
|
||||
else
|
||||
} else {
|
||||
#endif
|
||||
offset = 0x000;
|
||||
#if defined(TARGET_MIPS64)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
goto set_EPC;
|
||||
case EXCP_AdEL:
|
||||
@ -831,11 +945,7 @@ void mips_cpu_do_interrupt(CPUState *cs)
|
||||
goto set_EPC;
|
||||
case EXCP_CACHE:
|
||||
cause = 30;
|
||||
if (env->CP0_Status & (1 << CP0St_BEV)) {
|
||||
offset = 0x100;
|
||||
} else {
|
||||
offset = 0x20000100;
|
||||
}
|
||||
offset = 0x100;
|
||||
set_EPC:
|
||||
if (!(env->CP0_Status & (1 << CP0St_EXL))) {
|
||||
env->CP0_EPC = exception_resume_pc(env);
|
||||
@ -861,9 +971,15 @@ void mips_cpu_do_interrupt(CPUState *cs)
|
||||
env->hflags &= ~MIPS_HFLAG_BMASK;
|
||||
if (env->CP0_Status & (1 << CP0St_BEV)) {
|
||||
env->active_tc.PC = env->exception_base + 0x200;
|
||||
} else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
|
||||
env->CP0_Config5 & (1 << CP0C5_CV))) {
|
||||
/* Force KSeg1 for cache errors */
|
||||
env->active_tc.PC = (int32_t)KSEG1_BASE |
|
||||
(env->CP0_EBase & 0x1FFFF000);
|
||||
} else {
|
||||
env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff);
|
||||
env->active_tc.PC = env->CP0_EBase & ~0xfff;
|
||||
}
|
||||
|
||||
env->active_tc.PC += offset;
|
||||
set_hflags_for_handler(env);
|
||||
env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
|
||||
|
@@ -115,6 +115,9 @@ DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
 DEF_HELPER_2(mtc0_context, void, env, tl)
 DEF_HELPER_2(mtc0_pagemask, void, env, tl)
 DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
+DEF_HELPER_2(mtc0_segctl0, void, env, tl)
+DEF_HELPER_2(mtc0_segctl1, void, env, tl)
+DEF_HELPER_2(mtc0_segctl2, void, env, tl)
 DEF_HELPER_2(mtc0_wired, void, env, tl)
 DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
 DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
@@ -211,8 +211,8 @@ const VMStateDescription vmstate_tlb = {

 const VMStateDescription vmstate_mips_cpu = {
     .name = "cpu",
-    .version_id = 8,
-    .minimum_version_id = 8,
+    .version_id = 10,
+    .minimum_version_id = 10,
     .post_load = cpu_post_load,
     .fields = (VMStateField[]) {
         /* Active TC */
@@ -252,6 +252,9 @@ const VMStateDescription vmstate_mips_cpu = {
         VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
         VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
         VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
+        VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU),
+        VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU),
+        VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU),
         VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
         VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
         VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
@@ -272,7 +275,7 @@ const VMStateDescription vmstate_mips_cpu = {
         VMSTATE_INT32(env.CP0_Cause, MIPSCPU),
         VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU),
         VMSTATE_INT32(env.CP0_PRid, MIPSCPU),
-        VMSTATE_INT32(env.CP0_EBase, MIPSCPU),
+        VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU),
         VMSTATE_INT32(env.CP0_Config0, MIPSCPU),
         VMSTATE_INT32(env.CP0_Config1, MIPSCPU),
         VMSTATE_INT32(env.CP0_Config2, MIPSCPU),
@@ -67,6 +67,7 @@ static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
     case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
     default: \
     case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
+    case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
     } \
 }
 #endif
@@ -94,6 +95,9 @@ static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
     case 1: cpu_##insn##_super_ra(env, addr, val, retaddr); break; \
     default: \
     case 2: cpu_##insn##_user_ra(env, addr, val, retaddr); break; \
+    case 3: \
+        cpu_##insn##_error_ra(env, addr, val, retaddr); \
+        break; \
     } \
 }
 #endif
@@ -1318,6 +1322,30 @@ void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
     restore_pamask(env);
 }

+void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
+{
+    CPUState *cs = CPU(mips_env_get_cpu(env));
+
+    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
+    tlb_flush(cs);
+}
+
+void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
+{
+    CPUState *cs = CPU(mips_env_get_cpu(env));
+
+    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
+    tlb_flush(cs);
+}
+
+void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
+{
+    CPUState *cs = CPU(mips_env_get_cpu(env));
+
+    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
+    tlb_flush(cs);
+}
+
 void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
 {
     if (env->insn_flags & ISA_MIPS32R6) {
@@ -1416,7 +1444,7 @@ void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
     /* If the ASID changes, flush qemu's TLB. */
     if ((old & env->CP0_EntryHi_ASID_mask) !=
         (val & env->CP0_EntryHi_ASID_mask)) {
-        cpu_mips_tlb_flush(env);
+        tlb_flush(CPU(mips_env_get_cpu(env)));
     }
 }

@@ -1450,7 +1478,10 @@ void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
                  old, old & env->CP0_Cause & CP0Ca_IP_mask,
                  val, val & env->CP0_Cause & CP0Ca_IP_mask,
                  env->CP0_Cause);
-        switch (env->hflags & MIPS_HFLAG_KSU) {
+        switch (cpu_mmu_index(env, false)) {
+        case 3:
+            qemu_log(", ERL\n");
+            break;
         case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
         case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
         case MIPS_HFLAG_KM: qemu_log("\n"); break;
@@ -1515,14 +1546,22 @@ target_ulong helper_mftc0_ebase(CPUMIPSState *env)

 void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
 {
-    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
+    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+        mask |= ~0x3FFFFFFF;
+    }
+    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
 }

 void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
 {
     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
-    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
+    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+        mask |= ~0x3FFFFFFF;
+    }
+    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
 }

 target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
@@ -2029,7 +2068,7 @@ void r4k_helper_tlbwi(CPUMIPSState *env)
     int idx;
     target_ulong VPN;
     uint16_t ASID;
-    bool G, V0, D0, V1, D1;
+    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;

     idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
     tlb = &env->tlb->mmu.r4k.tlb[idx];
@@ -2038,17 +2077,25 @@ void r4k_helper_tlbwi(CPUMIPSState *env)
     VPN &= env->SEGMask;
 #endif
     ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
     G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
     V0 = (env->CP0_EntryLo0 & 2) != 0;
     D0 = (env->CP0_EntryLo0 & 4) != 0;
+    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
+    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
     V1 = (env->CP0_EntryLo1 & 2) != 0;
     D1 = (env->CP0_EntryLo1 & 4) != 0;
+    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
+    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;

     /* Discard cached TLB entries, unless tlbwi is just upgrading access
        permissions on the current entry. */
     if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
+        (!tlb->EHINV && EHINV) ||
         (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
-        (tlb->V1 && !V1) || (tlb->D1 && !D1)) {
+        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
+        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
+        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
         r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
     }

@@ -2228,7 +2275,10 @@ static void debug_post_eret(CPUMIPSState *env)
     qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
     if (env->hflags & MIPS_HFLAG_DM)
         qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
-    switch (env->hflags & MIPS_HFLAG_KSU) {
+    switch (cpu_mmu_index(env, false)) {
+    case 3:
+        qemu_log(", ERL\n");
+        break;
     case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
     case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
     case MIPS_HFLAG_KM: qemu_log("\n"); break;
@ -427,6 +427,24 @@ enum {
|
||||
OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3,
|
||||
OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3,
|
||||
|
||||
/* EVA */
|
||||
OPC_LWLE = 0x19 | OPC_SPECIAL3,
|
||||
OPC_LWRE = 0x1A | OPC_SPECIAL3,
|
||||
OPC_CACHEE = 0x1B | OPC_SPECIAL3,
|
||||
OPC_SBE = 0x1C | OPC_SPECIAL3,
|
||||
OPC_SHE = 0x1D | OPC_SPECIAL3,
|
||||
OPC_SCE = 0x1E | OPC_SPECIAL3,
|
||||
OPC_SWE = 0x1F | OPC_SPECIAL3,
|
||||
OPC_SWLE = 0x21 | OPC_SPECIAL3,
|
||||
OPC_SWRE = 0x22 | OPC_SPECIAL3,
|
||||
OPC_PREFE = 0x23 | OPC_SPECIAL3,
|
||||
OPC_LBUE = 0x28 | OPC_SPECIAL3,
|
||||
OPC_LHUE = 0x29 | OPC_SPECIAL3,
|
||||
OPC_LBE = 0x2C | OPC_SPECIAL3,
|
||||
OPC_LHE = 0x2D | OPC_SPECIAL3,
|
||||
OPC_LLE = 0x2E | OPC_SPECIAL3,
|
||||
OPC_LWE = 0x2F | OPC_SPECIAL3,
|
||||
|
||||
/* R6 */
|
||||
R6_OPC_PREF = 0x35 | OPC_SPECIAL3,
|
||||
R6_OPC_CACHE = 0x25 | OPC_SPECIAL3,
|
||||
@ -1431,6 +1449,8 @@ typedef struct DisasContext {
|
||||
bool bp;
|
||||
uint64_t PAMask;
|
||||
bool mvh;
|
||||
bool eva;
|
||||
bool sc;
|
||||
int CP0_LLAddr_shift;
|
||||
bool ps;
|
||||
bool vp;
|
||||
@ -2029,7 +2049,8 @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
|
||||
/* load/store instructions. */
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define OP_LD_ATOMIC(insn,fname) \
|
||||
static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
|
||||
static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
|
||||
DisasContext *ctx) \
|
||||
{ \
|
||||
TCGv t0 = tcg_temp_new(); \
|
||||
tcg_gen_mov_tl(t0, arg1); \
|
||||
@ -2040,9 +2061,10 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
|
||||
}
|
||||
#else
|
||||
#define OP_LD_ATOMIC(insn,fname) \
|
||||
static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
|
||||
static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
|
||||
DisasContext *ctx) \
|
||||
{ \
|
||||
gen_helper_1e1i(insn, ret, arg1, ctx->mem_idx); \
|
||||
gen_helper_1e1i(insn, ret, arg1, mem_idx); \
|
||||
}
|
||||
#endif
|
||||
OP_LD_ATOMIC(ll,ld32s);
|
||||
@ -2053,7 +2075,8 @@ OP_LD_ATOMIC(lld,ld64);
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
|
||||
static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
|
||||
static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, int mem_idx, \
|
||||
DisasContext *ctx) \
|
||||
{ \
|
||||
TCGv t0 = tcg_temp_new(); \
|
||||
TCGLabel *l1 = gen_new_label(); \
|
||||
@ -2077,10 +2100,11 @@ static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx)
|
||||
}
|
||||
#else
|
||||
#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
|
||||
static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
|
||||
static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, int mem_idx, \
|
||||
DisasContext *ctx) \
|
||||
{ \
|
||||
TCGv t0 = tcg_temp_new(); \
|
||||
gen_helper_1e2i(insn, t0, arg1, arg2, ctx->mem_idx); \
|
||||
gen_helper_1e2i(insn, t0, arg1, arg2, mem_idx); \
|
||||
gen_store_gpr(t0, rt); \
|
||||
tcg_temp_free(t0); \
|
||||
}
|
||||
@ -2123,6 +2147,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
int rt, int base, int16_t offset)
|
||||
{
|
||||
TCGv t0, t1, t2;
|
||||
int mem_idx = ctx->mem_idx;
|
||||
|
||||
if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) {
|
||||
/* Loongson CPU uses a load to zero register for prefetch.
|
||||
@ -2137,32 +2162,32 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
switch (opc) {
|
||||
#if defined(TARGET_MIPS64)
|
||||
case OPC_LWU:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL |
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL |
|
||||
ctx->default_tcg_memop_mask);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LD:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ |
|
||||
ctx->default_tcg_memop_mask);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LLD:
|
||||
case R6_OPC_LLD:
|
||||
op_ld_lld(t0, t0, ctx);
|
||||
op_ld_lld(t0, t0, mem_idx, ctx);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LDL:
|
||||
t1 = tcg_temp_new();
|
||||
/* Do a byte access to possibly trigger a page
|
||||
fault with the unaligned address. */
|
||||
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
|
||||
tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
|
||||
tcg_gen_andi_tl(t1, t0, 7);
|
||||
#ifndef TARGET_WORDS_BIGENDIAN
|
||||
tcg_gen_xori_tl(t1, t1, 7);
|
||||
#endif
|
||||
tcg_gen_shli_tl(t1, t1, 3);
|
||||
tcg_gen_andi_tl(t0, t0, ~7);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
|
||||
tcg_gen_shl_tl(t0, t0, t1);
|
||||
t2 = tcg_const_tl(-1);
|
||||
tcg_gen_shl_tl(t2, t2, t1);
|
||||
@ -2177,14 +2202,14 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
t1 = tcg_temp_new();
|
||||
/* Do a byte access to possibly trigger a page
|
||||
fault with the unaligned address. */
|
||||
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
|
||||
tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
|
||||
tcg_gen_andi_tl(t1, t0, 7);
|
||||
#ifdef TARGET_WORDS_BIGENDIAN
|
||||
tcg_gen_xori_tl(t1, t1, 7);
|
||||
#endif
|
||||
tcg_gen_shli_tl(t1, t1, 3);
|
||||
tcg_gen_andi_tl(t0, t0, ~7);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
|
||||
tcg_gen_shr_tl(t0, t0, t1);
|
||||
tcg_gen_xori_tl(t1, t1, 63);
|
||||
t2 = tcg_const_tl(0xfffffffffffffffeull);
|
||||
@ -2200,7 +2225,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
t1 = tcg_const_tl(pc_relative_pc(ctx));
|
||||
gen_op_addr_add(ctx, t0, t0, t1);
|
||||
tcg_temp_free(t1);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
#endif
|
||||
@ -2208,44 +2233,62 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
t1 = tcg_const_tl(pc_relative_pc(ctx));
|
||||
gen_op_addr_add(ctx, t0, t0, t1);
|
||||
tcg_temp_free(t1);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LWE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LW:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL |
|
||||
ctx->default_tcg_memop_mask);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LHE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LH:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW |
|
||||
ctx->default_tcg_memop_mask);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LHUE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LHU:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUW |
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW |
|
||||
ctx->default_tcg_memop_mask);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LBE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LB:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_SB);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LBUE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LBU:
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_UB);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LWLE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LWL:
|
||||
t1 = tcg_temp_new();
|
||||
/* Do a byte access to possibly trigger a page
|
||||
fault with the unaligned address. */
|
||||
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
|
||||
tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
|
||||
tcg_gen_andi_tl(t1, t0, 3);
|
||||
#ifndef TARGET_WORDS_BIGENDIAN
|
||||
tcg_gen_xori_tl(t1, t1, 3);
|
||||
#endif
|
||||
tcg_gen_shli_tl(t1, t1, 3);
|
||||
tcg_gen_andi_tl(t0, t0, ~3);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
|
||||
tcg_gen_shl_tl(t0, t0, t1);
|
||||
t2 = tcg_const_tl(-1);
|
||||
tcg_gen_shl_tl(t2, t2, t1);
|
||||
@ -2257,18 +2300,21 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
tcg_gen_ext32s_tl(t0, t0);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LWRE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LWR:
|
||||
t1 = tcg_temp_new();
|
||||
/* Do a byte access to possibly trigger a page
|
||||
fault with the unaligned address. */
|
||||
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
|
||||
tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
|
||||
tcg_gen_andi_tl(t1, t0, 3);
|
||||
#ifdef TARGET_WORDS_BIGENDIAN
|
||||
tcg_gen_xori_tl(t1, t1, 3);
|
||||
#endif
|
||||
tcg_gen_shli_tl(t1, t1, 3);
|
||||
tcg_gen_andi_tl(t0, t0, ~3);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
|
||||
tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
|
||||
tcg_gen_shr_tl(t0, t0, t1);
|
||||
tcg_gen_xori_tl(t1, t1, 31);
|
||||
t2 = tcg_const_tl(0xfffffffeull);
|
||||
@ -2281,9 +2327,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
|
||||
tcg_gen_ext32s_tl(t0, t0);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
case OPC_LLE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_LL:
|
||||
case R6_OPC_LL:
|
||||
op_ld_ll(t0, t0, ctx);
|
||||
op_ld_ll(t0, t0, mem_idx, ctx);
|
||||
gen_store_gpr(t0, rt);
|
||||
break;
|
||||
}
|
||||
@ -2296,38 +2345,54 @@ static void gen_st (DisasContext *ctx, uint32_t opc, int rt,
|
||||
{
|
||||
TCGv t0 = tcg_temp_new();
|
||||
TCGv t1 = tcg_temp_new();
|
||||
int mem_idx = ctx->mem_idx;
|
||||
|
||||
gen_base_offset_addr(ctx, t0, base, offset);
|
||||
gen_load_gpr(t1, rt);
|
||||
switch (opc) {
|
||||
#if defined(TARGET_MIPS64)
|
||||
case OPC_SD:
|
||||
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
|
||||
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
|
||||
ctx->default_tcg_memop_mask);
|
||||
break;
|
||||
case OPC_SDL:
|
||||
gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
|
||||
gen_helper_0e2i(sdl, t1, t0, mem_idx);
|
||||
break;
|
||||
case OPC_SDR:
|
||||
gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
|
||||
gen_helper_0e2i(sdr, t1, t0, mem_idx);
|
||||
break;
|
||||
#endif
|
||||
case OPC_SWE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SW:
|
||||
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
|
||||
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
|
||||
ctx->default_tcg_memop_mask);
|
||||
break;
|
||||
case OPC_SHE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SH:
|
||||
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
|
||||
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
|
||||
ctx->default_tcg_memop_mask);
|
||||
break;
|
||||
case OPC_SBE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SB:
|
||||
tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_8);
|
||||
tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
|
||||
break;
|
||||
case OPC_SWLE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SWL:
|
||||
gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
|
||||
gen_helper_0e2i(swl, t1, t0, mem_idx);
|
||||
break;
|
||||
case OPC_SWRE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SWR:
|
||||
gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
|
||||
gen_helper_0e2i(swr, t1, t0, mem_idx);
|
||||
break;
|
||||
}
|
||||
tcg_temp_free(t0);
|
||||
@ -2340,6 +2405,7 @@ static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
|
||||
int base, int16_t offset)
|
||||
{
|
||||
TCGv t0, t1;
|
||||
int mem_idx = ctx->mem_idx;
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
t0 = tcg_temp_local_new();
|
||||
@ -2354,12 +2420,15 @@ static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
|
||||
#if defined(TARGET_MIPS64)
|
||||
case OPC_SCD:
|
||||
case R6_OPC_SCD:
|
||||
op_st_scd(t1, t0, rt, ctx);
|
||||
op_st_scd(t1, t0, rt, mem_idx, ctx);
|
||||
break;
|
||||
#endif
|
||||
case OPC_SCE:
|
||||
mem_idx = MIPS_HFLAG_UM;
|
||||
/* fall through */
|
||||
case OPC_SC:
|
||||
case R6_OPC_SC:
|
||||
op_st_sc(t1, t0, rt, ctx);
|
||||
op_st_sc(t1, t0, rt, mem_idx, ctx);
|
||||
break;
|
||||
}
|
||||
tcg_temp_free(t1);
|
||||
@ -5144,8 +5213,9 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
goto cp0_unimplemented;
|
||||
case 2:
|
||||
CP0_CHECK(ctx->ulri);
|
||||
tcg_gen_ld32s_tl(arg, cpu_env,
|
||||
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
|
||||
tcg_gen_ld_tl(arg, cpu_env,
|
||||
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
|
||||
tcg_gen_ext32s_tl(arg, arg);
|
||||
rn = "UserLocal";
|
||||
break;
|
||||
default:
|
||||
@ -5163,6 +5233,24 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
|
||||
rn = "PageGrain";
|
||||
break;
|
||||
case 2:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
|
||||
tcg_gen_ext32s_tl(arg, arg);
|
||||
rn = "SegCtl0";
|
||||
break;
|
||||
case 3:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
|
||||
tcg_gen_ext32s_tl(arg, arg);
|
||||
rn = "SegCtl1";
|
||||
break;
|
||||
case 4:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
|
||||
tcg_gen_ext32s_tl(arg, arg);
|
||||
rn = "SegCtl2";
|
||||
break;
|
||||
default:
|
||||
goto cp0_unimplemented;
|
||||
}
|
||||
@ -5331,7 +5419,8 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
break;
|
||||
case 1:
|
||||
check_insn(ctx, ISA_MIPS32R2);
|
||||
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
|
||||
tcg_gen_ext32s_tl(arg, arg);
|
||||
rn = "EBase";
|
||||
break;
|
||||
case 3:
|
||||
@ -5816,6 +5905,21 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
rn = "PageGrain";
|
||||
ctx->bstate = BS_STOP;
|
||||
break;
|
||||
case 2:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl0(cpu_env, arg);
|
||||
rn = "SegCtl0";
|
||||
break;
|
||||
case 3:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl1(cpu_env, arg);
|
||||
rn = "SegCtl1";
|
||||
break;
|
||||
case 4:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl2(cpu_env, arg);
|
||||
rn = "SegCtl2";
|
||||
break;
|
||||
default:
|
||||
goto cp0_unimplemented;
|
||||
}
|
||||
@ -6477,6 +6581,21 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
|
||||
rn = "PageGrain";
|
||||
break;
|
||||
case 2:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
|
||||
rn = "SegCtl0";
|
||||
break;
|
||||
case 3:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
|
||||
rn = "SegCtl1";
|
||||
break;
|
||||
case 4:
|
||||
CP0_CHECK(ctx->sc);
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
|
||||
rn = "SegCtl2";
|
||||
break;
|
||||
default:
|
||||
goto cp0_unimplemented;
|
||||
}
|
||||
@ -6642,7 +6761,7 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
break;
|
||||
case 1:
|
||||
check_insn(ctx, ISA_MIPS32R2);
|
||||
gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
|
||||
tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
|
||||
rn = "EBase";
|
||||
break;
|
||||
case 3:
|
||||
@ -7113,6 +7232,21 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
|
||||
gen_helper_mtc0_pagegrain(cpu_env, arg);
|
||||
rn = "PageGrain";
|
||||
break;
|
||||
case 2:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl0(cpu_env, arg);
|
||||
rn = "SegCtl0";
|
||||
break;
|
||||
case 3:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl1(cpu_env, arg);
|
||||
rn = "SegCtl1";
|
||||
break;
|
||||
case 4:
|
||||
CP0_CHECK(ctx->sc);
|
||||
gen_helper_mtc0_segctl2(cpu_env, arg);
|
||||
rn = "SegCtl2";
|
||||
break;
|
||||
default:
|
||||
goto cp0_unimplemented;
|
||||
}
|
||||
@ -12449,19 +12583,45 @@ enum {
|
||||
LWR = 0x1,
|
||||
SWR = 0x9,
|
||||
PREF = 0x2,
|
||||
/* 0xa is reserved */
|
||||
ST_EVA = 0xa,
|
||||
LL = 0x3,
|
||||
SC = 0xb,
|
||||
LDL = 0x4,
|
||||
SDL = 0xc,
|
||||
LDR = 0x5,
|
||||
SDR = 0xd,
|
||||
/* 0x6 is reserved */
|
||||
LD_EVA = 0x6,
|
||||
LWU = 0xe,
|
||||
LLD = 0x7,
|
||||
SCD = 0xf
|
||||
};
|
||||
|
||||
/* POOL32C LD-EVA encoding of minor opcode field (bits 11..9) */
|
||||
|
||||
enum {
|
||||
LBUE = 0x0,
|
||||
LHUE = 0x1,
|
||||
LWLE = 0x2,
|
||||
LWRE = 0x3,
|
||||
LBE = 0x4,
|
||||
LHE = 0x5,
|
||||
LLE = 0x6,
|
||||
LWE = 0x7,
|
||||
};
|
||||
|
||||
/* POOL32C ST-EVA encoding of minor opcode field (bits 11..9) */
|
||||
|
||||
enum {
|
||||
SWLE = 0x0,
|
||||
SWRE = 0x1,
|
||||
PREFE = 0x2,
|
||||
CACHEE = 0x3,
|
||||
SBE = 0x4,
|
||||
SHE = 0x5,
|
||||
SCE = 0x6,
|
||||
SWE = 0x7,
|
||||
};
|
||||
|
||||
/* POOL32F encoding of minor opcode field (bits 5..0) */
|
||||
|
||||
enum {
|
||||
@ -13762,7 +13922,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
|
||||
uint16_t insn;
|
||||
int rt, rs, rd, rr;
|
||||
int16_t imm;
|
||||
uint32_t op, minor, mips32_op;
|
||||
uint32_t op, minor, minor2, mips32_op;
|
||||
uint32_t cond, fmt, cc;
|
||||
|
||||
insn = cpu_lduw_code(env, ctx->pc + 2);
|
||||
@ -14707,7 +14867,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
|
||||
gen_ld(ctx, mips32_op, rt, rs, offset);
|
||||
break;
|
||||
do_st_lr:
|
||||
gen_st(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12));
|
||||
gen_st(ctx, mips32_op, rt, rs, offset);
|
||||
break;
|
||||
case SC:
|
||||
gen_st_cond(ctx, OPC_SC, rt, rs, offset);
|
||||
@ -14719,6 +14879,91 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
|
||||
gen_st_cond(ctx, OPC_SCD, rt, rs, offset);
|
||||
break;
|
||||
#endif
|
||||
case LD_EVA:
|
||||
if (!ctx->eva) {
|
||||
MIPS_INVAL("pool32c ld-eva");
|
||||
generate_exception_end(ctx, EXCP_RI);
|
||||
break;
|
||||
}
|
||||
check_cp0_enabled(ctx);
|
||||
|
||||
minor2 = (ctx->opcode >> 9) & 0x7;
|
||||
offset = sextract32(ctx->opcode, 0, 9);
|
||||
switch (minor2) {
|
||||
case LBUE:
|
||||
mips32_op = OPC_LBUE;
|
||||
goto do_ld_lr;
|
||||
case LHUE:
|
||||
mips32_op = OPC_LHUE;
|
||||
goto do_ld_lr;
|
||||
case LWLE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
mips32_op = OPC_LWLE;
|
||||
goto do_ld_lr;
|
||||
case LWRE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
mips32_op = OPC_LWRE;
|
||||
goto do_ld_lr;
|
||||
case LBE:
|
||||
mips32_op = OPC_LBE;
|
||||
goto do_ld_lr;
|
||||
case LHE:
|
||||
mips32_op = OPC_LHE;
|
||||
goto do_ld_lr;
|
||||
case LLE:
|
||||
mips32_op = OPC_LLE;
|
||||
goto do_ld_lr;
|
||||
case LWE:
|
||||
mips32_op = OPC_LWE;
|
||||
goto do_ld_lr;
|
||||
};
|
||||
break;
|
||||
case ST_EVA:
|
||||
if (!ctx->eva) {
|
||||
MIPS_INVAL("pool32c st-eva");
|
||||
generate_exception_end(ctx, EXCP_RI);
|
||||
break;
|
||||
}
|
||||
check_cp0_enabled(ctx);
|
||||
|
||||
minor2 = (ctx->opcode >> 9) & 0x7;
|
||||
offset = sextract32(ctx->opcode, 0, 9);
|
||||
switch (minor2) {
|
||||
case SWLE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
mips32_op = OPC_SWLE;
|
||||
goto do_st_lr;
|
||||
case SWRE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
mips32_op = OPC_SWRE;
|
||||
goto do_st_lr;
|
||||
case PREFE:
|
||||
/* Treat as no-op */
|
||||
if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) {
|
||||
/* hint codes 24-31 are reserved and signal RI */
|
||||
generate_exception(ctx, EXCP_RI);
|
||||
}
|
||||
break;
|
||||
case CACHEE:
|
||||
/* Treat as no-op */
|
||||
if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
|
||||
gen_cache_operation(ctx, rt, rs, offset);
|
||||
}
|
||||
break;
|
||||
case SBE:
|
||||
mips32_op = OPC_SBE;
|
||||
goto do_st_lr;
|
||||
case SHE:
|
||||
mips32_op = OPC_SHE;
|
||||
goto do_st_lr;
|
||||
case SCE:
|
||||
gen_st_cond(ctx, OPC_SCE, rt, rs, offset);
|
||||
break;
|
||||
case SWE:
|
||||
mips32_op = OPC_SWE;
|
||||
goto do_st_lr;
|
||||
};
|
||||
break;
|
||||
case PREF:
|
||||
/* Treat as no-op */
|
||||
if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) {
|
||||
@ -18011,13 +18256,57 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
|
||||
{
|
||||
int rs, rt, rd, sa;
|
||||
uint32_t op1, op2;
|
||||
int16_t imm;
|
||||
|
||||
rs = (ctx->opcode >> 21) & 0x1f;
|
||||
rt = (ctx->opcode >> 16) & 0x1f;
|
||||
rd = (ctx->opcode >> 11) & 0x1f;
|
||||
sa = (ctx->opcode >> 6) & 0x1f;
|
||||
imm = sextract32(ctx->opcode, 7, 9);
|
||||
|
||||
op1 = MASK_SPECIAL3(ctx->opcode);
|
||||
|
||||
/*
|
||||
* EVA loads and stores overlap Loongson 2E instructions decoded by
|
||||
* decode_opc_special3_legacy(), so be careful to allow their decoding when
|
||||
* EVA is absent.
|
||||
*/
|
||||
if (ctx->eva) {
|
||||
switch (op1) {
|
||||
case OPC_LWLE ... OPC_LWRE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
/* fall through */
|
||||
case OPC_LBUE ... OPC_LHUE:
|
||||
case OPC_LBE ... OPC_LWE:
|
||||
check_cp0_enabled(ctx);
|
||||
gen_ld(ctx, op1, rt, rs, imm);
|
||||
return;
|
||||
case OPC_SWLE ... OPC_SWRE:
|
||||
check_insn_opc_removed(ctx, ISA_MIPS32R6);
|
||||
/* fall through */
|
||||
case OPC_SBE ... OPC_SHE:
|
||||
case OPC_SWE:
|
||||
check_cp0_enabled(ctx);
|
||||
gen_st(ctx, op1, rt, rs, imm);
|
||||
return;
|
||||
case OPC_SCE:
|
||||
check_cp0_enabled(ctx);
|
||||
gen_st_cond(ctx, op1, rt, rs, imm);
|
||||
return;
|
||||
case OPC_CACHEE:
|
||||
check_cp0_enabled(ctx);
|
||||
if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
|
||||
gen_cache_operation(ctx, rt, rs, imm);
|
||||
}
|
||||
/* Treat as NOP. */
|
||||
return;
|
||||
case OPC_PREFE:
|
||||
check_cp0_enabled(ctx);
|
||||
/* Treat as NOP. */
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
switch (op1) {
|
||||
case OPC_EXT:
|
||||
case OPC_INS:
|
||||
@ -19916,6 +20205,8 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||
ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
|
||||
ctx.PAMask = env->PAMask;
|
||||
ctx.mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
|
||||
ctx.eva = (env->CP0_Config5 >> CP0C5_EVA) & 1;
|
||||
ctx.sc = (env->CP0_Config3 >> CP0C3_SC) & 1;
|
||||
ctx.CP0_LLAddr_shift = env->CP0_LLAddr_shift;
|
||||
ctx.cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
|
||||
/* Restore delay slot state from the tb context. */
|
||||
@ -19931,7 +20222,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
ctx.mem_idx = MIPS_HFLAG_UM;
|
||||
#else
|
||||
ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU;
|
||||
ctx.mem_idx = hflags_mmu_index(ctx.hflags);
|
||||
#endif
|
||||
ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
|
||||
MO_UNALN : MO_ALIGN;
|
||||
@ -20300,6 +20591,7 @@ void cpu_state_reset(CPUMIPSState *env)
|
||||
env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
|
||||
env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
|
||||
env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
|
||||
env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask;
|
||||
env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
|
||||
env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
|
||||
env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
|
||||
@ -20350,7 +20642,7 @@ void cpu_state_reset(CPUMIPSState *env)
|
||||
if (kvm_enabled()) {
|
||||
env->CP0_EBase |= 0x40000000;
|
||||
} else {
|
||||
env->CP0_EBase |= 0x80000000;
|
||||
env->CP0_EBase |= (int32_t)0x80000000;
|
||||
}
|
||||
if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) {
|
||||
env->CP0_CMGCRBase = 0x1fbf8000 >> 4;
|
||||
@ -20401,6 +20693,29 @@ void cpu_state_reset(CPUMIPSState *env)
|
||||
env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Configure default legacy segmentation control. We use this regardless of
|
||||
* whether segmentation control is presented to the guest.
|
||||
*/
|
||||
/* KSeg3 (seg0 0xE0000000..0xFFFFFFFF) */
|
||||
env->CP0_SegCtl0 = (CP0SC_AM_MK << CP0SC_AM);
|
||||
/* KSeg2 (seg1 0xC0000000..0xDFFFFFFF) */
|
||||
env->CP0_SegCtl0 |= ((CP0SC_AM_MSK << CP0SC_AM)) << 16;
|
||||
/* KSeg1 (seg2 0xA0000000..0x9FFFFFFF) */
|
||||
env->CP0_SegCtl1 = (0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
|
||||
(2 << CP0SC_C);
|
||||
/* KSeg0 (seg3 0x80000000..0x9FFFFFFF) */
|
||||
env->CP0_SegCtl1 |= ((0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
|
||||
(3 << CP0SC_C)) << 16;
|
||||
/* USeg (seg4 0x40000000..0x7FFFFFFF) */
|
||||
env->CP0_SegCtl2 = (2 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
|
||||
(1 << CP0SC_EU) | (2 << CP0SC_C);
|
||||
/* USeg (seg5 0x00000000..0x3FFFFFFF) */
|
||||
env->CP0_SegCtl2 |= ((0 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
|
||||
(1 << CP0SC_EU) | (2 << CP0SC_C)) << 16;
|
||||
/* XKPhys (note, SegCtl2.XR = 0, so XAM won't be used) */
|
||||
env->CP0_SegCtl1 |= (CP0SC_AM_UK << CP0SC1_XAM);
|
||||
#endif
|
||||
if ((env->insn_flags & ISA_MIPS32R6) &&
|
||||
(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
|
||||
|
@@ -101,6 +101,7 @@ struct mips_def_t {
     int32_t CP0_SRSConf4;
     int32_t CP0_PageGrain_rw_bitmask;
     int32_t CP0_PageGrain;
+    target_ulong CP0_EBaseWG_rw_bitmask;
     int insn_flags;
     enum mips_mmu_types mmu_type;
 };
@@ -420,9 +421,9 @@ static const mips_def_t mips_defs[] =
     },
     {
         /* FIXME:
-         * Config3: CMGCR, SC, PW, VZ, CTXTC, CDMM, TL
+         * Config3: CMGCR, PW, VZ, CTXTC, CDMM, TL
          * Config4: MMUExtDef
-         * Config5: EVA, MRP
+         * Config5: MRP
          * FIR(FCR0): Has2008
          * */
         .name = "P5600",
@@ -435,13 +436,14 @@ static const mips_def_t mips_defs[] =
                        (1 << CP0C1_PC) | (1 << CP0C1_FP),
         .CP0_Config2 = MIPS_CONFIG2,
         .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) |
-                       (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
-                       (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+                       (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_SC) |
+                       (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1 << CP0C3_LPA) |
+                       (1 << CP0C3_VInt),
         .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) |
                        (0x1c << CP0C4_KScrExist),
         .CP0_Config4_rw_bitmask = 0,
-        .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_MVH) | (1 << CP0C5_LLB) |
-                       (1 << CP0C5_MRP),
+        .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_EVA) | (1 << CP0C5_MVH) |
+                       (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
         .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
                                   (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
                                   (1 << CP0C5_FRE) | (1 << CP0C5_UFR),
@@ -452,6 +454,7 @@ static const mips_def_t mips_defs[] =
         .CP0_Status_rw_bitmask = 0x3C68FF1F,
         .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) |
                                     (1 << CP0PG_ELPA) | (1 << CP0PG_IEC),
+        .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
         .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_UFRP) | (1 << FCR0_HAS2008) |
                     (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
                     (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID),
@@ -637,6 +640,7 @@ static const mips_def_t mips_defs[] =
         .SYNCI_Step = 32,
         .CCRes = 2,
         .CP0_Status_rw_bitmask = 0x36FBFFFF,
+        .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
         .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
                     (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
                     (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
@@ -720,6 +724,7 @@ static const mips_def_t mips_defs[] =
         .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
                          (1U << CP0PG_RIE),
         .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+        .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
         .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
                     (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
                     (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV),