Merge tag 'pull-tcg-20241022' of https://gitlab.com/rth7680/qemu into staging

tcg: Reset data_gen_ptr correctly
tcg/riscv: Implement host vector support
tcg/ppc: Fix tcg_out_rlw_rc
target/i386: Walk NPT in guest real mode
target/i386: Use probe_access_full_mmu in ptw_translate
linux-user: Fix build failure caused by missing __u64 on musl
linux-user: Emulate /proc/self/maps under mmap_lock
linux-user/riscv: Fix definition of RISCV_HWPROBE_EXT_ZVFHMIN
linux-user/ppc: Fix sigmask endianness issue in sigreturn

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmcYbccdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV97TwgAmg27QFCdiTrqZgs2
# P1AO40zTgyTAwWx2gykaEuDWNhz/uSWvlBRN0/636wqGPkbJtrRHYM26og4BAThh
# o172/IwiZqfKOR1ndHl9j3BrtmrlIlaEEjiikqy1MTZF127irV6JWoJE1mSUrAxy
# 3Cm1K4gnK/e1+LdWf4Lj+K2lE6PpAK/ppKggzOXhtEgKiH1l4bUCl/Fq54wqphUn
# YS+cpmgQDCkXFfmPbQqie0HDpe3bhb75qIDQrbC5JcZdHqV73rTwSZvfUOmS/5Re
# 18K6nfAXXT+Zm0IrJMey/7b1jUWF3nMUVCTuLvmhSOwBAkIvTVYHko9CjvLtM6YH
# UHu3yA==
# =V393
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 23 Oct 2024 04:30:15 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20241022' of https://gitlab.com/rth7680/qemu: (24 commits)
  linux-user/riscv: Fix definition of RISCV_HWPROBE_EXT_ZVFHMIN
  linux-user: Fix build failure caused by missing __u64 on musl
  linux-user: Trace rt_sigprocmask's sigsets
  linux-user/ppc: Fix sigmask endianness issue in sigreturn
  linux-user: Emulate /proc/self/maps under mmap_lock
  target/i386: Remove ra parameter from ptw_translate
  target/i386: Use probe_access_full_mmu in ptw_translate
  target/i386: Walk NPT in guest real mode
  include/exec: Improve probe_access_full{,_mmu} documentation
  tcg/ppc: Fix tcg_out_rlw_rc
  tcg/riscv: Enable native vector support for TCG host
  tcg/riscv: Implement vector roti/v/x ops
  tcg/riscv: Implement vector shi/s/v ops
  tcg/riscv: Implement vector min/max ops
  tcg/riscv: Implement vector sat/mul ops
  tcg/riscv: Accept constant first argument to sub_vec
  tcg/riscv: Implement vector neg ops
  tcg/riscv: Implement vector cmp/cmpsel ops
  tcg/riscv: Add support for basic vector opcodes
  tcg/riscv: Implement vector mov/dup{m/i}
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2024-10-24 15:21:42 +01:00
commit 6b3756503b
19 changed files with 1152 additions and 175 deletions


@@ -4808,7 +4808,7 @@ static void decode_inst_operands(rv_decode *dec, rv_isa isa)
 break;
 case rv_codec_vsetivli:
 dec->rd = operand_rd(inst);
-dec->imm = operand_vimm(inst);
+dec->imm = extract32(inst, 15, 5);
 dec->vzimm = operand_vzimm10(inst);
 break;
 case rv_codec_zcb_lb:

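The decoder hunk above replaces a vector-immediate helper with extract32(), which zero-extends the 5-bit field at bits [19:15]; the companion format-string hunk below adjusts how that operand is printed. A minimal standalone sketch of why zero- versus sign-extension matters for vsetivli's AVL immediate, using local stand-ins that only mimic the semantics of QEMU's extract32()/sextract32() (the assumption being that the replaced operand_vimm() helper sign-extended the field):

    /* Not QEMU code: local helpers that mimic extract32()/sextract32(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract_bits(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    static int32_t sextract_bits(uint32_t value, int start, int length)
    {
        /* Relies on arithmetic right shift of a negative int; fine for a sketch. */
        return (int32_t)(value << (32 - start - length)) >> (32 - length);
    }

    int main(void)
    {
        uint32_t inst = 17u << 15;   /* any encoding whose uimm[4:0] field holds 17 */

        printf("zero-extended AVL: %u\n", extract_bits(inst, 15, 5));  /* 17 */
        printf("sign-extended AVL: %d\n", sextract_bits(inst, 15, 5)); /* -15 */
        return 0;
    }

For any AVL of 16 or more, the sign-extending variant turns the operand negative.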

@@ -290,7 +290,7 @@ enum {
 #define rv_fmt_fd_vs2 "O\t3,F"
 #define rv_fmt_vd_vm "O\tDm"
 #define rv_fmt_vsetvli "O\t0,1,v"
-#define rv_fmt_vsetivli "O\t0,u,v"
+#define rv_fmt_vsetivli "O\t0,i,v"
 #define rv_fmt_rs1_rs2_zce_ldst "O\t2,i(1)"
 #define rv_fmt_push_rlist "O\tx,-i"
 #define rv_fmt_pop_rlist "O\tx,i"


@ -10,9 +10,11 @@
#define CPUINFO_ZBA (1u << 1)
#define CPUINFO_ZBB (1u << 2)
#define CPUINFO_ZICOND (1u << 3)
#define CPUINFO_ZVE64X (1u << 4)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
extern unsigned riscv_lg2_vlenb;
/*
* We cannot rely on constructor ordering, so other constructors must


@@ -368,6 +368,13 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
+ *
+ * This function will not fault if @nonfault is set, but will
+ * return TLB_INVALID_MASK if the page is not mapped, or is not
+ * accessible with @access_type.
+ *
+ * This function will return TLB_MMIO in order to force the access
+ * to be handled out-of-line if plugins wish to instrument the access.
 */
 int probe_access_full(CPUArchState *env, vaddr addr, int size,
 MMUAccessType access_type, int mmu_idx,
@@ -375,22 +382,14 @@ int probe_access_full(CPUArchState *env, vaddr addr, int size,
 CPUTLBEntryFull **pfull, uintptr_t retaddr);
 /**
- * probe_access_mmu() - Like probe_access_full except cannot fault and
- * doesn't trigger instrumentation.
+ * probe_access_full_mmu:
+ * Like probe_access_full, except:
 *
- * @env: CPUArchState
- * @vaddr: virtual address to probe
- * @size: size of the probe
- * @access_type: read, write or execute permission
- * @mmu_idx: softmmu index
- * @phost: ptr to return value host address or NULL
- * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
- *
- * The CPUTLBEntryFull structure returned via @pfull is transient
- * and must be consumed or copied immediately, before any further
- * access or changes to TLB @mmu_idx.
- *
- * Returns: TLB flags as per probe_access_flags()
+ * This function is intended to be used for page table accesses by
+ * the target mmu itself. Since such page walking happens while
+ * handling another potential mmu fault, this function never raises
+ * exceptions (akin to @nonfault true for probe_access_full).
+ * Likewise this function does not trigger plugin instrumentation.
 */
 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
 MMUAccessType access_type, int mmu_idx,


@ -521,6 +521,12 @@ struct TCGContext {
struct qemu_plugin_insn *plugin_insn;
#endif
/* For host-specific values. */
#ifdef __riscv
MemOp riscv_cur_vsew;
TCGType riscv_cur_type;
#endif
GHashTable *const_table[TCG_TYPE_COUNT];
TCGTempSet free_temps[TCG_TYPE_COUNT];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */


@@ -628,7 +628,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
 return 1;
-target_to_host_sigset_internal(&blocked, &set);
+target_to_host_sigset(&blocked, &set);
 set_sigmask(&blocked);
 restore_user_regs(env, mcp, sig);

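The one-line change above routes the guest-supplied mask through the sigset conversion that also byte-swaps each word from target order. As a standalone illustration (not QEMU code; GCC/Clang builtins stand in for the byte swap) of what goes wrong on a cross-endian setup such as a big-endian ppc guest on a little-endian host:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Guest blocks signal 2: bit 1 set, i.e. the value 0x2 in guest byte order. */
        uint64_t raw = __builtin_bswap64(UINT64_C(0x2)); /* as read by a LE host */

        for (int sig = 1; sig <= 64; sig++) {
            if (raw & (UINT64_C(1) << (sig - 1))) {
                printf("unswapped word looks like signal %d is blocked\n", sig);
            }
        }
        printf("after swapping back: signal %d\n",
               (int)__builtin_ctzll(__builtin_bswap64(raw)) + 1);
        return 0;
    }

Interpreting the unswapped word would block an entirely different set of signals.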

@@ -160,20 +160,21 @@ static const char * const target_signal_name[] = {
 #undef MAKE_SIG_ENTRY
 };
+static void
+print_signal_1(abi_ulong arg)
+{
+if (arg < ARRAY_SIZE(target_signal_name)) {
+qemu_log("%s", target_signal_name[arg]);
+} else {
+qemu_log(TARGET_ABI_FMT_lu, arg);
+}
+}
 static void
 print_signal(abi_ulong arg, int last)
 {
-const char *signal_name = NULL;
-if (arg < ARRAY_SIZE(target_signal_name)) {
-signal_name = target_signal_name[arg];
-}
-if (signal_name == NULL) {
-print_raw_param("%ld", arg, last);
-return;
-}
-qemu_log("%s%s", signal_name, get_comma(last));
+print_signal_1(arg);
+qemu_log("%s", get_comma(last));
 }
static void print_si_code(int arg)
@ -718,6 +719,51 @@ print_ipc(CPUArchState *cpu_env, const struct syscallname *name,
}
#endif
#ifdef TARGET_NR_rt_sigprocmask
static void print_target_sigset_t_1(target_sigset_t *set, int last)
{
bool first = true;
int i, sig = 1;
qemu_log("[");
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
abi_ulong bits = 0;
int j;
__get_user(bits, &set->sig[i]);
for (j = 0; j < sizeof(bits) * 8; j++) {
if (bits & ((abi_ulong)1 << j)) {
if (first) {
first = false;
} else {
qemu_log(" ");
}
print_signal_1(sig);
}
sig++;
}
}
qemu_log("]%s", get_comma(last));
}
static void print_target_sigset_t(abi_ulong addr, abi_ulong size, int last)
{
if (addr && size == sizeof(target_sigset_t)) {
target_sigset_t *set;
set = lock_user(VERIFY_READ, addr, sizeof(target_sigset_t), 1);
if (set) {
print_target_sigset_t_1(set, last);
unlock_user(set, addr, 0);
} else {
print_pointer(addr, last);
}
} else {
print_pointer(addr, last);
}
}
#endif
/*
* Variants for the return value output function
*/
@@ -3312,11 +3358,29 @@ print_rt_sigprocmask(CPUArchState *cpu_env, const struct syscallname *name,
 case TARGET_SIG_SETMASK: how = "SIG_SETMASK"; break;
 }
 qemu_log("%s,", how);
-print_pointer(arg1, 0);
+print_target_sigset_t(arg1, arg3, 0);
 print_pointer(arg2, 0);
 print_raw_param("%u", arg3, 1);
 print_syscall_epilogue(name);
 }
+static void
+print_rt_sigprocmask_ret(CPUArchState *cpu_env, const struct syscallname *name,
+abi_long ret, abi_long arg0, abi_long arg1,
+abi_long arg2, abi_long arg3, abi_long arg4,
+abi_long arg5)
+{
+if (!print_syscall_err(ret)) {
+qemu_log(TARGET_ABI_FMT_ld, ret);
+if (arg2) {
+qemu_log(" (oldset=");
+print_target_sigset_t(arg2, arg3, 1);
+qemu_log(")");
+}
+}
+qemu_log("\n");
+}
 #endif
 #ifdef TARGET_NR_rt_sigqueueinfo


@@ -1189,7 +1189,8 @@
 { TARGET_NR_rt_sigpending, "rt_sigpending" , NULL, NULL, NULL },
 #endif
 #ifdef TARGET_NR_rt_sigprocmask
-{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask, NULL },
+{ TARGET_NR_rt_sigprocmask, "rt_sigprocmask" , NULL, print_rt_sigprocmask,
+print_rt_sigprocmask_ret },
 #endif
 #ifdef TARGET_NR_rt_sigqueueinfo
 { TARGET_NR_rt_sigqueueinfo, "rt_sigqueueinfo" , NULL, print_rt_sigqueueinfo, NULL },


@@ -8150,17 +8150,19 @@ static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
 {
 struct open_self_maps_data d = {
 .ts = get_task_state(env_cpu(env)),
-.host_maps = read_self_maps(),
 .fd = fd,
 .smaps = smaps
 };
+mmap_lock();
+d.host_maps = read_self_maps();
 if (d.host_maps) {
 walk_memory_regions(&d, open_self_maps_2);
 free_self_maps(d.host_maps);
 } else {
 walk_memory_regions(&d, open_self_maps_3);
 }
+mmap_unlock();
 return 0;
 }
@@ -8942,7 +8944,7 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
 #define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
 #define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
 #define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
-#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
 #define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
 #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
 #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)

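The ZVFHMIN hunk above matters because these RISCV_HWPROBE_EXT_* bits are tested against 64-bit hwprobe values (note the 1ULL constants that follow it). In C, 1 << 31 has type int, the shift overflows into the sign bit (formally undefined, INT_MIN in practice), and widening that to 64 bits sign-extends it, so the mask covers the whole upper half of the word. A standalone check of the arithmetic:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t wrong = (uint64_t)(1 << 31); /* sign-extended on conversion */
        uint64_t right = 1ULL << 31;

        printf("wrong: 0x%016" PRIx64 "\n", wrong); /* 0xffffffff80000000 */
        printf("right: 0x%016" PRIx64 "\n", right); /* 0x0000000080000000 */
        return 0;
    }

With the old definition, testing a value against ZVFHMIN would also match any of the higher bits such as ZFA or ZTSO.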

@@ -2750,9 +2750,9 @@ struct target_sched_param {
 /* from kernel's include/uapi/linux/openat2.h */
 struct open_how_ver0 {
-__u64 flags;
-__u64 mode;
-__u64 resolve;
+uint64_t flags;
+uint64_t mode;
+uint64_t resolve;
 };
 struct target_open_how_ver0 {
 abi_ullong flags;


@@ -60,14 +60,13 @@ typedef struct PTETranslate {
 hwaddr gaddr;
 } PTETranslate;
-static bool ptw_translate(PTETranslate *inout, hwaddr addr, uint64_t ra)
+static bool ptw_translate(PTETranslate *inout, hwaddr addr)
 {
-CPUTLBEntryFull *full;
 int flags;
 inout->gaddr = addr;
-flags = probe_access_full(inout->env, addr, 0, MMU_DATA_STORE,
-inout->ptw_idx, true, &inout->haddr, &full, ra);
+flags = probe_access_full_mmu(inout->env, addr, 0, MMU_DATA_STORE,
+inout->ptw_idx, &inout->haddr, NULL);
 if (unlikely(flags & TLB_INVALID_MASK)) {
 TranslateFault *err = inout->err;
@@ -150,6 +149,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 uint32_t pkr;
 int page_size;
 int error_code;
+int prot;
 restart_all:
 rsvd_mask = ~MAKE_64BIT_MASK(0, env_archcpu(env)->phys_bits);
@@ -166,7 +166,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 5
 */
 pte_addr = (in->cr3 & ~0xfff) + (((addr >> 48) & 0x1ff) << 3);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 restart_5:
@@ -190,7 +190,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 4
 */
 pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 39) & 0x1ff) << 3);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 restart_4:
@@ -210,7 +210,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 3
 */
 pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 restart_3_lma:
@@ -237,7 +237,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 3
 */
 pte_addr = (in->cr3 & 0xffffffe0ULL) + ((addr >> 27) & 0x18);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 rsvd_mask |= PG_HI_USER_MASK;
@@ -259,7 +259,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 2
 */
 pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 restart_2_pae:
@@ -285,7 +285,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 1
 */
 pte_addr = (pte & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 pte = ptw_ldq(&pte_trans, ra);
@@ -298,12 +298,12 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 /* combine pde and pte nx, user and rw protections */
 ptep &= pte ^ PG_NX_MASK;
 page_size = 4096;
-} else {
+} else if (pg_mode) {
 /*
 * Page table level 2
 */
 pte_addr = (in->cr3 & 0xfffff000ULL) + ((addr >> 20) & 0xffc);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 restart_2_nopae:
@@ -332,7 +332,7 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 * Page table level 1
 */
 pte_addr = (pte & ~0xfffu) + ((addr >> 10) & 0xffc);
-if (!ptw_translate(&pte_trans, pte_addr, ra)) {
+if (!ptw_translate(&pte_trans, pte_addr)) {
 return false;
 }
 pte = ptw_ldl(&pte_trans, ra);
@@ -343,6 +343,15 @@ static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
 ptep &= pte | PG_NX_MASK;
 page_size = 4096;
 rsvd_mask = 0;
+} else {
+/*
+* No paging (real mode), let's tentatively resolve the address as 1:1
+* here, but conditionally still perform an NPT walk on it later.
+*/
+page_size = 0x40000000;
+paddr = in->addr;
+prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+goto stage2;
 }
 do_check_protect:
@@ -358,7 +367,7 @@ do_check_protect_pse36:
 goto do_fault_protect;
 }
-int prot = 0;
+prot = 0;
 if (!is_mmu_index_smap(in->mmu_idx) || !(ptep & PG_USER_MASK)) {
 prot |= PAGE_READ;
 if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
@@ -420,6 +429,7 @@ do_check_protect_pse36:
 /* merge offset within page */
 paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
+stage2:
 /*
 * Note that NPT is walked (for both paging structures and final guest
@@ -429,9 +439,8 @@ do_check_protect_pse36:
 CPUTLBEntryFull *full;
 int flags, nested_page_size;
-flags = probe_access_full(env, paddr, 0, access_type,
-MMU_NESTED_IDX, true,
-&pte_trans.haddr, &full, 0);
+flags = probe_access_full_mmu(env, paddr, 0, access_type,
+MMU_NESTED_IDX, &pte_trans.haddr, &full);
 if (unlikely(flags & TLB_INVALID_MASK)) {
 *err = (TranslateFault){
 .error_code = env->error_code,
@@ -562,7 +571,7 @@ static bool get_physical_address(CPUX86State *env, vaddr addr,
 addr = (uint32_t)addr;
 }
-if (likely(env->cr[0] & CR0_PG_MASK)) {
+if (likely(env->cr[0] & CR0_PG_MASK || use_stage2)) {
 in.cr3 = env->cr[3];
 in.mmu_idx = mmu_idx;
 in.ptw_idx = use_stage2 ? MMU_NESTED_IDX : MMU_PHYS_IDX;


@@ -911,7 +911,9 @@ static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
 static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
 int sh, int mb, int me, bool rc)
 {
-tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
+tcg_debug_assert((mb & 0x1f) == mb);
+tcg_debug_assert((me & 0x1f) == me);
+tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh & 0x1f) | MB(mb) | ME(me) | rc);
 }
 static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,

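For context on the new masking and asserts above: rlwinm's shift amount is a 5-bit field, and a rotate count of 32 (equivalent to a rotate by 0 for 32-bit data) would otherwise spill a bit into the neighbouring register field of the encoding. A standalone sketch; the field positions below follow the Power ISA M-form layout and are assumptions, not copies of the QEMU backend's macros:

    #include <stdint.h>
    #include <stdio.h>

    #define OPCD_RLWINM (21u << 26)
    #define RS(r) ((uint32_t)(r) << 21)
    #define RA(r) ((uint32_t)(r) << 16)
    #define SH(s) ((uint32_t)(s) << 11)
    #define MB(b) ((uint32_t)(b) << 6)
    #define ME(e) ((uint32_t)(e) << 1)

    static uint32_t rlwinm(int ra, int rs, int sh, int mb, int me)
    {
        return OPCD_RLWINM | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me);
    }

    int main(void)
    {
        uint32_t bad  = rlwinm(4, 5, 32, 0, 31);        /* unmasked shift count */
        uint32_t good = rlwinm(4, 5, 32 & 0x1f, 0, 31); /* what the fix emits */

        /* Bit 5 of the unmasked count lands in the RA (destination) field. */
        printf("RA field: bad=%u good=%u\n",
               (bad >> 16) & 0x1f, (good >> 16) & 0x1f);
        return 0;
    }

Masking the count is safe precisely because a 32-bit rotate by 32 is the identity; mb and me, by contrast, are simply required to already be in range, hence the asserts.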

@ -21,3 +21,12 @@ C_O1_I2(r, rZ, rZ)
C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
C_O2_I4(r, r, rZ, rZ, rM, rM)
C_O0_I2(v, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I2(v, v, r)
C_O1_I2(v, v, v)
C_O1_I2(v, vK, v)
C_O1_I2(v, v, vK)
C_O1_I2(v, v, vL)
C_O1_I4(v, v, vL, vK, vK)


@ -9,6 +9,7 @@
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('v', ALL_VECTOR_REGS)
/*
* Define constraint letters for constants:
@ -16,6 +17,8 @@ REGS('r', ALL_GENERAL_REGS)
*/
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_J12)
CONST('K', TCG_CT_CONST_S5)
CONST('L', TCG_CT_CONST_CMP_VI)
CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
CONST('Z', TCG_CT_CONST_ZERO)

(File diff suppressed because it is too large.)


@@ -28,42 +28,28 @@
 #include "host/cpuinfo.h"
 #define TCG_TARGET_INSN_UNIT_SIZE 4
-#define TCG_TARGET_NB_REGS 32
+#define TCG_TARGET_NB_REGS 64
 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
 typedef enum {
-TCG_REG_ZERO,
-TCG_REG_RA,
-TCG_REG_SP,
-TCG_REG_GP,
-TCG_REG_TP,
-TCG_REG_T0,
-TCG_REG_T1,
-TCG_REG_T2,
-TCG_REG_S0,
-TCG_REG_S1,
-TCG_REG_A0,
-TCG_REG_A1,
-TCG_REG_A2,
-TCG_REG_A3,
-TCG_REG_A4,
-TCG_REG_A5,
-TCG_REG_A6,
-TCG_REG_A7,
-TCG_REG_S2,
-TCG_REG_S3,
-TCG_REG_S4,
-TCG_REG_S5,
-TCG_REG_S6,
-TCG_REG_S7,
-TCG_REG_S8,
-TCG_REG_S9,
-TCG_REG_S10,
-TCG_REG_S11,
-TCG_REG_T3,
-TCG_REG_T4,
-TCG_REG_T5,
-TCG_REG_T6,
+TCG_REG_ZERO, TCG_REG_RA, TCG_REG_SP, TCG_REG_GP,
+TCG_REG_TP, TCG_REG_T0, TCG_REG_T1, TCG_REG_T2,
+TCG_REG_S0, TCG_REG_S1, TCG_REG_A0, TCG_REG_A1,
+TCG_REG_A2, TCG_REG_A3, TCG_REG_A4, TCG_REG_A5,
+TCG_REG_A6, TCG_REG_A7, TCG_REG_S2, TCG_REG_S3,
+TCG_REG_S4, TCG_REG_S5, TCG_REG_S6, TCG_REG_S7,
+TCG_REG_S8, TCG_REG_S9, TCG_REG_S10, TCG_REG_S11,
+TCG_REG_T3, TCG_REG_T4, TCG_REG_T5, TCG_REG_T6,
+/* RISC-V V Extension registers */
+TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
+TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
 /* aliases */
 TCG_AREG0 = TCG_REG_S0,
@ -156,6 +142,32 @@ typedef enum {
#define TCG_TARGET_HAS_tst 0
/* vector instructions */
#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X)
#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X)
#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X)
#define TCG_TARGET_HAS_andc_vec 0
#define TCG_TARGET_HAS_orc_vec 0
#define TCG_TARGET_HAS_nand_vec 0
#define TCG_TARGET_HAS_nor_vec 0
#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 1
#define TCG_TARGET_HAS_abs_vec 0
#define TCG_TARGET_HAS_roti_vec 1
#define TCG_TARGET_HAS_rots_vec 1
#define TCG_TARGET_HAS_rotv_vec 1
#define TCG_TARGET_HAS_shi_vec 1
#define TCG_TARGET_HAS_shs_vec 1
#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
#define TCG_TARGET_HAS_bitsel_vec 0
#define TCG_TARGET_HAS_cmpsel_vec 1
#define TCG_TARGET_HAS_tst_vec 0
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_NEED_LDST_LABELS


@ -0,0 +1,12 @@
/*
* Copyright (c) C-SKY Microsystems Co., Ltd.
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* (at your option) any later version.
*
* See the COPYING file in the top-level directory for details.
*
* Target-specific opcodes for host vector expansion. These will be
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
* consider these to be UNSPEC with names.
*/


@@ -1399,7 +1399,6 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
 goto retry;
 }
 qatomic_set(&s->code_gen_ptr, next);
-s->data_gen_ptr = NULL;
 return tb;
 }
@ -6172,6 +6171,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
*/
s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
s->code_ptr = s->code_buf;
s->data_gen_ptr = NULL;
#ifdef TCG_TARGET_NEED_LDST_LABELS
QSIMPLEQ_INIT(&s->ldst_labels);


@ -4,6 +4,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "host/cpuinfo.h"
#ifdef CONFIG_ASM_HWPROBE_H
@ -13,6 +14,7 @@
#endif
unsigned cpuinfo;
unsigned riscv_lg2_vlenb;
static volatile sig_atomic_t got_sigill;
static void sigill_handler(int signo, siginfo_t *si, void *data)
@@ -34,7 +36,7 @@ static void sigill_handler(int signo, siginfo_t *si, void *data)
 /* Called both as constructor and (possibly) via other constructors. */
 unsigned __attribute__((constructor)) cpuinfo_init(void)
 {
-unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND;
+unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND | CPUINFO_ZVE64X;
 unsigned info = cpuinfo;
 if (info) {
@ -50,6 +52,10 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
#endif
#if defined(__riscv_arch_test) && defined(__riscv_zicond)
info |= CPUINFO_ZICOND;
#endif
#if defined(__riscv_arch_test) && \
(defined(__riscv_vector) || defined(__riscv_zve64x))
info |= CPUINFO_ZVE64X;
#endif
left &= ~info;
@ -69,11 +75,22 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
#ifdef RISCV_HWPROBE_EXT_ZICOND
info |= pair.value & RISCV_HWPROBE_EXT_ZICOND ? CPUINFO_ZICOND : 0;
left &= ~CPUINFO_ZICOND;
#endif
/* For rv64, V is Zve64d, a superset of Zve64x. */
info |= pair.value & RISCV_HWPROBE_IMA_V ? CPUINFO_ZVE64X : 0;
#ifdef RISCV_HWPROBE_EXT_ZVE64X
info |= pair.value & RISCV_HWPROBE_EXT_ZVE64X ? CPUINFO_ZVE64X : 0;
#endif
}
}
#endif /* CONFIG_ASM_HWPROBE_H */
/*
* We only detect support for vectors with hwprobe. All kernels with
* support for vectors in userspace also support the hwprobe syscall.
*/
left &= ~CPUINFO_ZVE64X;
if (left) {
struct sigaction sa_old, sa_new;
@ -113,6 +130,21 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
assert(left == 0);
}
if (info & CPUINFO_ZVE64X) {
/*
* We are guaranteed by RVV-1.0 that VLEN is a power of 2.
* We are guaranteed by Zve64x that VLEN >= 64, and that
* EEW of {8,16,32,64} are supported.
*/
unsigned long vlenb;
/* csrr %0, vlenb */
asm volatile(".insn i 0x73, 0x2, %0, zero, -990" : "=r"(vlenb));
assert(vlenb >= 8);
assert(is_power_of_2(vlenb));
/* Cache VLEN in a convenient form. */
riscv_lg2_vlenb = ctz32(vlenb);
}
info |= CPUINFO_ALWAYS;
cpuinfo = info;
return info;
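
Two details in the constructor above are worth spelling out: the hand-encoded csrr reads the vlenb CSR (architectural number 0xc22, which becomes -990 when expressed as the signed 12-bit immediate of the .insn form), and riscv_lg2_vlenb caches log2 of the vector register size in bytes. A standalone arithmetic check (not QEMU code; __builtin_ctz stands in for ctz32, and VLEN = 128 is only an example value):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* vlenb is CSR 0xc22; a 12-bit two's-complement field wraps that to -990. */
        int csr = 0xc22;
        int simm12 = csr - 4096;
        assert(simm12 == -990);

        /* A hart with VLEN = 128 bits reports vlenb = 16 bytes. */
        unsigned vlenb = 128 / 8;
        unsigned lg2 = (unsigned)__builtin_ctz(vlenb);

        printf("csr immediate: %d, cached lg2(vlenb): %u\n", simm12, lg2);
        return 0;
    }

On such a hart riscv_lg2_vlenb would be 4.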