Merge patch series "target/riscv: Some updates to floating-point related extensions"
RISC-V defines a handful of extensions related to floating point, along
with various relationships between these and other extensions. This patch
set adds support for the Zvfh, Zvfhmin, and Zve64d extensions, along with a
handful of fixes and cleanups for the other floating-point extension
relationships.

* b4-shazam-merge:
  target/riscv: Expose properties for Zv* extensions
  target/riscv: Simplify check for EEW = 64 in trans_rvv.c.inc
  target/riscv: Fix check for vector load/store instructions when EEW=64
  target/riscv: Add support for Zvfh/zvfhmin extensions
  target/riscv: Remove redundant check for zve32f and zve64f
  target/riscv: Replace check for F/D to Zve32f/Zve64d in trans_rvv.c.inc
  target/riscv: Simplify check for Zve32f and Zve64f
  target/riscv: Indent fixes in cpu.c
  target/riscv: Add property check for Zvfh{min} extensions
  target/riscv: Fix relationship between V, Zve*, F and D
  target/riscv: Add cfg properties for Zv* extensions
  target/riscv: Simplify the check for Zfhmin and Zhinxmin
  target/riscv: Fix the relationship between Zhinxmin and Zhinx
  target/riscv: Fix the relationship between Zfhmin and Zfh

Message-ID: <20230215020539.4788-1-liweiwei@iscas.ac.cn>
[Palmer: commit text]
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 312f632f4c
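To make the dependency handling in the diff below easier to follow, here is a minimal, self-contained C sketch of the expand-then-validate pattern that riscv_cpu_validate_set_extensions() applies after this merge. The struct, field, and function names are simplified stand-ins rather than QEMU's own, and only the relationships touched by this series are modeled:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the RISCVCPUConfig fields touched by this series. */
struct cfg {
    bool f, d, v;
    bool zfh, zfhmin;
    bool zve32f, zve64f, zve64d;
    bool zvfh, zvfhmin;
};

/*
 * Expand implied extensions first, then check requirements:
 * V -> Zve64d -> Zve64f -> Zve32f, Zfh -> Zfhmin, Zvfh -> Zvfhmin.
 */
static bool validate(struct cfg *c, const char **err)
{
    if (c->v) {
        c->zve64d = true;
    }
    if (c->zve64d) {
        c->zve64f = true;
    }
    if (c->zve64f) {
        c->zve32f = true;
    }
    if (c->zfh) {
        c->zfhmin = true;
    }
    if (c->zvfh) {
        c->zvfhmin = true;
    }

    if (c->zfhmin && !c->f) {
        *err = "Zfh/Zfhmin require F";
        return false;
    }
    if (c->zve64d && !c->d) {
        *err = "Zve64d/V require D";
        return false;
    }
    if (c->zve32f && !c->f) {
        *err = "Zve32f/Zve64f require F";
        return false;
    }
    if (c->zvfhmin && !c->zve32f) {
        *err = "Zvfh/Zvfhmin require Zve32f";
        return false;
    }
    if (c->zvfh && !c->zfhmin) {
        *err = "Zvfh requires Zfhmin";
        return false;
    }
    return true;
}

int main(void)
{
    /* Enabling V and Zfh/Zvfh pulls in Zve*/Zfhmin/Zvfhmin, so this passes. */
    struct cfg c = { .f = true, .d = true, .v = true, .zfh = true, .zvfh = true };
    const char *err = NULL;

    if (validate(&c, &err)) {
        printf("config OK\n");
    } else {
        printf("error: %s\n", err);
    }
    return 0;
}

With the configuration in main(), V implies Zve64d/Zve64f/Zve32f and Zfh/Zvfh imply Zfhmin/Zvfhmin, so validation succeeds; clearing .f or .zfh would trip the corresponding error path, mirroring the error messages added in cpu.c below.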
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -101,6 +101,9 @@ static const struct isa_ext_data isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
     ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
     ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
+    ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d),
+    ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
+    ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
     ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
     ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
     ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
@@ -729,7 +732,11 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) {
+    if (cpu->cfg.ext_zfh) {
+        cpu->cfg.ext_zfhmin = true;
+    }
+
+    if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) {
         error_setg(errp, "Zfh/Zfhmin extensions require F extension");
         return;
     }
@@ -739,19 +746,51 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if (cpu->cfg.ext_v && !cpu->cfg.ext_d) {
-        error_setg(errp, "V extension requires D extension");
+    /* The V vector extension depends on the Zve64d extension */
+    if (cpu->cfg.ext_v) {
+        cpu->cfg.ext_zve64d = true;
+    }
+
+    /* The Zve64d extension depends on the Zve64f extension */
+    if (cpu->cfg.ext_zve64d) {
+        cpu->cfg.ext_zve64f = true;
+    }
+
+    /* The Zve64f extension depends on the Zve32f extension */
+    if (cpu->cfg.ext_zve64f) {
+        cpu->cfg.ext_zve32f = true;
+    }
+
+    if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) {
+        error_setg(errp, "Zve64d/V extensions require D extension");
         return;
     }
 
-    if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) {
+    if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) {
         error_setg(errp, "Zve32f/Zve64f extensions require F extension");
         return;
     }
 
+    if (cpu->cfg.ext_zvfh) {
+        cpu->cfg.ext_zvfhmin = true;
+    }
+
+    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
+        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
+        return;
+    }
+
+    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
+        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
+        return;
+    }
+
     /* Set the ISA extensions, checks should have happened above */
-    if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
-        cpu->cfg.ext_zhinxmin) {
+    if (cpu->cfg.ext_zhinx) {
+        cpu->cfg.ext_zhinxmin = true;
+    }
+
+    if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) {
         cpu->cfg.ext_zfinx = true;
     }
 
@@ -762,7 +801,7 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         }
         if (cpu->cfg.ext_f) {
             error_setg(errp,
-                "Zfinx cannot be supported together with F extension");
+                       "Zfinx cannot be supported together with F extension");
             return;
         }
     }
@@ -825,40 +864,40 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         ext |= RVV;
         if (!is_power_of_2(cpu->cfg.vlen)) {
             error_setg(errp,
-                "Vector extension VLEN must be power of 2");
+                       "Vector extension VLEN must be power of 2");
             return;
         }
         if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
             error_setg(errp,
-                "Vector extension implementation only supports VLEN "
-                "in the range [128, %d]", RV_VLEN_MAX);
+                       "Vector extension implementation only supports VLEN "
+                       "in the range [128, %d]", RV_VLEN_MAX);
             return;
         }
         if (!is_power_of_2(cpu->cfg.elen)) {
             error_setg(errp,
-                "Vector extension ELEN must be power of 2");
+                       "Vector extension ELEN must be power of 2");
             return;
         }
-            if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
-                error_setg(errp,
-                        "Vector extension implementation only supports ELEN "
-                        "in the range [8, 64]");
-                return;
-            }
-            if (cpu->cfg.vext_spec) {
-                if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
-                    vext_version = VEXT_VERSION_1_00_0;
-                } else {
-                    error_setg(errp,
-                            "Unsupported vector spec version '%s'",
-                            cpu->cfg.vext_spec);
-                    return;
-                }
-            } else {
-                qemu_log("vector version is not specified, "
-                        "use the default value v1.0\n");
-            }
-            set_vext_version(env, vext_version);
+        if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
+            error_setg(errp,
+                       "Vector extension implementation only supports ELEN "
+                       "in the range [8, 64]");
+            return;
+        }
+        if (cpu->cfg.vext_spec) {
+            if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
+                vext_version = VEXT_VERSION_1_00_0;
+            } else {
+                error_setg(errp,
+                           "Unsupported vector spec version '%s'",
+                           cpu->cfg.vext_spec);
+                return;
+            }
+        } else {
+            qemu_log("vector version is not specified, "
+                     "use the default value v1.0\n");
+        }
+        set_vext_version(env, vext_version);
     }
     if (cpu->cfg.ext_j) {
         ext |= RVJ;
@@ -1079,6 +1118,7 @@ static Property riscv_cpu_extensions[] = {
     DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
     DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
     DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
+    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
     DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
     DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
@@ -1138,6 +1178,9 @@ static Property riscv_cpu_extensions[] = {
     DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
     DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
 
+    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
+    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
+
     DEFINE_PROP_END_OF_LIST(),
 };
 
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -449,7 +449,10 @@ struct RISCVCPUConfig {
     bool ext_zhinxmin;
     bool ext_zve32f;
    bool ext_zve64f;
+    bool ext_zve64d;
     bool ext_zmmul;
+    bool ext_zvfh;
+    bool ext_zvfhmin;
     bool ext_smaia;
     bool ext_ssaia;
     bool ext_sscofpmf;
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -51,7 +51,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
     *cs_base = 0;
 
-    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
+    if (cpu->cfg.ext_zve32f) {
         /*
          * If env->vl equals to VLMAX, we can use generic vector operation
          * expanders (GVEC) to accerlate the vector operations.
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -93,8 +93,7 @@ static RISCVException vs(CPURISCVState *env, int csrno)
     CPUState *cs = env_cpu(env);
     RISCVCPU *cpu = RISCV_CPU(cs);
 
-    if (env->misa_ext & RVV ||
-        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
+    if (cpu->cfg.ext_zve32f) {
 #if !defined(CONFIG_USER_ONLY)
         if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
             return RISCV_EXCP_ILLEGAL_INST;
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -40,10 +40,11 @@ static bool require_rvf(DisasContext *s)
 
     switch (s->sew) {
     case MO_16:
+        return s->cfg_ptr->ext_zvfh;
     case MO_32:
-        return has_ext(s, RVF);
+        return s->cfg_ptr->ext_zve32f;
     case MO_64:
-        return has_ext(s, RVD);
+        return s->cfg_ptr->ext_zve64d;
     default:
         return false;
     }
@@ -57,57 +58,32 @@ static bool require_scale_rvf(DisasContext *s)
 
     switch (s->sew) {
     case MO_8:
+        return s->cfg_ptr->ext_zvfh;
     case MO_16:
-        return has_ext(s, RVF);
+        return s->cfg_ptr->ext_zve32f;
     case MO_32:
-        return has_ext(s, RVD);
+        return s->cfg_ptr->ext_zve64d;
     default:
         return false;
     }
 }
 
-static bool require_zve32f(DisasContext *s)
+static bool require_scale_rvfmin(DisasContext *s)
 {
-    /* RVV + Zve32f = RVV. */
-    if (has_ext(s, RVV)) {
-        return true;
+    if (s->mstatus_fs == 0) {
+        return false;
     }
 
-    /* Zve32f doesn't support FP64. (Section 18.2) */
-    return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve32f(DisasContext *s)
-{
-    /* RVV + Zve32f = RVV. */
-    if (has_ext(s, RVV)) {
-        return true;
+    switch (s->sew) {
+    case MO_8:
+        return s->cfg_ptr->ext_zvfhmin;
+    case MO_16:
+        return s->cfg_ptr->ext_zve32f;
+    case MO_32:
+        return s->cfg_ptr->ext_zve64d;
+    default:
+        return false;
     }
-
-    /* Zve32f doesn't support FP64. (Section 18.2) */
-    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
-}
-
-static bool require_zve64f(DisasContext *s)
-{
-    /* RVV + Zve64f = RVV. */
-    if (has_ext(s, RVV)) {
-        return true;
-    }
-
-    /* Zve64f doesn't support FP64. (Section 18.2) */
-    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true;
-}
-
-static bool require_scale_zve64f(DisasContext *s)
-{
-    /* RVV + Zve64f = RVV. */
-    if (has_ext(s, RVV)) {
-        return true;
-    }
-
-    /* Zve64f doesn't support FP64. (Section 18.2) */
-    return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true;
 }
 
 /* Destination vector register group cannot overlap source mask register. */
@@ -173,9 +149,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
 {
     TCGv s1, dst;
 
-    if (!require_rvv(s) ||
-        !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
-          s->cfg_ptr->ext_zve64f)) {
+    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
         return false;
     }
 
@@ -210,9 +184,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
 {
     TCGv dst;
 
-    if (!require_rvv(s) ||
-        !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f ||
-          s->cfg_ptr->ext_zve64f)) {
+    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
         return false;
     }
 
@@ -315,13 +287,12 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
            require_nf(vd, nf, s->lmul);
 
     /*
-     * All Zve* extensions support all vector load and store instructions,
-     * except Zve64* extensions do not support EEW=64 for index values
-     * when XLEN=32. (Section 18.2)
+     * V extension supports all vector load and store instructions,
+     * except V extension does not support EEW=64 for index values
+     * when XLEN=32. (Section 18.3)
      */
     if (get_xl(s) == MXL_RV32) {
-        ret &= (!has_ext(s, RVV) &&
-                s->cfg_ptr->ext_zve64f ? eew != MO_64 : true);
+        ret &= (eew != MO_64);
     }
 
     return ret;
@@ -2027,8 +1998,7 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
      * are not included for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivv_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2041,8 +2011,7 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
      * are not included for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivx_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
@@ -2259,8 +2228,7 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
      * for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivv_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2271,8 +2239,7 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
      * for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivx_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
@@ -2335,9 +2302,7 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
           vext_check_isa_ill(s) &&
-           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* OPFVV without GVEC IR */
@@ -2425,9 +2390,7 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
           require_rvf(s) &&
           vext_check_isa_ill(s) &&
-           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ss(s, a->rd, a->rs2, a->vm);
 }
 
 /* OPFVF without GVEC IR */
@@ -2465,9 +2428,7 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
           (s->sew != MO_8) &&
           vext_check_isa_ill(s) &&
-           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* OPFVV with WIDEN */
@@ -2510,9 +2471,7 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
           (s->sew != MO_8) &&
          vext_check_isa_ill(s) &&
-           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_ds(s, a->rd, a->rs2, a->vm);
 }
 
 /* OPFVF with WIDEN */
@@ -2544,9 +2503,7 @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
          (s->sew != MO_8) &&
          vext_check_isa_ill(s) &&
-           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* WIDEN OPFVV with WIDEN */
@@ -2589,9 +2546,7 @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
          (s->sew != MO_8) &&
          vext_check_isa_ill(s) &&
-           vext_check_dd(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dd(s, a->rd, a->rs2, a->vm);
 }
 
 /* WIDEN OPFVF with WIDEN */
@@ -2668,9 +2623,7 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
            require_rvf(s) &&
          vext_check_isa_ill(s) &&
          /* OPFV instructions ignore vs1 check */
-           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ss(s, a->rd, a->rs2, a->vm);
 }
 
 static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2735,9 +2688,7 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
           require_rvf(s) &&
          vext_check_isa_ill(s) &&
-           vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_mss(s, a->rd, a->rs1, a->rs2);
 }
 
 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2750,9 +2701,7 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
          require_rvf(s) &&
         vext_check_isa_ill(s) &&
-           vext_check_ms(s, a->rd, a->rs2) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ms(s, a->rd, a->rs2);
 }
 
 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2773,9 +2722,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
     if (require_rvv(s) &&
         require_rvf(s) &&
         vext_check_isa_ill(s) &&
-        require_align(a->rd, s->lmul) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        require_align(a->rd, s->lmul)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         TCGv_i64 t1;
@@ -2860,18 +2807,14 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
-           require_scale_rvf(s) &&
-           (s->sew != MO_8) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           require_scale_rvfmin(s) &&
+           (s->sew != MO_8);
 }
 
 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -2922,9 +2865,7 @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
           vext_check_isa_ill(s) &&
          /* OPFV widening instructions ignore vs1 check */
-           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_ds(s, a->rd, a->rs2, a->vm);
 }
 
 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
@@ -2979,18 +2920,21 @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
            require_rvf(s) &&
-           (s->sew != MO_64) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           (s->sew != MO_64);
 }
 
 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
+           require_scale_rvfmin(s) &&
+           (s->sew != MO_8);
+}
+
+static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
+{
+    return opfv_narrow_check(s, a) &&
            require_scale_rvf(s) &&
-           (s->sew != MO_8) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           (s->sew != MO_8);
 }
 
 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -3030,7 +2974,7 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
                       RISCV_FRM_DYN)
 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
                       RISCV_FRM_DYN)
 /* Reuse the helper function from vfncvt.f.f.w */
-GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
                       RISCV_FRM_ROD)
 
@@ -3039,9 +2983,7 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
          vext_check_isa_ill(s) &&
          /* OPFV narrowing instructions ignore vs1 check */
-           vext_check_sd(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_sd(s, a->rd, a->rs2, a->vm);
 }
 
 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
@@ -3115,9 +3057,7 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
 static bool freduction_check(DisasContext *s, arg_rmrr *a)
 {
     return reduction_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
@@ -3544,9 +3484,7 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        vext_check_isa_ill(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         unsigned int ofs = (8 << s->sew);
@@ -3572,9 +3510,7 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        vext_check_isa_ill(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         /* The instructions ignore LMUL and vector register group. */
@@ -3625,17 +3561,13 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
 {
     return slideup_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
 {
     return slidedown_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -28,15 +28,14 @@
     } \
 } while (0)
 
-#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
-    if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
+#define REQUIRE_ZFHMIN(ctx) do { \
+    if (!ctx->cfg_ptr->ext_zfhmin) { \
         return false; \
     } \
 } while (0)
 
-#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
-    if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
-          ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
+#define REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx) do { \
+    if (!(ctx->cfg_ptr->ext_zfhmin || ctx->cfg_ptr->ext_zhinxmin)) { \
         return false; \
     } \
 } while (0)
@@ -47,7 +46,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
     TCGv t0;
 
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     decode_save_opc(ctx);
     t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -70,7 +69,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
     TCGv t0;
 
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     decode_save_opc(ctx);
     t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -401,7 +400,7 @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
 static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -418,7 +417,7 @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
 static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
     REQUIRE_ZDINX_OR_D(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -436,7 +435,7 @@ static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
 static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -452,7 +451,7 @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
 static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
     REQUIRE_ZDINX_OR_D(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -585,7 +584,7 @@ static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
 static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     TCGv dest = dest_gpr(ctx, a->rd);
 
@@ -605,7 +604,7 @@ static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
 static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
 