target/riscv: rvv: Provide a fast path using direct access to host ram for unit-stride fault-only-first load instructions

The unmasked unit-stride fault-only-first load instructions are similar
to the unmasked unit-stride load/store instructions, so they can
likewise be optimized with a fast path that accesses host ram directly.

Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-ID: <20240918171412.150107-6-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Commit: f00089267d (parent: 3333000f69)
Author: Max Chou <max.chou@sifive.com>, 2024-09-19 01:14:10 +08:00
Committer: Alistair Francis <alistair.francis@wdc.com>

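The key step in the fast path below is computing how many whole element
groups fit before the next page boundary: page_split = -(addr |
TARGET_PAGE_MASK) yields the bytes remaining in addr's page, and dividing
by msize (nf * esz bytes per element group) gives the element count that
can go through the direct host-ram routine vext_page_ldst_us(). A minimal
standalone sketch of that arithmetic, assuming 4 KiB pages (PAGE_SIZE and
PAGE_MASK are local stand-ins for QEMU's TARGET_PAGE_SIZE and
TARGET_PAGE_MASK, and msize for the nf * esz group size from the patch):

/*
 * Sketch only, not QEMU code: demonstrates the page-split arithmetic
 * used by vext_ldff() in the diff below.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

int main(void)
{
    uint64_t addr = 0x80001f40;   /* hypothetical guest address */
    uint64_t msize = 8;           /* nf * esz: bytes per element group */

    /* -(addr | PAGE_MASK) == PAGE_SIZE - (addr % PAGE_SIZE):
     * the number of bytes from addr to the end of its page. */
    uint64_t page_split = -(addr | PAGE_MASK);
    uint64_t elems = page_split / msize;   /* whole groups in this page */

    printf("bytes to page end: %" PRIu64 ", elems: %" PRIu64 "\n",
           page_split, elems);             /* 192 and 24 here */
    return 0;
}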
@@ -558,18 +558,18 @@ GEN_VEXT_ST_INDEX(vsxei64_64_v, int64_t, idx_d, ste_d_tlb)
  * unit-stride fault-only-fisrt load instructions
  */
 static inline void
-vext_ldff(void *vd, void *v0, target_ulong base,
-          CPURISCVState *env, uint32_t desc,
-          vext_ldst_elem_fn_tlb *ldst_elem,
-          uint32_t log2_esz, uintptr_t ra)
+vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
+          uint32_t desc, vext_ldst_elem_fn_tlb *ldst_tlb,
+          vext_ldst_elem_fn_host *ldst_host, uint32_t log2_esz, uintptr_t ra)
 {
     uint32_t i, k, vl = 0;
     uint32_t nf = vext_nf(desc);
     uint32_t vm = vext_vm(desc);
     uint32_t max_elems = vext_max_elems(desc, log2_esz);
     uint32_t esz = 1 << log2_esz;
+    uint32_t msize = nf * esz;
     uint32_t vma = vext_vma(desc);
-    target_ulong addr, offset, remain;
+    target_ulong addr, offset, remain, page_split, elems;
     int mmu_index = riscv_env_mmu_index(env, false);
 
     VSTART_CHECK_EARLY_EXIT(env);
@@ -618,19 +618,63 @@ ProbeSuccess:
     if (vl != 0) {
         env->vl = vl;
     }
-    for (i = env->vstart; i < env->vl; i++) {
-        k = 0;
-        while (k < nf) {
-            if (!vm && !vext_elem_mask(v0, i)) {
-                /* set masked-off elements to 1s */
-                vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
-                                  (i + k * max_elems + 1) * esz);
-                k++;
-                continue;
+
+    if (env->vstart < env->vl) {
+        if (vm) {
+            /* Calculate the page range of first page */
+            addr = base + ((env->vstart * nf) << log2_esz);
+            page_split = -(addr | TARGET_PAGE_MASK);
+            /* Get number of elements */
+            elems = page_split / msize;
+            if (unlikely(env->vstart + elems >= env->vl)) {
+                elems = env->vl - env->vstart;
+            }
+
+            /* Load/store elements in the first page */
+            if (likely(elems)) {
+                vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+                                  log2_esz, true, mmu_index, ldst_tlb,
+                                  ldst_host, ra);
+            }
+
+            /* Load/store elements in the second page */
+            if (unlikely(env->vstart < env->vl)) {
+                /* Cross page element */
+                if (unlikely(page_split % msize)) {
+                    for (k = 0; k < nf; k++) {
+                        addr = base + ((env->vstart * nf + k) << log2_esz);
+                        ldst_tlb(env, adjust_addr(env, addr),
+                                 env->vstart + k * max_elems, vd, ra);
+                    }
+                    env->vstart++;
+                }
+
+                addr = base + ((env->vstart * nf) << log2_esz);
+                /* Get number of elements of second page */
+                elems = env->vl - env->vstart;
+
+                /* Load/store elements in the second page */
+                vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
+                                  log2_esz, true, mmu_index, ldst_tlb,
+                                  ldst_host, ra);
+            }
+        } else {
+            for (i = env->vstart; i < env->vl; i++) {
+                k = 0;
+                while (k < nf) {
+                    if (!vext_elem_mask(v0, i)) {
+                        /* set masked-off elements to 1s */
+                        vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+                                          (i + k * max_elems + 1) * esz);
+                        k++;
+                        continue;
+                    }
+                    addr = base + ((i * nf + k) << log2_esz);
+                    ldst_tlb(env, adjust_addr(env, addr), i + k * max_elems,
+                             vd, ra);
+                    k++;
+                }
             }
-            addr = base + ((i * nf + k) << log2_esz);
-            ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
-            k++;
         }
     }
     env->vstart = 0;
@@ -638,18 +682,18 @@ ProbeSuccess:
     vext_set_tail_elems_1s(env->vl, vd, desc, nf, esz, max_elems);
 }
 
-#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN)               \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base,  \
-                  CPURISCVState *env, uint32_t desc)      \
-{                                                         \
-    vext_ldff(vd, v0, base, env, desc, LOAD_FN,           \
-              ctzl(sizeof(ETYPE)), GETPC());              \
-}
+#define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN_TLB, LOAD_FN_HOST)   \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base,        \
+                  CPURISCVState *env, uint32_t desc)            \
+{                                                               \
+    vext_ldff(vd, v0, base, env, desc, LOAD_FN_TLB,             \
+              LOAD_FN_HOST, ctzl(sizeof(ETYPE)), GETPC());      \
+}
 
-GEN_VEXT_LDFF(vle8ff_v,  int8_t,  lde_b_tlb)
-GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb)
-GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb)
-GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb)
+GEN_VEXT_LDFF(vle8ff_v,  int8_t,  lde_b_tlb, lde_b_host)
+GEN_VEXT_LDFF(vle16ff_v, int16_t, lde_h_tlb, lde_h_host)
+GEN_VEXT_LDFF(vle32ff_v, int32_t, lde_w_tlb, lde_w_host)
+GEN_VEXT_LDFF(vle64ff_v, int64_t, lde_d_tlb, lde_d_host)
 
 #define DO_SWAP(N, M) (M)
 #define DO_AND(N, M) (N & M)