target/arm: Enforce alignment for VLDn/VSTn (multiple)
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-24-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent a8502b37f6
commit 7c68c196cf
@@ -429,7 +429,7 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
 {
     /* Neon load/store multiple structures */
     int nregs, interleave, spacing, reg, n;
-    MemOp endian = s->be_data;
+    MemOp mop, align, endian;
     int mmu_idx = get_mem_index(s);
     int size = a->size;
     TCGv_i64 tmp64;
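The first hunk replaces the single MemOp endian with three separate components, mop, align and endian, which the later hunks combine as mop = endian | size | align. As a rough standalone sketch of how such a descriptor packs size, endianness and an alignment requirement into one value (the flag values below are simplified stand-ins, not QEMU's real MemOp layout from include/exec/memop.h):

/*
 * Simplified stand-in for a MemOp-style descriptor; the real flags
 * live in QEMU's include/exec/memop.h and use a different layout.
 */
#include <stdio.h>

enum {
    EX_MO_SIZE  = 0x03,  /* log2 of access size: 0..3 -> 1..8 bytes */
    EX_MO_BE    = 0x04,  /* set = big-endian, clear = little-endian */
    EX_MO_ALIGN = 0x08,  /* require the access to be naturally aligned */
};

int main(void)
{
    /* Compose one descriptor: 64-bit, little-endian, alignment-checked. */
    unsigned mop = 3 | EX_MO_ALIGN;

    printf("size:  %u bytes\n", 1u << (mop & EX_MO_SIZE));
    printf("order: %s-endian\n", (mop & EX_MO_BE) ? "big" : "little");
    printf("align: %s\n", (mop & EX_MO_ALIGN) ? "checked" : "not checked");
    return 0;
}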
@@ -473,20 +473,36 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
     }
 
     /* For our purposes, bytes are always little-endian. */
+    endian = s->be_data;
     if (size == 0) {
         endian = MO_LE;
     }
+
+    /* Enforce alignment requested by the instruction */
+    if (a->align) {
+        align = pow2_align(a->align + 2); /* 4 ** a->align */
+    } else {
+        align = s->align_mem ? MO_ALIGN : 0;
+    }
+
     /*
      * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     if (interleave == 1 && endian == MO_LE) {
+        /* Retain any natural alignment. */
+        if (align == MO_ALIGN) {
+            align = pow2_align(size);
+        }
         size = 3;
     }
+
     tmp64 = tcg_temp_new_i64();
     addr = tcg_temp_new_i32();
     tmp = tcg_const_i32(1 << size);
     load_reg_var(s, addr, a->rn);
+
+    mop = endian | size | align;
     for (reg = 0; reg < nregs; reg++) {
         for (n = 0; n < 8 >> size; n++) {
             int xs;
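In the second hunk, a non-zero align field in the instruction becomes an explicit alignment requirement: pow2_align(a->align + 2) requests 2^(align+2) bytes, i.e. 8, 16 or 32 bytes for align = 1, 2, 3, while align = 0 only keeps natural alignment when the guest has alignment checking enabled (s->align_mem). A small standalone sketch of that mapping (the helper name below is made up for illustration; pow2_align itself returns a MemOp flag, not a byte count):

#include <assert.h>
#include <stdio.h>

/*
 * Byte alignment implied by the VLDn/VSTn (multiple structures) align
 * field, mirroring pow2_align(a->align + 2) in the patch:
 * align = 1 -> 8 bytes, align = 2 -> 16 bytes, align = 3 -> 32 bytes.
 * Returns 0 when the instruction imposes no alignment of its own.
 */
static unsigned vldst_multiple_align_bytes(unsigned align_field)
{
    assert(align_field <= 3);
    return align_field ? 4u << align_field : 0;  /* 2 ** (align_field + 2) */
}

int main(void)
{
    for (unsigned a = 0; a <= 3; a++) {
        printf("align field %u -> %u byte(s)\n", a, vldst_multiple_align_bytes(a));
    }
    return 0;
}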
@@ -494,15 +510,16 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
                 int tt = a->vd + reg + spacing * xs;
 
                 if (a->l) {
-                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx,
-                                             endian | size);
+                    gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
                     neon_store_element64(tt, n, size, tmp64);
                 } else {
                     neon_load_element64(tmp64, tt, n, size);
-                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx,
-                                             endian | size);
+                    gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
                 }
                 tcg_gen_add_i32(addr, addr, tmp);
+
+                /* Subsequent memory operations inherit alignment */
+                mop &= ~MO_AMASK;
             }
         }
     }
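The last hunk passes the combined mop to the load/store helpers and then clears MO_AMASK after every access, so only the first element access of the instruction carries the alignment check; the following accesses simply advance by the element size. A minimal sketch of that check-once-then-strip pattern (flag values and the access helper are illustrative, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's MemOp alignment bits. */
#define EX_MO_ALIGN_16  0x10u
#define EX_MO_AMASK     0xf0u  /* all alignment-requirement bits */

/* Pretend access: only models a 16-byte alignment check. */
static int access_ok(uint32_t addr, unsigned mop)
{
    if ((mop & EX_MO_AMASK) && (addr % 16) != 0) {
        return 0;  /* would raise an alignment fault */
    }
    return 1;
}

int main(void)
{
    uint32_t addr = 0x1000;          /* 16-byte aligned base address */
    unsigned mop = EX_MO_ALIGN_16;   /* alignment demanded by the instruction */

    for (int i = 0; i < 4; i++) {
        printf("access at 0x%x: %s\n", addr, access_ok(addr, mop) ? "ok" : "fault");
        addr += 8;                   /* step to the next 64-bit element */
        mop &= ~EX_MO_AMASK;         /* later accesses inherit the first check */
    }
    return 0;
}

Without the final mask the access at 0x1008 in this model would be rejected, even though the instruction's alignment requirement only applies to its starting address.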