tcg/optimize: Do not attempt to constant fold neg_vec

Split out the tail of fold_neg to fold_neg_no_const so that we
can avoid attempting to constant fold vector negate.
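
For reference, the resulting shape of the two helpers, assembled from the
hunks below (the unchanged tail of the mask computation is elided):

    static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
    {
        /* Known-bits propagation only; no constant folding, so safe for neg_vec. */
        uint64_t z_mask = arg_info(op->args[1])->z_mask;
        ctx->z_mask = -(z_mask & -z_mask);
        /* ... unchanged tail ... */
        return true;
    }

    static bool fold_neg(OptContext *ctx, TCGOp *op)
    {
        /* Integer negate still attempts constant folding first. */
        return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
    }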

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2150
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
(cherry picked from commit e25fe886b8)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
(Mjt: context fixup in tests/tcg/aarch64/Makefile.target)
Richard Henderson, 2024-04-04 20:53:50 +00:00; committed by Michael Tokarev
commit b198998b7f (parent fd01f5a847)
3 changed files with 21 additions and 10 deletions

tcg/optimize.c

@@ -1634,16 +1634,10 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_neg(OptContext *ctx, TCGOp *op)
+static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
-
-    if (fold_const1(ctx, op)) {
-        return true;
-    }
-
     /* Set to 1 all bits to the left of the rightmost. */
-    z_mask = arg_info(op->args[1])->z_mask;
+    uint64_t z_mask = arg_info(op->args[1])->z_mask;
     ctx->z_mask = -(z_mask & -z_mask);
 
     /*
@@ -1654,6 +1648,11 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
+}
+
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2_commutative(ctx, op) ||
@@ -1949,7 +1948,7 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
     if (have_neg) {
         op->opc = neg_op;
         op->args[1] = op->args[2];
-        return fold_neg(ctx, op);
+        return fold_neg_no_const(ctx, op);
     }
     return false;
 }

tests/tcg/aarch64/Makefile.target

@@ -10,7 +10,7 @@ VPATH += $(AARCH64_SRC)
 
 # Base architecture tests
 AARCH64_TESTS=fcvt pcalign-a64
-AARCH64_TESTS += test-2248
+AARCH64_TESTS += test-2248 test-2150
 
 fcvt: LDFLAGS+=-lm

tests/tcg/aarch64/test-2150.c (new file)

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* See https://gitlab.com/qemu-project/qemu/-/issues/2150 */
+
+int main()
+{
+    asm volatile(
+        "movi v6.4s, #1\n"
+        "movi v7.4s, #0\n"
+        "sub v6.2d, v7.2d, v6.2d\n"
+        : : : "v6", "v7");
+    return 0;
+}
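
The reproducer is wired into AARCH64_TESTS above, so it should be exercised by
QEMU's check-tcg harness (roughly: make check-tcg, assuming an aarch64 cross
toolchain or the cross-compile containers are available). The guest code
negates a vector by subtracting it from a zeroed register, which is exactly the
pattern fold_sub_to_neg rewrites into neg_vec.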