tcg: Split out swap_commutative as a subroutine

Reduces code duplication and prefers

  movcond d, c1, c2, const, s
to
  movcond d, c1, c2, s, const

It also prefers

  add r, r, c
over
  add r, c, r

when both inputs are known constants.  This doesn't matter for a true add,
as we will fully constant-fold that case.  But it matters for a follow-on
patch that uses this routine for add2, which may not be fully foldable.
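
For illustration, a minimal stand-alone sketch of the decision rule (not
the patch code itself: is_const[] is a toy stand-in for the real
temps[].state == TCG_TEMP_CONST test, and TCGArg is simplified to a temp
index), walking the "add r, c, r" case described above:

    #include <stdbool.h>
    #include <stdio.h>

    typedef int TCGArg;          /* toy: an arg is just a temp index */
    static bool is_const[8];     /* toy: whether temp i is a known constant */

    static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
    {
        TCGArg a1 = *p1, a2 = *p2;
        int sum = 0;

        /* sum ends up +1 if only the first input is constant, -1 if
           only the second is, and 0 if both or neither are.  */
        sum += is_const[a1];
        sum -= is_const[a2];

        /* Swap when the constant sits first (sum > 0), or on a tie when
           the second input aliases the destination, yielding the
           "op a, a, b" form preferred on non-RISC hosts.  */
        if (sum > 0 || (sum == 0 && dest == a2)) {
            *p1 = a2;
            *p2 = a1;
            return true;
        }
        return false;
    }

    int main(void)
    {
        /* "add r, c, r" with both inputs known constants: sum is 0 and
           the second input aliases the destination, so the operands
           swap and we get "add r, r, c".  */
        TCGArg r = 1, c = 2;
        TCGArg in1 = c, in2 = r;
        is_const[r] = is_const[c] = true;

        bool swapped = swap_commutative(r, &in1, &in2);
        printf("swapped=%d, form: add r, %s, %s\n",
               swapped, in1 == r ? "r" : "c", in2 == r ? "r" : "c");
        /* prints: swapped=1, form: add r, r, c */
        return 0;
    }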

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Richard Henderson, 2012-10-02 11:32:21 -07:00; committed by Aurelien Jarno
parent 6f4d6b0908
commit 24c9ae4eba

tcg/optimize.c

@@ -388,6 +388,23 @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
     tcg_abort();
 }
 
+static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
+{
+    TCGArg a1 = *p1, a2 = *p2;
+    int sum = 0;
+    sum += temps[a1].state == TCG_TEMP_CONST;
+    sum -= temps[a2].state == TCG_TEMP_CONST;
+
+    /* Prefer the constant in second argument, and then the form
+       op a, a, b, which is better handled on non-RISC hosts.  */
+    if (sum > 0 || (sum == 0 && dest == a2)) {
+        *p1 = a2;
+        *p2 = a1;
+        return true;
+    }
+    return false;
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                     TCGArg *args, TCGOpDef *tcg_op_defs)
@@ -397,7 +414,6 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
     const TCGOpDef *def;
     TCGArg *gen_args;
     TCGArg tmp;
-    TCGCond cond;
 
     /* Array VALS has an element for each temp.
        If this temp holds a constant then its value is kept in VALS' element.
@@ -440,52 +456,28 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         CASE_OP_32_64(eqv):
         CASE_OP_32_64(nand):
         CASE_OP_32_64(nor):
-            /* Prefer the constant in second argument, and then the form
-               op a, a, b, which is better handled on non-RISC hosts. */
-            if (temps[args[1]].state == TCG_TEMP_CONST || (args[0] == args[2]
-                && temps[args[2]].state != TCG_TEMP_CONST)) {
-                tmp = args[1];
-                args[1] = args[2];
-                args[2] = tmp;
-            }
+            swap_commutative(args[0], &args[1], &args[2]);
             break;
         CASE_OP_32_64(brcond):
-            if (temps[args[0]].state == TCG_TEMP_CONST
-                && temps[args[1]].state != TCG_TEMP_CONST) {
-                tmp = args[0];
-                args[0] = args[1];
-                args[1] = tmp;
+            if (swap_commutative(-1, &args[0], &args[1])) {
                 args[2] = tcg_swap_cond(args[2]);
             }
             break;
         CASE_OP_32_64(setcond):
-            if (temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[2]].state != TCG_TEMP_CONST) {
-                tmp = args[1];
-                args[1] = args[2];
-                args[2] = tmp;
+            if (swap_commutative(args[0], &args[1], &args[2])) {
                 args[3] = tcg_swap_cond(args[3]);
             }
             break;
         CASE_OP_32_64(movcond):
-            cond = args[5];
-            if (temps[args[1]].state == TCG_TEMP_CONST
-                && temps[args[2]].state != TCG_TEMP_CONST) {
-                tmp = args[1];
-                args[1] = args[2];
-                args[2] = tmp;
-                cond = tcg_swap_cond(cond);
+            if (swap_commutative(-1, &args[1], &args[2])) {
+                args[5] = tcg_swap_cond(args[5]);
             }
             /* For movcond, we canonicalize the "false" input reg to match
                the destination reg so that the tcg backend can implement
                a "move if true" operation.  */
-            if (args[0] == args[3]) {
-                tmp = args[3];
-                args[3] = args[4];
-                args[4] = tmp;
-                cond = tcg_invert_cond(cond);
+            if (swap_commutative(args[0], &args[4], &args[3])) {
+                args[5] = tcg_invert_cond(args[5]);
             }
-            args[5] = cond;
         default:
             break;
         }
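
Two details in the callers above are easy to miss: brcond and the
comparison half of movcond pass -1 for dest, since the "op a, a, b"
preference is meaningless when the destination is not one of the swapped
operands; and the condition fix-up differs, because swapping the two
compared values mirrors the condition (tcg_swap_cond), while swapping
movcond's true/false value inputs negates it (tcg_invert_cond).  A toy
model of that distinction, with a simplified enum and hand-written
stand-ins for the real TCGCond helpers:

    #include <assert.h>

    /* Simplified stand-ins for TCGCond and the tcg_swap_cond /
       tcg_invert_cond helpers used by the callers above.  */
    typedef enum { COND_EQ, COND_NE, COND_LT, COND_GE,
                   COND_LE, COND_GT } Cond;

    /* Mirror: "a < b" becomes "b > a" when the operands trade places.  */
    static Cond swap_cond(Cond c)
    {
        switch (c) {
        case COND_LT: return COND_GT;
        case COND_GT: return COND_LT;
        case COND_LE: return COND_GE;
        case COND_GE: return COND_LE;
        default:      return c;      /* EQ and NE are symmetric */
        }
    }

    /* Negate: selecting the other value operand flips which arm of the
       movcond is taken, so the condition itself must be inverted.  */
    static Cond invert_cond(Cond c)
    {
        switch (c) {
        case COND_EQ: return COND_NE;
        case COND_NE: return COND_EQ;
        case COND_LT: return COND_GE;
        case COND_GE: return COND_LT;
        case COND_LE: return COND_GT;
        case COND_GT: return COND_LE;
        }
        return c;                    /* unreachable */
    }

    int main(void)
    {
        /* movcond d, c1, c2, vt, vf, LT:
           swapping c1/c2 mirrors the test  -> GT (same value selected);
           swapping vt/vf inverts the test  -> GE (other slot holds vt).  */
        assert(swap_cond(COND_LT) == COND_GT);
        assert(invert_cond(COND_LT) == COND_GE);
        return 0;
    }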