/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"

#define CASE_OP_32_64(x)                                \
        glue(glue(case INDEX_op_, x), _i32):            \
        glue(glue(case INDEX_op_, x), _i64)
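
/* For example, CASE_OP_32_64(add) expands via QEMU's glue() token-pasting
 * macro to:
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64:
 * so a single switch arm below can handle both operand widths.
 */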

typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
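
/* All copies of a value are linked through next_copy/prev_copy into a
 * doubly linked circular list, so copy membership is tested by walking
 * the ring.  The mask field is a conservative superset of the bits that
 * may be non-zero in the temp's value; -1 means nothing is known.
 */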

/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
   the copy flag from the left temp.  */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    temps[temp].mask = -1;
}

/* Reset all temporaries, given that there are NB_TEMPS of them.  */
static void reset_all_temps(int nb_temps)
{
    int i;
    for (i = 0; i < nb_temps; i++) {
        temps[i].state = TCG_TEMP_UNDEF;
        temps[i].mask = -1;
    }
}
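
/* Return the operand width in bits (32 or 64) implied by OP's flags. */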
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}

static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}

static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (arg1 == arg2) {
        return true;
    }

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
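
/* Emit a mov DST, SRC and record DST as a copy of SRC.  DST joins SRC's
 * copy ring only when both temps have the same type. */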
static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args,
                            TCGArg dst, TCGArg src)
{
    reset_temp(dst);
    temps[dst].mask = temps[src].mask;
    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}

static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val)
{
    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    temps[dst].mask = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}

static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
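
/* Evaluate OP applied to the constants X and Y.  For 32-bit ops the
 * result may carry garbage in the high bits; do_constant_folding()
 * below truncates it. */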
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;

    case INDEX_op_rotr_i32:
        x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
        return x;

    case INDEX_op_rotr_i64:
        x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
        return x;

    case INDEX_op_rotl_i32:
        x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
        return x;

    case INDEX_op_rotl_i64:
        x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
        return x;

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    } else {
        return 2;
    }
}

/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temps[bl].state == TCG_TEMP_CONST
        && temps[bh].state == TCG_TEMP_CONST) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temps[al].state == TCG_TEMP_CONST
            && temps[ah].state == TCG_TEMP_CONST) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
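
/* Canonicalization helper: e.g. "add r, c, r" becomes "add r, r, c", and
 * the movcond handling below prefers
 *     movcond d, c1, c2, const, s
 * over
 *     movcond d, c1, c2, s, const.
 * Returns true if the operands were swapped, so callers can adjust a
 * condition code accordingly.
 */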
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temps[a1].state == TCG_TEMP_CONST;
    sum -= temps[a2].state == TCG_TEMP_CONST;

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temps[p1[0]].state == TCG_TEMP_CONST;
    sum += temps[p1[1]].state == TCG_TEMP_CONST;
    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args;
    tcg_target_ulong mask, affected;
    TCGOpcode op;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;

    /* Array TEMPS has an element for each temp.
       If this temp holds a constant then its value is kept in TEMPS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    nb_ops = tcg_opc_ptr - s->gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = s->gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (op == INDEX_op_call) {
            int nb_oargs = args[0] >> 16;
            int nb_iargs = args[0] & 0xffff;
            for (i = nb_oargs + 1; i < nb_oargs + nb_iargs + 1; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        } else {
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        }

        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            swap_commutative(args[0], &args[1], &args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        default:
            break;
        }

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0" */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, 0 => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }
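
        /* Example of what the known-zero-bits tracking below enables
         * (taken from an x86 guest trace): after
         *     and_i64 cc_dst, rcx, $0xff0000
         * only bits 16-23 of cc_dst can be non-zero, so a following
         *     ext32u_i64 tmp0, cc_dst
         * clears nothing new and is rewritten as a mov, which copy
         * propagation can then eliminate entirely.
         */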
        /* Simplify using known-zero bits */
        mask = -1;
        affected = -1;
        switch (op) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temps[args[2]].state == TCG_TEMP_CONST) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;

        CASE_OP_32_64(sar):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = ((tcg_target_long)temps[args[1]].mask
                        >> temps[args[2]].val);
            }
            break;

        CASE_OP_32_64(shr):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = temps[args[1]].mask >> temps[args[2]].val;
            }
            break;

        CASE_OP_32_64(shl):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = temps[args[1]].mask << temps[args[2]].val;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;
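
        /* For deposit, args are (dest, t1, t2, pos, len): the low LEN
         * bits of t2 replace bits POS..POS+LEN-1 of t1, so the result
         * mask combines both input masks accordingly. */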
        CASE_OP_32_64(deposit):
            tmp = ((1ull << args[4]) - 1);
            mask = ((temps[args[1]].mask & ~(tmp << args[3]))
                    | ((temps[args[2]].mask & tmp) << args[3]));
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;

        CASE_OP_32_64(setcond):
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;

        default:
            break;
        }

        if (mask == 0) {
            assert(def->nb_oargs == 1);
            s->gen_opc_buf[op_index] = op_to_movi(op);
            tcg_opt_gen_movi(gen_args, args[0], 0);
            args += def->nb_oargs + def->nb_iargs + def->nb_cargs;
            gen_args += 2;
            continue;
        }
        if (affected == 0) {
            assert(def->nb_oargs == 1);
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_mov(op);
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
            } else {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], temps[args[1]].val);
                gen_args += 2;
            }
            args += def->nb_iargs + 1;
            continue;
        }

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(sub):
        CASE_OP_32_64(xor):
            if (temps_are_copies(args[1], args[2])) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
                gen_args += 2;
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                args += 2;
                s->gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            s->gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1]);
            gen_args += 2;
            args += 2;
            break;

        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 2;
                break;
            }
            goto do_default;

        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 3;
                break;
            }
            goto do_default;

        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = ((1ull << args[4]) - 1);
                tmp = (temps[args[1]].val & ~(tmp << args[3]))
                      | ((temps[args[2]].val & tmp) << args[3]);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 5;
                break;
            }
            goto do_default;

        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
                args += 4;
                break;
            }
            goto do_default;

        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
                args += 4;
                break;
            }
            goto do_default;
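
        /* movcond args are (dest, c1, c2, v_true, v_false, cond); once
         * the condition folds to a constant, tmp is 1 or 0 and
         * args[4-tmp] picks the surviving input value. */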
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    s->gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0], temps[args[4-tmp]].val);
                    gen_args += 2;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]);
                    gen_args += 2;
                }
                args += 6;
                break;
            }
            goto do_default;
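
        /* add2/sub2 args are (rl, rh, al, ah, bl, bh).  With all four
         * input halves constant, compute the full 64-bit result and
         * replace the op with two movi_i32 (a spare nop slot was
         * reserved when the add2/sub2 was emitted). */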
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST
                && temps[args[4]].state == TCG_TEMP_CONST
                && temps[args[5]].state == TCG_TEMP_CONST) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;

                if (op == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                /* We emit the extra nop when we emit the add2/sub2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)a);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(a >> 32));
                gen_args += 4;
                args += 6;
                break;
            }
            goto do_default;

        case INDEX_op_mulu2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;

                /* We emit the extra nop when we emit the mulu2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)r);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(r >> 32));
                gen_args += 4;
                args += 4;
                break;
            }
            goto do_default;

        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[5];
                    gen_args += 1;
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temps[args[2]].state == TCG_TEMP_CONST
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[2]].val == 0
                       && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[1];
                gen_args[1] = args[3];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(gen_args, args[0], tmp);
                gen_args += 2;
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[4]].state == TCG_TEMP_CONST
                       && temps[args[3]].val == 0
                       && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
                gen_args += 4;
            } else {
                goto do_default;
            }
            args += 6;
            break;

        case INDEX_op_call:
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_NO_READ_GLOBALS |
                                            TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1]);
            }
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;

        default:
        do_default:
            /* Default case: we know nothing about operation (or were unable
               to compute the operation result) so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg.  */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
                for (i = 0; i < def->nb_oargs; i++) {
                    reset_temp(args[i]);
                }
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}

TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}