
[4/6] Do constant folding for boolean operations.

Message ID: 898222ebb06df066cad8c5286bee65319e46789a.1305889001.git.batuzovk@ispras.ru
State: New

Commit Message

Kirill Batuzov May 20, 2011, 12:39 p.m. UTC
Perform constant folding for AND, OR, XOR operations.

Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
---
 tcg/optimize.c |   58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 58 insertions(+), 0 deletions(-)
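
For context, a minimal standalone sketch of what the folding amounts to
(plain C values instead of TCG temporaries; the enum and function names
are illustrative, not from the patch):

    #include <inttypes.h>
    #include <stdio.h>

    /* Simplified stand-in for TCG's boolean opcodes. */
    typedef enum { OP_AND, OP_OR, OP_XOR } bool_op;

    /* Fold an operation whose operands are both known constants into a
     * single constant, as do_constant_folding_2() does in the patch. */
    static uint64_t fold_bool(bool_op op, uint64_t x, uint64_t y)
    {
        switch (op) {
        case OP_AND: return x & y;
        case OP_OR:  return x | y;
        case OP_XOR: return x ^ y;
        }
        return 0; /* not reached */
    }

    int main(void)
    {
        /* and_i32 with constant inputs 0xff00 and 0x0ff0 can be
         * replaced by a movi of the folded value 0x0f00 at
         * translation time. */
        printf("0x%" PRIx64 "\n", fold_bool(OP_AND, 0xff00, 0x0ff0));
        return 0;
    }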

Comments

Richard Henderson May 20, 2011, 6:45 p.m. UTC | #1
On 05/20/2011 05:39 AM, Kirill Batuzov wrote:
> +        case INDEX_op_or_i32:
> +        case INDEX_op_and_i32:
> +#if TCG_TARGET_REG_BITS == 64
> +        case INDEX_op_and_i64:
> +        case INDEX_op_or_i64:
> +#endif
> +            if (args[1] == args[2]) {
> +                if (args[1] == args[0]) {
> +                    args += 3;
> +                    gen_opc_buf[op_index] = INDEX_op_nop;
> +                } else {

I do wonder if it would be better to split this sort of optimization
out into a different function.  You're applying identity
simplifications here, but you're not doing the same for other
operations, such as x + 0.

Indeed, I'll argue that 0+x is more likely to happen than x|x, given
that the 0 value could have been a relocation filled in by the linker.
Consider @hi16 and @lo16 relocation pairs when the symbol happens to
be linked into the low 64k of the address space: the @hi16 half then
resolves to zero, and the instruction that combines the two halves
becomes a 0+x addition.


r~
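
A rough sketch of the kind of shared identity check being suggested
(standalone C with a simplified opcode enum; the helper name and
signature are hypothetical, not from the patch or from TCG):

    #include <stdint.h>

    typedef enum { OP_ADD, OP_AND, OP_OR, OP_XOR } op_t;

    /* Nonzero when "dst = x op y" reduces to a plain move of x: covers
     * both the x|x / x&x case the patch handles and the x+0 style
     * identities mentioned above. */
    static int reduces_to_mov(op_t op, int x_equals_y,
                              int y_is_const, uint64_t y_val)
    {
        switch (op) {
        case OP_ADD:
        case OP_XOR:
            return y_is_const && y_val == 0;      /* x + 0, x ^ 0 -> x */
        case OP_OR:
            return x_equals_y                     /* x | x -> x */
                || (y_is_const && y_val == 0);    /* x | 0 -> x */
        case OP_AND:
            return x_equals_y                     /* x & x -> x */
                || (y_is_const && y_val == ~(uint64_t)0); /* x & ~0 -> x */
        }
        return 0;
    }

    int main(void)
    {
        /* x | x and x + 0 both collapse to a move. */
        return !(reduces_to_mov(OP_OR, 1, 0, 0)
                 && reduces_to_mov(OP_ADD, 0, 1, 0));
    }

The 0+x form is covered by the same check once operands of commutative
ops are normalized so that a constant always ends up second.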

Patch

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 4073f05..a02d5c1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -38,6 +38,13 @@  typedef enum {
     TCG_TEMP_ANY
 } tcg_temp_state;
 
+const int mov_opc[] = {
+    INDEX_op_mov_i32,
+#if TCG_TARGET_REG_BITS == 64
+    INDEX_op_mov_i64,
+#endif
+};
+
 static int mov_to_movi(int op)
 {
     switch (op) {
@@ -89,12 +96,18 @@  static int op_bits(int op)
     case INDEX_op_add_i32:
     case INDEX_op_sub_i32:
     case INDEX_op_mul_i32:
+    case INDEX_op_and_i32:
+    case INDEX_op_or_i32:
+    case INDEX_op_xor_i32:
         return 32;
 #if TCG_TARGET_REG_BITS == 64
     case INDEX_op_mov_i64:
     case INDEX_op_add_i64:
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i64:
+    case INDEX_op_and_i64:
+    case INDEX_op_or_i64:
+    case INDEX_op_xor_i64:
         return 64;
 #endif
     default:
@@ -137,6 +150,24 @@  static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
 #endif
         return x * y;
 
+    case INDEX_op_and_i32:
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_and_i64:
+#endif
+        return x & y;
+
+    case INDEX_op_or_i32:
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_or_i64:
+#endif
+        return x | y;
+
+    case INDEX_op_xor_i32:
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_xor_i64:
+#endif
+        return x ^ y;
+
     default:
         fprintf(stderr,
                 "Unrecognized operation %d in do_constant_folding.\n", op);
@@ -237,10 +268,37 @@  static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             gen_args += 2;
             args += 2;
             break;
+        case INDEX_op_or_i32:
+        case INDEX_op_and_i32:
+#if TCG_TARGET_REG_BITS == 64
+        case INDEX_op_and_i64:
+        case INDEX_op_or_i64:
+#endif
+            if (args[1] == args[2]) {
+                if (args[1] == args[0]) {
+                    args += 3;
+                    gen_opc_buf[op_index] = INDEX_op_nop;
+                } else {
+                    reset_temp(state, vals, args[0], nb_temps, nb_globals);
+                    if (args[1] >= s->nb_globals) {
+                        state[args[0]] = TCG_TEMP_COPY;
+                        vals[args[0]] = args[1];
+                    }
+                    gen_opc_buf[op_index] = mov_opc[op_bits(op) / 32 - 1];
+                    gen_args[0] = args[0];
+                    gen_args[1] = args[1];
+                    gen_args += 2;
+                    args += 3;
+                }
+                break;
+            }
+            /* Proceed with default binary operation handling */
+        case INDEX_op_xor_i32:
         case INDEX_op_add_i32:
         case INDEX_op_sub_i32:
         case INDEX_op_mul_i32:
 #if TCG_TARGET_REG_BITS == 64
+        case INDEX_op_xor_i64:
         case INDEX_op_add_i64:
         case INDEX_op_sub_i64:
         case INDEX_op_mul_i64: