
[v3,5/6] Do constant folding for shift operations.

Message ID 673a3b026dabb1cf2584388059ce39e8cc3bd9eb.1309865252.git.batuzovk@ispras.ru
State New

Commit Message

Kirill Batuzov July 7, 2011, 12:37 p.m. UTC
Perform constant folding for SHR, SHL, SAR, ROTR, ROTL operations.

Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
---
 tcg/optimize.c |   72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 72 insertions(+), 0 deletions(-)
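
For reference, here is a small standalone sketch (not part of the patch) of what the new 32-bit shift cases compute once both operands are known constants; modelling TCGArg as a plain uint64_t is an assumption of this sketch only.

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for TCGArg; treating it as uint64_t is an assumption of this
   sketch, not something the patch itself requires. */
typedef uint64_t arg_t;

/* Mirrors the shl_i32/shr_i32 cases added to do_constant_folding_2(). */
static arg_t fold_shl_i32(arg_t x, arg_t y) { return (uint32_t)x << (uint32_t)y; }
static arg_t fold_shr_i32(arg_t x, arg_t y) { return (uint32_t)x >> (uint32_t)y; }

int main(void)
{
    /* A shl_i32 with constant operands 0x11 and 4 folds to a movi of 0x110. */
    printf("shl: 0x%" PRIx64 "\n", fold_shl_i32(0x11, 4));
    /* shr_i32 is a logical shift: 0x80000000 >> 4 == 0x08000000. */
    printf("shr: 0x%" PRIx64 "\n", fold_shr_i32(0x80000000u, 4));
    return 0;
}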

Comments

Blue Swirl July 30, 2011, 12:25 p.m. UTC | #1
On Thu, Jul 7, 2011 at 3:37 PM, Kirill Batuzov <batuzovk@ispras.ru> wrote:
> Perform constant folding for SHR, SHL, SAR, ROTR, ROTL operations.

This patch broke the build on targets (Sparc, MIPS) which don't implement the
rotation ops; the next patch likewise. I committed a fix.
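
The breakage comes from the INDEX_op_rotl_*/INDEX_op_rotr_* opcode values only existing when the backend advertises rotation support. Below is a sketch of the kind of guard that addresses it, shown against the op_bits() hunk; it assumes the TCG_TARGET_HAS_rot_i32/TCG_TARGET_HAS_rot_i64 macros are usable as 0/1 preprocessor values and is an illustration, not necessarily the committed fix.

/* Illustrative fragment of op_bits(): compile the rotation cases only
   when the backend provides the opcodes. */
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
#if TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
#endif
        return 32;

The same guard would be needed around the _i64 cases and around the corresponding rotl/rotr cases in do_constant_folding_2() and tcg_constant_folding().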

Blue Swirl July 30, 2011, 7:13 p.m. UTC | #2
On Sat, Jul 30, 2011 at 3:25 PM, Blue Swirl <blauwirbel@gmail.com> wrote:
> On Thu, Jul 7, 2011 at 3:37 PM, Kirill Batuzov <batuzovk@ispras.ru> wrote:
>> Perform constant folding for SHR, SHL, SAR, ROTR, ROTL operations.
>
> This patch broke the build on targets (Sparc, MIPS) which don't implement the
> rotation ops; the next patch likewise. I committed a fix.

Unfortunately my patch that fixed the Sparc build broke the i386 build, so I
committed another fix.


Patch

diff --git a/tcg/optimize.c b/tcg/optimize.c
index c469952..a1bb287 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -102,6 +102,11 @@ static int op_bits(int op)
     case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
+    case INDEX_op_shl_i32:
+    case INDEX_op_shr_i32:
+    case INDEX_op_sar_i32:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotr_i32:
         return 32;
 #if TCG_TARGET_REG_BITS == 64
     case INDEX_op_mov_i64:
@@ -111,6 +116,11 @@ static int op_bits(int op)
     case INDEX_op_and_i64:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i64:
+    case INDEX_op_shl_i64:
+    case INDEX_op_shr_i64:
+    case INDEX_op_sar_i64:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i64:
         return 64;
 #endif
     default:
@@ -205,6 +215,58 @@ static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
     CASE_OP_32_64(xor):
         return x ^ y;
 
+    case INDEX_op_shl_i32:
+        return (uint32_t)x << (uint32_t)y;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_shl_i64:
+        return (uint64_t)x << (uint64_t)y;
+#endif
+
+    case INDEX_op_shr_i32:
+        return (uint32_t)x >> (uint32_t)y;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_shr_i64:
+        return (uint64_t)x >> (uint64_t)y;
+#endif
+
+    case INDEX_op_sar_i32:
+        return (int32_t)x >> (int32_t)y;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_sar_i64:
+        return (int64_t)x >> (int64_t)y;
+#endif
+
+    case INDEX_op_rotr_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+#endif
+        x = (x << (32 - y)) | (x >> y);
+        return x;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_rotr_i64:
+        x = (x << (64 - y)) | (x >> y);
+        return x;
+#endif
+
+    case INDEX_op_rotl_i32:
+#if TCG_TARGET_REG_BITS == 64
+        x &= 0xffffffff;
+        y &= 0xffffffff;
+#endif
+        x = (x << y) | (x >> (32 - y));
+        return x;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_rotl_i64:
+        x = (x << y) | (x >> (64 - y));
+        return x;
+#endif
+
     default:
         fprintf(stderr,
                 "Unrecognized operation %d in do_constant_folding.\n", op);
@@ -278,6 +340,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         switch (op) {
         CASE_OP_32_64(add):
         CASE_OP_32_64(sub):
+        CASE_OP_32_64(shl):
+        CASE_OP_32_64(shr):
+        CASE_OP_32_64(sar):
+        CASE_OP_32_64(rotl):
+        CASE_OP_32_64(rotr):
             if (temps[args[1]].state == TCG_TEMP_CONST) {
                 /* Proceed with possible constant folding. */
                 break;
@@ -363,6 +430,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
         CASE_OP_32_64(or):
         CASE_OP_32_64(and):
         CASE_OP_32_64(xor):
+        CASE_OP_32_64(shl):
+        CASE_OP_32_64(shr):
+        CASE_OP_32_64(sar):
+        CASE_OP_32_64(rotl):
+        CASE_OP_32_64(rotr):
             if (temps[args[1]].state == TCG_TEMP_CONST
                 && temps[args[2]].state == TCG_TEMP_CONST) {
                 gen_opc_buf[op_index] = op_to_movi(op);
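
As an aside on the rotation cases above, here is a standalone sketch (not part of the patch) of the rotr_i32 arithmetic, with the 64-bit-host masking applied unconditionally; for simplicity it assumes a nonzero rotation count.

#include <inttypes.h>
#include <stdio.h>

/* Standalone mirror of the rotr_i32 folding arithmetic above.  Only the
   low 32 bits of the result are meaningful for a 32-bit operation. */
static uint64_t fold_rotr_i32(uint64_t x, uint64_t y)
{
    x &= 0xffffffff;
    y &= 0xffffffff;
    return (x << (32 - y)) | (x >> y);
}

int main(void)
{
    /* rotr_i32 of 0x00000001 by 4 gives 0x10000000. */
    printf("0x%" PRIx64 "\n", fold_rotr_i32(1, 4));
    return 0;
}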