@@ -95,6 +95,11 @@ static int op_bits(int op)
case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotr_i32:
return 32;
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_mov_i64:
@@ -104,6 +109,11 @@ static int op_bits(int op)
case INDEX_op_and_i64:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i64:
return 64;
#endif
default:
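(For reference: op_bits() has to know about the new opcodes because the
do_constant_folding() wrapper in this file uses it to truncate 32-bit
results on 64-bit hosts. A paraphrased sketch of that wrapper, not part
of this patch:

    static TCGArg do_constant_folding(int op, TCGArg x, TCGArg y)
    {
        TCGArg res = do_constant_folding_2(op, x, y);
    #if TCG_TARGET_REG_BITS == 64
        /* A 32-bit operation may leave garbage in the high half of a
           64-bit TCGArg; mask it off based on the operation's width. */
        if (op_bits(op) == 32) {
            res &= 0xffffffff;
        }
    #endif
        return res;
    }

This truncation is also why several of the i32 cases below do not mask
their own results.)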
@@ -177,6 +187,62 @@ static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
#endif
return x ^ y;
+ case INDEX_op_shl_i32:
+#if TCG_TARGET_REG_BITS == 64
+ y &= 0xffffffff;
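+        /* Intentional fallthrough to the 64-bit case; the result is
+           truncated by the caller for 32-bit ops.  */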
+ case INDEX_op_shl_i64:
+#endif
+ return x << y;
+
+ case INDEX_op_shr_i32:
+#if TCG_TARGET_REG_BITS == 64
+ x &= 0xffffffff;
+ y &= 0xffffffff;
+ case INDEX_op_shr_i64:
+#endif
+        /* Assuming TCGArg is unsigned, this is a logical shift.  */
+ return x >> y;
+
+ case INDEX_op_sar_i32:
+#if TCG_TARGET_REG_BITS == 64
+ x &= 0xffffffff;
+ y &= 0xffffffff;
+#endif
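+        /* Arithmetic shift: cast to signed so the shift sign-extends.  */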
+ return (int32_t)x >> (int32_t)y;
+
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_sar_i64:
+ return (int64_t)x >> (int64_t)y;
+#endif
+
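+    /* The rotate cases below assume 0 < y < 32 (resp. 64): a rotate by
+       zero would shift by the full word width.  */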
+ case INDEX_op_rotr_i32:
+#if TCG_TARGET_REG_BITS == 64
+ x &= 0xffffffff;
+ y &= 0xffffffff;
+#endif
+ x = (x << (32 - y)) | (x >> y);
+ return x;
+
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_rotr_i64:
+ x = (x << (64 - y)) | (x >> y);
+ return x;
+#endif
+
+ case INDEX_op_rotl_i32:
+#if TCG_TARGET_REG_BITS == 64
+ x &= 0xffffffff;
+ y &= 0xffffffff;
+#endif
+ x = (x << y) | (x >> (32 - y));
+ return x;
+
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_rotl_i64:
+ x = (x << y) | (x >> (64 - y));
+ return x;
+#endif
+
default:
fprintf(stderr,
"Unrecognized operation %d in do_constant_folding.\n", op);
@@ -246,8 +312,18 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
}
/* Fallthrough */
case INDEX_op_sub_i32:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotr_i32:
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_sub_i64:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i64:
#endif
if (temps[args[1]].state == TCG_TEMP_CONST) {
/* Proceed with possible constant folding. */
@@ -377,6 +453,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_mul_i32:
+ case INDEX_op_shl_i32:
+ case INDEX_op_shr_i32:
+ case INDEX_op_sar_i32:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotr_i32:
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_and_i64:
case INDEX_op_or_i64:
@@ -384,6 +465,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
case INDEX_op_add_i64:
case INDEX_op_sub_i64:
case INDEX_op_mul_i64:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i64:
#endif
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[2]].state == TCG_TEMP_CONST) {
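(With both source operands known constant, the operation itself can now
be replaced by a movi of the folded value. An illustrative before/after
in TCG dump notation, with made-up temporaries:

    movi_i32 tmp1,$0x3
    movi_i32 tmp2,$0x4
    shl_i32 tmp3,tmp1,tmp2

becomes

    movi_i32 tmp1,$0x3
    movi_i32 tmp2,$0x4
    movi_i32 tmp3,$0x30

since 3 << 4 == 0x30; the now-dead movis can be cleaned up separately by
liveness analysis.)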
Perform constant folding for SHR, SHL, SAR, ROTR, ROTL operations.

Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
---
 tcg/optimize.c |   86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 86 insertions(+), 0 deletions(-)