@@ -2659,6 +2659,29 @@ operator_bitwise_and::fold_range (irange &r, tree type,
}
+// Optimize BIT_AND_EXPR, BIT_IOR_EXPR and BIT_XOR_EXPR of signed types
+// by considering the number of leading redundant sign bit copies.
+// clrsb (X op Y) >= min (clrsb (X), clrsb (Y)), so for example
+// [-1, 0] op [-1, 0] is [-1, 0] (where nonzero_bits doesn't help).
+static bool
+wi_optimize_signed_bitwise_op (irange &r, tree type,
+ const wide_int &lh_lb, const wide_int &lh_ub,
+ const wide_int &rh_lb, const wide_int &rh_ub)
+{
+ int lh_clrsb = MIN (wi::clrsb (lh_lb), wi::clrsb (lh_ub));
+ int rh_clrsb = MIN (wi::clrsb (rh_lb), wi::clrsb (rh_ub));
+ int new_clrsb = MIN (lh_clrsb, rh_clrsb);
+ if (new_clrsb == 0)
+ return false;
+ int type_prec = TYPE_PRECISION (type);
+ int rprec = (type_prec - new_clrsb) - 1;
+ value_range_with_overflow (r, type,
+ wi::mask (rprec, true, type_prec),
+ wi::mask (rprec, false, type_prec));
+ return true;
+}
+
+
// Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
// possible. Basically, see if we can optimize:
//
@@ -2839,7 +2862,14 @@ operator_bitwise_and::wi_fold (irange &r, tree type,
}
// If the limits got swapped around, return varying.
if (wi::gt_p (new_lb, new_ub,sign))
- r.set_varying (type);
+ {
+ if (sign == SIGNED
+ && wi_optimize_signed_bitwise_op (r, type,
+ lh_lb, lh_ub,
+ rh_lb, rh_ub))
+ return;
+ r.set_varying (type);
+ }
else
value_range_with_overflow (r, type, new_lb, new_ub);
}
@@ -3093,6 +3123,11 @@ operator_bitwise_or::wi_fold (irange &r, tree type,
|| wi::lt_p (lh_ub, 0, sign)
|| wi::lt_p (rh_ub, 0, sign))
r.set_nonzero (type);
+ else if (sign == SIGNED
+ && wi_optimize_signed_bitwise_op (r, type,
+ lh_lb, lh_ub,
+ rh_lb, rh_ub))
+ return;
else
r.set_varying (type);
return;
@@ -3180,8 +3215,23 @@ operator_bitwise_xor::wi_fold (irange &r, tree type,
// is better than VARYING.
if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
value_range_with_overflow (r, type, new_lb, new_ub);
+ else if (sign == SIGNED
+ && wi_optimize_signed_bitwise_op (r, type,
+ lh_lb, lh_ub,
+ rh_lb, rh_ub))
+ ; /* Do nothing. */
else
r.set_varying (type);
+
+  /* Furthermore, XOR is non-zero if its arguments can't be equal,
+     or if some result bit is already known to be set.  */
+ if (wi::lt_p (lh_ub, rh_lb, sign)
+ || wi::lt_p (rh_ub, lh_lb, sign)
+ || wi::ne_p (result_one_bits, 0))
+ {
+ int_range<2> tmp;
+ tmp.set_nonzero (type);
+ r.intersect (tmp);
+ }
}
bool
new file mode 100644
@@ -0,0 +1,21 @@
+/* { dg-do link } */
+/* { dg-options "-O2" } */
+
+extern void link_error(void);
+
+static char a;
+static short d(unsigned e) {
+ char b;
+ short c;
+ a = b = e;
+ if (b)
+ return 0;
+ if (1 >= e) {
+ c = e == 0;
+ if (c)
+ link_error();
+ }
+ return 0;
+}
+int main() { d(a ^ 233); }
+
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+typedef __INT32_TYPE__ int32_t;
+
+int32_t and(int32_t x, int32_t y)
+{
+ int32_t tx = x >> 24;
+ int32_t ty = y >> 24;
+ int32_t t = tx & ty;
+ return t;
+}
+
+int32_t ior(int32_t x, int32_t y)
+{
+ int32_t tx = x >> 24;
+ int32_t ty = y >> 24;
+ int32_t t = tx | ty;
+ return t;
+}
+
+int32_t xor(int32_t x, int32_t y)
+{
+ int32_t tx = x >> 24;
+ int32_t ty = y >> 24;
+ int32_t t = tx ^ ty;
+ return t;
+}
+
+/* { dg-final { scan-tree-dump-times "\\\[-128, 127\\\]" 9 "evrp" } } */