Message ID: 4523606e-bf4b-5c44-27e9-7e825ee86e37@redhat.com
State: New
On Tue, Feb 14, 2017 at 7:53 AM, Jeff Law <law@redhat.com> wrote:
>
> This is the first patch in the series with Richi's comments from last week
> addressed.  #2, #3 and #4 were unchanged.
>
> Richi asked for the EXACT_DIV_EXPR handling in
> extract_range_from_binary_expr_1 to move out one IF conditional nesting
> level.
>
> Richi noted that the use of symbolic_range_based_on_p was unsafe in the
> context I used it in extract_range_from_binary_expr, and that the test we
> want to make happens to be simpler as well.
>
> And finally we use Richi's heuristic for when to prefer ~[0,0] over a wide
> normal range.
>
> Bootstrapped and regression tested with the other 3 patches in this kit on
> x86_64-unknown-linux-gnu.
>
> All 4 patches are attached to this message for ease of review.
>
> Ok for the trunk?

Ok.

Thanks,
Richard.

> Thanks,
> jeff
>
>         * tree-vrp.c (extract_range_from_binary_expr_1): For EXACT_DIV_EXPR,
>         if the numerator has the range ~[0,0] make the resultant range ~[0,0].
>         (extract_range_from_binary_expr): For MINUS_EXPR with no derived
>         range, if the operands are known to be not equal, then the resulting
>         range is ~[0,0].
>         (intersect_ranges): If the new range is ~[0,0] and the old range is
>         wide, then prefer ~[0,0].
>
> diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
> index b429217..9174948 100644
> --- a/gcc/tree-vrp.c
> +++ b/gcc/tree-vrp.c
> @@ -2259,6 +2259,19 @@ extract_range_from_binary_expr_1 (value_range *vr,
>    else if (vr1.type == VR_UNDEFINED)
>      set_value_range_to_varying (&vr1);
>
> +  /* We get imprecise results from ranges_from_anti_range when
> +     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
> +     range, but then we also need to hack up vrp_meet.  It's just
> +     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
> +  if (code == EXACT_DIV_EXPR
> +      && vr0.type == VR_ANTI_RANGE
> +      && vr0.min == vr0.max
> +      && integer_zerop (vr0.min))
> +    {
> +      set_value_range_to_nonnull (vr, expr_type);
> +      return;
> +    }
> +
>    /* Now canonicalize anti-ranges to ranges when they are not symbolic
>       and express ~[] op X as ([]' op X) U ([]'' op X).  */
>    if (vr0.type == VR_ANTI_RANGE
> @@ -3298,6 +3311,21 @@ extract_range_from_binary_expr (value_range *vr,
>
>        extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
>      }
> +
> +  /* If we didn't derive a range for MINUS_EXPR, and
> +     op1's range is ~[op0,op0] or vice-versa, then we
> +     can derive a non-null range.  This happens often for
> +     pointer subtraction.  */
> +  if (vr->type == VR_VARYING
> +      && code == MINUS_EXPR
> +      && TREE_CODE (op0) == SSA_NAME
> +      && ((vr0.type == VR_ANTI_RANGE
> +           && vr0.min == op1
> +           && vr0.min == vr0.max)
> +          || (vr1.type == VR_ANTI_RANGE
> +              && vr1.min == op0
> +              && vr1.min == vr1.max)))
> +    set_value_range_to_nonnull (vr, TREE_TYPE (op0));
>  }
>
>  /* Extract range information from a unary operation CODE based on
> @@ -8620,6 +8648,17 @@ intersect_ranges (enum value_range_type *vr0type,
>    else if (vrp_val_is_min (vr1min)
>             && vrp_val_is_max (vr1max))
>      ;
> +  /* Choose the anti-range if it is ~[0,0], that range is special
> +     enough to special case when vr1's range is relatively wide.  */
> +  else if (*vr0min == *vr0max
> +           && integer_zerop (*vr0min)
> +           && (TYPE_PRECISION (TREE_TYPE (*vr0min))
> +               == TYPE_PRECISION (ptr_type_node))
> +           && TREE_CODE (vr1max) == INTEGER_CST
> +           && TREE_CODE (vr1min) == INTEGER_CST
> +           && (wi::clz (wi::sub (vr1max, vr1min))
> +               < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
> +    ;
>    /* Else choose the range.  */
>    else
>      {
>
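For illustration only (this example is not part of the patch or its testsuite), the MINUS_EXPR change targets code of roughly this shape, where a guard establishes that two pointers differ, so the pointer difference can be given the anti-range ~[0,0] and a later test against zero can be folded:

    /* Hypothetical sketch: the early return makes p0 != p1 known on the
       fall-through path, so the subtraction below has the range ~[0,0]
       and the second test should ideally be proven false by VRP.  */
    #include <stddef.h>

    size_t
    nonempty_size (int *p0, int *p1)
    {
      if (p0 == p1)
        return 0;
      size_t n = p1 - p0;   /* MINUS_EXPR on operands known to be unequal.  */
      if (n == 0)           /* Expected to be folded away.  */
        __builtin_trap ();
      return n;
    }

This mirrors the size ()/resize () pattern in the pr79095 tests below, where a size () != 0 guard precedes resize (size () - 1).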
>
>         * tree-vrp.c (overflow_comparison_p_1): New function.
>         (overflow_comparison_p): New function.
>
> diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
> index ad8173c..2c03a74 100644
> --- a/gcc/tree-vrp.c
> +++ b/gcc/tree-vrp.c
> @@ -5186,6 +5186,118 @@ masked_increment (const wide_int &val_in, const wide_int &mask,
>    return val ^ sgnbit;
>  }
>
> +/* Helper for overflow_comparison_p
> +
> +   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
> +   OP1's defining statement to see if it ultimately has the form
> +   OP0 CODE (OP0 PLUS INTEGER_CST)
> +
> +   If so, return TRUE indicating this is an overflow test and store into
> +   *NEW_CST an updated constant that can be used in a narrowed range test.
> +
> +   REVERSED indicates if the comparison was originally:
> +
> +   OP1 CODE' OP0.
> +
> +   This affects how we build the updated constant.  */
> +
> +static bool
> +overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
> +                         bool follow_assert_exprs, bool reversed, tree *new_cst)
> +{
> +  /* See if this is a relational operation between two SSA_NAMES with
> +     unsigned, overflow wrapping values.  If so, check it more deeply.  */
> +  if ((code == LT_EXPR || code == LE_EXPR
> +       || code == GE_EXPR || code == GT_EXPR)
> +      && TREE_CODE (op0) == SSA_NAME
> +      && TREE_CODE (op1) == SSA_NAME
> +      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
> +      && TYPE_UNSIGNED (TREE_TYPE (op0))
> +      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
> +    {
> +      gimple *op1_def = SSA_NAME_DEF_STMT (op1);
> +
> +      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
> +      if (follow_assert_exprs)
> +        {
> +          while (gimple_assign_single_p (op1_def)
> +                 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
> +            {
> +              op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
> +              if (TREE_CODE (op1) != SSA_NAME)
> +                break;
> +              op1_def = SSA_NAME_DEF_STMT (op1);
> +            }
> +        }
> +
> +      /* Now look at the defining statement of OP1 to see if it adds
> +         or subtracts a nonzero constant from another operand.  */
> +      if (op1_def
> +          && is_gimple_assign (op1_def)
> +          && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
> +          && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
> +          && !integer_zerop (gimple_assign_rhs2 (op1_def)))
> +        {
> +          tree target = gimple_assign_rhs1 (op1_def);
> +
> +          /* If requested, follow ASSERT_EXPRs backwards for op0 looking
> +             for one where TARGET appears on the RHS.  */
> +          if (follow_assert_exprs)
> +            {
> +              /* Now see if that "other operand" is op0, following the chain
> +                 of ASSERT_EXPRs if necessary.  */
> +              gimple *op0_def = SSA_NAME_DEF_STMT (op0);
> +              while (op0 != target
> +                     && gimple_assign_single_p (op0_def)
> +                     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
> +                {
> +                  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
> +                  if (TREE_CODE (op0) != SSA_NAME)
> +                    break;
> +                  op0_def = SSA_NAME_DEF_STMT (op0);
> +                }
> +            }
> +
> +          /* If we did not find our target SSA_NAME, then this is not
> +             an overflow test.  */
> +          if (op0 != target)
> +            return false;
> +
> +          tree type = TREE_TYPE (op0);
> +          wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
> +          tree inc = gimple_assign_rhs2 (op1_def);
> +          if (reversed)
> +            *new_cst = wide_int_to_tree (type, max + inc);
> +          else
> +            *new_cst = wide_int_to_tree (type, max - inc);
> +          return true;
> +        }
> +    }
> +  return false;
> +}
> +
> +/* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
> +   OP1's defining statement to see if it ultimately has the form
> +   OP0 CODE (OP0 PLUS INTEGER_CST)
> +
> +   If so, return TRUE indicating this is an overflow test and store into
> +   *NEW_CST an updated constant that can be used in a narrowed range test.
> +
> +   These statements are left as-is in the IL to facilitate discovery of
> +   {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
> +   the alternate range representation is often useful within VRP.  */
> +
> +static bool
> +overflow_comparison_p (tree_code code, tree name, tree val,
> +                       bool use_equiv_p, tree *new_cst)
> +{
> +  if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
> +    return true;
> +  return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
> +                                  use_equiv_p, true, new_cst);
> +}
> +
> +
>  /* Try to register an edge assertion for SSA name NAME on edge E for
>     the condition COND contributing to the conditional jump pointed to by BSI.
>     Invert the condition COND if INVERT is true.  */
>
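As a rough illustration (not taken from the patch), the source-level idioms these helpers are meant to recognize compare a value against itself plus or minus a constant; on unsigned, wrapping types such comparisons are really overflow tests:

    /* Illustrative only: OP0 CODE (OP0 PLUS INTEGER_CST) shapes as they
       appear in source code.  */
    int
    add_overflows (unsigned a)
    {
      unsigned b = a + 1;
      return b < a;         /* True only when a + 1 wrapped, i.e. a == ~0U.  */
    }

    int
    sub_wraps (unsigned a)
    {
      unsigned b = a - 1;   /* Same as a + ~0U in wrapping arithmetic.  */
      return b > a;         /* True only when a - 1 wrapped, i.e. a == 0.  */
    }

The sat_add* functions added to arith-1.c later in this message exercise the same shapes.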
>
>         * tree-vrp.c (register_edge_assert_for_2): Register additional asserts
>         if NAME is used in an overflow test.
>         (vrp_evaluate_conditional_warnv_with_ops): If the ops represent an
>         overflow check that can be expressed as an equality test, then adjust
>         ops to be that equality test.
>
> diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
> index 2c03a74..21c459c 100644
> --- a/gcc/tree-vrp.c
> +++ b/gcc/tree-vrp.c
> @@ -5319,7 +5319,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
>    /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
>       reachable from E.  */
>    if (live_on_edge (e, name))
> -    register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
> +    {
> +      tree x;
> +      if (overflow_comparison_p (comp_code, name, val, false, &x))
> +        {
> +          enum tree_code new_code
> +            = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
> +               ? GT_EXPR : LE_EXPR);
> +          register_new_assert_for (name, name, new_code, x, NULL, e, bsi);
> +        }
> +      register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
> +    }
>
>    /* In the case of NAME <= CST and NAME being defined as
>       NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
> @@ -7678,6 +7688,39 @@ vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
>        && !POINTER_TYPE_P (TREE_TYPE (op0)))
>      return NULL_TREE;
>
> +  /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
> +     as a simple equality test, then prefer that over its current form
> +     for evaluation.
> +
> +     An overflow test which collapses to an equality test can always be
> +     expressed as a comparison of one argument against zero.  Overflow
> +     occurs when the chosen argument is zero and does not occur if the
> +     chosen argument is not zero.  */
> +  tree x;
> +  if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
> +    {
> +      wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
> +      /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
> +         B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
> +         B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
> +         B = A + 1; if (B > A) -> B = A + 1; if (B != 0)  */
> +      if (integer_zerop (x))
> +        {
> +          op1 = x;
> +          code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
> +        }
> +      /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
> +         B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
> +         B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
> +         B = A - 1; if (B < A) -> B = A - 1; if (A != 0)  */
> +      else if (wi::eq_p (x, max - 1))
> +        {
> +          op0 = op1;
> +          op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
> +          code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
> +        }
> +    }
> +
>    if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
>               (code, op0, op1, strict_overflow_p)))
>      return ret;
>
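A sketch of what the extra assert and the equality rewrite buy (again illustrative, not part of the patch): with b = a + 1 the test b < a can only be true when the addition wrapped, so on the taken edge VRP may treat the condition as a == ~0U and propagate constants into the guarded block.

    extern void arf (unsigned, unsigned);

    /* Illustrative: the overflow check below is equivalent to a == ~0U,
       so on the taken edge a == ~0U and b == 0 become known; the call is
       then expected to become arf (~0U, 0), which is what the
       gcc.dg/tree-ssa test at the end of this message looks for.  */
    void
    f (unsigned a)
    {
      unsigned b = a + 1;
      if (b < a)
        arf (a, b);
    }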
>
>         * g++.dg/pr79095-1.C: New test.
>         * g++.dg/pr79095-2.C: New test.
>         * g++.dg/pr79095-3.C: New test.
>         * g++.dg/pr79095-4.C: New test.
>         * g++.dg/pr79095-5.C: New test.
>         * gcc.c-torture/execute/arith-1.c: Update with more cases.
>         * gcc.dg/tree-ssa/pr79095-1.c: New test.
>
> diff --git a/gcc/testsuite/g++.dg/pr79095-1.C b/gcc/testsuite/g++.dg/pr79095-1.C
> new file mode 100644
> index 0000000..4b8043c
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/pr79095-1.C
> @@ -0,0 +1,40 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Wall -O3" } */
> +
> +typedef long unsigned int size_t;
> +
> +inline void
> +fill (int *p, size_t n, int)
> +{
> +  while (n--)
> +    *p++ = 0;
> +}
> +
> +struct B
> +{
> +  int* p0, *p1, *p2;
> +
> +  size_t size () const {
> +    return size_t (p1 - p0);
> +  }
> +
> +  void resize (size_t n) {
> +    if (n > size())
> +      append (n - size());
> +  }
> +
> +  void append (size_t n)
> +  {
> +    if (size_t (p2 - p1) >= n) {
> +      fill (p1, n, 0);
> +    }
> +  }
> +};
> +
> +void foo (B &b)
> +{
> +  if (b.size () != 0)
> +    b.resize (b.size () - 1);
> +}
> +
> +
> diff --git a/gcc/testsuite/g++.dg/pr79095-2.C b/gcc/testsuite/g++.dg/pr79095-2.C
> new file mode 100644
> index 0000000..9dabc7e
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/pr79095-2.C
> @@ -0,0 +1,46 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Wall -O3" } */
> +
> +typedef long unsigned int size_t;
> +
> +inline void
> +fill (int *p, size_t n, int)
> +{
> +  while (n--)
> +    *p++ = 0;
> +}
> +
> +struct B
> +{
> +  int* p0, *p1, *p2;
> +
> +  size_t size () const {
> +    return size_t (p1 - p0);
> +  }
> +
> +  void resize (size_t n) {
> +    if (n > size())
> +      append (n - size());
> +  }
> +
> +  void append (size_t n)
> +  {
> +    if (size_t (p2 - p1) >= n) {
> +      fill (p1, n, 0);
> +    }
> +  }
> +};
> +
> +void foo (B &b)
> +{
> +  b.resize (b.size () - 1);
> +}
> +
> +/* If b.size() == 0, then the argument to b.resize is -1U (it overflowed).
> +   This will result in calling "fill", which turns into a memset with a bogus
> +   length argument.  We want to make sure we warn, which requires multiple
> +   things: first the ldist pass converts the loop into a memset, then cprop
> +   and simplifications make the length a constant, and finally the static
> +   analysis pass determines it's a bogus size to pass to memset.  */
> +/* { dg-warning "exceeds maximum object size" "" { target *-*-* } 0 } */
> +
> diff --git a/gcc/testsuite/g++.dg/pr79095-3.C b/gcc/testsuite/g++.dg/pr79095-3.C
> new file mode 100644
> index 0000000..28c8a37
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/pr79095-3.C
> @@ -0,0 +1,17 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Wall -O3" } */
> +
> +#include <vector>
> +
> +void foo(std::vector<unsigned int> &v);
> +
> +void vtest()
> +{
> +  std::vector<unsigned int> v;
> +  foo (v);
> +  if (v.size() > 0)
> +    {
> +      v.resize (v.size()-1);
> +    }
> +}
> +
> diff --git a/gcc/testsuite/g++.dg/pr79095-4.C b/gcc/testsuite/g++.dg/pr79095-4.C
> new file mode 100644
> index 0000000..df55025
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/pr79095-4.C
> @@ -0,0 +1,26 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Wall -O3 -fdump-tree-vrp2" } */
> +
> +#include <vector>
> +
> +void foo(std::vector<unsigned int> &v);
> +
> +void vtest()
> +{
> +  std::vector<unsigned int> v;
> +  foo (v);
> +  {
> +    v.resize (v.size()-1);
> +  }
> +}
> +
> +/* As written this testcase should trigger a warning.  We overflow to -1U
> +   if v.size() == 0 in foo().  This results in bogus calls to memset.
> +
> +   The number of clearing loops in the IL can vary depending on the C++
> +   mode used for the test.  But by the end of VRP2, there should be a single
> +   clearing loop left and it should be using memset.  */
> +/* { dg-final { scan-tree-dump-times "__builtin_memset \\(_\[0-9\]+, 0, \[0-9\]+\\)" 1 "vrp2" } } */
> +
> +/* And that call should trigger a warning. */
> +/* { dg-warning "exceeds maximum object size" "" { target *-*-* } 0 } */
> diff --git a/gcc/testsuite/g++.dg/pr79095-5.C b/gcc/testsuite/g++.dg/pr79095-5.C
> new file mode 100644
> index 0000000..266f4e9
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/pr79095-5.C
> @@ -0,0 +1,34 @@
> +/* { dg-do compile } */
> +/* { dg-options "-Wall -O3" } */
> +
> +typedef __SIZE_TYPE__ size_t;
> +
> +struct S {
> +  int *p0, *p1, *p2;
> +
> +  size_t size () const { return p1 - p0; }
> +
> +  void f (size_t n) {
> +    if (n > size ())          // can't happen because
> +      foo (n - size ());      // n is in [1, MIN(size() - 1, 3)]
> +    else if (n < size ())
> +      bar (p0 + n);
> +  }
> +
> +  void foo (size_t n)
> +  {
> +    size_t left = (size_t)(p2 - p1);
> +    if (left >= n)
> +      __builtin_memset (p2, 0, n * sizeof *p2);   // { dg-bogus "maximum object size" }
> +
> +  }
> +
> +  void bar (int*);
> +};
> +
> +void f (S &s)
> +{
> +  size_t n = s.size ();
> +  if (n > 1 && n < 5)
> +    s.f (n - 1);
> +}
> diff --git a/gcc/testsuite/gcc.c-torture/execute/arith-1.c b/gcc/testsuite/gcc.c-torture/execute/arith-1.c
> index 58df322..6168d77 100644
> --- a/gcc/testsuite/gcc.c-torture/execute/arith-1.c
> +++ b/gcc/testsuite/gcc.c-torture/execute/arith-1.c
> @@ -7,9 +7,41 @@ sat_add (unsigned i)
>    return ret;
>  }
>
> +unsigned
> +sat_add2 (unsigned i)
> +{
> +  unsigned ret = i + 1;
> +  if (ret > i)
> +    return ret;
> +  return i;
> +}
> +
> +unsigned
> +sat_add3 (unsigned i)
> +{
> +  unsigned ret = i - 1;
> +  if (ret > i)
> +    ret = i;
> +  return ret;
> +}
> +
> +unsigned
> +sat_add4 (unsigned i)
> +{
> +  unsigned ret = i - 1;
> +  if (ret < i)
> +    return ret;
> +  return i;
> +}
>  main ()
>  {
>    if (sat_add (~0U) != ~0U)
>      abort ();
> +  if (sat_add2 (~0U) != ~0U)
> +    abort ();
> +  if (sat_add3 (0U) != 0U)
> +    abort ();
> +  if (sat_add4 (0U) != 0U)
> +    abort ();
>    exit (0);
>  }
> diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr79095.c b/gcc/testsuite/gcc.dg/tree-ssa/pr79095.c
> new file mode 100644
> index 
0000000..f635fca > --- /dev/null > +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr79095.c > @@ -0,0 +1,436 @@ > +/* { dg-do compile } */ > +/* { dg-options "-O2 -fno-ipa-icf -fdump-tree-vrp1" } */ > + > +extern void arf (unsigned x, unsigned y); > +extern void baz (unsigned x, unsigned y); > + > +unsigned > +f1 (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (b < a) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > + > +unsigned > +f1r (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (a < b) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f1n (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(b < a)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f1nr (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(a < b)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > + > +unsigned > +f1o (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (a < b) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f1ro (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (b < a) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f1no (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(a < b)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f1nro (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(b < a)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > + > +unsigned > +f2 (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (b <= a) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f2r (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (a <= b) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f2n (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(b <= a)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f2nr (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(a <= b)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > + > +unsigned > +f2o (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (a <= b) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f2ro (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (b <= a) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f2no (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(a <= b)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f2nro (unsigned a, unsigned b) > +{ > + b = a + 1; > + if (!(b <= a)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > + > +unsigned > +f3 (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (b < a) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f3r (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (a < b) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f3n (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(b < a)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f3nr (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(a < b)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + 
return b; > +} > + > + > +unsigned > +f3o (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (a < b) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f3ro (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (b < a) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f3no (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(a < b)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f3nro (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(b < a)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > + > +unsigned > +f4 (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (b <= a) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f4r (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (a <= b) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f4n (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(b <= a)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f4nr (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(a <= b)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > + > +unsigned > +f4o (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (a <= b) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +unsigned > +f4ro (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (b <= a) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f4no (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(a <= b)) > + { > + baz (a, b); > + return 42; > + } > + arf (a, b); > + return b; > +} > + > +unsigned > +f4nro (unsigned a, unsigned b) > +{ > + b = a - 1; > + if (!(b <= a)) > + { > + arf (a, b); > + return 42; > + } > + baz (a, b); > + return b; > +} > + > +/* All calls to baz should still reference a & b as arguments. */ > +/* { dg-final { scan-tree-dump-times "baz \\(a_\[0-9\]+\\(D\\), > b_\[0-9\]+\\)" 32 "vrp1"} } */ > + > + > +/* All calls to arf should have constant arguments. */ > +/* { dg-final { scan-tree-dump-times "arf \\(\[0-9\]+, \[0-9\]+\\)" 32 > "vrp1"} } */ >