Message ID | CAEwic4bPxnFK6ezMgQw-k3JHqhBM72+GBA7vO995MviRTe4R4Q@mail.gmail.com |
---|---|
State | New |
On Fri, Jul 15, 2011 at 9:42 AM, Kai Tietz <ktietz70@googlemail.com> wrote:
> Hello,
>
> This patch removes from tree-vrp the use of TRUTH-bitwise expression codes.
> It also merges the handling for boolean-compatible and non-boolean typed
> bitwise-binary expressions.
> Additionally, it adds primitive checks for bitwise-not expressions on
> boolean-compatible types.
> In substitute_and_fold the scan direction of statements within a BB is now
> controlled by its do_dce flag.  This provides better results in the VRP pass.
>
> ChangeLog gcc
>
> 2011-07-15  Kai Tietz  <ktietz@redhat.com>
>
>         * tree-ssa-propagate.c (substitute_and_fold): Use the
>         do_dce flag to decide whether a BB's statements are scanned
>         in last-to-first or first-to-last order.
>         * tree-vrp.c (extract_range_from_binary_expr):
>         Remove TRUTH-binary checks and unify the bitwise-binary
>         cases.
>         (register_edge_assert_for_1): Add handling for boolean-compatible
>         typed BIT_IOR_EXPR and BIT_NOT_EXPR.
>         (extract_range_from_unary_expr): Add support for 1-bit
>         integral typed BIT_NOT_EXPR expressions.
>         (extract_range_from_assignment): Remove TRUTH-binary checks.
>         Add handling for 1-bit integral typed BIT_NOT_EXPR expressions.
>         (build_assert_expr_for): Likewise.
>         (register_edge_assert_for_1): Likewise.
>         (simplify_stmt_using_ranges): Likewise.
>         (ssa_name_get_inner_ssa_name_p): New helper function.
>         (ssa_name_get_cast_to_p): New helper function.
>         (simplify_truth_ops_using_ranges): Handle a prefixed
>         cast instruction for the result.  Remove TRUTH-binary checks.
>         Add handling for 1-bit integral typed BIT_NOT_EXPR expressions.
>
> ChangeLog gcc/testsuite
>
> 2011-07-15  Kai Tietz  <ktietz@redhat.com>
>
>         * gcc.dg/tree-ssa/vrp47.c: Test no longer needs
>         dom dump.
>
> Bootstrapped and regression tested for all standard languages (plus
> Ada & Obj-C++) on x86_64-pc-linux-gnu.  Ok for apply?
>
> Regards,
> Kai
>
> Index: gcc/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c
> ===================================================================
> --- gcc.orig/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c	2011-07-13 12:57:46.869620200 +0200
> +++ gcc/gcc/testsuite/gcc.dg/tree-ssa/vrp47.c	2011-07-13 22:29:53.221967000 +0200
> @@ -4,7 +4,7 @@
>    jumps when evaluating an && condition.  VRP is not able to optimize
>    this. */
> /* { dg-do compile { target { ! "mips*-*-* s390*-*-* avr-*-* mn10300-*-*" } } } */
> -/* { dg-options "-O2 -fdump-tree-vrp -fdump-tree-dom" } */
> +/* { dg-options "-O2 -fdump-tree-vrp" } */
> /* { dg-options "-O2 -fdump-tree-vrp -fdump-tree-dom -march=i586" { target { i?86-*-* && ilp32 } } } */
>
> int h(int x, int y)
> @@ -36,13 +36,10 @@ int f(int x)
>    0 or 1. */
> /* { dg-final { scan-tree-dump-times "\[xy\]\[^ \]* !=" 0 "vrp1" } } */
>
> -/* This one needs more copy propagation that only happens in dom1. */
> -/* { dg-final { scan-tree-dump-times "x\[^ \]* & y" 1 "dom1" } } */
> -/* { dg-final { scan-tree-dump-times "x\[^ \]* & y" 1 "vrp1" { xfail *-*-* } } } */
> +/* { dg-final { scan-tree-dump-times "x\[^ \]* & y" 1 "vrp1" } } */
>
> /* These two are fully simplified by VRP.
*/ > /* { dg-final { scan-tree-dump-times "x\[^ \]* \[|\] y" 1 "vrp1" } } */ > /* { dg-final { scan-tree-dump-times "x\[^ \]* \\^ 1" 1 "vrp1" } } */ > > /* { dg-final { cleanup-tree-dump "vrp\[0-9\]" } } */ > -/* { dg-final { cleanup-tree-dump "dom\[0-9\]" } } */ > Index: gcc/gcc/tree-ssa-propagate.c > =================================================================== > --- gcc.orig/gcc/tree-ssa-propagate.c 2011-07-13 12:57:46.870620200 +0200 > +++ gcc/gcc/tree-ssa-propagate.c 2011-07-13 22:29:53.253971100 +0200 > @@ -979,6 +979,9 @@ replace_phi_args_in (gimple phi, ssa_pro > > DO_DCE is true if trivially dead stmts can be removed. > > + If DO_DCE is true, the statements within a BB are walked from > + last to first element. Otherwise we scan from first to last element. > + > Return TRUE when something changed. */ > > bool > @@ -1059,9 +1062,10 @@ substitute_and_fold (ssa_prop_get_value_ > for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i)) > replace_phi_args_in (gsi_stmt (i), get_value_fn); > > - /* Propagate known values into stmts. Do a backward walk to expose > - more trivially deletable stmts. */ > - for (i = gsi_last_bb (bb); !gsi_end_p (i);) > + /* Propagate known values into stmts. Do a backward walk if > + do_dce is true. In some case it exposes > + more trivially deletable stmts to walk backward. */ > + for (i = (do_dce ? gsi_last_bb (bb) : gsi_start_bb (bb)); > !gsi_end_p (i);) > { > bool did_replace; > gimple stmt = gsi_stmt (i); > @@ -1070,7 +1074,10 @@ substitute_and_fold (ssa_prop_get_value_ > gimple_stmt_iterator oldi; > > oldi = i; > - gsi_prev (&i); > + if (do_dce) > + gsi_prev (&i); > + else > + gsi_next (&i); > > /* Ignore ASSERT_EXPRs. They are used by VRP to generate > range information for names and they are discarded The tree-ssa-propagate.c change is ok to apply separately. > Index: gcc/gcc/tree-vrp.c > =================================================================== > --- gcc.orig/gcc/tree-vrp.c 2011-07-13 22:25:14.690598100 +0200 > +++ gcc/gcc/tree-vrp.c 2011-07-15 08:53:21.086266100 +0200 > @@ -2174,9 +2174,7 @@ extract_range_from_binary_expr (value_ra > && code != MIN_EXPR > && code != MAX_EXPR > && code != BIT_AND_EXPR > - && code != BIT_IOR_EXPR > - && code != TRUTH_AND_EXPR > - && code != TRUTH_OR_EXPR) > + && code != BIT_IOR_EXPR) > { > /* We can still do constant propagation here. */ > tree const_op0 = op_with_constant_singleton_value_range (op0); > @@ -2231,8 +2229,7 @@ extract_range_from_binary_expr (value_ra > divisions. TODO, we may be able to derive anti-ranges in > some cases. */ > if (code != BIT_AND_EXPR > - && code != TRUTH_AND_EXPR > - && code != TRUTH_OR_EXPR > + && code != BIT_IOR_EXPR > && code != TRUNC_DIV_EXPR > && code != FLOOR_DIV_EXPR > && code != CEIL_DIV_EXPR > @@ -2291,6 +2288,8 @@ extract_range_from_binary_expr (value_ra > else > set_value_range_to_varying (vr); > } > + else if (code == BIT_IOR_EXPR) > + set_value_range_to_varying (vr); Again, how do we arrive with a BIT_IOR_EXPR with pointer type here? We're not supposed to have that (well, in theory, nothing verifies that). > else > gcc_unreachable (); > > @@ -2299,55 +2298,7 @@ extract_range_from_binary_expr (value_ra > > /* For integer ranges, apply the operation to each end of the > range and see what we end up with. */ > - if (code == TRUTH_AND_EXPR > - || code == TRUTH_OR_EXPR) > - { > - /* If one of the operands is zero, we know that the whole > - expression evaluates zero. 
*/ > - if (code == TRUTH_AND_EXPR > - && ((vr0.type == VR_RANGE > - && integer_zerop (vr0.min) > - && integer_zerop (vr0.max)) > - || (vr1.type == VR_RANGE > - && integer_zerop (vr1.min) > - && integer_zerop (vr1.max)))) > - { > - type = VR_RANGE; > - min = max = build_int_cst (expr_type, 0); > - } > - /* If one of the operands is one, we know that the whole > - expression evaluates one. */ > - else if (code == TRUTH_OR_EXPR > - && ((vr0.type == VR_RANGE > - && integer_onep (vr0.min) > - && integer_onep (vr0.max)) > - || (vr1.type == VR_RANGE > - && integer_onep (vr1.min) > - && integer_onep (vr1.max)))) > - { > - type = VR_RANGE; > - min = max = build_int_cst (expr_type, 1); > - } > - else if (vr0.type != VR_VARYING > - && vr1.type != VR_VARYING > - && vr0.type == vr1.type > - && !symbolic_range_p (&vr0) > - && !overflow_infinity_range_p (&vr0) > - && !symbolic_range_p (&vr1) > - && !overflow_infinity_range_p (&vr1)) > - { > - /* Boolean expressions cannot be folded with int_const_binop. */ > - min = fold_binary (code, expr_type, vr0.min, vr1.min); > - max = fold_binary (code, expr_type, vr0.max, vr1.max); > - } > - else > - { > - /* The result of a TRUTH_*_EXPR is always true or false. */ > - set_value_range_to_truthvalue (vr, expr_type); > - return; > - } > - } > - else if (code == PLUS_EXPR > + if (code == PLUS_EXPR > || code == MIN_EXPR > || code == MAX_EXPR) > { > @@ -2682,71 +2633,125 @@ extract_range_from_binary_expr (value_ra > double_int may_be_nonzero0, may_be_nonzero1; > double_int must_be_nonzero0, must_be_nonzero1; > > - vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0); > - vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1); > - int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, > - &must_be_nonzero0); > - int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, > - &must_be_nonzero1); > - > - type = VR_RANGE; > - if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p) > - min = max = int_const_binop (code, vr0.max, vr1.max); > - else if (!int_cst_range0 && !int_cst_range1) > + /* If one of the operands is zero, we know that the whole > + expression evaluates zero. */ context diffs help ... now I have to wade through +- mess :/ > + if (code == BIT_AND_EXPR > + && ((vr0.type == VR_RANGE > + && integer_zerop (vr0.min) > + && integer_zerop (vr0.max)) > + || (vr1.type == VR_RANGE > + && integer_zerop (vr1.min) > + && integer_zerop (vr1.max)))) if you wrap all this in if (vr0_int_cst_singleton_p || vr1_int_cst_singleton_p) it becomes much simpler. > { > - set_value_range_to_varying (vr); > - return; > + type = VR_RANGE; > + min = max = build_int_cst (expr_type, 0); this can also be handled better via improving the existing if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p) min = max = int_const_binop (code, vr0.max, vr1.max); handling to include the 0 and all-1s cases for AND/IOR instead of trying to move the TRUTH_* code here. > } > - else if (code == BIT_AND_EXPR) > + /* If one of the operands has all bits set to one, we know > + that the whole expression evaluates to this one. 
*/ > + else if (code == BIT_IOR_EXPR > + && (vr0.type == VR_RANGE > + && integer_all_onesp (vr0.min) > + && integer_all_onesp (vr0.max))) > { > - min = double_int_to_tree (expr_type, > - double_int_and (must_be_nonzero0, > - must_be_nonzero1)); > - max = double_int_to_tree (expr_type, > - double_int_and (may_be_nonzero0, > - may_be_nonzero1)); > - if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0) > - min = NULL_TREE; > - if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0) > - max = NULL_TREE; > - if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) > - { > - if (min == NULL_TREE) > - min = build_int_cst (expr_type, 0); > - if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max)) > - max = vr0.max; > + type = VR_RANGE; > + min = max = fold_convert (expr_type, vr0.min); > + } > + else if (code == BIT_IOR_EXPR > + && (vr1.type == VR_RANGE > + && integer_all_onesp (vr1.min) > + && integer_all_onesp (vr1.max))) > + { > + type = VR_RANGE; > + min = max = fold_convert (expr_type, vr1.min); > + } > + else if (TYPE_PRECISION (TREE_TYPE (op1)) == 1) > + { > + if (vr0.type != VR_VARYING > + && vr1.type != VR_VARYING > + && vr0.type == vr1.type > + && !symbolic_range_p (&vr0) > + && !overflow_infinity_range_p (&vr0) > + && !symbolic_range_p (&vr1) > + && !overflow_infinity_range_p (&vr1)) > + { > + /* Boolean expressions cannot be folded with int_const_binop. */ > + min = fold_binary (code, expr_type, vr0.min, vr1.min); > + max = fold_binary (code, expr_type, vr0.max, vr1.max); > } > - if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) > + else > { > - if (min == NULL_TREE) > - min = build_int_cst (expr_type, 0); > - if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max)) > - max = vr1.max; > + set_value_range_to_varying (vr); > + return; > } > } > - else if (!int_cst_range0 > - || !int_cst_range1 > - || tree_int_cst_sgn (vr0.min) < 0 > - || tree_int_cst_sgn (vr1.min) < 0) > - { > - set_value_range_to_varying (vr); > - return; > - } > else > - { > - min = double_int_to_tree (expr_type, > - double_int_ior (must_be_nonzero0, > - must_be_nonzero1)); > - max = double_int_to_tree (expr_type, > - double_int_ior (may_be_nonzero0, > - may_be_nonzero1)); > - if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0) > - min = vr0.min; > + { > + vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0); > + vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1); > + int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, > + &must_be_nonzero0); > + int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, > + &must_be_nonzero1); > + > + type = VR_RANGE; > + if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p) > + min = max = int_const_binop (code, vr0.max, vr1.max); > + else if (!int_cst_range0 && !int_cst_range1) > + { > + set_value_range_to_varying (vr); > + return; > + } > + else if (code == BIT_AND_EXPR) > + { > + min = double_int_to_tree (expr_type, > + double_int_and (must_be_nonzero0, > + must_be_nonzero1)); > + max = double_int_to_tree (expr_type, > + double_int_and (may_be_nonzero0, > + may_be_nonzero1)); > + if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0) > + min = NULL_TREE; > + if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0) > + max = NULL_TREE; > + if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) > + { > + if (min == NULL_TREE) > + min = build_int_cst (expr_type, 0); > + if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max)) > + max = vr0.max; > + } > + if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) > + { > + if (min == NULL_TREE) 
> + min = build_int_cst (expr_type, 0); > + if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max)) > + max = vr1.max; > + } > + } > + else if (!int_cst_range0 > + || !int_cst_range1 > + || tree_int_cst_sgn (vr0.min) < 0 > + || tree_int_cst_sgn (vr1.min) < 0) > + { > + set_value_range_to_varying (vr); > + return; > + } > else > - min = vrp_int_const_binop (MAX_EXPR, min, vr0.min); > - if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0) > - max = NULL_TREE; > - min = vrp_int_const_binop (MAX_EXPR, min, vr1.min); > + { > + min = double_int_to_tree (expr_type, > + double_int_ior (must_be_nonzero0, > + must_be_nonzero1)); > + max = double_int_to_tree (expr_type, > + double_int_ior (may_be_nonzero0, > + may_be_nonzero1)); > + if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0) > + min = vr0.min; > + else > + min = vrp_int_const_binop (MAX_EXPR, min, vr0.min); > + if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0) > + max = NULL_TREE; > + min = vrp_int_const_binop (MAX_EXPR, min, vr1.min); > + } > } > } > else > @@ -2809,7 +2814,7 @@ extract_range_from_unary_expr (value_ran > cannot easily determine a resulting range. */ > if (code == FIX_TRUNC_EXPR > || code == FLOAT_EXPR > - || code == BIT_NOT_EXPR > + || (code == BIT_NOT_EXPR && TYPE_PRECISION (type) != 1) Huh? That doesn't look worthwhile. Please instead provide true support for BIT_NOT_EXPR, as a separate patch. > || code == CONJ_EXPR) > { > /* We can still do constant propagation here. */ > @@ -3303,10 +3308,7 @@ extract_range_from_assignment (value_ran > extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); > else if (code == SSA_NAME) > extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); > - else if (TREE_CODE_CLASS (code) == tcc_binary > - || code == TRUTH_AND_EXPR > - || code == TRUTH_OR_EXPR > - || code == TRUTH_XOR_EXPR) > + else if (TREE_CODE_CLASS (code) == tcc_binary) > extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), > gimple_expr_type (stmt), > gimple_assign_rhs1 (stmt), > @@ -3976,7 +3978,9 @@ build_assert_expr_for (tree cond, tree v > tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); > assertion = gimple_build_assign (n, a); > } > - else if (TREE_CODE (cond) == TRUTH_NOT_EXPR) > + else if (TREE_CODE (cond) == TRUTH_NOT_EXPR > + || (TREE_CODE (cond) == BIT_NOT_EXPR > + && TYPE_PRECISION (TREE_TYPE (cond)) == 1)) I dont' think we arrive with TRUTH_NOT_EXPR here either - look at the single caller please. > { > /* Given !V, build the assignment N = false. */ > tree op0 = TREE_OPERAND (cond, 0); > @@ -4519,11 +4523,9 @@ register_edge_assert_for_1 (tree op, enu > invert); > } > else if ((code == NE_EXPR > - && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR > - || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)) > + && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR) > || (code == EQ_EXPR > - && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR > - || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))) > + && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)) > { > /* Recurse on each operand. */ > retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), > @@ -4531,7 +4533,9 @@ register_edge_assert_for_1 (tree op, enu > retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def), > code, e, bsi); > } > - else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR) > + else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR > + || (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR > + && TYPE_PRECISION (TREE_TYPE (op)) == 1)) Now without the TRUTH_NOT_EXPR handling. 
Also elsewhere I guess. > { > /* Recurse, flipping CODE. */ > code = invert_tree_comparison (code, false); > @@ -4588,8 +4592,8 @@ register_edge_assert_for (tree name, edg > the value zero or one, then we may be able to assert values > for SSA_NAMEs which flow into COND. */ > > - /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining > - statement of NAME we can assert both operands of the TRUTH_AND_EXPR > + /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining > + statement of NAME we can assert both operands of the BIT_AND_EXPR > have nonzero value. */ > if (((comp_code == EQ_EXPR && integer_onep (val)) > || (comp_code == NE_EXPR && integer_zerop (val)))) > @@ -4597,8 +4601,7 @@ register_edge_assert_for (tree name, edg > gimple def_stmt = SSA_NAME_DEF_STMT (name); > > if (is_gimple_assign (def_stmt) > - && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR > - || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)) > + && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR) > { > tree op0 = gimple_assign_rhs1 (def_stmt); > tree op1 = gimple_assign_rhs2 (def_stmt); > @@ -4607,8 +4610,8 @@ register_edge_assert_for (tree name, edg > } > } > > - /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining > - statement of NAME we can assert both operands of the TRUTH_OR_EXPR > + /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining > + statement of NAME we can assert both operands of the BIT_IOR_EXPR > have zero value. */ > if (((comp_code == EQ_EXPR && integer_zerop (val)) > || (comp_code == NE_EXPR && integer_onep (val)))) > @@ -4616,11 +4619,12 @@ register_edge_assert_for (tree name, edg > gimple def_stmt = SSA_NAME_DEF_STMT (name); > > if (is_gimple_assign (def_stmt) > - && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR > + && ((gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR > + && TYPE_PRECISION (TREE_TYPE (name)) == 1) > /* For BIT_IOR_EXPR only if NAME == 0 both operands have > necessarily zero value. */ The comment needs updating and the condition wants to be re-structured. > || (comp_code == EQ_EXPR > - && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)))) > + && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))) > { > tree op0 = gimple_assign_rhs1 (def_stmt); > tree op1 = gimple_assign_rhs2 (def_stmt); > @@ -6747,19 +6751,95 @@ varying: > return SSA_PROP_VARYING; > } > > +/* Returns operand1 of ssa-name with SSA_NAME as code, Otherwise it > + returns NULL_TREE. */ > +static tree > +ssa_name_get_inner_ssa_name_p (tree op) > +{ > + gimple stmt; > + > + if (TREE_CODE (op) != SSA_NAME > + || !is_gimple_assign (SSA_NAME_DEF_STMT (op))) > + return NULL_TREE; > + stmt = SSA_NAME_DEF_STMT (op); > + if (gimple_assign_rhs_code (stmt) != SSA_NAME) > + return NULL_TREE; > + return gimple_assign_rhs1 (stmt); > +} This and the following should be all a separate patch. Please. > +/* Returns operand of cast operation, if OP is a type-conversion. Otherwise > + return NULL_TREE. */ > +static tree > +ssa_name_get_cast_to_p (tree op) > +{ > + gimple stmt; > + > + if (TREE_CODE (op) != SSA_NAME > + || !is_gimple_assign (SSA_NAME_DEF_STMT (op))) > + return NULL_TREE; > + stmt = SSA_NAME_DEF_STMT (op); > + if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))) > + return NULL_TREE; > + return gimple_assign_rhs1 (stmt); > +} > + > /* Simplify boolean operations if the source is known > to be already a boolean. 
*/ > static bool > simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) > { > enum tree_code rhs_code = gimple_assign_rhs_code (stmt); > + gimple stmt2 = stmt; > tree val = NULL; > - tree op0, op1; > + tree op0, op1, cop0, cop1; > value_range_t *vr; > bool sop = false; > bool need_conversion; > + location_t loc = gimple_location (stmt); > > op0 = gimple_assign_rhs1 (stmt); > + op1 = NULL_TREE; > + > + /* Handle cases with prefixed type-cast. */ What's a 'prefixed type-cast'? Isn't most of simplify_truth(!)_ops_using_ranges obsolete now? > + if (CONVERT_EXPR_CODE_P (rhs_code) > + && INTEGRAL_TYPE_P (TREE_TYPE (op0)) > + && TREE_CODE (op0) == SSA_NAME > + && is_gimple_assign (SSA_NAME_DEF_STMT (op0)) > + && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))) > + { > + stmt2 = SSA_NAME_DEF_STMT (op0); > + op0 = gimple_assign_rhs1 (stmt2); > + if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))) > + return false; > + rhs_code = gimple_assign_rhs_code (stmt2); > + if (rhs_code != BIT_NOT_EXPR > + && rhs_code != TRUTH_NOT_EXPR > + && rhs_code != BIT_AND_EXPR > + && rhs_code != BIT_IOR_EXPR > + && rhs_code != BIT_XOR_EXPR > + && rhs_code != NE_EXPR && rhs_code != EQ_EXPR) > + return false; > + if (rhs_code == BIT_AND_EXPR || rhs_code == BIT_IOR_EXPR > + || rhs_code == BIT_XOR_EXPR > + || rhs_code == NE_EXPR || rhs_code == EQ_EXPR) > + op1 = gimple_assign_rhs2 (stmt2); > + if (gimple_has_location (stmt2)) > + loc = gimple_location (stmt2); > + } > + else if (CONVERT_EXPR_CODE_P (rhs_code)) > + return false; > + else if (rhs_code == BIT_AND_EXPR || rhs_code == BIT_IOR_EXPR > + || rhs_code == BIT_XOR_EXPR > + || rhs_code == NE_EXPR || rhs_code == EQ_EXPR) > + op1 = gimple_assign_rhs2 (stmt); > + > + /* ~X is only equivalent of !X, if type-precision is one and X has > + an integral type. */ > + if (rhs_code == BIT_NOT_EXPR > + && (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) > + || TYPE_PRECISION (TREE_TYPE (op0)) != 1)) > + return false; > + > if (TYPE_PRECISION (TREE_TYPE (op0)) != 1) > { > if (TREE_CODE (op0) != SSA_NAME) > @@ -6775,22 +6855,100 @@ simplify_truth_ops_using_ranges (gimple_ > return false; > } > > - if (rhs_code == TRUTH_NOT_EXPR) > + if (op1 && TREE_CODE (op1) != INTEGER_CST > + && TYPE_PRECISION (TREE_TYPE (op1)) != 1) > + { > + vr = get_value_range (op1); > + val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + > + val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + } > + > + need_conversion = > + !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)), > + TREE_TYPE (op0)); > + > + /* As comparisons X != 0 getting folded by prior pass to (bool) X, > + but X == 0 might be not folded for none boolean type of X > + to (bool) (X ^ 1). > + So for bitwise-binary operations we have three cases to handle: > + a) ((bool) X) op ((bool) Y) > + b) ((bool) X) op (Y == 0) OR (X == 0) op ((bool) Y) > + c) (X == 0) op (Y == 0) > + The later two cases can't be handled for now, as vr tables > + would need to be adjusted. */ > + if (need_conversion > + && (rhs_code == BIT_XOR_EXPR > + || rhs_code == BIT_AND_EXPR > + || rhs_code == BIT_IOR_EXPR) > + && TREE_CODE (op1) == SSA_NAME && TREE_CODE (op0) == SSA_NAME) > + { > + cop0 = ssa_name_get_cast_to_p (op0); > + cop1 = ssa_name_get_cast_to_p (op1); > + if (!cop0 || !cop1) > + /* We would need an new statment for cases b and c, and we can't > + due vr table, so bail out. 
*/ > + return false; > + > + if (!INTEGRAL_TYPE_P (TREE_TYPE (cop0)) > + || !types_compatible_p (TREE_TYPE (cop0), TREE_TYPE (cop1))) > + return false; > + need_conversion = > + !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)), > + TREE_TYPE (cop0)); > + if (need_conversion) > + return false; > + op0 = cop0; > + op1 = cop1; > + > + /* We need to re-check if value ranges for new operands > + for 1-bit precision/range. */ > + if (TYPE_PRECISION (TREE_TYPE (op0)) != 1) > + { > + if (TREE_CODE (op0) != SSA_NAME) > + return false; > + vr = get_value_range (op0); > + > + val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + > + val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + } > + > + if (op1 && TYPE_PRECISION (TREE_TYPE (op1)) != 1) > + { > + vr = get_value_range (op1); > + val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + > + val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); > + if (!val || !integer_onep (val)) > + return false; > + } > + } > + else if (rhs_code == TRUTH_NOT_EXPR > + || rhs_code == BIT_NOT_EXPR) > { > rhs_code = NE_EXPR; > op1 = build_int_cst (TREE_TYPE (op0), 1); > } > else > { > - op1 = gimple_assign_rhs2 (stmt); > - > /* Reduce number of cases to handle. */ > if (is_gimple_min_invariant (op1)) > { > /* Exclude anything that should have been already folded. */ > if (rhs_code != EQ_EXPR > && rhs_code != NE_EXPR > - && rhs_code != TRUTH_XOR_EXPR) > + && rhs_code != BIT_XOR_EXPR) > return false; > > if (!integer_zerop (op1) > @@ -6810,18 +6968,6 @@ simplify_truth_ops_using_ranges (gimple_ > /* Punt on A == B as there is no BIT_XNOR_EXPR. */ > if (rhs_code == EQ_EXPR) > return false; > - > - if (TYPE_PRECISION (TREE_TYPE (op1)) != 1) > - { > - vr = get_value_range (op1); > - val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); > - if (!val || !integer_onep (val)) > - return false; > - > - val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); > - if (!val || !integer_onep (val)) > - return false; > - } > } > } > > @@ -6834,11 +6980,8 @@ simplify_truth_ops_using_ranges (gimple_ > else > location = gimple_location (stmt); > > - if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR) > - warning_at (location, OPT_Wstrict_overflow, > - _("assuming signed overflow does not occur when " > - "simplifying && or || to & or |")); > - else > + if (rhs_code != BIT_AND_EXPR && rhs_code != BIT_IOR_EXPR > + && rhs_code != BIT_XOR_EXPR) > warning_at (location, OPT_Wstrict_overflow, > _("assuming signed overflow does not occur when " > "simplifying ==, != or ! to identity or ^")); > @@ -6856,19 +6999,17 @@ simplify_truth_ops_using_ranges (gimple_ > > switch (rhs_code) > { > - case TRUTH_AND_EXPR: > - rhs_code = BIT_AND_EXPR; > - break; > - case TRUTH_OR_EXPR: > - rhs_code = BIT_IOR_EXPR; > + case BIT_AND_EXPR: > + case BIT_IOR_EXPR: > break; > - case TRUTH_XOR_EXPR: > + case BIT_XOR_EXPR: > case NE_EXPR: > if (integer_zerop (op1)) > { > gimple_assign_set_rhs_with_ops (gsi, > need_conversion ? 
NOP_EXPR : SSA_NAME, > op0, NULL); > + gimple_set_location (stmt, loc); > update_stmt (gsi_stmt (*gsi)); > return true; > } > @@ -6879,10 +7020,20 @@ simplify_truth_ops_using_ranges (gimple_ > gcc_unreachable (); > } > > + /* We can't insert here new expression as otherwise > + tracked vr tables getting out of bounds. */ > if (need_conversion) > return false; > > + /* Reduce here SSA_NAME -> SSA_NAME. */ > + while ((cop0 = ssa_name_get_inner_ssa_name_p (op0)) != NULL_TREE) > + op0 = cop0; > + > + while ((cop1 = ssa_name_get_inner_ssa_name_p (op1)) != NULL_TREE) > + op1 = cop1; > + ?? > gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1); > + gimple_set_location (stmt, loc); > update_stmt (gsi_stmt (*gsi)); > return true; Well, at least previously the function was readable and now it looks like spaghetti. > } > @@ -7417,10 +7568,8 @@ simplify_stmt_using_ranges (gimple_stmt_ > { > case EQ_EXPR: > case NE_EXPR: > + case BIT_NOT_EXPR: > case TRUTH_NOT_EXPR: > - case TRUTH_AND_EXPR: > - case TRUTH_OR_EXPR: > - case TRUTH_XOR_EXPR: > /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR > or identity if the RHS is zero or one, and the LHS are known > to be boolean values. Transform all TRUTH_*_EXPR into > @@ -7452,13 +7601,21 @@ simplify_stmt_using_ranges (gimple_stmt_ > if all the bits being cleared are already cleared or > all the bits being set are already set. */ > if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) > - return simplify_bit_ops_using_ranges (gsi, stmt); > + { > + if (simplify_truth_ops_using_ranges (gsi, stmt)) > + return true; > + return simplify_bit_ops_using_ranges (gsi, stmt); > + } > break; > > CASE_CONVERT: > if (TREE_CODE (rhs1) == SSA_NAME > && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))) > - return simplify_conversion_using_ranges (stmt); > + { > + if (simplify_truth_ops_using_ranges (gsi, stmt)) > + return true; > + return simplify_conversion_using_ranges (stmt); > + } > break; > > default: >
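
The special cases this version of the patch adds for BIT_AND_EXPR and
BIT_IOR_EXPR boil down to two constant identities that hold no matter what
the other operand's range is, which is why the review suggests handling them
next to the existing "both operands are singleton constants" fold instead of
duplicating the removed TRUTH_* logic.  A minimal stand-alone illustration of
those identities (plain C, not GCC internals; the sampled values are
arbitrary):

```c
#include <assert.h>
#include <limits.h>

int
main (void)
{
  const int samples[] = { INT_MIN, -7, -1, 0, 1, 2, 42, INT_MAX };
  const unsigned n = sizeof (samples) / sizeof (samples[0]);

  for (unsigned i = 0; i < n; i++)
    {
      int x = samples[i];

      /* AND with the singleton range [0, 0] collapses to 0 ...  */
      assert ((x & 0) == 0);

      /* ... and IOR with the all-ones singleton collapses to all-ones,
         independently of the range of the other operand.  */
      assert ((x | -1) == -1);
    }
  return 0;
}
```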
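
The TYPE_PRECISION (...) == 1 guard that recurs throughout the patch rests on
the observation that bitwise NOT on a one-bit integral type is the same
operation as logical NOT, while at any wider precision the two differ.  A
small stand-alone check of that equivalence (plain C, emulating the 1-bit
type by masking; not GCC code):

```c
#include <assert.h>

int
main (void)
{
  for (unsigned x = 0; x <= 1; x++)
    {
      /* Emulating a 1-bit unsigned type by masking: BIT_NOT_EXPR at
         precision 1 just flips the single bit, which is exactly what
         logical negation does on a boolean value.  */
      unsigned bit_not = ~x & 1u;
      unsigned truth_not = !x;
      assert (bit_not == truth_not);
    }

  /* At full precision the equivalence breaks down: ~1 is not 0, so the
     transformation must stay restricted to 1-bit types.  */
  assert (~1u != (unsigned) !1);
  return 0;
}
```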