diff mbox series

Improved handling of MULT_EXPR in bit CCP.

Message ID 00d501d78cf6$59e62f20$0db28d60$@nextmovesoftware.com
State New
Headers show
Series Improved handling of MULT_EXPR in bit CCP. | expand

Commit Message

Roger Sayle Aug. 9, 2021, 8:12 a.m. UTC
This patch allows GCC to constant fold (i | (i<<16)) | ((i<<24) | (i<<8)),
where i is an unsigned char, or the equivalent (i*65537) | (i*16777472), to
i*16843009.  The trick is to teach tree_nonzero_bits which bits may be
set in the result of a multiplication by a constant given which bits are
potentially set in the operands.  This allows the optimizations recently
added to match.pd to catch more cases.

The required mask/value pair from a multiplication may be calculated using
a classical shift-and-add algorithm, given we already have implementations
for both addition and shift by constant.  To keep this optimization "cheap",
this functionality is only used if the constant multiplier has a few bits
set (unless flag_expensive_optimizations), and we provide a special case
fast-path implementation for the common case where the (non-constant)
operand has no bits that are guaranteed to be set.  I have no evidence
that this functionality causes performance issues; it's just that sparse
multipliers provide the largest benefit to CCP.

This patch has been tested on x86_64-pc-linux-gnu with "make bootstrap"
and "make -k check" with no new failures.

Ok for mainline?


2021-08-09  Roger Sayle  <roger@nextmovesoftware.com>

gcc/ChangeLog
	* tree-ssa-ccp.c (bit_value_mult_const): New helper function to
	calculate the mask-value pair result of a multiplication by an
	unsigned constant.
	(bit_value_binop) [MULT_EXPR]: Call it from here for
	multiplications by non-negative constants.

gcc/testsuite/ChangeLog
	* gcc.dg/fold-ior-5.c: New test case.

Roger
--

/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-optimized" } */

unsigned int test_ior(unsigned char i)
{
  /* Replicate the byte I into all four bytes of the result using IOR of
     shifted copies.  With the improved MULT_EXPR handling in bit CCP this
     whole expression should fold to the single multiplication
     i * 16843009 (0x01010101), as checked by the scan-tree-dump below.  */
  return (i | (i<<16)) | ((i<<24) | (i<<8));
}

unsigned int test_xor(unsigned char i)
{
  /* Same byte-replication as test_ior but with XOR.  Because I is an
     unsigned char, the four shifted copies occupy disjoint bit ranges,
     so XOR is equivalent to IOR here and should likewise fold to
     i * 16843009 (0x01010101).  */
  return (i ^ (i<<16)) ^ ((i<<24) ^ (i<<8));
}

/* { dg-final { scan-tree-dump-not " \\^ " "optimized" } } */
/* { dg-final { scan-tree-dump-not " \\| " "optimized" } } */
/* { dg-final { scan-tree-dump-times " \\* 16843009" 2 "optimized" } } */

Comments

Richard Biener Aug. 17, 2021, 10:31 a.m. UTC | #1
On Mon, Aug 9, 2021 at 10:13 AM Roger Sayle <roger@nextmovesoftware.com> wrote:
>
>
> This patch allows GCC to constant fold (i | (i<<16)) | ((i<<24) | (i<<8)),
> where i is an unsigned char, or the equivalent (i*65537) | (i*16777472), to
> i*16843009.  The trick is to teach tree_nonzero_bits which bits may be
> set in the result of a multiplication by a constant given which bits are
> potentially set in the operands.  This allows the optimizations recently
> added to match.pd to catch more cases.
>
> The required mask/value pair from a multiplication may be calculated using
> a classical shift-and-add algorithm, given we already have implementations
> for both addition and shift by constant.  To keep this optimization "cheap",
> this functionality is only used if the constant multiplier has a few bits
> set (unless flag_expensive_optimizations), and we provide a special case
> fast-path implementation for the common case where the (non-constant)
> operand has no bits that are guaranteed to be set.  I have no evidence
> that this functionality causes performance issues, it's just that sparse
> multipliers provide the largest benefit to CCP.
>
> This patch has been tested on x86_64-pc-linux-gnu with "make bootstrap"
> and "make -k check" with no new failures.
>
> Ok for mainline?

OK.

Thanks,
Richard.

>
> 2021-08-09  Roger Sayle  <roger@nextmovesoftware.com>
>
> gcc/ChangeLog
>         * tree-ssa-ccp.c (bit_value_mult_const): New helper function to
>         calculate the mask-value pair result of a multiplication by an
>         unsigned constant.
>         (bit_value_binop) [MULT_EXPR]:  Call it from here for
> multiplications
>         by non-negative constants.
>
> gcc/testsuite/ChangeLog
>         * gcc.dg/fold-ior-5.c: New test case.
>
> Roger
> --
>
diff mbox series

Patch

diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 9ce6214..86ca3ae 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -1340,6 +1340,66 @@  bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
     }
 }
 
+/* Determine the mask pair *VAL and *MASK from multiplying the
+   argument mask pair RVAL, RMASK by the unsigned constant C.  */
+void
+bit_value_mult_const (signop sgn, int width,
+		      widest_int *val, widest_int *mask,
+		      const widest_int &rval, const widest_int &rmask,
+		      widest_int c)
+{
+  widest_int sum_mask = 0;
+
+  /* Ensure rval_lo only contains known bits.  */
+  widest_int rval_lo = wi::bit_and_not (rval, rmask);
+
+  if (rval_lo != 0)
+    {
+      /* General case (some bits of multiplicand are known set).  */
+      widest_int sum_val = 0;
+      while (c != 0)
+	{
+	  /* Determine the lowest bit set in the multiplier.  */
+	  int bitpos = wi::ctz (c);
+	  widest_int term_mask = rmask << bitpos;
+	  widest_int term_val = rval_lo << bitpos;
+
+	  /* sum += term.  */
+	  widest_int lo = sum_val + term_val;
+	  widest_int hi = (sum_val | sum_mask) + (term_val | term_mask);
+	  sum_mask |= term_mask | (lo ^ hi);
+	  sum_val = lo;
+
+	  /* Clear this bit in the multiplier.  */
+	  c ^= wi::lshift (1, bitpos);
+	}
+      /* Correctly extend the result value.  */
+      *val = wi::ext (sum_val, width, sgn);
+    }
+  else
+    {
+      /* Special case (no bits of multiplicand are known set).  */
+      while (c != 0)
+	{
+	  /* Determine the lowest bit set in the multiplier.  */
+	  int bitpos = wi::ctz (c);
+	  widest_int term_mask = rmask << bitpos;
+
+	  /* sum += term.  */
+	  widest_int hi = sum_mask + term_mask;
+	  sum_mask |= term_mask | hi;
+
+	  /* Clear this bit in the multiplier.  */
+	  c ^= wi::lshift (1, bitpos);
+	}
+      *val = 0;
+    }
+
+  /* Correctly extend the result mask.  */
+  *mask = wi::ext (sum_mask, width, sgn);
+}
+
+
 /* Apply the operation CODE in type TYPE to the value, mask pairs
    R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
@@ -1482,24 +1542,33 @@  bit_value_binop (enum tree_code code, signop sgn, int width,
       }
 
     case MULT_EXPR:
-      {
-	/* Just track trailing zeros in both operands and transfer
-	   them to the other.  */
-	int r1tz = wi::ctz (r1val | r1mask);
-	int r2tz = wi::ctz (r2val | r2mask);
-	if (r1tz + r2tz >= width)
-	  {
-	    *mask = 0;
-	    *val = 0;
-	  }
-	else if (r1tz + r2tz > 0)
-	  {
-	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
-			     width, sgn);
-	    *val = 0;
-	  }
-	break;
-      }
+      if (r2mask == 0
+	  && !wi::neg_p (r2val, sgn)
+	  && (flag_expensive_optimizations || wi::popcount (r2val) < 8))
+	bit_value_mult_const (sgn, width, val, mask, r1val, r1mask, r2val);
+      else if (r1mask == 0
+	       && !wi::neg_p (r1val, sgn)
+	       && (flag_expensive_optimizations || wi::popcount (r1val) < 8))
+	bit_value_mult_const (sgn, width, val, mask, r2val, r2mask, r1val);
+      else
+	{
+	  /* Just track trailing zeros in both operands and transfer
+	     them to the other.  */
+	  int r1tz = wi::ctz (r1val | r1mask);
+	  int r2tz = wi::ctz (r2val | r2mask);
+	  if (r1tz + r2tz >= width)
+	    {
+	      *mask = 0;
+	      *val = 0;
+	    }
+	  else if (r1tz + r2tz > 0)
+	    {
+	      *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
+			       width, sgn);
+	      *val = 0;
+	    }
+	}
+      break;
 
     case EQ_EXPR:
     case NE_EXPR: