[v1] Vect: Support IFN SAT_SUB for unsigned vector int

Message ID 20240529114548.52057-1-pan2.li@intel.com
State New
Series [v1] Vect: Support IFN SAT_SUB for unsigned vector int

Commit Message

Li, Pan2 May 29, 2024, 11:45 a.m. UTC
From: Pan Li <pan2.li@intel.com>

This patch adds support for .SAT_SUB on unsigned vector int.  Consider
the example code below:

void
vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n)
{
  for (unsigned i = 0; i < n; i++)
    out[i] = (x[i] - y[i]) & (-(uint64_t)(x[i] >= y[i]));
}
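
The masking expression is a branchless saturating subtraction: when
x[i] >= y[i] holds, -(uint64_t)(x[i] >= y[i]) is all ones and the
difference passes through; otherwise the mask is zero and the result
saturates to 0.  For reference, it is equivalent to the conditional
form below (a scalar sketch for illustration only; the function name
is made up):

uint64_t
sat_sub_u64_scalar (uint64_t x, uint64_t y)
{
  return x >= y ? x - y : 0; /* Saturate to 0 on wrap-around.  */
}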

Before this patch:
void
vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n)
{
  ...
  _77 = .SELECT_VL (ivtmp_75, POLY_INT_CST [2, 2]);
  ivtmp_56 = _77 * 8;
  vect__4.7_59 = .MASK_LEN_LOAD (vectp_x.5_57, 64B, { -1, ... }, _77, 0);
  vect__6.10_63 = .MASK_LEN_LOAD (vectp_y.8_61, 64B, { -1, ... }, _77, 0);

  mask__7.11_64 = vect__4.7_59 >= vect__6.10_63;
  _66 = .COND_SUB (mask__7.11_64, vect__4.7_59, vect__6.10_63, { 0, ... });

  .MASK_LEN_STORE (vectp_out.15_71, 64B, { -1, ... }, _77, 0, _66);
  vectp_x.5_58 = vectp_x.5_57 + ivtmp_56;
  vectp_y.8_62 = vectp_y.8_61 + ivtmp_56;
  vectp_out.15_72 = vectp_out.15_71 + ivtmp_56;
  ivtmp_76 = ivtmp_75 - _77;
  ...
}

After this patch:
void
vec_sat_sub_u64 (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n)
{
  ...
  _76 = .SELECT_VL (ivtmp_74, POLY_INT_CST [2, 2]);
  ivtmp_60 = _76 * 8;
  vect__4.7_63 = .MASK_LEN_LOAD (vectp_x.5_61, 64B, { -1, ... }, _76, 0);
  vect__6.10_67 = .MASK_LEN_LOAD (vectp_y.8_65, 64B, { -1, ... }, _76, 0);

  vect_patt_37.11_68 = .SAT_SUB (vect__4.7_63, vect__6.10_67);

  .MASK_LEN_STORE (vectp_out.12_70, 64B, { -1, ... }, _76, 0, vect_patt_37.11_68);
  vectp_x.5_62 = vectp_x.5_61 + ivtmp_60;
  vectp_y.8_66 = vectp_y.8_65 + ivtmp_60;
  vectp_out.12_71 = vectp_out.12_70 + ivtmp_60;
  ivtmp_75 = ivtmp_74 - _76;
  ...
}
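
Note the separate compare and .COND_SUB are now fused into a single
.SAT_SUB call.  The new match.pd entries (cases 3 and 4 in the patch
below) cover the multiplication-based spelling of the same idiom, so a
variant such as the following is simplified the same way (a
hypothetical rewrite of the example above, for illustration only):

void
vec_sat_sub_u64_mult (uint64_t *out, uint64_t *x, uint64_t *y, unsigned n)
{
  for (unsigned i = 0; i < n; i++)
    out[i] = (x[i] - y[i]) * (uint64_t)(x[i] >= y[i]);
}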

The following test suites passed for this patch:
* The x86 bootstrap test.
* The x86 full regression test.
* The riscv full regression test.

gcc/ChangeLog:

	* match.pd: Add new forms (cases 3 and 4) for the unsigned
	.SAT_SUB recognition.
	* tree-vect-patterns.cc (gimple_unsigned_integer_sat_sub): Add
	new match function decl.
	(vect_recog_build_binary_gimple_call): New helper function to
	build a gcall for a given internal_fn.
	(vect_recog_sat_add_pattern): Leverage the new helper function.
	(vect_recog_sat_sub_pattern): New function to recognize .SAT_SUB.

Signed-off-by: Pan Li <pan2.li@intel.com>
---
 gcc/match.pd              | 14 +++++++
 gcc/tree-vect-patterns.cc | 89 +++++++++++++++++++++++++++++++++-------
 2 files changed, 88 insertions(+), 15 deletions(-)

Patch

diff --git a/gcc/match.pd b/gcc/match.pd
index 3e334533ff8..81f389855cd 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3100,6 +3100,20 @@  DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
  (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
       && types_match (type, @0, @1))))
 
+/* Unsigned saturation sub, case 3 (branchless with gt):
+   SAT_U_SUB = (X - Y) * (X > Y).  */
+(match (unsigned_integer_sat_sub @0 @1)
+ (mult:c (minus @0 @1) (convert (gt @0 @1)))
+ (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
+      && types_match (type, @0, @1))))
+
+/* Unsigned saturation sub, case 4 (branchless with ge):
+   SAT_U_SUB = (X - Y) * (X >= Y).  */
+(match (unsigned_integer_sat_sub @0 @1)
+ (mult:c (minus @0 @1) (convert (ge @0 @1)))
+ (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type)
+      && types_match (type, @0, @1))))
+
 /* x >  y  &&  x != XXX_MIN  -->  x > y
    x >  y  &&  x == XXX_MIN  -->  false . */
 (for eqne (eq ne)
diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index a313dc64643..09a7c129493 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -4488,6 +4488,36 @@  vect_recog_mult_pattern (vec_info *vinfo,
 }
 
 extern bool gimple_unsigned_integer_sat_add (tree, tree*, tree (*)(tree));
+extern bool gimple_unsigned_integer_sat_sub (tree, tree*, tree (*)(tree));
+
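+/* Try to build a gimple call to the internal function FN with the
+   operands OP_0 and OP_1.  Requires the target to support FN on the
+   vector type of the operands.  On success set *TYPE_OUT to that
+   vector type and return the call, otherwise return NULL.  */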
+static gcall *
+vect_recog_build_binary_gimple_call (vec_info *vinfo, gimple *stmt,
+				     internal_fn fn, tree *type_out,
+				     tree op_0, tree op_1)
+{
+  tree itype = TREE_TYPE (op_0);
+  tree vtype = get_vectype_for_scalar_type (vinfo, itype);
+
+  if (vtype != NULL_TREE
+      && direct_internal_fn_supported_p (fn, vtype, OPTIMIZE_FOR_BOTH))
+    {
+      gcall *call = gimple_build_call_internal (fn, 2, op_0, op_1);
+
+      gimple_call_set_lhs (call, vect_recog_temp_ssa_var (itype, NULL));
+      gimple_call_set_nothrow (call, /* nothrow_p */ false);
+      gimple_set_location (call, gimple_location (stmt));
+
+      *type_out = vtype;
+
+      return call;
+    }
+
+  return NULL;
+}
 
 /*
  * Try to detect saturation add pattern (SAT_ADD), aka below gimple:
@@ -4510,27 +4540,55 @@  vect_recog_sat_add_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
   if (!is_gimple_assign (last_stmt))
     return NULL;
 
-  tree res_ops[2];
+  tree ops[2];
   tree lhs = gimple_assign_lhs (last_stmt);
 
-  if (gimple_unsigned_integer_sat_add (lhs, res_ops, NULL))
+  if (gimple_unsigned_integer_sat_add (lhs, ops, NULL))
     {
-      tree itype = TREE_TYPE (res_ops[0]);
-      tree vtype = get_vectype_for_scalar_type (vinfo, itype);
-
-      if (vtype != NULL_TREE
-	&& direct_internal_fn_supported_p (IFN_SAT_ADD, vtype,
-					   OPTIMIZE_FOR_BOTH))
+      gcall *call = vect_recog_build_binary_gimple_call (vinfo, last_stmt,
+							 IFN_SAT_ADD, type_out,
+							 ops[0], ops[1]);
+      if (call)
 	{
-	  *type_out = vtype;
-	  gcall *call = gimple_build_call_internal (IFN_SAT_ADD, 2, res_ops[0],
-						    res_ops[1]);
+	  vect_pattern_detected ("vect_recog_sat_add_pattern", last_stmt);
+	  return call;
+	}
+    }
+
+  return NULL;
+}
 
-	  gimple_call_set_lhs (call, vect_recog_temp_ssa_var (itype, NULL));
-	  gimple_call_set_nothrow (call, /* nothrow_p */ false);
-	  gimple_set_location (call, gimple_location (last_stmt));
+/*
+ * Try to detect saturation sub pattern (SAT_SUB), aka below gimple:
+ *   _7 = _1 >= _2;
+ *   _8 = _1 - _2;
+ *   _10 = (long unsigned int) _7;
+ *   _9 = _8 * _10;
+ *
+ * And then simplified to
+ *   _9 = .SAT_SUB (_1, _2);
+ */
 
-	  vect_pattern_detected ("vect_recog_sat_add_pattern", last_stmt);
+static gimple *
+vect_recog_sat_sub_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
+			    tree *type_out)
+{
+  gimple *last_stmt = STMT_VINFO_STMT (stmt_vinfo);
+
+  if (!is_gimple_assign (last_stmt))
+    return NULL;
+
+  tree ops[2];
+  tree lhs = gimple_assign_lhs (last_stmt);
+
+  if (gimple_unsigned_integer_sat_sub (lhs, ops, NULL))
+    {
+      gcall *call = vect_recog_build_binary_gimple_call (vinfo, last_stmt,
+							 IFN_SAT_SUB, type_out,
+							 ops[0], ops[1]);
+      if (call)
+	{
+	  vect_pattern_detected ("vect_recog_sat_sub_pattern", last_stmt);
 	  return call;
 	}
     }
@@ -7039,6 +7097,7 @@  static vect_recog_func vect_vect_recog_func_ptrs[] = {
   { vect_recog_divmod_pattern, "divmod" },
   { vect_recog_mult_pattern, "mult" },
   { vect_recog_sat_add_pattern, "sat_add" },
+  { vect_recog_sat_sub_pattern, "sat_sub" },
   { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
   { vect_recog_gcond_pattern, "gcond" },
   { vect_recog_bool_pattern, "bool" },