[v2] Vect: Support IFN SAT_TRUNC for unsigned vector int

Message ID 20240703013320.3994215-1-pan2.li@intel.com
State New
Series [v2] Vect: Support IFN SAT_TRUNC for unsigned vector int

Commit Message

Li, Pan2 July 3, 2024, 1:33 a.m. UTC
From: Pan Li <pan2.li@intel.com>

This patch adds support for the .SAT_TRUNC internal function on
unsigned vector int.  Consider the example code below:

Form 1
  #define VEC_DEF_SAT_U_TRUC_FMT_1(NT, WT)                             \
  void __attribute__((noinline))                                       \
  vec_sat_u_truc_##WT##_to_##NT##_fmt_1 (NT *x, WT *y, unsigned limit) \
  {                                                                    \
    for (unsigned i = 0; i < limit; i++)                               \
      {                                                                \
        bool overflow = y[i] > (WT)(NT)(-1);                           \
        x[i] = ((NT)y[i]) | (NT)-overflow;                             \
      }                                                                \
  }

VEC_DEF_SAT_U_TRUC_FMT_1 (uint32_t, uint64_t)
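
For reference, the instantiation above expands to roughly the following
plain C function after preprocessing (a sketch only; it assumes
<stdint.h> and <stdbool.h> are included, and introduces nothing beyond
what the macro already spells out):

  void __attribute__((noinline))
  vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t *x, uint64_t *y, unsigned limit)
  {
    for (unsigned i = 0; i < limit; i++)
      {
        /* overflow is true when y[i] does not fit into uint32_t.  */
        bool overflow = y[i] > (uint64_t)(uint32_t)(-1);
        /* Truncated value, or all-ones (UINT32_MAX) when overflow is set.  */
        x[i] = ((uint32_t)y[i]) | (uint32_t)-overflow;
      }
  }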

Before this patch:
void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
{
  ...
  _51 = .SELECT_VL (ivtmp_49, POLY_INT_CST [2, 2]);
  ivtmp_35 = _51 * 8;
  vect__4.7_32 = .MASK_LEN_LOAD (vectp_y.5_34, 64B, { -1, ... }, _51, 0);
  mask_overflow_16.8_30 = vect__4.7_32 > { 4294967295, ... };
  vect__5.9_29 = (vector([2,2]) unsigned int) vect__4.7_32;
  vect__10.13_20 = .VCOND_MASK (mask_overflow_16.8_30, { 4294967295, ... }, vect__5.9_29);
  ivtmp_12 = _51 * 4;
  .MASK_LEN_STORE (vectp_x.14_11, 32B, { -1, ... }, _51, 0, vect__10.13_20);
  vectp_y.5_33 = vectp_y.5_34 + ivtmp_35;
  vectp_x.14_46 = vectp_x.14_11 + ivtmp_12;
  ivtmp_50 = ivtmp_49 - _51;
  if (ivtmp_50 != 0)
  ...
}

After this patch:
void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
{
  ...
  _12 = .SELECT_VL (ivtmp_21, POLY_INT_CST [2, 2]);
  ivtmp_34 = _12 * 8;
  vect__4.7_31 = .MASK_LEN_LOAD (vectp_y.5_33, 64B, { -1, ... }, _12, 0);
  vect_patt_40.8_30 = .SAT_TRUNC (vect__4.7_31); // << .SAT_TRUNC
  ivtmp_29 = _12 * 4;
  .MASK_LEN_STORE (vectp_x.9_28, 32B, { -1, ... }, _12, 0, vect_patt_40.8_30);
  vectp_y.5_32 = vectp_y.5_33 + ivtmp_34;
  vectp_x.9_27 = vectp_x.9_28 + ivtmp_29;
  ivtmp_20 = ivtmp_21 - _12;
  if (ivtmp_20 != 0)
  ...
}

The following test suites passed for this patch:
* The x86 bootstrap test.
* The x86 full regression test.
* The rv64gcv full regression tests.
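
As a quick sanity check of the saturating semantics (a minimal sketch,
not part of this patch; it assumes the expanded function above is
linked in):

  #include <stdint.h>
  #include <assert.h>

  extern void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t *, uint64_t *, unsigned);

  int
  main (void)
  {
    uint64_t y[3] = { 42, UINT32_MAX, (uint64_t) UINT32_MAX + 1 };
    uint32_t x[3];

    vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (x, y, 3);

    assert (x[0] == 42);          /* Fits: plain truncation.  */
    assert (x[1] == UINT32_MAX);  /* Exactly at the limit.  */
    assert (x[2] == UINT32_MAX);  /* Saturates instead of wrapping to 0.  */

    return 0;
  }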

gcc/ChangeLog:

	* tree-vect-patterns.cc (gimple_unsigned_integer_sat_trunc): Add
	new declaration for the function generated by match.pd.
	(vect_recog_sat_trunc_pattern): Add new function to recognize the
	.SAT_TRUNC pattern.

Signed-off-by: Pan Li <pan2.li@intel.com>
---
 gcc/tree-vect-patterns.cc | 54 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

Comments

Richard Biener July 3, 2024, 9:06 a.m. UTC | #1
On Wed, Jul 3, 2024 at 3:33 AM <pan2.li@intel.com> wrote:
>
> From: Pan Li <pan2.li@intel.com>
>
> This patch would like to support the .SAT_TRUNC for the unsigned
> vector int.  Given we have below example code:
>
> Form 1
>   #define VEC_DEF_SAT_U_TRUC_FMT_1(NT, WT)                             \
>   void __attribute__((noinline))                                       \
>   vec_sat_u_truc_##WT##_to_##NT##_fmt_1 (NT *x, WT *y, unsigned limit) \
>   {                                                                    \
>     for (unsigned i = 0; i < limit; i++)                               \
>       {                                                                \
>         bool overflow = y[i] > (WT)(NT)(-1);                           \
>         x[i] = ((NT)y[i]) | (NT)-overflow;                             \
>       }                                                                \
>   }
>
> VEC_DEF_SAT_U_TRUC_FMT_1 (uint32_t, uint64_t)
>
> Before this patch:
> void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
> {
>   ...
>   _51 = .SELECT_VL (ivtmp_49, POLY_INT_CST [2, 2]);
>   ivtmp_35 = _51 * 8;
>   vect__4.7_32 = .MASK_LEN_LOAD (vectp_y.5_34, 64B, { -1, ... }, _51, 0);
>   mask_overflow_16.8_30 = vect__4.7_32 > { 4294967295, ... };
>   vect__5.9_29 = (vector([2,2]) unsigned int) vect__4.7_32;
>   vect__10.13_20 = .VCOND_MASK (mask_overflow_16.8_30, { 4294967295, ... }, vect__5.9_29);
>   ivtmp_12 = _51 * 4;
>   .MASK_LEN_STORE (vectp_x.14_11, 32B, { -1, ... }, _51, 0, vect__10.13_20);
>   vectp_y.5_33 = vectp_y.5_34 + ivtmp_35;
>   vectp_x.14_46 = vectp_x.14_11 + ivtmp_12;
>   ivtmp_50 = ivtmp_49 - _51;
>   if (ivtmp_50 != 0)
>   ...
> }
>
> After this patch:
> void vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t * x, uint64_t * y, unsigned int limit)
> {
>   ...
>   _12 = .SELECT_VL (ivtmp_21, POLY_INT_CST [2, 2]);
>   ivtmp_34 = _12 * 8;
>   vect__4.7_31 = .MASK_LEN_LOAD (vectp_y.5_33, 64B, { -1, ... }, _12, 0);
>   vect_patt_40.8_30 = .SAT_TRUNC (vect__4.7_31); // << .SAT_TRUNC
>   ivtmp_29 = _12 * 4;
>   .MASK_LEN_STORE (vectp_x.9_28, 32B, { -1, ... }, _12, 0, vect_patt_40.8_30);
>   vectp_y.5_32 = vectp_y.5_33 + ivtmp_34;
>   vectp_x.9_27 = vectp_x.9_28 + ivtmp_29;
>   ivtmp_20 = ivtmp_21 - _12;
>   if (ivtmp_20 != 0)
>   ...
> }
>
> The below test suites are passed for this patch
> * The x86 bootstrap test.
> * The x86 fully regression test.
> * The rv64gcv fully regression tests.

OK.

Thanks,
Richard.

Li, Pan2 July 3, 2024, 1:24 p.m. UTC | #2
> OK.

Committed, thanks Richard.

Pan

-----Original Message-----
From: Richard Biener <richard.guenther@gmail.com> 
Sent: Wednesday, July 3, 2024 5:06 PM
To: Li, Pan2 <pan2.li@intel.com>
Cc: gcc-patches@gcc.gnu.org; juzhe.zhong@rivai.ai; kito.cheng@gmail.com; tamar.christina@arm.com; jeffreyalaw@gmail.com; rdapp.gcc@gmail.com
Subject: Re: [PATCH v2] Vect: Support IFN SAT_TRUNC for unsigned vector int

Patch

diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
index 519d15f2a43..86e893a1c43 100644
--- a/gcc/tree-vect-patterns.cc
+++ b/gcc/tree-vect-patterns.cc
@@ -4489,6 +4489,7 @@  vect_recog_mult_pattern (vec_info *vinfo,
 
 extern bool gimple_unsigned_integer_sat_add (tree, tree*, tree (*)(tree));
 extern bool gimple_unsigned_integer_sat_sub (tree, tree*, tree (*)(tree));
+extern bool gimple_unsigned_integer_sat_trunc (tree, tree*, tree (*)(tree));
 
 static gimple *
 vect_recog_build_binary_gimple_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
@@ -4603,6 +4604,58 @@  vect_recog_sat_sub_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
   return NULL;
 }
 
+/*
+ * Try to detect the saturation truncation pattern (SAT_TRUNC), i.e. the gimple below:
+ *   overflow_5 = x_4(D) > 4294967295;
+ *   _1 = (unsigned int) x_4(D);
+ *   _2 = (unsigned int) overflow_5;
+ *   _3 = -_2;
+ *   _6 = _1 | _3;
+ *
+ * And then simplified to
+ *   _6 = .SAT_TRUNC (x_4(D));
+ */
+
+static gimple *
+vect_recog_sat_trunc_pattern (vec_info *vinfo, stmt_vec_info stmt_vinfo,
+			      tree *type_out)
+{
+  gimple *last_stmt = STMT_VINFO_STMT (stmt_vinfo);
+
+  if (!is_gimple_assign (last_stmt))
+    return NULL;
+
+  tree ops[1];
+  tree lhs = gimple_assign_lhs (last_stmt);
+
+  if (gimple_unsigned_integer_sat_trunc (lhs, ops, NULL))
+    {
+      tree itype = TREE_TYPE (ops[0]);
+      tree otype = TREE_TYPE (lhs);
+      tree v_itype = get_vectype_for_scalar_type (vinfo, itype);
+      tree v_otype = get_vectype_for_scalar_type (vinfo, otype);
+      internal_fn fn = IFN_SAT_TRUNC;
+
+      if (v_itype != NULL_TREE && v_otype != NULL_TREE
+	&& direct_internal_fn_supported_p (fn, tree_pair (v_otype, v_itype),
+					   OPTIMIZE_FOR_BOTH))
+	{
+	  gcall *call = gimple_build_call_internal (fn, 1, ops[0]);
+	  tree out_ssa = vect_recog_temp_ssa_var (otype, NULL);
+
+	  gimple_call_set_lhs (call, out_ssa);
+	  gimple_call_set_nothrow (call, /* nothrow_p */ false);
+	  gimple_set_location (call, gimple_location (last_stmt));
+
+	  *type_out = v_otype;
+
+	  return call;
+	}
+    }
+
+  return NULL;
+}
+
 /* Detect a signed division by a constant that wouldn't be
    otherwise vectorized:
 
@@ -7065,6 +7118,7 @@  static vect_recog_func vect_vect_recog_func_ptrs[] = {
   { vect_recog_mult_pattern, "mult" },
   { vect_recog_sat_add_pattern, "sat_add" },
   { vect_recog_sat_sub_pattern, "sat_sub" },
+  { vect_recog_sat_trunc_pattern, "sat_trunc" },
   { vect_recog_mixed_size_cond_pattern, "mixed_size_cond" },
   { vect_recog_gcond_pattern, "gcond" },
   { vect_recog_bool_pattern, "bool" },
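
A testcase exercising this recognizer would typically instantiate the
loop above and scan the dump for the internal function call.  A
hypothetical sketch (the dg- directives and options are illustrative,
not part of this patch; the exact flags depend on a target whose
backend supports the vector .SAT_TRUNC optab, e.g. rv64gcv):

  /* { dg-do compile } */
  /* { dg-options "-O3 -fdump-tree-optimized" } */

  #include <stdint.h>
  #include <stdbool.h>

  void __attribute__((noinline))
  vec_sat_u_truc_uint64_t_to_uint32_t_fmt_1 (uint32_t *x, uint64_t *y, unsigned limit)
  {
    for (unsigned i = 0; i < limit; i++)
      {
        bool overflow = y[i] > (uint64_t)(uint32_t)(-1);
        x[i] = ((uint32_t)y[i]) | (uint32_t)-overflow;
      }
  }

  /* { dg-final { scan-tree-dump " = .SAT_TRUNC " "optimized" } } */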