
Simplify vector ((VCE?(a cmp b ? -1 : 0)) < 0) ? c : d to just VCE:((a cmp b) ? (VCE c) : (VCE d)).

Message ID 20231110061228.1411882-1-hongtao.liu@intel.com
State New
Series Simplify vector ((VCE?(a cmp b ? -1 : 0)) < 0) ? c : d to just VCE:((a cmp b) ? (VCE c) : (VCE d)).

Commit Message

liuhongt Nov. 10, 2023, 6:12 a.m. UTC
When I was working on PR112443, I noticed a missed optimization:
after we fold _mm{,256}_blendv_epi8/pd/ps into gimple, the backend
fails to combine it back to v{,p}blendv{b,ps,pd} since the pattern is
too complicated, so I think we should handle it at the gimple level.
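
For reference, the dump below presumably comes from something like the
first function in the new blendv-3.c test added by this patch:

#include <immintrin.h>

/* ~c < 0 is equivalent to c >= 0, and _mm256_blendv_epi8 is folded
   into the VIEW_CONVERT_EXPR/VEC_COND_EXPR sequence shown below.  */
__m256i
foo (__m256i a, __m256i b, __m256i c)
{
  return _mm256_blendv_epi8 (a, b, ~c < 0);
}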

The dump is like

  _1 = c_3(D) >= { 0, 0, 0, 0 };
  _2 = VEC_COND_EXPR <_1, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;
  _7 = VIEW_CONVERT_EXPR<vector(32) char>(_2);
  _8 = VIEW_CONVERT_EXPR<vector(32) char>(b_6(D));
  _9 = VIEW_CONVERT_EXPR<vector(32) char>(a_5(D));
  _10 = _7 < { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  _11 = VEC_COND_EXPR <_10, _8, _9>;

It can be optimized to

  _1 = c_2(D) >= { 0, 0, 0, 0 };
  _6 = VEC_COND_EXPR <_1, b_5(D), a_4(D)>;

Since _7 is either -1 or 0, the selection _7 < 0 ? _8 : _9 is equal
to _1 ? b : a as long as the TYPE_PRECISION of the component type of
the second VEC_COND_EXPR is less than or equal to that of the first
one.  The patch adds a gimple pattern to handle that.
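
A scalar sketch of why this holds (illustration only, not part of the
patch): a -1 or 0 lane of the inner mask, reinterpreted as narrower
signed elements, still gives all -1 or all 0 elements, so the
per-element sign test selects exactly the lanes picked by the original
comparison.

#include <stdio.h>
#include <string.h>

int
main (void)
{
  long long lane = -1;			 /* inner VEC_COND_EXPR lane: -1 or 0 */
  signed char bytes[sizeof (lane)];
  memcpy (bytes, &lane, sizeof (lane));	 /* models the VIEW_CONVERT_EXPR */
  for (size_t i = 0; i < sizeof (lane); i++)
    printf ("%d", bytes[i] < 0);	 /* all 1s for -1, all 0s for 0 */
  printf ("\n");
  return 0;
}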

gcc/ChangeLog:

	* match.pd (VCE:(a cmp b ? -1 : 0) < 0) ? c : d ---> VCE:((a
	cmp b) ? (VCE:c) : (VCE:d)): New gimple simplification.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/avx512vl-blendv-3.c: New test.
	* gcc.target/i386/blendv-3.c: New test.
---
 gcc/match.pd                                  | 19 ++++++++
 .../gcc.target/i386/avx512vl-blendv-3.c       |  6 +++
 gcc/testsuite/gcc.target/i386/blendv-3.c      | 46 +++++++++++++++++++
 3 files changed, 71 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
 create mode 100644 gcc/testsuite/gcc.target/i386/blendv-3.c

Comments

Hongtao Liu Nov. 13, 2023, 7:40 a.m. UTC | #1
On Fri, Nov 10, 2023 at 2:14 PM liuhongt <hongtao.liu@intel.com> wrote:
>
> When I was working on PR112443, I noticed a missed optimization:
> after we fold _mm{,256}_blendv_epi8/pd/ps into gimple, the backend
> fails to combine it back to v{,p}blendv{b,ps,pd} since the pattern is
> too complicated, so I think we should handle it at the gimple level.
>
> The dump is like
>
>   _1 = c_3(D) >= { 0, 0, 0, 0 };
>   _2 = VEC_COND_EXPR <_1, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;
>   _7 = VIEW_CONVERT_EXPR<vector(32) char>(_2);
>   _8 = VIEW_CONVERT_EXPR<vector(32) char>(b_6(D));
>   _9 = VIEW_CONVERT_EXPR<vector(32) char>(a_5(D));
>   _10 = _7 < { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
>   _11 = VEC_COND_EXPR <_10, _8, _9>;
>
> It can be optimized to
>
>   _1 = c_2(D) >= { 0, 0, 0, 0 };
>   _6 = VEC_COND_EXPR <_1, b_5(D), a_4(D)>;
>
> Since _7 is either -1 or 0, the selection _7 < 0 ? _8 : _9 is equal
> to _1 ? b : a as long as the TYPE_PRECISION of the component type of
> the second VEC_COND_EXPR is less than or equal to that of the first
> one.  The patch adds a gimple pattern to handle that.

This is the updated patch according to Pinski's comments; I'll reply to them here.
> It looks like the outer vec_cond isn't actually relevant to the simplification?
>

My original pattern was wrong, as Pinski mentioned; for the new pattern
the outer vec_cond is needed.
> Actually this is invalid transformation. It is only valid for unsigned types.
> The reason why it is invalid is because the sign bit changes when
> going to a smaller type from a larger one.
> It would be valid for equals but no other type.
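
For what it's worth, a small endian-dependent sketch (mine, not from
the thread) of the sign-bit issue, which is also why the new pattern
requires the outer element precision to be no larger than the inner
one:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* Per-byte mask: only the last lane selected.  */
  signed char mask[4] = { 0, 0, 0, -1 };
  int wide;
  memcpy (&wide, mask, sizeof (wide));	/* reinterpret as one wider lane */
  /* On little endian this prints 1: the wider sign test would select
     elements whose original byte lanes were 0.  */
  printf ("%d\n", wide < 0);
  return 0;
}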

>  (lt (view_convert? (vec_cond (cmp @0 @1) integer_all_onesp
> integer_zerop)) integer_zerop)
>
> is the relevant part?  I wonder what canonicalizes the inner vec_cond?
>  Did you ever see
> the (view_convert ... missing?

typedef char v32qi __attribute__((vector_size(16)));

v32qi
foo (v32qi a, v32qi b, v32qi c)
{
  v32qi d = ~c < 0
    ? __extension__(v32qi){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}
    : (v32qi){0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  return d < 0 ? a : b;
}
Looks like ccp1 can handle the non-view_convert case, so I'll remove
the "?" from view_convert.


--
BR,
Hongtao

Patch

diff --git a/gcc/match.pd b/gcc/match.pd
index dbc811b2b38..4d823882a7c 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -5170,6 +5170,25 @@  DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
  (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
 
+(for cmp (simple_comparison)
+ (simplify
+  (vec_cond
+    (lt (view_convert?@5 (vec_cond@6 (cmp@4 @0 @1)
+				 integer_all_onesp
+				 integer_zerop))
+	  integer_zerop) @2 @3)
+  (if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))
+       && VECTOR_INTEGER_TYPE_P (TREE_TYPE (@5))
+       && !TYPE_UNSIGNED (TREE_TYPE (@5))
+       && VECTOR_TYPE_P (TREE_TYPE (@6))
+       && VECTOR_TYPE_P (type)
+       && (TYPE_PRECISION (TREE_TYPE (type))
+	  <= TYPE_PRECISION (TREE_TYPE (TREE_TYPE (@6))))
+       && TYPE_SIZE (type) == TYPE_SIZE (TREE_TYPE (@6)))
+   (with { tree vtype = TREE_TYPE (@6);}
+     (view_convert:type
+       (vec_cond @4 (view_convert:vtype @2) (view_convert:vtype @3)))))))
+
 /* c1 ? c2 ? a : b : b  -->  (c1 & c2) ? a : b  */
 (simplify
  (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c b/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
new file mode 100644
index 00000000000..2777e72ab5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
@@ -0,0 +1,6 @@ 
+/* { dg-do compile } */
+/* { dg-options "-mavx512vl -mavx512bw -O2" } */
+/* { dg-final { scan-assembler-times {vp?blendv(?:b|p[sd])[ \t]*} 6 } } */
+/* { dg-final { scan-assembler-not {vpcmp} } } */
+
+#include "blendv-3.c"
diff --git a/gcc/testsuite/gcc.target/i386/blendv-3.c b/gcc/testsuite/gcc.target/i386/blendv-3.c
new file mode 100644
index 00000000000..fa0fb067a73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/blendv-3.c
@@ -0,0 +1,46 @@ 
+/* { dg-do compile } */
+/* { dg-options "-mavx2 -O2" } */
+/* { dg-final { scan-assembler-times {vp?blendv(?:b|p[sd])[ \t]*} 6 } } */
+/* { dg-final { scan-assembler-not {vpcmp} } } */
+
+#include <immintrin.h>
+
+__m256i
+foo (__m256i a, __m256i b, __m256i c)
+{
+  return _mm256_blendv_epi8 (a, b, ~c < 0);
+}
+
+__m256d
+foo1 (__m256d a, __m256d b, __m256i c)
+{
+  __m256i d = ~c < 0;
+  return _mm256_blendv_pd (a, b, (__m256d)d);
+}
+
+__m256
+foo2 (__m256 a, __m256 b, __m256i c)
+{
+  __m256i d = ~c < 0;
+  return _mm256_blendv_ps (a, b, (__m256)d);
+}
+
+__m128i
+foo4 (__m128i a, __m128i b, __m128i c)
+{
+  return _mm_blendv_epi8 (a, b, ~c < 0);
+}
+
+__m128d
+foo5 (__m128d a, __m128d b, __m128i c)
+{
+  __m128i d = ~c < 0;
+  return _mm_blendv_pd (a, b, (__m128d)d);
+}
+
+__m128
+foo6 (__m128 a, __m128 b, __m128i c)
+{
+  __m128i d = ~c < 0;
+  return _mm_blendv_ps (a, b, (__m128)d);
+}