Message ID | 20211104015008.70431-1-hongyu.wang@intel.com
---|---
State | New
Series | i386: Auto vectorize sdot_prod, usdot_prod with VNNI instruction.
On Thu, Nov 4, 2021 at 9:50 AM Hongyu Wang via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> Hi,
>
> AVX512VNNI/AVXVNNI have vpdpwssd for HImode and vpdpbusd for QImode,
> so adjust the HImode sdot_prod expander and add a QImode usdot_prod
> expander to enhance vectorization for dotprod.
>
> Bootstrapped/regtested on x86_64-pc-linux-gnu{-m32,} and sde.
> Ok for master?

LGTM.

> gcc/ChangeLog:
>
>         * config/i386/sse.md (VI2_AVX512VNNIBW): New mode iterator.
>         (VI1_AVX512VNNI): Likewise.
>         (SDOT_VPDP_SUF): New mode_attr.
>         (VI1SI): Likewise.
>         (vi1si): Likewise.
>         (sdot_prod<mode>): Use VI2_AVX512VNNIBW iterator, expand to
>         vpdpwssd when VNNI targets are available.
>         (usdot_prod<mode>): New expander for vector QImode.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/i386/vnni-auto-vectorize-1.c: New test.
>         * gcc.target/i386/vnni-auto-vectorize-2.c: Ditto.
> ---
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 2764a250229..22435e5d036 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -500,6 +500,9 @@
 (define_mode_iterator VI1_AVX512F
   [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI])

+(define_mode_iterator VI1_AVX512VNNI
+  [(V64QI "TARGET_AVX512VNNI") (V32QI "TARGET_AVX2") V16QI])
+
 (define_mode_iterator VI12_256_512_AVX512VL
   [V64QI (V32QI "TARGET_AVX512VL")
    V32HI (V16HI "TARGET_AVX512VL")])
@@ -510,6 +513,10 @@
 (define_mode_iterator VI2_AVX512F
   [(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI])

+(define_mode_iterator VI2_AVX512VNNIBW
+  [(V32HI "TARGET_AVX512BW || TARGET_AVX512VNNI")
+   (V16HI "TARGET_AVX2") V8HI])
+
 (define_mode_iterator VI4_AVX
   [(V8SI "TARGET_AVX") V4SI])

@@ -14798,19 +14805,37 @@
 (define_mode_attr SDOT_PMADD_SUF
   [(V32HI "512v32hi") (V16HI "") (V8HI "")])

+(define_mode_attr SDOT_VPDP_SUF
+  [(V32HI "v16si") (V16HI "v8si") (V8HI "v4si")])
+
 (define_expand "sdot_prod<mode>"
   [(match_operand:<sseunpackmode> 0 "register_operand")
-   (match_operand:VI2_AVX2 1 "register_operand")
-   (match_operand:VI2_AVX2 2 "register_operand")
+   (match_operand:VI2_AVX512VNNIBW 1 "register_operand")
+   (match_operand:VI2_AVX512VNNIBW 2 "register_operand")
    (match_operand:<sseunpackmode> 3 "register_operand")]
   "TARGET_SSE2"
 {
-  rtx t = gen_reg_rtx (<sseunpackmode>mode);
-  emit_insn (gen_<sse2_avx2>_pmaddwd<SDOT_PMADD_SUF> (t, operands[1], operands[2]));
-  emit_insn (gen_rtx_SET (operands[0],
-			  gen_rtx_PLUS (<sseunpackmode>mode,
-					operands[3], t)));
-  DONE;
+  /* Try with vnni instructions.  */
+  if ((<MODE_SIZE> == 64 && TARGET_AVX512VNNI)
+      || (<MODE_SIZE> < 64
+	  && ((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI)))
+    {
+      operands[1] = lowpart_subreg (<sseunpackmode>mode, operands[1], <MODE>mode);
+      operands[2] = lowpart_subreg (<sseunpackmode>mode, operands[2], <MODE>mode);
+      emit_insn (gen_rtx_SET (operands[0], operands[3]));
+      emit_insn (gen_vpdpwssd_<SDOT_VPDP_SUF> (operands[0], operands[3],
+					       operands[1], operands[2]));
+    }
+  /* Otherwise use pmaddwd + paddd.  */
+  else
+    {
+      rtx t = gen_reg_rtx (<sseunpackmode>mode);
+      emit_insn (gen_<sse2_avx2>_pmaddwd<SDOT_PMADD_SUF> (t, operands[1], operands[2]));
+      emit_insn (gen_rtx_SET (operands[0],
+			      gen_rtx_PLUS (<sseunpackmode>mode,
+					    operands[3], t)));
+    }
+  DONE;
 })

 ;; Normally we use widen_mul_even/odd, but combine can't quite get it all
@@ -27065,6 +27090,29 @@
   [(set_attr ("prefix") ("evex"))
    (set_attr "mode" "<sseinsnmode>")])

+(define_mode_attr VI1SI
+  [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")])
+
+(define_mode_attr vi1si
+  [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")])
+
+(define_expand "usdot_prod<mode>"
+  [(match_operand:<VI1SI> 0 "register_operand")
+   (match_operand:VI1_AVX512VNNI 1 "register_operand")
+   (match_operand:VI1_AVX512VNNI 2 "register_operand")
+   (match_operand:<VI1SI> 3 "register_operand")]
+  "(<MODE_SIZE> == 64
+    || ((TARGET_AVX512VNNI && TARGET_AVX512VL)
+	|| TARGET_AVXVNNI))"
+{
+  operands[1] = lowpart_subreg (<VI1SI>mode, operands[1], <MODE>mode);
+  operands[2] = lowpart_subreg (<VI1SI>mode, operands[2], <MODE>mode);
+  emit_insn (gen_rtx_SET (operands[0], operands[3]));
+  emit_insn (gen_vpdpbusd_<vi1si> (operands[0], operands[3],
+				   operands[1], operands[2]));
+  DONE;
+})
+
 (define_insn "vpdpbusd_v16si"
   [(set (match_operand:V16SI 0 "register_operand" "=v")
 	(unspec:V16SI
diff --git a/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-1.c b/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-1.c
new file mode 100644
index 00000000000..844f37ddfc1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-1.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -mavx512vnni -mavx512vl -O2" } */
+
+/* { dg-final { scan-assembler "vpdpwssd\t" } } */
+/* { dg-final { scan-assembler "vpdpbusd\t" } } */
+/* { dg-final { scan-assembler-not "vpmaddwd\t" } } */
+
+int __attribute__((noinline, noclone, optimize("tree-vectorize")))
+sdot_prod_hi (short * restrict a, short * restrict b,
+	      int c, int n)
+{
+  int i;
+  for (i = 0; i < n; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("tree-vectorize")))
+usdot_prod_qi (unsigned char * restrict a, char *restrict b,
+	       int c, int n)
+{
+  int i;
+  for (i = 0; i < n; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
diff --git a/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-2.c b/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-2.c
new file mode 100644
index 00000000000..dc8047d5644
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vnni-auto-vectorize-2.c
@@ -0,0 +1,72 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -mavx512vnni -mavx512vl" } */
+/* { dg-require-effective-target avx512vnni } */
+/* { dg-require-effective-target avx512vl } */
+
+static void vnni_test (void);
+#define DO_TEST vnni_test
+#define AVX512VNNI
+#define AVX512VL
+#include "avx512f-check.h"
+#include "vnni-auto-vectorize-1.c"
+
+#define N 256
+unsigned char a_u8[N];
+char b_i8[N];
+short a_i16[N], b_i16[N];
+int i8_exp, i8_ref, i16_exp, i16_ref;
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prod_hi_scalar (short * restrict a, short * restrict b,
+		     int c, int n)
+{
+  int i;
+  for (i = 0; i < n; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prod_qi_scalar (unsigned char * restrict a, char *restrict b,
+		      int c, int n)
+{
+  int i;
+  for (i = 0; i < n; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+void init()
+{
+  int i;
+
+  i8_exp = i8_ref = 127;
+  i16_exp = i16_ref = 65535;
+
+  for (i = 0; i < N; i++)
+    {
+      a_u8[i] = (i + 3) % 256;
+      b_i8[i] = (i + 1) % 128;
+      a_i16[i] = i * 2;
+      b_i16[i] = -i + 2;
+    }
+}
+
+static void vnni_test()
+{
+  init ();
+  i16_exp = sdot_prod_hi (a_i16, b_i16, i16_exp, N);
+  i16_ref = sdot_prod_hi_scalar (a_i16, b_i16, i16_ref, N);
+  if (i16_exp != i16_ref)
+    abort ();
+
+  init ();
+  i8_exp = usdot_prod_qi (a_u8, b_i8, i8_exp, N);
+  i8_ref = usdot_prod_qi_scalar (a_u8, b_i8, i8_ref, N);
+  if (i8_exp != i8_ref)
+    abort ();
+}