[1/2,x86] Support dot_prod optabs for 64-bit vector.

Message ID 20240428055819.2313356-1-hongtao.liu@intel.com
State New
Series [1/2,x86] Support dot_prod optabs for 64-bit vector.

Commit Message

liuhongt April 28, 2024, 5:58 a.m. UTC
Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}.
Ready to push to trunk.

gcc/ChangeLog:

	PR target/113079
	* config/i386/mmx.md (usdot_prodv8qi): New expander.
	(sdot_prodv8qi): Ditto.
	(udot_prodv8qi): Ditto.
	(usdot_prodv4hi): Ditto.
	(udot_prodv4hi): Ditto.
	(sdot_prodv4hi): Ditto.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/pr113079.c: New test.
	* gcc.target/i386/pr113079-2.c: New test.
	* gcc.target/i386/sse4-pr113079-2.c: New test.
---
 gcc/config/i386/mmx.md                        | 195 ++++++++++++++++++
 gcc/testsuite/gcc.target/i386/pr113079-2.c    | 161 +++++++++++++++
 gcc/testsuite/gcc.target/i386/pr113079.c      |  57 +++++
 .../gcc.target/i386/sse4-pr113079-2.c         | 158 ++++++++++++++
 4 files changed, 571 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr113079-2.c
 create mode 100644 gcc/testsuite/gcc.target/i386/pr113079.c
 create mode 100644 gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
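
For reference (not part of the patch), below is a rough C intrinsics sketch of the SSE4.1 fallback sequence the sdot_prodv8qi expander emits when the corresponding VNNI instruction is not available: sign-extend the eight byte elements to words, form four dword partial sums with pmaddwd, fold the two 64-bit halves with pshufd (imm8 78, i.e. the 2,3,0,1 permutation), and add the low two dwords into the V2SI accumulator.  The helper name and the explicit loads are illustrative only; only the low 64 bits of the returned vector correspond to the V2SI result.

#include <smmintrin.h>   /* SSE4.1 (_mm_cvtepi8_epi16) plus SSE2 intrinsics.  */
#include <stdint.h>

/* Illustrative helper, not from the patch: dot product of eight signed
   bytes from A and B, accumulated into ACC.  Only the low 64 bits of the
   result are meaningful, mirroring the V2SI destination.  */
static inline __m128i
sdot_prodv8qi_sse41 (const int8_t *a, const int8_t *b, __m128i acc)
{
  __m128i va = _mm_loadl_epi64 ((const __m128i *) a); /* low 8 bytes of A.  */
  __m128i vb = _mm_loadl_epi64 ((const __m128i *) b); /* low 8 bytes of B.  */
  __m128i wa = _mm_cvtepi8_epi16 (va);		/* Sign-extend to 8 words.  */
  __m128i wb = _mm_cvtepi8_epi16 (vb);
  __m128i dw = _mm_madd_epi16 (wa, wb);		/* pmaddwd: 4 dword sums.  */
  __m128i hi = _mm_shuffle_epi32 (dw, 78);	/* Swap 64-bit halves.  */
  __m128i lo = _mm_add_epi32 (dw, hi);		/* Fold into low 2 dwords.  */
  return _mm_add_epi32 (acc, lo);		/* Add the V2SI accumulator.  */
}

For usdot_prodv8qi the first operand would be zero-extended (_mm_cvtepu8_epi16) instead, and for udot_prodv8qi both operands are zero-extended; zero-extended bytes stay within the signed 16-bit range, so pmaddwd still computes the exact products.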

Patch

diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index 9a8d6030d8b..5f342497885 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -6342,6 +6342,201 @@  (define_expand "usadv8qi"
   DONE;
 })
 
+(define_expand "usdot_prodv8qi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V8QI 1 "register_operand")
+   (match_operand:V8QI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+  operands[1] = force_reg (V8QImode, operands[1]);
+  operands[2] = force_reg (V8QImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
 +  if ((TARGET_AVX512VNNI && TARGET_AVX512VL)
 +      || TARGET_AVXVNNI)
+    {
+      rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+      rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+      rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+
+      emit_insn (gen_usdot_prodv16qi (op0, op1, op2, op3));
+      emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
 +    }
 +  else
 +    {
+      rtx op1 = gen_reg_rtx (V8HImode);
+      rtx op2 = gen_reg_rtx (V8HImode);
+      rtx op3 = gen_reg_rtx (V4SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+      rtx op0_1 = gen_reg_rtx (V4SImode);
+
+      emit_move_insn (op3, CONST0_RTX (V4SImode));
+      emit_insn (gen_zero_extendv8qiv8hi2 (op1, operands[1]));
+      emit_insn (gen_extendv8qiv8hi2 (op2, operands[2]));
+      emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+      /* vec_perm (op0, 2, 3, 0, 1);  */
+      emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+      emit_insn (gen_addv4si3 (op0, op0, op0_1));
+      emit_insn (gen_addv2si3 (operands[0], operands[3],
+			       lowpart_subreg (V2SImode, op0, V4SImode)));
 +    }
 +  DONE;
+})
+
+(define_expand "sdot_prodv8qi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V8QI 1 "register_operand")
+   (match_operand:V8QI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+  operands[1] = force_reg (V8QImode, operands[1]);
+  operands[2] = force_reg (V8QImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
+  if (TARGET_AVXVNNIINT8)
+    {
+      rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+      rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+      rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+
+      emit_insn (gen_sdot_prodv16qi (op0, op1, op2, op3));
+      emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+    }
+  else
+    {
+      rtx op1 = gen_reg_rtx (V8HImode);
+      rtx op2 = gen_reg_rtx (V8HImode);
+      rtx op3 = gen_reg_rtx (V4SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+      rtx op0_1 = gen_reg_rtx (V4SImode);
+
+      emit_move_insn (op3, CONST0_RTX (V4SImode));
+      emit_insn (gen_extendv8qiv8hi2 (op1, operands[1]));
+      emit_insn (gen_extendv8qiv8hi2 (op2, operands[2]));
+      emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+      /* vec_perm (op0, 2, 3, 0, 1);  */
+      emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+      emit_insn (gen_addv4si3 (op0, op0, op0_1));
+      emit_insn (gen_addv2si3 (operands[0], operands[3],
+			       lowpart_subreg (V2SImode, op0, V4SImode)));
+    }
+  DONE;
+
+})
+
+(define_expand "udot_prodv8qi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V8QI 1 "register_operand")
+   (match_operand:V8QI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_MMX_WITH_SSE && TARGET_SSE4_1"
+{
+  operands[1] = force_reg (V8QImode, operands[1]);
+  operands[2] = force_reg (V8QImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
+  if (TARGET_AVXVNNIINT8)
+    {
+      rtx op1 = lowpart_subreg (V16QImode, operands[1], V8QImode);
+      rtx op2 = lowpart_subreg (V16QImode, operands[2], V8QImode);
+      rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+
+      emit_insn (gen_udot_prodv16qi (op0, op1, op2, op3));
+      emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+    }
+  else
+    {
+      rtx op1 = gen_reg_rtx (V8HImode);
+      rtx op2 = gen_reg_rtx (V8HImode);
+      rtx op3 = gen_reg_rtx (V4SImode);
+      rtx op0 = gen_reg_rtx (V4SImode);
+      rtx op0_1 = gen_reg_rtx (V4SImode);
+
+      emit_move_insn (op3, CONST0_RTX (V4SImode));
+      emit_insn (gen_zero_extendv8qiv8hi2 (op1, operands[1]));
+      emit_insn (gen_zero_extendv8qiv8hi2 (op2, operands[2]));
+      emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+
+      /* vec_perm (op0, 2, 3, 0, 1);  */
+      emit_insn (gen_sse2_pshufd (op0_1, op0, GEN_INT (78)));
+      emit_insn (gen_addv4si3 (op0, op0, op0_1));
+      emit_insn (gen_addv2si3 (operands[0], operands[3],
+			       lowpart_subreg (V2SImode, op0, V4SImode)));
+    }
+  DONE;
+
+})
+
+(define_expand "usdot_prodv4hi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V4HI 1 "register_operand")
+   (match_operand:V4HI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_AVXVNNIINT16 && TARGET_MMX_WITH_SSE"
+{
+  operands[1] = force_reg (V4HImode, operands[1]);
+  operands[2] = force_reg (V4HImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
+  rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+  rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+  rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+  rtx op0 = gen_reg_rtx (V4SImode);
+
+  emit_insn (gen_usdot_prodv8hi (op0, op1, op2, op3));
+  emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+  DONE;
+})
+
+(define_expand "udot_prodv4hi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V4HI 1 "register_operand")
+   (match_operand:V4HI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_AVXVNNIINT16 && TARGET_MMX_WITH_SSE"
+{
+  operands[1] = force_reg (V4HImode, operands[1]);
+  operands[2] = force_reg (V4HImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
+  rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+  rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+  rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+  rtx op0 = gen_reg_rtx (V4SImode);
+
+  emit_insn (gen_udot_prodv8hi (op0, op1, op2, op3));
+  emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+  DONE;
+})
+
+(define_expand "sdot_prodv4hi"
+  [(match_operand:V2SI 0 "register_operand")
+   (match_operand:V4HI 1 "register_operand")
+   (match_operand:V4HI 2 "register_operand")
+   (match_operand:V2SI 3 "register_operand")]
+  "TARGET_MMX_WITH_SSE"
+{
+  operands[1] = force_reg (V4HImode, operands[1]);
+  operands[2] = force_reg (V4HImode, operands[2]);
+  operands[3] = force_reg (V2SImode, operands[3]);
+
+  rtx op1 = lowpart_subreg (V8HImode, operands[1], V4HImode);
+  rtx op2 = lowpart_subreg (V8HImode, operands[2], V4HImode);
+  rtx op3 = lowpart_subreg (V4SImode, operands[3], V2SImode);
+  rtx op0 = gen_reg_rtx (V4SImode);
+
+  emit_insn (gen_sdot_prodv8hi (op0, op1, op2, op3));
+  emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode));
+  DONE;
+})
+
 (define_insn_and_split "mmx_pmovmskb"
   [(set (match_operand:SI 0 "register_operand" "=r,r,jr")
 	(unspec:SI [(match_operand:V8QI 1 "register_operand" "y,x,x")]
diff --git a/gcc/testsuite/gcc.target/i386/pr113079-2.c b/gcc/testsuite/gcc.target/i386/pr113079-2.c
new file mode 100644
index 00000000000..2f0e4e824df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr113079-2.c
@@ -0,0 +1,161 @@ 
+/* { dg-do run } */
+/* { dg-options "-O2 -mavxvnniint8 -mavxvnni -mavxvnniint16" } */
+/* { dg-require-effective-target avxvnniint16 } */
+/* { dg-require-effective-target avxvnniint8 } */
+
+#define AVXVNNIINT16
+#define AVXVNNIINT8
+#ifndef CHECK
+#define CHECK "avx-check.h"
+#endif
+
+#ifndef TEST
+#define TEST avx_test
+#endif
+
+#include CHECK
+#include "pr113079.c"
+
+#define N 256
+
+short hs1[4], hs2[4];
+unsigned short hu1[4], hu2[4];
+char qs1[16], qs2[16];
+unsigned char qu1[16], qu2[16];
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv4hi_scalar (unsigned short *a, short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv4hi_scalar (unsigned short *a, unsigned short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv4hi_scalar (short *a, short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv8qi_scalar (unsigned char *a, char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv8qi_scalar (unsigned char *a, unsigned char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv8qi_scalar (char *a, char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+void init ()
+{
+  int i;
+
+  for (i = 0; i < 4; i++)
+    {
+      hs1[i] = -i + 2;
+      hs2[i] = -i * 2;
+      hu1[i] = i * 3;
+      hu2[i] = i * 4;
+    }
+
+  for (i = 0; i < 8; i++)
+    {
+      qs1[i] = -i + 2;
+      qs2[i] = -i * 2;
+      qu1[i] = i * 3;
+      qu2[i] = i * 4;
+    }
+
+}
+
+void
+TEST (void)
+{
+  init ();
+  int usdot_prodv8qi_ref;
+  int sdot_prodv8qi_ref;
+  int udot_prodv8qi_ref;
+  int usdot_prodv4hi_ref;
+  int sdot_prodv4hi_ref;
+  int udot_prodv4hi_ref;
+
+  int usdot_prodv8qi_exp;
+  int sdot_prodv8qi_exp;
+  int udot_prodv8qi_exp;
+  int usdot_prodv4hi_exp;
+  int sdot_prodv4hi_exp;
+  int udot_prodv4hi_exp;
+
+  usdot_prodv8qi_ref = usdot_prodv8qi (qu1, qs1, 1);
+  usdot_prodv8qi_exp = usdot_prodv8qi_scalar (qu1, qs1, 1);
+  if (usdot_prodv8qi_ref != usdot_prodv8qi_exp)
+    abort ();
+
+  udot_prodv8qi_ref = udot_prodv8qi (qu1, qu2, 2);
+  udot_prodv8qi_exp = udot_prodv8qi_scalar (qu1, qu2, 2);
+  if (udot_prodv8qi_ref != udot_prodv8qi_exp)
+    abort ();
+
+  sdot_prodv8qi_ref = sdot_prodv8qi (qs1, qs2, 3);
+  sdot_prodv8qi_exp = sdot_prodv8qi_scalar (qs1, qs2, 3);
+  if (sdot_prodv8qi_ref != sdot_prodv8qi_exp)
+    abort ();
+
+  usdot_prodv4hi_ref = usdot_prodv4hi (hu1, hs1, 4);
+  usdot_prodv4hi_exp = usdot_prodv4hi_scalar (hu1, hs1, 4);
+  if (usdot_prodv4hi_ref != usdot_prodv4hi_exp)
+    abort ();
+
+  udot_prodv4hi_ref = udot_prodv4hi (hu1, hu2, 5);
+  udot_prodv4hi_exp = udot_prodv4hi_scalar (hu1, hu2, 5);
+  if (udot_prodv4hi_ref != udot_prodv4hi_exp)
+    abort ();
+
+  sdot_prodv4hi_ref = sdot_prodv4hi (hs1, hs2, 6);
+  sdot_prodv4hi_exp = sdot_prodv4hi_scalar (hs1, hs2, 6);
+  if (sdot_prodv4hi_ref != sdot_prodv4hi_exp)
+    abort ();
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr113079.c b/gcc/testsuite/gcc.target/i386/pr113079.c
new file mode 100644
index 00000000000..a2232c22255
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr113079.c
@@ -0,0 +1,57 @@ 
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mavxvnniint8 -mavxvnniint16 -O2 -mavxvnni" } */
+/* { dg-final { scan-assembler-times "vpdpbusd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpbssd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpbuud" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwssd" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwuud" 1 } } */
+/* { dg-final { scan-assembler-times "vpdpwusd" 1 } } */
+
+int
+__attribute__((noinline))
+usdot_prodv8qi (unsigned char* p, char* q, int sum)
+{
+  for (int i = 0; i != 8; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
+
+int
+udot_prodv8qi (unsigned char* p, unsigned char* q, int sum)
+{
+  for (int i = 0; i != 8; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
+
+int
+sdot_prodv8qi (char* p, char* q, int sum)
+{
+  for (int i = 0; i != 8; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
+
+int
+usdot_prodv4hi (unsigned short* p, short* q, int sum)
+{
+  for (int i = 0; i != 4; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
+
+int
+sdot_prodv4hi (short* p, short* q, int sum)
+{
+  for (int i = 0; i != 4; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
+
+int
+udot_prodv4hi (unsigned short* p, unsigned short* q, int sum)
+{
+  for (int i = 0; i != 4; i++)
+    sum += p[i] * q[i];
+  return sum;
+}
diff --git a/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c b/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
new file mode 100644
index 00000000000..1d06635aa6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/sse4-pr113079-2.c
@@ -0,0 +1,158 @@ 
+/* { dg-do run } */
+/* { dg-require-effective-target sse4 } */
+/* { dg-options "-O2 -msse4.1" } */
+
+#ifndef CHECK_H
+#define CHECK_H "sse4_1-check.h"
+#endif
+
+#ifndef TEST
+#define TEST sse4_1_test
+#endif
+
+#include CHECK_H
+#include "pr113079.c"
+
+#define N 256
+
+short hs1[4], hs2[4];
+unsigned short hu1[4], hu2[4];
+char qs1[16], qs2[16];
+unsigned char qu1[16], qu2[16];
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv4hi_scalar (unsigned short *a, short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv4hi_scalar (unsigned short *a, unsigned short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv4hi_scalar (short *a, short *b, int c)
+{
+  int i;
+  for (i = 0; i < 4; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+usdot_prodv8qi_scalar (unsigned char *a, char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+udot_prodv8qi_scalar (unsigned char *a, unsigned char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+int __attribute__((noinline, noclone, optimize("no-tree-vectorize")))
+sdot_prodv8qi_scalar (char *a, char *b, int c)
+{
+  int i;
+  for (i = 0; i < 8; i++)
+    {
+      c += ((int) a[i] * (int) b[i]);
+    }
+  return c;
+}
+
+void init ()
+{
+  int i;
+
+  for (i = 0; i < 4; i++)
+    {
+      hs1[i] = -i + 2;
+      hs2[i] = -i * 2;
+      hu1[i] = i * 3;
+      hu2[i] = i * 4;
+    }
+
+  for (i = 0; i < 8; i++)
+    {
+      qs1[i] = -i + 2;
+      qs2[i] = -i * 2;
+      qu1[i] = i * 3;
+      qu2[i] = i * 4;
+    }
+
+}
+
+void
+TEST (void)
+{
+  init ();
+  int usdot_prodv8qi_ref;
+  int sdot_prodv8qi_ref;
+  int udot_prodv8qi_ref;
+  int usdot_prodv4hi_ref;
+  int sdot_prodv4hi_ref;
+  int udot_prodv4hi_ref;
+
+  int usdot_prodv8qi_exp;
+  int sdot_prodv8qi_exp;
+  int udot_prodv8qi_exp;
+  int usdot_prodv4hi_exp;
+  int sdot_prodv4hi_exp;
+  int udot_prodv4hi_exp;
+
+  usdot_prodv8qi_ref = usdot_prodv8qi (qu1, qs1, 1);
+  usdot_prodv8qi_exp = usdot_prodv8qi_scalar (qu1, qs1, 1);
+  if (usdot_prodv8qi_ref != usdot_prodv8qi_exp)
+    abort ();
+
+  udot_prodv8qi_ref = udot_prodv8qi (qu1, qu2, 2);
+  udot_prodv8qi_exp = udot_prodv8qi_scalar (qu1, qu2, 2);
+  if (udot_prodv8qi_ref != udot_prodv8qi_exp)
+    abort ();
+
+  sdot_prodv8qi_ref = sdot_prodv8qi (qs1, qs2, 3);
+  sdot_prodv8qi_exp = sdot_prodv8qi_scalar (qs1, qs2, 3);
+  if (sdot_prodv8qi_ref != sdot_prodv8qi_exp)
+    abort ();
+
+  usdot_prodv4hi_ref = usdot_prodv4hi (hu1, hs1, 4);
+  usdot_prodv4hi_exp = usdot_prodv4hi_scalar (hu1, hs1, 4);
+  if (usdot_prodv4hi_ref != usdot_prodv4hi_exp)
+    abort ();
+
+  udot_prodv4hi_ref = udot_prodv4hi (hu1, hu2, 5);
+  udot_prodv4hi_exp = udot_prodv4hi_scalar (hu1, hu2, 5);
+  if (udot_prodv4hi_ref != udot_prodv4hi_exp)
+    abort ();
+
+  sdot_prodv4hi_ref = sdot_prodv4hi (hs1, hs2, 6);
+  sdot_prodv4hi_exp = sdot_prodv4hi_scalar (hs1, hs2, 6);
+  if (sdot_prodv4hi_ref != sdot_prodv4hi_exp)
+    abort ();
+}