@@ -41,7 +41,7 @@
#define VMOVNT vmovntdq
/* Often need to access xmm portion. */
-#define VEC_xmm VEC_any_xmm
-#define VEC VEC_any_ymm
+#define VMM_128 VMM_any_xmm
+#define VMM VMM_any_ymm
#endif
@@ -32,8 +32,8 @@
#define VMOVA vmovdqa64
#define VMOVNT vmovntdq
-#define VEC_xmm VEC_hi_xmm
-#define VEC_ymm VEC_hi_ymm
-#define VEC_zmm VEC_hi_zmm
+#define VMM_128 VMM_hi_xmm
+#define VMM_256 VMM_hi_ymm
+#define VMM_512 VMM_hi_zmm
#endif
@@ -28,8 +28,11 @@
#include "evex-vecs-common.h"
#define USE_WITH_EVEX256 1
-#define SECTION(p) p##.evex
-#define VEC VEC_ymm
+#ifndef SECTION
+# define SECTION(p) p##.evex
+#endif
+#define VMM VMM_256
+#define VMM_lo VMM_any_ymm
#endif
@@ -28,8 +28,11 @@
#include "evex-vecs-common.h"
#define USE_WITH_EVEX512 1
-#define SECTION(p) p##.evex512
-#define VEC VEC_zmm
+#ifndef SECTION
+# define SECTION(p) p##.evex512
+#endif
+#define VMM VMM_512
+#define VMM_lo VMM_any_zmm
#endif
@@ -1,16 +1,9 @@
-#if IS_IN (libc)
-# define VEC_SIZE 32
-# define VEC(i) ymm##i
-# define VMOVNT vmovntdq
-# define VMOVU vmovdqu
-# define VMOVA vmovdqa
-# define MOV_SIZE 4
-# define ZERO_UPPER_VEC_REGISTERS_RETURN \
- ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
+#include <isa-level.h>
-# define VZEROUPPER_RETURN jmp L(return)
+#if ISA_SHOULD_BUILD (3)
+
+# include "avx-rtm-vecs.h"
-# define SECTION(p) p##.avx.rtm
# define MEMMOVE_SYMBOL(p,s) p##_avx_##s##_rtm
# include "memmove-vec-unaligned-erms.S"
@@ -2,14 +2,7 @@
#if ISA_SHOULD_BUILD (3)
-# define VEC_SIZE 32
-# define VEC(i) ymm##i
-# define VMOVNT vmovntdq
-# define VMOVU vmovdqu
-# define VMOVA vmovdqa
-# define MOV_SIZE 4
-
-# define SECTION(p) p##.avx
+# include "avx-vecs.h"
# ifndef MEMMOVE_SYMBOL
# define MEMMOVE_SYMBOL(p,s) p##_avx_##s
@@ -2,35 +2,7 @@
#if ISA_SHOULD_BUILD (4)
-# define VEC_SIZE 64
-# define XMM0 xmm16
-# define XMM1 xmm17
-# define YMM0 ymm16
-# define YMM1 ymm17
-# define VEC0 zmm16
-# define VEC1 zmm17
-# define VEC2 zmm18
-# define VEC3 zmm19
-# define VEC4 zmm20
-# define VEC5 zmm21
-# define VEC6 zmm22
-# define VEC7 zmm23
-# define VEC8 zmm24
-# define VEC9 zmm25
-# define VEC10 zmm26
-# define VEC11 zmm27
-# define VEC12 zmm28
-# define VEC13 zmm29
-# define VEC14 zmm30
-# define VEC15 zmm31
-# define VEC(i) VEC##i
-# define VMOVNT vmovntdq
-# define VMOVU vmovdqu64
-# define VMOVA vmovdqa64
-# define VZEROUPPER
-# define MOV_SIZE 6
-
-# define SECTION(p) p##.evex512
+# include "evex512-vecs.h"
# ifndef MEMMOVE_SYMBOL
# define MEMMOVE_SYMBOL(p,s) p##_avx512_##s
@@ -2,35 +2,7 @@
#if ISA_SHOULD_BUILD (4)
-# define VEC_SIZE 32
-# define XMM0 xmm16
-# define XMM1 xmm17
-# define YMM0 ymm16
-# define YMM1 ymm17
-# define VEC0 ymm16
-# define VEC1 ymm17
-# define VEC2 ymm18
-# define VEC3 ymm19
-# define VEC4 ymm20
-# define VEC5 ymm21
-# define VEC6 ymm22
-# define VEC7 ymm23
-# define VEC8 ymm24
-# define VEC9 ymm25
-# define VEC10 ymm26
-# define VEC11 ymm27
-# define VEC12 ymm28
-# define VEC13 ymm29
-# define VEC14 ymm30
-# define VEC15 ymm31
-# define VEC(i) VEC##i
-# define VMOVNT vmovntdq
-# define VMOVU vmovdqu64
-# define VMOVA vmovdqa64
-# define VZEROUPPER
-# define MOV_SIZE 6
-
-# define SECTION(p) p##.evex
+# include "evex256-vecs.h"
# ifndef MEMMOVE_SYMBOL
# define MEMMOVE_SYMBOL(p,s) p##_evex_##s
@@ -22,18 +22,9 @@
so we need this to build for ISA V2 builds. */
#if ISA_SHOULD_BUILD (2)
-# include <sysdep.h>
+# include "sse2-vecs.h"
-# define VEC_SIZE 16
-# define VEC(i) xmm##i
# define PREFETCHNT prefetchnta
-# define VMOVNT movntdq
-/* Use movups and movaps for smaller code sizes. */
-# define VMOVU movups
-# define VMOVA movaps
-# define MOV_SIZE 3
-
-# define SECTION(p) p
# ifndef MEMMOVE_SYMBOL
# define MEMMOVE_SYMBOL(p,s) p##_sse2_##s
@@ -60,14 +60,6 @@
# define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s)
#endif
-#ifndef XMM0
-# define XMM0 xmm0
-#endif
-
-#ifndef YMM0
-# define YMM0 ymm0
-#endif
-
#ifndef VZEROUPPER
# if VEC_SIZE > 16
# define VZEROUPPER vzeroupper
@@ -225,13 +217,13 @@ L(start):
cmp $VEC_SIZE, %RDX_LP
jb L(less_vec)
/* Load regardless. */
- VMOVU (%rsi), %VEC(0)
+ VMOVU (%rsi), %VMM(0)
cmp $(VEC_SIZE * 2), %RDX_LP
ja L(more_2x_vec)
/* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */
- VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx)
+ VMOVU -VEC_SIZE(%rsi,%rdx), %VMM(1)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(1), -VEC_SIZE(%rdi,%rdx)
#if !(defined USE_MULTIARCH && IS_IN (libc))
ZERO_UPPER_VEC_REGISTERS_RETURN
#else
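
The [VEC_SIZE, 2 * VEC_SIZE] path above is the usual branchless head-plus-tail idiom: both vectors are loaded before either store, so overlapping src/dst still copy correctly, and size == VEC_SIZE simply writes the same bytes twice. A minimal C model of the same idea (VEC_SIZE fixed at 32 here purely for illustration; the real value comes from the included vecs header):

#include <stddef.h>
#include <string.h>

#define VEC_SIZE 32   /* illustrative; the real value comes from the vecs header */

/* Valid for VEC_SIZE <= n <= 2 * VEC_SIZE.  Both loads happen before
   either store, so src and dst may overlap (memmove semantics), and
   n == VEC_SIZE just stores the same block twice.  */
void
copy_vec_to_2x_vec (unsigned char *dst, const unsigned char *src, size_t n)
{
  unsigned char head[VEC_SIZE], tail[VEC_SIZE];
  memcpy (head, src, VEC_SIZE);                 /* VMOVU (%rsi), %VMM(0) */
  memcpy (tail, src + n - VEC_SIZE, VEC_SIZE);  /* VMOVU -VEC_SIZE(%rsi,%rdx), %VMM(1) */
  memcpy (dst, head, VEC_SIZE);                 /* VMOVU %VMM(0), (%rdi) */
  memcpy (dst + n - VEC_SIZE, tail, VEC_SIZE);  /* VMOVU %VMM(1), -VEC_SIZE(%rdi,%rdx) */
}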
@@ -270,15 +262,15 @@ L(start_erms):
cmp $VEC_SIZE, %RDX_LP
jb L(less_vec)
/* Load regardless. */
- VMOVU (%rsi), %VEC(0)
+ VMOVU (%rsi), %VMM(0)
cmp $(VEC_SIZE * 2), %RDX_LP
ja L(movsb_more_2x_vec)
/* From VEC and to 2 * VEC. No branch when size == VEC_SIZE.
*/
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(1)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), -VEC_SIZE(%rdi, %rdx)
-L(return):
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(1)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(1), -VEC_SIZE(%rdi, %rdx)
+L(return_vzeroupper):
# if VEC_SIZE > 16
ZERO_UPPER_VEC_REGISTERS_RETURN
# else
@@ -359,10 +351,10 @@ L(between_16_31):
.p2align 4,, 10
L(between_32_63):
/* From 32 to 63. No branch when size == 32. */
- VMOVU (%rsi), %YMM0
- VMOVU -32(%rsi, %rdx), %YMM1
- VMOVU %YMM0, (%rdi)
- VMOVU %YMM1, -32(%rdi, %rdx)
+ VMOVU (%rsi), %VMM_256(0)
+ VMOVU -32(%rsi, %rdx), %VMM_256(1)
+ VMOVU %VMM_256(0), (%rdi)
+ VMOVU %VMM_256(1), -32(%rdi, %rdx)
VZEROUPPER_RETURN
#endif
@@ -380,12 +372,12 @@ L(last_4x_vec):
/* Copy from 2 * VEC + 1 to 4 * VEC, inclusively. */
/* VEC(0) and VEC(1) have already been loaded. */
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(2)
- VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(3)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), VEC_SIZE(%rdi)
- VMOVU %VEC(2), -VEC_SIZE(%rdi, %rdx)
- VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi, %rdx)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(2)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(3)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(1), VEC_SIZE(%rdi)
+ VMOVU %VMM(2), -VEC_SIZE(%rdi, %rdx)
+ VMOVU %VMM(3), -(VEC_SIZE * 2)(%rdi, %rdx)
VZEROUPPER_RETURN
.p2align 4
@@ -400,24 +392,24 @@ L(more_2x_vec):
cmpq $(VEC_SIZE * 8), %rdx
ja L(more_8x_vec)
/* Load VEC(1) regardless. VEC(0) has already been loaded. */
- VMOVU VEC_SIZE(%rsi), %VEC(1)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
cmpq $(VEC_SIZE * 4), %rdx
jbe L(last_4x_vec)
/* Copy from 4 * VEC + 1 to 8 * VEC, inclusively. */
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(4)
- VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(5)
- VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(6)
- VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(7)
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(1), VEC_SIZE(%rdi)
- VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi)
- VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi)
- VMOVU %VEC(4), -VEC_SIZE(%rdi, %rdx)
- VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi, %rdx)
- VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi, %rdx)
- VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi, %rdx)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(4)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(5)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(6)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(7)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(1), VEC_SIZE(%rdi)
+ VMOVU %VMM(2), (VEC_SIZE * 2)(%rdi)
+ VMOVU %VMM(3), (VEC_SIZE * 3)(%rdi)
+ VMOVU %VMM(4), -VEC_SIZE(%rdi, %rdx)
+ VMOVU %VMM(5), -(VEC_SIZE * 2)(%rdi, %rdx)
+ VMOVU %VMM(6), -(VEC_SIZE * 3)(%rdi, %rdx)
+ VMOVU %VMM(7), -(VEC_SIZE * 4)(%rdi, %rdx)
VZEROUPPER_RETURN
.p2align 4,, 4
@@ -466,14 +458,14 @@ L(more_8x_vec_forward):
*/
/* First vec was already loaded into VEC(0). */
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5)
- VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(5)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(6)
/* Save beginning of dst. */
movq %rdi, %rcx
/* Align dst to VEC_SIZE - 1. */
orq $(VEC_SIZE - 1), %rdi
- VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7)
- VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(7)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(8)
/* Subtract dst from src. Add back after dst aligned. */
subq %rcx, %rsi
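
The three comments above compress a small pointer dance. Roughly, in C (the final "+ 1" that completes the alignment and the re-add of the src/dst difference happen in lines not shown in this hunk, so this sketch assumes them):

#include <stdint.h>

/* Sketch of the setup above: round dst up to the next VEC_SIZE boundary so
   the loop can use aligned stores, and move src by the same amount.  The
   first vector (already in %VMM(0)) and the four tail vectors loaded above
   cover the bytes this skips at the head plus the tail, and are stored
   after the loop.  */
void
align_dst_forward (unsigned char *dst, const unsigned char *src,
                   unsigned long vec_size,
                   unsigned char **aligned_dst, const unsigned char **adjusted_src)
{
  unsigned char *orig_dst = dst;        /* movq %rdi, %rcx */
  /* orq $(VEC_SIZE - 1), %rdi: one byte below the next boundary; the "+ 1"
     is assumed to happen in the elided lines.  */
  dst = (unsigned char *) (((uintptr_t) dst | (vec_size - 1)) + 1);
  /* subq %rcx, %rsi: keep src at the same distance from dst; re-added once
     dst is aligned.  */
  src += dst - orig_dst;
  *aligned_dst = dst;
  *adjusted_src = src;
}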
@@ -488,25 +480,25 @@ L(more_8x_vec_forward):
.p2align 4,, 11
L(loop_4x_vec_forward):
/* Copy 4 * VEC a time forward. */
- VMOVU (%rsi), %VEC(1)
- VMOVU VEC_SIZE(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(3)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(4)
+ VMOVU (%rsi), %VMM(1)
+ VMOVU VEC_SIZE(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(3)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(4)
subq $-(VEC_SIZE * 4), %rsi
- VMOVA %VEC(1), (%rdi)
- VMOVA %VEC(2), VEC_SIZE(%rdi)
- VMOVA %VEC(3), (VEC_SIZE * 2)(%rdi)
- VMOVA %VEC(4), (VEC_SIZE * 3)(%rdi)
+ VMOVA %VMM(1), (%rdi)
+ VMOVA %VMM(2), VEC_SIZE(%rdi)
+ VMOVA %VMM(3), (VEC_SIZE * 2)(%rdi)
+ VMOVA %VMM(4), (VEC_SIZE * 3)(%rdi)
subq $-(VEC_SIZE * 4), %rdi
cmpq %rdi, %rdx
ja L(loop_4x_vec_forward)
/* Store the last 4 * VEC. */
- VMOVU %VEC(5), (VEC_SIZE * 3)(%rdx)
- VMOVU %VEC(6), (VEC_SIZE * 2)(%rdx)
- VMOVU %VEC(7), VEC_SIZE(%rdx)
- VMOVU %VEC(8), (%rdx)
+ VMOVU %VMM(5), (VEC_SIZE * 3)(%rdx)
+ VMOVU %VMM(6), (VEC_SIZE * 2)(%rdx)
+ VMOVU %VMM(7), VEC_SIZE(%rdx)
+ VMOVU %VMM(8), (%rdx)
/* Store the first VEC. */
- VMOVU %VEC(0), (%rcx)
+ VMOVU %VMM(0), (%rcx)
/* Keep L(nop_backward) target close to jmp for 2-byte encoding.
*/
L(nop_backward):
@@ -523,12 +515,12 @@ L(more_8x_vec_backward):
addresses. */
/* First vec was also loaded into VEC(0). */
- VMOVU VEC_SIZE(%rsi), %VEC(5)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6)
+ VMOVU VEC_SIZE(%rsi), %VMM(5)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(6)
/* Beginning of region for 4x backward copy stored in rcx. */
leaq (VEC_SIZE * -4 + -1)(%rdi, %rdx), %rcx
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7)
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(8)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(7)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(8)
/* Subtract dst from src. Add back after dst aligned. */
subq %rdi, %rsi
/* Align dst. */
@@ -540,25 +532,25 @@ L(more_8x_vec_backward):
.p2align 4,, 11
L(loop_4x_vec_backward):
/* Copy 4 * VEC a time backward. */
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(1)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 1)(%rsi), %VEC(3)
- VMOVU (VEC_SIZE * 0)(%rsi), %VEC(4)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(1)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 1)(%rsi), %VMM(3)
+ VMOVU (VEC_SIZE * 0)(%rsi), %VMM(4)
addq $(VEC_SIZE * -4), %rsi
- VMOVA %VEC(1), (VEC_SIZE * 3)(%rcx)
- VMOVA %VEC(2), (VEC_SIZE * 2)(%rcx)
- VMOVA %VEC(3), (VEC_SIZE * 1)(%rcx)
- VMOVA %VEC(4), (VEC_SIZE * 0)(%rcx)
+ VMOVA %VMM(1), (VEC_SIZE * 3)(%rcx)
+ VMOVA %VMM(2), (VEC_SIZE * 2)(%rcx)
+ VMOVA %VMM(3), (VEC_SIZE * 1)(%rcx)
+ VMOVA %VMM(4), (VEC_SIZE * 0)(%rcx)
addq $(VEC_SIZE * -4), %rcx
cmpq %rcx, %rdi
jb L(loop_4x_vec_backward)
/* Store the first 4 * VEC. */
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(5), VEC_SIZE(%rdi)
- VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi)
- VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(5), VEC_SIZE(%rdi)
+ VMOVU %VMM(6), (VEC_SIZE * 2)(%rdi)
+ VMOVU %VMM(7), (VEC_SIZE * 3)(%rdi)
/* Store the last VEC. */
- VMOVU %VEC(8), -VEC_SIZE(%rdx, %rdi)
+ VMOVU %VMM(8), -VEC_SIZE(%rdx, %rdi)
VZEROUPPER_RETURN
#if defined USE_MULTIARCH && IS_IN (libc)
@@ -568,7 +560,7 @@ L(loop_4x_vec_backward):
# if ALIGN_MOVSB
L(skip_short_movsb_check):
# if MOVSB_ALIGN_TO > VEC_SIZE
- VMOVU VEC_SIZE(%rsi), %VEC(1)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
# endif
# if MOVSB_ALIGN_TO > (VEC_SIZE * 2)
# error Unsupported MOVSB_ALIGN_TO
@@ -597,9 +589,9 @@ L(skip_short_movsb_check):
rep movsb
- VMOVU %VEC(0), (%r8)
+ VMOVU %VMM(0), (%r8)
# if MOVSB_ALIGN_TO > VEC_SIZE
- VMOVU %VEC(1), VEC_SIZE(%r8)
+ VMOVU %VMM(1), VEC_SIZE(%r8)
# endif
VZEROUPPER_RETURN
# endif
@@ -640,7 +632,7 @@ L(movsb):
# endif
# if ALIGN_MOVSB
# if MOVSB_ALIGN_TO > VEC_SIZE
- VMOVU VEC_SIZE(%rsi), %VEC(1)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
# endif
# if MOVSB_ALIGN_TO > (VEC_SIZE * 2)
# error Unsupported MOVSB_ALIGN_TO
@@ -664,9 +656,9 @@ L(movsb_align_dst):
rep movsb
/* Store VECs loaded for aligning. */
- VMOVU %VEC(0), (%r8)
+ VMOVU %VMM(0), (%r8)
# if MOVSB_ALIGN_TO > VEC_SIZE
- VMOVU %VEC(1), VEC_SIZE(%r8)
+ VMOVU %VMM(1), VEC_SIZE(%r8)
# endif
VZEROUPPER_RETURN
# else /* !ALIGN_MOVSB. */
@@ -701,18 +693,18 @@ L(large_memcpy_2x):
/* First vec was also loaded into VEC(0). */
# if VEC_SIZE < 64
- VMOVU VEC_SIZE(%rsi), %VEC(1)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
# if VEC_SIZE < 32
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
# endif
# endif
- VMOVU %VEC(0), (%rdi)
+ VMOVU %VMM(0), (%rdi)
# if VEC_SIZE < 64
- VMOVU %VEC(1), VEC_SIZE(%rdi)
+ VMOVU %VMM(1), VEC_SIZE(%rdi)
# if VEC_SIZE < 32
- VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi)
- VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi)
+ VMOVU %VMM(2), (VEC_SIZE * 2)(%rdi)
+ VMOVU %VMM(3), (VEC_SIZE * 3)(%rdi)
# endif
# endif
@@ -761,12 +753,12 @@ L(loop_large_memcpy_2x_inner):
PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE)
PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE * 2)
/* Load vectors from rsi. */
- LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
- LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+ LOAD_ONE_SET((%rsi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
+ LOAD_ONE_SET((%rsi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
subq $-LARGE_LOAD_SIZE, %rsi
/* Non-temporal store vectors to rdi. */
- STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
- STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
+ STORE_ONE_SET((%rdi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
+ STORE_ONE_SET((%rdi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
subq $-LARGE_LOAD_SIZE, %rdi
decl %ecx
jnz L(loop_large_memcpy_2x_inner)
@@ -785,31 +777,31 @@ L(loop_large_memcpy_2x_tail):
/* Copy 4 * VEC a time forward with non-temporal stores. */
PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
- VMOVU (%rsi), %VEC(0)
- VMOVU VEC_SIZE(%rsi), %VEC(1)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
+ VMOVU (%rsi), %VMM(0)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
subq $-(VEC_SIZE * 4), %rsi
addl $-(VEC_SIZE * 4), %edx
- VMOVA %VEC(0), (%rdi)
- VMOVA %VEC(1), VEC_SIZE(%rdi)
- VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi)
- VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi)
+ VMOVA %VMM(0), (%rdi)
+ VMOVA %VMM(1), VEC_SIZE(%rdi)
+ VMOVA %VMM(2), (VEC_SIZE * 2)(%rdi)
+ VMOVA %VMM(3), (VEC_SIZE * 3)(%rdi)
subq $-(VEC_SIZE * 4), %rdi
cmpl $(VEC_SIZE * 4), %edx
ja L(loop_large_memcpy_2x_tail)
L(large_memcpy_2x_end):
/* Store the last 4 * VEC. */
- VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
- VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
- VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(3)
-
- VMOVU %VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
- VMOVU %VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
- VMOVU %VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
- VMOVU %VEC(3), -VEC_SIZE(%rdi, %rdx)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(0)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(1)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(2)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(3)
+
+ VMOVU %VMM(0), -(VEC_SIZE * 4)(%rdi, %rdx)
+ VMOVU %VMM(1), -(VEC_SIZE * 3)(%rdi, %rdx)
+ VMOVU %VMM(2), -(VEC_SIZE * 2)(%rdi, %rdx)
+ VMOVU %VMM(3), -VEC_SIZE(%rdi, %rdx)
VZEROUPPER_RETURN
.p2align 4
@@ -831,16 +823,16 @@ L(loop_large_memcpy_4x_inner):
PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 2 + PREFETCHED_LOAD_SIZE)
PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 3 + PREFETCHED_LOAD_SIZE)
/* Load vectors from rsi. */
- LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
- LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
- LOAD_ONE_SET((%rsi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
- LOAD_ONE_SET((%rsi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
+ LOAD_ONE_SET((%rsi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
+ LOAD_ONE_SET((%rsi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
+ LOAD_ONE_SET((%rsi), PAGE_SIZE * 2, %VMM(8), %VMM(9), %VMM(10), %VMM(11))
+ LOAD_ONE_SET((%rsi), PAGE_SIZE * 3, %VMM(12), %VMM(13), %VMM(14), %VMM(15))
subq $-LARGE_LOAD_SIZE, %rsi
/* Non-temporal store vectors to rdi. */
- STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
- STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
- STORE_ONE_SET((%rdi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
- STORE_ONE_SET((%rdi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
+ STORE_ONE_SET((%rdi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
+ STORE_ONE_SET((%rdi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
+ STORE_ONE_SET((%rdi), PAGE_SIZE * 2, %VMM(8), %VMM(9), %VMM(10), %VMM(11))
+ STORE_ONE_SET((%rdi), PAGE_SIZE * 3, %VMM(12), %VMM(13), %VMM(14), %VMM(15))
subq $-LARGE_LOAD_SIZE, %rdi
decl %ecx
jnz L(loop_large_memcpy_4x_inner)
@@ -858,31 +850,31 @@ L(loop_large_memcpy_4x_tail):
/* Copy 4 * VEC a time forward with non-temporal stores. */
PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
- VMOVU (%rsi), %VEC(0)
- VMOVU VEC_SIZE(%rsi), %VEC(1)
- VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
- VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
+ VMOVU (%rsi), %VMM(0)
+ VMOVU VEC_SIZE(%rsi), %VMM(1)
+ VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
+ VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
subq $-(VEC_SIZE * 4), %rsi
addl $-(VEC_SIZE * 4), %edx
- VMOVA %VEC(0), (%rdi)
- VMOVA %VEC(1), VEC_SIZE(%rdi)
- VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi)
- VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi)
+ VMOVA %VMM(0), (%rdi)
+ VMOVA %VMM(1), VEC_SIZE(%rdi)
+ VMOVA %VMM(2), (VEC_SIZE * 2)(%rdi)
+ VMOVA %VMM(3), (VEC_SIZE * 3)(%rdi)
subq $-(VEC_SIZE * 4), %rdi
cmpl $(VEC_SIZE * 4), %edx
ja L(loop_large_memcpy_4x_tail)
L(large_memcpy_4x_end):
/* Store the last 4 * VEC. */
- VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
- VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
- VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
- VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(3)
-
- VMOVU %VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
- VMOVU %VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
- VMOVU %VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
- VMOVU %VEC(3), -VEC_SIZE(%rdi, %rdx)
+ VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(0)
+ VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(1)
+ VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(2)
+ VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(3)
+
+ VMOVU %VMM(0), -(VEC_SIZE * 4)(%rdi, %rdx)
+ VMOVU %VMM(1), -(VEC_SIZE * 3)(%rdi, %rdx)
+ VMOVU %VMM(2), -(VEC_SIZE * 2)(%rdi, %rdx)
+ VMOVU %VMM(3), -VEC_SIZE(%rdi, %rdx)
VZEROUPPER_RETURN
#endif
END (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
@@ -31,7 +31,7 @@
# endif
# define PAGE_SIZE 4096
-# define VECMATCH VEC(0)
+# define VMMMATCH VMM(0)
.section SECTION(.text), "ax", @progbits
ENTRY_P2ALIGN(MEMRCHR, 6)
@@ -47,7 +47,7 @@ ENTRY_P2ALIGN(MEMRCHR, 6)
correct page cross check and 2) it correctly sets up end ptr to be
subtract by lzcnt aligned. */
leaq -1(%rdi, %rdx), %rax
- vpbroadcastb %esi, %VECMATCH
+ vpbroadcastb %esi, %VMMMATCH
/* Check if we can load 1x VEC without cross a page. */
testl $(PAGE_SIZE - VEC_SIZE), %eax
@@ -55,7 +55,7 @@ ENTRY_P2ALIGN(MEMRCHR, 6)
/* Don't use rax for pointer here because EVEX has better encoding with
offset % VEC_SIZE == 0. */
- vpcmpb $0, -(VEC_SIZE)(%rdi, %rdx), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE)(%rdi, %rdx), %VMMMATCH, %k0
kmovd %k0, %ecx
/* Fall through for rdx (len) <= VEC_SIZE (expect small sizes). */
@@ -96,7 +96,7 @@ L(more_1x_vec):
movq %rax, %rdx
/* Need no matter what. */
- vpcmpb $0, -(VEC_SIZE)(%rax), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
subq %rdi, %rdx
@@ -115,7 +115,7 @@ L(last_2x_vec):
/* Don't use rax for pointer here because EVEX has better encoding with
offset % VEC_SIZE == 0. */
- vpcmpb $0, -(VEC_SIZE * 2)(%rdi, %rdx), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE * 2)(%rdi, %rdx), %VMMMATCH, %k0
kmovd %k0, %ecx
/* NB: 64-bit lzcnt. This will naturally add 32 to position. */
lzcntq %rcx, %rcx
@@ -131,7 +131,7 @@ L(last_2x_vec):
L(page_cross):
movq %rax, %rsi
andq $-VEC_SIZE, %rsi
- vpcmpb $0, (%rsi), %VECMATCH, %k0
+ vpcmpb $0, (%rsi), %VMMMATCH, %k0
kmovd %k0, %r8d
/* Shift out negative alignment (because we are starting from endptr and
working backwards). */
@@ -165,13 +165,13 @@ L(more_2x_vec):
testl %ecx, %ecx
jnz L(ret_vec_x0_dec)
- vpcmpb $0, -(VEC_SIZE * 2)(%rax), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE * 2)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
testl %ecx, %ecx
jnz L(ret_vec_x1)
/* Need no matter what. */
- vpcmpb $0, -(VEC_SIZE * 3)(%rax), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE * 3)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
subq $(VEC_SIZE * 4), %rdx
@@ -185,7 +185,7 @@ L(last_vec):
/* Need no matter what. */
- vpcmpb $0, -(VEC_SIZE * 4)(%rax), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE * 4)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
lzcntl %ecx, %ecx
subq $(VEC_SIZE * 3 + 1), %rax
@@ -220,7 +220,7 @@ L(more_4x_vec):
testl %ecx, %ecx
jnz L(ret_vec_x2)
- vpcmpb $0, -(VEC_SIZE * 4)(%rax), %VECMATCH, %k0
+ vpcmpb $0, -(VEC_SIZE * 4)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
testl %ecx, %ecx
@@ -243,17 +243,17 @@ L(more_4x_vec):
L(loop_4x_vec):
/* Store 1 where not-equals and 0 where equals in k1 (used to mask later
on). */
- vpcmpb $4, (VEC_SIZE * 3)(%rax), %VECMATCH, %k1
+ vpcmpb $4, (VEC_SIZE * 3)(%rax), %VMMMATCH, %k1
/* VEC(2/3) will have zero-byte where we found a CHAR. */
- vpxorq (VEC_SIZE * 2)(%rax), %VECMATCH, %VEC(2)
- vpxorq (VEC_SIZE * 1)(%rax), %VECMATCH, %VEC(3)
- vpcmpb $0, (VEC_SIZE * 0)(%rax), %VECMATCH, %k4
+ vpxorq (VEC_SIZE * 2)(%rax), %VMMMATCH, %VMM(2)
+ vpxorq (VEC_SIZE * 1)(%rax), %VMMMATCH, %VMM(3)
+ vpcmpb $0, (VEC_SIZE * 0)(%rax), %VMMMATCH, %k4
/* Combine VEC(2/3) with min and maskz with k1 (k1 has zero bit where
CHAR is found and VEC(2/3) have zero-byte where CHAR is found. */
- vpminub %VEC(2), %VEC(3), %VEC(3){%k1}{z}
- vptestnmb %VEC(3), %VEC(3), %k2
+ vpminub %VMM(2), %VMM(3), %VMM(3){%k1}{z}
+ vptestnmb %VMM(3), %VMM(3), %k2
/* Any 1s and we found CHAR. */
kortestd %k2, %k4
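
The point of the combine above is that match information from four vectors funnels into a single kortest, i.e. one branch per 4 * VEC_SIZE bytes. A byte-level C model of what k1, k4 and VMM(2)/VMM(3) carry (chunk0..chunk3 name the loads at offsets 0..3 * VEC_SIZE; the function itself is only illustrative):

#include <stddef.h>

/* Nonzero iff byte c occurs anywhere in the four vec_size-byte chunks;
   mirrors what the single kortestd above decides for 4 vectors at once.  */
int
any_match (const unsigned char *chunk0, const unsigned char *chunk1,
           const unsigned char *chunk2, const unsigned char *chunk3,
           unsigned char c, size_t vec_size)
{
  int found = 0;
  for (size_t i = 0; i < vec_size; i++)
    {
      int k1 = chunk3[i] != c;            /* vpcmpb $4, ..., %k1 */
      unsigned char v2 = chunk2[i] ^ c;   /* vpxorq: zero byte on match */
      unsigned char v3 = chunk1[i] ^ c;   /* vpxorq: zero byte on match */
      int k4 = chunk0[i] == c;            /* vpcmpb $0, ..., %k4 */
      /* vpminub ... {%k1}{z}: forced to zero where chunk3 matched (k1 bit
         clear), otherwise zero iff chunk1 or chunk2 matched here.  */
      unsigned char combined = k1 ? (v2 < v3 ? v2 : v3) : 0;
      found |= k4 | (combined == 0);      /* vptestnmb + kortestd */
    }
  return found;
}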
@@ -270,7 +270,7 @@ L(loop_4x_vec):
L(last_4x_vec):
/* Used no matter what. */
- vpcmpb $0, (VEC_SIZE * -1)(%rax), %VECMATCH, %k0
+ vpcmpb $0, (VEC_SIZE * -1)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
cmpl $(VEC_SIZE * 2), %edx
@@ -280,14 +280,14 @@ L(last_4x_vec):
jnz L(ret_vec_x0_dec)
- vpcmpb $0, (VEC_SIZE * -2)(%rax), %VECMATCH, %k0
+ vpcmpb $0, (VEC_SIZE * -2)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
testl %ecx, %ecx
jnz L(ret_vec_x1)
/* Used no matter what. */
- vpcmpb $0, (VEC_SIZE * -3)(%rax), %VECMATCH, %k0
+ vpcmpb $0, (VEC_SIZE * -3)(%rax), %VMMMATCH, %k0
kmovd %k0, %ecx
cmpl $(VEC_SIZE * 3), %edx
@@ -309,7 +309,7 @@ L(loop_end):
testl %ecx, %ecx
jnz L(ret_vec_x0_end)
- vptestnmb %VEC(2), %VEC(2), %k0
+ vptestnmb %VMM(2), %VMM(2), %k0
kmovd %k0, %ecx
testl %ecx, %ecx
jnz L(ret_vec_x1_end)
@@ -1,10 +1,6 @@
-#define ZERO_UPPER_VEC_REGISTERS_RETURN \
- ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
+#include "avx-rtm-vecs.h"
-#define VZEROUPPER_RETURN jmp L(return)
-
-#define SECTION(p) p##.avx.rtm
#define MEMSET_SYMBOL(p,s) p##_avx2_##s##_rtm
#define WMEMSET_SYMBOL(p,s) p##_avx2_##s##_rtm
-#include "memset-avx2-unaligned-erms.S"
+# include "memset-avx2-unaligned-erms.S"
@@ -4,14 +4,9 @@
# define USE_WITH_AVX2 1
-# define VEC_SIZE 32
-# define MOV_SIZE 4
-# define RET_SIZE 4
-
-# define VEC(i) ymm##i
-
-# define VMOVU vmovdqu
-# define VMOVA vmovdqa
+# ifndef VEC_SIZE
+# include "avx-vecs.h"
+# endif
# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
vmovd d, %xmm0; \
@@ -26,9 +21,6 @@
# define WMEMSET_VDUP_TO_VEC0_HIGH() vpbroadcastd %xmm0, %ymm0
# define WMEMSET_VDUP_TO_VEC0_LOW() vpbroadcastd %xmm0, %xmm0
-# ifndef SECTION
-# define SECTION(p) p##.avx
-# endif
# ifndef MEMSET_SYMBOL
# define MEMSET_SYMBOL(p,s) p##_avx2_##s
# endif
@@ -4,26 +4,14 @@
# define USE_WITH_AVX512 1
-# define VEC_SIZE 64
-# define MOV_SIZE 6
-# define RET_SIZE 1
-
-# define XMM0 xmm16
-# define YMM0 ymm16
-# define VEC0 zmm16
-# define VEC(i) VEC##i
-
-# define VMOVU vmovdqu64
-# define VMOVA vmovdqa64
-
-# define VZEROUPPER
+# include "evex512-vecs.h"
# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
- vpbroadcastb d, %VEC0; \
+ vpbroadcastb d, %VMM(0); \
movq r, %rax
# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
- vpbroadcastd d, %VEC0; \
+ vpbroadcastd d, %VMM(0); \
movq r, %rax
# define MEMSET_VDUP_TO_VEC0_HIGH()
@@ -32,8 +20,6 @@
# define WMEMSET_VDUP_TO_VEC0_HIGH()
# define WMEMSET_VDUP_TO_VEC0_LOW()
-# define SECTION(p) p##.evex512
-
#ifndef MEMSET_SYMBOL
# define MEMSET_SYMBOL(p,s) p##_avx512_##s
#endif
@@ -4,26 +4,14 @@
# define USE_WITH_EVEX 1
-# define VEC_SIZE 32
-# define MOV_SIZE 6
-# define RET_SIZE 1
-
-# define XMM0 xmm16
-# define YMM0 ymm16
-# define VEC0 ymm16
-# define VEC(i) VEC##i
-
-# define VMOVU vmovdqu64
-# define VMOVA vmovdqa64
-
-# define VZEROUPPER
+# include "evex256-vecs.h"
# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
- vpbroadcastb d, %VEC0; \
+ vpbroadcastb d, %VMM(0); \
movq r, %rax
# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
- vpbroadcastd d, %VEC0; \
+ vpbroadcastd d, %VMM(0); \
movq r, %rax
# define MEMSET_VDUP_TO_VEC0_HIGH()
@@ -32,8 +20,6 @@
# define WMEMSET_VDUP_TO_VEC0_HIGH()
# define WMEMSET_VDUP_TO_VEC0_LOW()
-# define SECTION(p) p##.evex
-
#ifndef MEMSET_SYMBOL
# define MEMSET_SYMBOL(p,s) p##_evex_##s
#endif
@@ -26,13 +26,7 @@
# include <sysdep.h>
# define USE_WITH_SSE2 1
-# define VEC_SIZE 16
-# define MOV_SIZE 3
-# define RET_SIZE 1
-
-# define VEC(i) xmm##i
-# define VMOVU movups
-# define VMOVA movaps
+# include "sse2-vecs.h"
# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
movd d, %xmm0; \
@@ -52,8 +46,6 @@
# define WMEMSET_VDUP_TO_VEC0_HIGH()
# define WMEMSET_VDUP_TO_VEC0_LOW()
-# define SECTION(p) p
-
# ifndef MEMSET_SYMBOL
# define MEMSET_SYMBOL(p,s) p##_sse2_##s
# endif
@@ -34,14 +34,6 @@
# define WMEMSET_CHK_SYMBOL(p,s) WMEMSET_SYMBOL(p, s)
#endif
-#ifndef XMM0
-# define XMM0 xmm0
-#endif
-
-#ifndef YMM0
-# define YMM0 ymm0
-#endif
-
#ifndef VZEROUPPER
# if VEC_SIZE > 16
# define VZEROUPPER vzeroupper
@@ -150,8 +142,8 @@ L(entry_from_wmemset):
cmpq $(VEC_SIZE * 2), %rdx
ja L(more_2x_vec)
/* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */
- VMOVU %VEC(0), -VEC_SIZE(%rdi,%rdx)
- VMOVU %VEC(0), (%rdi)
+ VMOVU %VMM(0), -VEC_SIZE(%rdi,%rdx)
+ VMOVU %VMM(0), (%rdi)
VZEROUPPER_RETURN
#if defined USE_MULTIARCH && IS_IN (libc)
END (MEMSET_SYMBOL (__memset, unaligned))
@@ -175,19 +167,19 @@ ENTRY_P2ALIGN (MEMSET_SYMBOL (__memset, unaligned_erms), 6)
cmp $(VEC_SIZE * 2), %RDX_LP
ja L(stosb_more_2x_vec)
/* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
VZEROUPPER_RETURN
#endif
.p2align 4,, 4
L(last_2x_vec):
#ifdef USE_LESS_VEC_MASK_STORE
- VMOVU %VEC(0), (VEC_SIZE * -2)(%rdi, %rdx)
- VMOVU %VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
+ VMOVU %VMM(0), (VEC_SIZE * -2)(%rdi, %rdx)
+ VMOVU %VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
#else
- VMOVU %VEC(0), (VEC_SIZE * -2)(%rdi)
- VMOVU %VEC(0), (VEC_SIZE * -1)(%rdi)
+ VMOVU %VMM(0), (VEC_SIZE * -2)(%rdi)
+ VMOVU %VMM(0), (VEC_SIZE * -1)(%rdi)
#endif
VZEROUPPER_RETURN
@@ -221,7 +213,7 @@ L(less_vec_from_wmemset):
bzhil %edx, %ecx, %ecx
kmovd %ecx, %k1
# endif
- vmovdqu8 %VEC(0), (%rax){%k1}
+ vmovdqu8 %VMM(0), (%rax){%k1}
VZEROUPPER_RETURN
# if defined USE_MULTIARCH && IS_IN (libc)
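
For the masked store just above, bzhi turns the residual length into a byte mask with the low %edx bits set; this assumes the setup elided from this hunk leaves all-ones in %ecx, which is what the pattern implies but is not shown here. A scalar C model of the kmovd (32-bit mask) case:

#include <stdint.h>

/* Set the first len (len < 32) bytes of dst without branching on len:
   build a mask with the low len bits set, then write only the selected
   bytes.  Lanes whose mask bit is clear are left untouched (and, with the
   real masked store, not accessed at all).  */
void
masked_memset32 (unsigned char *dst, unsigned char byte, unsigned int len)
{
  uint32_t k1 = ((uint32_t) 1 << len) - 1;   /* bzhil %edx, %ecx, %ecx
                                                (all-ones in %ecx assumed,
                                                not shown in this hunk) */
  for (unsigned int i = 0; i < 32; i++)      /* vmovdqu8 %VMM(0), (%rax){%k1} */
    if (k1 & ((uint32_t) 1 << i))
      dst[i] = byte;
}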
@@ -249,8 +241,8 @@ L(stosb_more_2x_vec):
and (4x, 8x] jump to target. */
L(more_2x_vec):
/* Store next 2x vec regardless. */
- VMOVU %VEC(0), (%rdi)
- VMOVU %VEC(0), (VEC_SIZE * 1)(%rdi)
+ VMOVU %VMM(0), (%rdi)
+ VMOVU %VMM(0), (VEC_SIZE * 1)(%rdi)
/* Two different methods of setting up pointers / compare. The two
@@ -278,8 +270,8 @@ L(more_2x_vec):
#endif
/* Store next 2x vec regardless. */
- VMOVU %VEC(0), (VEC_SIZE * 2)(%rax)
- VMOVU %VEC(0), (VEC_SIZE * 3)(%rax)
+ VMOVU %VMM(0), (VEC_SIZE * 2)(%rax)
+ VMOVU %VMM(0), (VEC_SIZE * 3)(%rax)
#if defined USE_WITH_EVEX || defined USE_WITH_AVX512
@@ -304,20 +296,20 @@ L(more_2x_vec):
andq $(VEC_SIZE * -2), %LOOP_REG
.p2align 4
L(loop):
- VMOVA %VEC(0), LOOP_4X_OFFSET(%LOOP_REG)
- VMOVA %VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
- VMOVA %VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
- VMOVA %VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
+ VMOVA %VMM(0), LOOP_4X_OFFSET(%LOOP_REG)
+ VMOVA %VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
+ VMOVA %VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
+ VMOVA %VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
subq $-(VEC_SIZE * 4), %LOOP_REG
cmpq %END_REG, %LOOP_REG
jb L(loop)
.p2align 4,, MOV_SIZE
L(last_4x_vec):
- VMOVU %VEC(0), LOOP_4X_OFFSET(%END_REG)
- VMOVU %VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
- VMOVU %VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
- VMOVU %VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
-L(return):
+ VMOVU %VMM(0), LOOP_4X_OFFSET(%END_REG)
+ VMOVU %VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
+ VMOVU %VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
+ VMOVU %VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
+L(return_vzeroupper):
#if VEC_SIZE > 16
ZERO_UPPER_VEC_REGISTERS_RETURN
#else
@@ -355,7 +347,7 @@ L(cross_page):
jge L(between_16_31)
#endif
#ifndef USE_XMM_LESS_VEC
- MOVQ %XMM0, %SET_REG64
+ MOVQ %VMM_128(0), %SET_REG64
#endif
cmpl $8, %edx
jge L(between_8_15)
@@ -374,8 +366,8 @@ L(between_0_0):
.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, RET_SIZE)
/* From 32 to 63. No branch when size == 32. */
L(between_32_63):
- VMOVU %YMM0, (%LESS_VEC_REG)
- VMOVU %YMM0, -32(%LESS_VEC_REG, %rdx)
+ VMOVU %VMM_256(0), (%LESS_VEC_REG)
+ VMOVU %VMM_256(0), -32(%LESS_VEC_REG, %rdx)
VZEROUPPER_RETURN
#endif
@@ -383,8 +375,8 @@ L(between_32_63):
.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, 1)
L(between_16_31):
/* From 16 to 31. No branch when size == 16. */
- VMOVU %XMM0, (%LESS_VEC_REG)
- VMOVU %XMM0, -16(%LESS_VEC_REG, %rdx)
+ VMOVU %VMM_128(0), (%LESS_VEC_REG)
+ VMOVU %VMM_128(0), -16(%LESS_VEC_REG, %rdx)
ret
#endif
@@ -394,8 +386,8 @@ L(between_16_31):
L(between_8_15):
/* From 8 to 15. No branch when size == 8. */
#ifdef USE_XMM_LESS_VEC
- MOVQ %XMM0, (%rdi)
- MOVQ %XMM0, -8(%rdi, %rdx)
+ MOVQ %VMM_128(0), (%rdi)
+ MOVQ %VMM_128(0), -8(%rdi, %rdx)
#else
movq %SET_REG64, (%LESS_VEC_REG)
movq %SET_REG64, -8(%LESS_VEC_REG, %rdx)
@@ -408,8 +400,8 @@ L(between_8_15):
L(between_4_7):
/* From 4 to 7. No branch when size == 4. */
#ifdef USE_XMM_LESS_VEC
- MOVD %XMM0, (%rdi)
- MOVD %XMM0, -4(%rdi, %rdx)
+ MOVD %VMM_128(0), (%rdi)
+ MOVD %VMM_128(0), -4(%rdi, %rdx)
#else
movl %SET_REG32, (%LESS_VEC_REG)
movl %SET_REG32, -4(%LESS_VEC_REG, %rdx)
@@ -40,8 +40,8 @@
#define VMOVA movaps
#define VMOVNT movntdq
-#define VEC_xmm VEC_any_xmm
-#define VEC VEC_any_xmm
+#define VMM_128 VMM_any_xmm
+#define VMM VMM_any_xmm
#endif
@@ -25,66 +25,66 @@
#endif
/* Defines so we can use SSE2 / AVX2 / EVEX / EVEX512 encoding with same
- VEC(N) values. */
-#define VEC_hi_xmm0 xmm16
-#define VEC_hi_xmm1 xmm17
-#define VEC_hi_xmm2 xmm18
-#define VEC_hi_xmm3 xmm19
-#define VEC_hi_xmm4 xmm20
-#define VEC_hi_xmm5 xmm21
-#define VEC_hi_xmm6 xmm22
-#define VEC_hi_xmm7 xmm23
-#define VEC_hi_xmm8 xmm24
-#define VEC_hi_xmm9 xmm25
-#define VEC_hi_xmm10 xmm26
-#define VEC_hi_xmm11 xmm27
-#define VEC_hi_xmm12 xmm28
-#define VEC_hi_xmm13 xmm29
-#define VEC_hi_xmm14 xmm30
-#define VEC_hi_xmm15 xmm31
+ VMM(N) values. */
+#define VMM_hi_xmm0 xmm16
+#define VMM_hi_xmm1 xmm17
+#define VMM_hi_xmm2 xmm18
+#define VMM_hi_xmm3 xmm19
+#define VMM_hi_xmm4 xmm20
+#define VMM_hi_xmm5 xmm21
+#define VMM_hi_xmm6 xmm22
+#define VMM_hi_xmm7 xmm23
+#define VMM_hi_xmm8 xmm24
+#define VMM_hi_xmm9 xmm25
+#define VMM_hi_xmm10 xmm26
+#define VMM_hi_xmm11 xmm27
+#define VMM_hi_xmm12 xmm28
+#define VMM_hi_xmm13 xmm29
+#define VMM_hi_xmm14 xmm30
+#define VMM_hi_xmm15 xmm31
-#define VEC_hi_ymm0 ymm16
-#define VEC_hi_ymm1 ymm17
-#define VEC_hi_ymm2 ymm18
-#define VEC_hi_ymm3 ymm19
-#define VEC_hi_ymm4 ymm20
-#define VEC_hi_ymm5 ymm21
-#define VEC_hi_ymm6 ymm22
-#define VEC_hi_ymm7 ymm23
-#define VEC_hi_ymm8 ymm24
-#define VEC_hi_ymm9 ymm25
-#define VEC_hi_ymm10 ymm26
-#define VEC_hi_ymm11 ymm27
-#define VEC_hi_ymm12 ymm28
-#define VEC_hi_ymm13 ymm29
-#define VEC_hi_ymm14 ymm30
-#define VEC_hi_ymm15 ymm31
+#define VMM_hi_ymm0 ymm16
+#define VMM_hi_ymm1 ymm17
+#define VMM_hi_ymm2 ymm18
+#define VMM_hi_ymm3 ymm19
+#define VMM_hi_ymm4 ymm20
+#define VMM_hi_ymm5 ymm21
+#define VMM_hi_ymm6 ymm22
+#define VMM_hi_ymm7 ymm23
+#define VMM_hi_ymm8 ymm24
+#define VMM_hi_ymm9 ymm25
+#define VMM_hi_ymm10 ymm26
+#define VMM_hi_ymm11 ymm27
+#define VMM_hi_ymm12 ymm28
+#define VMM_hi_ymm13 ymm29
+#define VMM_hi_ymm14 ymm30
+#define VMM_hi_ymm15 ymm31
-#define VEC_hi_zmm0 zmm16
-#define VEC_hi_zmm1 zmm17
-#define VEC_hi_zmm2 zmm18
-#define VEC_hi_zmm3 zmm19
-#define VEC_hi_zmm4 zmm20
-#define VEC_hi_zmm5 zmm21
-#define VEC_hi_zmm6 zmm22
-#define VEC_hi_zmm7 zmm23
-#define VEC_hi_zmm8 zmm24
-#define VEC_hi_zmm9 zmm25
-#define VEC_hi_zmm10 zmm26
-#define VEC_hi_zmm11 zmm27
-#define VEC_hi_zmm12 zmm28
-#define VEC_hi_zmm13 zmm29
-#define VEC_hi_zmm14 zmm30
-#define VEC_hi_zmm15 zmm31
+#define VMM_hi_zmm0 zmm16
+#define VMM_hi_zmm1 zmm17
+#define VMM_hi_zmm2 zmm18
+#define VMM_hi_zmm3 zmm19
+#define VMM_hi_zmm4 zmm20
+#define VMM_hi_zmm5 zmm21
+#define VMM_hi_zmm6 zmm22
+#define VMM_hi_zmm7 zmm23
+#define VMM_hi_zmm8 zmm24
+#define VMM_hi_zmm9 zmm25
+#define VMM_hi_zmm10 zmm26
+#define VMM_hi_zmm11 zmm27
+#define VMM_hi_zmm12 zmm28
+#define VMM_hi_zmm13 zmm29
+#define VMM_hi_zmm14 zmm30
+#define VMM_hi_zmm15 zmm31
-#define PRIMITIVE_VEC(vec, num) vec##num
+#define PRIMITIVE_VMM(vec, num) vec##num
-#define VEC_any_xmm(i) PRIMITIVE_VEC(xmm, i)
-#define VEC_any_ymm(i) PRIMITIVE_VEC(ymm, i)
-#define VEC_any_zmm(i) PRIMITIVE_VEC(zmm, i)
+#define VMM_any_xmm(i) PRIMITIVE_VMM(xmm, i)
+#define VMM_any_ymm(i) PRIMITIVE_VMM(ymm, i)
+#define VMM_any_zmm(i) PRIMITIVE_VMM(zmm, i)
-#define VEC_hi_xmm(i) PRIMITIVE_VEC(VEC_hi_xmm, i)
-#define VEC_hi_ymm(i) PRIMITIVE_VEC(VEC_hi_ymm, i)
-#define VEC_hi_zmm(i) PRIMITIVE_VEC(VEC_hi_zmm, i)
+#define VMM_hi_xmm(i) PRIMITIVE_VMM(VMM_hi_xmm, i)
+#define VMM_hi_ymm(i) PRIMITIVE_VMM(VMM_hi_ymm, i)
+#define VMM_hi_zmm(i) PRIMITIVE_VMM(VMM_hi_zmm, i)
#endif
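
Since every register name now goes through one more level of indirection, it helps to see the token pasting spelled out once. The following small C program mirrors a subset of the defines above (the STR helper is not part of the headers; it only prints the final expansion) and shows how the same VMM-style spelling resolves to a VEX-encodable register in the "any" class but to one of the EVEX-only registers (xmm16/ymm16/zmm16 and up) in the "hi" class:

#include <stdio.h>

/* Subset of the defines above.  */
#define VMM_hi_ymm1 ymm17
#define VMM_hi_zmm0 zmm16

#define PRIMITIVE_VMM(vec, num) vec##num

#define VMM_any_ymm(i) PRIMITIVE_VMM(ymm, i)
#define VMM_hi_ymm(i) PRIMITIVE_VMM(VMM_hi_ymm, i)
#define VMM_hi_zmm(i) PRIMITIVE_VMM(VMM_hi_zmm, i)

/* Not part of the headers: stringify after full expansion, for printing.  */
#define STR_(x) #x
#define STR(x) STR_(x)

int
main (void)
{
  puts (STR (VMM_any_ymm (1)));   /* ymm1:  VEX-encodable, as used by avx-vecs.h */
  puts (STR (VMM_hi_ymm (1)));    /* ymm17: EVEX-only, as used by evex-vecs-common.h */
  puts (STR (VMM_hi_zmm (0)));    /* zmm16 */
  return 0;
}

That indirection is what lets the memmove/memset/memrchr bodies above write %VMM(0) once and have it assemble to %xmm0, %ymm0, %ymm16 or %zmm16 depending on which vecs header was included.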