| Message ID | 20220628152735.17863-3-goldstein.w.n@gmail.com |
|---|---|
| State | New |
| Series | [v1,1/3] x86: Add definition for __wmemset_chk AVX2 RTM in ifunc impl list |
On Tue, Jun 28, 2022 at 8:27 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Refactor files so that all implementations are in the multiarch
>    directory.
>     - Moved the implementation portion of memset sse2 from memset.S to
>       multiarch/memset-sse2.S
>
>     - The non-multiarch file now only includes one of the
>       implementations in the multiarch directory based on the compiled
>       ISA level (only used for non-multiarch builds.  Otherwise we go
>       through the ifunc selector).
>
> 2. Add ISA level build guards to different implementations.
>     - I.e., memset-avx2-unaligned-erms.S, which is ISA level 3, will
>       only build if the compiled ISA level is <= 3.  Otherwise there is
>       no reason to include it, as we will always use one of the ISA
>       level 4 implementations (memset-evex-unaligned-erms.S).
>
> 3. Add a new multiarch/rtld-memset.S that just includes the
>    non-multiarch memset.S, which will in turn select the best
>    implementation based on the compiled ISA level.
>
> 4. Refactor the ifunc selector and ifunc implementation list to use
>    the ISA level aware wrapper macros that allow functions below the
>    compiled ISA level (with a guaranteed replacement) to be skipped.
>
> Tested with and without multiarch on x86_64 for ISA levels:
> {generic, x86-64-v2, x86-64-v3, x86-64-v4}
>
> And m32 with and without multiarch.
> ---
>  sysdeps/x86_64/memset.S                       |  45 +----
>  sysdeps/x86_64/multiarch/Makefile             |   1 +
>  sysdeps/x86_64/multiarch/ifunc-impl-list.c    | 165 +++++++++---------
>  sysdeps/x86_64/multiarch/ifunc-memset.h       |  45 ++---
>  sysdeps/x86_64/multiarch/ifunc-wmemset.h      |  21 ++-
>  .../multiarch/memset-avx2-unaligned-erms.S    |   5 +-
>  .../multiarch/memset-avx512-no-vzeroupper.S   |   4 +-
>  .../multiarch/memset-avx512-unaligned-erms.S  |  12 +-
>  sysdeps/x86_64/multiarch/memset-erms.S        |  25 +++
>  .../multiarch/memset-evex-unaligned-erms.S    |  12 +-
>  .../multiarch/memset-sse2-unaligned-erms.S    |  57 ++++--
>  .../multiarch/memset-vec-unaligned-erms.S     |  23 ---
>  sysdeps/x86_64/multiarch/rtld-memset.S        |  18 ++
>  13 files changed, 247 insertions(+), 186 deletions(-)
>  create mode 100644 sysdeps/x86_64/multiarch/memset-erms.S
>  create mode 100644 sysdeps/x86_64/multiarch/rtld-memset.S
>
> diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S
> index a6eea61a4d..f4e1bab601 100644
> --- a/sysdeps/x86_64/memset.S
> +++ b/sysdeps/x86_64/memset.S
> @@ -18,47 +18,18 @@
>     <https://www.gnu.org/licenses/>.  */
>
>  #include <sysdep.h>
> -#define USE_WITH_SSE2 1
>
> -#define VEC_SIZE 16
> -#define MOV_SIZE 3
> -#define RET_SIZE 1
> +#define MEMSET_SYMBOL(p,s) memset
> +#define MEMSET_CHK_SYMBOL(p,s) p
>
> -#define VEC(i) xmm##i
> -#define VMOVU movups
> -#define VMOVA movaps
> +#define WMEMSET_SYMBOL(p,s) __wmemset
> +#define WMEMSET_CHK_SYMBOL(p,s) p
>
> -# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
> -  movd d, %xmm0; \
> -  movq r, %rax; \
> -  punpcklbw %xmm0, %xmm0; \
> -  punpcklwd %xmm0, %xmm0; \
> -  pshufd $0, %xmm0, %xmm0
> +#define DEFAULT_IMPL_V1 "multiarch/memset-sse2-unaligned-erms.S"
> +#define DEFAULT_IMPL_V3 "multiarch/memset-avx2-unaligned-erms.S"
> +#define DEFAULT_IMPL_V4 "multiarch/memset-evex-unaligned-erms.S"
>
> -# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
> -  movd d, %xmm0; \
> -  pshufd $0, %xmm0, %xmm0; \
> -  movq r, %rax
> -
> -# define MEMSET_VDUP_TO_VEC0_HIGH()
> -# define MEMSET_VDUP_TO_VEC0_LOW()
> -
> -# define WMEMSET_VDUP_TO_VEC0_HIGH()
> -# define WMEMSET_VDUP_TO_VEC0_LOW()
> -
> -#define SECTION(p) p
> -
> -#ifndef MEMSET_SYMBOL
> -# define MEMSET_CHK_SYMBOL(p,s) p
> -# define MEMSET_SYMBOL(p,s) memset
> -#endif
> -
> -#ifndef WMEMSET_SYMBOL
> -# define WMEMSET_CHK_SYMBOL(p,s) p
> -# define WMEMSET_SYMBOL(p,s) __wmemset
> -#endif
> -
> -#include "multiarch/memset-vec-unaligned-erms.S"
> +#include "isa-default-impl.h"
>
>  libc_hidden_builtin_def (memset)
>
> diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
> index 666ee4d5d6..0525cef3fe 100644
> --- a/sysdeps/x86_64/multiarch/Makefile
> +++ b/sysdeps/x86_64/multiarch/Makefile
> @@ -29,6 +29,7 @@ sysdep_routines += \
>    memset-avx2-unaligned-erms-rtm \
>    memset-avx512-no-vzeroupper \
>    memset-avx512-unaligned-erms \
> +  memset-erms \

This change should be the part of the other memset-erms patch.

>    memset-evex-unaligned-erms \
>    memset-sse2-unaligned-erms \
>    rawmemchr-avx2 \
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index 3ae4d49bee..4450e2c593 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -209,94 +209,97 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>    IFUNC_IMPL (i, name, __memset_chk,
>        IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
>            __memset_chk_erms)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
> -          __memset_chk_sse2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
> -          __memset_chk_sse2_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
> +          (CPU_FEATURE_USABLE (AVX512VL)
> +           && CPU_FEATURE_USABLE (AVX512BW)
> +           && CPU_FEATURE_USABLE (BMI2)),
> +          __memset_chk_avx512_unaligned_erms)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
> +          (CPU_FEATURE_USABLE (AVX512VL)
> +           && CPU_FEATURE_USABLE (AVX512BW)
> +           && CPU_FEATURE_USABLE (BMI2)),
> +          __memset_chk_avx512_unaligned)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
> +          CPU_FEATURE_USABLE (AVX512F),
> +          __memset_chk_avx512_no_vzeroupper)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
> +          (CPU_FEATURE_USABLE (AVX512VL)
> +           && CPU_FEATURE_USABLE (AVX512BW)
> +           && CPU_FEATURE_USABLE (BMI2)),
> +          __memset_chk_evex_unaligned)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
> +          (CPU_FEATURE_USABLE (AVX512VL)
> +           && CPU_FEATURE_USABLE (AVX512BW)
> +           && CPU_FEATURE_USABLE (BMI2)),
> +          __memset_chk_evex_unaligned_erms)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
>            CPU_FEATURE_USABLE (AVX2),
>            __memset_chk_avx2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
>            CPU_FEATURE_USABLE (AVX2),
>            __memset_chk_avx2_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
>            (CPU_FEATURE_USABLE (AVX2)
>             && CPU_FEATURE_USABLE (RTM)),
>            __memset_chk_avx2_unaligned_rtm)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
>            (CPU_FEATURE_USABLE (AVX2)
>             && CPU_FEATURE_USABLE (RTM)),
>            __memset_chk_avx2_unaligned_erms_rtm)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +      /* Can be lowered to V1 if a V2 implementation is added.  */
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
> +          __memset_chk_sse2_unaligned)
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
> +          __memset_chk_sse2_unaligned_erms)
> +      )
> +#endif
> +
> +  /* Support sysdeps/x86_64/multiarch/memset.c.  */
> +  IFUNC_IMPL (i, name, memset,
> +      IFUNC_IMPL_ADD (array, i, memset, 1,
> +          __memset_erms)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_chk_evex_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +          __memset_avx512_unaligned_erms)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_chk_evex_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +          __memset_avx512_unaligned)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
> +          CPU_FEATURE_USABLE (AVX512F),
> +          __memset_avx512_no_vzeroupper)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_chk_avx512_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> +          __memset_evex_unaligned)
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_chk_avx512_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __memset_chk,
> -          CPU_FEATURE_USABLE (AVX512F),
> -          __memset_chk_avx512_no_vzeroupper)
> -      )
> -#endif
> -
> -  /* Support sysdeps/x86_64/multiarch/memset.c.  */
> -  IFUNC_IMPL (i, name, memset,
> -      IFUNC_IMPL_ADD (array, i, memset, 1,
> -          __memset_sse2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, memset, 1,
> -          __memset_sse2_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_erms)
> -      IFUNC_IMPL_ADD (array, i, memset,
> +          __memset_evex_unaligned_erms)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
>            CPU_FEATURE_USABLE (AVX2),
>            __memset_avx2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, memset,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
>            CPU_FEATURE_USABLE (AVX2),
>            __memset_avx2_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, memset,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX2)
>             && CPU_FEATURE_USABLE (RTM)),
>            __memset_avx2_unaligned_rtm)
> -      IFUNC_IMPL_ADD (array, i, memset,
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
>            (CPU_FEATURE_USABLE (AVX2)
>             && CPU_FEATURE_USABLE (RTM)),
>            __memset_avx2_unaligned_erms_rtm)
> -      IFUNC_IMPL_ADD (array, i, memset,
> -          (CPU_FEATURE_USABLE (AVX512VL)
> -           && CPU_FEATURE_USABLE (AVX512BW)
> -           && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_evex_unaligned)
> -      IFUNC_IMPL_ADD (array, i, memset,
> -          (CPU_FEATURE_USABLE (AVX512VL)
> -           && CPU_FEATURE_USABLE (AVX512BW)
> -           && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_evex_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, memset,
> -          (CPU_FEATURE_USABLE (AVX512VL)
> -           && CPU_FEATURE_USABLE (AVX512BW)
> -           && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_avx512_unaligned_erms)
> -      IFUNC_IMPL_ADD (array, i, memset,
> -          (CPU_FEATURE_USABLE (AVX512VL)
> -           && CPU_FEATURE_USABLE (AVX512BW)
> -           && CPU_FEATURE_USABLE (BMI2)),
> -          __memset_avx512_unaligned)
> -      IFUNC_IMPL_ADD (array, i, memset,
> -          CPU_FEATURE_USABLE (AVX512F),
> -          __memset_avx512_no_vzeroupper)
> +      /* Can be lowered to V1 if a V2 implementation is added.  */
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
> +          __memset_sse2_unaligned)
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
> +          __memset_sse2_unaligned_erms)
>        )
>
>    /* Support sysdeps/x86_64/multiarch/rawmemchr.c.  */
> @@ -820,25 +823,26 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>
>    /* Support sysdeps/x86_64/multiarch/wmemset.c.  */
>    IFUNC_IMPL (i, name, wmemset,
> -      IFUNC_IMPL_ADD (array, i, wmemset, 1,
> -          __wmemset_sse2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, wmemset,
> -          CPU_FEATURE_USABLE (AVX2),
> -          __wmemset_avx2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, wmemset,
> -          (CPU_FEATURE_USABLE (AVX2)
> -           && CPU_FEATURE_USABLE (RTM)),
> -          __wmemset_avx2_unaligned_rtm)
> -      IFUNC_IMPL_ADD (array, i, wmemset,
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
>            __wmemset_evex_unaligned)
> -      IFUNC_IMPL_ADD (array, i, wmemset,
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __wmemset_avx512_unaligned))
> +          __wmemset_avx512_unaligned)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
> +          CPU_FEATURE_USABLE (AVX2),
> +          __wmemset_avx2_unaligned)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
> +          (CPU_FEATURE_USABLE (AVX2)
> +           && CPU_FEATURE_USABLE (RTM)),
> +          __wmemset_avx2_unaligned_rtm)
> +      /* Can be lowered to V1 if a V2 implementation is added.  */
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, wmemset, 1,
> +          __wmemset_sse2_unaligned))
>
>  #ifdef SHARED
>    /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
> @@ -1023,25 +1027,26 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>  #ifdef SHARED
>    /* Support sysdeps/x86_64/multiarch/wmemset_chk.c.  */
>    IFUNC_IMPL (i, name, __wmemset_chk,
> -      IFUNC_IMPL_ADD (array, i, __wmemset_chk, 1,
> -          __wmemset_chk_sse2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
> -          CPU_FEATURE_USABLE (AVX2),
> -          __wmemset_chk_avx2_unaligned)
> -      IFUNC_IMPL_ADD (array, i, wmemset_chk,
> -          (CPU_FEATURE_USABLE (AVX2)
> -           && CPU_FEATURE_USABLE (RTM)),
> -          __wmemset_chk_avx2_unaligned_rtm)
> -      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
>            __wmemset_chk_evex_unaligned)
> -      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
> +      X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
>            (CPU_FEATURE_USABLE (AVX512VL)
>             && CPU_FEATURE_USABLE (AVX512BW)
>             && CPU_FEATURE_USABLE (BMI2)),
> -          __wmemset_chk_avx512_unaligned))
> +          __wmemset_chk_avx512_unaligned)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
> +          CPU_FEATURE_USABLE (AVX2),
> +          __wmemset_chk_avx2_unaligned)
> +      X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
> +          (CPU_FEATURE_USABLE (AVX2)
> +           && CPU_FEATURE_USABLE (RTM)),
> +          __wmemset_chk_avx2_unaligned_rtm)
> +      /* Can be lowered to V1 if a V2 implementation is added.  */
> +      X86_IFUNC_IMPL_ADD_V2 (array, i, __wmemset_chk, 1,
> +          __wmemset_chk_sse2_unaligned))
>  #endif
>
>    return 0;
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> index 64d179913c..ed514976aa 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> @@ -20,10 +20,19 @@
>  #include <init-arch.h>
>
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
> +
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
>    attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
> +  attribute_hidden;
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
> +  attribute_hidden;
> +
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
> +  attribute_hidden;
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
>    attribute_hidden;
> +
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
>    attribute_hidden;
> @@ -31,31 +40,26 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
>    attribute_hidden;
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms_rtm)
>    attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
> -  attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
> -  attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
> -  attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
> +
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
>    attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
>    attribute_hidden;
>
>  static inline void *
>  IFUNC_SELECTOR (void)
>  {
> -  const struct cpu_features* cpu_features = __get_cpu_features ();
> +  const struct cpu_features *cpu_features = __get_cpu_features ();
>
>    if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS))
>      return OPTIMIZE (erms);
>
> -  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
> +  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
>        && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
>      {
> -      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> -          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> -          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> +      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> +          && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> +          && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
>          {
>            if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
>              return OPTIMIZE (avx512_unaligned_erms);
> @@ -66,11 +70,11 @@ IFUNC_SELECTOR (void)
>        return OPTIMIZE (avx512_no_vzeroupper);
>      }
>
> -  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> +  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2))
>      {
> -      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> -          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> -          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> +      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> +          && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> +          && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
>          {
>            if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
>              return OPTIMIZE (evex_unaligned_erms);
> @@ -86,7 +90,8 @@ IFUNC_SELECTOR (void)
>        return OPTIMIZE (avx2_unaligned_rtm);
>      }
>
> -  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
> +  if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> +                                   Prefer_No_VZEROUPPER, !))
>      {
>        if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
>          return OPTIMIZE (avx2_unaligned_erms);
> diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
> index 87c48e2387..3810c719c6 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-wmemset.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
> @@ -18,22 +18,26 @@
>
>  #include <init-arch.h>
>
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
> +
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
> +
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
>  extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
>    attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
> -extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
> +
> +extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
>
>  static inline void *
>  IFUNC_SELECTOR (void)
>  {
> -  const struct cpu_features* cpu_features = __get_cpu_features ();
> +  const struct cpu_features *cpu_features = __get_cpu_features ();
>
> -  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
> -      && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
> +  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2)
> +      && X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> +                                      AVX_Fast_Unaligned_Load, !))
>      {
> -      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
> +      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
>          {
>            if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
>              return OPTIMIZE (avx512_unaligned);
> @@ -44,7 +48,8 @@ IFUNC_SELECTOR (void)
>        if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
>          return OPTIMIZE (avx2_unaligned_rtm);
>
> -      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
> +      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
> +                                       Prefer_No_VZEROUPPER, !))
>          return OPTIMIZE (avx2_unaligned);
>      }
>
> diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
> index c0bf2875d0..a9054a9122 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
> @@ -1,4 +1,7 @@
> -#if IS_IN (libc)
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (3)
> +
>  # define USE_WITH_AVX2 1
>
>  # define VEC_SIZE 32
> diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
> index c5be8f57ef..8cc9c16d73 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
> @@ -17,8 +17,10 @@
>     <https://www.gnu.org/licenses/>.  */
>
>  #include <sysdep.h>
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (4)
>
> -#if IS_IN (libc)
>
>  #include "asm-syntax.h"
>  #ifndef MEMSET
> diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> index 5241216a77..47623b8ee8 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> @@ -1,4 +1,7 @@
> -#if IS_IN (libc)
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (4)
> +
>  # define USE_WITH_AVX512 1
>
>  # define VEC_SIZE 64
> @@ -30,8 +33,15 @@
>  # define WMEMSET_VDUP_TO_VEC0_LOW()
>
>  # define SECTION(p) p##.evex512
> +
> +#ifndef MEMSET_SYMBOL
>  # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> +#endif
> +#ifndef WMEMSET_SYMBOL
>  # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> +#endif
> +
> +
>  # define USE_LESS_VEC_MASK_STORE 1
>  # include "memset-vec-unaligned-erms.S"
>  #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-erms.S b/sysdeps/x86_64/multiarch/memset-erms.S
> new file mode 100644
> index 0000000000..1fce0c9fcc
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/memset-erms.S
> @@ -0,0 +1,25 @@
> +#include <sysdep.h>
> +
> +#if defined USE_MULTIARCH && IS_IN (libc)
> +	.text
> +ENTRY (__memset_chk_erms)
> +	cmp	%RDX_LP, %RCX_LP
> +	jb	HIDDEN_JUMPTARGET (__chk_fail)
> +END (__memset_chk_erms)
> +
> +/* Only used to measure performance of REP STOSB.  */
> +ENTRY (__memset_erms)
> +	/* Skip zero length.  */
> +	test	%RDX_LP, %RDX_LP
> +	jz	L(stosb_return_zero)
> +	mov	%RDX_LP, %RCX_LP
> +	movzbl	%sil, %eax
> +	mov	%RDI_LP, %RDX_LP
> +	rep	stosb
> +	mov	%RDX_LP, %RAX_LP
> +	ret
> +L(stosb_return_zero):
> +	movq	%rdi, %rax
> +	ret
> +END (__memset_erms)
> +#endif
> diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> index 6370021506..ac4b2d2d50 100644
> --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> @@ -1,4 +1,7 @@
> -#if IS_IN (libc)
> +#include <isa-level.h>
> +
> +#if ISA_SHOULD_BUILD (4)
> +
>  # define USE_WITH_EVEX 1
>
>  # define VEC_SIZE 32
> @@ -30,8 +33,15 @@
>  # define WMEMSET_VDUP_TO_VEC0_LOW()
>
>  # define SECTION(p) p##.evex
> +
> +#ifndef MEMSET_SYMBOL
>  # define MEMSET_SYMBOL(p,s) p##_evex_##s
> +#endif
> +#ifndef WMEMSET_SYMBOL
>  # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> +#endif
> +
> +
>  # define USE_LESS_VEC_MASK_STORE 1
>  # include "memset-vec-unaligned-erms.S"
>  #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
> index 3d92f6993a..44f9b8888b 100644
> --- a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
> @@ -17,22 +17,51 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>
> -#include <sysdep.h>
> -#include <shlib-compat.h>
> +#include <isa-level.h>
>
> -#if IS_IN (libc)
> -# define MEMSET_SYMBOL(p,s) p##_sse2_##s
> -# define WMEMSET_SYMBOL(p,s) p##_sse2_##s
> +/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
> +   so we need this to build for ISA V2 builds.  */
> +#if ISA_SHOULD_BUILD (2)
>
> -# ifdef SHARED
> -#  undef libc_hidden_builtin_def
> -#  define libc_hidden_builtin_def(name)
> +# include <sysdep.h>
> +# define USE_WITH_SSE2 1
> +
> +# define VEC_SIZE 16
> +# define MOV_SIZE 3
> +# define RET_SIZE 1
> +
> +# define VEC(i) xmm##i
> +# define VMOVU movups
> +# define VMOVA movaps
> +
> +# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
> +   movd d, %xmm0; \
> +   movq r, %rax; \
> +   punpcklbw %xmm0, %xmm0; \
> +   punpcklwd %xmm0, %xmm0; \
> +   pshufd $0, %xmm0, %xmm0
> +
> +# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
> +   movd d, %xmm0; \
> +   pshufd $0, %xmm0, %xmm0; \
> +   movq r, %rax
> +
> +# define MEMSET_VDUP_TO_VEC0_HIGH()
> +# define MEMSET_VDUP_TO_VEC0_LOW()
> +
> +# define WMEMSET_VDUP_TO_VEC0_HIGH()
> +# define WMEMSET_VDUP_TO_VEC0_LOW()
> +
> +# define SECTION(p) p
> +
> +# ifndef MEMSET_SYMBOL
> +#  define MEMSET_SYMBOL(p,s) p##_sse2_##s
> # endif
>
> -# undef weak_alias
> -# define weak_alias(original, alias)
> -# undef strong_alias
> -# define strong_alias(ignored1, ignored2)
> -#endif
> +# ifndef WMEMSET_SYMBOL
> +#  define WMEMSET_SYMBOL(p,s) p##_sse2_##s
> +# endif
> +
> +# include "memset-vec-unaligned-erms.S"
>
> -#include <sysdeps/x86_64/memset.S>
> +#endif
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index d98c613651..905d0fa464 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -430,26 +430,3 @@ L(between_2_3):
>  #endif
>  	ret
>  END (MEMSET_SYMBOL (__memset, unaligned_erms))
> -
> -#if defined USE_MULTIARCH && IS_IN (libc) && VEC_SIZE == 16
> -ENTRY (__memset_chk_erms)
> -	cmp	%RDX_LP, %RCX_LP
> -	jb	HIDDEN_JUMPTARGET (__chk_fail)
> -END (__memset_chk_erms)
> -
> -/* Only used to measure performance of REP STOSB.  */
> -ENTRY (__memset_erms)
> -	/* Skip zero length.  */
> -	test	%RDX_LP, %RDX_LP
> -	jz	L(stosb_return_zero)
> -	mov	%RDX_LP, %RCX_LP
> -	movzbl	%sil, %eax
> -	mov	%RDI_LP, %RDX_LP
> -	rep	stosb
> -	mov	%RDX_LP, %RAX_LP
> -	ret
> -L(stosb_return_zero):
> -	movq	%rdi, %rax
> -	ret
> -END (__memset_erms)
> -#endif
> diff --git a/sysdeps/x86_64/multiarch/rtld-memset.S b/sysdeps/x86_64/multiarch/rtld-memset.S
> new file mode 100644
> index 0000000000..d912bfa7cc
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/rtld-memset.S
> @@ -0,0 +1,18 @@
> +/* Copyright (C) 2022 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +
> +#include "../memset.S"
> --
> 2.34.1
>
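For context on point 3 of the commit message: the non-multiarch memset.S now defines DEFAULT_IMPL_V1/V3/V4 and lets isa-default-impl.h pick one at compile time. A rough sketch of the dispatch that header performs follows; the DEFAULT_IMPL_V* names come from the patch above, while the MINIMUM_X86_ISA_LEVEL guard structure is an illustration of the mechanism, not the header's actual contents.

```c
/* Sketch of the compile-time selection done by isa-default-impl.h:
   include the best implementation that the compile-time ISA level
   already guarantees.  Illustrative only; the real glibc header
   differs in detail.  */
#if MINIMUM_X86_ISA_LEVEL >= 4
# include DEFAULT_IMPL_V4  /* "multiarch/memset-evex-unaligned-erms.S" */
#elif MINIMUM_X86_ISA_LEVEL >= 3
# include DEFAULT_IMPL_V3  /* "multiarch/memset-avx2-unaligned-erms.S" */
#else
# include DEFAULT_IMPL_V1  /* "multiarch/memset-sse2-unaligned-erms.S" */
#endif
```

Since no V2 default is defined, a v2 build falls through to the V1 (sse2) file, which is why that file's guard is `ISA_SHOULD_BUILD (2)`.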
On Wed, Jun 29, 2022 at 12:31 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Tue, Jun 28, 2022 at 8:27 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > [...]
> >    memset-avx512-unaligned-erms \
> > +  memset-erms \
>
> This change should be the part of the other memset-erms patch.

Fixed in V2.

> > [...]
>
> --
> H.J.
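On point 4, the X86_IFUNC_IMPL_ADD_V{2,3,4} wrappers can be thought of as compiling away ifunc-list entries that a higher compile-time ISA level makes unreachable. A sketch of the idea for the V3 case, with an illustrative expansion rather than the exact glibc definition:

```c
/* Illustrative sketch of an ISA-level-aware wrapper: emit the entry
   only when the compiled ISA level does not already guarantee an ISA
   level 4 replacement.  Not the actual glibc macro.  */
#if MINIMUM_X86_ISA_LEVEL <= 3
# define X86_IFUNC_IMPL_ADD_V3(array, i, func, test, impl) \
    IFUNC_IMPL_ADD (array, i, func, test, impl)
#else
# define X86_IFUNC_IMPL_ADD_V3(array, i, func, test, impl)
#endif
```

This is also why the sse2 entries carry the "Can be lowered to V1 if a V2 implementation is added" comment: they sit in the V2 wrapper only because no dedicated V2 implementation exists to replace them.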
diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S index a6eea61a4d..f4e1bab601 100644 --- a/sysdeps/x86_64/memset.S +++ b/sysdeps/x86_64/memset.S @@ -18,47 +18,18 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> -#define USE_WITH_SSE2 1 -#define VEC_SIZE 16 -#define MOV_SIZE 3 -#define RET_SIZE 1 +#define MEMSET_SYMBOL(p,s) memset +#define MEMSET_CHK_SYMBOL(p,s) p -#define VEC(i) xmm##i -#define VMOVU movups -#define VMOVA movaps +#define WMEMSET_SYMBOL(p,s) __wmemset +#define WMEMSET_CHK_SYMBOL(p,s) p -# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \ - movd d, %xmm0; \ - movq r, %rax; \ - punpcklbw %xmm0, %xmm0; \ - punpcklwd %xmm0, %xmm0; \ - pshufd $0, %xmm0, %xmm0 +#define DEFAULT_IMPL_V1 "multiarch/memset-sse2-unaligned-erms.S" +#define DEFAULT_IMPL_V3 "multiarch/memset-avx2-unaligned-erms.S" +#define DEFAULT_IMPL_V4 "multiarch/memset-evex-unaligned-erms.S" -# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \ - movd d, %xmm0; \ - pshufd $0, %xmm0, %xmm0; \ - movq r, %rax - -# define MEMSET_VDUP_TO_VEC0_HIGH() -# define MEMSET_VDUP_TO_VEC0_LOW() - -# define WMEMSET_VDUP_TO_VEC0_HIGH() -# define WMEMSET_VDUP_TO_VEC0_LOW() - -#define SECTION(p) p - -#ifndef MEMSET_SYMBOL -# define MEMSET_CHK_SYMBOL(p,s) p -# define MEMSET_SYMBOL(p,s) memset -#endif - -#ifndef WMEMSET_SYMBOL -# define WMEMSET_CHK_SYMBOL(p,s) p -# define WMEMSET_SYMBOL(p,s) __wmemset -#endif - -#include "multiarch/memset-vec-unaligned-erms.S" +#include "isa-default-impl.h" libc_hidden_builtin_def (memset) diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile index 666ee4d5d6..0525cef3fe 100644 --- a/sysdeps/x86_64/multiarch/Makefile +++ b/sysdeps/x86_64/multiarch/Makefile @@ -29,6 +29,7 @@ sysdep_routines += \ memset-avx2-unaligned-erms-rtm \ memset-avx512-no-vzeroupper \ memset-avx512-unaligned-erms \ + memset-erms \ memset-evex-unaligned-erms \ memset-sse2-unaligned-erms \ rawmemchr-avx2 \ diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c index 3ae4d49bee..4450e2c593 100644 --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c @@ -209,94 +209,97 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, __memset_chk, IFUNC_IMPL_ADD (array, i, __memset_chk, 1, __memset_chk_erms) - IFUNC_IMPL_ADD (array, i, __memset_chk, 1, - __memset_chk_sse2_unaligned) - IFUNC_IMPL_ADD (array, i, __memset_chk, 1, - __memset_chk_sse2_unaligned_erms) - IFUNC_IMPL_ADD (array, i, __memset_chk, + X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __memset_chk_avx512_unaligned_erms) + X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __memset_chk_avx512_unaligned) + X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk, + CPU_FEATURE_USABLE (AVX512F), + __memset_chk_avx512_no_vzeroupper) + X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __memset_chk_evex_unaligned) + X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __memset_chk_evex_unaligned_erms) + X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk, CPU_FEATURE_USABLE (AVX2), __memset_chk_avx2_unaligned) - IFUNC_IMPL_ADD (array, i, 
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 3ae4d49bee..4450e2c593 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -209,94 +209,97 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, __memset_chk,
 	      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
 			      __memset_chk_erms)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-			      __memset_chk_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-			      __memset_chk_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+				     (CPU_FEATURE_USABLE (AVX512VL)
+				      && CPU_FEATURE_USABLE (AVX512BW)
+				      && CPU_FEATURE_USABLE (BMI2)),
+				     __memset_chk_avx512_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+				     (CPU_FEATURE_USABLE (AVX512VL)
+				      && CPU_FEATURE_USABLE (AVX512BW)
+				      && CPU_FEATURE_USABLE (BMI2)),
+				     __memset_chk_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+				     CPU_FEATURE_USABLE (AVX512F),
+				     __memset_chk_avx512_no_vzeroupper)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+				     (CPU_FEATURE_USABLE (AVX512VL)
+				      && CPU_FEATURE_USABLE (AVX512BW)
+				      && CPU_FEATURE_USABLE (BMI2)),
+				     __memset_chk_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+				     (CPU_FEATURE_USABLE (AVX512VL)
+				      && CPU_FEATURE_USABLE (AVX512BW)
+				      && CPU_FEATURE_USABLE (BMI2)),
+				     __memset_chk_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
 			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_chk_avx2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
 			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_chk_avx2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
 			      (CPU_FEATURE_USABLE (AVX2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memset_chk_avx2_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
 			      (CPU_FEATURE_USABLE (AVX2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memset_chk_avx2_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+	      /* Can be lowered to V1 if a V2 implementation is added.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+				     __memset_chk_sse2_unaligned)
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+				     __memset_chk_sse2_unaligned_erms)
+	      )
+#endif
+
+  /* Support sysdeps/x86_64/multiarch/memset.c.  */
+  IFUNC_IMPL (i, name, memset,
+	      IFUNC_IMPL_ADD (array, i, memset, 1,
+			      __memset_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_chk_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+			      __memset_avx512_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_chk_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+			      __memset_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+				     CPU_FEATURE_USABLE (AVX512F),
+				     __memset_avx512_no_vzeroupper)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_chk_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
+			      __memset_evex_unaligned)
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_chk_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      CPU_FEATURE_USABLE (AVX512F),
-			      __memset_chk_avx512_no_vzeroupper)
-	      )
-#endif
-
-  /* Support sysdeps/x86_64/multiarch/memset.c.  */
-  IFUNC_IMPL (i, name, memset,
-	      IFUNC_IMPL_ADD (array, i, memset, 1,
-			      __memset_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memset, 1,
-			      __memset_sse2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_erms)
-	      IFUNC_IMPL_ADD (array, i, memset,
+			      __memset_evex_unaligned_erms)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
 			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_avx2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memset,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
 			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_avx2_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memset,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memset_avx2_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, memset,
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
 			      (CPU_FEATURE_USABLE (AVX2)
 			       && CPU_FEATURE_USABLE (RTM)),
 			      __memset_avx2_unaligned_erms_rtm)
-	      IFUNC_IMPL_ADD (array, i, memset,
-			      (CPU_FEATURE_USABLE (AVX512VL)
-			       && CPU_FEATURE_USABLE (AVX512BW)
-			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memset,
-			      (CPU_FEATURE_USABLE (AVX512VL)
-			       && CPU_FEATURE_USABLE (AVX512BW)
-			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_evex_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memset,
-			      (CPU_FEATURE_USABLE (AVX512VL)
-			       && CPU_FEATURE_USABLE (AVX512BW)
-			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memset,
-			      (CPU_FEATURE_USABLE (AVX512VL)
-			       && CPU_FEATURE_USABLE (AVX512BW)
-			       && CPU_FEATURE_USABLE (BMI2)),
-			      __memset_avx512_unaligned)
-	      IFUNC_IMPL_ADD (array, i, memset,
-			      CPU_FEATURE_USABLE (AVX512F),
-			      __memset_avx512_no_vzeroupper)
+	      /* Can be lowered to V1 if a V2 implementation is added.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+				     __memset_sse2_unaligned)
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+				     __memset_sse2_unaligned_erms)
 	      )
 
   /* Support sysdeps/x86_64/multiarch/rawmemchr.c.  */
@@ -820,25 +823,26 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/wmemset.c.  */
   IFUNC_IMPL (i, name, wmemset,
-	      IFUNC_IMPL_ADD (array, i, wmemset, 1,
-			      __wmemset_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, wmemset,
-			      CPU_FEATURE_USABLE (AVX2),
-			      __wmemset_avx2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, wmemset,
-			      (CPU_FEATURE_USABLE (AVX2)
-			       && CPU_FEATURE_USABLE (RTM)),
-			      __wmemset_avx2_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, wmemset,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wmemset_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, wmemset,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __wmemset_avx512_unaligned))
+			      __wmemset_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+				     CPU_FEATURE_USABLE (AVX2),
+				     __wmemset_avx2_unaligned)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+				     (CPU_FEATURE_USABLE (AVX2)
+				      && CPU_FEATURE_USABLE (RTM)),
+				     __wmemset_avx2_unaligned_rtm)
+	      /* Can be lowered to V1 if a V2 implementation is added.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, wmemset, 1,
+				     __wmemset_sse2_unaligned))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
@@ -1023,25 +1027,26 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/wmemset_chk.c.  */
   IFUNC_IMPL (i, name, __wmemset_chk,
-	      IFUNC_IMPL_ADD (array, i, __wmemset_chk, 1,
-			      __wmemset_chk_sse2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-			      CPU_FEATURE_USABLE (AVX2),
-			      __wmemset_chk_avx2_unaligned)
-	      IFUNC_IMPL_ADD (array, i, wmemset_chk,
-			      (CPU_FEATURE_USABLE (AVX2)
-			       && CPU_FEATURE_USABLE (RTM)),
-			      __wmemset_chk_avx2_unaligned_rtm)
-	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __wmemset_chk_evex_unaligned)
-	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
+	      X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
 			       && CPU_FEATURE_USABLE (BMI2)),
-			      __wmemset_chk_avx512_unaligned))
+			      __wmemset_chk_avx512_unaligned)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+				     CPU_FEATURE_USABLE (AVX2),
+				     __wmemset_chk_avx2_unaligned)
+	      X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+				     (CPU_FEATURE_USABLE (AVX2)
+				      && CPU_FEATURE_USABLE (RTM)),
+				     __wmemset_chk_avx2_unaligned_rtm)
+	      /* Can be lowered to V1 if a V2 implementation is added.  */
+	      X86_IFUNC_IMPL_ADD_V2 (array, i, __wmemset_chk, 1,
+				     __wmemset_chk_sse2_unaligned))
 #endif
 
   return 0;
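The ifunc-memset.h and ifunc-wmemset.h selector changes below lean on
two wrapper predicates.  Their likely shape, sketched under assumed
names (the authoritative definitions live in <isa-level.h>):

/* When the compiled minimum ISA level already implies FEATURE, the
   test folds to a compile-time 1, so the compiler can delete the
   runtime check and the branches only a lower-level CPU could take.
   The FEATURE##_X86_ISA_LEVEL constants are assumed for this sketch.  */
#define X86_ISA_CPU_FEATURE_USABLE_P(ptr, FEATURE) \
  (MINIMUM_X86_ISA_LEVEL >= FEATURE##_X86_ISA_LEVEL \
   || CPU_FEATURE_USABLE_P (ptr, FEATURE))

X86_ISA_CPU_FEATURES_ARCH_P additionally takes a `!' (or empty)
argument, so a negated arch-preference test such as
!Prefer_No_VZEROUPPER can be written through the macro and still
constant-fold with the intended polarity.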
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 64d179913c..ed514976aa 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
@@ -20,10 +20,19 @@
 #include <init-arch.h>
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+	attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+	attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+	attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
 	attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
 	attribute_hidden;
@@ -31,31 +40,26 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
 	attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms_rtm)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
-	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
 	attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-	  && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+	  && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+	  && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (avx512_unaligned_erms);
@@ -66,11 +70,11 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx512_no_vzeroupper);
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-	  && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+	  && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+	  && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (evex_unaligned_erms);
@@ -86,7 +90,8 @@ IFUNC_SELECTOR (void)
 	  return OPTIMIZE (avx2_unaligned_rtm);
 	}
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+				       Prefer_No_VZEROUPPER, !))
 	{
 	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	    return OPTIMIZE (avx2_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
index 87c48e2387..3810c719c6 100644
--- a/sysdeps/x86_64/multiarch/ifunc-wmemset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
@@ -18,22 +18,26 @@
 
 #include <init-arch.h>
 
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
 	attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2)
+      && X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+				      AVX_Fast_Unaligned_Load, !))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
 	{
 	  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
 	    return OPTIMIZE (avx512_unaligned);
@@ -44,7 +48,8 @@ IFUNC_SELECTOR (void)
       if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
 	return OPTIMIZE (avx2_unaligned_rtm);
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+				       Prefer_No_VZEROUPPER, !))
 	return OPTIMIZE (avx2_unaligned);
     }
 
diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
index c0bf2875d0..a9054a9122 100644
--- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (3)
+
 # define USE_WITH_AVX2	1
 
 # define VEC_SIZE	32
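The `#if ISA_SHOULD_BUILD (N)' guard just introduced for the AVX2 file
(and repeated in the remaining memset-*.S hunks below) implements point
2 of the commit message.  Its semantics can be sketched as follows; an
assumed simplification of the real <isa-level.h> macro, which also
restricts which objects the code is built into:

/* Assemble a level-N implementation only while the build's minimum
   ISA level does not already guarantee a level-(N+1) replacement.
   For N == 3 (AVX2): built for v1..v3 baselines, skipped for v4,
   where the EVEX/AVX-512 versions are always available.  */
#define ISA_SHOULD_BUILD(isa_build_level) \
  ((isa_build_level) + 1 > MINIMUM_X86_ISA_LEVEL)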
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
index c5be8f57ef..8cc9c16d73 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
@@ -17,8 +17,10 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
 
-#if IS_IN (libc)
 #include "asm-syntax.h"
 #ifndef MEMSET
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 5241216a77..47623b8ee8 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_AVX512	1
 
 # define VEC_SIZE	64
@@ -30,8 +33,15 @@
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)		p##.evex512
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)	p##_avx512_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)	p##_avx512_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE	1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-erms.S b/sysdeps/x86_64/multiarch/memset-erms.S
new file mode 100644
index 0000000000..1fce0c9fcc
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset-erms.S
@@ -0,0 +1,25 @@
+#include <sysdep.h>
+
+#if defined USE_MULTIARCH && IS_IN (libc)
+	.text
+ENTRY (__memset_chk_erms)
+	cmp	%RDX_LP, %RCX_LP
+	jb	HIDDEN_JUMPTARGET (__chk_fail)
+END (__memset_chk_erms)
+
+/* Only used to measure performance of REP STOSB.  */
+ENTRY (__memset_erms)
+	/* Skip zero length.  */
+	test	%RDX_LP, %RDX_LP
+	jz	L(stosb_return_zero)
+	mov	%RDX_LP, %RCX_LP
+	movzbl	%sil, %eax
+	mov	%RDI_LP, %RDX_LP
+	rep	stosb
+	mov	%RDX_LP, %RAX_LP
+	ret
+L(stosb_return_zero):
+	movq	%rdi, %rax
+	ret
+END (__memset_erms)
+#endif
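The new memset-erms.S above holds the REP STOSB benchmark entry points
moved out of memset-vec-unaligned-erms.S (see the deletion further
down), so they are no longer tied to the VEC_SIZE == 16 build of that
file.  Functionally, __memset_erms is just memset; an illustrative C
model, not the real implementation:

/* Hypothetical C rendering of __memset_erms: the hardware performs
   this byte loop internally via `rep stosb'.  */
static void *
memset_erms_model (void *dst, int c, size_t n)
{
  unsigned char *p = dst;
  while (n--)
    *p++ = (unsigned char) c;	/* rep stosb */
  return dst;			/* memset returns its first argument */
}

The zero-length early exit in the assembly simply skips the register
shuffling around `rep stosb' when there is nothing to store.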
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index 6370021506..ac4b2d2d50 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_EVEX	1
 
 # define VEC_SIZE	32
@@ -30,8 +33,15 @@
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)		p##.evex
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)	p##_evex_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)	p##_evex_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE	1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
index 3d92f6993a..44f9b8888b 100644
--- a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
@@ -17,22 +17,51 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <sysdep.h>
-#include <shlib-compat.h>
+#include <isa-level.h>
 
-#if IS_IN (libc)
-# define MEMSET_SYMBOL(p,s)	p##_sse2_##s
-# define WMEMSET_SYMBOL(p,s)	p##_sse2_##s
+/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
   so we need this to build for ISA V2 builds.  */
+#if ISA_SHOULD_BUILD (2)
 
-# ifdef SHARED
-#  undef libc_hidden_builtin_def
-#  define libc_hidden_builtin_def(name)
+# include <sysdep.h>
+# define USE_WITH_SSE2	1
+
+# define VEC_SIZE	16
+# define MOV_SIZE	3
+# define RET_SIZE	1
+
+# define VEC(i)		xmm##i
+# define VMOVU     movups
+# define VMOVA     movaps
+
+# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+    movd d, %xmm0; \
+    movq r, %rax; \
+    punpcklbw %xmm0, %xmm0; \
+    punpcklwd %xmm0, %xmm0; \
+    pshufd $0, %xmm0, %xmm0
+
+# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+    movd d, %xmm0; \
+    pshufd $0, %xmm0, %xmm0; \
+    movq r, %rax
+
+# define MEMSET_VDUP_TO_VEC0_HIGH()
+# define MEMSET_VDUP_TO_VEC0_LOW()
+
+# define WMEMSET_VDUP_TO_VEC0_HIGH()
+# define WMEMSET_VDUP_TO_VEC0_LOW()
+
+# define SECTION(p)		p
+
+# ifndef MEMSET_SYMBOL
+#  define MEMSET_SYMBOL(p,s)	p##_sse2_##s
 # endif
 
-# undef weak_alias
-# define weak_alias(original, alias)
-# undef strong_alias
-# define strong_alias(ignored1, ignored2)
-#endif
+# ifndef WMEMSET_SYMBOL
+#  define WMEMSET_SYMBOL(p,s)	p##_sse2_##s
+# endif
+
+# include "memset-vec-unaligned-erms.S"
 
-#include <sysdeps/x86_64/memset.S>
+#endif
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index d98c613651..905d0fa464 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -430,26 +430,3 @@ L(between_2_3):
 #endif
 	ret
 END (MEMSET_SYMBOL (__memset, unaligned_erms))
-
-#if defined USE_MULTIARCH && IS_IN (libc) && VEC_SIZE == 16
-ENTRY (__memset_chk_erms)
-	cmp	%RDX_LP, %RCX_LP
-	jb	HIDDEN_JUMPTARGET (__chk_fail)
-END (__memset_chk_erms)
-
-/* Only used to measure performance of REP STOSB.  */
-ENTRY (__memset_erms)
-	/* Skip zero length.  */
-	test	%RDX_LP, %RDX_LP
-	jz	L(stosb_return_zero)
-	mov	%RDX_LP, %RCX_LP
-	movzbl	%sil, %eax
-	mov	%RDI_LP, %RDX_LP
-	rep	stosb
-	mov	%RDX_LP, %RAX_LP
-	ret
-L(stosb_return_zero):
-	movq	%rdi, %rax
-	ret
-END (__memset_erms)
-#endif
diff --git a/sysdeps/x86_64/multiarch/rtld-memset.S b/sysdeps/x86_64/multiarch/rtld-memset.S
new file mode 100644
index 0000000000..d912bfa7cc
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rtld-memset.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "../memset.S"
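The rtld-memset.S shim completes point 3 of the commit message: the
dynamic loader compiles the plain memset.S, which now only defines the
MEMSET*/WMEMSET* symbol macros and the DEFAULT_IMPL_V* paths before
deferring to isa-default-impl.h.  That header presumably reduces to a
chain along these lines (assumed shape; a v2 build falls back to the V1
file because this series adds no V2 memset):

#if MINIMUM_X86_ISA_LEVEL >= 4
# include DEFAULT_IMPL_V4
#elif MINIMUM_X86_ISA_LEVEL == 3
# include DEFAULT_IMPL_V3
#else
# include DEFAULT_IMPL_V1	/* covers the v1 and v2 baselines */
#endif

So non-multiarch and rtld builds statically get the best implementation
their ISA baseline allows, with no ifunc resolver involved.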