Message ID: 20210419163025.2285675-1-goldstein.w.n@gmail.com
State: New
Series: [v2,1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> No bug. This commit adds an optimized case for the less_vec memset path
> that uses the avx512vl/avx512bw mask store, avoiding the excessive
> branches. test-memset and test-wmemset are passing.
>
> Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> ---
>  sysdeps/x86_64/multiarch/ifunc-memset.h       |  6 ++-
>  .../multiarch/memset-avx512-unaligned-erms.S  |  2 +-
>  .../multiarch/memset-evex-unaligned-erms.S    |  2 +-
>  .../multiarch/memset-vec-unaligned-erms.S     | 52 +++++++++++++++----
>  4 files changed, 47 insertions(+), 15 deletions(-)
>
> [...]
>
> @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
>    if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
>      {
>        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> -	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> +	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> +	  && CPU_FEATURE_USABLE_P (cpu_features, BMI2))

Please also update ifunc-impl-list.c.

> diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> @@ -19,6 +19,6 @@
>  # define WMEMSET_SYMBOL(p,s)	p##_avx512_##s
> -
> +# define USE_LESS_VEC_MASKMOV	1

USE_LESS_VEC_MASKED_STORE

> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -213,11 +216,38 @@ L(loop):
> +# ifdef USE_LESS_VEC_MASKMOV
> +	/* Clear high bits from edi.  Only keeping bits relevant to page
> +	   cross check.  Using sall instead of andl saves 3 bytes.  Note
> +	   that we are using rax which is set in
> +	   MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.  */
> +	sall	$(32 - LOG_PAGE_SIZE), %edi
> +	/* Check if a VEC_SIZE load crosses a page.  Mask loads suffer serious
> +	   performance degradation when they have to fault suppress.  */
> +	cmpl	$((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi

Please use AND and CMP since AND has higher throughput.

> [...]
> --
> 2.29.2

Thanks.

--
H.J.
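For readers following the thread: the core trick in the hunk quoted above is a branchless small memset — build a mask whose low n bits are set with BZHI, then issue a single masked store. Below is a minimal C-intrinsics sketch of the idea (illustrative only, not glibc's code; the function name is made up). It assumes AVX512BW/AVX512VL plus BMI2 — hence the ifunc change — and that the caller has already ruled out a page cross:

```c
#include <immintrin.h>
#include <stddef.h>

/* Illustrative sketch of the masked-store path for n < 64 bytes.
   Assumes the 64-byte window at s does not cross a page boundary,
   since a fault-suppressed masked store is very slow.  */
static inline void
memset_less_vec_sketch (void *s, int c, size_t n)
{
  __m512i v = _mm512_set1_epi8 ((char) c);
  /* BZHI zeroes the bits of ~0 at positions n and above, leaving a
     mask with the low n bits set; unlike (1 << n) - 1 it is well
     defined even at n == 64.  */
  __mmask64 k = (__mmask64) _bzhi_u64 (~0ULL, (unsigned int) n);
  _mm512_mask_storeu_epi8 (s, k, v);  /* writes exactly n bytes */
}
```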
On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > [...]
> > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> >        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > -	   && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > +	   && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > +	   && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
>
> Please also update ifunc-impl-list.c.

Done.

> > +# define USE_LESS_VEC_MASKMOV	1
>
> USE_LESS_VEC_MASKED_STORE

Done.

> > +	sall	$(32 - LOG_PAGE_SIZE), %edi
> > +	/* Check if a VEC_SIZE load crosses a page.  Mask loads suffer serious
> > +	   performance degradation when they have to fault suppress.  */
> > +	cmpl	$((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
>
> Please use AND and CMP since AND has higher throughput.

AND uses more code size for VEC_SIZE=16/32 and just barely pushes
L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from AND
end up costing 16 bytes. Not aligning L(cross_page) to 16 also
introduces higher variance in benchmarks, so I think it has to be all
16 bytes.

As is, I don't think the throughput of AND / SAL is on the critical
path, so code size should win out. (With SAL, as opposed to AND, we can
also decode the MOV $-1, %ecx in the first cycle.)

What do you think?
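To make the trade-off concrete, here is a minimal C sketch (illustrative, not from the patch; 4 KiB pages and VEC_SIZE = 64 assumed) of the two equivalent page-cross predicates under discussion, SAL+CMP versus AND+CMP:

```c
#include <stdint.h>

#define PAGE_SIZE     4096u
#define LOG_PAGE_SIZE 12
#define VEC_SIZE      64u

/* SAL form (the v2 patch): shift the page offset into the high bits,
   discarding everything else, then compare against the same constant
   shifted the same way.  */
static int
crosses_page_sal (uint32_t addr)
{
  return (addr << (32 - LOG_PAGE_SIZE))
	 > ((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE));
}

/* AND form (the review suggestion): mask down to the page offset
   first.  Both forms depend only on the low 12 bits of addr, so they
   agree for every address.  */
static int
crosses_page_and (uint32_t addr)
{
  return (addr & (PAGE_SIZE - 1)) > (PAGE_SIZE - VEC_SIZE);
}
```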
On Mon, Apr 19, 2021 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > [...]
> > Please use AND and CMP since AND has higher throughput.
>
> AND uses more code size for VEC_SIZE=16/32 and just barely pushes
> L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from AND
> end up costing 16 bytes. Not aligning L(cross_page) to 16 also
> introduces higher variance in benchmarks, so I think it has to be all
> 16 bytes.
>
> As is, I don't think the throughput of AND / SAL is on the critical
> path, so code size should win out. (With SAL, as opposed to AND, we can
> also decode the MOV $-1, %ecx in the first cycle.)
>
> What do you think?

I prefer AND over SAL.  Something like:

diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 3a59d39267..763fb907b9 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -217,21 +217,17 @@ L(loop):
 	jne	L(loop)
 	VZEROUPPER_SHORT_RETURN
 
-	.p2align 4
+	/* NB: Don't align this branch target to reduce code size.  */
 L(less_vec):
 	/* Less than 1 VEC.  */
 # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
 #  error Unsupported VEC_SIZE!
 # endif
 # ifdef USE_LESS_VEC_MASK_STORE
-	/* Clear high bits from edi.  Only keeping bits relevant to page
-	   cross check.  Using sall instead of andl saves 3 bytes.  Note
-	   that we are using rax which is set in
-	   MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.  */
-	sall	$(32 - LOG_PAGE_SIZE), %edi
-	/* Check if a VEC_SIZE load crosses a page.  Mask loads suffer serious
+	/* Check if a VEC_SIZE store crosses a page.  Mask stores suffer serious
 	   performance degradation when they have to fault suppress.  */
-	cmpl	$((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+	andl	$(PAGE_SIZE - 1), %edi
+	cmpl	$(PAGE_SIZE - VEC_SIZE), %edi
 	ja	L(cross_page)
 # if VEC_SIZE > 32
 	movq	$-1, %rcx

Thanks.

--
H.J.
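As a quick sanity check of the equivalence assumed on both sides of this exchange, a standalone loop over every possible page offset (again with 4 KiB pages and VEC_SIZE = 64 assumed, purely for illustration) confirms the two predicates never disagree:

```c
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uint32_t page = 4096u, vec = 64u, sh = 32 - 12;
  /* Both predicates depend only on the low 12 bits of the address,
     so checking offsets 0..4095 is exhaustive.  */
  for (uint32_t a = 0; a < page; a++)
    {
      int via_sal = (a << sh) > ((page - vec) << sh);
      int via_and = (a & (page - 1)) > (page - vec);
      if (via_sal != via_and)
	{
	  printf ("mismatch at offset %u\n", a);
	  return 1;
	}
    }
  puts ("SAL and AND forms agree on all page offsets");
  return 0;
}
```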
On Mon, Apr 19, 2021 at 1:39 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > [...]
> > What do you think?
>
> I prefer AND over SAL.  Something like:
>
> [...]
> -	.p2align 4
> +	/* NB: Don't align this branch target to reduce code size.  */

Not aligning this branch can harm performance. The median stays about
the same, but variance / geomean go up.

> L(less_vec):
> [...]
> -	sall	$(32 - LOG_PAGE_SIZE), %edi
> -	/* Check if a VEC_SIZE load crosses a page.  Mask loads suffer serious
> +	/* Check if a VEC_SIZE store crosses a page.  Mask stores suffer serious
> 	   performance degradation when they have to fault suppress.  */
> -	cmpl	$((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
> +	andl	$(PAGE_SIZE - 1), %edi
> +	cmpl	$(PAGE_SIZE - VEC_SIZE), %edi
> 	ja	L(cross_page)
> [...]
>
> Thanks.
>
> --
> H.J.
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 502f946a84..eda5640541 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
@@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
       if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+	  && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (avx512_unaligned_erms);
@@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
   if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     {
       if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+	  && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (evex_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 22e7b187c8..d03460be93 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -19,6 +19,6 @@
 # define SECTION(p)		p##.evex512
 # define MEMSET_SYMBOL(p,s)	p##_avx512_##s
 # define WMEMSET_SYMBOL(p,s)	p##_avx512_##s
-
+# define USE_LESS_VEC_MASKMOV	1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index ae0a4d6e46..eb3541ef60 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -19,6 +19,6 @@
 # define SECTION(p)		p##.evex
 # define MEMSET_SYMBOL(p,s)	p##_evex_##s
 # define WMEMSET_SYMBOL(p,s)	p##_evex_##s
-
+# define USE_LESS_VEC_MASKMOV	1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 584747f1a1..6b02e87f48 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -63,6 +63,9 @@
 # endif
 #endif
 
+#define PAGE_SIZE 4096
+#define LOG_PAGE_SIZE 12
+
 #ifndef SECTION
 # error SECTION is not defined!
 #endif
@@ -213,11 +216,38 @@ L(loop):
	cmpq	%rcx, %rdx
	jne	L(loop)
	VZEROUPPER_SHORT_RETURN
+
+	.p2align 4
 L(less_vec):
	/* Less than 1 VEC.  */
 # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
 #  error Unsupported VEC_SIZE!
 # endif
+# ifdef USE_LESS_VEC_MASKMOV
+	/* Clear high bits from edi.  Only keeping bits relevant to page
+	   cross check.  Using sall instead of andl saves 3 bytes.  Note
+	   that we are using rax which is set in
+	   MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.  */
+	sall	$(32 - LOG_PAGE_SIZE), %edi
+	/* Check if a VEC_SIZE load crosses a page.  Mask loads suffer serious
+	   performance degradation when they have to fault suppress.  */
+	cmpl	$((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+	ja	L(cross_page)
+# if VEC_SIZE > 32
+	movq	$-1, %rcx
+	bzhiq	%rdx, %rcx, %rcx
+	kmovq	%rcx, %k1
+# else
+	movl	$-1, %ecx
+	bzhil	%edx, %ecx, %ecx
+	kmovd	%ecx, %k1
+# endif
+	vmovdqu8 %VEC(0), (%rax) {%k1}
+	VZEROUPPER_RETURN
+
+	.p2align 4
+L(cross_page):
+# endif
 # if VEC_SIZE > 32
	cmpb	$32, %dl
	jae	L(between_32_63)
@@ -234,36 +264,36 @@ L(less_vec):
	cmpb	$1, %dl
	ja	L(between_2_3)
	jb	1f
-	movb	%cl, (%rdi)
+	movb	%cl, (%rax)
 1:
	VZEROUPPER_RETURN
 # if VEC_SIZE > 32
	/* From 32 to 63.  No branch when size == 32.  */
 L(between_32_63):
-	VMOVU	%YMM0, -32(%rdi,%rdx)
-	VMOVU	%YMM0, (%rdi)
+	VMOVU	%YMM0, -32(%rax,%rdx)
+	VMOVU	%YMM0, (%rax)
	VZEROUPPER_RETURN
 # endif
 # if VEC_SIZE > 16
	/* From 16 to 31.  No branch when size == 16.  */
 L(between_16_31):
-	VMOVU	%XMM0, -16(%rdi,%rdx)
-	VMOVU	%XMM0, (%rdi)
+	VMOVU	%XMM0, -16(%rax,%rdx)
+	VMOVU	%XMM0, (%rax)
	VZEROUPPER_RETURN
 # endif
	/* From 8 to 15.  No branch when size == 8.  */
 L(between_8_15):
-	movq	%rcx, -8(%rdi,%rdx)
-	movq	%rcx, (%rdi)
+	movq	%rcx, -8(%rax,%rdx)
+	movq	%rcx, (%rax)
	VZEROUPPER_RETURN
 L(between_4_7):
	/* From 4 to 7.  No branch when size == 4.  */
-	movl	%ecx, -4(%rdi,%rdx)
-	movl	%ecx, (%rdi)
+	movl	%ecx, -4(%rax,%rdx)
+	movl	%ecx, (%rax)
	VZEROUPPER_RETURN
 L(between_2_3):
	/* From 2 to 3.  No branch when size == 2.  */
-	movw	%cx, -2(%rdi,%rdx)
-	movw	%cx, (%rdi)
+	movw	%cx, -2(%rax,%rdx)
+	movw	%cx, (%rax)
	VZEROUPPER_RETURN
 END (MEMSET_SYMBOL (__memset, unaligned_erms))
No bug. This commit adds an optimized case for the less_vec memset path
that uses the avx512vl/avx512bw mask store, avoiding the excessive
branches. test-memset and test-wmemset are passing.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
 sysdeps/x86_64/multiarch/ifunc-memset.h       |  6 ++-
 .../multiarch/memset-avx512-unaligned-erms.S  |  2 +-
 .../multiarch/memset-evex-unaligned-erms.S    |  2 +-
 .../multiarch/memset-vec-unaligned-erms.S     | 52 +++++++++++++++----
 4 files changed, 47 insertions(+), 15 deletions(-)
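Finally, a small standalone harness (illustrative only, not part of the patch; needs AVX512BW/AVX512VL and BMI2 hardware, built with e.g. -mavx512bw -mavx512vl -mbmi2) can confirm that the BZHI-derived mask used in the patch's bzhiq/kmovq sequence makes the masked store write exactly n bytes for every n below VEC_SIZE:

```c
#include <immintrin.h>
#include <string.h>
#include <assert.h>
#include <stddef.h>

int
main (void)
{
  unsigned char buf[128], ref[128];
  for (size_t n = 0; n < 64; n++)
    {
      memset (buf, 0xaa, sizeof buf);
      memset (ref, 0xaa, sizeof ref);
      /* Mask with the low n bits set, as in the patch.  */
      __mmask64 k = (__mmask64) _bzhi_u64 (~0ULL, (unsigned int) n);
      _mm512_mask_storeu_epi8 (buf, k, _mm512_set1_epi8 (0x55));
      memset (ref, 0x55, n);  /* reference behavior */
      assert (memcmp (buf, ref, sizeof buf) == 0);
    }
  return 0;
}
```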