Message ID | 20220615035709.609517-1-goldstein.w.n@gmail.com |
---|---|
State | New |
Headers | show |
Series | [v2] x86: Cleanup bounds checking in large memcpy case | expand |
On Tue, Jun 14, 2022 at 8:57 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote: > > 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x). > Previously was using `__x86_rep_movsb_threshold` and should > have been using `__x86_shared_non_temporal_threshold`. > > 2. Avoid reloading __x86_shared_non_temporal_threshold before > the L(large_memcpy_4x) bounds check. > > 3. Document the second bounds check for L(large_memcpy_4x) > more clearly. > --- > manual/tunables.texi | 2 +- > sysdeps/x86/dl-cacheinfo.h | 2 +- > .../multiarch/memmove-vec-unaligned-erms.S | 29 ++++++++++++++----- > 3 files changed, 23 insertions(+), 10 deletions(-) > > diff --git a/manual/tunables.texi b/manual/tunables.texi > index 1482412078..49daf3eb4a 100644 > --- a/manual/tunables.texi > +++ b/manual/tunables.texi > @@ -47,7 +47,7 @@ glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff) > glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647) > glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff) > glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff) > -glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff) > +glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff) > glibc.cpu.x86_shstk: > glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff) > glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647) > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h > index cc3b840f9c..858ff8a135 100644 > --- a/sysdeps/x86/dl-cacheinfo.h > +++ b/sysdeps/x86/dl-cacheinfo.h > @@ -932,7 +932,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) > TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX); > TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX); > TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold, > - 0, SIZE_MAX); > + 0, SIZE_MAX >> 4); Please add a comment to describe where >> 4 comes from. 
> TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold, > minimum_rep_movsb_threshold, SIZE_MAX); > TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1, > diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > index af51177d5d..d1518b8bab 100644 > --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > @@ -118,7 +118,13 @@ > # define LARGE_LOAD_SIZE (VEC_SIZE * 4) > #endif > > -/* Amount to shift rdx by to compare for memcpy_large_4x. */ > +/* Amount to shift __x86_shared_non_temporal_threshold by for > + bound for memcpy_large_4x. This is essentially use to to > + indicate that the copy is far beyond the scope of L3 > + (assuming no user config x86_non_temporal_threshold) and to > + use a more aggressively unrolled loop. NB: before > + increasing the value also update initialization of > + x86_non_temporal_threshold. */ > #ifndef LOG_4X_MEMCPY_THRESH > # define LOG_4X_MEMCPY_THRESH 4 > #endif > @@ -724,9 +730,14 @@ L(skip_short_movsb_check): > .p2align 4,, 10 > #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc) > L(large_memcpy_2x_check): > - cmp __x86_rep_movsb_threshold(%rip), %RDX_LP > - jb L(more_8x_vec_check) > + /* Entry from L(large_memcpy_2x) has a redundant load of > + __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x) > + is only use for the non-erms memmove which is generally less > + common. */ > L(large_memcpy_2x): > + mov __x86_shared_non_temporal_threshold(%rip), %R11_LP > + cmp %R11_LP, %RDX_LP > + jb L(more_8x_vec_check) > /* To reach this point it is impossible for dst > src and > overlap. Remaining to check is src > dst and overlap. rcx > already contains dst - src. Negate rcx to get src - dst. If > @@ -774,18 +785,21 @@ L(large_memcpy_2x): > /* ecx contains -(dst - src). not ecx will return dst - src - 1 > which works for testing aliasing. 
*/ > notl %ecx > + movq %rdx, %r10 > testl $(PAGE_SIZE - VEC_SIZE * 8), %ecx > jz L(large_memcpy_4x) > > - movq %rdx, %r10 > - shrq $LOG_4X_MEMCPY_THRESH, %r10 > - cmp __x86_shared_non_temporal_threshold(%rip), %r10 > + /* r11 has __x86_shared_non_temporal_threshold. Shift it left > + by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold. > + */ > + shlq $LOG_4X_MEMCPY_THRESH, %r11 > + cmp %r11, %rdx > jae L(large_memcpy_4x) > > /* edx will store remainder size for copying tail. */ > andl $(PAGE_SIZE * 2 - 1), %edx > /* r10 stores outer loop counter. */ > - shrq $((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10 > + shrq $(LOG_PAGE_SIZE + 1), %r10 > /* Copy 4x VEC at a time from 2 pages. */ > .p2align 4 > L(loop_large_memcpy_2x_outer): > @@ -850,7 +864,6 @@ L(large_memcpy_2x_end): > > .p2align 4 > L(large_memcpy_4x): > - movq %rdx, %r10 > /* edx will store remainder size for copying tail. */ > andl $(PAGE_SIZE * 4 - 1), %edx > /* r10 stores outer loop counter. */ > -- > 2.34.1 >
On Wed, Jun 15, 2022 at 7:52 AM H.J. Lu <hjl.tools@gmail.com> wrote: > > On Tue, Jun 14, 2022 at 8:57 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote: > > > > 1. Fix incorrect lower-bound threshold in L(large_memcpy_2x). > > Previously was using `__x86_rep_movsb_threshold` and should > > have been using `__x86_shared_non_temporal_threshold`. > > > > 2. Avoid reloading __x86_shared_non_temporal_threshold before > > the L(large_memcpy_4x) bounds check. > > > > 3. Document the second bounds check for L(large_memcpy_4x) > > more clearly. > > --- > > manual/tunables.texi | 2 +- > > sysdeps/x86/dl-cacheinfo.h | 2 +- > > .../multiarch/memmove-vec-unaligned-erms.S | 29 ++++++++++++++----- > > 3 files changed, 23 insertions(+), 10 deletions(-) > > > > diff --git a/manual/tunables.texi b/manual/tunables.texi > > index 1482412078..49daf3eb4a 100644 > > --- a/manual/tunables.texi > > +++ b/manual/tunables.texi > > @@ -47,7 +47,7 @@ glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff) > > glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647) > > glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff) > > glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff) > > -glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff) > > +glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff) > > glibc.cpu.x86_shstk: > > glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff) > > glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647) > > diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h > > index cc3b840f9c..858ff8a135 100644 > > --- a/sysdeps/x86/dl-cacheinfo.h > > +++ b/sysdeps/x86/dl-cacheinfo.h > > @@ -932,7 +932,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) > > TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX); > > TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX); > > TUNABLE_SET_WITH_BOUNDS 
(x86_non_temporal_threshold, non_temporal_threshold, > > - 0, SIZE_MAX); > > + 0, SIZE_MAX >> 4); > > Please add a comment to describe where >> 4 comes from. Fixed in V3. > > > TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold, > > minimum_rep_movsb_threshold, SIZE_MAX); > > TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1, > > diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > > index af51177d5d..d1518b8bab 100644 > > --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > > +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S > > @@ -118,7 +118,13 @@ > > # define LARGE_LOAD_SIZE (VEC_SIZE * 4) > > #endif > > > > -/* Amount to shift rdx by to compare for memcpy_large_4x. */ > > +/* Amount to shift __x86_shared_non_temporal_threshold by for > > + bound for memcpy_large_4x. This is essentially use to to > > + indicate that the copy is far beyond the scope of L3 > > + (assuming no user config x86_non_temporal_threshold) and to > > + use a more aggressively unrolled loop. NB: before > > + increasing the value also update initialization of > > + x86_non_temporal_threshold. */ > > #ifndef LOG_4X_MEMCPY_THRESH > > # define LOG_4X_MEMCPY_THRESH 4 > > #endif > > @@ -724,9 +730,14 @@ L(skip_short_movsb_check): > > .p2align 4,, 10 > > #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc) > > L(large_memcpy_2x_check): > > - cmp __x86_rep_movsb_threshold(%rip), %RDX_LP > > - jb L(more_8x_vec_check) > > + /* Entry from L(large_memcpy_2x) has a redundant load of > > + __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x) > > + is only use for the non-erms memmove which is generally less > > + common. */ > > L(large_memcpy_2x): > > + mov __x86_shared_non_temporal_threshold(%rip), %R11_LP > > + cmp %R11_LP, %RDX_LP > > + jb L(more_8x_vec_check) > > /* To reach this point it is impossible for dst > src and > > overlap. 
Remaining to check is src > dst and overlap. rcx > > already contains dst - src. Negate rcx to get src - dst. If > > @@ -774,18 +785,21 @@ L(large_memcpy_2x): > > /* ecx contains -(dst - src). not ecx will return dst - src - 1 > > which works for testing aliasing. */ > > notl %ecx > > + movq %rdx, %r10 > > testl $(PAGE_SIZE - VEC_SIZE * 8), %ecx > > jz L(large_memcpy_4x) > > > > - movq %rdx, %r10 > > - shrq $LOG_4X_MEMCPY_THRESH, %r10 > > - cmp __x86_shared_non_temporal_threshold(%rip), %r10 > > + /* r11 has __x86_shared_non_temporal_threshold. Shift it left > > + by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold. > > + */ > > + shlq $LOG_4X_MEMCPY_THRESH, %r11 > > + cmp %r11, %rdx > > jae L(large_memcpy_4x) > > > > /* edx will store remainder size for copying tail. */ > > andl $(PAGE_SIZE * 2 - 1), %edx > > /* r10 stores outer loop counter. */ > > - shrq $((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10 > > + shrq $(LOG_PAGE_SIZE + 1), %r10 > > /* Copy 4x VEC at a time from 2 pages. */ > > .p2align 4 > > L(loop_large_memcpy_2x_outer): > > @@ -850,7 +864,6 @@ L(large_memcpy_2x_end): > > > > .p2align 4 > > L(large_memcpy_4x): > > - movq %rdx, %r10 > > /* edx will store remainder size for copying tail. */ > > andl $(PAGE_SIZE * 4 - 1), %edx > > /* r10 stores outer loop counter. */ > > -- > > 2.34.1 > > > > > -- > H.J.
diff --git a/manual/tunables.texi b/manual/tunables.texi index 1482412078..49daf3eb4a 100644 --- a/manual/tunables.texi +++ b/manual/tunables.texi @@ -47,7 +47,7 @@ glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff) glibc.elision.skip_lock_busy: 3 (min: -2147483648, max: 2147483647) glibc.malloc.top_pad: 0x0 (min: 0x0, max: 0xffffffffffffffff) glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff) -glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0xffffffffffffffff) +glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x0, max: 0x0fffffffffffffff) glibc.cpu.x86_shstk: glibc.cpu.hwcap_mask: 0x6 (min: 0x0, max: 0xffffffffffffffff) glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647) diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h index cc3b840f9c..858ff8a135 100644 --- a/sysdeps/x86/dl-cacheinfo.h +++ b/sysdeps/x86/dl-cacheinfo.h @@ -932,7 +932,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX); TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX); TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold, - 0, SIZE_MAX); + 0, SIZE_MAX >> 4); TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold, minimum_rep_movsb_threshold, SIZE_MAX); TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1, diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S index af51177d5d..d1518b8bab 100644 --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S @@ -118,7 +118,13 @@ # define LARGE_LOAD_SIZE (VEC_SIZE * 4) #endif -/* Amount to shift rdx by to compare for memcpy_large_4x. */ +/* Amount to shift __x86_shared_non_temporal_threshold by for + bound for memcpy_large_4x. 
This is essentially used to + indicate that the copy is far beyond the scope of L3 + (assuming no user config x86_non_temporal_threshold) and to + use a more aggressively unrolled loop. NB: before + increasing the value also update initialization of + x86_non_temporal_threshold. */ #ifndef LOG_4X_MEMCPY_THRESH # define LOG_4X_MEMCPY_THRESH 4 #endif @@ -724,9 +730,14 @@ L(skip_short_movsb_check): .p2align 4,, 10 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc) L(large_memcpy_2x_check): - cmp __x86_rep_movsb_threshold(%rip), %RDX_LP - jb L(more_8x_vec_check) + /* Entry from L(large_memcpy_2x) has a redundant load of + __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x) + is only used for the non-erms memmove which is generally less + common. */ L(large_memcpy_2x): + mov __x86_shared_non_temporal_threshold(%rip), %R11_LP + cmp %R11_LP, %RDX_LP + jb L(more_8x_vec_check) /* To reach this point it is impossible for dst > src and overlap. Remaining to check is src > dst and overlap. rcx already contains dst - src. Negate rcx to get src - dst. If @@ -774,18 +785,21 @@ L(large_memcpy_2x): /* ecx contains -(dst - src). not ecx will return dst - src - 1 which works for testing aliasing. */ notl %ecx + movq %rdx, %r10 testl $(PAGE_SIZE - VEC_SIZE * 8), %ecx jz L(large_memcpy_4x) - movq %rdx, %r10 - shrq $LOG_4X_MEMCPY_THRESH, %r10 - cmp __x86_shared_non_temporal_threshold(%rip), %r10 + /* r11 has __x86_shared_non_temporal_threshold. Shift it left + by LOG_4X_MEMCPY_THRESH to get L(large_memcpy_4x) threshold. + */ + shlq $LOG_4X_MEMCPY_THRESH, %r11 + cmp %r11, %rdx jae L(large_memcpy_4x) /* edx will store remainder size for copying tail. */ andl $(PAGE_SIZE * 2 - 1), %edx /* r10 stores outer loop counter. */ - shrq $((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10 + shrq $(LOG_PAGE_SIZE + 1), %r10 /* Copy 4x VEC at a time from 2 pages. 
*/ .p2align 4 L(loop_large_memcpy_2x_outer): @@ -850,7 +864,6 @@ L(large_memcpy_2x_end): .p2align 4 L(large_memcpy_4x): - movq %rdx, %r10 /* edx will store remainder size for copying tail. */ andl $(PAGE_SIZE * 4 - 1), %edx /* r10 stores outer loop counter. */