diff mbox

[RFC] Improve 64bit memset performance for Haswell CPU with AVX2 instruction

Message ID 1396596849-21891-1-git-send-email-ling.ma.program@gmail.com
State New
Headers show

Commit Message

Ma Ling April 4, 2014, 7:34 a.m. UTC
From: Ling Ma <ling.ml@alibaba-inc.com>

In this patch we manage to reduce branch misprediction by
avoiding branch instructions and by forcing the destination to be aligned
with AVX instructions.

The CPU2006 403.gcc benchmark also indicates this patch improves performance
from 25% to 59% compared with the original memset implemented with SSE2.

				memset-AVX		memset-SSE2		AVX vs SSE2
gcc.166.i		1903840848		2496605698		1.311352102
gcc.200.i		3486726251		4842279301		1.388775302
gcc.cp-decl.i	1735818099		2275969543		1.311179751
gcc.c-typeck.i	9444855501		12128140476		1.284100162
gcc.expr2.i		4959419745		6476071239		1.305812287
gcc.expr.i		3370511367		4422307808		1.312058417
gcc.g23.i		5025610151		6317892864		1.257139466
gcc.s04.i		8495123899		10900655250		1.283166129
gcc.scilab.i	1209228901		1925675294		1.592482029

---
 In this version we removed prefetch and appended vmovd.

 ChangeLog                              |   9 ++
 sysdeps/x86_64/multiarch/Makefile      |   4 +-
 sysdeps/x86_64/multiarch/memset-avx2.S | 192 +++++++++++++++++++++++++++++++++
 sysdeps/x86_64/multiarch/memset.S      |  59 ++++++++++
 sysdeps/x86_64/multiarch/memset_chk.S  |  44 ++++++++
 5 files changed, 307 insertions(+), 1 deletion(-)
 create mode 100644 sysdeps/x86_64/multiarch/memset-avx2.S
 create mode 100644 sysdeps/x86_64/multiarch/memset.S
 create mode 100644 sysdeps/x86_64/multiarch/memset_chk.S

Comments

Richard Henderson April 4, 2014, 2:14 p.m. UTC | #1
On 04/04/2014 12:34 AM, ling.ma.program@gmail.com wrote:
> +	cmp	$4096, %rdx
> +	jmp	L(gobble_data)

Typo for ja?


r~
Ma Ling April 4, 2014, 2:50 p.m. UTC | #2
Sorry, I will resend the memset patch & comparison results soon

Thanks
Ling


2014-04-04 22:14 GMT+08:00, Richard Henderson <rth@twiddle.net>:
> On 04/04/2014 12:34 AM, ling.ma.program@gmail.com wrote:
>> +	cmp	$4096, %rdx
>> +	jmp	L(gobble_data)
>
> Typo for ja?
>
>
> r~
>
Marko Myllynen July 1, 2014, 9:03 a.m. UTC | #3
Hi,

On 2014-04-04 10:34, ling.ma.program@gmail.com wrote:
> From: Ling Ma <ling.ml@alibaba-inc.com>
> 
> In this patch we manage to reduce miss branch prediction by 
> avoid using branch instructions and force destination to be aligned
> with avx instruction. 
> 
> ---
>  In this version we removed prefetch and append vmovd.
> 
>  ChangeLog                              |   9 ++
>  sysdeps/x86_64/multiarch/Makefile      |   4 +-
>  sysdeps/x86_64/multiarch/memset-avx2.S | 192 +++++++++++++++++++++++++++++++++
>  sysdeps/x86_64/multiarch/memset.S      |  59 ++++++++++
>  sysdeps/x86_64/multiarch/memset_chk.S  |  44 ++++++++
>  5 files changed, 307 insertions(+), 1 deletion(-)
>  create mode 100644 sysdeps/x86_64/multiarch/memset-avx2.S
>  create mode 100644 sysdeps/x86_64/multiarch/memset.S
>  create mode 100644 sysdeps/x86_64/multiarch/memset_chk.S
> 
> diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S b/sysdeps/x86_64/multiarch/memset-avx2.S
> new file mode 100644
> index 0000000..08e8ee8
> --- /dev/null
> +++ b/sysdeps/x86_64/multiarch/memset-avx2.S
> @@ -0,0 +1,192 @@
> +/* memset with AVX2
> +   Copyright (C) 2014 Free Software Foundation, Inc.
> +   Contributed by Alibaba Group.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <sysdep.h>
> +
> +#if !defined NOT_IN_libc
> +
> +#include "asm-syntax.h"
> +#ifndef ALIGN
> +# define ALIGN(n)	.p2align n
> +#endif
> +#ifndef MEMSET
> +# define MEMSET	__memset_avx2
> +# define MEMSET_CHK	__memset_chk_avx2
> +#endif
> +
> +	.section .text.avx2,"ax",@progbits
> +#if defined PIC
> +ENTRY (MEMSET_CHK)
> +	cmpq	%rdx, %rcx
> +	jb	HIDDEN_JUMPTARGET (__chk_fail)
> +END (MEMSET_CHK)
> +#endif
> +
> +ENTRY (MEMSET)
> +	vpxor	%xmm0, %xmm0, %xmm0
> +	vmovd %esi, %xmm1
> +	lea	(%rdi, %rdx), %r8
> +	vpshufb	%xmm0, %xmm1, %xmm0
> +	mov	%rdi, %rax
> +	cmp	$256, %rdx
> +	jae	L(256bytesormore)
> +	vmovd %xmm0, %rcx
> +	cmp	$128, %rdx
> +	jb	L(less_128bytes)
> +	vmovups %xmm0, (%rdi)
> +	vmovups %xmm0, 0x10(%rdi)
> +	vmovups %xmm0, 0x20(%rdi)
> +	vmovups %xmm0, 0x30(%rdi)
> +	vmovups %xmm0, 0x40(%rdi)
> +	vmovups %xmm0, 0x50(%rdi)
> +	vmovups %xmm0, 0x60(%rdi)
> +	vmovups %xmm0, 0x70(%rdi)
> +	vmovups %xmm0, -0x80(%r8)
> +	vmovups %xmm0, -0x70(%r8)
> +	vmovups %xmm0, -0x60(%r8)
> +	vmovups %xmm0, -0x50(%r8)
> +	vmovups %xmm0, -0x40(%r8)
> +	vmovups %xmm0, -0x30(%r8)
> +	vmovups %xmm0, -0x20(%r8)
> +	vmovups %xmm0, -0x10(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_128bytes):
> +	cmp	$64, %edx
> +	jb	L(less_64bytes)
> +	vmovups %xmm0, (%rdi)
> +	vmovups %xmm0, 0x10(%rdi)
> +	vmovups %xmm0, 0x20(%rdi)
> +	vmovups %xmm0, 0x30(%rdi)
> +	vmovups %xmm0, -0x40(%r8)
> +	vmovups %xmm0, -0x30(%r8)
> +	vmovups %xmm0, -0x20(%r8)
> +	vmovups %xmm0, -0x10(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_64bytes):
> +	cmp	$32, %edx
> +	jb	L(less_32bytes)
> +	vmovups %xmm0, (%rdi)
> +	vmovups %xmm0, 0x10(%rdi)
> +	vmovups %xmm0, -0x20(%r8)
> +	vmovups %xmm0, -0x10(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_32bytes):
> +	cmp	$16, %edx
> +	jb	L(less_16bytes)
> +	vmovups %xmm0, (%rdi)
> +	vmovups %xmm0, -0x10(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_16bytes):
> +	cmp	$8, %edx
> +	jb	L(less_8bytes)
> +	mov %rcx, (%rdi)
> +	mov %rcx, -0x08(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_8bytes):
> +	cmp	$4, %edx
> +	jb	L(less_4bytes)
> +	mov %ecx, (%rdi)
> +	mov %ecx, -0x04(%r8)
> +	ALIGN(4)
> +L(less_4bytes):
> +	cmp	$2, %edx
> +	jb	L(less_2bytes)
> +	mov	%cx, (%rdi)
> +	mov	%cx, -0x02(%r8)
> +	ret
> +	ALIGN(4)
> +L(less_2bytes):
> +	cmp	$1, %edx
> +	jb	L(less_1bytes)
> +	mov	%cl, (%rdi)
> +L(less_1bytes):
> +	ret
> +
> +	ALIGN(4)
> +L(256bytesormore):
> +	vinserti128 $1, %xmm0, %ymm0, %ymm0

this breaks build on RHEL 6 x86_64:

../sysdeps/x86_64/multiarch/memset-avx2.S:
../sysdeps/x86_64/multiarch/memset-avx2.S: Assembler messages:
Assembler messages:
../sysdeps/x86_64/multiarch/memset-avx2.S:132:
../sysdeps/x86_64/multiarch/memset-avx2.S:132: Error: Error: no such
instruction: `vinserti128 $1,%xmm0,%ymm0,%ymm0'no such instruction:
`vinserti128 $1,%xmm0,%ymm0,%ymm0'

Cheers,
Ondřej Bílka July 1, 2014, 9:10 a.m. UTC | #4
On Tue, Jul 01, 2014 at 12:03:15PM +0300, Marko Myllynen wrote:
> Hi,
> 
> On 2014-04-04 10:34, ling.ma.program@gmail.com wrote:
> > From: Ling Ma <ling.ml@alibaba-inc.com>
> > 
> > In this patch we manage to reduce miss branch prediction by 
> > avoid using branch instructions and force destination to be aligned
> > with avx instruction. 
> > 
> > ---
> >  In this version we removed prefetch and append vmovd.
> > 
> >  ChangeLog                              |   9 ++
> >  sysdeps/x86_64/multiarch/Makefile      |   4 +-
> >  sysdeps/x86_64/multiarch/memset-avx2.S | 192 +++++++++++++++++++++++++++++++++
> >  sysdeps/x86_64/multiarch/memset.S      |  59 ++++++++++
> >  sysdeps/x86_64/multiarch/memset_chk.S  |  44 ++++++++
> >  5 files changed, 307 insertions(+), 1 deletion(-)
> >  create mode 100644 sysdeps/x86_64/multiarch/memset-avx2.S
> >  create mode 100644 sysdeps/x86_64/multiarch/memset.S
> >  create mode 100644 sysdeps/x86_64/multiarch/memset_chk.S
> > 
> > diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S b/sysdeps/x86_64/multiarch/memset-avx2.S
> > new file mode 100644
> > index 0000000..08e8ee8
> > --- /dev/null
> > +++ b/sysdeps/x86_64/multiarch/memset-avx2.S
> > @@ -0,0 +1,192 @@
> > +/* memset with AVX2
> > +   Copyright (C) 2014 Free Software Foundation, Inc.
> > +   Contributed by Alibaba Group.
> > +   This file is part of the GNU C Library.
> > +
> > +   The GNU C Library is free software; you can redistribute it and/or
> > +   modify it under the terms of the GNU Lesser General Public
> > +   License as published by the Free Software Foundation; either
> > +   version 2.1 of the License, or (at your option) any later version.
> > +
> > +   The GNU C Library is distributed in the hope that it will be useful,
> > +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> > +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> > +   Lesser General Public License for more details.
> > +
> > +   You should have received a copy of the GNU Lesser General Public
> > +   License along with the GNU C Library; if not, see
> > +   <http://www.gnu.org/licenses/>.  */
> > +
> > +#include <sysdep.h>
> > +
> > +#if !defined NOT_IN_libc
> > +
> > +#include "asm-syntax.h"
> > +#ifndef ALIGN
> > +# define ALIGN(n)	.p2align n
> > +#endif
> > +#ifndef MEMSET
> > +# define MEMSET	__memset_avx2
> > +# define MEMSET_CHK	__memset_chk_avx2
> > +#endif
> > +
> > +	.section .text.avx2,"ax",@progbits
> > +#if defined PIC
> > +ENTRY (MEMSET_CHK)
> > +	cmpq	%rdx, %rcx
> > +	jb	HIDDEN_JUMPTARGET (__chk_fail)
> > +END (MEMSET_CHK)
> > +#endif
> > +
> > +ENTRY (MEMSET)
> > +	vpxor	%xmm0, %xmm0, %xmm0
> > +	vmovd %esi, %xmm1
> > +	lea	(%rdi, %rdx), %r8
> > +	vpshufb	%xmm0, %xmm1, %xmm0
> > +	mov	%rdi, %rax
> > +	cmp	$256, %rdx
> > +	jae	L(256bytesormore)
> > +	vmovd %xmm0, %rcx
> > +	cmp	$128, %rdx
> > +	jb	L(less_128bytes)
> > +	vmovups %xmm0, (%rdi)
> > +	vmovups %xmm0, 0x10(%rdi)
> > +	vmovups %xmm0, 0x20(%rdi)
> > +	vmovups %xmm0, 0x30(%rdi)
> > +	vmovups %xmm0, 0x40(%rdi)
> > +	vmovups %xmm0, 0x50(%rdi)
> > +	vmovups %xmm0, 0x60(%rdi)
> > +	vmovups %xmm0, 0x70(%rdi)
> > +	vmovups %xmm0, -0x80(%r8)
> > +	vmovups %xmm0, -0x70(%r8)
> > +	vmovups %xmm0, -0x60(%r8)
> > +	vmovups %xmm0, -0x50(%r8)
> > +	vmovups %xmm0, -0x40(%r8)
> > +	vmovups %xmm0, -0x30(%r8)
> > +	vmovups %xmm0, -0x20(%r8)
> > +	vmovups %xmm0, -0x10(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_128bytes):
> > +	cmp	$64, %edx
> > +	jb	L(less_64bytes)
> > +	vmovups %xmm0, (%rdi)
> > +	vmovups %xmm0, 0x10(%rdi)
> > +	vmovups %xmm0, 0x20(%rdi)
> > +	vmovups %xmm0, 0x30(%rdi)
> > +	vmovups %xmm0, -0x40(%r8)
> > +	vmovups %xmm0, -0x30(%r8)
> > +	vmovups %xmm0, -0x20(%r8)
> > +	vmovups %xmm0, -0x10(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_64bytes):
> > +	cmp	$32, %edx
> > +	jb	L(less_32bytes)
> > +	vmovups %xmm0, (%rdi)
> > +	vmovups %xmm0, 0x10(%rdi)
> > +	vmovups %xmm0, -0x20(%r8)
> > +	vmovups %xmm0, -0x10(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_32bytes):
> > +	cmp	$16, %edx
> > +	jb	L(less_16bytes)
> > +	vmovups %xmm0, (%rdi)
> > +	vmovups %xmm0, -0x10(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_16bytes):
> > +	cmp	$8, %edx
> > +	jb	L(less_8bytes)
> > +	mov %rcx, (%rdi)
> > +	mov %rcx, -0x08(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_8bytes):
> > +	cmp	$4, %edx
> > +	jb	L(less_4bytes)
> > +	mov %ecx, (%rdi)
> > +	mov %ecx, -0x04(%r8)
> > +	ALIGN(4)
> > +L(less_4bytes):
> > +	cmp	$2, %edx
> > +	jb	L(less_2bytes)
> > +	mov	%cx, (%rdi)
> > +	mov	%cx, -0x02(%r8)
> > +	ret
> > +	ALIGN(4)
> > +L(less_2bytes):
> > +	cmp	$1, %edx
> > +	jb	L(less_1bytes)
> > +	mov	%cl, (%rdi)
> > +L(less_1bytes):
> > +	ret
> > +
> > +	ALIGN(4)
> > +L(256bytesormore):
> > +	vinserti128 $1, %xmm0, %ymm0, %ymm0
> 
> this breaks build on RHEL 6 x86_64:
> 
> ../sysdeps/x86_64/multiarch/memset-avx2.S:
> ../sysdeps/x86_64/multiarch/memset-avx2.S: Assembler messages:
> Assembler messages:
> ../sysdeps/x86_64/multiarch/memset-avx2.S:132:
> ../sysdeps/x86_64/multiarch/memset-avx2.S:132: Error: Error: no such
> instruction: `vinserti128 $1,%xmm0,%ymm0,%ymm0'no such instruction:
> `vinserti128 $1,%xmm0,%ymm0,%ymm0'
> 
> Cheers,
> 
What version of gcc?
Marko Myllynen July 1, 2014, 9:13 a.m. UTC | #5
Hi,

On 2014-07-01 12:10, Ondřej Bílka wrote:
> On Tue, Jul 01, 2014 at 12:03:15PM +0300, Marko Myllynen wrote:
>> On 2014-04-04 10:34, ling.ma.program@gmail.com wrote:
>>> From: Ling Ma <ling.ml@alibaba-inc.com>
>>>
>>> In this patch we manage to reduce miss branch prediction by 
>>> avoid using branch instructions and force destination to be aligned
>>> with avx instruction. 
>>>
>>> ---
>>>  In this version we removed prefetch and append vmovd.
>>>
>>>  ChangeLog                              |   9 ++
>>>  sysdeps/x86_64/multiarch/Makefile      |   4 +-
>>>  sysdeps/x86_64/multiarch/memset-avx2.S | 192 +++++++++++++++++++++++++++++++++
>>>  sysdeps/x86_64/multiarch/memset.S      |  59 ++++++++++
>>>  sysdeps/x86_64/multiarch/memset_chk.S  |  44 ++++++++
>>>  5 files changed, 307 insertions(+), 1 deletion(-)
>>>  create mode 100644 sysdeps/x86_64/multiarch/memset-avx2.S
>>>  create mode 100644 sysdeps/x86_64/multiarch/memset.S
>>>  create mode 100644 sysdeps/x86_64/multiarch/memset_chk.S
>>>
>>> diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S b/sysdeps/x86_64/multiarch/memset-avx2.S
>>> new file mode 100644
>>> index 0000000..08e8ee8
>>> --- /dev/null
>>> +++ b/sysdeps/x86_64/multiarch/memset-avx2.S
>>> @@ -0,0 +1,192 @@
>>> +/* memset with AVX2
>>> +   Copyright (C) 2014 Free Software Foundation, Inc.
>>> +   Contributed by Alibaba Group.
>>> +   This file is part of the GNU C Library.
>>> +
>>> +   The GNU C Library is free software; you can redistribute it and/or
>>> +   modify it under the terms of the GNU Lesser General Public
>>> +   License as published by the Free Software Foundation; either
>>> +   version 2.1 of the License, or (at your option) any later version.
>>> +
>>> +   The GNU C Library is distributed in the hope that it will be useful,
>>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>>> +   Lesser General Public License for more details.
>>> +
>>> +   You should have received a copy of the GNU Lesser General Public
>>> +   License along with the GNU C Library; if not, see
>>> +   <http://www.gnu.org/licenses/>.  */
>>> +
>>> +#include <sysdep.h>
>>> +
>>> +#if !defined NOT_IN_libc
>>> +
>>> +#include "asm-syntax.h"
>>> +#ifndef ALIGN
>>> +# define ALIGN(n)	.p2align n
>>> +#endif
>>> +#ifndef MEMSET
>>> +# define MEMSET	__memset_avx2
>>> +# define MEMSET_CHK	__memset_chk_avx2
>>> +#endif
>>> +
>>> +	.section .text.avx2,"ax",@progbits
>>> +#if defined PIC
>>> +ENTRY (MEMSET_CHK)
>>> +	cmpq	%rdx, %rcx
>>> +	jb	HIDDEN_JUMPTARGET (__chk_fail)
>>> +END (MEMSET_CHK)
>>> +#endif
>>> +
>>> +ENTRY (MEMSET)
>>> +	vpxor	%xmm0, %xmm0, %xmm0
>>> +	vmovd %esi, %xmm1
>>> +	lea	(%rdi, %rdx), %r8
>>> +	vpshufb	%xmm0, %xmm1, %xmm0
>>> +	mov	%rdi, %rax
>>> +	cmp	$256, %rdx
>>> +	jae	L(256bytesormore)
>>> +	vmovd %xmm0, %rcx
>>> +	cmp	$128, %rdx
>>> +	jb	L(less_128bytes)
>>> +	vmovups %xmm0, (%rdi)
>>> +	vmovups %xmm0, 0x10(%rdi)
>>> +	vmovups %xmm0, 0x20(%rdi)
>>> +	vmovups %xmm0, 0x30(%rdi)
>>> +	vmovups %xmm0, 0x40(%rdi)
>>> +	vmovups %xmm0, 0x50(%rdi)
>>> +	vmovups %xmm0, 0x60(%rdi)
>>> +	vmovups %xmm0, 0x70(%rdi)
>>> +	vmovups %xmm0, -0x80(%r8)
>>> +	vmovups %xmm0, -0x70(%r8)
>>> +	vmovups %xmm0, -0x60(%r8)
>>> +	vmovups %xmm0, -0x50(%r8)
>>> +	vmovups %xmm0, -0x40(%r8)
>>> +	vmovups %xmm0, -0x30(%r8)
>>> +	vmovups %xmm0, -0x20(%r8)
>>> +	vmovups %xmm0, -0x10(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_128bytes):
>>> +	cmp	$64, %edx
>>> +	jb	L(less_64bytes)
>>> +	vmovups %xmm0, (%rdi)
>>> +	vmovups %xmm0, 0x10(%rdi)
>>> +	vmovups %xmm0, 0x20(%rdi)
>>> +	vmovups %xmm0, 0x30(%rdi)
>>> +	vmovups %xmm0, -0x40(%r8)
>>> +	vmovups %xmm0, -0x30(%r8)
>>> +	vmovups %xmm0, -0x20(%r8)
>>> +	vmovups %xmm0, -0x10(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_64bytes):
>>> +	cmp	$32, %edx
>>> +	jb	L(less_32bytes)
>>> +	vmovups %xmm0, (%rdi)
>>> +	vmovups %xmm0, 0x10(%rdi)
>>> +	vmovups %xmm0, -0x20(%r8)
>>> +	vmovups %xmm0, -0x10(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_32bytes):
>>> +	cmp	$16, %edx
>>> +	jb	L(less_16bytes)
>>> +	vmovups %xmm0, (%rdi)
>>> +	vmovups %xmm0, -0x10(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_16bytes):
>>> +	cmp	$8, %edx
>>> +	jb	L(less_8bytes)
>>> +	mov %rcx, (%rdi)
>>> +	mov %rcx, -0x08(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_8bytes):
>>> +	cmp	$4, %edx
>>> +	jb	L(less_4bytes)
>>> +	mov %ecx, (%rdi)
>>> +	mov %ecx, -0x04(%r8)
>>> +	ALIGN(4)
>>> +L(less_4bytes):
>>> +	cmp	$2, %edx
>>> +	jb	L(less_2bytes)
>>> +	mov	%cx, (%rdi)
>>> +	mov	%cx, -0x02(%r8)
>>> +	ret
>>> +	ALIGN(4)
>>> +L(less_2bytes):
>>> +	cmp	$1, %edx
>>> +	jb	L(less_1bytes)
>>> +	mov	%cl, (%rdi)
>>> +L(less_1bytes):
>>> +	ret
>>> +
>>> +	ALIGN(4)
>>> +L(256bytesormore):
>>> +	vinserti128 $1, %xmm0, %ymm0, %ymm0
>>
>> this breaks build on RHEL 6 x86_64:
>>
>> ../sysdeps/x86_64/multiarch/memset-avx2.S:
>> ../sysdeps/x86_64/multiarch/memset-avx2.S: Assembler messages:
>> Assembler messages:
>> ../sysdeps/x86_64/multiarch/memset-avx2.S:132:
>> ../sysdeps/x86_64/multiarch/memset-avx2.S:132: Error: Error: no such
>> instruction: `vinserti128 $1,%xmm0,%ymm0,%ymm0'no such instruction:
>> `vinserti128 $1,%xmm0,%ymm0,%ymm0'
>>
> What version of gcc?

from configure output:

checking whether as is GNU as... yes
checking whether ld is GNU ld... yes
checking for as... as
checking version of as... 2.20.51.0.2, ok
checking for ld... ld
checking version of ld... 2.20.51.0.2, ok
checking for gcc... gcc
checking version of gcc... 4.4.7, ok

Thanks,
diff mbox

Patch

diff --git a/ChangeLog b/ChangeLog
index ab23a3a..851fe9e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@ 
+2014-04-04  Ling Ma  <ling.ml@alibaba-inc.com>
+
+	* sysdeps/x86_64/multiarch/Makefile: Add memset-avx2
+	* sysdeps/x86_64/multiarch/memset-avx2.S: New file for AVX2 memset
+	* sysdeps/x86_64/multiarch/memset.S: New file for multiple memset
+	versions
+	* sysdeps/x86_64/multiarch/memset_chk.S: New file for multiple memset_chk
+	versions
+
 2014-04-04  Sihai Yao  <sihai.ysh@alibaba-inc.com>
 
 	* sysdeps/x86_64/multiarch/ifunc-defines.sym: Add COMMON_CPU_INDEX_7 and
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 57a3c13..42df96f 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -17,7 +17,9 @@  sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
 		   strcpy-sse2-unaligned strncpy-sse2-unaligned \
 		   stpcpy-sse2-unaligned stpncpy-sse2-unaligned \
 		   strcat-sse2-unaligned strncat-sse2-unaligned \
-		   strchr-sse2-no-bsf memcmp-ssse3 strstr-sse2-unaligned
+		   strchr-sse2-no-bsf memcmp-ssse3 strstr-sse2-unaligned \
+		   memset-avx2
+
 ifeq (yes,$(config-cflags-sse4))
 sysdep_routines += strcspn-c strpbrk-c strspn-c varshift
 CFLAGS-varshift.c += -msse4
diff --git a/sysdeps/x86_64/multiarch/memset-avx2.S b/sysdeps/x86_64/multiarch/memset-avx2.S
new file mode 100644
index 0000000..08e8ee8
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset-avx2.S
@@ -0,0 +1,192 @@ 
+/* memset with AVX2
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   Contributed by Alibaba Group.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if !defined NOT_IN_libc
+
+#include "asm-syntax.h"
+#ifndef ALIGN
+# define ALIGN(n)	.p2align n
+#endif
+#ifndef MEMSET
+# define MEMSET	__memset_avx2
+# define MEMSET_CHK	__memset_chk_avx2
+#endif
+
+	.section .text.avx2,"ax",@progbits
+#if defined PIC
+ENTRY (MEMSET_CHK)
+	cmpq	%rdx, %rcx
+	jb	HIDDEN_JUMPTARGET (__chk_fail)
+END (MEMSET_CHK)
+#endif
+
+ENTRY (MEMSET)
+	vpxor	%xmm0, %xmm0, %xmm0
+	vmovd %esi, %xmm1
+	lea	(%rdi, %rdx), %r8
+	vpshufb	%xmm0, %xmm1, %xmm0
+	mov	%rdi, %rax
+	cmp	$256, %rdx
+	jae	L(256bytesormore)
+	vmovd %xmm0, %rcx
+	cmp	$128, %rdx
+	jb	L(less_128bytes)
+	vmovups %xmm0, (%rdi)
+	vmovups %xmm0, 0x10(%rdi)
+	vmovups %xmm0, 0x20(%rdi)
+	vmovups %xmm0, 0x30(%rdi)
+	vmovups %xmm0, 0x40(%rdi)
+	vmovups %xmm0, 0x50(%rdi)
+	vmovups %xmm0, 0x60(%rdi)
+	vmovups %xmm0, 0x70(%rdi)
+	vmovups %xmm0, -0x80(%r8)
+	vmovups %xmm0, -0x70(%r8)
+	vmovups %xmm0, -0x60(%r8)
+	vmovups %xmm0, -0x50(%r8)
+	vmovups %xmm0, -0x40(%r8)
+	vmovups %xmm0, -0x30(%r8)
+	vmovups %xmm0, -0x20(%r8)
+	vmovups %xmm0, -0x10(%r8)
+	ret
+	ALIGN(4)
+L(less_128bytes):
+	cmp	$64, %edx
+	jb	L(less_64bytes)
+	vmovups %xmm0, (%rdi)
+	vmovups %xmm0, 0x10(%rdi)
+	vmovups %xmm0, 0x20(%rdi)
+	vmovups %xmm0, 0x30(%rdi)
+	vmovups %xmm0, -0x40(%r8)
+	vmovups %xmm0, -0x30(%r8)
+	vmovups %xmm0, -0x20(%r8)
+	vmovups %xmm0, -0x10(%r8)
+	ret
+	ALIGN(4)
+L(less_64bytes):
+	cmp	$32, %edx
+	jb	L(less_32bytes)
+	vmovups %xmm0, (%rdi)
+	vmovups %xmm0, 0x10(%rdi)
+	vmovups %xmm0, -0x20(%r8)
+	vmovups %xmm0, -0x10(%r8)
+	ret
+	ALIGN(4)
+L(less_32bytes):
+	cmp	$16, %edx
+	jb	L(less_16bytes)
+	vmovups %xmm0, (%rdi)
+	vmovups %xmm0, -0x10(%r8)
+	ret
+	ALIGN(4)
+L(less_16bytes):
+	cmp	$8, %edx
+	jb	L(less_8bytes)
+	mov %rcx, (%rdi)
+	mov %rcx, -0x08(%r8)
+	ret
+	ALIGN(4)
+L(less_8bytes):
+	cmp	$4, %edx
+	jb	L(less_4bytes)
+	mov %ecx, (%rdi)
+	mov %ecx, -0x04(%r8)
+	ALIGN(4)
+L(less_4bytes):
+	cmp	$2, %edx
+	jb	L(less_2bytes)
+	mov	%cx, (%rdi)
+	mov	%cx, -0x02(%r8)
+	ret
+	ALIGN(4)
+L(less_2bytes):
+	cmp	$1, %edx
+	jb	L(less_1bytes)
+	mov	%cl, (%rdi)
+L(less_1bytes):
+	ret
+
+	ALIGN(4)
+L(256bytesormore):
+	vinserti128 $1, %xmm0, %ymm0, %ymm0
+	vmovups	%ymm0, (%rdi)
+	mov	%rdi, %r9
+	and	$-0x20, %rdi
+	add	$32, %rdi
+	sub	%rdi, %r9
+	add	%r9, %rdx
+	cmp	$4096, %rdx
+	ja	L(gobble_data)
+
+	sub	$0x80, %rdx
+L(gobble_128_loop):
+	vmovaps	%ymm0, (%rdi)
+	vmovaps	%ymm0, 0x20(%rdi)
+	vmovaps	%ymm0, 0x40(%rdi)
+	vmovaps	%ymm0, 0x60(%rdi)
+	lea	0x80(%rdi), %rdi
+	sub	$0x80, %rdx
+	jae	L(gobble_128_loop)
+	vmovups	%ymm0, -0x80(%r8)
+	vmovups	%ymm0, -0x60(%r8)
+	vmovups	%ymm0, -0x40(%r8)
+	vmovups	%ymm0, -0x20(%r8)
+	vzeroupper
+	ret
+
+	ALIGN(4)
+L(gobble_data):
+#ifdef SHARED_CACHE_SIZE_HALF
+	mov	$SHARED_CACHE_SIZE_HALF, %r9
+#else
+	mov	__x86_shared_cache_size_half(%rip), %r9
+#endif
+	shl	$4, %r9
+	cmp	%r9, %rdx
+	ja	L(gobble_big_data)
+	mov	%rax, %r9
+	mov	%esi, %eax
+	mov	%rdx, %rcx
+	rep	stosb
+	mov	%r9, %rax
+	vzeroupper
+	ret
+
+	ALIGN(4)
+L(gobble_big_data):
+	sub	$0x80, %rdx
+L(gobble_big_data_loop):
+	vmovntdq	%ymm0, (%rdi)
+	vmovntdq	%ymm0, 0x20(%rdi)
+	vmovntdq	%ymm0, 0x40(%rdi)
+	vmovntdq	%ymm0, 0x60(%rdi)
+	lea	0x80(%rdi), %rdi
+	sub	$0x80, %rdx
+	jae	L(gobble_big_data_loop)
+	vmovups	%ymm0, -0x80(%r8)
+	vmovups	%ymm0, -0x60(%r8)
+	vmovups	%ymm0, -0x40(%r8)
+	vmovups	%ymm0, -0x20(%r8)
+	vzeroupper
+	sfence
+	ret
+
+END (MEMSET)
+#endif
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
new file mode 100644
index 0000000..df903af
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -0,0 +1,59 @@ 
+/* Multiple versions of memset
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   Contributed by Alibaba Group.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <init-arch.h>
+
+/* Define multiple versions only for the definition in lib.  */
+#ifndef NOT_IN_libc
+ENTRY(memset)
+	.type	memset, @gnu_indirect_function
+	cmpl	$0, __cpu_features+KIND_OFFSET(%rip)
+	jne	1f
+	call	__init_cpu_features
+1:	leaq	__memset_sse2(%rip), %rax
+	testl	$bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
+	jz	2f
+	leaq	__memset_avx2(%rip), %rax
+2:	ret
+END(memset)
+#endif
+
+#if !defined NOT_IN_libc
+# undef memset
+# define memset __memset_sse2
+
+# undef __memset_chk
+# define __memset_chk __memset_chk_sse2
+
+# ifdef SHARED
+#  undef libc_hidden_builtin_def
+/* It doesn't make sense to send libc-internal memset calls through a PLT.
+   The speedup we get from using GPR instruction is likely eaten away
+   by the indirect call in the PLT.  */
+#  define libc_hidden_builtin_def(name) \
+	.globl __GI_memset; __GI_memset = __memset_sse2
+# endif
+
+# undef strong_alias
+# define strong_alias(original, alias)
+#endif
+
+#include "../memset.S"
diff --git a/sysdeps/x86_64/multiarch/memset_chk.S b/sysdeps/x86_64/multiarch/memset_chk.S
new file mode 100644
index 0000000..f048dac
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset_chk.S
@@ -0,0 +1,44 @@ 
+/* Multiple versions of memset_chk
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   Contributed by Alibaba Group.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+/* Define multiple versions only for the definition in lib.  */
+#ifndef NOT_IN_libc
+# ifdef SHARED
+ENTRY(__memset_chk)
+	.type	__memset_chk, @gnu_indirect_function
+	cmpl	$0, __cpu_features+KIND_OFFSET(%rip)
+	jne	1f
+	call	__init_cpu_features
+1:	leaq	__memset_chk_sse2(%rip), %rax
+	testl	$bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
+	jz	2f
+	leaq	__memset_chk_avx2(%rip), %rax
+2:	ret
+END(__memset_chk)
+
+strong_alias (__memset_chk, __memset_zero_constant_len_parameter)
+	.section .gnu.warning.__memset_zero_constant_len_parameter
+	.string "memset used with constant zero length parameter; this could be due to transposed parameters"
+# else
+#  include "../memset_chk.S"
+# endif
+#endif