@@ -27,216 +27,143 @@
*
*/
-/* Offsets for data table __svml_slog10_data_internal
- */
-#define MinNorm 0
-#define MaxNorm 16
-#define L2H 32
-#define L2L 48
-#define iBrkValue 64
-#define iOffExpoMask 80
-#define One 96
-#define sPoly 112
-#define L2 256
+#define LOCAL_DATA_NAME __svml_slog10_data_internal
+#include "svml_s_common_sse4_rodata_offsets.h"
+
+/* Offsets for data table __svml_slog10_data_internal. */
+#define _L2L 0
+#define _Coeff_9 16
+#define _Coeff_8 32
+#define _Coeff_7 48
+#define _Coeff_6 64
+#define _Coeff_5 80
+#define _Coeff_4 96
+#define _Coeff_3 112
+#define _Coeff_2 128
+#define _Coeff_1 144
+#define _L2H 160
#include <sysdep.h>
.section .text.sse4, "ax", @progbits
ENTRY(_ZGVbN4v_log10f_sse4)
- subq $72, %rsp
- cfi_def_cfa_offset(80)
- movaps %xmm0, %xmm1
-
- /* reduction: compute r, n */
- movdqu iBrkValue+__svml_slog10_data_internal(%rip), %xmm2
- movaps %xmm0, %xmm4
- movdqu iOffExpoMask+__svml_slog10_data_internal(%rip), %xmm10
- psubd %xmm2, %xmm1
- pand %xmm1, %xmm10
- psrad $23, %xmm1
- paddd %xmm2, %xmm10
+ movdqu COMMON_DATA(_NotiOffExpoMask)(%rip), %xmm2
movaps %xmm0, %xmm3
- movups sPoly+__svml_slog10_data_internal(%rip), %xmm5
- movups sPoly+32+__svml_slog10_data_internal(%rip), %xmm6
- movups sPoly+64+__svml_slog10_data_internal(%rip), %xmm7
- movups sPoly+96+__svml_slog10_data_internal(%rip), %xmm9
- cvtdq2ps %xmm1, %xmm12
- cmpltps MinNorm+__svml_slog10_data_internal(%rip), %xmm4
- cmpnleps MaxNorm+__svml_slog10_data_internal(%rip), %xmm3
- subps One+__svml_slog10_data_internal(%rip), %xmm10
- mulps %xmm10, %xmm5
- movaps %xmm10, %xmm8
- mulps %xmm10, %xmm6
- mulps %xmm10, %xmm8
- addps sPoly+16+__svml_slog10_data_internal(%rip), %xmm5
- mulps %xmm10, %xmm7
- addps sPoly+48+__svml_slog10_data_internal(%rip), %xmm6
- mulps %xmm10, %xmm9
- mulps %xmm8, %xmm5
- addps sPoly+80+__svml_slog10_data_internal(%rip), %xmm7
- addps sPoly+112+__svml_slog10_data_internal(%rip), %xmm9
- addps %xmm5, %xmm6
- mulps %xmm8, %xmm6
- orps %xmm3, %xmm4
-
- /* combine and get argument value range mask */
- movmskps %xmm4, %edx
- movups L2L+__svml_slog10_data_internal(%rip), %xmm1
- addps %xmm6, %xmm7
- mulps %xmm12, %xmm1
- mulps %xmm7, %xmm8
- movups L2H+__svml_slog10_data_internal(%rip), %xmm11
- addps %xmm8, %xmm9
- mulps %xmm11, %xmm12
- mulps %xmm10, %xmm9
- addps sPoly+128+__svml_slog10_data_internal(%rip), %xmm9
- mulps %xmm9, %xmm10
- addps %xmm10, %xmm1
- addps %xmm12, %xmm1
- testl %edx, %edx
-
- /* Go to special inputs processing branch */
+ psubd %xmm2, %xmm0
+ movaps COMMON_DATA(_ILoRange)(%rip), %xmm4
+ pcmpgtd %xmm0, %xmm4
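+ /* A single biased subtract plus one signed compare flags every
+ lane that is not a positive normal float: subnormals, zeros,
+ negative inputs, Inf, and NaN all fall below _ILoRange. This
+ replaces the old MinNorm/MaxNorm pair of compares. */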
+ /* Get the argument value range mask in eax. */
+ movmskps %xmm4, %eax
+ movups LOCAL_DATA(_L2L)(%rip), %xmm0
+ /* reduction: compute r, n. */
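+ /* In C-like terms (assuming _IBrkValue holds the bit pattern
+ of 2/3 and _NotiOffExpoMask the complement of the significand
+ mask):
+ n = (int) (x_bits - _IBrkValue) >> 23;
+ m_bits = ((x_bits - _IBrkValue) & 0x007fffff) + _IBrkValue;
+ r = as_float (m_bits) - 1.0f;
+ so that x = 2^n * (1 + r) with 1 + r in [2/3, 4/3). */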
+ movdqu COMMON_DATA(_IBrkValue)(%rip), %xmm4
+ movaps %xmm3, %xmm6
+ psubd %xmm4, %xmm3
+ pandn %xmm3, %xmm2
+ paddd %xmm4, %xmm2
+ subps COMMON_DATA(_OneF)(%rip), %xmm2
+ psrad $0x17, %xmm3
+ cvtdq2ps %xmm3, %xmm4
+ mulps %xmm4, %xmm0
+ movaps %xmm2, %xmm3
+ mulps %xmm2, %xmm2
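+ /* Evaluate p(r) = c9*r^8 + ... + c2*r + c1 with a short Estrin
+ scheme: pairs c_{k+1}*r + c_k are combined through r^2 (xmm2),
+ which shortens the dependency chain versus plain Horner. xmm3
+ keeps r for the final multiply. */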
+ movups LOCAL_DATA(_Coeff_9)(%rip), %xmm1
+ mulps %xmm3, %xmm1
+ addps LOCAL_DATA(_Coeff_8)(%rip), %xmm1
+ mulps %xmm2, %xmm1
+ movups LOCAL_DATA(_Coeff_7)(%rip), %xmm5
+ mulps %xmm3, %xmm5
+ addps LOCAL_DATA(_Coeff_6)(%rip), %xmm5
+ addps %xmm1, %xmm5
+ mulps %xmm2, %xmm5
+ movups LOCAL_DATA(_Coeff_5)(%rip), %xmm1
+ mulps %xmm3, %xmm1
+ addps LOCAL_DATA(_Coeff_4)(%rip), %xmm1
+ addps %xmm5, %xmm1
+ mulps %xmm1, %xmm2
+ movups LOCAL_DATA(_Coeff_3)(%rip), %xmm1
+ mulps %xmm3, %xmm1
+ addps LOCAL_DATA(_Coeff_2)(%rip), %xmm1
+ addps %xmm2, %xmm1
+ mulps %xmm3, %xmm1
+ addps LOCAL_DATA(_Coeff_1)(%rip), %xmm1
+ mulps %xmm1, %xmm3
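+ /* result = n * _L2H + (r * p(r) + n * _L2L), where _L2H + _L2L
+ approximates log10(2) so that the n * log10(2) term is added
+ with extra precision. */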
+ addps %xmm3, %xmm0
+ movups LOCAL_DATA(_L2H)(%rip), %xmm2
+ mulps %xmm4, %xmm2
+ addps %xmm2, %xmm0
+ testl %eax, %eax
+ /* Go to special inputs processing branch. */
jne L(SPECIAL_VALUES_BRANCH)
- # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1
-
- /* Restore registers
- * and exit the function
- */
-
-L(EXIT):
- movaps %xmm1, %xmm0
- addq $72, %rsp
- cfi_def_cfa_offset(8)
ret
- cfi_def_cfa_offset(80)
-
- /* Branch to process
- * special inputs
- */
+ /* Cold case. eax has 1s where there was a special value that
+ needs to be handled by a log10f call. Optimize for code size
+ more so than speed here. */
L(SPECIAL_VALUES_BRANCH):
- movups %xmm0, 32(%rsp)
- movups %xmm1, 48(%rsp)
- # LOE rbx rbp r12 r13 r14 r15 edx
-
- xorl %eax, %eax
- movq %r12, 16(%rsp)
- cfi_offset(12, -64)
- movl %eax, %r12d
- movq %r13, 8(%rsp)
- cfi_offset(13, -72)
- movl %edx, %r13d
- movq %r14, (%rsp)
- cfi_offset(14, -80)
- # LOE rbx rbp r15 r12d r13d
-
- /* Range mask
- * bits check
- */
-
-L(RANGEMASK_CHECK):
- btl %r12d, %r13d
-
- /* Call scalar math function */
- jc L(SCALAR_MATH_CALL)
- # LOE rbx rbp r15 r12d r13d
-
- /* Special inputs
- * processing loop
- */
-
+ /* The stack comes in 16-byte aligned. Set it 8-byte misaligned
+ so that it is 16-byte aligned at call entry. */
+
+ subq $56, %rsp
+ movups %xmm0, 24(%rsp)
+ movups %xmm6, 40(%rsp)
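+ /* Frame layout: (%rsp) saved rbx, 8(%rsp) saved rbp, 24(%rsp)
+ the vector of (partial) results, 40(%rsp) the original input
+ vector. */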
+
+ /* Use rbx/rbp for the callee-save registers: many instructions
+ have shorter encodings with them than with r12/r13. */
+ movq %rbx, (%rsp)
+ cfi_offset (rbx, -64)
+ movq %rbp, 8(%rsp)
+ cfi_offset (rbp, -56)
+ /* eax has 1s where there was a special value that needs to be
+ handled by a log10f call; keep the mask in callee-saved ebx
+ across those calls. */
+ movl %eax, %ebx
L(SPECIAL_VALUES_LOOP):
- incl %r12d
- cmpl $4, %r12d
-
- /* Check bits in range mask */
- jl L(RANGEMASK_CHECK)
- # LOE rbx rbp r15 r12d r13d
-
- movq 16(%rsp), %r12
- cfi_restore(12)
- movq 8(%rsp), %r13
- cfi_restore(13)
- movq (%rsp), %r14
- cfi_restore(14)
- movups 48(%rsp), %xmm1
-
- /* Go to exit */
- jmp L(EXIT)
- cfi_offset(12, -64)
- cfi_offset(13, -72)
- cfi_offset(14, -80)
- # LOE rbx rbp r12 r13 r14 r15 xmm1
-
- /* Scalar math fucntion call
- * to process special input
- */
-
-L(SCALAR_MATH_CALL):
- movl %r12d, %r14d
- movss 32(%rsp, %r14, 4), %xmm0
- call log10f@PLT
- # LOE rbx rbp r14 r15 r12d r13d xmm0
- movss %xmm0, 48(%rsp, %r14, 4)
+ /* Use rbp as the index of the special lane; it is preserved
+ across the calls to log10f. We technically don't need a
+ callee-save register here, as the offset from rsp is always in
+ [0, 12], so rsp could be restored by re-aligning to 64.
+ Essentially the tradeoff is 1 extra save/restore vs. 2 extra
+ instructions in the loop. */
+ xorl %ebp, %ebp
+ bsfl %ebx, %ebp
+
+ /* Scalar math function call to process special input. */
+ movss 40(%rsp, %rbp, 4), %xmm0
+ call log10f@PLT
- /* Process special inputs in loop */
- jmp L(SPECIAL_VALUES_LOOP)
- # LOE rbx rbp r15 r12d r13d
+ /* No good way to avoid the store-forwarding stall this will
+ cause on return. `lfence` avoids the stall but at greater
+ cost, as it serializes the stack/callee-save restoration. */
+ movss %xmm0, 24(%rsp, %rbp, 4)
+
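+ /* Clear the lowest set bit of the mask (a `blsr` without
+ needing BMI1) and loop while any special lane remains. */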
+ leal -1(%rbx), %eax
+ andl %eax, %ebx
+ jnz L(SPECIAL_VALUES_LOOP)
+
+ /* All results have been written to 24(%rsp). */
+ movups 24(%rsp), %xmm0
+ movq (%rsp), %rbx
+ cfi_restore (rbx)
+ movq 8(%rsp), %rbp
+ cfi_restore (rbp)
+ addq $56, %rsp
+ cfi_def_cfa_offset (8)
+ ret
END(_ZGVbN4v_log10f_sse4)
- .section .rodata, "a"
+ .section .rodata.sse4, "a"
.align 16
-#ifdef __svml_slog10_data_internal_typedef
-typedef unsigned int VUINT32;
-typedef struct {
- __declspec(align(16)) VUINT32 MinNorm[4][1];
- __declspec(align(16)) VUINT32 MaxNorm[4][1];
- __declspec(align(16)) VUINT32 L2H[4][1];
- __declspec(align(16)) VUINT32 L2L[4][1];
- __declspec(align(16)) VUINT32 iBrkValue[4][1];
- __declspec(align(16)) VUINT32 iOffExpoMask[4][1];
- __declspec(align(16)) VUINT32 One[4][1];
- __declspec(align(16)) VUINT32 sPoly[9][4][1];
- __declspec(align(16)) VUINT32 L2[4][1];
-} __svml_slog10_data_internal;
-#endif
-__svml_slog10_data_internal:
- /* MinNorm */
- .long 0x00800000, 0x00800000, 0x00800000, 0x00800000
- /* MaxNorm */
- .align 16
- .long 0x7f7fffff, 0x7f7fffff, 0x7f7fffff, 0x7f7fffff
- /* L2H */
- .align 16
- .long 0x3e9a2100, 0x3e9a2100, 0x3e9a2100, 0x3e9a2100
- /* L2L */
- .align 16
- .long 0xb64AF600, 0xb64AF600, 0xb64AF600, 0xb64AF600
- /* iBrkValue = SP 2/3 */
- .align 16
- .long 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab, 0x3f2aaaab
- /* iOffExpoMask = SP significand mask */
- .align 16
- .long 0x007fffff, 0x007fffff, 0x007fffff, 0x007fffff
- /* sOne = SP 1.0 */
- .align 16
- .long 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000
- /* spoly[9] */
- .align 16
- .long 0x3d8063B4, 0x3d8063B4, 0x3d8063B4, 0x3d8063B4 /* coeff9 */
- .long 0xbd890073, 0xbd890073, 0xbd890073, 0xbd890073 /* coeff8 */
- .long 0x3d775317, 0x3d775317, 0x3d775317, 0x3d775317 /* coeff7 */
- .long 0xbd91FB27, 0xbd91FB27, 0xbd91FB27, 0xbd91FB27 /* coeff6 */
- .long 0x3dB20B96, 0x3dB20B96, 0x3dB20B96, 0x3dB20B96 /* coeff5 */
- .long 0xbdDE6E20, 0xbdDE6E20, 0xbdDE6E20, 0xbdDE6E20 /* coeff4 */
- .long 0x3e143CE5, 0x3e143CE5, 0x3e143CE5, 0x3e143CE5 /* coeff3 */
- .long 0xbe5E5BC5, 0xbe5E5BC5, 0xbe5E5BC5, 0xbe5E5BC5 /* coeff2 */
- .long 0x3eDE5BD9, 0x3eDE5BD9, 0x3eDE5BD9, 0x3eDE5BD9 /* coeff1 */
- /* L2 */
- .align 16
- .long 0x3e9a209b, 0x3e9a209b, 0x3e9a209b, 0x3e9a209b
- .align 16
- .type __svml_slog10_data_internal, @object
- .size __svml_slog10_data_internal, .-__svml_slog10_data_internal
+LOCAL_DATA_NAME:
+ DATA_VEC (LOCAL_DATA_NAME, _L2L, 0xb64af600)
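+ /* Polynomial coefficients for log10 (1 + r), highest degree
+ first. */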
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_9, 0x3d8063b4)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_8, 0xbd890073)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_7, 0x3d775317)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_6, 0xbd91fb27)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_5, 0x3db20b96)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_4, 0xbdde6e20)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_3, 0x3e143ce5)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_2, 0xbe5e5bc5)
+ DATA_VEC (LOCAL_DATA_NAME, _Coeff_1, 0x3ede5bd9)
+ DATA_VEC (LOCAL_DATA_NAME, _L2H, 0x3e9a2100)
+ .type LOCAL_DATA_NAME, @object
+ .size LOCAL_DATA_NAME, .-LOCAL_DATA_NAME