@@ -1,6 +1,14 @@
2015-10-07 Adhemerval Zanella <adhemerval.zanella@linaro.org>
Stefan Liebler <stli@linux.vnet.ibm.com>
+ * sysdeps/unix/sysv/linux/s390/s390-64/syscall_cancel.S: New file.
+ * sysdeps/unix/sysv/linux/s390/s390-64/sysdep-cancel.h (PSEUDO): Redefine
+ to call __syscall_cancel function for cancellable syscalls.
+ (__pthread_get_ip): Add implementation.
+ * sysdeps/unix/sysv/linux/s390/s390-64/sysdep.h (SYSCALL_CANCEL_ERROR): Add
+ definition.
+ (SYSCALL_CANCEL_ERRNO): Likewise.
+
* sysdeps/unix/sysv/linux/s390/s390-32/syscall_cancel.S: New file.
* sysdeps/unix/sysv/linux/s390/s390-32/sysdep-cancel.h (PSEUDO): Redefine
to call __syscall_cancel function for cancellable syscalls.
new file mode 100644
@@ -0,0 +1,87 @@
+/* Cancellable syscall wrapper - s390x version.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* long int [r2] __syscall_cancel_arch (int *cancelhandling [r2],
+ long int nr [r3],
+ long int arg1 [r4],
+ long int arg2 [r5],
+ long int arg3 [r6],
+ long int arg4 [SP+160],
+ long int arg5 [SP+168],
+ long int arg6 [SP+176]) */
+
+ENTRY (__syscall_cancel_arch)
+
+ /* Save registers and setup stack. */
+ stmg %r6,%r15,48(%r15) /* Save registers. */
+ cfi_offset (%r15,-40)
+ cfi_offset (%r14,-48)
+ cfi_offset (%r13,-56)
+ cfi_offset (%r12,-64)
+ cfi_offset (%r11,-72)
+ cfi_offset (%r10,-80)
+ cfi_offset (%r9,-88)
+ cfi_offset (%r8,-96)
+ cfi_offset (%r7,-104)
+ cfi_offset (%r6,-112)
+ lgr %r1,%r15
+ lg %r0,8(%r15) /* Load eos. */
+ aghi %r15,-160 /* Buy stack space. */
+ cfi_adjust_cfa_offset (160)
+ stg %r1,0(%r15) /* Store back chain. */
+ stg %r0,8(%r15) /* Store eos. */
+
+ .globl __syscall_cancel_arch_start
+ .type __syscall_cancel_arch_start,@function
+__syscall_cancel_arch_start:
+
+ /* if (*cancelhandling & CANCELED_BITMASK)
+ __syscall_do_cancel() */
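+	/* The tm below tests the low-order byte of the 32-bit
+	   cancelhandling word (offset 3, since s390x is big-endian)
+	   against the CANCELED bit mask (0x04 in this series).  */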
+ tm 3(%r2),4
+ jne 1f
+
+	/* Issue a 6-argument syscall.  */
+ lgr %r1,%r3 /* Move syscall number. */
+ lgr %r2,%r4 /* First parameter. */
+ lgr %r3,%r5 /* Second parameter. */
+ lgr %r4,%r6 /* Third parameter. */
+ lg %r5,320(%r15) /* Fourth parameter. */
+ lg %r6,328(%r15) /* Fifth parameter. */
+ lg %r7,336(%r15) /* Sixth parameter. */
+
+	svc	0		/* With svc 0, the syscall number is taken from %r1.  */
+
+ .globl __syscall_cancel_arch_end
+ .type __syscall_cancel_arch_end,@function
+__syscall_cancel_arch_end:
+	lg	%r15,0(%r15)		/* Load back chain.  */
+ cfi_adjust_cfa_offset (-160)
+ lmg %r6,%r15,48(%r15) /* Load registers. */
+
+ br %r14
+
+	/* Branch to __syscall_do_cancel.  */
+1:
+	lg	%r15,0(%r15)		/* Load back chain.  */
+ cfi_adjust_cfa_offset (-160)
+ lmg %r6,%r15,48(%r15) /* Load registers. */
+ jg __syscall_do_cancel
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
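
For orientation, the bracket implemented above corresponds roughly to the
following C sketch.  CANCELED_BITMASK, do_cancel and raw_syscall are
hypothetical stand-ins for the glibc internals: the real __syscall_do_cancel
never returns, and the real syscall is the "svc 0" issued between
__syscall_cancel_arch_start and __syscall_cancel_arch_end.

#include <stdio.h>

#define CANCELED_BITMASK 0x04	/* Mask tested by "tm 3(%r2),4" above.  */

/* Stand-in for __syscall_do_cancel.  */
static void
do_cancel (void)
{
}

/* Stand-in for the raw "svc 0" system call.  */
static long int
raw_syscall (long int nr, long int a1, long int a2, long int a3,
	     long int a4, long int a5, long int a6)
{
  (void) a1; (void) a2; (void) a3; (void) a4; (void) a5; (void) a6;
  return nr;
}

static long int
syscall_cancel_arch (int *cancelhandling, long int nr, long int a1,
		     long int a2, long int a3, long int a4, long int a5,
		     long int a6)
{
  /* A cancellation request raised before the syscall is issued must win:
     re-check the CANCELED bit inside the bracket and cancel instead of
     entering the kernel.  */
  if (*cancelhandling & CANCELED_BITMASK)
    {
      do_cancel ();
      return -1;
    }
  return raw_syscall (nr, a1, a2, a3, a4, a5, a6);
}

int
main (void)
{
  int ch = 0;
  printf ("%ld\n", syscall_cancel_arch (&ch, 3, 0, 0, 0, 0, 0, 0));
  return 0;
}
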
@@ -24,93 +24,85 @@
#if IS_IN (libc) || IS_IN (libpthread) || IS_IN (librt)
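+/* Within libc, bind directly to the hidden __syscall_cancel alias; from
+   libpthread and librt the call has to go through the PLT.  */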
+# if IS_IN (libc)
+# define JMP_SYSCALL_CANCEL HIDDEN_JUMPTARGET(__syscall_cancel)
+# else
+# define JMP_SYSCALL_CANCEL __syscall_cancel@plt
+# endif
+
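+/* %r6 is call-saved but is clobbered by the argument shift for syscalls
+   with four or more arguments, so save it before the __syscall_cancel
+   call and restore it afterwards.  */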
+# define STORE_0 /* Nothing */
+# define STORE_1 /* Nothing */
+# define STORE_2 /* Nothing */
+# define STORE_3 /* Nothing */
+# define STORE_4 stg %r6,48(%r15); \
+ cfi_offset (%r6,-112);
+# define STORE_5 STORE_4
+# define STORE_6 STORE_4
+
+# define LOAD_0 /* Nothing */
+# define LOAD_1 /* Nothing */
+# define LOAD_2 /* Nothing */
+# define LOAD_3 /* Nothing */
+# define LOAD_4 lg %r6,48(%r15);
+# define LOAD_5 LOAD_4
+# define LOAD_6 LOAD_4
+
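+/* Shift the wrapper's arguments up by one position (%r2 -> %r3, ...,
+   %r6 -> 160(%r15), caller's stack argument -> 168(%r15)) so that the
+   syscall number can be passed to __syscall_cancel in %r2.  */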
+# define MOVE_ARGS_0
+# define MOVE_ARGS_1 lgr %r3,%r2; \
+ MOVE_ARGS_0
+# define MOVE_ARGS_2 lgr %r4,%r3; \
+ MOVE_ARGS_1
+# define MOVE_ARGS_3 lgr %r5,%r4; \
+ MOVE_ARGS_2
+# define MOVE_ARGS_4 lgr %r6,%r5; \
+ MOVE_ARGS_3
+# define MOVE_ARGS_5 stg %r6,160(%r15); \
+ MOVE_ARGS_4
+# define MOVE_ARGS_6 lg %r14,160(%r14); \
+ stg %r14,168(%r15); \
+ MOVE_ARGS_5
+
+
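+/* PSEUDO issues the syscall directly via DO_CALL when the process is
+   single-threaded; otherwise it shifts the arguments, loads the syscall
+   number into %r2, and calls __syscall_cancel.  */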
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.text; \
L(pseudo_cancel): \
cfi_startproc; \
- STM_##args \
- stmg %r13,%r15,104(%r15); \
+ stmg %r14,%r15,112(%r15); \
cfi_offset (%r15,-40); \
cfi_offset (%r14,-48); \
- cfi_offset (%r13,-56); \
+ STORE_##args \
lgr %r14,%r15; \
- aghi %r15,-160; \
- cfi_adjust_cfa_offset (160); \
+ aghi %r15,-176; \
+ cfi_adjust_cfa_offset (176); \
stg %r14,0(%r15); \
- brasl %r14,CENABLE; \
- lgr %r0,%r2; \
- LM_##args \
- .if SYS_ify (syscall_name) < 256; \
- svc SYS_ify (syscall_name); \
- .else; \
- lghi %r1,SYS_ify (syscall_name); \
- svc 0; \
- .endif; \
- LR7_##args \
- lgr %r13,%r2; \
- lgr %r2,%r0; \
- brasl %r14,CDISABLE; \
- lgr %r2,%r13; \
- lmg %r13,%r15,104+160(%r15); \
+ MOVE_ARGS_##args \
+ lghi %r2,SYS_ify (syscall_name); \
+ brasl %r14,JMP_SYSCALL_CANCEL; \
+ lmg %r14,%r15,112+176(%r15); \
+ cfi_restore (%r14); \
+ cfi_restore (%r15); \
+ LOAD_##args \
cfi_endproc; \
j L(pseudo_check); \
ENTRY(name) \
SINGLE_THREAD_P \
jne L(pseudo_cancel); \
-.type __##syscall_name##_nocancel,@function; \
-.globl __##syscall_name##_nocancel; \
-__##syscall_name##_nocancel: \
DO_CALL(syscall_name, args); \
L(pseudo_check): \
lghi %r4,-4095; \
clgr %r2,%r4; \
jgnl SYSCALL_ERROR_LABEL; \
-.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
L(pseudo_end):
# if IS_IN (libpthread)
-# define CENABLE __pthread_enable_asynccancel
-# define CDISABLE __pthread_disable_asynccancel
# define __local_multiple_threads __pthread_multiple_threads
# elif IS_IN (libc)
-# define CENABLE __libc_enable_asynccancel
-# define CDISABLE __libc_disable_asynccancel
# define __local_multiple_threads __libc_multiple_threads
-# elif IS_IN (librt)
-# define CENABLE __librt_enable_asynccancel
-# define CDISABLE __librt_disable_asynccancel
-# else
+# elif !IS_IN (librt)
# error Unsupported library
# endif
-#define STM_0 /* Nothing */
-#define STM_1 stg %r2,16(%r15);
-#define STM_2 stmg %r2,%r3,16(%r15);
-#define STM_3 stmg %r2,%r4,16(%r15);
-#define STM_4 stmg %r2,%r5,16(%r15);
-#define STM_5 stmg %r2,%r5,16(%r15);
-#define STM_6 stmg %r2,%r7,16(%r15);
-
-#define LM_0 /* Nothing */
-#define LM_1 lg %r2,16+160(%r15);
-#define LM_2 lmg %r2,%r3,16+160(%r15);
-#define LM_3 lmg %r2,%r4,16+160(%r15);
-#define LM_4 lmg %r2,%r5,16+160(%r15);
-#define LM_5 lmg %r2,%r5,16+160(%r15);
-#define LM_6 lmg %r2,%r5,16+160(%r15); \
- cfi_offset (%r7, -104); \
- lg %r7,160+160(%r15);
-
-#define LR7_0 /* Nothing */
-#define LR7_1 /* Nothing */
-#define LR7_2 /* Nothing */
-#define LR7_3 /* Nothing */
-#define LR7_4 /* Nothing */
-#define LR7_5 /* Nothing */
-#define LR7_6 lg %r7,56+160(%r15); \
- cfi_restore (%r7);
-
# if IS_IN (libpthread) || IS_IN (libc)
# ifndef __ASSEMBLER__
extern int __local_multiple_threads attribute_hidden;
@@ -149,4 +141,10 @@ extern int __local_multiple_threads attribute_hidden;
# define RTLD_SINGLE_THREAD_P \
__builtin_expect (THREAD_GETMEM (THREAD_SELF, \
header.multiple_threads) == 0, 1)
+
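+/* Return the instruction address from the PSW of the interrupted
+   context, so the cancellation code can tell whether a signal arrived
+   inside the __syscall_cancel_arch bracket.  */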
+static inline long int
+__pthread_get_ip (const struct ucontext *uc)
+{
+ return uc->uc_mcontext.psw.addr;
+}
#endif
@@ -249,6 +249,14 @@
#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
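+/* A value in the range [-4095, -1] returned by a cancellable syscall
+   encodes a negated errno; any other value is a successful result.  */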
+#undef SYSCALL_CANCEL_ERROR
+#define SYSCALL_CANCEL_ERROR(__val) \
+ ((unsigned long) (__val) >= -4095UL)
+
+#undef SYSCALL_CANCEL_ERRNO
+#define SYSCALL_CANCEL_ERRNO(__val) \
+ (-(__val))
+
#define DECLARGS_0()
#define DECLARGS_1(arg1) \
register unsigned long gpr2 asm ("2") = (unsigned long)(arg1);