@@ -1,5 +1,35 @@
2014-09-19 Adhemerval Zanella <azanella@linux.vnet.ibm.com>
+ * sysdeps/unix/sysv/linux/fcntl.c (__fcntl_nocancel): Rewrite to use
+ new cancellation mechanism.
+ (__libc_fcntl): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/cancellation.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Remove
+ file.
+ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Remove file.
+ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+	(lll_futex_wait_cancel): New define: cancellable futex wait call.
+	(lll_futex_timed_wait_cancel): New define: cancellable timed futex
+	wait call.
+	(lll_futex_wake_unlock): New define: futex wake unlock call.
+	(lll_wait_tid): Use the cancellable futex wait call.
+	* sysdeps/unix/sysv/linux/x86_64/recv.c (__libc_recv): Remove the
+	calls that enable/disable asynchronous cancellation and use the
+	cancellable syscall entrypoint instead.
+ * sysdeps/unix/sysv/linux/x86_64/send.c (__libc_send): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h (PSEUDO): Likewise.
+ * sysdeps/unix/sysv/linux/x86_64/syscall_cancel.S: New file:
+ cancellable syscall entrypoint.
+ * sysdeps/unix/sysv/linux/x86_64/sysdep.h (SYSCALL_CANCEL_ERROR): New
+ define.
+ (SYSCALL_CANCEL_ERRNO): New define.
+	* sysdeps/x86_64/nptl/pthreaddef.h (__pthread_get_ip): New function:
+	return the ucontext_t instruction pointer address.
+ * sysdeps/x86_64/nptl/tcb-offsets.sym [TCB_CANCELING_BITMASK]: Remove.
+ * sysdeps/x86_64/nptl/tls.h (THREAD_ATOMIC_BIT_SET): Remove define.
+
* nptl/Makefile [routines]: Add syscall_cancel object.
[libpthread-routines]: Remove cancellation object.
* rt/Makefile [CFLAGS-librt-cancellation.c]: Remove rule.
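
The hunks below replace the per-wrapper LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET
bracketing with a single cancellable-syscall entrypoint.  As a rough sketch of
the return convention that SYSCALL_CANCEL_ERROR and SYSCALL_CANCEL_ERRNO
decode (illustration only, built on the public syscall() wrapper; the helper
name is made up and this is not the glibc implementation, which also performs
the pending-cancellation check around the kernel entry):

  /* Sketch of the __syscall_cancel / SYSCALL_CANCEL_NCS convention: errors
     come back as -errno, i.e. values in [-4095, -1], which is what
     SYSCALL_CANCEL_ERROR tests and SYSCALL_CANCEL_ERRNO negates.  */
  #include <errno.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  static long int
  illustrative_syscall_cancel (long int nr, long int a1, long int a2,
                               long int a3, long int a4, long int a5,
                               long int a6)
  {
    long int ret = syscall (nr, a1, a2, a3, a4, a5, a6);
    return ret == -1 ? -errno : ret;
  }

With that convention, __fcntl_common_cancel below can turn a failing
F_GETOWN_EX request into errno without ever switching the calling thread to
asynchronous cancellation.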
@@ -23,9 +23,8 @@
#include <sys/syscall.h>
-
-static int
-do_fcntl (int fd, int cmd, void *arg)
+static inline int
+__fcntl_common_nocancel (int fd, int cmd, void *arg)
{
if (cmd != F_GETOWN)
return INLINE_SYSCALL (fcntl, 3, fd, cmd, arg);
@@ -40,8 +39,22 @@ do_fcntl (int fd, int cmd, void *arg)
return -1;
}
+static inline int
+__fcntl_common_cancel (int fd, int cmd, void *arg)
+{
+ if (cmd != F_GETOWN)
+ return SYSCALL_CANCEL (fcntl, fd, cmd, arg);
-#ifndef NO_CANCELLATION
+ struct f_owner_ex fex;
+ int res = SYSCALL_CANCEL_NCS (fcntl, fd, F_GETOWN_EX, &fex);
+ if (!SYSCALL_CANCEL_ERROR (res))
+ return fex.type == F_OWNER_GID ? -fex.pid : fex.pid;
+
+ __set_errno (SYSCALL_CANCEL_ERRNO (res));
+ return -1;
+}
+
+#ifndef IS_IN_rtld
int
__fcntl_nocancel (int fd, int cmd, ...)
{
@@ -52,11 +65,10 @@ __fcntl_nocancel (int fd, int cmd, ...)
arg = va_arg (ap, void *);
va_end (ap);
- return do_fcntl (fd, cmd, arg);
+ return __fcntl_common_nocancel (fd, cmd, arg);
}
#endif
-
int
__libc_fcntl (int fd, int cmd, ...)
{
@@ -67,16 +79,10 @@ __libc_fcntl (int fd, int cmd, ...)
arg = va_arg (ap, void *);
va_end (ap);
- if (SINGLE_THREAD_P || cmd != F_SETLKW)
- return do_fcntl (fd, cmd, arg);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_fcntl (fd, cmd, arg);
-
- LIBC_CANCEL_RESET (oldtype);
+ if (cmd != F_SETLKW)
+ return __fcntl_common_nocancel (fd, cmd, arg);
- return result;
+ return __fcntl_common_cancel (fd, cmd, arg);
}
libc_hidden_def (__libc_fcntl)
deleted file mode 100644
@@ -1,117 +0,0 @@
-/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <tcb-offsets.h>
-#include <kernel-features.h>
-#include "lowlevellock.h"
-
-#ifdef IS_IN_libpthread
-# if defined SHARED && !defined NO_HIDDEN
-# define __pthread_unwind __GI___pthread_unwind
-# endif
-#else
-# ifndef SHARED
- .weak __pthread_unwind
-# endif
-#endif
-
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#else
-# if FUTEX_WAIT == 0
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-#endif
-
-/* It is crucial that the functions in this file don't modify registers
- other than %rax and %r11. The syscall wrapper code depends on this
- because it doesn't explicitly save the other registers which hold
- relevant values. */
- .text
-
- .hidden __pthread_enable_asynccancel
-ENTRY(__pthread_enable_asynccancel)
- movl %fs:CANCELHANDLING, %eax
-2: movl %eax, %r11d
- orl $TCB_CANCELTYPE_BITMASK, %r11d
- cmpl %eax, %r11d
- je 1f
-
- lock
- cmpxchgl %r11d, %fs:CANCELHANDLING
- jnz 2b
-
- andl $(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
- cmpl $(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
- je 3f
-
-1: ret
-
-3: subq $8, %rsp
- cfi_adjust_cfa_offset(8)
- LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
- lock
- orl $TCB_EXITING_BITMASK, %fs:CANCELHANDLING
- mov %fs:CLEANUP_JMP_BUF, %RDI_LP
-#ifdef SHARED
- call __pthread_unwind@PLT
-#else
- call __pthread_unwind
-#endif
- hlt
-END(__pthread_enable_asynccancel)
-
-
- .hidden __pthread_disable_asynccancel
-ENTRY(__pthread_disable_asynccancel)
- testl $TCB_CANCELTYPE_BITMASK, %edi
- jnz 1f
-
- movl %fs:CANCELHANDLING, %eax
-2: movl %eax, %r11d
- andl $~TCB_CANCELTYPE_BITMASK, %r11d
- lock
- cmpxchgl %r11d, %fs:CANCELHANDLING
- jnz 2b
-
- movl %r11d, %eax
-3: andl $(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
- cmpl $TCB_CANCELING_BITMASK, %eax
- je 4f
-1: ret
-
- /* Performance doesn't matter in this loop. We will
- delay until the thread is canceled. And we will unlikely
- enter the loop twice. */
-4: mov %fs:0, %RDI_LP
- movl $__NR_futex, %eax
- xorq %r10, %r10
- addq $CANCELHANDLING, %rdi
- LOAD_PRIVATE_FUTEX_WAIT (%esi)
- syscall
- movl %fs:CANCELHANDLING, %eax
- jmp 3b
-END(__pthread_disable_asynccancel)
deleted file mode 100644
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include "cancellation.S"
deleted file mode 100644
@@ -1,21 +0,0 @@
-/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include "cancellation.S"
@@ -124,6 +124,20 @@
__status; \
})
+#define lll_futex_wait_cancel(futexp, val, private) \
+ lll_futex_timed_wait_cancel (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait_cancel(futexp, val, timespec, private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT; \
+ \
+    __ret = __syscall_cancel (__NR_futex, (long int) (futexp),		      \
+			      (long int) __lll_private_flag (__op, private),  \
+			      (long int) (val), (long int) (timespec), 0, 0); \
+ __ret; \
+ })
+
#define lll_futex_wake(futex, nr, private) \
({ \
@@ -139,6 +153,18 @@
__status; \
})
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
+ ({ \
+ INTERNAL_SYSCALL_DECL (__err); \
+ long int __ret; \
+ \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
+ (nr_wake), (nr_wake2), (futexp2), \
+ FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
+ })
+
/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
@@ -405,18 +431,9 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
The macro parameter must not have any side effect. */
#define lll_wait_tid(tid) \
do { \
- int __ignore; \
- register __typeof (tid) _tid asm ("edx") = (tid); \
- if (_tid != 0) \
- __asm __volatile ("xorq %%r10, %%r10\n\t" \
- "1:\tmovq %2, %%rax\n\t" \
- "syscall\n\t" \
- "cmpl $0, (%%rdi)\n\t" \
- "jne 1b" \
- : "=&a" (__ignore) \
- : "S" (FUTEX_WAIT), "i" (SYS_futex), "D" (&tid), \
- "d" (_tid) \
- : "memory", "cc", "r10", "r11", "cx"); \
+ __typeof (tid) __tid; \
+ while ((__tid = (tid)) != 0) \
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
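
With lll_wait_tid now looping over lll_futex_wait_cancel, the thread-join wait
reaches the kernel through the new cancellable entrypoint instead of the
removed inline-assembly futex loop.  The kernel-side operation it boils down
to is roughly the following (a sketch using the public syscall() wrapper and
the kernel FUTEX_WAIT constant; it deliberately omits the cancellation
handling that __syscall_cancel provides):

  /* Sketch of the futex wait behind lll_futex_wait_cancel (&tid, val,
     LLL_SHARED): block while *futexp still equals VAL, with no timeout.  */
  #include <linux/futex.h>
  #include <stddef.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  static long int
  futex_wait_shared (int *futexp, int val)
  {
    return syscall (SYS_futex, futexp, FUTEX_WAIT, val, NULL);
  }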
deleted file mode 100644
@@ -1,840 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <pthread-pi-defines.h>
-#include <pthread-errnos.h>
-#include <stap-probe.h>
-
-#include <kernel-features.h>
-
-
- .text
-
-
-/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
- const struct timespec *abstime) */
- .globl __pthread_cond_timedwait
- .type __pthread_cond_timedwait, @function
- .align 16
-__pthread_cond_timedwait:
-.LSTARTCODE:
- cfi_startproc
-#ifdef SHARED
- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
- DW.ref.__gcc_personality_v0)
- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-
- pushq %r12
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r14, 0)
- pushq %r15
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r15, 0)
-#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
-# define FRAME_SIZE (32+8)
-#else
-# define FRAME_SIZE (48+8)
-#endif
- subq $FRAME_SIZE, %rsp
- cfi_adjust_cfa_offset(FRAME_SIZE)
- cfi_remember_state
-
- LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)
-
- cmpq $1000000000, 8(%rdx)
- movl $EINVAL, %eax
- jae 48f
-
- /* Stack frame:
-
- rsp + 48
- +--------------------------+
- rsp + 32 | timeout value |
- +--------------------------+
- rsp + 24 | old wake_seq value |
- +--------------------------+
- rsp + 16 | mutex pointer |
- +--------------------------+
- rsp + 8 | condvar pointer |
- +--------------------------+
- rsp + 4 | old broadcast_seq value |
- +--------------------------+
- rsp + 0 | old cancellation mode |
- +--------------------------+
- */
-
- LP_OP(cmp) $-1, dep_mutex(%rdi)
-
- /* Prepare structure passed to cancellation handler. */
- movq %rdi, 8(%rsp)
- movq %rsi, 16(%rsp)
- movq %rdx, %r13
-
- je 22f
- mov %RSI_LP, dep_mutex(%rdi)
-
-22:
- xorb %r15b, %r15b
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-# ifdef PIC
- cmpl $0, __have_futex_clock_realtime(%rip)
-# else
- cmpl $0, __have_futex_clock_realtime
-# endif
- je .Lreltmo
-#endif
-
- /* Get internal lock. */
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jnz 31f
-
- /* Unlock the mutex. */
-32: movq 16(%rsp), %rdi
- xorl %esi, %esi
- callq __pthread_mutex_unlock_usercnt
-
- testl %eax, %eax
- jne 46f
-
- movq 8(%rsp), %rdi
- incq total_seq(%rdi)
- incl cond_futex(%rdi)
- addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Get and store current wakeup_seq value. */
- movq 8(%rsp), %rdi
- movq wakeup_seq(%rdi), %r9
- movl broadcast_seq(%rdi), %edx
- movq %r9, 24(%rsp)
- movl %edx, 4(%rsp)
-
- cmpq $0, (%r13)
- movq $-ETIMEDOUT, %r14
- js 36f
-
-38: movl cond_futex(%rdi), %r12d
-
- /* Unlock. */
- LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- jne 33f
-
-.LcleanupSTART1:
-34: callq __pthread_enable_asynccancel
- movl %eax, (%rsp)
-
- movq %r13, %r10
- movl $FUTEX_WAIT_BITSET, %esi
- LP_OP(cmp) $-1, dep_mutex(%rdi)
- je 60f
-
- mov dep_mutex(%rdi), %R8_LP
- /* Requeue to a non-robust PI mutex if the PI bit is set and
- the robust bit is not set. */
- movl MUTEX_KIND(%r8), %eax
- andl $(ROBUST_BIT|PI_BIT), %eax
- cmpl $PI_BIT, %eax
- jne 61f
-
- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
- xorl %eax, %eax
- /* The following only works like this because we only support
- two clocks, represented using a single bit. */
- testl $1, cond_nwaiters(%rdi)
- movl $FUTEX_CLOCK_REALTIME, %edx
- cmove %edx, %eax
- orl %eax, %esi
- movq %r12, %rdx
- addq $cond_futex, %rdi
- movl $SYS_futex, %eax
- syscall
-
- cmpl $0, %eax
- sete %r15b
-
-#ifdef __ASSUME_REQUEUE_PI
- jmp 62f
-#else
- je 62f
-
- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
- successfully, it has already locked the mutex for us and the
- pi_flag (%r15b) is set to denote that fact. However, if another
- thread changed the futex value before we entered the wait, the
- syscall may return an EAGAIN and the mutex is not locked. We go
- ahead with a success anyway since later we look at the pi_flag to
- decide if we got the mutex or not. The sequence numbers then make
- sure that only one of the threads actually wake up. We retry using
- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
- and PI futexes don't mix.
-
- Note that we don't check for EAGAIN specifically; we assume that the
- only other error the futex function could return is EAGAIN (barring
- the ETIMEOUT of course, for the timeout case in futex) since
- anything else would mean an error in our function. It is too
- expensive to do that check for every call (which is quite common in
- case of a large number of threads), so it has been skipped. */
- cmpl $-ENOSYS, %eax
- jne 62f
-
- subq $cond_futex, %rdi
-#endif
-
-61: movl $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
-60: xorb %r15b, %r15b
- xorl %eax, %eax
- /* The following only works like this because we only support
- two clocks, represented using a single bit. */
- testl $1, cond_nwaiters(%rdi)
- movl $FUTEX_CLOCK_REALTIME, %edx
- movl $0xffffffff, %r9d
- cmove %edx, %eax
- orl %eax, %esi
- movq %r12, %rdx
- addq $cond_futex, %rdi
- movl $SYS_futex, %eax
- syscall
-62: movq %rax, %r14
-
- movl (%rsp), %edi
- callq __pthread_disable_asynccancel
-.LcleanupEND1:
-
- /* Lock. */
- movq 8(%rsp), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jne 35f
-
-36: movl broadcast_seq(%rdi), %edx
-
- movq woken_seq(%rdi), %rax
-
- movq wakeup_seq(%rdi), %r9
-
- cmpl 4(%rsp), %edx
- jne 53f
-
- cmpq 24(%rsp), %r9
- jbe 45f
-
- cmpq %rax, %r9
- ja 39f
-
-45: cmpq $-ETIMEDOUT, %r14
- je 99f
-
- /* We need to go back to futex_wait. If we're using requeue_pi, then
- release the mutex we had acquired and go back. */
- test %r15b, %r15b
- jz 38b
-
- /* Adjust the mutex values first and then unlock it. The unlock
- should always succeed or else the kernel did not lock the
- mutex correctly. */
- movq %r8, %rdi
- callq __pthread_mutex_cond_lock_adjust
- xorl %esi, %esi
- callq __pthread_mutex_unlock_usercnt
- /* Reload cond_var. */
- movq 8(%rsp), %rdi
- jmp 38b
-
-99: incq wakeup_seq(%rdi)
- incl cond_futex(%rdi)
- movl $ETIMEDOUT, %r14d
- jmp 44f
-
-53: xorq %r14, %r14
- jmp 54f
-
-39: xorq %r14, %r14
-44: incq woken_seq(%rdi)
-
-54: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Wake up a thread which wants to destroy the condvar object. */
- cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 55f
- movl cond_nwaiters(%rdi), %eax
- andl $~((1 << nwaiters_shift) - 1), %eax
- jne 55f
-
- addq $cond_nwaiters, %rdi
- LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
- movl $1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
- subq $cond_nwaiters, %rdi
-
-55: LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- jne 40f
-
- /* If requeue_pi is used the kernel performs the locking of the
- mutex. */
-41: movq 16(%rsp), %rdi
- testb %r15b, %r15b
- jnz 64f
-
- callq __pthread_mutex_cond_lock
-
-63: testq %rax, %rax
- cmoveq %r14, %rax
-
-48: addq $FRAME_SIZE, %rsp
- cfi_adjust_cfa_offset(-FRAME_SIZE)
- popq %r15
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r15)
- popq %r14
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r14)
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r12)
-
- retq
-
- cfi_restore_state
-
-64: callq __pthread_mutex_cond_lock_adjust
- movq %r14, %rax
- jmp 48b
-
- /* Initial locking failed. */
-31:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
- jmp 32b
-
- /* Unlock in loop requires wakeup. */
-33:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
- jmp 34b
-
- /* Locking in loop failed. */
-35:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
- jmp 36b
-
- /* Unlock after loop requires wakeup. */
-40:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
- jmp 41b
-
- /* The initial unlocking of the mutex failed. */
-46: movq 8(%rsp), %rdi
- movq %rax, (%rsp)
- LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- jne 47f
-
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
-
-47: movq (%rsp), %rax
- jmp 48b
-
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-.Lreltmo:
- /* Get internal lock. */
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-# if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-# else
- cmpxchgl %esi, cond_lock(%rdi)
-# endif
- jnz 1f
-
- /* Unlock the mutex. */
-2: movq 16(%rsp), %rdi
- xorl %esi, %esi
- callq __pthread_mutex_unlock_usercnt
-
- testl %eax, %eax
- jne 46b
-
- movq 8(%rsp), %rdi
- incq total_seq(%rdi)
- incl cond_futex(%rdi)
- addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Get and store current wakeup_seq value. */
- movq 8(%rsp), %rdi
- movq wakeup_seq(%rdi), %r9
- movl broadcast_seq(%rdi), %edx
- movq %r9, 24(%rsp)
- movl %edx, 4(%rsp)
-
- /* Get the current time. */
-8:
-# ifdef __NR_clock_gettime
- /* Get the clock number. Note that the field in the condvar
- structure stores the number minus 1. */
- movq 8(%rsp), %rdi
- movl cond_nwaiters(%rdi), %edi
- andl $((1 << nwaiters_shift) - 1), %edi
- /* Only clocks 0 and 1 are allowed so far. Both are handled in the
- kernel. */
- leaq 32(%rsp), %rsi
-# ifdef SHARED
- mov __vdso_clock_gettime@GOTPCREL(%rip), %RAX_LP
- mov (%rax), %RAX_LP
- PTR_DEMANGLE (%RAX_LP)
- call *%rax
-# else
- movl $__NR_clock_gettime, %eax
- syscall
-# endif
-
- /* Compute relative timeout. */
- movq (%r13), %rcx
- movq 8(%r13), %rdx
- subq 32(%rsp), %rcx
- subq 40(%rsp), %rdx
-# else
- leaq 24(%rsp), %rdi
- xorl %esi, %esi
- /* This call works because we directly jump to a system call entry
- which preserves all the registers. */
- call JUMPTARGET(__gettimeofday)
-
- /* Compute relative timeout. */
- movq 40(%rsp), %rax
- movl $1000, %edx
- mul %rdx /* Milli seconds to nano seconds. */
- movq (%r13), %rcx
- movq 8(%r13), %rdx
- subq 32(%rsp), %rcx
- subq %rax, %rdx
-# endif
- jns 12f
- addq $1000000000, %rdx
- decq %rcx
-12: testq %rcx, %rcx
- movq 8(%rsp), %rdi
- movq $-ETIMEDOUT, %r14
- js 6f
-
- /* Store relative timeout. */
-21: movq %rcx, 32(%rsp)
- movq %rdx, 40(%rsp)
-
- movl cond_futex(%rdi), %r12d
-
- /* Unlock. */
- LOCK
-# if cond_lock == 0
- decl (%rdi)
-# else
- decl cond_lock(%rdi)
-# endif
- jne 3f
-
-.LcleanupSTART2:
-4: callq __pthread_enable_asynccancel
- movl %eax, (%rsp)
-
- leaq 32(%rsp), %r10
- LP_OP(cmp) $-1, dep_mutex(%rdi)
- movq %r12, %rdx
-# ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAIT, %eax
- movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-# else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
-# if FUTEX_WAIT != 0
- orl $FUTEX_WAIT, %esi
-# endif
-# endif
- addq $cond_futex, %rdi
- movl $SYS_futex, %eax
- syscall
- movq %rax, %r14
-
- movl (%rsp), %edi
- callq __pthread_disable_asynccancel
-.LcleanupEND2:
-
- /* Lock. */
- movq 8(%rsp), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-# if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-# else
- cmpxchgl %esi, cond_lock(%rdi)
-# endif
- jne 5f
-
-6: movl broadcast_seq(%rdi), %edx
-
- movq woken_seq(%rdi), %rax
-
- movq wakeup_seq(%rdi), %r9
-
- cmpl 4(%rsp), %edx
- jne 53b
-
- cmpq 24(%rsp), %r9
- jbe 15f
-
- cmpq %rax, %r9
- ja 39b
-
-15: cmpq $-ETIMEDOUT, %r14
- jne 8b
-
- jmp 99b
-
- /* Initial locking failed. */
-1:
-# if cond_lock != 0
- addq $cond_lock, %rdi
-# endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
- jmp 2b
-
- /* Unlock in loop requires wakeup. */
-3:
-# if cond_lock != 0
- addq $cond_lock, %rdi
-# endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
- jmp 4b
-
- /* Locking in loop failed. */
-5:
-# if cond_lock != 0
- addq $cond_lock, %rdi
-# endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
-# if cond_lock != 0
- subq $cond_lock, %rdi
-# endif
- jmp 6b
-#endif
- .size __pthread_cond_timedwait, .-__pthread_cond_timedwait
-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
- GLIBC_2_3_2)
-
-
- .align 16
- .type __condvar_cleanup2, @function
-__condvar_cleanup2:
- /* Stack frame:
-
- rsp + 72
- +--------------------------+
- rsp + 64 | %r12 |
- +--------------------------+
- rsp + 56 | %r13 |
- +--------------------------+
- rsp + 48 | %r14 |
- +--------------------------+
- rsp + 24 | unused |
- +--------------------------+
- rsp + 16 | mutex pointer |
- +--------------------------+
- rsp + 8 | condvar pointer |
- +--------------------------+
- rsp + 4 | old broadcast_seq value |
- +--------------------------+
- rsp + 0 | old cancellation mode |
- +--------------------------+
- */
-
- movq %rax, 24(%rsp)
-
- /* Get internal lock. */
- movq 8(%rsp), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jz 1f
-
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
-
-1: movl broadcast_seq(%rdi), %edx
- cmpl 4(%rsp), %edx
- jne 3f
-
- /* We increment the wakeup_seq counter only if it is lower than
- total_seq. If this is not the case the thread was woken and
- then canceled. In this case we ignore the signal. */
- movq total_seq(%rdi), %rax
- cmpq wakeup_seq(%rdi), %rax
- jbe 6f
- incq wakeup_seq(%rdi)
- incl cond_futex(%rdi)
-6: incq woken_seq(%rdi)
-
-3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Wake up a thread which wants to destroy the condvar object. */
- xorq %r12, %r12
- cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 4f
- movl cond_nwaiters(%rdi), %eax
- andl $~((1 << nwaiters_shift) - 1), %eax
- jne 4f
-
- LP_OP(cmp) $-1, dep_mutex(%rdi)
- leaq cond_nwaiters(%rdi), %rdi
- movl $1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
- subq $cond_nwaiters, %rdi
- movl $1, %r12d
-
-4: LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- je 2f
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
-
- /* Wake up all waiters to make sure no signal gets lost. */
-2: testq %r12, %r12
- jnz 5f
- addq $cond_futex, %rdi
- LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
- movl $0x7fffffff, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
-
- /* Lock the mutex only if we don't own it already. This only happens
- in case of PI mutexes, if we got cancelled after a successful
- return of the futex syscall and before disabling async
- cancellation. */
-5: movq 16(%rsp), %rdi
- movl MUTEX_KIND(%rdi), %eax
- andl $(ROBUST_BIT|PI_BIT), %eax
- cmpl $PI_BIT, %eax
- jne 7f
-
- movl (%rdi), %eax
- andl $TID_MASK, %eax
- cmpl %eax, %fs:TID
- jne 7f
- /* We managed to get the lock. Fix it up before returning. */
- callq __pthread_mutex_cond_lock_adjust
- jmp 8f
-
-7: callq __pthread_mutex_cond_lock
-
-8: movq 24(%rsp), %rdi
- movq FRAME_SIZE(%rsp), %r15
- movq FRAME_SIZE+8(%rsp), %r14
- movq FRAME_SIZE+16(%rsp), %r13
- movq FRAME_SIZE+24(%rsp), %r12
-.LcallUR:
- call _Unwind_Resume@PLT
- hlt
-.LENDCODE:
- cfi_endproc
- .size __condvar_cleanup2, .-__condvar_cleanup2
-
-
- .section .gcc_except_table,"a",@progbits
-.LexceptSTART:
- .byte DW_EH_PE_omit # @LPStart format
- .byte DW_EH_PE_omit # @TType format
- .byte DW_EH_PE_uleb128 # call-site format
- .uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
- .uleb128 .LcleanupSTART1-.LSTARTCODE
- .uleb128 .LcleanupEND1-.LcleanupSTART1
- .uleb128 __condvar_cleanup2-.LSTARTCODE
- .uleb128 0
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
- .uleb128 .LcleanupSTART2-.LSTARTCODE
- .uleb128 .LcleanupEND2-.LcleanupSTART2
- .uleb128 __condvar_cleanup2-.LSTARTCODE
- .uleb128 0
-#endif
- .uleb128 .LcallUR-.LSTARTCODE
- .uleb128 .LENDCODE-.LcallUR
- .uleb128 0
- .uleb128 0
-.Lcstend:
-
-
-#ifdef SHARED
- .hidden DW.ref.__gcc_personality_v0
- .weak DW.ref.__gcc_personality_v0
- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
- .align LP_SIZE
- .type DW.ref.__gcc_personality_v0, @object
- .size DW.ref.__gcc_personality_v0, LP_SIZE
-DW.ref.__gcc_personality_v0:
- ASM_ADDR __gcc_personality_v0
-#endif
deleted file mode 100644
@@ -1,555 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <tcb-offsets.h>
-#include <pthread-pi-defines.h>
-#include <pthread-errnos.h>
-#include <stap-probe.h>
-
-#include <kernel-features.h>
-
-
- .text
-
-/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
- .globl __pthread_cond_wait
- .type __pthread_cond_wait, @function
- .align 16
-__pthread_cond_wait:
-.LSTARTCODE:
- cfi_startproc
-#ifdef SHARED
- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
- DW.ref.__gcc_personality_v0)
- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-
-#define FRAME_SIZE (32+8)
- leaq -FRAME_SIZE(%rsp), %rsp
- cfi_adjust_cfa_offset(FRAME_SIZE)
-
- /* Stack frame:
-
- rsp + 32
- +--------------------------+
- rsp + 24 | old wake_seq value |
- +--------------------------+
- rsp + 16 | mutex pointer |
- +--------------------------+
- rsp + 8 | condvar pointer |
- +--------------------------+
- rsp + 4 | old broadcast_seq value |
- +--------------------------+
- rsp + 0 | old cancellation mode |
- +--------------------------+
- */
-
- LIBC_PROBE (cond_wait, 2, %rdi, %rsi)
-
- LP_OP(cmp) $-1, dep_mutex(%rdi)
-
- /* Prepare structure passed to cancellation handler. */
- movq %rdi, 8(%rsp)
- movq %rsi, 16(%rsp)
-
- je 15f
- mov %RSI_LP, dep_mutex(%rdi)
-
- /* Get internal lock. */
-15: movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jne 1f
-
- /* Unlock the mutex. */
-2: movq 16(%rsp), %rdi
- xorl %esi, %esi
- callq __pthread_mutex_unlock_usercnt
-
- testl %eax, %eax
- jne 12f
-
- movq 8(%rsp), %rdi
- incq total_seq(%rdi)
- incl cond_futex(%rdi)
- addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Get and store current wakeup_seq value. */
- movq 8(%rsp), %rdi
- movq wakeup_seq(%rdi), %r9
- movl broadcast_seq(%rdi), %edx
- movq %r9, 24(%rsp)
- movl %edx, 4(%rsp)
-
- /* Unlock. */
-8: movl cond_futex(%rdi), %edx
- LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- jne 3f
-
-.LcleanupSTART:
-4: callq __pthread_enable_asynccancel
- movl %eax, (%rsp)
-
- xorq %r10, %r10
- LP_OP(cmp) $-1, dep_mutex(%rdi)
- leaq cond_futex(%rdi), %rdi
- movl $FUTEX_WAIT, %esi
- je 60f
-
- mov dep_mutex-cond_futex(%rdi), %R8_LP
- /* Requeue to a non-robust PI mutex if the PI bit is set and
- the robust bit is not set. */
- movl MUTEX_KIND(%r8), %eax
- andl $(ROBUST_BIT|PI_BIT), %eax
- cmpl $PI_BIT, %eax
- jne 61f
-
- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
- movl $SYS_futex, %eax
- syscall
-
- cmpl $0, %eax
- sete %r8b
-
-#ifdef __ASSUME_REQUEUE_PI
- jmp 62f
-#else
- je 62f
-
- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
- successfully, it has already locked the mutex for us and the
- pi_flag (%r8b) is set to denote that fact. However, if another
- thread changed the futex value before we entered the wait, the
- syscall may return an EAGAIN and the mutex is not locked. We go
- ahead with a success anyway since later we look at the pi_flag to
- decide if we got the mutex or not. The sequence numbers then make
- sure that only one of the threads actually wake up. We retry using
- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
- and PI futexes don't mix.
-
- Note that we don't check for EAGAIN specifically; we assume that the
- only other error the futex function could return is EAGAIN since
- anything else would mean an error in our function. It is too
- expensive to do that check for every call (which is quite common in
- case of a large number of threads), so it has been skipped. */
- cmpl $-ENOSYS, %eax
- jne 62f
-
-# ifndef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAIT, %esi
-# endif
-#endif
-
-61:
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
-#else
- orl %fs:PRIVATE_FUTEX, %esi
-#endif
-60: xorb %r8b, %r8b
- movl $SYS_futex, %eax
- syscall
-
-62: movl (%rsp), %edi
- callq __pthread_disable_asynccancel
-.LcleanupEND:
-
- /* Lock. */
- movq 8(%rsp), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jnz 5f
-
-6: movl broadcast_seq(%rdi), %edx
-
- movq woken_seq(%rdi), %rax
-
- movq wakeup_seq(%rdi), %r9
-
- cmpl 4(%rsp), %edx
- jne 16f
-
- cmpq 24(%rsp), %r9
- jbe 19f
-
- cmpq %rax, %r9
- jna 19f
-
- incq woken_seq(%rdi)
-
- /* Unlock */
-16: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Wake up a thread which wants to destroy the condvar object. */
- cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 17f
- movl cond_nwaiters(%rdi), %eax
- andl $~((1 << nwaiters_shift) - 1), %eax
- jne 17f
-
- addq $cond_nwaiters, %rdi
- LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
- movl $1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
- subq $cond_nwaiters, %rdi
-
-17: LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- jne 10f
-
- /* If requeue_pi is used the kernel performs the locking of the
- mutex. */
-11: movq 16(%rsp), %rdi
- testb %r8b, %r8b
- jnz 18f
-
- callq __pthread_mutex_cond_lock
-
-14: leaq FRAME_SIZE(%rsp), %rsp
- cfi_adjust_cfa_offset(-FRAME_SIZE)
-
- /* We return the result of the mutex_lock operation. */
- retq
-
- cfi_adjust_cfa_offset(FRAME_SIZE)
-
-18: callq __pthread_mutex_cond_lock_adjust
- xorl %eax, %eax
- jmp 14b
-
- /* We need to go back to futex_wait. If we're using requeue_pi, then
- release the mutex we had acquired and go back. */
-19: testb %r8b, %r8b
- jz 8b
-
- /* Adjust the mutex values first and then unlock it. The unlock
- should always succeed or else the kernel did not lock the mutex
- correctly. */
- movq 16(%rsp), %rdi
- callq __pthread_mutex_cond_lock_adjust
- movq %rdi, %r8
- xorl %esi, %esi
- callq __pthread_mutex_unlock_usercnt
- /* Reload cond_var. */
- movq 8(%rsp), %rdi
- jmp 8b
-
- /* Initial locking failed. */
-1:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
- jmp 2b
-
- /* Unlock in loop requires wakeup. */
-3:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- /* The call preserves %rdx. */
- callq __lll_unlock_wake
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
- jmp 4b
-
- /* Locking in loop failed. */
-5:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
- jmp 6b
-
- /* Unlock after loop requires wakeup. */
-10:
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
- jmp 11b
-
- /* The initial unlocking of the mutex failed. */
-12: movq %rax, %r10
- movq 8(%rsp), %rdi
- LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- je 13f
-
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_unlock_wake
-
-13: movq %r10, %rax
- jmp 14b
-
- .size __pthread_cond_wait, .-__pthread_cond_wait
-versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
- GLIBC_2_3_2)
-
-
- .align 16
- .type __condvar_cleanup1, @function
- .globl __condvar_cleanup1
- .hidden __condvar_cleanup1
-__condvar_cleanup1:
- /* Stack frame:
-
- rsp + 32
- +--------------------------+
- rsp + 24 | unused |
- +--------------------------+
- rsp + 16 | mutex pointer |
- +--------------------------+
- rsp + 8 | condvar pointer |
- +--------------------------+
- rsp + 4 | old broadcast_seq value |
- +--------------------------+
- rsp + 0 | old cancellation mode |
- +--------------------------+
- */
-
- movq %rax, 24(%rsp)
-
- /* Get internal lock. */
- movq 8(%rsp), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jz 1f
-
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- callq __lll_lock_wait
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
-
-1: movl broadcast_seq(%rdi), %edx
- cmpl 4(%rsp), %edx
- jne 3f
-
- /* We increment the wakeup_seq counter only if it is lower than
- total_seq. If this is not the case the thread was woken and
- then canceled. In this case we ignore the signal. */
- movq total_seq(%rdi), %rax
- cmpq wakeup_seq(%rdi), %rax
- jbe 6f
- incq wakeup_seq(%rdi)
- incl cond_futex(%rdi)
-6: incq woken_seq(%rdi)
-
-3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
- /* Wake up a thread which wants to destroy the condvar object. */
- xorl %ecx, %ecx
- cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 4f
- movl cond_nwaiters(%rdi), %eax
- andl $~((1 << nwaiters_shift) - 1), %eax
- jne 4f
-
- LP_OP(cmp) $-1, dep_mutex(%rdi)
- leaq cond_nwaiters(%rdi), %rdi
- movl $1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
- subq $cond_nwaiters, %rdi
- movl $1, %ecx
-
-4: LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- je 2f
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
- movl $LLL_PRIVATE, %eax
- movl $LLL_SHARED, %esi
- cmovne %eax, %esi
- /* The call preserves %rcx. */
- callq __lll_unlock_wake
-
- /* Wake up all waiters to make sure no signal gets lost. */
-2: testl %ecx, %ecx
- jnz 5f
- addq $cond_futex, %rdi
- LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
- movl $0x7fffffff, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
- movl $FUTEX_WAKE, %eax
- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
- cmove %eax, %esi
-#else
- movl $0, %eax
- movl %fs:PRIVATE_FUTEX, %esi
- cmove %eax, %esi
- orl $FUTEX_WAKE, %esi
-#endif
- movl $SYS_futex, %eax
- syscall
-
- /* Lock the mutex only if we don't own it already. This only happens
- in case of PI mutexes, if we got cancelled after a successful
- return of the futex syscall and before disabling async
- cancellation. */
-5: movq 16(%rsp), %rdi
- movl MUTEX_KIND(%rdi), %eax
- andl $(ROBUST_BIT|PI_BIT), %eax
- cmpl $PI_BIT, %eax
- jne 7f
-
- movl (%rdi), %eax
- andl $TID_MASK, %eax
- cmpl %eax, %fs:TID
- jne 7f
- /* We managed to get the lock. Fix it up before returning. */
- callq __pthread_mutex_cond_lock_adjust
- jmp 8f
-
-
-7: callq __pthread_mutex_cond_lock
-
-8: movq 24(%rsp), %rdi
-.LcallUR:
- call _Unwind_Resume@PLT
- hlt
-.LENDCODE:
- cfi_endproc
- .size __condvar_cleanup1, .-__condvar_cleanup1
-
-
- .section .gcc_except_table,"a",@progbits
-.LexceptSTART:
- .byte DW_EH_PE_omit # @LPStart format
- .byte DW_EH_PE_omit # @TType format
- .byte DW_EH_PE_uleb128 # call-site format
- .uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
- .uleb128 .LcleanupSTART-.LSTARTCODE
- .uleb128 .LcleanupEND-.LcleanupSTART
- .uleb128 __condvar_cleanup1-.LSTARTCODE
- .uleb128 0
- .uleb128 .LcallUR-.LSTARTCODE
- .uleb128 .LENDCODE-.LcallUR
- .uleb128 0
- .uleb128 0
-.Lcstend:
-
-
-#ifdef SHARED
- .hidden DW.ref.__gcc_personality_v0
- .weak DW.ref.__gcc_personality_v0
- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
- .align LP_SIZE
- .type DW.ref.__gcc_personality_v0, @object
- .size DW.ref.__gcc_personality_v0, LP_SIZE
-DW.ref.__gcc_personality_v0:
- ASM_ADDR __gcc_personality_v0
-#endif
@@ -25,16 +25,7 @@
ssize_t
__libc_recv (int fd, void *buf, size_t n, int flags)
{
- if (SINGLE_THREAD_P)
- return INLINE_SYSCALL (recvfrom, 6, fd, buf, n, flags, NULL, NULL);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- ssize_t result = INLINE_SYSCALL (recvfrom, 6, fd, buf, n, flags, NULL, NULL);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return SYSCALL_CANCEL (recvfrom, fd, buf, n, flags, NULL, NULL);
}
weak_alias (__libc_recv, __recv)
deleted file mode 100644
@@ -1,380 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <kernel-features.h>
-#include <lowlevellock.h>
-#include <shlib-compat.h>
-#include <pthread-errnos.h>
-#include <structsem.h>
-
- .text
-
- .globl sem_timedwait
- .type sem_timedwait,@function
- .align 16
-sem_timedwait:
-.LSTARTCODE:
- cfi_startproc
-#ifdef SHARED
- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
- DW.ref.__gcc_personality_v0)
- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-#if VALUE == 0
- movl (%rdi), %eax
-#else
- movl VALUE(%rdi), %eax
-#endif
-2: testl %eax, %eax
- je 1f
-
- leaq -1(%rax), %rdx
- LOCK
-#if VALUE == 0
- cmpxchgl %edx, (%rdi)
-#else
- cmpxchgl %edx, VALUE(%rdi)
-#endif
- jne 2b
-
- xorl %eax, %eax
- retq
-
- /* Check whether the timeout value is valid. */
-1: cmpq $1000000000, 8(%rsi)
- jae 6f
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-# ifdef PIC
- cmpl $0, __have_futex_clock_realtime(%rip)
-# else
- cmpl $0, __have_futex_clock_realtime
-# endif
- je .Lreltmo
-#endif
-
- cmpq $0, (%rsi)
- js 16f
-
- /* This push is only needed to store the sem_t pointer for the
- exception handler. */
- pushq %rdi
- cfi_adjust_cfa_offset(8)
-
- movq %rsi, %r10
-
- LOCK
- LP_OP(add) $1, NWAITERS(%rdi)
-
-.LcleanupSTART:
-13: call __pthread_enable_asynccancel
- movl %eax, %r8d
-
-#if VALUE != 0
- leaq VALUE(%rdi), %rdi
-#endif
- movl $0xffffffff, %r9d
- movl $FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
- orl PRIVATE(%rdi), %esi
- movl $SYS_futex, %eax
- xorl %edx, %edx
- syscall
- movq %rax, %r9
-#if VALUE != 0
- leaq -VALUE(%rdi), %rdi
-#endif
-
- xchgq %r8, %rdi
- call __pthread_disable_asynccancel
-.LcleanupEND:
- movq %r8, %rdi
-
- testq %r9, %r9
- je 11f
- cmpq $-EWOULDBLOCK, %r9
- jne 3f
-
-11:
-#if VALUE == 0
- movl (%rdi), %eax
-#else
- movl VALUE(%rdi), %eax
-#endif
-14: testl %eax, %eax
- je 13b
-
- leaq -1(%rax), %rcx
- LOCK
-#if VALUE == 0
- cmpxchgl %ecx, (%rdi)
-#else
- cmpxchgl %ecx, VALUE(%rdi)
-#endif
- jne 14b
-
- xorl %eax, %eax
-
-15: LOCK
- LP_OP(sub) $1, NWAITERS(%rdi)
-
- leaq 8(%rsp), %rsp
- cfi_adjust_cfa_offset(-8)
- retq
-
- cfi_adjust_cfa_offset(8)
-3: negq %r9
- movq errno@gottpoff(%rip), %rdx
- movl %r9d, %fs:(%rdx)
-
- orl $-1, %eax
- jmp 15b
-
- cfi_adjust_cfa_offset(-8)
-6:
- movq errno@gottpoff(%rip), %rdx
- movl $EINVAL, %fs:(%rdx)
-
- orl $-1, %eax
-
- retq
-
-16:
- movq errno@gottpoff(%rip), %rdx
- movl $ETIMEDOUT, %fs:(%rdx)
-
- orl $-1, %eax
-
- retq
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-.Lreltmo:
- pushq %r12
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r14, 0)
-
-#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
-# define STACKFRAME 8
-#else
-# define STACKFRAME 24
-#endif
- subq $STACKFRAME, %rsp
- cfi_adjust_cfa_offset(STACKFRAME)
-
- movq %rdi, %r12
- movq %rsi, %r13
-
- LOCK
- LP_OP(add) $1, NWAITERS(%r12)
-
-7: xorl %esi, %esi
- movq %rsp,%rdi
- /* This call works because we directly jump to a system call entry
- which preserves all the registers. */
- call JUMPTARGET(__gettimeofday)
-
- /* Compute relative timeout. */
- movq 8(%rsp), %rax
- movl $1000, %edi
- mul %rdi /* Milli seconds to nano seconds. */
- movq (%r13), %rdi
- movq 8(%r13), %rsi
- subq (%rsp), %rdi
- subq %rax, %rsi
- jns 5f
- addq $1000000000, %rsi
- decq %rdi
-5: testq %rdi, %rdi
- movl $ETIMEDOUT, %r14d
- js 36f /* Time is already up. */
-
- movq %rdi, (%rsp) /* Store relative timeout. */
- movq %rsi, 8(%rsp)
-
-.LcleanupSTART2:
- call __pthread_enable_asynccancel
- movl %eax, 16(%rsp)
-
- movq %rsp, %r10
-# if VALUE == 0
- movq %r12, %rdi
-# else
- leaq VALUE(%r12), %rdi
-# endif
-# if FUTEX_WAIT == 0
- movl PRIVATE(%rdi), %esi
-# else
- movl $FUTEX_WAIT, %esi
- orl PRIVATE(%rdi), %esi
-# endif
- movl $SYS_futex, %eax
- xorl %edx, %edx
- syscall
- movq %rax, %r14
-
- movl 16(%rsp), %edi
- call __pthread_disable_asynccancel
-.LcleanupEND2:
-
- testq %r14, %r14
- je 9f
- cmpq $-EWOULDBLOCK, %r14
- jne 33f
-
-9:
-# if VALUE == 0
- movl (%r12), %eax
-# else
- movl VALUE(%r12), %eax
-# endif
-8: testl %eax, %eax
- je 7b
-
- leaq -1(%rax), %rcx
- LOCK
-# if VALUE == 0
- cmpxchgl %ecx, (%r12)
-# else
- cmpxchgl %ecx, VALUE(%r12)
-# endif
- jne 8b
-
- xorl %eax, %eax
-
-45: LOCK
- LP_OP(sub) $1, NWAITERS(%r12)
-
- addq $STACKFRAME, %rsp
- cfi_adjust_cfa_offset(-STACKFRAME)
- popq %r14
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r14)
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r12)
- retq
-
- cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
- cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
- cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
- cfi_rel_offset(%r14, STACKFRAME)
-33: negq %r14
-36:
- movq errno@gottpoff(%rip), %rdx
- movl %r14d, %fs:(%rdx)
-
- orl $-1, %eax
- jmp 45b
-#endif
- cfi_endproc
- .size sem_timedwait,.-sem_timedwait
-
-
- .type sem_timedwait_cleanup,@function
-sem_timedwait_cleanup:
- cfi_startproc
- cfi_adjust_cfa_offset(8)
-
- movq (%rsp), %rdi
- LOCK
- LP_OP(sub) $1, NWAITERS(%rdi)
- movq %rax, %rdi
-.LcallUR:
- call _Unwind_Resume@PLT
- hlt
-.LENDCODE:
- cfi_endproc
- .size sem_timedwait_cleanup,.-sem_timedwait_cleanup
-
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
- .type sem_timedwait_cleanup2,@function
-sem_timedwait_cleanup2:
- cfi_startproc
- cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
- cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
- cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
- cfi_rel_offset(%r14, STACKFRAME)
-
- LOCK
- LP_OP(sub) $1, NWAITERS(%r12)
- movq %rax, %rdi
- movq STACKFRAME(%rsp), %r14
- movq STACKFRAME+8(%rsp), %r13
- movq STACKFRAME+16(%rsp), %r12
-.LcallUR2:
- call _Unwind_Resume@PLT
- hlt
-.LENDCODE2:
- cfi_endproc
- .size sem_timedwait_cleanup2,.-sem_timedwait_cleanup2
-#endif
-
-
- .section .gcc_except_table,"a",@progbits
-.LexceptSTART:
- .byte DW_EH_PE_omit # @LPStart format
- .byte DW_EH_PE_omit # @TType format
- .byte DW_EH_PE_uleb128 # call-site format
- .uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
- .uleb128 .LcleanupSTART-.LSTARTCODE
- .uleb128 .LcleanupEND-.LcleanupSTART
- .uleb128 sem_timedwait_cleanup-.LSTARTCODE
- .uleb128 0
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
- .uleb128 .LcleanupSTART2-.LSTARTCODE
- .uleb128 .LcleanupEND2-.LcleanupSTART2
- .uleb128 sem_timedwait_cleanup2-.LSTARTCODE
- .uleb128 0
-#endif
- .uleb128 .LcallUR-.LSTARTCODE
- .uleb128 .LENDCODE-.LcallUR
- .uleb128 0
- .uleb128 0
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
- .uleb128 .LcallUR2-.LSTARTCODE
- .uleb128 .LENDCODE2-.LcallUR2
- .uleb128 0
- .uleb128 0
-#endif
-.Lcstend:
-
-
-#ifdef SHARED
- .hidden DW.ref.__gcc_personality_v0
- .weak DW.ref.__gcc_personality_v0
- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
- .align LP_SIZE
- .type DW.ref.__gcc_personality_v0, @object
- .size DW.ref.__gcc_personality_v0, LP_SIZE
-DW.ref.__gcc_personality_v0:
- ASM_ADDR __gcc_personality_v0
-#endif
deleted file mode 100644
@@ -1,176 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <lowlevellock.h>
-#include <shlib-compat.h>
-#include <pthread-errnos.h>
-#include <structsem.h>
-
-
- .text
-
- .globl sem_wait
- .type sem_wait,@function
- .align 16
-sem_wait:
-.LSTARTCODE:
- cfi_startproc
-#ifdef SHARED
- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
- DW.ref.__gcc_personality_v0)
- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-
-#if VALUE == 0
- movl (%rdi), %eax
-#else
- movl VALUE(%rdi), %eax
-#endif
-2: testl %eax, %eax
- je 1f
-
- leal -1(%rax), %edx
- LOCK
-#if VALUE == 0
- cmpxchgl %edx, (%rdi)
-#else
- cmpxchgl %edx, VALUE(%rdi)
-#endif
- jne 2b
-
- xorl %eax, %eax
- retq
-
- /* This push is only needed to store the sem_t pointer for the
- exception handler. */
-1: pushq %rdi
- cfi_adjust_cfa_offset(8)
-
- LOCK
- LP_OP(add) $1, NWAITERS(%rdi)
-
-.LcleanupSTART:
-6: call __pthread_enable_asynccancel
- movl %eax, %r8d
-
- xorq %r10, %r10
- movl $SYS_futex, %eax
-#if FUTEX_WAIT == 0
- movl PRIVATE(%rdi), %esi
-#else
- movl $FUTEX_WAIT, %esi
- orl PRIVATE(%rdi), %esi
-#endif
- xorl %edx, %edx
- syscall
- movq %rax, %rcx
-
- xchgq %r8, %rdi
- call __pthread_disable_asynccancel
-.LcleanupEND:
- movq %r8, %rdi
-
- testq %rcx, %rcx
- je 3f
- cmpq $-EWOULDBLOCK, %rcx
- jne 4f
-
-3:
-#if VALUE == 0
- movl (%rdi), %eax
-#else
- movl VALUE(%rdi), %eax
-#endif
-5: testl %eax, %eax
- je 6b
-
- leal -1(%rax), %edx
- LOCK
-#if VALUE == 0
- cmpxchgl %edx, (%rdi)
-#else
- cmpxchgl %edx, VALUE(%rdi)
-#endif
- jne 5b
-
- xorl %eax, %eax
-
-9: LOCK
- LP_OP(sub) $1, NWAITERS(%rdi)
-
- leaq 8(%rsp), %rsp
- cfi_adjust_cfa_offset(-8)
-
- retq
-
- cfi_adjust_cfa_offset(8)
-4: negq %rcx
- movq errno@gottpoff(%rip), %rdx
- movl %ecx, %fs:(%rdx)
- orl $-1, %eax
-
- jmp 9b
- .size sem_wait,.-sem_wait
-
-
- .type sem_wait_cleanup,@function
-sem_wait_cleanup:
- movq (%rsp), %rdi
- LOCK
- LP_OP(sub) $1, NWAITERS(%rdi)
- movq %rax, %rdi
-.LcallUR:
- call _Unwind_Resume@PLT
- hlt
-.LENDCODE:
- cfi_endproc
- .size sem_wait_cleanup,.-sem_wait_cleanup
-
-
- .section .gcc_except_table,"a",@progbits
-.LexceptSTART:
- .byte DW_EH_PE_omit # @LPStart format
- .byte DW_EH_PE_omit # @TType format
- .byte DW_EH_PE_uleb128 # call-site format
- .uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
- .uleb128 .LcleanupSTART-.LSTARTCODE
- .uleb128 .LcleanupEND-.LcleanupSTART
- .uleb128 sem_wait_cleanup-.LSTARTCODE
- .uleb128 0
- .uleb128 .LcallUR-.LSTARTCODE
- .uleb128 .LENDCODE-.LcallUR
- .uleb128 0
- .uleb128 0
-.Lcstend:
-
-
-#ifdef SHARED
- .hidden DW.ref.__gcc_personality_v0
- .weak DW.ref.__gcc_personality_v0
- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
- .align LP_SIZE
- .type DW.ref.__gcc_personality_v0, @object
- .size DW.ref.__gcc_personality_v0, LP_SIZE
-DW.ref.__gcc_personality_v0:
- ASM_ADDR __gcc_personality_v0
-#endif
@@ -23,17 +23,7 @@
ssize_t
__libc_send (int fd, const void *buf, size_t n, int flags)
{
- if (SINGLE_THREAD_P)
- return INLINE_SYSCALL (sendto, 6, fd, buf, n, flags, NULL, (size_t) 0);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- ssize_t result = INLINE_SYSCALL (sendto, 6, fd, buf, n, flags, NULL,
- (size_t) 0);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return SYSCALL_CANCEL (sendto, fd, buf, n, flags, NULL, (size_t) 0);
}
weak_alias (__libc_send, __send)
new file mode 100644
@@ -0,0 +1,49 @@
+/* Cancellable syscall wrapper - x86_64 version.
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+ENTRY (__syscall_cancel_arch)
+
+ .globl __syscall_cancel_arch_start
+ .type __syscall_cancel_arch_start,@function
+__syscall_cancel_arch_start:
+
+ mov (%rdi),%eax
+ testb $4, (%rdi)
+ jne __syscall_do_cancel
+
+ mov %rdi,%r11
+ mov %rsi,%rax
+ mov %rdx,%rdi
+ mov %rcx,%rsi
+ mov %r8,%rdx
+ mov %r9,%r10
+ mov 8(%rsp),%r8
+ mov 16(%rsp),%r9
+ mov %r11,8(%rsp)
+ syscall
+
+ .globl __syscall_cancel_arch_end
+ .type __syscall_cancel_arch_end,@function
+__syscall_cancel_arch_end:
+
+ ret
+
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
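
The __syscall_cancel_arch_start and __syscall_cancel_arch_end labels bracket
the only instruction range in which an interrupted syscall counts as
cancellable.  A sketch of how generic code could consume them together with
the interrupted instruction pointer (in glibc this would go through
__pthread_get_ip, added to pthreaddef.h further down; the helper name and the
exact bound comparisons below are assumptions, not taken from this patch):

  /* Sketch only: from a cancellation signal handler, decide whether the
     interrupted thread was inside the cancellable syscall window.  */
  #define _GNU_SOURCE
  #include <stdbool.h>
  #include <ucontext.h>

  extern const char __syscall_cancel_arch_start[];
  extern const char __syscall_cancel_arch_end[];

  static bool
  ip_in_cancellable_window (const ucontext_t *uc)
  {
    long int ip = (long int) uc->uc_mcontext.gregs[REG_RIP];
    return ip >= (long int) __syscall_cancel_arch_start
           && ip < (long int) __syscall_cancel_arch_end;
  }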
@@ -24,81 +24,30 @@
#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
-/* The code to disable cancellation depends on the fact that the called
- functions are special. They don't modify registers other than %rax
- and %r11 if they return. Therefore we don't have to preserve other
- registers around these calls. */
+# ifndef NOT_IN_libc
+# define JMP_SYSCALL_CANCEL HIDDEN_JUMPTARGET(__syscall_cancel)
+# else
+# define JMP_SYSCALL_CANCEL __syscall_cancel@plt
+# endif
+
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.text; \
ENTRY (name) \
- SINGLE_THREAD_P; \
- jne L(pseudo_cancel); \
- .type __##syscall_name##_nocancel,@function; \
- .globl __##syscall_name##_nocancel; \
- __##syscall_name##_nocancel: \
- DO_CALL (syscall_name, args); \
- cmpq $-4095, %rax; \
- jae SYSCALL_ERROR_LABEL; \
- ret; \
- .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
- L(pseudo_cancel): \
- /* We always have to align the stack before calling a function. */ \
- subq $8, %rsp; cfi_adjust_cfa_offset (8); \
- CENABLE \
- /* The return value from CENABLE is argument for CDISABLE. */ \
- movq %rax, (%rsp); \
- DO_CALL (syscall_name, args); \
- movq (%rsp), %rdi; \
- /* Save %rax since it's the error code from the syscall. */ \
- movq %rax, %rdx; \
- CDISABLE \
- movq %rdx, %rax; \
- addq $8,%rsp; cfi_adjust_cfa_offset (-8); \
- cmpq $-4095, %rax; \
- jae SYSCALL_ERROR_LABEL
-
-
-# ifdef IS_IN_libpthread
-# define CENABLE call __pthread_enable_asynccancel;
-# define CDISABLE call __pthread_disable_asynccancel;
-# define __local_multiple_threads __pthread_multiple_threads
-# elif !defined NOT_IN_libc
-# define CENABLE call __libc_enable_asynccancel;
-# define CDISABLE call __libc_disable_asynccancel;
-# define __local_multiple_threads __libc_multiple_threads
-# elif defined IS_IN_librt
-# define CENABLE call __librt_enable_asynccancel;
-# define CDISABLE call __librt_disable_asynccancel;
-# else
-# error Unsupported library
-# endif
-
-# if defined IS_IN_libpthread || !defined NOT_IN_libc
-# ifndef __ASSEMBLER__
-extern int __local_multiple_threads attribute_hidden;
-# define SINGLE_THREAD_P \
- __builtin_expect (__local_multiple_threads == 0, 1)
-# else
-# define SINGLE_THREAD_P cmpl $0, __local_multiple_threads(%rip)
-# endif
-
-# else
-
-# ifndef __ASSEMBLER__
-# define SINGLE_THREAD_P \
- __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
- header.multiple_threads) == 0, 1)
-# else
-# define SINGLE_THREAD_P cmpl $0, %fs:MULTIPLE_THREADS_OFFSET
-# endif
-
-# endif
-
-#elif !defined __ASSEMBLER__
-
-# define SINGLE_THREAD_P (1)
-# define NO_CANCELLATION 1
+ subq $24, %rsp; \
+ cfi_def_cfa_offset (32); \
+ movq %r9, (%rsp); \
+ movq %r8, %r9; \
+ movq %rcx, %r8; \
+ movq %rdx, %rcx; \
+ movq %rsi, %rdx; \
+ movq %rdi, %rsi; \
+ lea SYS_ify (syscall_name), %edi; \
+ call JMP_SYSCALL_CANCEL; \
+	addq $24, %rsp;							      \
+	cfi_def_cfa_offset (8);						      \
+ cmpq $-4095, %rax; \
+ jae SYSCALL_ERROR_LABEL;
#endif
@@ -307,6 +307,15 @@
INTERNAL_SYSCALL (name, err, nr, ##args)
# endif
+# undef SYSCALL_CANCEL_ERROR
+# define SYSCALL_CANCEL_ERROR(__val) \
+ ((unsigned long int) (long int) (__val) >= -4095L)
+
+# undef SYSCALL_CANCEL_ERRNO
+# define SYSCALL_CANCEL_ERRNO(__val) \
+ (-(__val))
+
+
# define LOAD_ARGS_0()
# define LOAD_REGS_0
# define ASM_ARGS_0
@@ -16,6 +16,9 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#ifndef _PTHREADDEF_H
+# define _PTHREADDEF_H
+
/* Default stack size. */
#define ARCH_STACK_DEFAULT_SIZE (2 * 1024 * 1024)
@@ -42,3 +45,13 @@
/* Location of current stack frame. The frame pointer is not usable. */
#define CURRENT_STACK_FRAME \
({ register char *frame __asm__("rsp"); frame; })
+
+#ifndef __ASSEMBLER__
+static inline long int
+__pthread_get_ip (const ucontext_t *uc)
+{
+  return (long int) uc->uc_mcontext.gregs[REG_RIP];
+}
+#endif
+
+#endif
@@ -21,7 +21,6 @@ RTLD_SAVESPACE_SSE offsetof (tcbhead_t, rtld_savespace_sse)
-- Not strictly offsets, but these values are also used in the TCB.
TCB_CANCELSTATE_BITMASK CANCELSTATE_BITMASK
TCB_CANCELTYPE_BITMASK CANCELTYPE_BITMASK
-TCB_CANCELING_BITMASK CANCELING_BITMASK
TCB_CANCELED_BITMASK CANCELED_BITMASK
TCB_EXITING_BITMASK EXITING_BITMASK
TCB_CANCEL_RESTMASK CANCEL_RESTMASK
@@ -325,17 +325,6 @@ typedef struct
abort (); })
-/* Atomic set bit. */
-# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
- (void) ({ if (sizeof ((descr)->member) == 4) \
- asm volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
- :: "i" (offsetof (struct pthread, member)), \
- "ir" (1 << (bit))); \
- else \
- /* Not necessary for other sizes in the moment. */ \
- abort (); })
-
-
# define CALL_THREAD_FCT(descr) \
({ void *__res; \
asm volatile ("movq %%fs:%P2, %%rdi\n\t" \