@@ -30,7 +30,7 @@ install-lib-ldscripts := libpthread.so
routines = alloca_cutoff forward libc-lowlevellock libc-cancellation \
libc-cleanup libc_pthread_init libc_multiple_threads \
- register-atfork unregister-atfork
+ register-atfork unregister-atfork syscall_cancel
shared-only-routines = forward
libpthread-routines = nptl-init vars events version \
@@ -103,7 +103,6 @@ libpthread-routines = nptl-init vars events version \
cleanup cleanup_defer cleanup_compat \
cleanup_defer_compat unwind \
pt-longjmp pt-cleanup\
- cancellation \
lowlevellock lowlevelrobustlock \
pt-fork pt-vfork \
ptw-write ptw-read ptw-close ptw-fcntl ptw-accept \
@@ -156,7 +155,6 @@ CFLAGS-pthread_setcanceltype.c = -fexceptions -fasynchronous-unwind-tables
# These are internal functions which similar functionality as setcancelstate
# and setcanceltype.
-CFLAGS-cancellation.c = -fasynchronous-unwind-tables
CFLAGS-libc-cancellation.c = -fasynchronous-unwind-tables
# Calling pthread_exit() must cause the registered cancel handlers to
@@ -36,6 +36,9 @@ libc {
__libc_pthread_init;
__libc_current_sigrtmin_private; __libc_current_sigrtmax_private;
__libc_allocate_rtsig_private;
+ __syscall_cancel;
+ __syscall_cancel_arch_start;
+ __syscall_cancel_arch_end;
}
}
deleted file mode 100644
@@ -1,99 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <setjmp.h>
-#include <stdlib.h>
-#include "pthreadP.h"
-
-
-/* The next two functions are similar to pthread_setcanceltype() but
- more specialized for the use in the cancelable functions like write().
- They do not need to check parameters etc. */
-int
-attribute_hidden
-__pthread_enable_asynccancel (void)
-{
- struct pthread *self = THREAD_SELF;
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- int newval = oldval | CANCELTYPE_BITMASK;
-
- if (newval == oldval)
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__glibc_likely (curval == oldval))
- {
- if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
- {
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
- __do_cancel ();
- }
-
- break;
- }
-
- /* Prepare the next round. */
- oldval = curval;
- }
-
- return oldval;
-}
-
-
-void
-internal_function attribute_hidden
-__pthread_disable_asynccancel (int oldtype)
-{
- /* If asynchronous cancellation was enabled before we do not have
- anything to do. */
- if (oldtype & CANCELTYPE_BITMASK)
- return;
-
- struct pthread *self = THREAD_SELF;
- int newval;
-
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- newval = oldval & ~CANCELTYPE_BITMASK;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__glibc_likely (curval == oldval))
- break;
-
- /* Prepare the next round. */
- oldval = curval;
- }
-
- /* We cannot return when we are being canceled. Upon return the
- thread might be things which would have to be undone. The
- following loop should loop until the cancellation signal is
- delivered. */
- while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
- == CANCELING_BITMASK, 0))
- {
- lll_futex_wait (&self->cancelhandling, newval, LLL_PRIVATE);
- newval = THREAD_GETMEM (self, cancelhandling);
- }
-}
@@ -263,23 +263,20 @@ struct pthread
/* Bit set if asynchronous cancellation mode is selected. */
#define CANCELTYPE_BIT 1
#define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT)
- /* Bit set if canceling has been initiated. */
-#define CANCELING_BIT 2
-#define CANCELING_BITMASK (0x01 << CANCELING_BIT)
- /* Bit set if canceled. */
-#define CANCELED_BIT 3
+ /* Bit set if thread is canceled. */
+#define CANCELED_BIT 2
#define CANCELED_BITMASK (0x01 << CANCELED_BIT)
/* Bit set if thread is exiting. */
-#define EXITING_BIT 4
+#define EXITING_BIT 3
#define EXITING_BITMASK (0x01 << EXITING_BIT)
/* Bit set if thread terminated and TCB is freed. */
-#define TERMINATED_BIT 5
+#define TERMINATED_BIT 4
#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT)
/* Bit set if thread is supposed to change XID. */
-#define SETXID_BIT 6
+#define SETXID_BIT 5
#define SETXID_BITMASK (0x01 << SETXID_BIT)
/* Mask for the rest. Helps the compiler to optimize. */
-#define CANCEL_RESTMASK 0xffffff80
+#define CANCEL_RESTMASK 0xffffffc0
#define CANCEL_ENABLED_AND_CANCELED(value) \
(((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK \
@@ -16,9 +16,63 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#include <setjmp.h>
+#include <stdlib.h>
#include "pthreadP.h"
+#ifdef IS_IN_rtld
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include <nptl/cancellation.c>
+long int
+__syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
+ __syscall_arg_t arg2, __syscall_arg_t arg3,
+ __syscall_arg_t arg4, __syscall_arg_t arg5,
+ __syscall_arg_t arg6)
+{
+ INTERNAL_SYSCALL_DECL (err);
+ return INTERNAL_SYSCALL_NCS (nr, err, 6, arg1, arg2, arg3, arg4, arg5, arg6);
+}
+
+#else
+
+/* Cancellation function called by all cancellable syscalls. */
+long int
+__syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
+ __syscall_arg_t a2, __syscall_arg_t a3,
+ __syscall_arg_t a4, __syscall_arg_t a5,
+ __syscall_arg_t a6)
+{
+ pthread_t self = (pthread_t) THREAD_SELF;
+ volatile struct pthread *pd = (volatile struct pthread *) self;
+ long int result;
+
+ /* If cancellation is not enabled, call the syscall directly. */
+ if (pd->cancelhandling & CANCELSTATE_BITMASK)
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ result = INTERNAL_SYSCALL_NCS (nr, err, 6, a1, a2, a3, a4, a5, a6);
+ return INTERNAL_SYSCALL_ERROR_P (result, err) ? -result : result;
+ }
+
+ /* Call the arch-specific entry point that contains the global markers
+ to be checked by the SIGCANCEL handler. */
+ result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
+ a6);
+
+ if ((result == -EINTR)
+ && (pd->cancelhandling & CANCELED_BITMASK)
+ && !(pd->cancelhandling & CANCELSTATE_BITMASK))
+ __do_cancel ();
+
+ return result;
+}
+libc_hidden_def (__syscall_cancel)
+
+/* Since __do_cancel is an always-inline function, this creates a symbol
+ that the arch-specific code can call to cancel the thread. */
+void
+__syscall_do_cancel (void)
+{
+ __do_cancel ();
+}
+
+#endif
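For reference, the flow implemented above can be exercised outside glibc with nothing but syscall(2): run the syscall directly when cancellation is disabled, otherwise run it inside the marked bracket and, on EINTR with a pending cancellation request, act on the request. The sketch below is only an illustration under those assumptions; the flag word, the bit values and the sketch_syscall_cancel name are stand-ins, not glibc internals.

/* Hypothetical sketch of the __syscall_cancel flow: an atomic flag word
   stands in for the TCB cancelhandling field and syscall(2) stands in
   for __syscall_cancel_arch.  */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define CANCELSTATE_BITMASK 0x01   /* cancellation disabled (bit 0).  */
#define CANCELED_BITMASK    0x04   /* cancellation requested (bit 2).  */

static _Atomic unsigned int cancelhandling;

static long
sketch_syscall_cancel (long nr, long a1, long a2, long a3)
{
  /* Cancellation disabled: issue the syscall directly.  */
  if (atomic_load (&cancelhandling) & CANCELSTATE_BITMASK)
    return syscall (nr, a1, a2, a3);

  /* In the patch this call sits between __syscall_cancel_arch_start and
     __syscall_cancel_arch_end so the SIGCANCEL handler can tell whether
     the thread was interrupted inside the bracket.  syscall(2) reports
     errors through errno rather than through a raw -errno return.  */
  long ret = syscall (nr, a1, a2, a3);
  if (ret == -1 && errno == EINTR
      && (atomic_load (&cancelhandling) & CANCELED_BITMASK))
    fprintf (stderr, "would call __do_cancel here\n");
  return ret;
}

int
main (void)
{
  char buf[16];
  /* A read from stdin acts as the cancellation point.  */
  long n = sketch_syscall_cancel (SYS_read, 0, (long) buf, sizeof buf);
  printf ("read returned %ld\n", n);
  return 0;
}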
@@ -115,7 +115,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
/* Wait until thread terminates. The kernel so far does not use
the private futex operations for this. */
- if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
+ if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
return ETIMEDOUT;
}
@@ -188,36 +188,41 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
return;
struct pthread *self = THREAD_SELF;
+ volatile struct pthread *pd = (volatile struct pthread *) self;
+ ucontext_t *uc = ctx;
+ const char *tip = (const char *)__pthread_get_ip (ctx);
- int oldval = THREAD_GETMEM (self, cancelhandling);
- while (1)
+ extern const char __syscall_cancel_arch_start[1];
+ extern const char __syscall_cancel_arch_end[1];
+
+ if (((pd->cancelhandling & (CANCELSTATE_BITMASK)) != 0)
+ || ((pd->cancelhandling & CANCELED_BITMASK) == 0))
+ return;
+
+ __sigaddset (&uc->uc_sigmask, SIGCANCEL);
+
+ /* Check if asynchronous cancellation mode is set or if the interrupted
+ instruction pointer falls within the cancellable syscall bracket. For
+ interruptible syscalls that might generate external side effects (partial
+ reads or writes, for instance), the kernel will set the IP to after
+ '__syscall_cancel_arch_end', thus disabling the cancellation and allowing
+ the process to handle such conditions. */
+ if (pd->cancelhandling & CANCELTYPE_BITMASK ||
+ (tip >= __syscall_cancel_arch_start && tip < __syscall_cancel_arch_end))
{
- /* We are canceled now. When canceled by another thread this flag
- is already set but if the signal is directly send (internally or
- from another process) is has to be done here. */
- int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
-
- if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
- /* Already canceled or exiting. */
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (curval == oldval)
- {
- /* Set the return value. */
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
- /* Make sure asynchronous cancellation is still enabled. */
- if ((newval & CANCELTYPE_BITMASK) != 0)
- /* Run the registered destructors and terminate the thread. */
- __do_cancel ();
-
- break;
- }
-
- oldval = curval;
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
+
+ /* Use the raw syscall, since __pthread_sigmask removes SIGCANCEL from
+ the set. */
+ INTERNAL_SYSCALL_DECL (err);
+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIGCANCEL, &uc->uc_sigmask, 0,
+ _NSIG / 8);
+
+ __do_cancel ();
}
+
+ INLINE_SYSCALL (tgkill, 3, THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
+ SIGCANCEL);
}
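The handler above keys off the program counter it recovers from the ucontext_t argument. Reading that program counter in a SA_SIGINFO handler can be sketched standalone as below; REG_RIP is the x86_64 register index and is assumed purely for illustration (the powerpc pthreaddef.h hunk later in this patch uses gp_regs[PT_NIP] instead).

/* Sketch: recover the interrupted instruction pointer inside a
   SA_SIGINFO handler.  REG_RIP is x86_64-specific and only used here
   for illustration.  */
#define _GNU_SOURCE
#include <signal.h>
#include <ucontext.h>
#include <unistd.h>

static void
handler (int sig, siginfo_t *si, void *ctx)
{
  ucontext_t *uc = ctx;
  const char *ip = (const char *) uc->uc_mcontext.gregs[REG_RIP];
  /* The patch compares ip against __syscall_cancel_arch_start and
     __syscall_cancel_arch_end to detect an in-bracket interruption;
     here we only note that the handler ran.  */
  (void) ip;
  static const char msg[] = "interrupted\n";
  write (2, msg, sizeof msg - 1);
}

int
main (void)
{
  struct sigaction sa = { 0 };
  sa.sa_sigaction = handler;
  sa.sa_flags = SA_SIGINFO;          /* the patch also adds SA_RESTART.  */
  sigemptyset (&sa.sa_mask);
  sigaction (SIGUSR1, &sa, NULL);
  raise (SIGUSR1);
  return 0;
}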
@@ -375,7 +380,10 @@ __pthread_initialize_minimal_internal (void)
it is only asynchronous cancellation which is affected. */
struct sigaction sa;
sa.sa_sigaction = sigcancel_handler;
- sa.sa_flags = SA_SIGINFO;
+ /* The signal handler should restart interrupted syscalls (SA_RESTART) to
+ avoid the risk of spurious EINTR when SIGCANCEL is sent to the whole
+ process or when pthread_cancel is called while cancellation is disabled
+ in the target thread. */
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
__sigemptyset (&sa.sa_mask);
(void) __libc_sigaction (SIGCANCEL, &sa, NULL);
@@ -27,6 +27,3 @@ system (const char *line)
{
return __libc_system (line);
}
-
-/* __libc_system in libc.so handles cancellation. */
-LIBC_CANCEL_HANDLED ();
@@ -272,49 +272,31 @@ __do_cancel (void)
struct pthread *self = THREAD_SELF;
/* Make sure we get no more cancellations. */
- THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+ int oldval = THREAD_GETMEM (self, cancelhandling);
+ while (1)
+ {
+ int newval = (oldval | CANCELSTATE_BITMASK);
+ newval &= ~(CANCELTYPE_BITMASK);
+ if (oldval == newval)
+ break;
+
+ int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
+ oldval);
+ if (__glibc_likely (curval == oldval))
+ break;
+ oldval = curval;
+ }
+
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
__pthread_unwind ((__pthread_unwind_buf_t *)
THREAD_GETMEM (self, cleanup_jmp_buf));
}
-
-/* Set cancellation mode to asynchronous. */
-#define CANCEL_ASYNC() \
- __pthread_enable_asynccancel ()
-/* Reset to previous cancellation mode. */
-#define CANCEL_RESET(oldtype) \
- __pthread_disable_asynccancel (oldtype)
-
-#if !defined NOT_IN_libc
-/* Same as CANCEL_ASYNC, but for use in libc.so. */
-# define LIBC_CANCEL_ASYNC() \
- __libc_enable_asynccancel ()
-/* Same as CANCEL_RESET, but for use in libc.so. */
-# define LIBC_CANCEL_RESET(oldtype) \
- __libc_disable_asynccancel (oldtype)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__libc_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__libc_disable_asynccancel")
-#elif defined IS_IN_libpthread
-# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
-# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__pthread_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__pthread_disable_asynccancel")
-#elif defined IS_IN_librt
-# define LIBC_CANCEL_ASYNC() \
- __librt_enable_asynccancel ()
-# define LIBC_CANCEL_RESET(val) \
- __librt_disable_asynccancel (val)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__librt_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__librt_disable_asynccancel")
-#else
-# define LIBC_CANCEL_ASYNC() 0 /* Just a dummy value. */
-# define LIBC_CANCEL_RESET(val) ((void)(val)) /* Nothing, but evaluate it. */
-# define LIBC_CANCEL_HANDLED() /* Nothing. */
-#endif
+extern long int __syscall_cancel_arch (volatile void *, __syscall_arg_t nr,
+ __syscall_arg_t arg1, __syscall_arg_t arg2, __syscall_arg_t arg3,
+ __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel_arch);
/* Internal prototypes. */
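The __do_cancel change earlier in this hunk replaces the plain EXITING bit-set with a retry loop that atomically sets the cancel-state bit and clears the cancel-type bit. The same loop written with C11 atomics instead of THREAD_ATOMIC_CMPXCHG_VAL looks roughly as follows; the bit values come from the pthreadP.h hunk above, the rest of the names are hypothetical.

/* Sketch of the compare-and-exchange retry loop used by the reworked
   __do_cancel, using C11 atomics.  */
#include <stdatomic.h>
#include <stdio.h>

#define CANCELSTATE_BITMASK 0x01   /* bit 0: cancellation disabled.  */
#define CANCELTYPE_BITMASK  0x02   /* bit 1: asynchronous mode.  */

static _Atomic int cancelhandling;

static void
disable_further_cancellation (void)
{
  int oldval = atomic_load (&cancelhandling);
  while (1)
    {
      int newval = (oldval | CANCELSTATE_BITMASK) & ~CANCELTYPE_BITMASK;
      if (oldval == newval
          || atomic_compare_exchange_weak (&cancelhandling, &oldval, newval))
        break;
      /* On failure oldval has been reloaded with the current value;
         retry with it.  */
    }
}

int
main (void)
{
  atomic_store (&cancelhandling, CANCELTYPE_BITMASK);
  disable_further_cancellation ();
  printf ("cancelhandling = %#x\n", (unsigned int) atomic_load (&cancelhandling));
  return 0;
}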
@@ -482,9 +464,6 @@ extern int __pthread_equal (pthread_t thread1, pthread_t thread2);
extern int __pthread_kill (pthread_t threadid, int signo);
extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
extern int __pthread_setcanceltype (int type, int *oldtype);
-extern int __pthread_enable_asynccancel (void) attribute_hidden;
-extern void __pthread_disable_asynccancel (int oldtype)
- internal_function attribute_hidden;
#ifdef IS_IN_libpthread
hidden_proto (__pthread_mutex_init)
@@ -514,16 +493,6 @@ extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond,
extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize,
cpu_set_t *cpuset);
-/* The two functions are in libc.so and not exported. */
-extern int __libc_enable_asynccancel (void) attribute_hidden;
-extern void __libc_disable_asynccancel (int oldtype)
- internal_function attribute_hidden;
-
-
-/* The two functions are in librt.so and not exported. */
-extern int __librt_enable_asynccancel (void) attribute_hidden;
-extern void __librt_disable_asynccancel (int oldtype)
- internal_function attribute_hidden;
#ifdef IS_IN_libpthread
/* Special versions which use non-exported functions. */
@@ -18,11 +18,10 @@
#include <errno.h>
#include <signal.h>
-#include "pthreadP.h"
-#include "atomic.h"
+#include <pthreadP.h>
+#include <atomic.h>
#include <sysdep.h>
-
int
pthread_cancel (pthread_t th)
{
@@ -36,67 +35,17 @@ pthread_cancel (pthread_t th)
#ifdef SHARED
pthread_cancel_init ();
#endif
- int result = 0;
- int oldval;
- int newval;
- do
- {
- again:
- oldval = pd->cancelhandling;
- newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
-
- /* Avoid doing unnecessary work. The atomic operation can
- potentially be expensive if the bug has to be locked and
- remote cache lines have to be invalidated. */
- if (oldval == newval)
- break;
-
- /* If the cancellation is handled asynchronously just send a
- signal. We avoid this if possible since it's more
- expensive. */
- if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
- {
- /* Mark the cancellation as "in progress". */
- if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
- oldval | CANCELING_BITMASK,
- oldval))
- goto again;
-
- /* The cancellation handler will take care of marking the
- thread as canceled. */
- INTERNAL_SYSCALL_DECL (err);
-
- /* One comment: The PID field in the TCB can temporarily be
- changed (in fork). But this must not affect this code
- here. Since this function would have to be called while
- the thread is executing fork, it would have to happen in
- a signal handler. But this is no allowed, pthread_cancel
- is not guaranteed to be async-safe. */
- int val;
- val = INTERNAL_SYSCALL (tgkill, err, 3,
- THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
- SIGCANCEL);
-
- if (INTERNAL_SYSCALL_ERROR_P (val, err))
- result = INTERNAL_SYSCALL_ERRNO (val, err);
-
- break;
- }
- /* A single-threaded process should be able to kill itself, since there is
- nothing in the POSIX specification that says that it cannot. So we set
- multiple_threads to true so that cancellation points get executed. */
- THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
+ THREAD_ATOMIC_BIT_SET (pd, cancelhandling, CANCELED_BIT);
+ /* A single-threaded process should be able to kill itself, since there is
+ nothing in the POSIX specification that says that it cannot. So we set
+ multiple_threads to true so that cancellation points get executed. */
+ THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
- __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
+ __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
- }
- /* Mark the thread as canceled. This has to be done
- atomically since other bits could be modified as well. */
- while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
- oldval));
- return result;
+ return __pthread_kill (th, SIGCANCEL);
}
PTHREAD_STATIC_FN_REQUIRE (pthread_create)
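With the CANCELING bit gone, pthread_cancel above is reduced to setting CANCELED_BIT and signalling the target with SIGCANCEL; the cancellation itself happens at the next cancellation point or inside the syscall bracket. The user-visible deferred behaviour is unchanged and can be checked with the public API alone, for example:

/* Deferred cancellation at a blocking syscall: read on an empty pipe is
   the cancellation point, and the cleanup handler runs during unwind.  */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int fds[2];

static void
cleanup (void *arg)
{
  puts ("cleanup handler ran");
}

static void *
worker (void *arg)
{
  char c;
  pthread_cleanup_push (cleanup, NULL);
  read (fds[0], &c, 1);              /* Blocks until cancelled.  */
  pthread_cleanup_pop (0);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  void *res;

  if (pipe (fds) != 0)
    return 1;
  pthread_create (&t, NULL, worker, NULL);
  sleep (1);                         /* Let the worker block in read.  */
  pthread_cancel (t);
  pthread_join (t, &res);
  printf ("joined, canceled: %d\n", res == PTHREAD_CANCELED);
  return 0;
}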
@@ -41,7 +41,6 @@ extern void __condvar_cleanup (void *arg)
struct _condvar_cleanup_buffer
{
- int oldtype;
pthread_cond_t *cond;
pthread_mutex_t *mutex;
unsigned int bc_seq;
@@ -62,7 +61,7 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
int pshared = (cond->__data.__mutex == (void *) ~0l)
? LLL_SHARED : LLL_PRIVATE;
-#if (defined lll_futex_timed_wait_requeue_pi \
+#if (defined lll_futex_timed_wait_requeue_pi_cancel \
&& defined __ASSUME_REQUEUE_PI)
int pi_flag = 0;
#endif
@@ -155,12 +154,9 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
/* Prepare to wait. Release the condvar futex. */
lll_unlock (cond->__data.__lock, pshared);
- /* Enable asynchronous cancellation. Required by the standard. */
- cbuffer.oldtype = __pthread_enable_asynccancel ();
-
/* REQUEUE_PI was implemented after FUTEX_CLOCK_REALTIME, so it is sufficient
to check just the former. */
-#if (defined lll_futex_timed_wait_requeue_pi \
+#if (defined lll_futex_timed_wait_requeue_pi_cancel \
&& defined __ASSUME_REQUEUE_PI)
/* If pi_flag remained 1 then it means that we had the lock and the mutex
but a spurious waker raced ahead of us. Give back the mutex before
@@ -176,10 +172,11 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
{
unsigned int clockbit = (cond->__data.__nwaiters & 1
? 0 : FUTEX_CLOCK_REALTIME);
- err = lll_futex_timed_wait_requeue_pi (&cond->__data.__futex,
- futex_val, abstime, clockbit,
- &mutex->__data.__lock,
- pshared);
+ err = lll_futex_timed_wait_requeue_pi_cancel (&cond->__data.__futex,
+ futex_val, abstime,
+ clockbit,
+ &mutex->__data.__lock,
+ pshared);
pi_flag = (err == 0);
}
else
@@ -187,21 +184,19 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
{
#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \
- || !defined lll_futex_timed_wait_bitset)
+ || !defined lll_futex_timed_wait_bitset_cancel)
/* Wait until woken by signal or broadcast. */
- err = lll_futex_timed_wait (&cond->__data.__futex,
- futex_val, &rt, pshared);
+ err = lll_futex_timed_wait_cancel (&cond->__data.__futex,
+ futex_val, &rt, pshared);
#else
unsigned int clockbit = (cond->__data.__nwaiters & 1
? 0 : FUTEX_CLOCK_REALTIME);
- err = lll_futex_timed_wait_bitset (&cond->__data.__futex, futex_val,
- abstime, clockbit, pshared);
+ err = lll_futex_timed_wait_bitset_cancel (&cond->__data.__futex,
+ futex_val,abstime,
+ clockbit, pshared);
#endif
}
- /* Disable asynchronous cancellation. */
- __pthread_disable_asynccancel (cbuffer.oldtype);
-
/* We are going to look at shared data again, so get the lock. */
lll_lock (cond->__data.__lock, pshared);
@@ -29,7 +29,6 @@
struct _condvar_cleanup_buffer
{
- int oldtype;
pthread_cond_t *cond;
pthread_mutex_t *mutex;
unsigned int bc_seq;
@@ -106,7 +105,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
int pshared = (cond->__data.__mutex == (void *) ~0l)
? LLL_SHARED : LLL_PRIVATE;
-#if (defined lll_futex_wait_requeue_pi \
+#if (defined lll_futex_wait_requeue_pi_cancel \
&& defined __ASSUME_REQUEUE_PI)
int pi_flag = 0;
#endif
@@ -157,10 +156,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Prepare to wait. Release the condvar futex. */
lll_unlock (cond->__data.__lock, pshared);
- /* Enable asynchronous cancellation. Required by the standard. */
- cbuffer.oldtype = __pthread_enable_asynccancel ();
-
-#if (defined lll_futex_wait_requeue_pi \
+#if (defined lll_futex_wait_requeue_pi_cancel \
&& defined __ASSUME_REQUEUE_PI)
/* If pi_flag remained 1 then it means that we had the lock and the mutex
but a spurious waker raced ahead of us. Give back the mutex before
@@ -174,19 +170,17 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
if (pi_flag)
{
- err = lll_futex_wait_requeue_pi (&cond->__data.__futex,
- futex_val, &mutex->__data.__lock,
- pshared);
+ err = lll_futex_wait_requeue_pi_cancel (&cond->__data.__futex,
+ futex_val,
+ &mutex->__data.__lock,
+ pshared);
pi_flag = (err == 0);
}
else
#endif
/* Wait until woken by signal or broadcast. */
- lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
-
- /* Disable asynchronous cancellation. */
- __pthread_disable_asynccancel (cbuffer.oldtype);
+ lll_futex_wait_cancel (&cond->__data.__futex, futex_val, pshared);
/* We are going to look at shared data again, so get the lock. */
lll_lock (cond->__data.__lock, pshared);
@@ -266,7 +266,7 @@ start_thread (void *arg)
/* If the parent was running cancellation handlers while creating
the thread the new thread inherited the signal mask. Reset the
cancellation signal mask. */
- if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
+ if (__glibc_unlikely (pd->parent_cancelhandling & CANCELED_BITMASK))
{
INTERNAL_SYSCALL_DECL (err);
sigset_t mask;
@@ -293,14 +293,10 @@ start_thread (void *arg)
if (__glibc_unlikely (pd->stopped_start))
{
- int oldtype = CANCEL_ASYNC ();
-
/* Get the lock the parent locked to force synchronization. */
lll_lock (pd->lock, LLL_PRIVATE);
/* And give it up right away. */
lll_unlock (pd->lock, LLL_PRIVATE);
-
- CANCEL_RESET (oldtype);
}
LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);
@@ -23,9 +23,14 @@
void
__pthread_exit (void *value)
{
- THREAD_SETMEM (THREAD_SELF, result, value);
+ struct pthread *self = THREAD_SELF;
- __do_cancel ();
+ THREAD_SETMEM (self, result, value);
+
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+
+ __pthread_unwind ((__pthread_unwind_buf_t *)
+ THREAD_GETMEM (self, cleanup_jmp_buf));
}
strong_alias (__pthread_exit, pthread_exit)
@@ -61,13 +61,10 @@ pthread_join (pthread_t threadid, void **thread_return)
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
- /* Switch to asynchronous cancellation. */
- int oldtype = CANCEL_ASYNC ();
-
if ((pd == self
|| (self->joinid == pd
&& (pd->cancelhandling
- & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+ & (CANCELED_BITMASK | EXITING_BITMASK
| TERMINATED_BITMASK)) == 0))
&& !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
/* This is a deadlock situation. The threads are waiting for each
@@ -89,10 +86,6 @@ pthread_join (pthread_t threadid, void **thread_return)
/* Wait for the child. */
lll_wait_tid (pd->tid);
-
- /* Restore cancellation mode. */
- CANCEL_RESET (oldtype);
-
/* Remove the handler. */
pthread_cleanup_pop (0);
@@ -71,17 +71,9 @@ pthread_timedjoin_np (pthread_t threadid, void **thread_return,
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
- /* Switch to asynchronous cancellation. */
- int oldtype = CANCEL_ASYNC ();
-
-
/* Wait for the child. */
result = lll_timedwait_tid (pd->tid, abstime);
-
- /* Restore cancellation mode. */
- CANCEL_RESET (oldtype);
-
/* Remove the handler. */
pthread_cleanup_pop (0);
@@ -29,22 +29,6 @@
extern void __sem_wait_cleanup (void *arg) attribute_hidden;
-/* This is in a seperate function in order to make sure gcc
- puts the call site into an exception region, and thus the
- cleanups get properly run. */
-static int
-__attribute__ ((noinline))
-do_futex_timed_wait (struct new_sem *isem, struct timespec *rt)
-{
- int err, oldtype = __pthread_enable_asynccancel ();
-
- err = lll_futex_timed_wait (&isem->value, 0, rt,
- isem->private ^ FUTEX_PRIVATE_FLAG);
-
- __pthread_disable_asynccancel (oldtype);
- return err;
-}
-
int
sem_timedwait (sem_t *sem, const struct timespec *abstime)
{
@@ -93,7 +77,8 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
/* Do wait. */
rt.tv_sec = sec;
rt.tv_nsec = nsec;
- err = do_futex_timed_wait(isem, &rt);
+ err = lll_futex_timed_wait_cancel (&isem->value, 0, &rt,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
if (err != 0 && err != -EWOULDBLOCK)
{
__set_errno (-err);
@@ -37,21 +37,6 @@ __sem_wait_cleanup (void *arg)
atomic_decrement (&isem->nwaiters);
}
-/* This is in a seperate function in order to make sure gcc
- puts the call site into an exception region, and thus the
- cleanups get properly run. */
-static int
-__attribute__ ((noinline))
-do_futex_wait (struct new_sem *isem)
-{
- int err, oldtype = __pthread_enable_asynccancel ();
-
- err = lll_futex_wait (&isem->value, 0, isem->private ^ FUTEX_PRIVATE_FLAG);
-
- __pthread_disable_asynccancel (oldtype);
- return err;
-}
-
int
__new_sem_wait (sem_t *sem)
{
@@ -67,7 +52,8 @@ __new_sem_wait (sem_t *sem)
while (1)
{
- err = do_futex_wait(isem);
+ err = lll_futex_wait_cancel (&isem->value, 0,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
if (err != 0 && err != -EWOULDBLOCK)
{
__set_errno (-err);
@@ -104,14 +90,8 @@ __old_sem_wait (sem_t *sem)
if (atomic_decrement_if_positive (futex) > 0)
return 0;
- /* Enable asynchronous cancellation. Required by the standard. */
- int oldtype = __pthread_enable_asynccancel ();
-
/* Always assume the semaphore is shared. */
- err = lll_futex_wait (futex, 0, LLL_SHARED);
-
- /* Disable asynchronous cancellation. */
- __pthread_disable_asynccancel (oldtype);
+ err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
}
while (err == 0 || err == -EWOULDBLOCK);
@@ -62,7 +62,6 @@ include ../Rules
CFLAGS-aio_suspend.c = -fexceptions
CFLAGS-clock_nanosleep.c = -fexceptions -fasynchronous-unwind-tables
-CFLAGS-librt-cancellation.c = -fasynchronous-unwind-tables
LDFLAGS-rt.so = -Wl,--enable-new-dtags,-z,nodelete
@@ -3,6 +3,3 @@
/* No multi-thread handling enabled. */
#define SINGLE_THREAD_P (1)
#define RTLD_SINGLE_THREAD_P (1)
-#define LIBC_CANCEL_ASYNC() 0 /* Just a dummy value. */
-#define LIBC_CANCEL_RESET(val) ((void)(val)) /* Nothing, but evaluate it. */
-#define LIBC_CANCEL_HANDLED() /* Nothing. */
@@ -21,8 +21,7 @@ libpthread-sysdep_routines += errno-loc
endif
ifeq ($(subdir),rt)
-librt-sysdep_routines += timer_routines librt-cancellation
-CFLAGS-librt-cancellation.c += -fexceptions -fasynchronous-unwind-tables
+librt-sysdep_routines += timer_routines
ifeq ($(have-forced-unwind),yes)
tests += tst-mqueue8x
@@ -41,15 +41,15 @@
{ \
pthread_mutex_unlock (&__aio_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = lll_futex_timed_wait (futexaddr, oldval, timeout, \
- LLL_PRIVATE); \
+ if (cancel) \
+ status = lll_futex_timed_wait_cancel (futexaddr, oldval, \
+ timeout, LLL_PRIVATE); \
+ else \
+ status = lll_futex_timed_wait (futexaddr, oldval, timeout, \
+ LLL_PRIVATE); \
if (status != -EWOULDBLOCK) \
break; \
\
@@ -57,9 +57,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == -EINTR) \
result = EINTR; \
else if (status == -ETIMEDOUT) \
@@ -42,15 +42,15 @@
{ \
pthread_mutex_unlock (&__gai_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = lll_futex_timed_wait (futexaddr, oldval, timeout, \
- LLL_PRIVATE); \
+ if (cancel) \
+ status = lll_futex_timed_wait_cancel (futexaddr, oldval, \
+ timeout, LLL_PRIVATE); \
+ else \
+ status = lll_futex_timed_wait (futexaddr, oldval, timeout, \
+ LLL_PRIVATE); \
if (status != -EWOULDBLOCK) \
break; \
\
@@ -58,9 +58,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == -EINTR) \
result = EINTR; \
else if (status == -ETIMEDOUT) \
deleted file mode 100644
@@ -1,24 +0,0 @@
-/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <nptl/pthreadP.h>
-
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include <nptl/cancellation.c>
@@ -172,7 +172,7 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED);\
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);\
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -50,16 +50,7 @@ do_sigpause (int sig_or_mask, int is_sig)
int
__sigpause (int sig_or_mask, int is_sig)
{
- if (SINGLE_THREAD_P)
- return do_sigpause (sig_or_mask, is_sig);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_sigpause (sig_or_mask, is_sig);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return do_sigpause (sig_or_mask, is_sig);
}
libc_hidden_def (__sigpause)
@@ -151,16 +151,7 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
int
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
- if (SINGLE_THREAD_P)
- return do_waitid (idtype, id, infop, options);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_waitid (idtype, id, infop, options);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return do_waitid (idtype, id, infop, options);
}
weak_alias (__waitid, waitid)
strong_alias (__waitid, __libc_waitid)
@@ -15,6 +15,9 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#ifndef _PTHREADDEF_H
+# define _PTHREADDEF_H
+
/* Default stack size. */
#define ARCH_STACK_DEFAULT_SIZE (4 * 1024 * 1024)
@@ -31,3 +34,11 @@
/* Location of current stack frame. */
#define CURRENT_STACK_FRAME __builtin_frame_address (0)
+
+static inline const char *
+__pthread_get_ip (const ucontext_t *uc)
+{
+ return (char *)uc->uc_mcontext.gp_regs[PT_NIP];
+}
+
+#endif
@@ -20,31 +20,69 @@
#include <sys/syscall.h>
#define HAVE_SYSCALLS
+#ifndef __ASSEMBLER__
+# include <errno.h>
+
/* Note that using a `PASTE' macro loses. */
#define SYSCALL__(name, args) PSEUDO (__##name, name, args)
#define SYSCALL(name, args) PSEUDO (name, name, args)
/* Cancellation macros. */
-#define __SYSCALL_NARGS_X(a,b,c,d,e,f,g,n,...) n
+#ifndef __SSC
+typedef long __syscall_arg_t;
+# define __SSC(__x) ((__syscall_arg_t) (__x))
+#endif
+
+#define __SYSCALL_CANCEL0(__n) \
+ (__syscall_cancel)(__n, 0, 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL1(__n, __a) \
+ (__syscall_cancel)(__n, __SSC(__a), 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL2(__n, __a, __b) \
+ (__syscall_cancel)(__n, __SSC(__a), __SSC(__b), 0, 0, 0, 0)
+#define __SYSCALL_CANCEL3(__n, __a, __b, __c) \
+ (__syscall_cancel)(__n, __SSC(__a), __SSC(__b), __SSC(__c), 0, 0, 0)
+#define __SYSCALL_CANCEL4(__n, __a, __b, __c, __d) \
+ (__syscall_cancel)(__n, __SSC(__a), __SSC(__b), __SSC(__c), __SSC(__d), \
+ 0, 0)
+#define __SYSCALL_CANCEL5(__n, __a, __b, __c, __d, __e) \
+ (__syscall_cancel)(__n, __SSC(__a), __SSC(__b), __SSC(__c), __SSC(__d), \
+ __SSC(__e), 0)
+#define __SYSCALL_CANCEL6(__n, __a, __b, __c, __d, __e, __f) \
+ (__syscall_cancel)(__n, __SSC(__a), __SSC(__b), __SSC(__c), __SSC(__d), \
+ __SSC(__e), __SSC(__f))
+
+#define __SYSCALL_NARGS_X(a,b,c,d,e,f,g,h,n,...) n
#define __SYSCALL_NARGS(...) \
__SYSCALL_NARGS_X (__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0,)
+#define __SYSCALL_CONCAT_X(__a,__b) __a##__b
+#define __SYSCALL_CONCAT(__a,__b) __SYSCALL_CONCAT_X (__a, __b)
+#define __SYSCALL_DISP(__b,...) \
+ __SYSCALL_CONCAT (__b,__SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __SYSCALL_CANCEL(...) __SYSCALL_DISP (__SYSCALL_CANCEL, __VA_ARGS__)
-#define SYSCALL_CANCEL(name, ...) \
- ({ \
- long int sc_ret; \
- if (SINGLE_THREAD_P) \
- sc_ret = INLINE_SYSCALL (name, __SYSCALL_NARGS(__VA_ARGS__), \
- __VA_ARGS__); \
- else \
- { \
- int sc_cancel_oldtype = LIBC_CANCEL_ASYNC (); \
- sc_ret = INLINE_SYSCALL (name, __SYSCALL_NARGS (__VA_ARGS__), \
- __VA_ARGS__); \
- LIBC_CANCEL_RESET (sc_cancel_oldtype); \
- } \
- sc_ret; \
+#define SYSCALL_CANCEL_NCS(name, nr, args...) \
+ __SYSCALL_CANCEL (__NR_##name, nr, args)
+
+#define SYSCALL_CANCEL(name, args...) \
+ ({ \
+ long int sc_ret = SYSCALL_CANCEL_NCS (name, args); \
+ if (SYSCALL_CANCEL_ERROR (sc_ret)) \
+ { \
+ __set_errno (SYSCALL_CANCEL_ERRNO (sc_ret)); \
+ sc_ret = -1L; \
+ } \
+ sc_ret; \
})
+long int __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
+ __syscall_arg_t arg2, __syscall_arg_t arg3,
+ __syscall_arg_t arg4, __syscall_arg_t arg5,
+ __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel);
+
+#endif
+
/* Machine-dependent sysdep.h files are expected to define the macro
PSEUDO (function_name, syscall_name) to emit assembly code to define the
C-callable function FUNCTION_NAME to do system call SYSCALL_NAME.
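The variadic plumbing above (__SYSCALL_NARGS, __SYSCALL_CONCAT, __SYSCALL_DISP) is the usual count-and-paste dispatch. It can be verified in isolation with a throwaway example; the SUM names below are hypothetical stand-ins for the fixed-arity __SYSCALL_CANCELn macros, which additionally pad the call out to six __syscall_cancel arguments.

/* Count 1..3 variadic arguments and dispatch to a fixed-arity macro,
   in the style of __SYSCALL_NARGS / __SYSCALL_DISP.  */
#include <stdio.h>

#define NARGS_X(_1, _2, _3, n, ...) n
#define NARGS(...) NARGS_X (__VA_ARGS__, 3, 2, 1, 0)

#define CONCAT_X(a, b) a##b
#define CONCAT(a, b) CONCAT_X (a, b)
#define DISP(prefix, ...) CONCAT (prefix, NARGS (__VA_ARGS__)) (__VA_ARGS__)

#define SUM1(a)        (a)
#define SUM2(a, b)     ((a) + (b))
#define SUM3(a, b, c)  ((a) + (b) + (c))
#define SUM(...)       DISP (SUM, __VA_ARGS__)

int
main (void)
{
  /* Prints "1 3 6".  */
  printf ("%d %d %d\n", SUM (1), SUM (1, 2), SUM (1, 2, 3));
  return 0;
}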
@@ -28,7 +28,6 @@ int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
struct timespec *rem)
{
- INTERNAL_SYSCALL_DECL (err);
int r;
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
@@ -36,19 +35,9 @@ __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
- if (SINGLE_THREAD_P)
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
- else
- {
- int oldstate = LIBC_CANCEL_ASYNC ();
+ r = SYSCALL_CANCEL_NCS (clock_nanosleep, clock_id, flags, req, rem);
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
- rem);
-
- LIBC_CANCEL_RESET (oldstate);
- }
-
- return (INTERNAL_SYSCALL_ERROR_P (r, err)
- ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+ return (SYSCALL_CANCEL_ERROR (r)
+ ? SYSCALL_CANCEL_ERRNO (r) : 0);
}
weak_alias (__clock_nanosleep, clock_nanosleep)
@@ -133,5 +133,50 @@
private), \
nr_wake, nr_move, mutex, val)
+/* Cancellable futex macros. */
+#define lll_futex_wait_cancel(futexp, val, private) \
+ lll_futex_timed_wait_cancel (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait_cancel(futexp, val, timespec, private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT; \
+ \
+ __ret = __syscall_cancel (__NR_futex, __SSC (futexp), \
+ __SSC (__lll_private_flag (__op, private)), \
+ __SSC (val), __SSC (timespec), 0, 0); \
+ __ret; \
+ })
+
+#define lll_futex_timed_wait_bitset_cancel(futexp, val, timespec, clockbit, \
+ private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT_BITSET | clockbit; \
+ \
+ __ret = __syscall_cancel (__NR_futex, __SSC (futexp), \
+ __SSC (__lll_private_flag (__op, private)), \
+ __SSC (val), __SSC (timespec), 0, \
+ FUTEX_BITSET_MATCH_ANY); \
+ __ret; \
+ })
+
+#define lll_futex_wait_requeue_pi_cancel(futexp, val, mutex, private) \
+ lll_futex_timed_wait_requeue_pi_cancel (futexp, val, NULL, 0, mutex, private)
+
+#define lll_futex_timed_wait_requeue_pi_cancel(futexp, val, timespec, \
+ clockbit, mutex, private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT_REQUEUE_PI | clockbit; \
+ \
+ __ret = __syscall_cancel (__NR_futex, __SSC (futexp), \
+ __SSC (__lll_private_flag (__op, private)), \
+ __SSC (val), __SSC (timespec), \
+ __SSC (mutex), 0); \
+ __ret; \
+ })
+
+
#endif /* lowlevellock-futex.h */
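Each *_cancel macro above is an ordinary futex operation routed through __syscall_cancel so the blocking wait becomes a cancellation point. Stripped of the cancellation plumbing, the operation being issued is just a FUTEX_WAIT; a standalone sketch using syscall(2) follows (not glibc code, and the futex word already differs from the expected value so the call returns immediately with EAGAIN).

/* Sketch of the raw futex wait that lll_futex_timed_wait_cancel funnels
   through __syscall_cancel.  */
#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long
futex_wait (int *uaddr, int expected, const struct timespec *timeout)
{
  /* FUTEX_PRIVATE_FLAG corresponds to LLL_PRIVATE in the macros.  */
  return syscall (SYS_futex, uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                  expected, timeout, NULL, 0);
}

int
main (void)
{
  int word = 1;
  long r = futex_wait (&word, 0, NULL);
  printf ("futex_wait returned %ld (errno %d)\n", r, r == -1 ? errno : 0);
  return 0;
}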
@@ -23,8 +23,7 @@
#include <sys/syscall.h>
-
-#ifndef NO_CANCELLATION
+#ifndef IS_IN_rtld
int
__fcntl_nocancel (int fd, int cmd, ...)
{
@@ -39,7 +38,6 @@ __fcntl_nocancel (int fd, int cmd, ...)
}
#endif
-
int
__libc_fcntl (int fd, int cmd, ...)
{
@@ -48,10 +48,10 @@
#if _CALL_ELF == 2
#define FRAMESIZE (FRAME_MIN_SIZE+16+64)
-#define stackblock (FRAME_MIN_SIZE+16)
+#define STACKBLOCK (FRAME_MIN_SIZE+16)
#else
#define FRAMESIZE (FRAME_MIN_SIZE+16)
-#define stackblock (FRAMESIZE+FRAME_PARM_SAVE) /* offset to parm save area. */
+#define STACKBLOCK (FRAMESIZE+FRAME_PARM_SAVE) /* offset to parm save area. */
#endif
.text
@@ -59,75 +59,69 @@ ENTRY(__socket)
CALL_MCOUNT NARGS
stdu r1,-FRAMESIZE(r1)
cfi_adjust_cfa_offset(FRAMESIZE)
+
#if NARGS >= 1
- std r3,stackblock(r1)
+ std r3,STACKBLOCK(r1)
#endif
#if NARGS >= 2
- std r4,8+stackblock(r1)
+ std r4,8+STACKBLOCK(r1)
#endif
#if NARGS >= 3
- std r5,16+stackblock(r1)
+ std r5,16+STACKBLOCK(r1)
#endif
#if NARGS >= 4
- std r6,24+stackblock(r1)
+ std r6,24+STACKBLOCK(r1)
#endif
#if NARGS >= 5
- std r7,32+stackblock(r1)
+ std r7,32+STACKBLOCK(r1)
#endif
#if NARGS >= 6
- std r8,40+stackblock(r1)
+ std r8,40+STACKBLOCK(r1)
#endif
#if NARGS >= 7
- std r9,48+stackblock(r1)
+ std r9,48+STACKBLOCK(r1)
#endif
#if NARGS >= 8
- std r10,56+stackblock(r1)
+ std r10,56+STACKBLOCK(r1)
#endif
#if NARGS >= 9
#error too many arguments!
#endif
-#if defined NEED_CANCELLATION && defined CENABLE
+#ifdef NEED_CANCELLATION
SINGLE_THREAD_P
- bne- .Lsocket_cancel
+ bne- L(socket_cancel)
#endif
- li r3,P(SOCKOP_,socket)
- addi r4,r1,stackblock
- DO_CALL(SYS_ify(socketcall))
- addi r1,r1,FRAMESIZE
+#ifndef NEED_CANCELLATION
+ li r3,P(SOCKOP_,socket)
+ addi r4,r1,STACKBLOCK
+ DO_CALL(SYS_ify(socketcall))
+ addi r1,r1,FRAMESIZE
cfi_adjust_cfa_offset(-FRAMESIZE)
- PSEUDO_RET
-
-#if defined NEED_CANCELLATION && defined CENABLE
-.Lsocket_cancel:
- cfi_adjust_cfa_offset(FRAMESIZE)
+ bnslr+
+ TAIL_CALL_SYSCALL_ERROR
+#else
+L(socket_cancel):
mflr r9
std r9,FRAMESIZE+FRAME_LR_SAVE(r1)
- cfi_offset (lr, FRAME_LR_SAVE)
- CENABLE
- std r3,FRAME_MIN_SIZE+8(r1)
- li r3,P(SOCKOP_,socket)
- addi r4,r1,stackblock
- DO_CALL(SYS_ify(socketcall))
- mfcr r0
- std r3,FRAME_MIN_SIZE(r1)
- std r0,FRAMESIZE+FRAME_CR_SAVE(r1)
- cfi_offset (cr, FRAME_CR_SAVE)
- ld r3,FRAME_MIN_SIZE+8(r1)
- CDISABLE
- ld r4,FRAMESIZE+FRAME_LR_SAVE(r1)
- ld r0,FRAMESIZE+FRAME_CR_SAVE(r1)
- ld r3,FRAME_MIN_SIZE(r1)
- mtlr r4
- mtcr r0
+ cfi_offset(lr, FRAME_LR_SAVE);
+
+ li r3,SYS_ify (socketcall)
+ li r4,P(SOCKOP_,socket)
+ addi r5,r1,STACKBLOCK
+ bl HIDDEN_JUMPTARGET(__syscall_cancel)
+ nop
+ bl JUMPTARGET(__syscall_cancel_error)
+ nop
+ ld r9,FRAMESIZE+FRAME_LR_SAVE(r1)
+ mtlr r9
+ cfi_restore(lr)
addi r1,r1,FRAMESIZE
cfi_adjust_cfa_offset(-FRAMESIZE)
- cfi_restore(lr)
- cfi_restore(cr)
- PSEUDO_RET
-#endif
-PSEUDO_END (__socket)
+ blr
+#endif /* NEED_CANCELLATION */
+END (__socket)
#ifndef NO_WEAK_ALIAS
weak_alias (__socket, socket)
@@ -31,117 +31,59 @@
# define DASHDASHPFX(str) __##str
# endif
-#if _CALL_ELF == 2
-#define CANCEL_FRAMESIZE (FRAME_MIN_SIZE+16+48)
-#define CANCEL_PARM_SAVE (FRAME_MIN_SIZE+16)
-#else
-#define CANCEL_FRAMESIZE (FRAME_MIN_SIZE+16)
-#define CANCEL_PARM_SAVE (CANCEL_FRAMESIZE+FRAME_PARM_SAVE)
-#endif
+# ifdef NOT_IN_libc
+# undef HIDDEN_JUMPTARGET
+# define HIDDEN_JUMPTARGET(__symbol) __symbol
+# endif
+
+#define CANCEL_FRAMESIZE (FRAME_MIN_SIZE)
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.section ".text"; \
ENTRY (name) \
SINGLE_THREAD_P; \
- bne- .Lpseudo_cancel; \
- .type DASHDASHPFX(syscall_name##_nocancel),@function; \
- .globl DASHDASHPFX(syscall_name##_nocancel); \
- DASHDASHPFX(syscall_name##_nocancel): \
+ bne- L(pseudo_cancel); \
DO_CALL (SYS_ify (syscall_name)); \
- PSEUDO_RET; \
- .size DASHDASHPFX(syscall_name##_nocancel),.-DASHDASHPFX(syscall_name##_nocancel); \
- .Lpseudo_cancel: \
- stdu 1,-CANCEL_FRAMESIZE(1); \
- cfi_adjust_cfa_offset (CANCEL_FRAMESIZE); \
- mflr 9; \
- std 9,CANCEL_FRAMESIZE+FRAME_LR_SAVE(1); \
+ bnslr+; \
+ TAIL_CALL_SYSCALL_ERROR; \
+ L(pseudo_cancel): \
+ mflr r0; \
+ std r0,FRAME_LR_SAVE(r1); \
cfi_offset (lr, FRAME_LR_SAVE); \
- DOCARGS_##args; /* save syscall args around CENABLE. */ \
- CENABLE; \
- std 3,FRAME_MIN_SIZE(1); /* store CENABLE return value (MASK). */ \
- UNDOCARGS_##args; /* restore syscall args. */ \
- DO_CALL (SYS_ify (syscall_name)); \
- mfcr 0; /* save CR/R3 around CDISABLE. */ \
- std 3,FRAME_MIN_SIZE+8(1); \
- std 0,CANCEL_FRAMESIZE+FRAME_CR_SAVE(1); \
- cfi_offset (cr, FRAME_CR_SAVE); \
- ld 3,FRAME_MIN_SIZE(1); /* pass MASK to CDISABLE. */ \
- CDISABLE; \
- ld 9,CANCEL_FRAMESIZE+FRAME_LR_SAVE(1); \
- ld 0,CANCEL_FRAMESIZE+FRAME_CR_SAVE(1); /* restore CR/R3. */ \
- ld 3,FRAME_MIN_SIZE+8(1); \
- mtlr 9; \
- mtcr 0; \
- addi 1,1,CANCEL_FRAMESIZE; \
+ stdu r1,-CANCEL_FRAMESIZE(r1); \
+ cfi_adjust_cfa_offset (CANCEL_FRAMESIZE); \
+ mr r9,r8; \
+ mr r8,r7; \
+ mr r7,r6; \
+ mr r6,r5; \
+ mr r5,r4; \
+ mr r4,r3; \
+ li r3,SYS_ify (syscall_name); \
+ bl HIDDEN_JUMPTARGET(__syscall_cancel); \
+ nop; \
+ bl JUMPTARGET(__syscall_cancel_error); \
+ nop; \
+ addi r1,r1,CANCEL_FRAMESIZE; \
cfi_adjust_cfa_offset (-CANCEL_FRAMESIZE); \
+ ld r0,FRAME_LR_SAVE(r1); \
+ mtlr r0; \
cfi_restore (lr); \
- cfi_restore (cr)
-
-# define DOCARGS_0
-# define UNDOCARGS_0
-
-# define DOCARGS_1 std 3,CANCEL_PARM_SAVE(1); DOCARGS_0
-# define UNDOCARGS_1 ld 3,CANCEL_PARM_SAVE(1); UNDOCARGS_0
-
-# define DOCARGS_2 std 4,CANCEL_PARM_SAVE+8(1); DOCARGS_1
-# define UNDOCARGS_2 ld 4,CANCEL_PARM_SAVE+8(1); UNDOCARGS_1
+ blr
-# define DOCARGS_3 std 5,CANCEL_PARM_SAVE+16(1); DOCARGS_2
-# define UNDOCARGS_3 ld 5,CANCEL_PARM_SAVE+16(1); UNDOCARGS_2
-
-# define DOCARGS_4 std 6,CANCEL_PARM_SAVE+24(1); DOCARGS_3
-# define UNDOCARGS_4 ld 6,CANCEL_PARM_SAVE+24(1); UNDOCARGS_3
-
-# define DOCARGS_5 std 7,CANCEL_PARM_SAVE+32(1); DOCARGS_4
-# define UNDOCARGS_5 ld 7,CANCEL_PARM_SAVE+32(1); UNDOCARGS_4
-
-# define DOCARGS_6 std 8,CANCEL_PARM_SAVE+40(1); DOCARGS_5
-# define UNDOCARGS_6 ld 8,CANCEL_PARM_SAVE+40(1); UNDOCARGS_5
-
-# ifdef IS_IN_libpthread
-# ifdef SHARED
-# define CENABLE bl JUMPTARGET(__pthread_enable_asynccancel)
-# define CDISABLE bl JUMPTARGET(__pthread_disable_asynccancel)
-# else
-# define CENABLE bl JUMPTARGET(__pthread_enable_asynccancel); nop
-# define CDISABLE bl JUMPTARGET(__pthread_disable_asynccancel); nop
-# endif
-# elif !defined NOT_IN_libc
-# ifdef SHARED
-# define CENABLE bl JUMPTARGET(__libc_enable_asynccancel)
-# define CDISABLE bl JUMPTARGET(__libc_disable_asynccancel)
-# else
-# define CENABLE bl JUMPTARGET(__libc_enable_asynccancel); nop
-# define CDISABLE bl JUMPTARGET(__libc_disable_asynccancel); nop
-# endif
-# elif defined IS_IN_librt
-# ifdef SHARED
-# define CENABLE bl JUMPTARGET(__librt_enable_asynccancel)
-# define CDISABLE bl JUMPTARGET(__librt_disable_asynccancel)
-# else
-# define CENABLE bl JUMPTARGET(__librt_enable_asynccancel); nop
-# define CDISABLE bl JUMPTARGET(__librt_disable_asynccancel); nop
-# endif
-# else
-# error Unsupported library
-# endif
+# undef PSEUDO_RET
+# define PSEUDO_RET
# ifndef __ASSEMBLER__
-# define SINGLE_THREAD_P \
- __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
- header.multiple_threads) == 0, 1)
+# define SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
# else
-# define SINGLE_THREAD_P \
- lwz 10,MULTIPLE_THREADS_OFFSET(13); \
+# define SINGLE_THREAD_P \
+ lwz 10,MULTIPLE_THREADS_OFFSET(13); \
cmpwi 10,0
# endif
-#elif !defined __ASSEMBLER__
-
-# define SINGLE_THREAD_P (1)
-# define NO_CANCELLATION 1
-
#endif
#ifndef __ASSEMBLER__
@@ -184,6 +184,15 @@
sc_ret; \
})
+#undef SYSCALL_CANCEL_ERROR
+#define SYSCALL_CANCEL_ERROR(err) \
+ (err > 0xfffffffffffff000UL)
+
+#undef SYSCALL_CANCEL_ERRNO
+#define SYSCALL_CANCEL_ERRNO(err) \
+ (-err)
+
+
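The error test here relies on the Linux convention that a raw syscall return value in [-4095, -1] encodes -errno, which is what the unsigned comparison against 0xfffffffffffff000 checks. A standalone sketch of the same convention follows; the RAW_* names are invented for the example, and since syscall(2) already converts errors to -1/errno the raw value is reconstructed by hand.

/* Sketch of the kernel "negative errno" return convention tested by
   SYSCALL_CANCEL_ERROR / SYSCALL_CANCEL_ERRNO.  */
#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define RAW_IS_ERROR(v)  ((unsigned long) (v) > -4096UL)
#define RAW_ERRNO(v)     ((int) -(v))

int
main (void)
{
  /* write to an invalid descriptor: the raw kernel return is -EBADF,
     which syscall(2) has already turned into -1 with errno set.  */
  long r = syscall (SYS_write, -1, "x", (size_t) 1);
  long raw = (r == -1) ? -(long) errno : r;
  printf ("raw=%ld is_error=%d errno=%d\n",
          raw, RAW_IS_ERROR (raw), RAW_ERRNO (raw));
  return 0;
}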
/* Define a macro which expands inline into the wrapper code for a system
call. This use is for internal calls that do not need to handle errors
normally. It will never touch errno. This returns just what the kernel
new file mode 100644
@@ -0,0 +1,56 @@
+/* Cancellable syscall wrapper - powerpc version.
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+ENTRY (__syscall_cancel_arch)
+
+ .globl __syscall_cancel_arch_start
+ .type __syscall_cancel_arch_start,@function
+__syscall_cancel_arch_start:
+
+ /*mflr r10
+ std r10,FRAME_LR_SAVE(r1)*/
+
+ lwz r0,0(r3)
+ rldicl. r0,r0,62,63
+ beq 1f
+ b __syscall_do_cancel
+ nop
+1:
+ mr r0,r4
+ mr r3,r5
+ mr r4,r6
+ mr r5,r7
+ mr r6,r8
+ mr r7,r9
+ mr r8,r10
+ sc
+
+ .globl __syscall_cancel_arch_end
+ .type __syscall_cancel_arch_end,@function
+__syscall_cancel_arch_end:
+
+ /*ld r10,FRAME_LR_SAVE(r1)
+ mtlr r10*/
+
+ bnslr+
+ neg r3,r3
+ blr
+END (__syscall_cancel_arch)
+libc_hidden_def (__syscall_cancel_arch)
@@ -26,3 +26,14 @@ __syscall_error (int err_no)
__set_errno (err_no);
return -1;
}
+
+long int
+__syscall_cancel_error (unsigned long err)
+{
+ if (__glibc_unlikely ((err) & (1 << 28)))
+ {
+ __set_errno (-err);
+ return -1;
+ }
+ return err;
+}
@@ -41,9 +41,8 @@ __pthread_kill (pthread_t threadid, int signo)
/* Not a valid thread handle. */
return ESRCH;
- /* Disallow sending the signal we use for cancellation, timers,
- for the setxid implementation. */
- if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
+ /* Disallow sending the signal we use for the setxid implementation. */
+ if (signo == SIGSETXID)
return EINVAL;
/* We have a special syscall to do the work. */
@@ -55,32 +55,17 @@ do_sigwait (const sigset_t *set, int *sig)
/* XXX The size argument hopefully will have to be changed to the
real size of the user-level sigset_t. */
-#ifdef INTERNAL_SYSCALL
- INTERNAL_SYSCALL_DECL (err);
do
- ret = INTERNAL_SYSCALL (rt_sigtimedwait, err, 4, set,
- NULL, NULL, _NSIG / 8);
- while (INTERNAL_SYSCALL_ERROR_P (ret, err)
- && INTERNAL_SYSCALL_ERRNO (ret, err) == EINTR);
- if (! INTERNAL_SYSCALL_ERROR_P (ret, err))
+ ret = SYSCALL_CANCEL_NCS (rt_sigtimedwait, set, NULL, NULL, _NSIG / 8);
+ while (SYSCALL_CANCEL_ERROR (ret)
+ && SYSCALL_CANCEL_ERRNO (ret) == EINTR);
+ if (!SYSCALL_CANCEL_ERROR (ret))
{
*sig = ret;
ret = 0;
}
else
- ret = INTERNAL_SYSCALL_ERRNO (ret, err);
-#else
- do
- ret = INLINE_SYSCALL (rt_sigtimedwait, 4, set, NULL, NULL, _NSIG / 8);
- while (ret == -1 && errno == EINTR);
- if (ret != -1)
- {
- *sig = ret;
- ret = 0;
- }
- else
- ret = errno;
-#endif
+ ret = SYSCALL_CANCEL_ERRNO (ret);
return ret;
}
@@ -88,16 +73,7 @@ do_sigwait (const sigset_t *set, int *sig)
int
__sigwait (const sigset_t *set, int *sig)
{
- if (SINGLE_THREAD_P)
- return do_sigwait (set, sig);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_sigwait (set, sig);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return do_sigwait (set, sig);
}
libc_hidden_def (__sigwait)
weak_alias (__sigwait, sigwait)