@@ -278,7 +278,7 @@ tests = tst-attr2 tst-attr3 tst-default-attr \
tst-sem17 \
tst-tsd3 tst-tsd4 \
tst-cancel4 tst-cancel4_1 tst-cancel4_2 tst-cancel5 \
- tst-cancel7 tst-cancel13 \
+ tst-cancel7 \
tst-cancel16 tst-cancel17 tst-cancel20 tst-cancel24 \
tst-cleanup4 \
tst-signal3 \
@@ -358,7 +358,6 @@ endif
LDFLAGS-pthread.so = -Wl,--enable-new-dtags,-z,nodelete,-z,initfirst
tests += tst-cancelx4 tst-cancelx5 tst-cancelx7 \
- tst-cancelx13 \
tst-cancelx16 tst-cancelx17 tst-cancelx20 \
tst-cleanupx4
@@ -477,7 +476,6 @@ CFLAGS-tst-cancelx5.c += -Wno-error
CFLAGS-tst-cancelx4.c += -fexceptions
CFLAGS-tst-cancelx5.c += -fexceptions
CFLAGS-tst-cancelx7.c += -fexceptions
-CFLAGS-tst-cancelx13.c += -fexceptions
CFLAGS-tst-cancelx16.c += -fexceptions
CFLAGS-tst-cancelx17.c += -fexceptions
CFLAGS-tst-cancelx20.c += -fexceptions -fasynchronous-unwind-tables
@@ -23,36 +23,98 @@
#include <pt-internal.h>
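+/* Argument block for the cancellation hook: the thread to wake up and the
+   semaphore it is blocked on.  */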
+struct cancel_ctx
+{
+ struct __pthread *wakeup;
+ sem_t *sem;
+};
+
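+/* Cancellation hook: runs while the target thread is blocked (or about to
+   block) in __sem_timedwait_internal.  If the thread is still on the
+   semaphore's wait queue, dequeue it and wake it up so it can act on the
+   pending cancellation request.  */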
+static void
+cancel_hook (void *arg)
+{
+ struct cancel_ctx *ctx = arg;
+ struct __pthread *wakeup = ctx->wakeup;
+ sem_t *sem = ctx->sem;
+ int unblock;
+
+ __pthread_spin_wait (&sem->__lock);
+  /* The thread only needs to be woken up if it is blocking or about to
+     block.  If it was already unblocked, it is not queued any more.  */
+ unblock = wakeup->prevp != NULL;
+ if (unblock)
+ __pthread_dequeue (wakeup);
+ __pthread_spin_unlock (&sem->__lock);
+
+ if (unblock)
+ __pthread_wakeup (wakeup);
+}
+
int
__sem_timedwait_internal (sem_t *restrict sem,
clockid_t clock_id,
const struct timespec *restrict timeout)
{
error_t err;
- int drain;
- struct __pthread *self;
+ int cancelled, oldtype, drain;
+ int ret = 0;
+
+ struct __pthread *self = _pthread_self ();
+ struct cancel_ctx ctx;
+ ctx.wakeup = self;
+ ctx.sem = sem;
+
+  /* Test for a pending cancellation request, switch to deferred mode for
+     safer resource handling, and prepare the hook to call in case we're
+     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
+     hook can be called by another thread at any time.  Whatever happens,
+     this function must exit with CANCEL_LOCK released and the cancellation
+     hook cleared.
+
+     This function contains inline implementations of pthread_testcancel and
+     pthread_setcanceltype to reduce locking overhead.  */
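+  /* That is, the block below is roughly equivalent to
+
+       pthread_testcancel ();
+       pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, &oldtype);
+
+     (sketch only) but performed under a single acquisition of CANCEL_LOCK,
+     which is also kept held while the hook is installed below.  */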
+ __pthread_mutex_lock (&self->cancel_lock);
+ cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
+ && self->cancel_pending;
+
+ if (cancelled)
+ {
+ __pthread_mutex_unlock (&self->cancel_lock);
+ __pthread_exit (PTHREAD_CANCELED);
+ }
+
+ self->cancel_hook = cancel_hook;
+ self->cancel_hook_arg = &ctx;
+ oldtype = self->cancel_type;
+ if (oldtype != PTHREAD_CANCEL_DEFERRED)
+ self->cancel_type = PTHREAD_CANCEL_DEFERRED;
+
+  /* Add ourselves to the list of waiters.  This is done while the
+     cancellation hook is set up, which simplifies the cancellation
+     procedure: if the thread is found queued, the hook dequeues and wakes
+     it; otherwise it has already been unblocked and is progressing on the
+     return path.  */
__pthread_spin_wait (&sem->__lock);
if (sem->__value > 0)
/* Successful down. */
{
sem->__value--;
__pthread_spin_unlock (&sem->__lock);
- return 0;
+ goto out_locked;
}
if (timeout != NULL && ! valid_nanoseconds (timeout->tv_nsec))
{
errno = EINVAL;
- return -1;
+ ret = -1;
+ __pthread_spin_unlock (&sem->__lock);
+ goto out_locked;
}
/* Add ourselves to the queue. */
- self = _pthread_self ();
-
__pthread_enqueue (&sem->__queue, self);
__pthread_spin_unlock (&sem->__lock);
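+  /* The thread is queued and the hook is installed; once CANCEL_LOCK is
+     dropped, a cancelling thread may run cancel_hook and wake us at any
+     time.  */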
+ __pthread_mutex_unlock (&self->cancel_lock);
+
/* Block the thread. */
if (timeout != NULL)
err = __pthread_timedblock_intr (self, timeout, clock_id);
@@ -82,10 +144,24 @@ __sem_timedwait_internal (sem_t *restrict sem,
{
assert (err == ETIMEDOUT || err == EINTR);
errno = err;
- return -1;
+ ret = -1;
}
- return 0;
+ /* We're almost done. Remove the unblock hook, restore the previous
+ cancellation type, and check for a pending cancellation request. */
+ __pthread_mutex_lock (&self->cancel_lock);
+out_locked:
+ self->cancel_hook = NULL;
+ self->cancel_hook_arg = NULL;
+ self->cancel_type = oldtype;
+ cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
+ && self->cancel_pending;
+ __pthread_mutex_unlock (&self->cancel_lock);
+
+ if (cancelled)
+ __pthread_exit (PTHREAD_CANCELED);
+
+ return ret;
}
int
@@ -55,8 +55,8 @@ tests += tst-cnd-basic tst-mtx-trylock tst-cnd-broadcast \
tst-cancel-self-canceltype tst-cancel-self-testcancel \
tst-cancel1 tst-cancel2 tst-cancel3 \
tst-cancel6 tst-cancel8 tst-cancel9 tst-cancel10 tst-cancel11 \
- tst-cancel12 tst-cancel14 tst-cancel15 tst-cancel18 tst-cancel19 \
- tst-cancel21 \
+ tst-cancel12 tst-cancel13 tst-cancel14 tst-cancel15 tst-cancel18 \
+ tst-cancel19 tst-cancel21 \
tst-cancel22 tst-cancel23 tst-cancel26 tst-cancel27 tst-cancel28 \
tst-cleanup0 tst-cleanup1 tst-cleanup2 tst-cleanup3 \
tst-clock1 \
@@ -117,8 +117,8 @@ CFLAGS-tst-cleanup2.c += -fno-builtin
CFLAGS-tst-cleanupx2.c += -fno-builtin
tests += tst-cancelx2 tst-cancelx3 tst-cancelx6 tst-cancelx8 tst-cancelx9 \
- tst-cancelx10 tst-cancelx11 tst-cancelx12 tst-cancelx14 tst-cancelx15 \
- tst-cancelx18 tst-cancelx21 \
+ tst-cancelx10 tst-cancelx11 tst-cancelx12 tst-cancelx13 tst-cancelx14 \
+ tst-cancelx15 tst-cancelx18 tst-cancelx21 \
tst-cleanupx0 tst-cleanupx1 tst-cleanupx2 tst-cleanupx3
ifeq ($(build-shared),yes)
@@ -160,6 +160,7 @@ CFLAGS-tst-cancelx9.c += -fexceptions
CFLAGS-tst-cancelx10.c += -fexceptions
CFLAGS-tst-cancelx11.c += -fexceptions
CFLAGS-tst-cancelx12.c += -fexceptions
+CFLAGS-tst-cancelx13.c += -fexceptions
CFLAGS-tst-cancelx14.c += -fexceptions
CFLAGS-tst-cancelx15.c += -fexceptions
CFLAGS-tst-cancelx18.c += -fexceptions
similarity index 100%
rename from nptl/tst-cancel13.c
rename to sysdeps/pthread/tst-cancel13.c
similarity index 100%
rename from nptl/tst-cancelx13.c
rename to sysdeps/pthread/tst-cancelx13.c