@@ -585,17 +585,24 @@ extern void __wait_lookup_done (void) attribute_hidden;
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
#endif
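+/* Returns true if the kernel does not support PI futexes.  */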
+extern bool prio_inherit_missing (void) attribute_hidden;
+
/* Test if the mutex is suitable for the FUTEX_WAIT_REQUEUE_PI operation. */
-#if (defined lll_futex_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
-# define USE_REQUEUE_PI(mut) \
- ((mut) && (mut) != (void *) ~0l \
- && (((mut)->__data.__kind \
- & (PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP)) \
- == PTHREAD_MUTEX_PRIO_INHERIT_NP))
-#else
-# define USE_REQUEUE_PI(mut) 0
+static inline bool
+use_requeue_pi (pthread_mutex_t *mut)
+{
+#ifndef __ASSUME_REQUEUE_PI
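+  /* Check at run time whether the kernel supports PI futexes.  */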
+ if (prio_inherit_missing ())
+ return false;
#endif
+  return ((mut) && (mut) != (void *) ~0l
+	  && (((mut)->__data.__kind
+	       & (PTHREAD_MUTEX_PRIO_INHERIT_NP
+		  | PTHREAD_MUTEX_ROBUST_NORMAL_NP))
+	      == PTHREAD_MUTEX_PRIO_INHERIT_NP));
+}
/* Returns 0 if POL is a valid scheduling policy. */
static inline int
@@ -60,9 +60,8 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
|| PTHREAD_MUTEX_PSHARED (mut) & PTHREAD_MUTEX_PSHARED_BIT)
goto wake_all;
-#if (defined lll_futex_cmp_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
- if (USE_REQUEUE_PI (mut))
+#ifdef lll_futex_cmp_requeue_pi
+ if (use_requeue_pi (mut))
{
if (lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, INT_MAX,
&mut->__data.__lock, futex_val,
@@ -46,11 +46,10 @@ __pthread_cond_signal (pthread_cond_t *cond)
++cond->__data.__wakeup_seq;
++cond->__data.__futex;
-#if (defined lll_futex_cmp_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_cmp_requeue_pi
pthread_mutex_t *mut = cond->__data.__mutex;
- if (USE_REQUEUE_PI (mut)
+ if (use_requeue_pi (mut)
/* This can only really fail with a ENOSYS, since nobody can modify
futex while we have the cond_lock. */
&& lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0,
@@ -63,8 +63,7 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
int pshared = (cond->__data.__mutex == (void *) ~0l)
? LLL_SHARED : LLL_PRIVATE;
-#if (defined lll_futex_timed_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_timed_wait_requeue_pi
int pi_flag = 0;
#endif
@@ -161,8 +160,7 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
/* REQUEUE_PI was implemented after FUTEX_CLOCK_REALTIME, so it is sufficient
to check just the former. */
-#if (defined lll_futex_timed_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_timed_wait_requeue_pi
/* If pi_flag remained 1 then it means that we had the lock and the mutex
but a spurious waker raced ahead of us. Give back the mutex before
going into wait again. */
@@ -171,7 +169,7 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
__pthread_mutex_cond_lock_adjust (mutex);
__pthread_mutex_unlock_usercnt (mutex, 0);
}
- pi_flag = USE_REQUEUE_PI (mutex);
+ pi_flag = use_requeue_pi (mutex);
if (pi_flag)
{
@@ -250,8 +248,7 @@ __pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
__pthread_cleanup_pop (&buffer, 0);
/* Get the mutex before returning. */
-#if (defined lll_futex_timed_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_timed_wait_requeue_pi
if (pi_flag)
{
__pthread_mutex_cond_lock_adjust (mutex);
@@ -86,7 +86,7 @@ __condvar_cleanup (void *arg)
/* Get the mutex before returning unless asynchronous cancellation
is in effect. We don't try to get the mutex if we already own it. */
- if (!(USE_REQUEUE_PI (cbuffer->mutex))
+ if (!(use_requeue_pi (cbuffer->mutex))
|| ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)))
{
@@ -106,8 +106,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
int pshared = (cond->__data.__mutex == (void *) ~0l)
? LLL_SHARED : LLL_PRIVATE;
-#if (defined lll_futex_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_wait_requeue_pi
int pi_flag = 0;
#endif
@@ -160,8 +159,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
-#if (defined lll_futex_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_wait_requeue_pi
/* If pi_flag remained 1 then it means that we had the lock and the mutex
but a spurious waker raced ahead of us. Give back the mutex before
going into wait again. */
@@ -170,7 +168,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
__pthread_mutex_cond_lock_adjust (mutex);
__pthread_mutex_unlock_usercnt (mutex, 0);
}
- pi_flag = USE_REQUEUE_PI (mutex);
+ pi_flag = use_requeue_pi (mutex);
if (pi_flag)
{
@@ -221,8 +219,7 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
__pthread_cleanup_pop (&buffer, 0);
/* Get the mutex before returning. Not needed for PI. */
-#if (defined lll_futex_wait_requeue_pi \
- && defined __ASSUME_REQUEUE_PI)
+#ifdef lll_futex_wait_requeue_pi
if (pi_flag)
{
__pthread_mutex_cond_lock_adjust (mutex);
@@ -33,7 +33,7 @@ static const struct pthread_mutexattr default_mutexattr =
};
-static bool
+bool
prio_inherit_missing (void)
{
#ifdef __NR_futex