@@ -244,7 +244,7 @@ get_cached_stack (size_t *sizep, void **memp)
/* Clear the DTV. */
dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
- if (! dtv[1 + cnt].pointer.is_static
+ if (!dtv[1 + cnt].pointer.is_static
&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
free (dtv[1 + cnt].pointer.val);
memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
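
For readers following this hunk: dtv[-1].counter holds the number of module slots, and each slot pairs a TLS block pointer with a flag saying whether the block lives in static TLS. A minimal, self-contained sketch of that walk, with a simplified dtv_t layout assumed for illustration (the real type lives in glibc's sysdeps headers and carries more fields):

#include <stdlib.h>
#include <string.h>

#define TLS_DTV_UNALLOCATED ((void *) -1)   /* Assumed sentinel value.  */

typedef union
{
  size_t counter;          /* Used only by dtv[-1]: number of slots.  */
  struct
  {
    void *val;             /* Module's TLS block or TLS_DTV_UNALLOCATED.  */
    int is_static;         /* Nonzero if the block is in static TLS.  */
  } pointer;
} dtv_t;

/* DTV points at slot 0; the bookkeeping entry sits at index -1.  */
static void
clear_dtv (dtv_t *dtv)
{
  /* Free every dynamically allocated TLS block, then zero the slots
     so the reused stack starts with a pristine vector.  */
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    if (!dtv[1 + cnt].pointer.is_static
        && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
      free (dtv[1 + cnt].pointer.val);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
}
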
@@ -753,7 +753,7 @@ __deallocate_stack (struct pthread *pd)
not reset the 'used' flag in the 'tid' field. This is done by
the kernel. If no thread has been created yet this field is
still zero. */
- if (__glibc_likely (! pd->user_stack))
+ if (__glibc_likely (!pd->user_stack))
(void) queue_stack (pd);
else
/* Free the memory associated with the ELF TLS. */
@@ -986,7 +986,7 @@ setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
/* Wait until this thread is cloned. */
if (t->setxid_futex == -1
- && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
+ && !atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
do
lll_futex_wait (&t->setxid_futex, -2, LLL_PRIVATE);
while (t->setxid_futex == -2);
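
The handshake above can be restated with C11 atomics and a raw futex syscall. glibc's atomic_compare_and_exchange_bool_acq returns zero on success, which the true-on-success CAS below inverts; the value protocol (-1 = clone in progress, -2 = a setxid waiter is parked) follows the hunk. A sketch under those assumptions, not the internal API:

#define _GNU_SOURCE
#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Park until the clone completes: claim the -1 state by moving it to
   -2, then sleep on the futex word while it still reads -2.  */
static void
wait_until_cloned (_Atomic int *setxid_futex)
{
  int expected = -1;
  if (atomic_compare_exchange_strong (setxid_futex, &expected, -2))
    do
      syscall (SYS_futex, setxid_futex, FUTEX_WAIT_PRIVATE, -2, NULL);
    while (atomic_load (setxid_futex) == -2);
}
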
@@ -34,7 +34,7 @@ cons (void *arg)
pthread_cond_wait (&cond1, &mut1);
}
- while (! last_round);
+ while (!last_round);
pthread_mutex_unlock (&mut1);
@@ -76,12 +76,12 @@ main (int argc, char *argv[])
for (i = 0; __builtin_expect (i < nrounds, 1); ++i)
{
pthread_mutex_lock (&mut2);
- while (! alldone)
+ while (!alldone)
pthread_cond_wait (&cond2, &mut2);
pthread_mutex_unlock (&mut2);
pthread_mutex_lock (&mut1);
- if (! keeplock)
+ if (!keeplock)
pthread_mutex_unlock (&mut1);
ntogo = nthreads;
@@ -56,7 +56,7 @@ int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
__have_futex_clock_realtime = 1
#else
-#define __set_futex_clock_realtime() do { } while (0)
+# define __set_futex_clock_realtime() do { } while (0)
#endif
/* Version of the library, used in libthread_db to detect mismatches. */
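
This hunk is the first of many below making the same fix, so the rule is worth stating once: GNU style indents preprocessor directives nested inside #if/#ifdef by one space after the '#' per nesting level. A tiny illustration with invented names:

#ifdef OUTER
# ifdef INNER
#  define DEPTH 2
# else
#  define DEPTH 1
# endif
#else
# define DEPTH 0
#endif
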
@@ -429,7 +429,7 @@ main (int argc, char *argv[])
pthread_mutex_unlock (&running_mutex);
- if (! cont)
+ if (!cont)
break;
if (progress)
@@ -703,16 +703,16 @@ clock_getcpuclockid (pid_t pid, clockid_t *clock_id)
#ifdef i386
-#define HP_TIMING_NOW(Var) __asm__ __volatile__ ("rdtsc" : "=A" (Var))
+# define HP_TIMING_NOW(Var) __asm__ __volatile__ ("rdtsc" : "=A" (Var))
#elif defined __x86_64__
# define HP_TIMING_NOW(Var) \
({ unsigned int _hi, _lo; \
asm volatile ("rdtsc" : "=a" (_lo), "=d" (_hi)); \
(Var) = ((unsigned long long int) _hi << 32) | _lo; })
#elif defined __ia64__
-#define HP_TIMING_NOW(Var) __asm__ __volatile__ ("mov %0=ar.itc" : "=r" (Var) : : "memory")
+# define HP_TIMING_NOW(Var) __asm__ __volatile__ ("mov %0=ar.itc" : "=r" (Var) : : "memory")
#else
-#error "HP_TIMING_NOW missing"
+# error "HP_TIMING_NOW missing"
#endif
/* Get current value of CLOCK and store it in TP. */
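
The x86-64 branch above can be exercised standalone: the statement expression reads the 64-bit TSC out of EDX:EAX. This compiles with GCC or Clang on x86-64; real timing code would also have to consider instruction serialization and per-core TSC behavior, which this sketch ignores:

#include <stdio.h>

#define HP_TIMING_NOW(Var) \
  ({ unsigned int _hi, _lo; \
     asm volatile ("rdtsc" : "=a" (_lo), "=d" (_hi)); \
     (Var) = ((unsigned long long int) _hi << 32) | _lo; })

int
main (void)
{
  unsigned long long int start, end;
  HP_TIMING_NOW (start);
  HP_TIMING_NOW (end);
  printf ("delta: %llu cycles\n", end - start);
  return 0;
}
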
@@ -39,16 +39,16 @@ __pthread_cleanup_upto (__jmp_buf target, char *targetframe)
{
#if _STACK_GROWS_DOWN
if ((uintptr_t) cbuf - adj <= targetframe_adj)
- {
- cbuf = NULL;
- break;
- }
+ {
+ cbuf = NULL;
+ break;
+ }
#elif _STACK_GROWS_UP
if ((uintptr_t) cbuf - adj >= targetframe_adj)
- {
- cbuf = NULL;
- break;
- }
+ {
+ cbuf = NULL;
+ break;
+ }
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
@@ -113,7 +113,7 @@ enum
/* Don't include NO_ELISION, as that type is always the same
as the underlying lock type. */
#define PTHREAD_MUTEX_TYPE_ELISION(m) \
- ((m)->__data.__kind & (127|PTHREAD_MUTEX_ELISION_NP))
+ ((m)->__data.__kind & (127 | PTHREAD_MUTEX_ELISION_NP))
#if LLL_PRIVATE == 0 && LLL_SHARED == 128
# define PTHREAD_MUTEX_PSHARED(m) \
@@ -408,7 +408,7 @@ extern struct pthread *__find_thread_by_id (pid_t tid) attribute_hidden
;
#else
weak_function;
-#define __find_thread_by_id(tid) \
+# define __find_thread_by_id(tid) \
(__find_thread_by_id ? (__find_thread_by_id) (tid) : (struct pthread *) NULL)
#endif
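
The macro above guards a call through a weak function: when no object file defines __find_thread_by_id, the symbol's address is null and the macro yields NULL instead of calling through it. The same pattern in isolation, with a hypothetical symbol name (behavior as on GCC/ELF, where an undefined weak symbol resolves to address 0):

#include <stdio.h>

/* Weak declaration; nothing in this program defines it.  */
extern int optional_hook (int) __attribute__ ((weak));

int
main (void)
{
  /* Test the address before calling, as the macro above does.  */
  int r = optional_hook ? optional_hook (42) : -1;
  printf ("result: %d\n", r);   /* Prints -1 here.  */
  return 0;
}
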
@@ -85,11 +85,11 @@ __old_pthread_attr_setstack (pthread_attr_t *attr, void *stackaddr,
# endif
iattr->stacksize = stacksize;
-#if _STACK_GROWS_DOWN
+# if _STACK_GROWS_DOWN
iattr->stackaddr = (char *) stackaddr + stacksize;
-#else
+# else
iattr->stackaddr = (char *) stackaddr;
-#endif
+# endif
iattr->flags |= ATTR_FLAG_STACKADDR;
return 0;
@@ -81,7 +81,7 @@ __condvar_cleanup (void *arg)
lll_unlock (cbuffer->cond->__data.__lock, pshared);
/* Wake everybody to make sure no condvar signal gets lost. */
- if (! destroying)
+ if (!destroying)
lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared);
/* Get the mutex before returning unless asynchronous cancellation
@@ -89,9 +89,9 @@ __condvar_cleanup (void *arg)
if (!(USE_REQUEUE_PI (cbuffer->mutex))
|| ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)))
- {
- __pthread_mutex_cond_lock (cbuffer->mutex);
- }
+ {
+ __pthread_mutex_cond_lock (cbuffer->mutex);
+ }
else
__pthread_mutex_cond_lock_adjust (cbuffer->mutex);
}
@@ -286,7 +286,7 @@ start_thread (void *arg)
int not_first_call;
not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
- if (__glibc_likely (! not_first_call))
+ if (__glibc_likely (!not_first_call))
{
/* Store the new cleanup handler info. */
THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);
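
For context on the '!not_first_call' test: setjmp returns 0 on the direct call and nonzero when re-entered via longjmp, so the likely branch is the thread's first pass, and cancellation later unwinds back into the other path. The idiom in miniature:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf buf;

int
main (void)
{
  int not_first_call = setjmp (buf);
  if (!not_first_call)
    {
      printf ("first pass: run the thread body\n");
      longjmp (buf, 1);    /* Stands in for the cancellation unwind.  */
    }
  printf ("second pass: clean up and exit\n");
  return 0;
}
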
@@ -31,9 +31,9 @@ pthread_key_delete (key)
{
unsigned int seq = __pthread_keys[key].seq;
- if (__builtin_expect (! KEY_UNUSED (seq), 1)
- && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[key].seq,
- seq + 1, seq))
+ if (__builtin_expect (!KEY_UNUSED (seq), 1)
+ && !atomic_compare_and_exchange_bool_acq (&__pthread_keys[key].seq,
+ seq + 1, seq))
/* We deleted a valid key. */
result = 0;
}
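
The seq test above relies on glibc's parity convention for key slots (KEY_UNUSED checks the low bit: even means free), and the CAS bump retires the generation so racing users of the stale key lose cleanly. A compact model of that protocol, with C11 atomics standing in for the internal atomic_compare_and_exchange_bool_acq:

#include <stdatomic.h>
#include <stdio.h>

#define KEY_UNUSED(seq) (((seq) & 1) == 0)   /* Even = slot free.  */

static _Atomic unsigned int slot_seq = 1;    /* Odd = key allocated.  */

static int
key_delete (void)
{
  unsigned int seq = atomic_load (&slot_seq);
  if (!KEY_UNUSED (seq)
      && atomic_compare_exchange_strong (&slot_seq, &seq, seq + 1))
    return 0;      /* Deleted a valid key.  */
  return -1;       /* The real function returns EINVAL.  */
}

int
main (void)
{
  printf ("first: %d, second: %d\n", key_delete (), key_delete ());
  return 0;
}
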
@@ -26,12 +26,12 @@
#include <stap-probe.h>
#ifndef lll_lock_elision
-#define lll_lock_elision(lock, try_lock, private) ({ \
+# define lll_lock_elision(lock, try_lock, private) ({ \
lll_lock (lock, private); 0; })
#endif
#ifndef lll_trylock_elision
-#define lll_trylock_elision(a,t) lll_trylock(a)
+# define lll_trylock_elision(a, t) lll_trylock (a)
#endif
#ifndef LLL_MUTEX_LOCK
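
The fallbacks above lean on GCC statement expressions: when a port defines no elision, lll_lock_elision degrades to the plain lock and evaluates to 0, so every call site can test a return value regardless of configuration. A toy version of that shape, where plain_lock stands in for lll_lock:

#include <stdio.h>

static void
plain_lock (int *lock)
{
  *lock = 1;   /* Placeholder for the real low-level lock.  */
}

/* The last expression in a ({ ... }) block is its value, so this
   "locks" and then yields 0, mimicking a successful elided lock.  */
#define lock_elision(lock) ({ plain_lock (lock); 0; })

int
main (void)
{
  int lock = 0;
  if (lock_elision (&lock) == 0)
    printf ("acquired, lock=%d\n", lock);
  return 0;
}
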
@@ -44,14 +44,14 @@
PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_ELISION(mutex) \
lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
- PTHREAD_MUTEX_PSHARED (mutex))
+ PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
- lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
- PTHREAD_MUTEX_PSHARED (mutex))
+ lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
+ PTHREAD_MUTEX_PSHARED (mutex))
#endif
#ifndef FORCE_ELISION
-#define FORCE_ELISION(m, s)
+# define FORCE_ELISION(m, s)
#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
@@ -117,9 +117,9 @@ __pthread_mutex_lock (mutex)
mutex->__data.__count = 1;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
- == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
{
- if (! __is_smp)
+ if (!__is_smp)
goto simple;
if (LLL_MUTEX_TRYLOCK (mutex) != 0)
@@ -57,7 +57,7 @@ pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling)
}
int oldval = mutex->__data.__lock;
- if (! locked)
+ if (!locked)
do
{
/* Need to lock the mutex, but without obeying the priority
@@ -26,15 +26,15 @@
#include <stap-probe.h>
#ifndef lll_timedlock_elision
-#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
+# define lll_timedlock_elision(a, dummy, b, c) lll_timedlock (a, b, c)
#endif
#ifndef lll_trylock_elision
-#define lll_trylock_elision(a,t) lll_trylock(a)
+# define lll_trylock_elision(a, t) lll_trylock (a)
#endif
#ifndef FORCE_ELISION
-#define FORCE_ELISION(m, s)
+# define FORCE_ELISION(m, s)
#endif
int
@@ -55,7 +55,7 @@ pthread_mutex_timedlock (mutex, abstime)
PTHREAD_MUTEX_TIMED_NP))
{
/* Recursive mutex. */
- case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
+ case PTHREAD_MUTEX_RECURSIVE_NP | PTHREAD_MUTEX_ELISION_NP:
case PTHREAD_MUTEX_RECURSIVE_NP:
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -98,7 +98,7 @@ pthread_mutex_timedlock (mutex, abstime)
break;
case PTHREAD_MUTEX_TIMED_ELISION_NP:
- elision: __attribute__((unused))
+ elision: __attribute__ ((unused))
/* Don't record ownership */
return lll_timedlock_elision (mutex->__data.__lock,
mutex->__data.__spins,
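
A note on the construct being re-spaced here: GCC allows attribute specifiers after a label's colon, and marking 'elision:' unused keeps -Wunused-label quiet in configurations whose switch never jumps to it. A minimal demonstration:

#include <stdio.h>

int
main (void)
{
  /* No goto targets this label; the attribute silences the
     -Wunused-label warning that would otherwise appear.  */
 maybe_target: __attribute__ ((unused))
  printf ("fell through the label\n");
  return 0;
}
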
@@ -107,7 +107,7 @@ pthread_mutex_timedlock (mutex, abstime)
case PTHREAD_MUTEX_ADAPTIVE_NP:
- if (! __is_smp)
+ if (!__is_smp)
goto simple;
if (lll_trylock (mutex->__data.__lock) != 0)
@@ -23,11 +23,11 @@
#include <lowlevellock.h>
#ifndef lll_trylock_elision
-#define lll_trylock_elision(a,t) lll_trylock(a)
+# define lll_trylock_elision(a, t) lll_trylock (a)
#endif
#ifndef DO_ELISION
-#define DO_ELISION(m) 0
+# define DO_ELISION(m) 0
#endif
/* We don't force elision in trylock, because this can lead to inconsistent
@@ -44,7 +44,7 @@ __pthread_mutex_trylock (mutex)
PTHREAD_MUTEX_TIMED_NP))
{
/* Recursive mutex. */
- case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
+ case PTHREAD_MUTEX_RECURSIVE_NP | PTHREAD_MUTEX_ELISION_NP:
case PTHREAD_MUTEX_RECURSIVE_NP:
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -72,7 +72,7 @@ __pthread_mutex_trylock (mutex)
elision:
if (lll_trylock_elision (mutex->__data.__lock,
mutex->__data.__elision) != 0)
- break;
+ break;
/* Don't record the ownership. */
return 0;
@@ -401,7 +401,7 @@ __pthread_mutex_trylock (mutex)
}
#ifndef __pthread_mutex_trylock
-#ifndef pthread_mutex_trylock
+# ifndef pthread_mutex_trylock
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
-#endif
+# endif
#endif
@@ -24,7 +24,7 @@
#include <stap-probe.h>
#ifndef lll_unlock_elision
-#define lll_unlock_elision(a,b) ({ lll_unlock (a,b); 0; })
+# define lll_unlock_elision(a, b) ({ lll_unlock (a, b); 0; })
#endif
static int
@@ -64,10 +64,10 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
{
/* Don't reset the owner/users fields for elision. */
return lll_unlock_elision (mutex->__data.__lock,
- PTHREAD_MUTEX_PSHARED (mutex));
+ PTHREAD_MUTEX_PSHARED (mutex));
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
- == PTHREAD_MUTEX_RECURSIVE_NP, 1))
+ == PTHREAD_MUTEX_RECURSIVE_NP, 1))
{
/* Recursive mutex. */
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
@@ -79,14 +79,14 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
goto normal;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
- == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
goto normal;
else
{
/* Error checking mutex. */
assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_islocked (mutex->__data.__lock))
+ || !lll_islocked (mutex->__data.__lock))
return EPERM;
goto normal;
}
@@ -129,7 +129,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_islocked (mutex->__data.__lock))
+ || !lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
@@ -199,7 +199,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_islocked (mutex->__data.__lock))
+ || !lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
@@ -209,7 +209,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
&& __builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_INCONSISTENT, 0))
pi_notrecoverable:
- newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
+ newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
{
@@ -259,7 +259,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
/* Error checking mutex. */
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
- || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
+ || (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
return EPERM;
/* FALLTHROUGH */
@@ -33,7 +33,7 @@ pthread_mutexattr_getprioceiling (attr, prioceiling)
ceiling = ((iattr->mutexkind & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
>> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT);
- if (! ceiling)
+ if (!ceiling)
{
if (__sched_fifo_min_prio == -1)
__init_sched_fifo_prio ();
@@ -26,7 +26,7 @@
/* Acquire read lock for RWLOCK. Slow path. */
-static int __attribute__((noinline))
+static int __attribute__ ((noinline))
__pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
{
int result = 0;
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
int result = 0;
/* Make sure we are alone. */
- lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -26,7 +26,7 @@
/* Acquire write lock for RWLOCK. */
-static int __attribute__((noinline))
+static int __attribute__ ((noinline))
__pthread_rwlock_wrlock_slow (pthread_rwlock_t *rwlock)
{
int result = 0;
@@ -102,8 +102,8 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Get the rwlock if there is no writer and no reader. */
- if (__glibc_likely((rwlock->__data.__writer |
- rwlock->__data.__nr_readers) == 0))
+ if (__glibc_likely ((rwlock->__data.__writer |
+ rwlock->__data.__nr_readers) == 0))
{
/* Mark self as writer. */
rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid);
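
On the fast path re-spaced above, OR-ing the writer TID with the reader count tests "no writer and no readers" with a single comparison. Stated as a standalone predicate over a simplified field layout (an assumption; the real fields sit in __data):

#include <stdbool.h>

struct rwlock_fields          /* Simplified stand-in for __data.  */
{
  unsigned int writer;        /* TID of the writer, 0 if none.  */
  unsigned int nr_readers;    /* Number of live readers.  */
};

static bool
wrlock_uncontended (const struct rwlock_fields *d)
{
  /* Both fields are zero exactly when their bitwise OR is zero.  */
  return (d->writer | d->nr_readers) == 0;
}
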
@@ -52,7 +52,7 @@ sem_close (sem)
rec = NULL;
the_sem = sem;
twalk (__sem_mappings, walker);
- if (rec != NULL)
+ if (rec != NULL)
{
/* Check the reference counter. If it is going to be zero, free
all the resources. */
@@ -264,7 +264,7 @@ sem_open (const char *name, int oflag, ...)
{
try_again:
fd = __libc_open (finalname,
- (oflag & ~(O_CREAT|O_ACCMODE)) | O_NOFOLLOW | O_RDWR);
+ (oflag & ~(O_CREAT | O_ACCMODE)) | O_NOFOLLOW | O_RDWR);
if (fd == -1)
{
@@ -191,7 +191,7 @@ client (void *arg)
servpoll[i].events = POLLIN;
z_valid = new_coord ();
- if (! z_valid)
+ if (!z_valid)
/* No more to do. Clear the event fields. */
for (i = 0; i < nserv; ++i)
if (servpoll[i].events == POLLOUT)
@@ -206,7 +206,7 @@ client (void *arg)
else if (servpoll[i].events != 0)
cont = true;
- if (! cont && ! z_valid)
+ if (!cont && !z_valid)
break;
}
@@ -26,15 +26,15 @@
#ifdef HAVE_FORCED_UNWIND
-#ifdef _STACK_GROWS_DOWN
-# define FRAME_LEFT(frame, other, adj) \
+# ifdef _STACK_GROWS_DOWN
+# define FRAME_LEFT(frame, other, adj) \
((uintptr_t) frame - adj >= (uintptr_t) other - adj)
-#elif _STACK_GROWS_UP
-# define FRAME_LEFT(frame, other, adj) \
+# elif _STACK_GROWS_UP
+# define FRAME_LEFT(frame, other, adj) \
((uintptr_t) frame - adj <= (uintptr_t) other - adj)
-#else
-# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
-#endif
+# else
+# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
+# endif
static _Unwind_Reason_Code
unwind_stop (int version, _Unwind_Action actions,
@@ -58,8 +58,8 @@ unwind_stop (int version, _Unwind_Action actions,
of a function is NOT within its stack frame; it's the SP of the
previous frame. */
if ((actions & _UA_END_OF_STACK)
- || ! _JMPBUF_CFA_UNWINDS_ADJ (buf->cancel_jmp_buf[0].jmp_buf, context,
- adj))
+ || !_JMPBUF_CFA_UNWINDS_ADJ (buf->cancel_jmp_buf[0].jmp_buf, context,
+ adj))
do_longjump = 1;
if (__glibc_unlikely (curp != NULL))
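
Pulling this file's two pieces together: FRAME_LEFT (earlier hunk) and the _JMPBUF_CFA_UNWINDS_ADJ test both reduce to comparing frame addresses after re-biasing by adj, with the inequality's direction fixed by stack growth. A sketch of that comparison for both directions (the real macros also cope with the CFA quirk described in the comment above):

#include <stdint.h>
#include <stdbool.h>

/* "frame is not newer than other": on a downward-growing stack, older
   frames sit at higher addresses; on an upward one, at lower ones.
   The adj bias lets frames on different stacks (e.g. a signal
   altstack) compare within a single ordering.  */
static bool
frame_left_down (void *frame, void *other, uintptr_t adj)
{
  return (uintptr_t) frame - adj >= (uintptr_t) other - adj;
}

static bool
frame_left_up (void *frame, void *other, uintptr_t adj)
{
  return (uintptr_t) frame - adj <= (uintptr_t) other - adj;
}
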