@@ -62,6 +62,18 @@ size_t _dl_tls_static_surplus;
dynamic TLS access (e.g. with TLSDESC). */
size_t _dl_tls_static_optional;
+/* Size of the features present in the rseq area. */
+size_t _dl_tls_rseq_feature_size;
+
+/* Alignment requirement of the rseq area. */
+size_t _dl_tls_rseq_align;
+
+/* Size of the rseq area in the static TLS block. */
+size_t _dl_tls_rseq_size;
+
+/* Offset of the rseq area from the thread pointer. */
+ptrdiff_t _dl_tls_rseq_offset;
+
/* Generation counter for the dtv. */
size_t _dl_tls_generation;
@@ -135,6 +147,21 @@ __libc_setup_tls (void)
/* Calculate the size of the static TLS surplus, with 0 auditors. */
_dl_tls_static_surplus_init (0);
+ /* Get the rseq auxiliary vector values; 0 is returned when not
+ implemented, and we then default to the rseq ABI minimums. */
+ size_t rseq_size = MAX (GLRO(dl_tls_rseq_feature_size), 32);
+ size_t rseq_align = MAX (GLRO(dl_tls_rseq_align), 32);
+
+ /* Make sure the rseq area size is a multiple of the requested
+ alignment. */
+ rseq_size = roundup (rseq_size, rseq_align);
+
+ /* Increase the max_align if necessary. */
+ max_align = MAX (max_align, rseq_align);
+
+ /* Record the rseq_area block size. */
+ GLRO (dl_tls_rseq_size) = rseq_size;
+
/* We have to set up the TCB block which also (possibly) contains
'errno'. Therefore we avoid 'malloc' which might touch 'errno'.
Instead we use 'sbrk' which would only uses 'errno' if it fails.
@@ -144,13 +171,13 @@ __libc_setup_tls (void)
/* Align the TCB offset to the maximum alignment, as
_dl_allocate_tls_storage (in elf/dl-tls.c) does using __libc_memalign
and dl_tls_static_align. */
- tcb_offset = roundup (memsz + GLRO(dl_tls_static_surplus), max_align);
+ tcb_offset = roundup (memsz + rseq_size + GLRO(dl_tls_static_surplus), max_align);
tlsblock = _dl_early_allocate (tcb_offset + TLS_INIT_TCB_SIZE + max_align);
if (tlsblock == NULL)
_startup_fatal_tls_error ();
#elif TLS_DTV_AT_TP
tcb_offset = roundup (TLS_INIT_TCB_SIZE, align ?: 1);
- tlsblock = _dl_early_allocate (tcb_offset + memsz + max_align
+ tlsblock = _dl_early_allocate (tcb_offset + memsz + rseq_size + max_align
+ TLS_PRE_TCB_SIZE
+ GLRO(dl_tls_static_surplus));
if (tlsblock == NULL)
@@ -175,9 +202,17 @@ __libc_setup_tls (void)
_dl_static_dtv[2].pointer.val = ((char *) tlsblock + tcb_offset
- roundup (memsz, align ?: 1));
main_map->l_tls_offset = roundup (memsz, align ?: 1);
+
+ /* Record the rseq_area offset. The offset is negative with TLS_TCB_AT_TP
+ because the TLS blocks are located before the thread pointer. */
+ GLRO (dl_tls_rseq_offset) = - roundup (main_map->l_tls_offset + rseq_size, rseq_align);
#elif TLS_DTV_AT_TP
_dl_static_dtv[2].pointer.val = (char *) tlsblock + tcb_offset;
main_map->l_tls_offset = tcb_offset;
+
+ /* Record the rseq_area offset. The offset is positive with TLS_DTV_AT_TP
+ because the TLS blocks are located after the thread pointer. */
+ GLRO (dl_tls_rseq_offset) = roundup (tcb_offset + memsz, rseq_align);
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif
@@ -215,5 +250,8 @@ __libc_setup_tls (void)
memsz += tcb_offset;
#endif
+ /* Add rseq area to the used size. */
+ memsz = roundup (memsz + rseq_size, rseq_align);
+
init_static_tls (memsz, MAX (TCB_ALIGNMENT, max_align));
}
@@ -75,6 +75,12 @@
/* Default for dl_tls_static_optional. */
#define OPTIONAL_TLS 512
+/* Minimum size of the rseq area. */
+#define TLS_DL_RSEQ_MIN_SIZE 32
+
+/* Minimum size of the rseq area alignment. */
+#define TLS_DL_RSEQ_MIN_ALIGN 32
+
/* Compute the static TLS surplus based on the namespace count and the
TLS space that can be used for optimizations. */
static inline int
@@ -298,6 +304,29 @@ _dl_determine_tlsoffset (void)
slotinfo[cnt].map->l_tls_offset = off;
}
+ /* Insert the rseq area block after the last TLS block. */
+
+ /* Get the rseq auxiliary vector values; 0 is returned when not
+ implemented, and we then default to the rseq ABI minimums. */
+ size_t rseq_size = MAX (GLRO (dl_tls_rseq_feature_size), TLS_DL_RSEQ_MIN_SIZE);
+ size_t rseq_align = MAX (GLRO (dl_tls_rseq_align), TLS_DL_RSEQ_MIN_ALIGN);
+
+ /* Make sure the rseq area size is a multiple of the requested
+ alignment. */
+ rseq_size = roundup (rseq_size, rseq_align);
+
+ /* Add the rseq area block to the global offset. */
+ offset = roundup (offset, rseq_align) + rseq_size;
+
+ /* Increase the max_align if necessary. */
+ max_align = MAX (max_align, rseq_align);
+
+ /* Record the rseq_area block size and offset. The offset is negative
+ with TLS_TCB_AT_TP because the TLS blocks are located before the
+ thread pointer. */
+ GLRO (dl_tls_rseq_offset) = -offset;
+ GLRO (dl_tls_rseq_size) = rseq_size;
+
GL(dl_tls_static_used) = offset;
GLRO (dl_tls_static_size) = (roundup (offset + GLRO(dl_tls_static_surplus),
max_align)
@@ -343,6 +372,32 @@ _dl_determine_tlsoffset (void)
offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
}
+ /* Insert the rseq area block after the last TLS block. */
+
+ /* Get the rseq auxiliary vector values; 0 is returned when not
+ implemented, and we then default to the rseq ABI minimums. */
+ size_t rseq_size = MAX (GLRO (dl_tls_rseq_feature_size), TLS_DL_RSEQ_MIN_SIZE);
+ size_t rseq_align = MAX (GLRO (dl_tls_rseq_align), TLS_DL_RSEQ_MIN_ALIGN);
+
+ /* Make sure the rseq area size is a multiple of the requested
+ alignment. */
+ rseq_size = roundup (rseq_size, rseq_align);
+
+ /* Align the global offset to the beginning of the rseq area. */
+ offset = roundup (offset, rseq_align);
+
+ /* Record the rseq_area block size and offset. The offset is positive
+ with TLS_DTV_AT_TP because the TLS blocks are located after the
+ thread pointer. */
+ GLRO (dl_tls_rseq_size) = rseq_size;
+ GLRO (dl_tls_rseq_offset) = offset;
+
+ /* Add the rseq area block to the global offset. */
+ offset += rseq_size;
+
+ /* Increase the max_align if necessary. */
+ max_align = MAX (max_align, rseq_align);
+
GL(dl_tls_static_used) = offset;
GLRO (dl_tls_static_size) = roundup (offset + GLRO(dl_tls_static_surplus),
TCB_ALIGNMENT);
@@ -78,6 +78,18 @@ __rtld_static_init (struct link_map *map)
extern __typeof (dl->_dl_tls_static_size) _dl_tls_static_size
attribute_hidden;
dl->_dl_tls_static_size = _dl_tls_static_size;
+ extern __typeof (dl->_dl_tls_rseq_feature_size) _dl_tls_rseq_feature_size
+ attribute_hidden;
+ dl->_dl_tls_rseq_feature_size = _dl_tls_rseq_feature_size;
+ extern __typeof (dl->_dl_tls_rseq_align) _dl_tls_rseq_align
+ attribute_hidden;
+ dl->_dl_tls_rseq_align = _dl_tls_rseq_align;
+ extern __typeof (dl->_dl_tls_rseq_size) _dl_tls_rseq_size
+ attribute_hidden;
+ dl->_dl_tls_rseq_size = _dl_tls_rseq_size;
+ extern __typeof (dl->_dl_tls_rseq_offset) _dl_tls_rseq_offset
+ attribute_hidden;
+ dl->_dl_tls_rseq_offset = _dl_tls_rseq_offset;
dl->_dl_find_object = _dl_find_object;
__rtld_static_init_arch (map, dl);
@@ -404,25 +404,11 @@ struct pthread
/* Used on strsignal. */
struct tls_internal_t tls_state;
- /* rseq area registered with the kernel. Use a custom definition
- here to isolate from kernel struct rseq changes. The
- implementation of sched_getcpu needs acccess to the cpu_id field;
- the other fields are unused and not included here. */
- union
- {
- struct
- {
- uint32_t cpu_id_start;
- uint32_t cpu_id;
- };
- char pad[32]; /* Original rseq area size. */
- } rseq_area __attribute__ ((aligned (32)));
-
/* Amount of end padding, if any, in this structure.
- This definition relies on rseq_area being last. */
+ This definition relies on tls_state being last. */
#define PTHREAD_STRUCT_END_PADDING \
- (sizeof (struct pthread) - offsetof (struct pthread, rseq_area) \
- + sizeof ((struct pthread) {}.rseq_area))
+ (sizeof (struct pthread) - offsetof (struct pthread, tls_state) \
+ + sizeof ((struct pthread) {}.tls_state))
} __attribute ((aligned (TCB_ALIGNMENT)));
static inline bool
@@ -691,7 +691,7 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
/* Inherit rseq registration state. Without seccomp filters, rseq
registration will either always fail or always succeed. */
- if ((int) THREAD_GETMEM_VOLATILE (self, rseq_area.cpu_id) >= 0)
+ if ((int) RSEQ_GETMEM_VOLATILE (rseq_get_area(), cpu_id) >= 0)
pd->flags |= ATTR_FLAG_DO_RSEQ;
/* Initialize the field for the ID of the thread which is waiting
@@ -610,6 +610,18 @@ struct rtld_global_ro
See comments in elf/dl-tls.c where it is initialized. */
EXTERN size_t _dl_tls_static_surplus;
+ /* Size of the features present in the rseq area. */
+ EXTERN size_t _dl_tls_rseq_feature_size;
+
+ /* Alignment requirement of the rseq area. */
+ EXTERN size_t _dl_tls_rseq_align;
+
+ /* Size of the rseq area in the static TLS block. */
+ EXTERN size_t _dl_tls_rseq_size;
+
+ /* Offset of the rseq area from the thread pointer. */
+ EXTERN ptrdiff_t _dl_tls_rseq_offset;
+
/* Name of the shared object to be profiled (if any). */
EXTERN const char *_dl_profile;
/* Filename of the output file. */
@@ -123,3 +123,59 @@
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
}})
+
+
+/* Read member of the RSEQ area directly. */
+#define RSEQ_GETMEM_VOLATILE(descr, member) \
+ ({ __typeof (descr->member) __value; \
+ ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset); \
+ _Static_assert (sizeof (__value) == 1 \
+ || sizeof (__value) == 4 \
+ || sizeof (__value) == 8, \
+ "size of per-thread data"); \
+ if (sizeof (__value) == 1) \
+ asm volatile ("movb %%gs:%P2(%3),%b0" \
+ : "=q" (__value) \
+ : "0" (0), "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else if (sizeof (__value) == 4) \
+ asm volatile ("movl %%gs:%P1(%2),%0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else /* 8 */ \
+ { \
+ asm volatile ("movl %%gs:%P1(%2),%%eax\n\t" \
+ "movl %%gs:4+%P1(%2),%%edx" \
+ : "=&A" (__value) \
+ : "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ } \
+ __value; })
+
+/* Set member of the RSEQ area directly. */
+#define RSEQ_SETMEM(descr, member, value) \
+ ({ \
+ ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset); \
+ _Static_assert (sizeof (descr->member) == 1 \
+ || sizeof (descr->member) == 4 \
+ || sizeof (descr->member) == 8, \
+ "size of per-thread data"); \
+ if (sizeof (descr->member) == 1) \
+ asm volatile ("movb %b0,%%gs:%P1(%2)" : \
+ : "iq" (value), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else if (sizeof (descr->member) == 4) \
+ asm volatile ("movl %0,%%gs:%P1(%2)" : \
+ : "ir" (value), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else /* 8 */ \
+ { \
+ asm volatile ("movl %%eax,%%gs:%P1(%2)\n\t" \
+ "movl %%edx,%%gs:4+%P1(%2)" : \
+ : "A" ((uint64_t) cast_to_integer (value)), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ }})
@@ -103,13 +103,16 @@ __tls_init_tp (void)
{
bool do_rseq = true;
do_rseq = TUNABLE_GET (rseq, int, NULL);
- if (rseq_register_current_thread (pd, do_rseq))
- {
- /* We need a writable view of the variables. They are in
- .data.relro and are not yet write-protected. */
- extern unsigned int size __asm__ ("__rseq_size");
- size = sizeof (pd->rseq_area);
- }
+ rseq_register_current_thread (pd, do_rseq);
+
+ // FIXME: Even if the registration fails, we need to communicate the size
+ // of the allocated rseq area to an application that could attempt the
+ // registration itself.
+
+ /* We need a writable view of the variables. They are in
+ .data.relro and are not yet write-protected. */
+ extern unsigned int size __asm__ ("__rseq_size");
+ size = GLRO (dl_tls_rseq_size);
#ifdef RSEQ_SIG
/* This should be a compile-time constant, but the current
@@ -118,7 +121,7 @@ __tls_init_tp (void)
if the rseq registration may have happened because RSEQ_SIG is
defined. */
extern ptrdiff_t offset __asm__ ("__rseq_offset");
- offset = (char *) &pd->rseq_area - (char *) __thread_pointer ();
+ offset = GLRO (dl_tls_rseq_offset);
#endif
}
@@ -30,3 +30,8 @@
descr->member = (value)
#define THREAD_SETMEM_NC(descr, member, idx, value) \
descr->member[idx] = (value)
+
+#define RSEQ_GETMEM_VOLATILE(descr, member) \
+ THREAD_GETMEM_VOLATILE(descr, member)
+#define RSEQ_SETMEM(descr, member, value) \
+ THREAD_SETMEM(descr, member, value)
@@ -57,5 +57,8 @@ void _dl_parse_auxv (ElfW(auxv_t) *av, dl_parse_auxv_t auxv_values)
GLRO(dl_sysinfo) = auxv_values[AT_SYSINFO];
#endif
+ GLRO(dl_tls_rseq_feature_size) = auxv_values[AT_RSEQ_FEATURE_SIZE];
+ GLRO(dl_tls_rseq_align) = auxv_values[AT_RSEQ_ALIGN];
+
DL_PLATFORM_AUXV
}
@@ -24,6 +24,24 @@
#include <stdbool.h>
#include <stdio.h>
#include <sys/rseq.h>
+#include <thread_pointer.h>
+#include <ldsodefs.h>
+
+/* rseq area registered with the kernel. Use a custom definition
+ here to isolate from kernel struct rseq changes. The
+ implementation of sched_getcpu needs access to the cpu_id field;
+ the other fields are unused and not included here. */
+struct rseq_area
+{
+ uint32_t cpu_id_start;
+ uint32_t cpu_id;
+};
+
+static inline struct rseq_area *
+rseq_get_area(void)
+{
+ return (struct rseq_area *) ((char *) __thread_pointer() + GLRO (dl_tls_rseq_offset));
+}
#ifdef RSEQ_SIG
static inline bool
@@ -31,20 +49,23 @@ rseq_register_current_thread (struct pthread *self, bool do_rseq)
{
if (do_rseq)
{
- int ret = INTERNAL_SYSCALL_CALL (rseq, &self->rseq_area,
- sizeof (self->rseq_area),
+ /* The kernel expects 'rseq_area->rseq_cs == NULL' on registration, zero
+ the whole rseq area. */
+ memset(rseq_get_area(), 0, GLRO (dl_tls_rseq_size));
+ int ret = INTERNAL_SYSCALL_CALL (rseq, rseq_get_area(),
+ GLRO (dl_tls_rseq_size),
0, RSEQ_SIG);
if (!INTERNAL_SYSCALL_ERROR_P (ret))
return true;
}
- THREAD_SETMEM (self, rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+ RSEQ_SETMEM (rseq_get_area(), cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
return false;
}
#else /* RSEQ_SIG */
static inline bool
rseq_register_current_thread (struct pthread *self, bool do_rseq)
{
- THREAD_SETMEM (self, rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+ RSEQ_SETMEM (rseq_get_area(), cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
return false;
}
#endif /* RSEQ_SIG */
@@ -19,6 +19,7 @@
#include <sched.h>
#include <sysdep.h>
#include <sysdep-vdso.h>
+#include <rseq-internal.h>
static int
vsyscall_sched_getcpu (void)
@@ -37,7 +38,7 @@ vsyscall_sched_getcpu (void)
int
sched_getcpu (void)
{
- int cpu_id = THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id);
+ int cpu_id = RSEQ_GETMEM_VOLATILE (rseq_get_area(), cpu_id);
return __glibc_likely (cpu_id >= 0) ? cpu_id : vsyscall_sched_getcpu ();
}
#else /* RSEQ_SIG */
@@ -23,6 +23,7 @@
#include <sysdep.h>
#include <thread_pointer.h>
#include <sys/rseq.h>
+#include <sys/auxv.h>
#include <unistd.h>
#ifdef RSEQ_SIG
@@ -31,22 +32,23 @@
static void
check_rseq_disabled (void)
{
- struct pthread *pd = THREAD_SELF;
+ size_t rseq_size = MAX (getauxval (AT_RSEQ_FEATURE_SIZE), 32);
+ struct rseq *rseq_area = (struct rseq *) ((char *) __thread_pointer () + __rseq_offset);
TEST_COMPARE (__rseq_flags, 0);
- TEST_VERIFY ((char *) __thread_pointer () + __rseq_offset
- == (char *) &pd->rseq_area);
- TEST_COMPARE (__rseq_size, 0);
- TEST_COMPARE ((int) pd->rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+ //FIXME: unsure how to test this
+ //TEST_VERIFY ((char *) __thread_pointer () + __rseq_offset
+ // == (char *) &pd->rseq_area);
+ TEST_COMPARE (__rseq_size, rseq_size);
+ TEST_COMPARE ((int) rseq_area->cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
- int ret = syscall (__NR_rseq, &pd->rseq_area, sizeof (pd->rseq_area),
- 0, RSEQ_SIG);
+ int ret = syscall (__NR_rseq, rseq_area, __rseq_size, 0, RSEQ_SIG);
if (ret == 0)
{
- ret = syscall (__NR_rseq, &pd->rseq_area, sizeof (pd->rseq_area),
+ ret = syscall (__NR_rseq, rseq_area, __rseq_size,
RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
TEST_COMPARE (ret, 0);
- pd->rseq_area.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
+ rseq_area->cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
}
else
{
@@ -31,18 +31,28 @@
# include <syscall.h>
# include <thread_pointer.h>
# include <tls.h>
+# include <sys/auxv.h>
# include "tst-rseq.h"
static void
do_rseq_main_test (void)
{
- struct pthread *pd = THREAD_SELF;
+ size_t rseq_size = MAX (getauxval (AT_RSEQ_FEATURE_SIZE), 32);
TEST_VERIFY_EXIT (rseq_thread_registered ());
TEST_COMPARE (__rseq_flags, 0);
- TEST_VERIFY ((char *) __thread_pointer () + __rseq_offset
- == (char *) &pd->rseq_area);
- TEST_COMPARE (__rseq_size, sizeof (pd->rseq_area));
+ TEST_COMPARE (__rseq_size, rseq_size);
+ /* rseq area minimum ABI size is 32 bytes. */
+ TEST_VERIFY (__rseq_size >= 32);
+#if TLS_TCB_AT_TP
+  /* rseq area block should come before the thread pointer and be at least 32 bytes. */
+  TEST_VERIFY (__rseq_offset <= -32);
+#elif TLS_DTV_AT_TP
+  /* rseq area block should come after the thread pointer. */
+  TEST_VERIFY (__rseq_offset >= 0);
+#else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+#endif
}
static void
@@ -23,11 +23,14 @@
#include <syscall.h>
#include <sys/rseq.h>
#include <tls.h>
+#include <rseq-internal.h>
static inline bool
rseq_thread_registered (void)
{
- return THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id) >= 0;
+ struct rseq_area *rseq = (struct rseq_area *) ((char *) __thread_pointer () + __rseq_offset);
+
+ return __atomic_load_n (&rseq->cpu_id, __ATOMIC_RELAXED) >= 0;
}
static inline int
@@ -130,3 +130,59 @@
"i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
}})
+
+/* Read member of the RSEQ area directly. */
+# define RSEQ_GETMEM_VOLATILE(descr, member) \
+ ({ __typeof (descr->member) __value; \
+ ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset); \
+ _Static_assert (sizeof (__value) == 1 \
+ || sizeof (__value) == 4 \
+ || sizeof (__value) == 8, \
+ "size of per-thread data"); \
+ if (sizeof (__value) == 1) \
+ asm volatile ("movb %%fs:%P2(%q3),%b0" \
+ : "=q" (__value) \
+ : "0" (0), "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else if (sizeof (__value) == 4) \
+ asm volatile ("movl %%fs:%P1(%q2),%0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else /* 8 */ \
+ { \
+ asm volatile ("movq %%fs:%P1(%q2),%q0" \
+ : "=r" (__value) \
+ : "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ } \
+ __value; })
+
+/* Set member of the RSEQ area directly. */
+# define RSEQ_SETMEM(descr, member, value) \
+ ({ \
+ ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset); \
+ _Static_assert (sizeof (descr->member) == 1 \
+ || sizeof (descr->member) == 4 \
+ || sizeof (descr->member) == 8, \
+ "size of per-thread data"); \
+ if (sizeof (descr->member) == 1) \
+ asm volatile ("movb %b0,%%fs:%P1(%q2)" : \
+ : "iq" (value), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else if (sizeof (descr->member) == 4) \
+ asm volatile ("movl %0,%%fs:%P1(%q2)" : \
+ : IMM_MODE (value), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ else /* 8 */ \
+ { \
+ /* Since movq takes a signed 32-bit immediate or a register source \
+ operand, use "er" constraint for 32-bit signed integer constant \
+ or register. */ \
+ asm volatile ("movq %q0,%%fs:%P1(%q2)" : \
+ : "er" ((uint64_t) cast_to_integer (value)), \
+ "i" (offsetof (struct rseq_area, member)), \
+ "r" (_rseq_offset)); \
+ }})