[v10,2/4] Add rseq extensible ABI support

Message ID 20240325182927.914830-3-mjeanson@efficios.com
State New
Series [v10,1/4] nptl: fix potential merge of __rseq_* relro symbols

Commit Message

Michael Jeanson March 25, 2024, 6:29 p.m. UTC
Introduced in Linux v6.3, the rseq extensible ABI [1] allows adding
rseq features past the initial 32 bytes of the original ABI.

While the rseq features in the latest kernel still fit within the
original ABI size, there are currently only 4 bytes left. It would thus
be a good time to add support for the extensible ABI so that when new
features are added, they are immediately available to GNU libc users.

We use the ELF auxiliary vector to query the kernel for the size and
alignment of the rseq area; if this fails, we default to the original
fixed size and alignment of '32', which the kernel accepts as a
compatibility mode with the original ABI.
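
For illustration, a minimal sketch of this query from user space,
assuming <sys/auxv.h> exposes the Linux v6.3 AT_RSEQ_FEATURE_SIZE and
AT_RSEQ_ALIGN constants (the helper name is hypothetical):

#include <stddef.h>
#include <sys/auxv.h>

/* Query the kernel's rseq geometry.  getauxval returns 0 for entries
   the running kernel does not provide, so pre-v6.3 kernels fall back
   to the original fixed ABI.  */
static void
get_rseq_geometry (size_t *feature_size, size_t *align)
{
  *feature_size = getauxval (AT_RSEQ_FEATURE_SIZE);
  *align = getauxval (AT_RSEQ_ALIGN);
  if (*feature_size == 0 || *align == 0)
    {
      /* Compatibility mode with the original ABI.  */
      *feature_size = 32;
      *align = 32;
    }
}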

This makes the size of the rseq area variable and thus requires
relocating it out of 'struct pthread'. We chose to move it after (in
block allocation order) the last TLS block inside the static TLS block
allocation. This required a fairly small modification to the TLS block
allocator and does not interfere with the main executable's TLS block,
which must always be the first block relative to the thread pointer.
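
In outline, the resulting placement math (condensed from the
csu/libc-tls.c hunks below; variable names follow the patch, roundup
and MAX as in <sys/param.h>):

/* The rseq area block is at least the 32-byte ABI minimum and a
   multiple of the requested alignment.  */
rseq_alloc_size = roundup (MAX (rseq_feature_size, 32), rseq_align);

#if TLS_TCB_AT_TP
/* TLS blocks live below the thread pointer; the rseq area block is
   laid out last and therefore gets the most negative offset.  */
tls_blocks_size = roundup (roundup (memsz, align) + rseq_alloc_size,
                           rseq_align);
rseq_offset = -tls_blocks_size;
#elif TLS_DTV_AT_TP
/* TLS blocks live above the thread pointer; the rseq area block is
   laid out last, at the end of the static TLS block.  */
tls_blocks_size = roundup (tcb_offset + memsz + rseq_alloc_size,
                           rseq_align);
rseq_offset = tls_blocks_size - rseq_alloc_size;
#endif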

[1] https://lore.kernel.org/all/20221122203932.231377-4-mathieu.desnoyers@efficios.com/

Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Co-Authored-By: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
Changes since RFC v1:
- Insert the rseq area after the last TLS block
- Add proper support for TLS_TCB_AT_TP variant
Changes since RFC v2:
- Set __rseq_size even when the registration fails
- Adjust rseq tests to the new ABI
- Added support for statically linked executables
Changes since RFC v3:
- Fix RSEQ_SETMEM for rseq disabled
- Replace sys/auxv.h usage with dl-parse_auxv.h
- Fix offset for TLS_TCB_AT_TP with statically linked executables
- Zero the rseq area before registration
Changes since RFC v4:
- Move dynamic linker defines to a header file
- Fix alignment when tls block align is smaller than rseq align with
  statically linked executables
- Add statically linked rseq tests
- Revert: Set __rseq_size even when the registration fails
- Use minimum size when rseq is disabled by tunable
Changes since v5:
- Fix TLS_DTV_AT_TP rseq offset with statically linked executables
Changes since v6:
- Fix tst-rseq for feature size over 32 bytes
- Rebased on 'nptl: fix potential merge of __rseq_* relro symbols'
Changes since v8:
- Fix copyright year in sysdeps/generic/dl-rseq.h
- Clarify the tcb math comments
- Add a comment to clarify what enforces the alignment requirements of a
  pointer calculated from the rseq_offset
- Remove nonsensical test in tst-rseq-disable
- Add comments to clarify why the rseq size is 0 when registration fails
  or is disabled
- Add comments to explain why we allocate an rseq area block even when
  the registration is disabled by tunable
- Rename 'rseq_size' -> 'rseq_alloc_size' and 'dl_tls_rseq_size' ->
  'dl_tls_rseq_alloc_size' to clarify the distinction between the
  allocated rseq size and the size reported to application code in
  '__rseq_size'
---
 csu/libc-tls.c                                | 84 +++++++++++++++++--
 elf/dl-tls.c                                  | 78 +++++++++++++++++
 elf/rtld_static_init.c                        | 12 +++
 nptl/descr.h                                  | 20 +----
 nptl/pthread_create.c                         |  2 +-
 sysdeps/generic/dl-rseq.h                     | 26 ++++++
 sysdeps/generic/ldsodefs.h                    | 12 +++
 sysdeps/i386/nptl/tcb-access.h                | 56 +++++++++++++
 sysdeps/nptl/dl-tls_init_tp.c                 | 10 ++-
 sysdeps/nptl/tcb-access.h                     |  5 ++
 sysdeps/unix/sysv/linux/Makefile              | 10 +++
 sysdeps/unix/sysv/linux/dl-parse_auxv.h       |  6 ++
 sysdeps/unix/sysv/linux/rseq-internal.h       | 29 ++++++-
 sysdeps/unix/sysv/linux/sched_getcpu.c        |  3 +-
 .../unix/sysv/linux/tst-rseq-disable-static.c |  1 +
 sysdeps/unix/sysv/linux/tst-rseq-disable.c    | 17 ++--
 .../unix/sysv/linux/tst-rseq-nptl-static.c    |  1 +
 sysdeps/unix/sysv/linux/tst-rseq-static.c     |  1 +
 sysdeps/unix/sysv/linux/tst-rseq.c            | 22 ++++-
 sysdeps/unix/sysv/linux/tst-rseq.h            |  9 +-
 sysdeps/x86_64/nptl/tcb-access.h              | 56 +++++++++++++
 21 files changed, 413 insertions(+), 47 deletions(-)
 create mode 100644 sysdeps/generic/dl-rseq.h
 create mode 100644 sysdeps/unix/sysv/linux/tst-rseq-disable-static.c
 create mode 100644 sysdeps/unix/sysv/linux/tst-rseq-nptl-static.c
 create mode 100644 sysdeps/unix/sysv/linux/tst-rseq-static.c

Comments

Florian Weimer July 1, 2024, 8:32 a.m. UTC | #1
* Michael Jeanson:

> diff --git a/csu/libc-tls.c b/csu/libc-tls.c
> index b7682bdf43..f73d0e1c52 100644
> --- a/csu/libc-tls.c
> +++ b/csu/libc-tls.c

> @@ -110,6 +124,7 @@ __libc_setup_tls (void)
>    size_t filesz = 0;
>    void *initimage = NULL;
>    size_t align = 0;
> +  size_t tls_blocks_size = 0;
>    size_t max_align = TCB_ALIGNMENT;
>    size_t tcb_offset;
>    const ElfW(Phdr) *phdr;
> @@ -135,22 +150,79 @@ __libc_setup_tls (void)
>    /* Calculate the size of the static TLS surplus, with 0 auditors.  */
>    _dl_tls_static_surplus_init (0);
>  
> +  /* Even when disabled by tunable, an rseq area will be allocated to allow
> +     application code to test the registration status with 'rseq->cpu_id >= 0'.
> +     Default to the rseq ABI minimum size and alignment, this will ensure we
> +     don't use more TLS than necessary.  */
> +  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
> +  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
> +  bool do_rseq = true;
> +  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);

This breaks the Hurd build because it does not have rseq at all.

> diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
> index 50f58a60e3..c8bd39ddcf 100644
> --- a/sysdeps/generic/ldsodefs.h
> +++ b/sysdeps/generic/ldsodefs.h
> @@ -610,6 +610,18 @@ struct rtld_global_ro
>       See comments in elf/dl-tls.c where it is initialized.  */
>    EXTERN size_t _dl_tls_static_surplus;
>  
> +  /* Size of the features present in the rseq area.  */
> +  EXTERN size_t _dl_tls_rseq_feature_size;
> +
> +  /* Alignment requirement of the rseq area.  */
> +  EXTERN size_t _dl_tls_rseq_align;
> +
> +  /* Size of the rseq area allocated in the static TLS block.  */
> +  EXTERN size_t _dl_tls_rseq_alloc_size;
> +
> +  /* Offset of the rseq area from the thread pointer.  */
> +  EXTERN ptrdiff_t _dl_tls_rseq_offset;
> +

Why is it necessary to duplicate these variables?  Part of this is not
a duplicate, and it looks like it's now used for TLS allocation on new
threads, which means it's needed.  Is it necessary to add it to GLRO?
The __rtld_static_init changes would only be needed if we supported
pthread_create from the inner libc after static dlopen, but that's not
really the case today.

> diff --git a/sysdeps/unix/sysv/linux/rseq-internal.h b/sysdeps/unix/sysv/linux/rseq-internal.h
> index 48eebc1e16..4123072274 100644
> --- a/sysdeps/unix/sysv/linux/rseq-internal.h
> +++ b/sysdeps/unix/sysv/linux/rseq-internal.h
> @@ -24,6 +24,24 @@
>  #include <stdbool.h>
>  #include <stdio.h>
>  #include <sys/rseq.h>
> +#include <thread_pointer.h>
> +#include <ldsodefs.h>
> +
> +/* rseq area registered with the kernel.  Use a custom definition
> +   here to isolate from kernel struct rseq changes.  The
> +   implementation of sched_getcpu needs access to the cpu_id field;
> +   the other fields are unused and not included here.  */
> +struct rseq_area
> +{
> +  uint32_t cpu_id_start;
> +  uint32_t cpu_id;
> +};
> +
> +static inline struct rseq_area *
> +rseq_get_area(void)
> +{
> +  return (struct rseq_area *) ((char *) __thread_pointer() + GLRO (dl_tls_rseq_offset));
> +}
> 
>  #ifdef RSEQ_SIG

Line is longer than 79 characters.  The new inline function must be in
the #ifdef RSEQ_SIG block because __thread_pointer is currently only
available on targets with RSEQ_SIG.  It's missing on SPARC, for example.
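
Something along these lines, perhaps (untested):

#ifdef RSEQ_SIG
static inline struct rseq_area *
rseq_get_area (void)
{
  return (struct rseq_area *) ((char *) __thread_pointer ()
                               + GLRO (dl_tls_rseq_offset));
}
#endif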

Thanks,
Florian
Michael Jeanson July 2, 2024, 10:44 p.m. UTC | #2
On 2024-07-01 04:32, Florian Weimer wrote:
> * Michael Jeanson:
> 
>> diff --git a/csu/libc-tls.c b/csu/libc-tls.c
>> index b7682bdf43..f73d0e1c52 100644
>> --- a/csu/libc-tls.c
>> +++ b/csu/libc-tls.c
> 
>> @@ -110,6 +124,7 @@ __libc_setup_tls (void)
>>     size_t filesz = 0;
>>     void *initimage = NULL;
>>     size_t align = 0;
>> +  size_t tls_blocks_size = 0;
>>     size_t max_align = TCB_ALIGNMENT;
>>     size_t tcb_offset;
>>     const ElfW(Phdr) *phdr;
>> @@ -135,22 +150,79 @@ __libc_setup_tls (void)
>>     /* Calculate the size of the static TLS surplus, with 0 auditors.  */
>>     _dl_tls_static_surplus_init (0);
>>   
>> +  /* Even when disabled by tunable, an rseq area will be allocated to allow
>> +     application code to test the registration status with 'rseq->cpu_id >= 0'.
>> +     Default to the rseq ABI minimum size and alignment, this will ensure we
>> +     don't use more TLS than necessary.  */
>> +  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
>> +  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
>> +  bool do_rseq = true;
>> +  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
> 
> This breaks the Hurd build because it does not have rseq at all.

And what would you suggest as a way forward? Adding ifdefs in the current 
function or moving this code to some other location?

Considering I don't have access to a Hurd environment, how should this be tested?

> 
>> diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
>> index 50f58a60e3..c8bd39ddcf 100644
>> --- a/sysdeps/generic/ldsodefs.h
>> +++ b/sysdeps/generic/ldsodefs.h
>> @@ -610,6 +610,18 @@ struct rtld_global_ro
>>        See comments in elf/dl-tls.c where it is initialized.  */
>>     EXTERN size_t _dl_tls_static_surplus;
>>   
>> +  /* Size of the features present in the rseq area.  */
>> +  EXTERN size_t _dl_tls_rseq_feature_size;
>> +
>> +  /* Alignment requirement of the rseq area.  */
>> +  EXTERN size_t _dl_tls_rseq_align;
>> +
>> +  /* Size of the rseq area allocated in the static TLS block.  */
>> +  EXTERN size_t _dl_tls_rseq_alloc_size;
>> +
>> +  /* Offset of the rseq area from the thread pointer.  */
>> +  EXTERN ptrdiff_t _dl_tls_rseq_offset;
>> +
> 
> Why is it necessary to duplicate these variables?  Part of this is not
> a duplicate, and it looks like it's now used for TLS allocation on new
> threads, which means it's needed.  Is it necessary to add it to GLRO?

GLRO(dl_tls_rseq_feature_size) / GLRO(dl_tls_rseq_align): I followed the same 
pattern as the other AUXV variables in "sysdeps/unix/sysv/linux/dl-parse_auxv.h"

_dl_tls_rseq_alloc_size: is used to keep track of the size of the allocated 
rseq tls block which we need to zero initialize on thread creation and will 
differ from '__rseq_size' in the case of a failed registration.

_dl_tls_rseq_offset: I think this one could be folded into '__rseq_offset' 
with some refactoring but that would require accessing this variable defined 
in the rseq headers from the generic dynamic loader code.

You have a much better understanding of the internals of glibc than I do, so 
I'm very open to your recommendations for each of those variables.


> The __rtld_static_init changes would only be needed if we supported
> pthread_create from the inner libc after static dlopen, but that's not
> really the case today.

I must say I followed existing patterns for GLRO variables and thought
the initialization in __rtld_static_init was required. It seems to build
and run fine without it; I can remove these changes if you think they
should be removed.

> 
>> diff --git a/sysdeps/unix/sysv/linux/rseq-internal.h b/sysdeps/unix/sysv/linux/rseq-internal.h
>> index 48eebc1e16..4123072274 100644
>> --- a/sysdeps/unix/sysv/linux/rseq-internal.h
>> +++ b/sysdeps/unix/sysv/linux/rseq-internal.h
>> @@ -24,6 +24,24 @@
>>   #include <stdbool.h>
>>   #include <stdio.h>
>>   #include <sys/rseq.h>
>> +#include <thread_pointer.h>
>> +#include <ldsodefs.h>
>> +
>> +/* rseq area registered with the kernel.  Use a custom definition
>> +   here to isolate from kernel struct rseq changes.  The
>> +   implementation of sched_getcpu needs access to the cpu_id field;
>> +   the other fields are unused and not included here.  */
>> +struct rseq_area
>> +{
>> +  uint32_t cpu_id_start;
>> +  uint32_t cpu_id;
>> +};
>> +
>> +static inline struct rseq_area *
>> +rseq_get_area(void)
>> +{
>> +  return (struct rseq_area *) ((char *) __thread_pointer() + GLRO (dl_tls_rseq_offset));
>> +}
>>
>>   #ifdef RSEQ_SIG
> 
> Line is longer than 79 characters.  The new inline function must be in
> the #ifdef RSEQ_SIG block because __thread_pointer is currently only
> available on targets with RSEQ_SIG.  It's missing on SPARC, for example.

Ack.
Florian Weimer July 3, 2024, 7:40 a.m. UTC | #3
* Michael Jeanson:

> On 2024-07-01 04:32, Florian Weimer wrote:
>> * Michael Jeanson:
>> 
>>> diff --git a/csu/libc-tls.c b/csu/libc-tls.c
>>> index b7682bdf43..f73d0e1c52 100644
>>> --- a/csu/libc-tls.c
>>> +++ b/csu/libc-tls.c
>> 
>>> @@ -110,6 +124,7 @@ __libc_setup_tls (void)
>>>     size_t filesz = 0;
>>>     void *initimage = NULL;
>>>     size_t align = 0;
>>> +  size_t tls_blocks_size = 0;
>>>     size_t max_align = TCB_ALIGNMENT;
>>>     size_t tcb_offset;
>>>     const ElfW(Phdr) *phdr;
>>> @@ -135,22 +150,79 @@ __libc_setup_tls (void)
>>>     /* Calculate the size of the static TLS surplus, with 0 auditors.  */
>>>     _dl_tls_static_surplus_init (0);
>>> +  /* Even when disabled by tunable, an rseq area will be allocated to allow
>>> +     application code to test the registration status with 'rseq->cpu_id >= 0'.
>>> +     Default to the rseq ABI minimum size and alignment, this will ensure we
>>> +     don't use more TLS than necessary.  */
>>> +  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
>>> +  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
>>> +  bool do_rseq = true;
>>> +  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
>> This breaks the Hurd build because it does not have rseq at all.
>
> And what would you suggest as a way forward? Adding ifdefs in the
> current function or moving this code to some other location?

I would put a header file into sysdeps/generic/dl-extra_tls.h with the
following functions:

_dl_extra_tls_get_size () returns the size of the extra TLS allocation.
_dl_extra_tls_get_align () returns its alignment.
_dl_extra_tls_set_offset () sets the offset from the thread pointer.

The default implementations in sysdeps/generic/dl-extra_tls.h should be
inline and do nothing.  In sysdeps/unix/sysv/linux/dl-extra_tls.h, there
could be the real implementation (probably inline as well).  It would
use the _rseq_* variables.
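
The generic defaults could be as simple as this sketch (the bodies and
exact signatures are only a suggestion):

/* sysdeps/generic/dl-extra_tls.h -- no extra TLS block by default.  */
#include <stddef.h>

static inline size_t
_dl_extra_tls_get_size (void)
{
  return 0;
}

/* An alignment of 1 leaves the computed maximum alignment unchanged.  */
static inline size_t
_dl_extra_tls_get_align (void)
{
  return 1;
}

static inline void
_dl_extra_tls_set_offset (ptrdiff_t tls_offset)
{
  /* Nothing to record without rseq.  */
}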

> Considering I don't have access to a Hurd environment, how should this
> be tested?

You can use scripts/build-many-glibcs.py to create a cross-build
environment.  Something like:

python3 scripts/build-many-glibcs.py /home/bmg checkout --shallow
python3 scripts/build-many-glibcs.py /home/bmg host-libraries --keep failed
python3 scripts/build-many-glibcs.py /home/bmg compilers i686-gnu --keep failed
python3 scripts/build-many-glibcs.py /home/bmg glibcs i686-gnu --keep failed

You can put the glibc sources to build into /home/bmg/src/glibc.  It's
possible to redo the last step until the build issues are ironed out.

>> Why is it necessary to duplicate these variables?  Part of this is
>> not a duplicate, and it looks like it's now used for TLS allocation
>> on new threads, which means it's needed.  Is it necessary to add it
>> to GLRO?
>
> GLRO(dl_tls_rseq_feature_size) / GLRO(dl_tls_rseq_align): I followed
> the same pattern as the other AUXV variables in
> "sysdeps/unix/sysv/linux/dl-parse_auxv.h"
>
> _dl_tls_rseq_alloc_size: is used to keep track of the size of the
> allocated rseq tls block which we need to zero initialize on thread
> creation and will differ from '__rseq_size' in the case of a failed
> registration.

Ahh.  This one will actually have to remain in GLRO because we'll need
it in libc during thread creation.

> _dl_tls_rseq_offset: I think this one could be folded into
> '__rseq_offset' with some refactoring but that would require accessing
> this variable defined in the rseq headers from the generic dynamic
> loader code.

That should be okay because sysdeps/unix/sysv/linux/dl-parse_auxv.h is
already Linux-specific, and these variables exist on Linux regardless of
rseq (RSEQ_SIG) support.

>> The __rtld_static_init changes would only be needed if we supported
>> pthread_create from the inner libc after static dlopen, but that's not
>> really the case today.
>
> I must say I followed existing patterns for GLRO variables and thought
> the initialization in __rtld_static_init was required. It seems to
> build and run fine without it; I can remove these changes if you think
> they should be removed.

I think today, the inner libc after static dlopen does not observe rseq
support.  We can keep it that way for now.

Thanks,
Florian
Michael Jeanson July 5, 2024, 7:45 p.m. UTC | #4
On 2024-07-03 03:40, Florian Weimer wrote:
> * Michael Jeanson:
> 
>> On 2024-07-01 04:32, Florian Weimer wrote:
>>> * Michael Jeanson:
>>>
>>>> diff --git a/csu/libc-tls.c b/csu/libc-tls.c
>>>> index b7682bdf43..f73d0e1c52 100644
>>>> --- a/csu/libc-tls.c
>>>> +++ b/csu/libc-tls.c
>>>
>>>> @@ -110,6 +124,7 @@ __libc_setup_tls (void)
>>>>      size_t filesz = 0;
>>>>      void *initimage = NULL;
>>>>      size_t align = 0;
>>>> +  size_t tls_blocks_size = 0;
>>>>      size_t max_align = TCB_ALIGNMENT;
>>>>      size_t tcb_offset;
>>>>      const ElfW(Phdr) *phdr;
>>>> @@ -135,22 +150,79 @@ __libc_setup_tls (void)
>>>>      /* Calculate the size of the static TLS surplus, with 0 auditors.  */
>>>>      _dl_tls_static_surplus_init (0);
>>>> +  /* Even when disabled by tunable, an rseq area will be allocated to allow
>>>> +     application code to test the registration status with 'rseq->cpu_id >= 0'.
>>>> +     Default to the rseq ABI minimum size and alignment, this will ensure we
>>>> +     don't use more TLS than necessary.  */
>>>> +  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
>>>> +  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
>>>> +  bool do_rseq = true;
>>>> +  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
>>> This breaks the Hurd build because it does not have rseq at all.
>>
>> And what would you suggest as a way forward? Adding ifdefs in the
>> current function or moving this code to some other location?
> 
> I would put a header file into sysdeps/generic/dl-extra_tls.h with the
> following functions:
> 
> _dl_extra_tls_get_size () returns the size of the extra TLS allocation.
> _dl_extra_tls_get_align () returns its alignment.
> _dl_extra_tls_set_offset () sets the offset from the thread pointer.
> 
> The default implementations in sysdeps/generic/dl-extra_tls.h should be
> inline and do nothing.  In sysdeps/unix/sysv/linux/dl-extra_tls.h, there
> could be the real implementation (probably inline as well).  It would
> use the _rseq_* variables.

Would the goal be to try to remove all rseq-related terminology from
'csu/libc-tls.c' and 'elf/dl-tls.c' and replace it with the generic concept of
'extra_tls'?

> 
>> Considering I don't have access to a Hurd environment, how should this
>> be tested?
> 
> You can use scripts/build-many-glibcs.py to create a cross-build
> environment.  Something like:
> 
> python3 scripts/build-many-glibcs.py /home/bmg checkout --shallow
> python3 scripts/build-many-glibcs.py /home/bmg host-libraries --keep failed
> python3 scripts/build-many-glibcs.py /home/bmg compilers i686-gnu --keep failed
> python3 scripts/build-many-glibcs.py /home/bmg glibcs i686-gnu --keep failed
> 
> You can put the glibc sources to build into /home/bmg/src/glibc.  It's
> possible to redo the last step until the build issues are ironed out.

Thanks for this; I implemented most of your recommendations and I now have a
patchset building on both Linux and Hurd. It still needs some cleanup, but I
don't think there is any rush since we are now targeting 2.41.

> 
>>> Why is it necessary to duplicate these variables?  Part of this is
>>> not a duplicate, and it looks like it's now used for TLS allocation
>>> on new threads, which means it's needed.  Is it necessary to add it
>>> to GLRO?
>>
>> GLRO(dl_tls_rseq_feature_size) / GLRO(dl_tls_rseq_align): I followed
>> the same pattern as the other AUXV variables in
>> "sysdeps/unix/sysv/linux/dl-parse_auxv.h"
>>
>> _dl_tls_rseq_alloc_size: is used to keep track of the size of the
>> allocated rseq tls block which we need to zero initialize on thread
>> creation and will differ from '__rseq_size' in the case of a failed
>> registration.
> 
> Ahh.  This one will actually have to remain in GLRO because we'll need
> it in libc during thread creation.

Ack.

> 
>> _dl_tls_rseq_offset: I think this one could be folded into
>> '__rseq_offset' with some refactoring but that would require accessing
>> this variable defined in the rseq headers from the generic dynamic
>> loader code.
> 
> That should be okay because sysdeps/unix/sysv/linux/dl-parse_auxv.h is
> already Linux-specific, and these variables exist on Linux regardless of
> rseq (RSEQ_SIG) support.

Ack.

> 
>>> The __rtld_static_init changes would only be needed if we supported
>>> pthread_create from the inner libc after static dlopen, but that's not
>>> really the case today.
>>
>> I must say I followed existing patterns for GLRO variables and
>> thought the initialization in __rtld_static_init was required. It
>> seems to build and run fine without it; I can remove these changes if
>> you think they should be removed.
> 
> I think today, the inner libc after static dlopen does not observe rseq
> support.  We can keep it that way for now.

Ack.

Cheers,

Michael
Florian Weimer July 8, 2024, 7:05 a.m. UTC | #5
* Michael Jeanson:

>>> And what would you suggest as a way forward? Adding ifdefs in the
>>> current function or moving this code to some other location?
>> I would put a header file into sysdeps/generic/dl-extra_tls.h with the
>> following functions:
>>
>> _dl_extra_tls_get_size () returns the size of the extra TLS allocation.
>> _dl_extra_tls_get_align () returns its alignment.
>> _dl_extra_tls_set_offset () sets the offset from the thread pointer.
>>
>> The default implementations in sysdeps/generic/dl-extra_tls.h should be
>> inline and do nothing.  In sysdeps/unix/sysv/linux/dl-extra_tls.h, there
>> could be the real implementation (probably inline as well).  It would
>> use the _rseq_* variables.
>
> Would the goal be to try to remove all rseq-related terminology from
> 'csu/libc-tls.c' and 'elf/dl-tls.c' and replace it with the generic
> concept of 'extra_tls'?

Yes, that's my suggestion.

> Thanks for this; I implemented most of your recommendations and I now
> have a patchset building on both Linux and Hurd. It still needs some
> cleanup, but I don't think there is any rush since we are now targeting
> 2.41.

I posted a variant of the symbol change recently for 2.40; let's hope it
goes in.

>>>> Why is it necessary to duplicate these variables?  Part of this is
>>>> not a duplicate, and it looks like it's now used for TLS allocation
>>>> on new threads, which means it's needed.  Is it necessary to add it
>>>> to GLRO?
>>>
>>> GLRO(dl_tls_rseq_feature_size) / GLRO(dl_tls_rseq_align): I followed
>>> the same pattern as the other AUXV variables in
>>> "sysdeps/unix/sysv/linux/dl-parse_auxv.h"
>>>
>>> _dl_tls_rseq_alloc_size: is used to keep track of the size of the
>>> allocated rseq tls block which we need to zero initialize on thread
>>> creation and will differ from '__rseq_size' in the case of a failed
>>> registration.
>> Ahh.  This one will actually have to remain in GLRO because we'll need
>> it in libc during thread creation.
>
> Ack.

We can use __rseq_size from libc and increase it to 32 if it is less
than 32 (see the patch I posted).
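
That is, roughly (a sketch; MAX as in <sys/param.h>, and the zeroing
placed at thread creation as discussed above):

/* __rseq_size is 0 when the registration failed or was disabled, but
   at least the 32-byte ABI minimum is always allocated, so clamp
   before zeroing the new thread's rseq area.  */
size_t rseq_alloc_size = MAX (__rseq_size, 32);
memset ((char *) __thread_pointer () + __rseq_offset, 0, rseq_alloc_size);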

Thanks,
Florian

Patch

diff --git a/csu/libc-tls.c b/csu/libc-tls.c
index b7682bdf43..f73d0e1c52 100644
--- a/csu/libc-tls.c
+++ b/csu/libc-tls.c
@@ -26,6 +26,8 @@ 
 #include <array_length.h>
 #include <pthreadP.h>
 #include <dl-call_tls_init_tp.h>
+#include <dl-rseq.h>
+#include <elf/dl-tunables.h>
 
 #ifdef SHARED
  #error makefile bug, this file is for static only
@@ -62,6 +64,18 @@  size_t _dl_tls_static_surplus;
    dynamic TLS access (e.g. with TLSDESC).  */
 size_t _dl_tls_static_optional;
 
+/* Size of the features present in the rseq area.  */
+size_t _dl_tls_rseq_feature_size;
+
+/* Alignment requirement of the rseq area.  */
+size_t _dl_tls_rseq_align;
+
+/* Size of the rseq area allocated in the static TLS block.  */
+size_t _dl_tls_rseq_alloc_size;
+
+/* Offset of the rseq area from the thread pointer.  */
+ptrdiff_t _dl_tls_rseq_offset;
+
 /* Generation counter for the dtv.  */
 size_t _dl_tls_generation;
 
@@ -110,6 +124,7 @@  __libc_setup_tls (void)
   size_t filesz = 0;
   void *initimage = NULL;
   size_t align = 0;
+  size_t tls_blocks_size = 0;
   size_t max_align = TCB_ALIGNMENT;
   size_t tcb_offset;
   const ElfW(Phdr) *phdr;
@@ -135,22 +150,79 @@  __libc_setup_tls (void)
   /* Calculate the size of the static TLS surplus, with 0 auditors.  */
   _dl_tls_static_surplus_init (0);
 
+  /* Even when disabled by tunable, an rseq area will be allocated to allow
+     application code to test the registration status with 'rseq->cpu_id >= 0'.
+     Default to the rseq ABI minimum size and alignment, this will ensure we
+     don't use more TLS than necessary.  */
+  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
+  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
+  bool do_rseq = true;
+  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      rseq_align = GLRO(dl_tls_rseq_align);
+      /* Make sure the rseq area size is at least the minimum ABI size and a
+         multiple of the requested alignment. */
+      rseq_alloc_size = roundup (MAX (GLRO(dl_tls_rseq_feature_size),
+			      TLS_DL_RSEQ_MIN_SIZE), rseq_align);
+    }
+
+  /* Increase the maximum alignment with the rseq alignment requirements if
+     necessary.  */
+  max_align = MAX (max_align, rseq_align);
+
+  /* Record the rseq_area block size.  */
+  GLRO (dl_tls_rseq_alloc_size) = rseq_alloc_size;
+
   /* We have to set up the TCB block which also (possibly) contains
      'errno'.  Therefore we avoid 'malloc' which might touch 'errno'.
      Instead we use 'sbrk' which would only uses 'errno' if it fails.
      In this case we are right away out of memory and the user gets
      what she/he deserves.  */
 #if TLS_TCB_AT_TP
+  /* Before the thread pointer, add the aligned tls block size and then
+     align the rseq area block on top.  */
+  tls_blocks_size = roundup (roundup (memsz, align ?: 1) + rseq_alloc_size, rseq_align);
+
+ /* Record the rseq_area offset.
+
+    With TLS_TCB_AT_TP the TLS blocks are allocated before the thread pointer
+    in reverse order.  Our block is added last which results in it being the
+    first in the static TLS block, thus record the most negative offset.
+
+    The alignment requirements of the pointer resulting from this offset and
+    the thread pointer are enforced by 'max_align' which is used to align the
+    tcb_offset.  */
+  GLRO (dl_tls_rseq_offset) = -tls_blocks_size;
+
   /* Align the TCB offset to the maximum alignment, as
      _dl_allocate_tls_storage (in elf/dl-tls.c) does using __libc_memalign
      and dl_tls_static_align.  */
-  tcb_offset = roundup (memsz + GLRO(dl_tls_static_surplus), max_align);
+  tcb_offset = roundup (tls_blocks_size + GLRO(dl_tls_static_surplus), max_align);
   tlsblock = _dl_early_allocate (tcb_offset + TLS_INIT_TCB_SIZE + max_align);
   if (tlsblock == NULL)
     _startup_fatal_tls_error ();
 #elif TLS_DTV_AT_TP
+  /* Align memsz on top of the initial tcb.  */
   tcb_offset = roundup (TLS_INIT_TCB_SIZE, align ?: 1);
-  tlsblock = _dl_early_allocate (tcb_offset + memsz + max_align
+
+  /* After the thread pointer, add the initial tcb plus the tls block size and
+     then align the rseq area block on top.  */
+  tls_blocks_size = roundup (tcb_offset + memsz + rseq_alloc_size, rseq_align);
+
+ /* Record the rseq_area offset.
+
+    With TLS_DTV_AT_TP the TLS blocks are allocated after the thread pointer in
+    order. Our block is added last which results in it being the last in the
+    static TLS block, thus record the offset as the size of the static TLS
+    block minus the size of our block. The resulting offset will be positive.
+
+    The alignment requirements of the pointer resulting from this offset and
+    the thread pointer are enforced by 'max_align' which is used to align the
+    tcb_offset.  */
+  GLRO (dl_tls_rseq_offset) = tls_blocks_size - rseq_alloc_size;
+
+  tlsblock = _dl_early_allocate (tls_blocks_size + max_align
 				 + TLS_PRE_TCB_SIZE
 				 + GLRO(dl_tls_static_surplus));
   if (tlsblock == NULL)
@@ -209,11 +281,5 @@  __libc_setup_tls (void)
   /* static_slotinfo.slotinfo[1].gen = 0; -- Already zero.  */
   static_slotinfo.slotinfo[1].map = main_map;
 
-  memsz = roundup (memsz, align ?: 1);
-
-#if TLS_DTV_AT_TP
-  memsz += tcb_offset;
-#endif
-
-  init_static_tls (memsz, MAX (TCB_ALIGNMENT, max_align));
+  init_static_tls (tls_blocks_size, MAX (TCB_ALIGNMENT, max_align));
 }
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 7b3dd9ab60..7a22a804a9 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -27,6 +27,7 @@ 
 
 #include <tls.h>
 #include <dl-tls.h>
+#include <dl-rseq.h>
 #include <ldsodefs.h>
 
 #if PTHREAD_IN_LIBC
@@ -298,6 +299,44 @@  _dl_determine_tlsoffset (void)
       slotinfo[cnt].map->l_tls_offset = off;
     }
 
+  /* Insert the rseq area block after the last TLS block.  */
+
+  /* Even when disabled by tunable, an rseq area will be allocated to allow
+     application code to test the registration status with 'rseq->cpu_id >= 0'.
+     Default to the rseq ABI minimum size and alignment, this will ensure we
+     don't use more TLS than necessary.  */
+  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
+  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
+  bool do_rseq = true;
+  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      rseq_align = GLRO(dl_tls_rseq_align);
+      /* Make sure the rseq area size is at least the minimum ABI size and a
+         multiple of the requested alignment. */
+      rseq_alloc_size = roundup (MAX (GLRO(dl_tls_rseq_feature_size),
+			      TLS_DL_RSEQ_MIN_SIZE), rseq_align);
+    }
+
+  /* Add the rseq area block to the global offset.  */
+  offset = roundup (offset, rseq_align) + rseq_alloc_size;
+
+  /* Increase the maximum alignment with the rseq alignment requirements if
+     necessary.  */
+  max_align = MAX (max_align, rseq_align);
+
+ /* Record the rseq_area offset.
+
+    With TLS_TCB_AT_TP the TLS blocks are allocated before the thread pointer
+    in reverse order.  Our block is added last which results in it being the
+    first in the static TLS block, thus record the most negative offset.
+
+    The alignment requirements of the pointer resulting from this offset and
+    the thread pointer are enforced by 'max_align' which is used to align the
+    tcb_offset.  */
+  GLRO (dl_tls_rseq_offset) = -offset;
+  GLRO (dl_tls_rseq_alloc_size) = rseq_alloc_size;
+
   GL(dl_tls_static_used) = offset;
   GLRO (dl_tls_static_size) = (roundup (offset + GLRO(dl_tls_static_surplus),
 					max_align)
@@ -343,6 +382,45 @@  _dl_determine_tlsoffset (void)
       offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
     }
 
+  /* Insert the rseq area block after the last TLS block.  */
+
+  /* Default to the rseq ABI minimum sizes, this will reduce TLS usage to 32
+     bytes when rseq is disabled by tunables.  */
+  size_t rseq_alloc_size = TLS_DL_RSEQ_MIN_SIZE;
+  size_t rseq_align = TLS_DL_RSEQ_MIN_ALIGN;
+  bool do_rseq = true;
+  do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      rseq_align = GLRO(dl_tls_rseq_align);
+      /* Make sure the rseq area size is at least the minimum ABI size and a
+         multiple of the requested alignment. */
+      rseq_alloc_size = roundup (MAX (GLRO(dl_tls_rseq_feature_size),
+			      TLS_DL_RSEQ_MIN_SIZE), rseq_align);
+    }
+
+  /* Align the global offset to the beginning of the rseq area.  */
+  offset = roundup (offset, rseq_align);
+
+ /* Record the rseq_area offset.
+
+    With TLS_DTV_AT_TP the TLS blocks are allocated after the thread pointer in
+    order. Our block is added last which results in it being the last in the
+    static TLS block, thus record the offset as the size of the static TLS
+    block minus the size of our block. The resulting offset will be positive.
+
+    The alignment requirements of the pointer resulting from this offset and
+    the thread pointer are enforced by 'max_align' which is used to align the
+    tcb_offset.  */
+  GLRO (dl_tls_rseq_alloc_size) = rseq_alloc_size;
+  GLRO (dl_tls_rseq_offset) = offset;
+
+  /* Add the rseq area block to the global offset.  */
+  offset += rseq_alloc_size;
+
+  /* Increase the max_align if necessary.  */
+  max_align = MAX (max_align, rseq_align);
+
   GL(dl_tls_static_used) = offset;
   GLRO (dl_tls_static_size) = roundup (offset + GLRO(dl_tls_static_surplus),
 				       TCB_ALIGNMENT);
diff --git a/elf/rtld_static_init.c b/elf/rtld_static_init.c
index e918e4ebdf..293d078201 100644
--- a/elf/rtld_static_init.c
+++ b/elf/rtld_static_init.c
@@ -78,6 +78,18 @@  __rtld_static_init (struct link_map *map)
   extern __typeof (dl->_dl_tls_static_size) _dl_tls_static_size
     attribute_hidden;
   dl->_dl_tls_static_size = _dl_tls_static_size;
+  extern __typeof (dl->_dl_tls_rseq_feature_size) _dl_tls_rseq_feature_size
+    attribute_hidden;
+  dl->_dl_tls_rseq_feature_size = _dl_tls_rseq_feature_size;
+  extern __typeof (dl->_dl_tls_rseq_align) _dl_tls_rseq_align
+    attribute_hidden;
+  dl->_dl_tls_rseq_align = _dl_tls_rseq_align;
+  extern __typeof (dl->_dl_tls_rseq_alloc_size) _dl_tls_rseq_alloc_size
+    attribute_hidden;
+  dl->_dl_tls_rseq_alloc_size = _dl_tls_rseq_alloc_size;
+  extern __typeof (dl->_dl_tls_rseq_offset) _dl_tls_rseq_offset
+    attribute_hidden;
+  dl->_dl_tls_rseq_offset = _dl_tls_rseq_offset;
   dl->_dl_find_object = _dl_find_object;
 
   __rtld_static_init_arch (map, dl);
diff --git a/nptl/descr.h b/nptl/descr.h
index 8cef95810c..cdc3c82d9a 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -404,25 +404,11 @@  struct pthread
   /* Used on strsignal.  */
   struct tls_internal_t tls_state;
 
-  /* rseq area registered with the kernel.  Use a custom definition
-     here to isolate from kernel struct rseq changes.  The
-     implementation of sched_getcpu needs acccess to the cpu_id field;
-     the other fields are unused and not included here.  */
-  union
-  {
-    struct
-    {
-      uint32_t cpu_id_start;
-      uint32_t cpu_id;
-    };
-    char pad[32];		/* Original rseq area size.  */
-  } rseq_area __attribute__ ((aligned (32)));
-
   /* Amount of end padding, if any, in this structure.
-     This definition relies on rseq_area being last.  */
+     This definition relies on tls_state being last.  */
 #define PTHREAD_STRUCT_END_PADDING \
-  (sizeof (struct pthread) - offsetof (struct pthread, rseq_area) \
-   + sizeof ((struct pthread) {}.rseq_area))
+  (sizeof (struct pthread) - offsetof (struct pthread, tls_state) \
+   + sizeof ((struct pthread) {}.tls_state))
 } __attribute ((aligned (TCB_ALIGNMENT)));
 
 static inline bool
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 1d3665d5ed..9b49ee7121 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -691,7 +691,7 @@  __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
 
   /* Inherit rseq registration state.  Without seccomp filters, rseq
      registration will either always fail or always succeed.  */
-  if ((int) THREAD_GETMEM_VOLATILE (self, rseq_area.cpu_id) >= 0)
+  if ((int) RSEQ_GETMEM_VOLATILE (rseq_get_area(), cpu_id) >= 0)
     pd->flags |= ATTR_FLAG_DO_RSEQ;
 
   /* Initialize the field for the ID of the thread which is waiting
diff --git a/sysdeps/generic/dl-rseq.h b/sysdeps/generic/dl-rseq.h
new file mode 100644
index 0000000000..c967f99f33
--- /dev/null
+++ b/sysdeps/generic/dl-rseq.h
@@ -0,0 +1,26 @@ 
+/* RSEQ defines for the dynamic linker. Generic version.
+   Copyright (C) 2024 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* Minimum size of the rseq area.  */
+#define TLS_DL_RSEQ_MIN_SIZE 32
+
+/* Minimum feature size of the rseq area.  */
+#define TLS_DL_RSEQ_MIN_FEATURE_SIZE 20
+
+/* Minimum alignment of the rseq area.  */
+#define TLS_DL_RSEQ_MIN_ALIGN 32
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 50f58a60e3..c8bd39ddcf 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -610,6 +610,18 @@  struct rtld_global_ro
      See comments in elf/dl-tls.c where it is initialized.  */
   EXTERN size_t _dl_tls_static_surplus;
 
+  /* Size of the features present in the rseq area.  */
+  EXTERN size_t _dl_tls_rseq_feature_size;
+
+  /* Alignment requirement of the rseq area.  */
+  EXTERN size_t _dl_tls_rseq_align;
+
+  /* Size of the rseq area allocated in the static TLS block.  */
+  EXTERN size_t _dl_tls_rseq_alloc_size;
+
+  /* Offset of the rseq area from the thread pointer.  */
+  EXTERN ptrdiff_t _dl_tls_rseq_offset;
+
   /* Name of the shared object to be profiled (if any).  */
   EXTERN const char *_dl_profile;
   /* Filename of the output file.  */
diff --git a/sysdeps/i386/nptl/tcb-access.h b/sysdeps/i386/nptl/tcb-access.h
index 4b6221e103..e6988186d0 100644
--- a/sysdeps/i386/nptl/tcb-access.h
+++ b/sysdeps/i386/nptl/tcb-access.h
@@ -123,3 +123,59 @@ 
 			 "i" (offsetof (struct pthread, member)),	      \
 			 "r" (idx));					      \
        }})
+
+
+/* Read member of the RSEQ area directly.  */
+#define RSEQ_GETMEM_VOLATILE(descr, member) \
+  ({ __typeof (descr->member) __value;					      \
+     ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset);		      \
+     _Static_assert (sizeof (__value) == 1				      \
+		     || sizeof (__value) == 4				      \
+		     || sizeof (__value) == 8,				      \
+		     "size of per-thread data");			      \
+     if (sizeof (__value) == 1)						      \
+       asm volatile ("movb %%gs:%P2(%3),%b0"				      \
+		     : "=q" (__value)					      \
+		     : "0" (0), "i" (offsetof (struct rseq_area, member)),   \
+		     "r" (_rseq_offset));					      \
+     else if (sizeof (__value) == 4)					      \
+       asm volatile ("movl %%gs:%P1(%2),%0"				      \
+		     : "=r" (__value)					      \
+		     : "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else /* 8 */							      \
+       {								      \
+	 asm volatile  ("movl %%gs:%P1(%2),%%eax\n\t"			      \
+			"movl %%gs:4+%P1(%2),%%edx"			      \
+			: "=&A" (__value)				      \
+			: "i" (offsetof (struct rseq_area, member)),	      \
+			  "r" (_rseq_offset));				      \
+       }								      \
+     __value; })
+
+/* Set member of the RSEQ area directly.  */
+#define RSEQ_SETMEM(descr, member, value) \
+  ({									      \
+     ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset);		      \
+     _Static_assert (sizeof (descr->member) == 1			      \
+		     || sizeof (descr->member) == 4			      \
+		     || sizeof (descr->member) == 8,			      \
+		     "size of per-thread data");			      \
+     if (sizeof (descr->member) == 1)					      \
+       asm volatile ("movb %b0,%%gs:%P1(%2)" :				      \
+		     : "iq" (value),					      \
+		       "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else if (sizeof (descr->member) == 4)				      \
+       asm volatile ("movl %0,%%gs:%P1(%2)" :				      \
+		     : "ir" (value),					      \
+		       "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else /* 8 */							      \
+       {								      \
+	 asm volatile ("movl %%eax,%%gs:%P1(%2)\n\t"			      \
+		       "movl %%edx,%%gs:4+%P1(%2)" :			      \
+		       : "A" ((uint64_t) cast_to_integer (value)),	      \
+			 "i" (offsetof (struct rseq_area, member)),	      \
+			 "r" (_rseq_offset));				      \
+       }})
diff --git a/sysdeps/nptl/dl-tls_init_tp.c b/sysdeps/nptl/dl-tls_init_tp.c
index 2f9750c50b..1cfaf38cf8 100644
--- a/sysdeps/nptl/dl-tls_init_tp.c
+++ b/sysdeps/nptl/dl-tls_init_tp.c
@@ -107,16 +107,22 @@  __tls_init_tp (void)
     do_rseq = TUNABLE_GET (rseq, int, NULL);
     if (rseq_register_current_thread (pd, do_rseq))
       {
-        _rseq_size = sizeof (pd->rseq_area);
+        _rseq_size = GLRO (dl_tls_rseq_alloc_size);
       }
 
+    /* If the registration fails or is disabled by tunable, the public rseq
+       size will be '0' regardless of the size of the allocated rseq area.  An
+       rseq area of at least 32 bytes is always allocated since application
+       code is allowed to test the status of the rseq registration with
+       'rseq->cpu_id >= 0'.  */
+
 #ifdef RSEQ_SIG
     /* This should be a compile-time constant, but the current
        infrastructure makes it difficult to determine its value.  Not
        all targets support __thread_pointer, so set __rseq_offset only
        if the rseq registration may have happened because RSEQ_SIG is
        defined.  */
-    _rseq_offset = (char *) &pd->rseq_area - (char *) __thread_pointer ();
+    _rseq_offset = GLRO (dl_tls_rseq_offset);
 #endif
   }
 
diff --git a/sysdeps/nptl/tcb-access.h b/sysdeps/nptl/tcb-access.h
index 600433766f..9532f30022 100644
--- a/sysdeps/nptl/tcb-access.h
+++ b/sysdeps/nptl/tcb-access.h
@@ -30,3 +30,8 @@ 
   descr->member = (value)
 #define THREAD_SETMEM_NC(descr, member, idx, value) \
   descr->member[idx] = (value)
+
+#define RSEQ_GETMEM_VOLATILE(descr, member) \
+  THREAD_GETMEM_VOLATILE(descr, member)
+#define RSEQ_SETMEM(descr, member, value) \
+  THREAD_SETMEM(descr, member, value)
diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile
index 415aa1f14d..6bcf81461b 100644
--- a/sysdeps/unix/sysv/linux/Makefile
+++ b/sysdeps/unix/sysv/linux/Makefile
@@ -261,6 +261,11 @@  tests-internal += \
   tst-rseq-disable \
   # tests-internal
 
+tests-static += \
+  tst-rseq-disable-static \
+  tst-rseq-static \
+  # tests-static
+
 tests-time64 += \
   tst-adjtimex-time64 \
   tst-clock_adjtime-time64 \
@@ -394,6 +399,7 @@  $(objpfx)tst-mount-compile.out: ../sysdeps/unix/sysv/linux/tst-mount-compile.py
 $(objpfx)tst-mount-compile.out: $(sysdeps-linux-python-deps)
 
 tst-rseq-disable-ENV = GLIBC_TUNABLES=glibc.pthread.rseq=0
+tst-rseq-disable-static-ENV = GLIBC_TUNABLES=glibc.pthread.rseq=0
 
 endif # $(subdir) == misc
 
@@ -655,4 +661,8 @@  tests += \
 tests-internal += \
   tst-rseq-nptl \
   # tests-internal
+
+tests-static += \
+  tst-rseq-nptl-static \
+  # tests-static
 endif
diff --git a/sysdeps/unix/sysv/linux/dl-parse_auxv.h b/sysdeps/unix/sysv/linux/dl-parse_auxv.h
index ea2a58ecb1..51943dd483 100644
--- a/sysdeps/unix/sysv/linux/dl-parse_auxv.h
+++ b/sysdeps/unix/sysv/linux/dl-parse_auxv.h
@@ -21,6 +21,7 @@ 
 #include <fpu_control.h>
 #include <ldsodefs.h>
 #include <link.h>
+#include <dl-rseq.h>
 
 typedef ElfW(Addr) dl_parse_auxv_t[AT_MINSIGSTKSZ + 1];
 
@@ -59,5 +60,10 @@  void _dl_parse_auxv (ElfW(auxv_t) *av, dl_parse_auxv_t auxv_values)
     GLRO(dl_sysinfo) = auxv_values[AT_SYSINFO];
 #endif
 
+  GLRO(dl_tls_rseq_feature_size) = MAX (auxv_values[AT_RSEQ_FEATURE_SIZE],
+		  TLS_DL_RSEQ_MIN_FEATURE_SIZE);
+  GLRO(dl_tls_rseq_align) = MAX (auxv_values[AT_RSEQ_ALIGN],
+		  TLS_DL_RSEQ_MIN_ALIGN);
+
   DL_PLATFORM_AUXV
 }
diff --git a/sysdeps/unix/sysv/linux/rseq-internal.h b/sysdeps/unix/sysv/linux/rseq-internal.h
index 48eebc1e16..4123072274 100644
--- a/sysdeps/unix/sysv/linux/rseq-internal.h
+++ b/sysdeps/unix/sysv/linux/rseq-internal.h
@@ -24,6 +24,24 @@ 
 #include <stdbool.h>
 #include <stdio.h>
 #include <sys/rseq.h>
+#include <thread_pointer.h>
+#include <ldsodefs.h>
+
+/* rseq area registered with the kernel.  Use a custom definition
+   here to isolate from kernel struct rseq changes.  The
+   implementation of sched_getcpu needs access to the cpu_id field;
+   the other fields are unused and not included here.  */
+struct rseq_area
+{
+  uint32_t cpu_id_start;
+  uint32_t cpu_id;
+};
+
+static inline struct rseq_area *
+rseq_get_area(void)
+{
+  return (struct rseq_area *) ((char *) __thread_pointer() + GLRO (dl_tls_rseq_offset));
+}
 
 #ifdef RSEQ_SIG
 static inline bool
@@ -31,20 +49,23 @@  rseq_register_current_thread (struct pthread *self, bool do_rseq)
 {
   if (do_rseq)
     {
-      int ret = INTERNAL_SYSCALL_CALL (rseq, &self->rseq_area,
-                                       sizeof (self->rseq_area),
+      /* The kernel expects 'rseq_area->rseq_cs == NULL' on registration, so
+         zero the whole rseq area.  */
+      memset(rseq_get_area(), 0, GLRO (dl_tls_rseq_alloc_size));
+      int ret = INTERNAL_SYSCALL_CALL (rseq, rseq_get_area(),
+                                       GLRO (dl_tls_rseq_alloc_size),
                                        0, RSEQ_SIG);
       if (!INTERNAL_SYSCALL_ERROR_P (ret))
         return true;
     }
-  THREAD_SETMEM (self, rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+  RSEQ_SETMEM (rseq_get_area(), cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
   return false;
 }
 #else /* RSEQ_SIG */
 static inline bool
 rseq_register_current_thread (struct pthread *self, bool do_rseq)
 {
-  THREAD_SETMEM (self, rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+  RSEQ_SETMEM (rseq_get_area(), cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
   return false;
 }
 #endif /* RSEQ_SIG */
diff --git a/sysdeps/unix/sysv/linux/sched_getcpu.c b/sysdeps/unix/sysv/linux/sched_getcpu.c
index 72a3360550..3cdf854316 100644
--- a/sysdeps/unix/sysv/linux/sched_getcpu.c
+++ b/sysdeps/unix/sysv/linux/sched_getcpu.c
@@ -19,6 +19,7 @@ 
 #include <sched.h>
 #include <sysdep.h>
 #include <sysdep-vdso.h>
+#include <rseq-internal.h>
 
 static int
 vsyscall_sched_getcpu (void)
@@ -36,6 +37,6 @@  vsyscall_sched_getcpu (void)
 int
 sched_getcpu (void)
 {
-  int cpu_id = THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id);
+  int cpu_id = RSEQ_GETMEM_VOLATILE (rseq_get_area(), cpu_id);
   return __glibc_likely (cpu_id >= 0) ? cpu_id : vsyscall_sched_getcpu ();
 }
diff --git a/sysdeps/unix/sysv/linux/tst-rseq-disable-static.c b/sysdeps/unix/sysv/linux/tst-rseq-disable-static.c
new file mode 100644
index 0000000000..2687d13d3d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/tst-rseq-disable-static.c
@@ -0,0 +1 @@ 
+#include "tst-rseq-disable.c"
diff --git a/sysdeps/unix/sysv/linux/tst-rseq-disable.c b/sysdeps/unix/sysv/linux/tst-rseq-disable.c
index bbc655bec4..cd28f1ccfd 100644
--- a/sysdeps/unix/sysv/linux/tst-rseq-disable.c
+++ b/sysdeps/unix/sysv/linux/tst-rseq-disable.c
@@ -26,27 +26,28 @@ 
 #include <unistd.h>
 
 #ifdef RSEQ_SIG
+# include <sys/auxv.h>
+# include "tst-rseq.h"
+
+static __thread struct rseq local_rseq;
 
 /* Check that rseq can be registered and has not been taken by glibc.  */
 static void
 check_rseq_disabled (void)
 {
-  struct pthread *pd = THREAD_SELF;
+  struct rseq *rseq_area = (struct rseq *) ((char *) __thread_pointer () + __rseq_offset);
 
   TEST_COMPARE (__rseq_flags, 0);
-  TEST_VERIFY ((char *) __thread_pointer () + __rseq_offset
-               == (char *) &pd->rseq_area);
   TEST_COMPARE (__rseq_size, 0);
-  TEST_COMPARE ((int) pd->rseq_area.cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
+  TEST_COMPARE ((int) rseq_area->cpu_id, RSEQ_CPU_ID_REGISTRATION_FAILED);
 
-  int ret = syscall (__NR_rseq, &pd->rseq_area, sizeof (pd->rseq_area),
-                     0, RSEQ_SIG);
+  int ret = syscall (__NR_rseq, &local_rseq, RSEQ_TEST_MIN_SIZE, 0, RSEQ_SIG);
   if (ret == 0)
     {
-      ret = syscall (__NR_rseq, &pd->rseq_area, sizeof (pd->rseq_area),
+      ret = syscall (__NR_rseq, &local_rseq, RSEQ_TEST_MIN_SIZE,
                      RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
       TEST_COMPARE (ret, 0);
-      pd->rseq_area.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
+      rseq_area->cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
     }
   else
     {
diff --git a/sysdeps/unix/sysv/linux/tst-rseq-nptl-static.c b/sysdeps/unix/sysv/linux/tst-rseq-nptl-static.c
new file mode 100644
index 0000000000..6e2c923bb9
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/tst-rseq-nptl-static.c
@@ -0,0 +1 @@ 
+#include "tst-rseq-nptl.c"
diff --git a/sysdeps/unix/sysv/linux/tst-rseq-static.c b/sysdeps/unix/sysv/linux/tst-rseq-static.c
new file mode 100644
index 0000000000..1d97f3bd3d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/tst-rseq-static.c
@@ -0,0 +1 @@ 
+#include "tst-rseq.c"
diff --git a/sysdeps/unix/sysv/linux/tst-rseq.c b/sysdeps/unix/sysv/linux/tst-rseq.c
index 2c90409ba0..c8c0518a5d 100644
--- a/sysdeps/unix/sysv/linux/tst-rseq.c
+++ b/sysdeps/unix/sysv/linux/tst-rseq.c
@@ -31,18 +31,32 @@ 
 # include <syscall.h>
 # include <thread_pointer.h>
 # include <tls.h>
+# include <sys/auxv.h>
 # include "tst-rseq.h"
 
 static void
 do_rseq_main_test (void)
 {
-  struct pthread *pd = THREAD_SELF;
+  size_t rseq_align = MAX (getauxval (AT_RSEQ_ALIGN), RSEQ_TEST_MIN_ALIGN);
+  size_t rseq_size = roundup (MAX (getauxval (AT_RSEQ_FEATURE_SIZE), RSEQ_TEST_MIN_SIZE), rseq_align);
+  struct rseq *rseq = __thread_pointer () + __rseq_offset;
 
   TEST_VERIFY_EXIT (rseq_thread_registered ());
   TEST_COMPARE (__rseq_flags, 0);
-  TEST_VERIFY ((char *) __thread_pointer () + __rseq_offset
-               == (char *) &pd->rseq_area);
-  TEST_COMPARE (__rseq_size, sizeof (pd->rseq_area));
+  TEST_COMPARE (__rseq_size, rseq_size);
+  /* The size of the rseq area must be a multiple of the alignment.  */
+  TEST_VERIFY ((__rseq_size % rseq_align) == 0);
+  /* The rseq area address must be aligned.  */
+  TEST_VERIFY (((unsigned long) rseq % rseq_align) == 0);
+#if TLS_TCB_AT_TP
+  /* The rseq area block should come before the thread pointer and be at least 32 bytes. */
+  TEST_VERIFY (__rseq_offset <= -RSEQ_TEST_MIN_SIZE);
+#elif TLS_DTV_AT_TP
+  /* The rseq area block should come after the thread pointer. */
+  TEST_VERIFY (__rseq_offset >= 0);
+#else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+#endif
 }
 
 static void
diff --git a/sysdeps/unix/sysv/linux/tst-rseq.h b/sysdeps/unix/sysv/linux/tst-rseq.h
index dc603327d3..4931aa3d14 100644
--- a/sysdeps/unix/sysv/linux/tst-rseq.h
+++ b/sysdeps/unix/sysv/linux/tst-rseq.h
@@ -23,11 +23,18 @@ 
 #include <syscall.h>
 #include <sys/rseq.h>
 #include <tls.h>
+#include <rseq-internal.h>
+
+#define RSEQ_TEST_MIN_SIZE 32
+#define RSEQ_TEST_MIN_FEATURE_SIZE 20
+#define RSEQ_TEST_MIN_ALIGN 32
 
 static inline bool
 rseq_thread_registered (void)
 {
-  return THREAD_GETMEM_VOLATILE (THREAD_SELF, rseq_area.cpu_id) >= 0;
+  struct rseq_area *rseq = (struct rseq_area *) ((char *) __thread_pointer () + __rseq_offset);
+
+  return __atomic_load_n (&rseq->cpu_id, __ATOMIC_RELAXED) >= 0;
 }
 
 static inline int
diff --git a/sysdeps/x86_64/nptl/tcb-access.h b/sysdeps/x86_64/nptl/tcb-access.h
index d35948f111..75ba4b3ce9 100644
--- a/sysdeps/x86_64/nptl/tcb-access.h
+++ b/sysdeps/x86_64/nptl/tcb-access.h
@@ -130,3 +130,59 @@ 
 			 "i" (offsetof (struct pthread, member[0])),	      \
 			 "r" (idx));					      \
        }})
+
+/* Read member of the RSEQ area directly.  */
+# define RSEQ_GETMEM_VOLATILE(descr, member) \
+  ({ __typeof (descr->member) __value;					      \
+     ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset);		      \
+     _Static_assert (sizeof (__value) == 1				      \
+		     || sizeof (__value) == 4				      \
+		     || sizeof (__value) == 8,				      \
+		     "size of per-thread data");			      \
+     if (sizeof (__value) == 1)						      \
+       asm volatile ("movb %%fs:%P2(%q3),%b0"				      \
+		     : "=q" (__value)					      \
+		     : "0" (0), "i" (offsetof (struct rseq_area, member)),    \
+		       "r" (_rseq_offset));					      \
+     else if (sizeof (__value) == 4)					      \
+       asm volatile ("movl %%fs:%P1(%q2),%0"				      \
+		     : "=r" (__value)					      \
+		     : "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else /* 8 */							      \
+       {								      \
+	 asm volatile ("movq %%fs:%P1(%q2),%q0"				      \
+		       : "=r" (__value)					      \
+		       : "i" (offsetof (struct rseq_area, member)),	      \
+			 "r" (_rseq_offset));				      \
+       }								      \
+     __value; })
+
+/* Set member of the RSEQ area directly.  */
+# define RSEQ_SETMEM(descr, member, value) \
+  ({									      \
+     ptrdiff_t _rseq_offset = GLRO (dl_tls_rseq_offset);		      \
+     _Static_assert (sizeof (descr->member) == 1			      \
+		     || sizeof (descr->member) == 4			      \
+		     || sizeof (descr->member) == 8,			      \
+		     "size of per-thread data");			      \
+     if (sizeof (descr->member) == 1)					      \
+       asm volatile ("movb %b0,%%fs:%P1(%q2)" :				      \
+		     : "iq" (value),					      \
+		       "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else if (sizeof (descr->member) == 4)				      \
+       asm volatile ("movl %0,%%fs:%P1(%q2)" :				      \
+		     : IMM_MODE (value),				      \
+		       "i" (offsetof (struct rseq_area, member)),	      \
+		       "r" (_rseq_offset));					      \
+     else /* 8 */							      \
+       {								      \
+	 /* Since movq takes a signed 32-bit immediate or a register source   \
+	    operand, use "er" constraint for 32-bit signed integer constant   \
+	    or register.  */						      \
+	 asm volatile ("movq %q0,%%fs:%P1(%q2)" :			      \
+		       : "er" ((uint64_t) cast_to_integer (value)),	      \
+			 "i" (offsetof (struct rseq_area, member)),	      \
+			 "r" (_rseq_offset));				      \
+       }})