Message ID | e865fa9935c5d16d613c89f6cd8000cc2c1b956c.1618301209.git.szabolcs.nagy@arm.com |
---|---|
State | New |
Series | Dynamic TLS related data race fixes |
On Tue, Apr 13, 2021 at 2:32 AM Szabolcs Nagy via Libc-alpha <libc-alpha@sourceware.org> wrote:
>
> Lazy tlsdesc relocation is racy because the static tls optimization and
> tlsdesc management operations are done without holding the dlopen lock.
>
> This is similar to commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67
> for aarch64, but it fixes a different race: bug 27137.
>
> On i386 the code is a bit more complicated than on x86_64 because both
> rel and rela relocs are supported.
> ---
>  sysdeps/i386/dl-machine.h | 76 ++++++++++++++++++---------------------
>  1 file changed, 34 insertions(+), 42 deletions(-)
> [...]
> --
> 2.17.1

LGTM. Thanks.
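For context on the race being removed: an i386 TLS descriptor is a two-word GOT entry, and the lazy path deleted by this patch rewrites both words (td->entry and td->arg) at first use, outside the dlopen lock. The standalone C sketch below only models that kind of unsynchronized two-word update; it is not glibc code and not the exact dlopen-vs-resolver interleaving of bug 27137. The struct layout mirrors the entry/arg pair the patch touches, while the file name, types, values and thread bodies are made up for illustration.

/* race-sketch.c (hypothetical name): a simplified model of the
   unsynchronized descriptor update that lazy TLS-descriptor resolution
   performs, which resolving R_386_TLS_DESC at load time avoids.
   Build: gcc -pthread race-sketch.c  */
#include <pthread.h>
#include <stdio.h>

struct tlsdesc                  /* mirrors the entry/arg pair in the patch */
{
  void *(*entry) (struct tlsdesc *);
  void *arg;
};

static int resolved_offset = 0x1234;  /* stand-in for a resolved TLS offset */

static void *
final_resolver (struct tlsdesc *td)
{
  return td->arg;
}

static void *
lazy_resolver (struct tlsdesc *td)
{
  /* Lazy path: rewrite both descriptor words at first use, with no lock
     held.  A concurrent user of the descriptor can observe the new entry
     together with the old arg (or vice versa).  */
  td->arg = &resolved_offset;
  td->entry = final_resolver;
  return td->entry (td);
}

static struct tlsdesc td = { lazy_resolver, NULL };

static void *
descriptor_user (void *unused)
{
  (void) unused;
  /* Call through the descriptor, as TLS-descriptor call sites do.  */
  return td.entry (&td);
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, descriptor_user, NULL);
  pthread_create (&t2, NULL, descriptor_user, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  puts ("done; the unlocked entry/arg update above is the racy pattern");
  return 0;
}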
diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
index 23e9cc3bfb..590b41d8d7 100644
--- a/sysdeps/i386/dl-machine.h
+++ b/sysdeps/i386/dl-machine.h
@@ -688,50 +688,32 @@ elf_machine_lazy_rel (struct link_map *map,
     }
   else if (__glibc_likely (r_type == R_386_TLS_DESC))
     {
-      struct tlsdesc volatile * __attribute__((__unused__)) td =
-        (struct tlsdesc volatile *)reloc_addr;
-
-      /* Handle relocations that reference the local *ABS* in a simple
-         way, so as to preserve a potential addend.  */
-      if (ELF32_R_SYM (reloc->r_info) == 0)
-        td->entry = _dl_tlsdesc_resolve_abs_plus_addend;
-      /* Given a known-zero addend, we can store a pointer to the
-         reloc in the arg position.  */
-      else if (td->arg == 0)
-        {
-          td->arg = (void*)reloc;
-          td->entry = _dl_tlsdesc_resolve_rel;
-        }
-      else
-        {
-          /* We could handle non-*ABS* relocations with non-zero addends
-             by allocating dynamically an arg to hold a pointer to the
-             reloc, but that sounds pointless.  */
-          const Elf32_Rel *const r = reloc;
-          /* The code below was borrowed from elf_dynamic_do_rel().  */
-          const ElfW(Sym) *const symtab =
-            (const void *) D_PTR (map, l_info[DT_SYMTAB]);
+      const Elf32_Rel *const r = reloc;
+      /* The code below was borrowed from elf_dynamic_do_rel().  */
+      const ElfW(Sym) *const symtab =
+        (const void *) D_PTR (map, l_info[DT_SYMTAB]);
 
+      /* Always initialize TLS descriptors completely at load time, in
+         case static TLS is allocated for it that requires locking.  */
 # ifdef RTLD_BOOTSTRAP
-          /* The dynamic linker always uses versioning.  */
-          assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
+      /* The dynamic linker always uses versioning.  */
+      assert (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL);
 # else
-          if (map->l_info[VERSYMIDX (DT_VERSYM)])
+      if (map->l_info[VERSYMIDX (DT_VERSYM)])
 # endif
-            {
-              const ElfW(Half) *const version =
-                (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
-              ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
-              elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
-                               &map->l_versions[ndx],
-                               (void *) (l_addr + r->r_offset), skip_ifunc);
-            }
+        {
+          const ElfW(Half) *const version =
+            (const void *) D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
+          ElfW(Half) ndx = version[ELFW(R_SYM) (r->r_info)] & 0x7fff;
+          elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
+                           &map->l_versions[ndx],
+                           (void *) (l_addr + r->r_offset), skip_ifunc);
+        }
 # ifndef RTLD_BOOTSTRAP
-          else
-            elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
-                             (void *) (l_addr + r->r_offset), skip_ifunc);
+      else
+        elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)], NULL,
+                         (void *) (l_addr + r->r_offset), skip_ifunc);
 # endif
-        }
     }
   else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
     {
@@ -758,11 +740,21 @@ elf_machine_lazy_rela (struct link_map *map,
     ;
   else if (__glibc_likely (r_type == R_386_TLS_DESC))
     {
-      struct tlsdesc volatile * __attribute__((__unused__)) td =
-        (struct tlsdesc volatile *)reloc_addr;
+      const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
+      const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
+      const ElfW (Sym) *sym = &symtab[symndx];
+      const struct r_found_version *version = NULL;
+
+      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
+        {
+          const ElfW (Half) *vernum =
+            (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
+          version = &map->l_versions[vernum[symndx] & 0x7fff];
+        }
 
-      td->arg = (void*)reloc;
-      td->entry = _dl_tlsdesc_resolve_rela;
+      /* Always initialize TLS descriptors completely at load time, in
+         case static TLS is allocated for it that requires locking.  */
+      elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc);
     }
   else if (__glibc_unlikely (r_type == R_386_IRELATIVE))
     {
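As a side note for anyone reproducing this: R_386_TLS_DESC relocations are emitted for the GNU2 TLS dialect. A minimal module that exercises the rewritten paths could look like the sketch below; the file and library names are just examples, not part of the patch or the glibc test suite.

/* tlsdesc-user.c (example name): TLS access that, when compiled for i386
   with -mtls-dialect=gnu2, goes through a TLS descriptor and therefore
   gets an R_386_TLS_DESC relocation in the resulting shared object.  */
__thread int counter;

int
bump (void)
{
  return ++counter;
}

Building it with "gcc -m32 -fPIC -shared -mtls-dialect=gnu2 -o libtlsdesc-user.so tlsdesc-user.c" and inspecting it with "readelf -r libtlsdesc-user.so" should show R_386_TLS_DESC entries of the kind that the rewritten elf_machine_lazy_rel and elf_machine_lazy_rela paths now resolve fully at load time.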