[v3,24/29] elf: Handle ld.so with LOAD segment gaps in _dl_find_object (bug 31943)

Message ID 86de3d1d9e32d296241d5939f1ffe07203672625.1727624528.git.fweimer@redhat.com
State New
Series Teach glibc about possible page sizes and handle gaps in ld.so

Commit Message

Florian Weimer Sept. 29, 2024, 4:36 p.m. UTC
If binutils bug 28743 is present (that is, if HAVE_LD_RELRO_LOAD_GAPS
is defined), compile code into ld.so that checks whether ld.so itself
has such LOAD segment gaps and, if so, updates the _dl_find_object
data structures accordingly.

Likewise, if the architecture supports multiple page sizes, compile
in the same code to handle an ld.so loaded with gaps.
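
For illustration only (not part of the patch), here is a stand-alone
sketch of the contiguity check described above.  It walks an object's
program headers and reports whether consecutive PT_LOAD segments leave
a page-aligned gap; the page_size parameter is a hypothetical stand-in
for GLRO(dl_pagesize).

  #include <link.h>
  #include <stdbool.h>
  #include <stddef.h>

  /* Sketch only: return true if the PT_LOAD segments in PHDR (PHNUM
     entries) would form one contiguous, page-aligned mapping.  */
  static bool
  load_segments_contiguous (const ElfW(Phdr) *phdr, size_t phnum,
                            ElfW(Addr) page_size)
  {
    ElfW(Addr) expected = 0;
    for (const ElfW(Phdr) *ph = phdr; ph < phdr + phnum; ++ph)
      if (ph->p_type == PT_LOAD)
        {
          ElfW(Addr) mapstart = ph->p_vaddr & ~(page_size - 1);
          if (expected != 0 && expected != mapstart)
            /* This segment does not start on the page following the
               previous one: there is a gap.  */
            return false;
          ElfW(Addr) allocend = ph->p_vaddr + ph->p_memsz;
          /* The next segment is expected on the page following this
             one.  */
          expected = (allocend + page_size - 1) & ~(page_size - 1);
        }
    return true;
  }
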
---
 elf/dl-find_object.c | 78 ++++++++++++++++++++++++++++----------------
 elf/rtld.c           | 28 +++++++++++++++-
 2 files changed, 77 insertions(+), 29 deletions(-)

Patch

diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 449302eda3..997cc0078b 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -26,6 +26,7 @@ 
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <sys/pagesize.h>
 
 /* Fallback implementation of _dl_find_object.  It uses a linear
    search, needs locking, and is not async-signal-safe.  It is used in
@@ -466,6 +467,38 @@  __dl_find_object (void *pc1, struct dl_find_object *result)
 hidden_def (__dl_find_object)
 weak_alias (__dl_find_object, _dl_find_object)
 
+/* Subroutine of _dlfo_process_initial to split out noncontiguous link
+   maps.  NODELETE is the number of used _dlfo_nodelete_mappings
+   elements.  It is incremented as needed, and the new NODELETE value
+   is returned.  */
+static size_t
+_dlfo_process_initial_noncontiguous_map (struct link_map *map,
+                                         size_t nodelete)
+{
+  struct dl_find_object_internal dlfo;
+  _dl_find_object_from_map (map, &dlfo);
+
+  /* PT_LOAD segments for a non-contiguous link map are added to the
+     non-closeable mappings.  */
+  const ElfW(Phdr) *ph = map->l_phdr;
+  const ElfW(Phdr) *ph_end = map->l_phdr + map->l_phnum;
+  for (; ph < ph_end; ++ph)
+    if (ph->p_type == PT_LOAD)
+      {
+        if (_dlfo_nodelete_mappings != NULL)
+          {
+            /* Second pass only.  */
+            _dlfo_nodelete_mappings[nodelete] = dlfo;
+            _dlfo_nodelete_mappings[nodelete].map_start
+              = ph->p_vaddr + map->l_addr;
+            _dlfo_nodelete_mappings[nodelete].map_end
+              = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
+          }
+        ++nodelete;
+      }
+  return nodelete;
+}
+
 /* _dlfo_process_initial is called twice.  First to compute the array
    sizes from the initial loaded mappings.  Second to fill in the
    bases and infos arrays with the (still unsorted) data.  Returns the
@@ -477,29 +510,8 @@  _dlfo_process_initial (void)
 
   size_t nodelete = 0;
   if (!main_map->l_contiguous)
-    {
-      struct dl_find_object_internal dlfo;
-      _dl_find_object_from_map (main_map, &dlfo);
-
-      /* PT_LOAD segments for a non-contiguous are added to the
-         non-closeable mappings.  */
-      for (const ElfW(Phdr) *ph = main_map->l_phdr,
-             *ph_end = main_map->l_phdr + main_map->l_phnum;
-           ph < ph_end; ++ph)
-        if (ph->p_type == PT_LOAD)
-          {
-            if (_dlfo_nodelete_mappings != NULL)
-              {
-                /* Second pass only.  */
-                _dlfo_nodelete_mappings[nodelete] = dlfo;
-                _dlfo_nodelete_mappings[nodelete].map_start
-                  = ph->p_vaddr + main_map->l_addr;
-                _dlfo_nodelete_mappings[nodelete].map_end
-                  = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
-              }
-            ++nodelete;
-          }
-    }
+    /* Contiguous case already handled in _dl_find_object_init.  */
+    nodelete = _dlfo_process_initial_noncontiguous_map (main_map, nodelete);
 
   size_t loaded = 0;
   for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
@@ -511,11 +523,21 @@  _dlfo_process_initial (void)
           /* lt_library link maps are implicitly NODELETE.  */
           if (l->l_type == lt_library || l->l_nodelete_active)
             {
-              if (_dlfo_nodelete_mappings != NULL)
-                /* Second pass only.  */
-                _dl_find_object_from_map
-                  (l, _dlfo_nodelete_mappings + nodelete);
-              ++nodelete;
+#if defined SHARED && (defined HAVE_LD_RELRO_LOAD_GAPS \
+                       || PAGE_SIZE_MIN != PAGE_SIZE_MAX)
+              /* The kernel may have loaded ld.so with gaps.   */
+              if (!l->l_contiguous && l == &GL(dl_rtld_map))
+                nodelete
+                  = _dlfo_process_initial_noncontiguous_map (l, nodelete);
+              else
+#endif /* SHARED && (HAVE_LD_RELRO_LOAD_GAPS || PAGE_SIZE_MIN != PAGE_SIZE_MAX) */
+                {
+                  if (_dlfo_nodelete_mappings != NULL)
+                    /* Second pass only.  */
+                    _dl_find_object_from_map
+                      (l, _dlfo_nodelete_mappings + nodelete);
+                  ++nodelete;
+                }
             }
           else if (l->l_type == lt_loaded)
             {
diff --git a/elf/rtld.c b/elf/rtld.c
index 50c01b9bb4..ac64e38346 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -53,6 +53,7 @@ 
 #include <dl-find_object.h>
 #include <dl-audit-check.h>
 #include <dl-call_tls_init_tp.h>
+#include <sys/pagesize.h>
 
 #include <assert.h>
 
@@ -1262,7 +1263,7 @@  rtld_setup_main_map (struct link_map *main_map)
 
 /* Set up the program header information for the dynamic linker
    itself.  It can be accessed via _r_debug and dl_iterate_phdr
-   callbacks.  */
+   callbacks, and it is used by _dl_find_object.  */
 static void
 rtld_setup_phdr (void)
 {
@@ -1279,6 +1280,31 @@  rtld_setup_phdr (void)
   GL(dl_rtld_map).l_phdr = rtld_phdr;
   GL(dl_rtld_map).l_phnum = rtld_ehdr->e_phnum;
 
+  GL(dl_rtld_map).l_contiguous = 1;
+#if defined HAVE_LD_RELRO_LOAD_GAPS || PAGE_SIZE_MIN != PAGE_SIZE_MAX
+  /* The linker may not have produced a contiguous object.  The kernel
+     will load the object with actual gaps (unlike the glibc loader
+     for shared objects, which always produces a contiguous mapping).
+     See similar logic in rtld_setup_main_map above.  */
+  {
+    ElfW(Addr) expected_load_address = 0;
+    for (const ElfW(Phdr) *ph = rtld_phdr; ph < &rtld_phdr[rtld_ehdr->e_phnum];
+	 ++ph)
+      if (ph->p_type == PT_LOAD)
+	{
+	  ElfW(Addr) mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
+	  if (GL(dl_rtld_map).l_contiguous && expected_load_address != 0
+	      && expected_load_address != mapstart)
+	    GL(dl_rtld_map).l_contiguous = 0;
+	  ElfW(Addr) allocend = ph->p_vaddr + ph->p_memsz;
+	  /* The next expected address is the page following this load
+	     segment.  */
+	  expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
+				   & ~(GLRO(dl_pagesize) - 1));
+	}
+  }
+#endif /* HAVE_LD_RELRO_LOAD_GAPS || PAGE_SIZE_MIN != PAGE_SIZE_MAX */
+
   /* PT_GNU_RELRO is usually the last phdr.  */
   size_t cnt = rtld_ehdr->e_phnum;
   while (cnt-- > 0)