diff mbox series

[v2,2/2] elf: Handle ld.so with LOAD segment gaps in _dl_find_object (bug 31943)

Message ID 4d4499f86affd74c8409318646ea949e1c445873.1720176100.git.fweimer@redhat.com
State New
Headers show
Series elf: Handle ld.so with LOAD segment gaps in _dl_find_object (bug 31943) | expand

Commit Message

Florian Weimer July 5, 2024, 10:43 a.m. UTC
As ld.so is loaded by the kernel, gaps are not filled by the initial
mapping. Check the program headers for a gap, and if there is one,
handle a non-contiguous ld.so link map similar to a non-contiguous
main link map in _dlfo_process_initial.

Ideally, the extra code should not be compiled in if the binutils is
recent enough and used in a configuration that does not produce gaps.
For example, binutils 2.39 and later have commit 9833b7757d246f22db4
("PR28824, relro security issues"), which should avoid creation of
gaps on the x86 architecture.
---
 elf/dl-find_object.c | 76 ++++++++++++++++++++++++++++----------------
 elf/rtld.c           | 25 ++++++++++++++-
 2 files changed, 72 insertions(+), 29 deletions(-)
diff mbox series

Patch

diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 449302eda3..056fbda64c 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -466,6 +466,38 @@  __dl_find_object (void *pc1, struct dl_find_object *result)
 hidden_def (__dl_find_object)
 weak_alias (__dl_find_object, _dl_find_object)
 
+/* Subroutine of _dlfo_process_initial to split out noncontiguous link
+   maps.  NODELETE is the number of used _dlfo_nodelete_mappings
+   elements.  It is incremented as needed, and the new NODELETE value
+   is returned.  */
+static size_t
+_dlfo_process_initial_noncontiguous_map (struct link_map *map,
+                                         size_t nodelete)
+{
+  struct dl_find_object_internal dlfo;
+  _dl_find_object_from_map (map, &dlfo);
+
+  /* PT_LOAD segments for a non-contiguous link map are added to the
+     non-closeable mappings.  */
+  const ElfW(Phdr) *ph = map->l_phdr;
+  const ElfW(Phdr) *ph_end = map->l_phdr + map->l_phnum;
+  for (; ph < ph_end; ++ph)
+    if (ph->p_type == PT_LOAD)
+      {
+        if (_dlfo_nodelete_mappings != NULL)
+          {
+            /* Second pass only.  */
+            _dlfo_nodelete_mappings[nodelete] = dlfo;
+            _dlfo_nodelete_mappings[nodelete].map_start
+              = ph->p_vaddr + map->l_addr;
+            _dlfo_nodelete_mappings[nodelete].map_end
+              = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
+          }
+        ++nodelete;
+      }
+  return nodelete;
+}
+
 /* _dlfo_process_initial is called twice.  First to compute the array
    sizes from the initial loaded mappings.  Second to fill in the
    bases and infos arrays with the (still unsorted) data.  Returns the
@@ -477,29 +509,8 @@  _dlfo_process_initial (void)
 
   size_t nodelete = 0;
   if (!main_map->l_contiguous)
-    {
-      struct dl_find_object_internal dlfo;
-      _dl_find_object_from_map (main_map, &dlfo);
-
-      /* PT_LOAD segments for a non-contiguous are added to the
-         non-closeable mappings.  */
-      for (const ElfW(Phdr) *ph = main_map->l_phdr,
-             *ph_end = main_map->l_phdr + main_map->l_phnum;
-           ph < ph_end; ++ph)
-        if (ph->p_type == PT_LOAD)
-          {
-            if (_dlfo_nodelete_mappings != NULL)
-              {
-                /* Second pass only.  */
-                _dlfo_nodelete_mappings[nodelete] = dlfo;
-                _dlfo_nodelete_mappings[nodelete].map_start
-                  = ph->p_vaddr + main_map->l_addr;
-                _dlfo_nodelete_mappings[nodelete].map_end
-                  = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
-              }
-            ++nodelete;
-          }
-    }
+    /* Contiguous case already handled in _dl_find_object_init.  */
+    nodelete = _dlfo_process_initial_noncontiguous_map (main_map, nodelete);
 
   size_t loaded = 0;
   for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
@@ -511,11 +522,20 @@  _dlfo_process_initial (void)
           /* lt_library link maps are implicitly NODELETE.  */
           if (l->l_type == lt_library || l->l_nodelete_active)
             {
-              if (_dlfo_nodelete_mappings != NULL)
-                /* Second pass only.  */
-                _dl_find_object_from_map
-                  (l, _dlfo_nodelete_mappings + nodelete);
-              ++nodelete;
+#ifdef SHARED
+              /* The kernel may have loaded ld.so with gaps.  */
+              if (!l->l_contiguous && l == &GL(dl_rtld_map))
+                nodelete
+                  = _dlfo_process_initial_noncontiguous_map (l, nodelete);
+              else
+#endif
+                {
+                  if (_dlfo_nodelete_mappings != NULL)
+                    /* Second pass only.  */
+                    _dl_find_object_from_map
+                      (l, _dlfo_nodelete_mappings + nodelete);
+                  ++nodelete;
+                }
             }
           else if (l->l_type == lt_loaded)
             {
diff --git a/elf/rtld.c b/elf/rtld.c
index 60b0f416e8..cc71f1ded6 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -1286,7 +1286,7 @@  rtld_setup_main_map (struct link_map *main_map)
 
 /* Set up the program header information for the dynamic linker
    itself.  It can be accessed via _r_debug and dl_iterate_phdr
-   callbacks.  */
+   callbacks, and it is used by _dl_find_object.  */
 static void
 rtld_setup_phdr (void)
 {
@@ -1303,6 +1303,29 @@  rtld_setup_phdr (void)
   GL(dl_rtld_map).l_phdr = rtld_phdr;
   GL(dl_rtld_map).l_phnum = rtld_ehdr->e_phnum;
 
+  /* The linker may not have produced a contiguous object.  The kernel
+     will load the object with actual gaps (unlike the glibc loader
+     for shared objects, which always produces a contiguous mapping).
+     See similar logic in rtld_setup_main_map above.  */
+  GL(dl_rtld_map).l_contiguous = 1;
+  {
+    ElfW(Addr) expected_load_address = 0;
+    for (const ElfW(Phdr) *ph = rtld_phdr; ph < &rtld_phdr[rtld_ehdr->e_phnum];
+	 ++ph)
+      if (ph->p_type == PT_LOAD)
+	{
+	  ElfW(Addr) mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
+	  if (GL(dl_rtld_map).l_contiguous && expected_load_address != 0
+	      && expected_load_address != mapstart)
+	    GL(dl_rtld_map).l_contiguous = 0;
+	  ElfW(Addr) allocend = ph->p_vaddr + ph->p_memsz;
+	  /* The next expected address is the page following this load
+	     segment.  */
+	  expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
+				   & ~(GLRO(dl_pagesize) - 1));
+	}
+  }
+
   /* PT_GNU_RELRO is usually the last phdr.  */
   size_t cnt = rtld_ehdr->e_phnum;
   while (cnt-- > 0)