@@ -466,6 +466,38 @@ __dl_find_object (void *pc1, struct dl_find_object *result)
hidden_def (__dl_find_object)
weak_alias (__dl_find_object, _dl_find_object)
+/* Subroutine of _dlfo_process_initial to split out noncontiguous link
+ maps. NODELETE is the number of used _dlfo_nodelete_mappings
+ elements. It is incremented as needed, and the new NODELETE value
+ is returned. */
+static size_t
+_dlfo_process_initial_noncontiguous_map (struct link_map *map,
+ size_t nodelete)
+{
+ struct dl_find_object_internal dlfo;
+ _dl_find_object_from_map (map, &dlfo);
+
+ /* PT_LOAD segments for a non-contiguous link map are added to the
+ non-closeable mappings. */
+ const ElfW(Phdr) *ph = map->l_phdr;
+ const ElfW(Phdr) *ph_end = map->l_phdr + map->l_phnum;
+ for (; ph < ph_end; ++ph)
+ if (ph->p_type == PT_LOAD)
+ {
+ if (_dlfo_nodelete_mappings != NULL)
+ {
+ /* Second pass only. */
+ _dlfo_nodelete_mappings[nodelete] = dlfo;
+ _dlfo_nodelete_mappings[nodelete].map_start
+ = ph->p_vaddr + map->l_addr;
+ _dlfo_nodelete_mappings[nodelete].map_end
+ = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
+ }
+ ++nodelete;
+ }
+ return nodelete;
+}
+
/* _dlfo_process_initial is called twice. First to compute the array
sizes from the initial loaded mappings. Second to fill in the
bases and infos arrays with the (still unsorted) data. Returns the
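For context, the gap handled by the new helper can be observed from userspace with dl_iterate_phdr.  The sketch below is an illustration only and not part of the patch; it prints every object's PT_LOAD ranges after applying the load bias, so a noncontiguous layout (such as ld.so linked with LOAD segment gaps) becomes visible.  The helper name print_loads is made up for the example.

/* Illustration only: print each object's PT_LOAD ranges so that gaps
   between a link map's segments become visible.  */
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int
print_loads (struct dl_phdr_info *info, size_t size, void *data)
{
  printf ("%s\n", info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main)");
  for (int i = 0; i < info->dlpi_phnum; ++i)
    if (info->dlpi_phdr[i].p_type == PT_LOAD)
      printf ("  PT_LOAD [0x%lx, 0x%lx)\n",
              (unsigned long) (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr),
              (unsigned long) (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr
                               + info->dlpi_phdr[i].p_memsz));
  return 0;
}

int
main (void)
{
  dl_iterate_phdr (print_loads, NULL);
  return 0;
}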
@@ -477,29 +509,8 @@ _dlfo_process_initial (void)
size_t nodelete = 0;
if (!main_map->l_contiguous)
- {
- struct dl_find_object_internal dlfo;
- _dl_find_object_from_map (main_map, &dlfo);
-
- /* PT_LOAD segments for a non-contiguous are added to the
- non-closeable mappings. */
- for (const ElfW(Phdr) *ph = main_map->l_phdr,
- *ph_end = main_map->l_phdr + main_map->l_phnum;
- ph < ph_end; ++ph)
- if (ph->p_type == PT_LOAD)
- {
- if (_dlfo_nodelete_mappings != NULL)
- {
- /* Second pass only. */
- _dlfo_nodelete_mappings[nodelete] = dlfo;
- _dlfo_nodelete_mappings[nodelete].map_start
- = ph->p_vaddr + main_map->l_addr;
- _dlfo_nodelete_mappings[nodelete].map_end
- = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
- }
- ++nodelete;
- }
- }
+ /* Contiguous case already handled in _dl_find_object_init. */
+ nodelete = _dlfo_process_initial_noncontiguous_map (main_map, nodelete);
size_t loaded = 0;
for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
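The hunk above keeps the count-then-fill contract described before _dlfo_process_initial: the first call runs with _dlfo_nodelete_mappings == NULL and only counts, the second call stores the entries, and the helper increments NODELETE on both passes.  A minimal sketch of that idiom, with made-up names (collect, collect_all) and assuming nothing beyond standard C:

/* Minimal sketch of the count-then-fill idiom used by
   _dlfo_process_initial.  The function names are made up.  */
#include <stdlib.h>

static size_t
collect (int *out, const int *in, size_t n)
{
  size_t used = 0;
  for (size_t i = 0; i < n; ++i)
    if (in[i] > 0)            /* Whatever selection criterion applies.  */
      {
        if (out != NULL)      /* Second pass only.  */
          out[used] = in[i];
        ++used;               /* Counted on both passes.  */
      }
  return used;
}

static int *
collect_all (const int *in, size_t n, size_t *count)
{
  *count = collect (NULL, in, n);            /* First pass: count.  */
  int *out = calloc (*count > 0 ? *count : 1, sizeof *out);
  if (out != NULL)
    collect (out, in, n);                    /* Second pass: fill.  */
  return out;
}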
@@ -511,11 +522,20 @@ _dlfo_process_initial (void)
/* lt_library link maps are implicitly NODELETE. */
if (l->l_type == lt_library || l->l_nodelete_active)
{
- if (_dlfo_nodelete_mappings != NULL)
- /* Second pass only. */
- _dl_find_object_from_map
- (l, _dlfo_nodelete_mappings + nodelete);
- ++nodelete;
+#ifdef SHARED
+ /* The kernel may have loaded ld.so with gaps. */
+ if (!l->l_contiguous && l == &GL(dl_rtld_map))
+ nodelete
+ = _dlfo_process_initial_noncontiguous_map (l, nodelete);
+ else
+#endif
+ {
+ if (_dlfo_nodelete_mappings != NULL)
+ /* Second pass only. */
+ _dl_find_object_from_map
+ (l, _dlfo_nodelete_mappings + nodelete);
+ ++nodelete;
+ }
}
else if (l->l_type == lt_loaded)
{
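The user-visible effect is that _dl_find_object (a GNU extension available since glibc 2.35) can now resolve a PC that falls inside any of ld.so's PT_LOAD segments, instead of missing addresses beyond the first gap.  A caller-side sketch, for illustration only and with minimal error handling:

/* Illustration only: look up the mapping that contains a code
   address and print its bounds.  */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  struct dl_find_object dlfo;
  if (_dl_find_object ((void *) &main, &dlfo) == 0)
    printf ("mapping: [%p, %p), link map %p\n",
            dlfo.dlfo_map_start, dlfo.dlfo_map_end,
            (void *) dlfo.dlfo_link_map);
  else
    puts ("address not covered by any mapping");
  return 0;
}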
@@ -1286,7 +1286,7 @@ rtld_setup_main_map (struct link_map *main_map)
/* Set up the program header information for the dynamic linker
itself. It can be accessed via _r_debug and dl_iterate_phdr
- callbacks. */
+ callbacks, and it is used by _dl_find_object. */
static void
rtld_setup_phdr (void)
{
@@ -1303,6 +1303,29 @@ rtld_setup_phdr (void)
GL(dl_rtld_map).l_phdr = rtld_phdr;
GL(dl_rtld_map).l_phnum = rtld_ehdr->e_phnum;
+ /* The linker may not have produced a contiguous object. The kernel
+ will load the object with actual gaps (unlike the glibc loader
+ for shared objects, which always produces a contiguous mapping).
+ See similar logic in rtld_setup_main_map above. */
+ GL(dl_rtld_map).l_contiguous = 1;
+ {
+ ElfW(Addr) expected_load_address = 0;
+ for (const ElfW(Phdr) *ph = rtld_phdr; ph < &rtld_phdr[rtld_ehdr->e_phnum];
+ ++ph)
+ if (ph->p_type == PT_LOAD)
+ {
+ ElfW(Addr) mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
+ if (GL(dl_rtld_map).l_contiguous && expected_load_address != 0
+ && expected_load_address != mapstart)
+ GL(dl_rtld_map).l_contiguous = 0;
+ ElfW(Addr) allocend = ph->p_vaddr + ph->p_memsz;
+ /* The next expected address is the page following this load
+ segment. */
+ expected_load_address = ((allocend + GLRO(dl_pagesize) - 1)
+ & ~(GLRO(dl_pagesize) - 1));
+ }
+ }
+
/* PT_GNU_RELRO is usually the last phdr. */
size_t cnt = rtld_ehdr->e_phnum;
while (cnt-- > 0)
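The contiguity test added above is plain page arithmetic: a gap exists whenever the page-rounded end of one PT_LOAD segment differs from the page-truncated start of the next.  The standalone restatement below is for illustration only; load_segments_contiguous is a made-up name, and the two-segment layout in the comment is a hypothetical example.

/* Standalone restatement of the contiguity check above.  With a
   4 KiB page size, two PT_LOAD segments covering [0x1000, 0x2800)
   and [0x4000, 0x5000) are not contiguous: the first rounds up to
   0x3000, but the second starts at 0x4000.  */
#include <link.h>
#include <stdbool.h>
#include <stddef.h>

static bool
load_segments_contiguous (const ElfW(Phdr) *phdr, size_t phnum,
                          ElfW(Addr) pagesize)
{
  ElfW(Addr) expected = 0;
  for (size_t i = 0; i < phnum; ++i)
    if (phdr[i].p_type == PT_LOAD)
      {
        ElfW(Addr) mapstart = phdr[i].p_vaddr & ~(pagesize - 1);
        if (expected != 0 && expected != mapstart)
          return false;
        /* The next segment is expected on the page following this
           one.  */
        expected = (phdr[i].p_vaddr + phdr[i].p_memsz + pagesize - 1)
                   & ~(pagesize - 1);
      }
  return true;
}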