@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm != NULL; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n", lm->l_public.l_name,
- (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
@@ -30,7 +30,7 @@ _dl_call_fini (void *closure_map)
map->l_public.l_name, map->l_ns);
/* Make sure nothing happens if we are called twice. */
- map->l_init_called = 0;
+ map->l_rw->l_init_called = 0;
ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
if (fini_array != NULL)
@@ -109,23 +109,23 @@ void
_dl_close_worker (struct link_map_private *map, bool force)
{
/* One less direct use. */
- --map->l_direct_opencount;
+ --map->l_rw->l_direct_opencount;
/* If _dl_close is called recursively (some destructor call dlclose),
just record that the parent _dl_close will need to do garbage collection
again and return. */
static enum { not_pending, pending, rerun } dl_close_state;
- if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+ if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
|| dl_close_state != not_pending)
{
- if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+ if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
dl_close_state = rerun;
/* There are still references to this object. Do nothing more. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
- map->l_public.l_name, map->l_direct_opencount);
+ map->l_public.l_name, map->l_rw->l_direct_opencount);
return;
}
@@ -165,11 +165,11 @@ _dl_close_worker (struct link_map_private *map, bool force)
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
- && l->l_direct_opencount == 0
- && !l->l_nodelete_active
+ && l->l_rw->l_direct_opencount == 0
+ && !l->l_rw->l_nodelete_active
/* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
acquire is sufficient and correct. */
- && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+ && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
&& !l->l_map_used)
continue;
@@ -207,10 +207,10 @@ _dl_close_worker (struct link_map_private *map, bool force)
}
}
/* And the same for relocation dependencies. */
- if (l->l_reldeps != NULL)
- for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
+ if (l->l_rw->l_reldeps != NULL)
+ for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
{
- struct link_map_private *jmap = l->l_reldeps->list[j];
+ struct link_map_private *jmap = l->l_rw->l_reldeps->list[j];
if (jmap->l_idx != IDX_STILL_USED)
{
@@ -244,12 +244,12 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (!imap->l_map_used)
{
- assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+ assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
/* Call its termination function. Do not do it for
half-cooked objects. Temporarily disable exception
handling, so that errors are fatal. */
- if (imap->l_init_called)
+ if (imap->l_rw->l_init_called)
_dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
@@ -496,7 +496,7 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (GL(dl_tls_dtv_slotinfo_list) != NULL
&& ! remove_slotinfo (imap->l_tls_modid,
GL(dl_tls_dtv_slotinfo_list), 0,
- imap->l_init_called))
+ imap->l_rw->l_init_called))
/* All dynamically loaded modules with TLS are unloaded. */
/* Can be read concurrently. */
atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
@@ -652,7 +652,8 @@ _dl_close_worker (struct link_map_private *map, bool force)
if (imap->l_origin != (char *) -1)
free ((char *) imap->l_origin);
- free (imap->l_reldeps);
+ free (imap->l_rw->l_reldeps);
+ free (imap->l_rw);
/* Print debugging message. */
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
@@ -754,7 +755,7 @@ _dl_close (void *_map)
before we took the lock. There is no way to detect this (see below)
so we proceed assuming this isn't the case. First see whether we
can remove the object at all. */
- if (__glibc_unlikely (map->l_nodelete_active))
+ if (__glibc_unlikely (map->l_rw->l_nodelete_active))
{
/* Nope. Do nothing. */
__rtld_lock_unlock_recursive (GL(dl_load_lock));
@@ -771,7 +772,7 @@ _dl_close (void *_map)
should be a detectable case and given that dlclose should be threadsafe
we need this to be a reliable detection.
This is bug 20990. */
- if (__builtin_expect (map->l_direct_opencount, 1) == 0)
+ if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
{
__rtld_lock_unlock_recursive (GL(dl_load_lock));
_dl_signal_error (0, map->l_public.l_name, NULL,
@@ -483,20 +483,20 @@ _dl_map_object_deps (struct link_map_private *map,
/* Maybe we can remove some relocation dependencies now. */
struct link_map_reldeps *l_reldeps = NULL;
- if (map->l_reldeps != NULL)
+ if (map->l_rw->l_reldeps != NULL)
{
for (i = 0; i < nlist; ++i)
map->l_searchlist.r_list[i]->l_reserved = 1;
/* Avoid removing relocation dependencies of the main binary. */
map->l_reserved = 0;
- struct link_map_private **list = &map->l_reldeps->list[0];
- for (i = 0; i < map->l_reldeps->act; ++i)
+ struct link_map_private **list = &map->l_rw->l_reldeps->list[0];
+ for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
if (list[i]->l_reserved)
{
/* Need to allocate new array of relocation dependencies. */
l_reldeps = malloc (sizeof (*l_reldeps)
- + map->l_reldepsmax
+ + map->l_rw->l_reldepsmax
* sizeof (struct link_map_private *));
if (l_reldeps == NULL)
/* Bad luck, keep the reldeps duplicated between
@@ -507,7 +507,7 @@ _dl_map_object_deps (struct link_map_private *map,
unsigned int j = i;
memcpy (&l_reldeps->list[0], &list[0],
i * sizeof (struct link_map_private *));
- for (i = i + 1; i < map->l_reldeps->act; ++i)
+ for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
if (!list[i]->l_reserved)
l_reldeps->list[j++] = list[i];
l_reldeps->act = j;
@@ -552,8 +552,8 @@ _dl_map_object_deps (struct link_map_private *map,
if (l_reldeps != NULL)
{
atomic_write_barrier ();
- void *old_l_reldeps = map->l_reldeps;
- map->l_reldeps = l_reldeps;
+ void *old_l_reldeps = map->l_rw->l_reldeps;
+ map->l_rw->l_reldeps = l_reldeps;
_dl_scope_free (old_l_reldeps);
}
if (old_l_initfini != NULL)
@@ -508,7 +508,7 @@ _dlfo_process_initial (void)
if (l != main_map && l == l->l_real)
{
/* lt_library link maps are implicitly NODELETE. */
- if (l->l_type == lt_library || l->l_nodelete_active)
+ if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
{
if (_dlfo_nodelete_mappings != NULL)
/* Second pass only. */
@@ -82,7 +82,7 @@ _dl_fini (void)
/* Bump l_direct_opencount of all objects so that they
are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
+ ++l->l_rw->l_direct_opencount;
}
assert (ns != LM_ID_BASE || i == nloaded);
assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
@@ -109,7 +109,7 @@ _dl_fini (void)
{
struct link_map_private *l = maps[i];
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
{
_dl_call_fini (l);
#ifdef SHARED
@@ -119,7 +119,7 @@ _dl_fini (void)
}
/* Correct the previous increment. */
- --l->l_direct_opencount;
+ --l->l_rw->l_direct_opencount;
}
#ifdef SHARED
@@ -30,13 +30,13 @@ call_init (struct link_map_private *l, int argc, char **argv, char **env)
need relocation, and neither do proxy objects.) */
assert (l->l_real->l_relocated || l->l_real->l_type == lt_executable);
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
/* This object is all done. */
return;
/* Avoid handling this constructor again in case we have a circular
dependency. */
- l->l_init_called = 1;
+ l->l_rw->l_init_called = 1;
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_public.l_name[0], 'a') == '\0'
@@ -176,9 +176,9 @@ static void
mark_nodelete (struct link_map_private *map, int flags)
{
if (flags & DL_LOOKUP_FOR_RELOCATE)
- map->l_nodelete_pending = true;
+ map->l_rw->l_nodelete_pending = true;
else
- map->l_nodelete_active = true;
+ map->l_rw->l_nodelete_active = true;
}
/* Return true if MAP is marked as NODELETE according to the lookup
@@ -188,8 +188,8 @@ is_nodelete (struct link_map_private *map, int flags)
{
/* Non-pending NODELETE always counts. Pending NODELETE only counts
during initial relocation processing. */
- return map->l_nodelete_active
- || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
+ return map->l_rw->l_nodelete_active
+ || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
}
/* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
@@ -536,7 +536,7 @@ add_dependency (struct link_map_private *undef_map,
return 0;
struct link_map_reldeps *l_reldeps
- = atomic_forced_read (undef_map->l_reldeps);
+ = atomic_forced_read (undef_map->l_rw->l_reldeps);
/* Make sure l_reldeps is read before l_initfini. */
atomic_read_barrier ();
@@ -596,22 +596,23 @@ add_dependency (struct link_map_private *undef_map,
/* Redo the l_reldeps check if undef_map's l_reldeps changed in
the mean time. */
- if (undef_map->l_reldeps != NULL)
+ if (undef_map->l_rw->l_reldeps != NULL)
{
- if (undef_map->l_reldeps != l_reldeps)
+ if (undef_map->l_rw->l_reldeps != l_reldeps)
{
- struct link_map_private **list = &undef_map->l_reldeps->list[0];
- l_reldepsact = undef_map->l_reldeps->act;
+ struct link_map_private **list
+ = &undef_map->l_rw->l_reldeps->list[0];
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (i = 0; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
}
- else if (undef_map->l_reldeps->act > l_reldepsact)
+ else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
{
struct link_map_private **list
- = &undef_map->l_reldeps->list[0];
+ = &undef_map->l_rw->l_reldeps->list[0];
i = l_reldepsact;
- l_reldepsact = undef_map->l_reldeps->act;
+ l_reldepsact = undef_map->l_rw->l_reldeps->act;
for (; i < l_reldepsact; ++i)
if (list[i] == map)
goto out_check;
@@ -667,14 +668,17 @@ marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
}
/* Add the reference now. */
- if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
+ if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
{
/* Allocate more memory for the dependency list. Since this
can never happen during the startup phase we can use
`realloc'. */
struct link_map_reldeps *newp;
- unsigned int max
- = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
+        unsigned int max;
+        if (undef_map->l_rw->l_reldepsmax > 0)
+          max = undef_map->l_rw->l_reldepsmax * 2;
+        else
+          max = 10;
#ifdef RTLD_PREPARE_FOREIGN_CALL
RTLD_PREPARE_FOREIGN_CALL;
@@ -702,23 +706,24 @@ marking %s [%lu] as NODELETE due to memory allocation failure\n",
else
{
if (l_reldepsact)
- memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+ memcpy (&newp->list[0],
+ &undef_map->l_rw->l_reldeps->list[0],
l_reldepsact * sizeof (struct link_map_private *));
newp->list[l_reldepsact] = map;
newp->act = l_reldepsact + 1;
atomic_write_barrier ();
- void *old = undef_map->l_reldeps;
- undef_map->l_reldeps = newp;
- undef_map->l_reldepsmax = max;
+ void *old = undef_map->l_rw->l_reldeps;
+ undef_map->l_rw->l_reldeps = newp;
+ undef_map->l_rw->l_reldepsmax = max;
if (old)
_dl_scope_free (old);
}
}
else
{
- undef_map->l_reldeps->list[l_reldepsact] = map;
+ undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
atomic_write_barrier ();
- undef_map->l_reldeps->act = l_reldepsact + 1;
+ undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
}
/* Display information if we are debugging. */
@@ -94,6 +94,12 @@ _dl_new_object (char *realname, const char *libname, int type,
+ sizeof (*newname) + libname_len, 1);
if (new == NULL)
return NULL;
+ new->l_rw = calloc (1, sizeof (*new->l_rw));
+ if (new->l_rw == NULL)
+ {
+ free (new);
+ return NULL;
+ }
new->l_real = new;
new->l_symbolic_searchlist.r_list
@@ -263,7 +263,7 @@ resize_scopes (struct link_map_private *new)
/* If the initializer has been called already, the object has
not been loaded here and now. */
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -327,7 +327,7 @@ update_scopes (struct link_map_private *new)
struct link_map_private *imap = new->l_searchlist.r_list[i];
int from_scope = 0;
- if (imap->l_init_called && imap->l_type == lt_loaded)
+ if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
{
if (scope_has_map (imap, new))
/* Avoid duplicates. */
@@ -368,7 +368,7 @@ resize_tls_slotinfo (struct link_map_private *new)
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
- if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+ if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
{
_dl_add_to_slotinfo (imap, false);
any_tls = true;
@@ -390,7 +390,7 @@ update_tls_slotinfo (struct link_map_private *new)
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
- if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+ if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
{
_dl_add_to_slotinfo (imap, true);
@@ -415,7 +415,7 @@ TLS generation counter wrapped! Please report this."));
struct link_map_private *imap = new->l_searchlist.r_list[i];
if (imap->l_need_tls_init
- && ! imap->l_init_called
+ && ! imap->l_rw->l_init_called
&& imap->l_tls_blocksize > 0)
{
/* For static TLS we have to allocate the memory here and
@@ -451,7 +451,7 @@ activate_nodelete (struct link_map_private *new)
NODELETE status for objects outside the local scope. */
for (struct link_map_private *l = GL (dl_ns)[new->l_ns]._ns_loaded;
l != NULL; l = l_next (l))
- if (l->l_nodelete_pending)
+ if (l->l_rw->l_nodelete_pending)
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("activating NODELETE for %s [%lu]\n",
@@ -460,11 +460,11 @@ activate_nodelete (struct link_map_private *new)
/* The flag can already be true at this point, e.g. a signal
handler may have triggered lazy binding and set NODELETE
status immediately. */
- l->l_nodelete_active = true;
+ l->l_rw->l_nodelete_active = true;
/* This is just a debugging aid, to indicate that
activate_nodelete has run for this map. */
- l->l_nodelete_pending = false;
+ l->l_rw->l_nodelete_pending = false;
}
}
@@ -547,7 +547,7 @@ dl_open_worker_begin (void *a)
return;
/* This object is directly loaded. */
- ++new->l_direct_opencount;
+ ++new->l_rw->l_direct_opencount;
/* It was already open. */
if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
@@ -556,7 +556,7 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_public.l_name, new->l_ns,
- new->l_direct_opencount);
+ new->l_rw->l_direct_opencount);
/* If the user requested the object to be in the global
namespace but it is not so far, prepare to add it now. This
@@ -569,10 +569,10 @@ dl_open_worker_begin (void *a)
if (__glibc_unlikely (mode & RTLD_NODELETE))
{
if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
- && !new->l_nodelete_active)
+ && !new->l_rw->l_nodelete_active)
_dl_debug_printf ("marking %s [%lu] as NODELETE\n",
new->l_public.l_name, new->l_ns);
- new->l_nodelete_active = true;
+ new->l_rw->l_nodelete_active = true;
}
/* Finalize the addition to the global scope. */
@@ -589,7 +589,7 @@ dl_open_worker_begin (void *a)
/* Schedule NODELETE marking for the directly loaded object if
requested. */
if (__glibc_unlikely (mode & RTLD_NODELETE))
- new->l_nodelete_pending = true;
+ new->l_rw->l_nodelete_pending = true;
/* Load that object's dependencies. */
_dl_map_object_deps (new, NULL, 0, 0,
@@ -697,7 +697,7 @@ dl_open_worker_begin (void *a)
_dl_start_profile ();
/* Prevent unloading the object. */
- GL(dl_profile_map)->l_nodelete_active = true;
+ GL(dl_profile_map)->l_rw->l_nodelete_active = true;
}
}
else
@@ -819,7 +819,7 @@ dl_open_worker (void *a)
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
new->l_public.l_name, new->l_ns,
- new->l_direct_opencount);
+ new->l_rw->l_direct_opencount);
}
void *
@@ -87,10 +87,11 @@ _dl_sort_maps_original (struct link_map_private **maps, unsigned int nmaps,
goto next;
}
- if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
+ if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
{
- unsigned int m = maps[k]->l_reldeps->act;
- struct link_map_private **relmaps = &maps[k]->l_reldeps->list[0];
+ unsigned int m = maps[k]->l_rw->l_reldeps->act;
+ struct link_map_private **relmaps
+ = &maps[k]->l_rw->l_reldeps->list[0];
/* Look through the relocation dependencies of the object. */
while (m-- > 0)
@@ -153,15 +154,15 @@ dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
}
}
- if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+ if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
{
/* Indicate that we encountered relocation dependencies during
traversal. */
*do_reldeps = true;
- for (int m = map->l_reldeps->act - 1; m >= 0; m--)
+ for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
{
- struct link_map_private *dep = map->l_reldeps->list[m];
+ struct link_map_private *dep = map->l_rw->l_reldeps->list[m];
if (dep->l_visited == 0
&& dep->l_main_map == 0)
dfs_traversal (rpo, dep, do_reldeps);
@@ -85,6 +85,7 @@ int _dl_bind_not;
static struct link_map_private _dl_main_map =
{
.l_public = { .l_name = (char *) "", },
+ .l_rw = &(struct link_map_rw) { },
.l_real = &_dl_main_map,
.l_ns = LM_ID_BASE,
.l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
@@ -163,7 +163,7 @@ elf_get_dynamic_info (struct link_map_private *l, bool bootstrap,
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NODELETE)
- l->l_nodelete_pending = true;
+ l->l_rw->l_nodelete_pending = true;
/* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
to assert this, but we can't. Users have been setting
@@ -77,9 +77,9 @@ static const struct
{ \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ map->l_rw->l_direct_opencount); \
fflush (stdout); \
} \
while (0)
@@ -191,8 +191,8 @@ main (int argc, char *argv[])
for (map = MAPS; map != NULL; map = l_next (map))
if (map->l_type == lt_loaded)
{
- printf ("name = \"%s\", direct_opencount = %d\n",
- map->l_public.l_name, (int) map->l_direct_opencount);
+ printf ("name = \"%s\", direct_opencount = %u\n",
+ map->l_public.l_name, map->l_rw->l_direct_opencount);
result = 1;
}
@@ -29,9 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name,
- (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
@@ -29,8 +29,8 @@ check_loaded_objects (const char **loaded)
for (lm = MAPS; lm; lm = l_next (lm))
{
if (lm->l_public.l_name && lm->l_public.l_name[0])
- printf(" %s, count = %d\n",
- lm->l_public.l_name, (int) lm->l_direct_opencount);
+ printf(" %s, count = %u\n",
+ lm->l_public.l_name, lm->l_rw->l_direct_opencount);
if (lm->l_type == lt_loaded && lm->l_public.l_name)
{
int match = 0;
@@ -458,6 +458,9 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
interfere with __rtld_static_init. */
GLRO (dl_find_object) = &_dl_find_object;
+ static struct link_map_rw rtld_map_rw;
+ GL (dl_rtld_map).l_rw = &rtld_map_rw;
+
/* If it hasn't happen yet record the startup time. */
rtld_timer_start (&start_time);
#if !defined DONT_USE_BOOTSTRAP_MAP
@@ -1124,8 +1127,6 @@ rtld_setup_main_map (struct link_map_private *main_map)
main_map->l_text_end = 0;
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
- /* And it was opened directly. */
- ++main_map->l_direct_opencount;
main_map->l_contiguous = 1;
/* A PT_LOAD segment at an unexpected address will clear the
@@ -14,9 +14,8 @@
#define OUT \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
+ map->l_public.l_name, map->l_rw->l_direct_opencount); \
fflush (stdout)
typedef struct
@@ -11,9 +11,8 @@
#define OUT \
for (map = MAPS; map != NULL; map = l_next (map)) \
if (map->l_type == lt_loaded) \
- printf ("name = \"%s\", direct_opencount = %d\n", \
- map->l_public.l_name, \
- (int) map->l_direct_opencount); \
+ printf ("name = \"%s\", direct_opencount = %u\n", \
+ map->l_public.l_name, map->l_rw->l_direct_opencount); \
fflush (stdout)
int
@@ -75,6 +75,43 @@ struct r_search_path_struct
extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
+
+/* Link map attributes that are always readable and writable. */
+struct link_map_rw
+{
+ /* List of the dependencies introduced through symbol binding. */
+ struct link_map_reldeps
+ {
+ unsigned int act;
+ struct link_map_private *list[];
+ } *l_reldeps;
+ unsigned int l_reldepsmax;
+
+ /* Reference count for dlopen/dlclose. See the l_direct_opencount
+ accessor function below. */
+ unsigned int l_direct_opencount;
+
+ /* Number of thread_local objects constructed by this DSO. This is
+ atomically accessed and modified and is not always protected by the load
+ lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
+ size_t l_tls_dtor_count;
+
+  /* True if ELF constructors have been called.  */
+ bool l_init_called;
+
+ /* NODELETE status of the map. Only valid for maps of type
+ lt_loaded. Lazy binding sets l_nodelete_active directly,
+ potentially from signal handlers. Initial loading of an
+ DF_1_NODELETE object set l_nodelete_pending. Relocation may
+ set l_nodelete_pending as well. l_nodelete_pending maps are
+ promoted to l_nodelete_active status in the final stages of
+ dlopen, prior to calling ELF constructors. dlclose only
+ refuses to unload l_nodelete_active maps, the pending status is
+ ignored. */
+ bool l_nodelete_active;
+ bool l_nodelete_pending;
+};
+
/* Structure describing a loaded shared object. The `l_next' and `l_prev'
members form a chain of all the shared objects loaded at startup.
@@ -98,6 +135,9 @@ struct link_map_private
than one namespace. */
struct link_map_private *l_real;
+ /* Run-time writable fields. */
+ struct link_map_rw *l_rw;
+
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
@@ -157,7 +197,6 @@ struct link_map_private
const Elf_Symndx *l_buckets;
};
- unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
@@ -167,7 +206,6 @@ struct link_map_private
unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
referenced. */
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
- unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_main_map:1; /* Nonzero for the map of the main program. */
@@ -200,18 +238,6 @@ struct link_map_private
needs to process this
lt_library map. */
- /* NODELETE status of the map. Only valid for maps of type
- lt_loaded. Lazy binding sets l_nodelete_active directly,
- potentially from signal handlers. Initial loading of an
- DF_1_NODELETE object set l_nodelete_pending. Relocation may
- set l_nodelete_pending as well. l_nodelete_pending maps are
- promoted to l_nodelete_active status in the final stages of
- dlopen, prior to calling ELF constructors. dlclose only
- refuses to unload l_nodelete_active maps, the pending status is
- ignored. */
- bool l_nodelete_active;
- bool l_nodelete_pending;
-
#include <link_map.h>
/* Collected information about own RPATH directories. */
@@ -265,14 +291,6 @@ struct link_map_private
/* List of object in order of the init and fini calls. */
struct link_map_private **l_initfini;
- /* List of the dependencies introduced through symbol binding. */
- struct link_map_reldeps
- {
- unsigned int act;
- struct link_map_private *list[];
- } *l_reldeps;
- unsigned int l_reldepsmax;
-
/* Nonzero if the DSO is used. */
unsigned int l_used;
@@ -323,11 +341,6 @@ struct link_map_private
/* Index of the module in the dtv array. */
size_t l_tls_modid;
- /* Number of thread_local objects constructed by this DSO. This is
- atomically accessed and modified and is not always protected by the load
- lock. See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c. */
- size_t l_tls_dtor_count;
-
/* Information used to change permission after the relocations are
done. */
ElfW(Addr) l_relro_addr;
@@ -133,7 +133,7 @@ __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
_dl_close_worker is protected by the dl_load_lock. The execution in
__call_tls_dtors does not really depend on this value beyond the fact that
it should be atomic, so Relaxed MO should be sufficient. */
- atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
+ atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
__rtld_lock_unlock_recursive (GL(dl_load_lock));
new->map = lm_cache;
@@ -159,7 +159,7 @@ __call_tls_dtors (void)
l_tls_dtor_count decrement. That way, we protect this access from a
potential DSO unload in _dl_close_worker, which happens when
l_tls_dtor_count is 0. See CONCURRENCY NOTES for more detail. */
- atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
+ atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
free (cur);
}
}
@@ -40,7 +40,7 @@ dl_isa_level_check (struct link_map_private *m, const char *program)
l = m->l_initfini[i];
/* Skip ISA level check if functions have been executed. */
- if (l->l_init_called)
+ if (l->l_rw->l_init_called)
continue;
#ifdef SHARED