@@ -3021,3 +3021,7 @@ $(objpfx)tst-dlclose-lazy.out: \
$(objpfx)tst-dlclose-lazy-mod1.so $(objpfx)tst-dlclose-lazy-mod2.so
$(objpfx)tst-decorate-maps: $(shared-thread-library)
+
+tst-decorate-maps-ENV = \
+ GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
+tst-decorate-maps-ARGS = 8
@@ -26,6 +26,7 @@
#include <string.h>
#include <ldsodefs.h>
#include <malloc/malloc-internal.h>
+#include <setvmaname.h>
static void *alloc_ptr, *alloc_end, *alloc_last_block;
@@ -60,6 +61,7 @@ __minimal_malloc (size_t n)
MAP_ANON|MAP_PRIVATE, -1, 0);
if (page == MAP_FAILED)
return NULL;
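+ /* Assumed rationale for the leading space in the name: the kernel lists the
+    region as "[anon: glibc: loader malloc]" in /proc/self/maps, which is the
+    exact string tst-decorate-maps below looks for.  */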
+ __set_vma_name (page, nup, " glibc: loader malloc");
if (page != alloc_end)
alloc_ptr = page;
alloc_end = page + nup;
@@ -32,15 +32,22 @@
static pthread_barrier_t b;
+static int expected_n_arenas;
+
static void *
tf (void *closure)
{
+ void *p = xmalloc (1024);
+
/* Wait for the thread startup, so the thread stack is allocated. */
xpthread_barrier_wait (&b);
/* Wait for the test to read the process mappings. */
+
xpthread_barrier_wait (&b);
+ free (p);
+
return NULL;
}
@@ -48,6 +55,9 @@ struct proc_maps_t
{
int n_def_threads;
int n_user_threads;
+ int n_arenas;
+ int n_malloc_mmap;
+ int n_loader_malloc_mmap;
};
static struct proc_maps_t
@@ -69,6 +79,12 @@ read_proc_maps (void)
r.n_def_threads++;
else if (strstr (line, "[anon: glibc: pthread user stack:") != NULL)
r.n_user_threads++;
+ else if (strstr (line, "[anon: glibc: malloc arena]") != NULL)
+ r.n_arenas++;
+ else if (strstr (line, "[anon: glibc: malloc]") != NULL)
+ r.n_malloc_mmap++;
+ else if (strstr (line, "[anon: glibc: loader malloc]") != NULL)
+ r.n_loader_malloc_mmap++;
}
free (line);
xfclose (f);
@@ -90,6 +106,9 @@ do_test_threads (bool set_guard)
xpthread_barrier_init (&b, NULL, num_threads + 1);
+ /* Issue a large malloc, well above mmap_threshold, to trigger an mmap call. */
+ void *p = xmalloc (256 * 1024);
+
pthread_t thr[num_threads];
{
int i = 0;
@@ -128,6 +147,10 @@ do_test_threads (bool set_guard)
struct proc_maps_t r = read_proc_maps ();
TEST_COMPARE (r.n_def_threads, num_def_threads);
TEST_COMPARE (r.n_user_threads, num_user_threads);
+ TEST_COMPARE (r.n_arenas, expected_n_arenas);
+ TEST_COMPARE (r.n_malloc_mmap, 1);
+ /* On some architectures the loader might use more than one page. */
+ TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
}
/* Let the threads finish. */
@@ -140,8 +163,22 @@ do_test_threads (bool set_guard)
struct proc_maps_t r = read_proc_maps ();
TEST_COMPARE (r.n_def_threads, 0);
TEST_COMPARE (r.n_user_threads, 0);
+ TEST_COMPARE (r.n_arenas, expected_n_arenas);
+ TEST_COMPARE (r.n_malloc_mmap, 1);
+ TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
}
+
+ free (p);
+}
+
+static void
+do_prepare (int argc, char *argv[])
+{
+ TEST_VERIFY_EXIT (argc == 2);
+ expected_n_arenas = strtol (argv[1], NULL, 10);
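+ /* The main arena is not backed by mmap and therefore is not decorated, so
+    only arena_max - 1 "[anon: glibc: malloc arena]" mappings are expected.  */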
+ expected_n_arenas = expected_n_arenas - 1;
}
+#define PREPARE do_prepare
static int
do_test (void)
@@ -17,6 +17,7 @@
not, see <https://www.gnu.org/licenses/>. */
#include <stdbool.h>
+#include <setvmaname.h>
#define TUNABLE_NAMESPACE malloc
#include <elf/dl-tunables.h>
@@ -436,6 +437,9 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
return 0;
}
+ /* Only consider the actual usable range. */
+ __set_vma_name (p2, size, " glibc: malloc arena");
+
madvise_thp (p2, size);
h = (heap_info *) p2;
@@ -218,6 +218,7 @@
#include <sys/sysinfo.h>
#include <ldsodefs.h>
+#include <setvmaname.h>
#include <unistd.h>
#include <stdio.h> /* needed for malloc_stats */
@@ -2428,6 +2429,8 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
madvise_thp (mm, size);
#endif
+ __set_vma_name (mm, size, " glibc: malloc");
+
/*
The offset to the start of the mmapped region is stored in the prev_size
field of the chunk. This allows us to adjust returned start address to
@@ -2513,6 +2516,8 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
madvise_thp (mbrk, size);
#endif
+ __set_vma_name (mbrk, size, " glibc: malloc");
+
/* Record that we no longer have a contiguous sbrk region. After the first
time mmap is used as backup, we do not ever rely on contiguous space
since this could incorrectly bridge regions. */
@@ -699,6 +699,10 @@ tst-audit-threads-ENV = LD_AUDIT=$(objpfx)tst-audit-threads-mod1.so
tst-setuid1-static-ENV = \
LD_LIBRARY_PATH=$(ld-library-path):$(common-objpfx)elf:$(common-objpfx)nss
+tst-pthread-proc-maps-ENV = \
+ GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
+tst-pthread-proc-maps-ARGS = 8
+
# The tests here better do not run in parallel.
ifeq ($(run-built-tests),yes)
ifneq ($(filter %tests,$(MAKECMDGOALS)),)
@@ -67,5 +67,8 @@ sysdep_routines += __mtag_tag_zero_region \
endif
ifeq ($(subdir),malloc)
-sysdep_malloc_debug_routines = __mtag_tag_zero_region __mtag_tag_region
+sysdep_malloc_debug_routines += \
+ __mtag_tag_zero_region \
+ __mtag_tag_region \
+ # sysdep_malloc_debug_routines
endif
@@ -53,6 +53,10 @@ CFLAGS-assert-perr.c += -DFATAL_PREPARE_INCLUDE='<fatal-prepare.h>'
endif
ifeq ($(subdir),malloc)
+sysdep_malloc_debug_routines += \
+ setvmaname \
+ # sysdep_malloc_debug_routines
+
CFLAGS-malloc.c += -DMORECORE_CLEARS=2
endif
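
For reference, a minimal standalone sketch of the anonymous-VMA naming the patch
relies on, assuming __set_vma_name amounts to prctl (PR_SET_VMA,
PR_SET_VMA_ANON_NAME, ...) on a kernel built with CONFIG_ANON_VMA_NAME.  The
PR_SET_VMA constants are defined as a fallback in case the installed
<sys/prctl.h> lacks them, and "demo: my mapping" is purely an illustrative name:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif

int
main (void)
{
  size_t len = 4 * 4096;
  char *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  /* Name the anonymous mapping; failures are ignored (old kernel or
     !CONFIG_ANON_VMA_NAME), so the program still works without the feature.  */
  prctl (PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long) p, len,
         (unsigned long) "demo: my mapping");

  /* Print the maps line for the region, now listed as
     "[anon:demo: my mapping]" when the naming succeeded.  */
  FILE *f = fopen ("/proc/self/maps", "r");
  char line[512];
  while (f != NULL && fgets (line, sizeof line, f) != NULL)
    if (strstr (line, "[anon:demo: my mapping]") != NULL)
      fputs (line, stdout);
  if (f != NULL)
    fclose (f);

  munmap (p, len);
  return 0;
}

On kernels with the feature enabled this prints the single maps line ending in
"[anon:demo: my mapping]"; elsewhere the prctl call fails silently and nothing
is printed, which presumably mirrors how __set_vma_name degrades.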