Message ID: 20231028195559.390407-5-adhemerval.zanella@linaro.org
State: New
Series: Add a tunable to decorate anonymous memory maps
LGTM
Reviewed-by: DJ Delorie <dj@redhat.com>

Adhemerval Zanella <adhemerval.zanella@linaro.org> writes:

> Add anonymous mmap annotations on loader malloc, malloc when it
> allocates memory with mmap, and on malloc arena.  The /proc/self/maps
> will now print:
>
>   [anon: glibc: malloc arena]
>   [anon: glibc: malloc]
>   [anon: glibc: loader malloc]
>
> On arena allocation, glibc annotates only the read/write mapping.
>
> Checked on x86_64-linux-gnu and aarch64-linux-gnu.
> ---
>  elf/Makefile            |  4 ++++
>  elf/dl-minimal-malloc.c |  2 ++
>  elf/tst-decorate-maps.c | 37 +++++++++++++++++++++++++++++++++++++
>  malloc/arena.c          |  4 ++++
>  malloc/malloc.c         |  5 +++++
>  nptl/Makefile           |  4 ++++
>  6 files changed, 56 insertions(+)

> diff --git a/elf/Makefile b/elf/Makefile
> +
> +tst-decorate-maps-ENV = \
> +  GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
> +tst-decorate-maps-ARGS = 8

Ok.

> diff --git a/elf/dl-minimal-malloc.c b/elf/dl-minimal-malloc.c
> +#include <setvmaname.h>

Ok.

>                           MAP_ANON|MAP_PRIVATE, -1, 0);
>        if (page == MAP_FAILED)
>          return NULL;
> +      __set_vma_name (page, nup, " glibc: loader malloc");
>        if (page != alloc_end)
>          alloc_ptr = page;
>        alloc_end = page + nup;

Ok.

> diff --git a/elf/tst-decorate-maps.c b/elf/tst-decorate-maps.c
>
> +static int expected_n_arenas;
> +
>  static void *
>  tf (void *closure)
>  {
> +  void *p = xmalloc (1024);
> +
>    /* Wait for the thread startup, so the thread stack is allocated.  */
>    xpthread_barrier_wait (&b);
>
>    /* Wait for the test to read the process mappings.  */
> +
>    xpthread_barrier_wait (&b);
>
> +  free (p);
> +
>    return NULL;
>  }

Ok.  Enough to create an arena, not enough to mmap the chunk.

> @@ -48,6 +55,9 @@ struct proc_maps_t
>  {
>    int n_def_threads;
>    int n_user_threads;
> +  int n_arenas;
> +  int n_malloc_mmap;
> +  int n_loader_malloc_mmap;
>  };

Ok.

> @@ -69,6 +79,12 @@ read_proc_maps (void)
>          r.n_def_threads++;
>        else if (strstr (line, "[anon: glibc: pthread user stack:") != NULL)
>          r.n_user_threads++;
> +      else if (strstr (line, "[anon: glibc: malloc arena]") != NULL)
> +        r.n_arenas++;
> +      else if (strstr (line, "[anon: glibc: malloc]") != NULL)
> +        r.n_malloc_mmap++;
> +      else if (strstr (line, "[anon: glibc: loader malloc]") != NULL)
> +        r.n_loader_malloc_mmap++;
>      }
>    free (line);
>    xfclose (f);

Ok.

> @@ -90,6 +106,9 @@ do_test_threads (bool set_guard)
>
>    xpthread_barrier_init (&b, NULL, num_threads + 1);
>
> +  /* Issue a large malloc to trigger a mmap call.  */
> +  void *p = xmalloc (256 * 1024);
> +
>    pthread_t thr[num_threads];
>    {
>      int i = 0;

Ok.

> @@ -128,6 +147,10 @@ do_test_threads (bool set_guard)
>      struct proc_maps_t r = read_proc_maps ();
>      TEST_COMPARE (r.n_def_threads, num_def_threads);
>      TEST_COMPARE (r.n_user_threads, num_user_threads);
> +    TEST_COMPARE (r.n_arenas, expected_n_arenas);
> +    TEST_COMPARE (r.n_malloc_mmap, 1);
> +    /* On some architectures the loader might use more than one page.  */
> +    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
>    }

Ok.

>    /* Let the threads finish.  */
> @@ -140,8 +163,22 @@ do_test_threads (bool set_guard)
>      struct proc_maps_t r = read_proc_maps ();
>      TEST_COMPARE (r.n_def_threads, 0);
>      TEST_COMPARE (r.n_user_threads, 0);
> +    TEST_COMPARE (r.n_arenas, expected_n_arenas);
> +    TEST_COMPARE (r.n_malloc_mmap, 1);
> +    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
>    }
> +
> +  free (p);
> +}

Ok.

> +static void
> +do_prepare (int argc, char *argv[])
> +{
> +  TEST_VERIFY_EXIT (argc == 2);
> +  expected_n_arenas = strtol (argv[1], NULL, 10);
> +  expected_n_arenas = expected_n_arenas - 1;
>  }
> +#define PREPARE do_prepare

Ok.

> diff --git a/malloc/arena.c b/malloc/arena.c
> index 6f03955ff2..d1e214ac2e 100644
> --- a/malloc/arena.c
> +++ b/malloc/arena.c
> @@ -17,6 +17,7 @@
>     not, see <https://www.gnu.org/licenses/>.  */
>
>  #include <stdbool.h>
> +#include <setvmaname.h>

Ok.

>  #define TUNABLE_NAMESPACE malloc
>  #include <elf/dl-tunables.h>
> @@ -436,6 +437,9 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
>        return 0;
>      }
>
> +  /* Only consider the actual usable range.  */
> +  __set_vma_name (p2, size, " glibc: malloc arena");
> +
>    madvise_thp (p2, size);
>

Ok.

> diff --git a/malloc/malloc.c b/malloc/malloc.c
>  #include <sys/sysinfo.h>
>
>  #include <ldsodefs.h>
> +#include <setvmaname.h>

Ok.

> @@ -2428,6 +2429,8 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
>    madvise_thp (mm, size);
>  #endif
>
> +  __set_vma_name (mm, size, " glibc: malloc");
> +

Ok.

> @@ -2513,6 +2516,8 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
>    madvise_thp (mbrk, size);
>  #endif
>
> +  __set_vma_name (mbrk, size, " glibc: malloc");
> +

Ok.

> diff --git a/nptl/Makefile b/nptl/Makefile
> +tst-pthread-proc-maps-ENV = \
> +  GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
> +tst-pthread-proc-maps-ARGS = 8
> +

Ok.
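For context: the __set_vma_name helper used throughout the patch comes from <setvmaname.h>, added earlier in this series, and presumably wraps the Linux prctl PR_SET_VMA_ANON_NAME operation (available since Linux 5.17 when the kernel is built with CONFIG_ANON_VMA_NAME). The standalone sketch below is not glibc code; it only illustrates that underlying mechanism. Note that the kernel renders the name as "[anon:<name>]" with no separator, which appears to be why the names in this patch start with a leading space, producing the "[anon: glibc: ...]" strings the test greps for.

/* Standalone sketch of the PR_SET_VMA_ANON_NAME mechanism; the name
   "demo: named mapping" and this program are illustrative only.  */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
# define PR_SET_VMA 0x53564d41          /* "SVMA", for older kernel headers.  */
# define PR_SET_VMA_ANON_NAME 0
#endif

int
main (void)
{
  size_t len = 4 * 4096;
  void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  /* The kernel shows the name as "[anon:<name>]", so the leading space
     yields "[anon: demo: named mapping]".  On kernels without
     CONFIG_ANON_VMA_NAME the call fails with EINVAL; the decoration is
     best-effort, so the failure is only reported, not treated as fatal.  */
  if (prctl (PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long) p, len,
             (unsigned long) " demo: named mapping") != 0)
    perror ("prctl (PR_SET_VMA_ANON_NAME)");

  /* Print the decorated line from /proc/self/maps, if any.  */
  char line[512];
  FILE *f = fopen ("/proc/self/maps", "r");
  while (f != NULL && fgets (line, sizeof line, f) != NULL)
    if (strstr (line, "[anon: demo:") != NULL)
      fputs (line, stdout);
  if (f != NULL)
    fclose (f);
  return 0;
}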
diff --git a/elf/Makefile b/elf/Makefile
index a82590703c..c3cf63a443 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -3021,3 +3021,7 @@ $(objpfx)tst-dlclose-lazy.out: \
   $(objpfx)tst-dlclose-lazy-mod1.so $(objpfx)tst-dlclose-lazy-mod2.so
 
 $(objpfx)tst-decorate-maps: $(shared-thread-library)
+
+tst-decorate-maps-ENV = \
+  GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
+tst-decorate-maps-ARGS = 8
diff --git a/elf/dl-minimal-malloc.c b/elf/dl-minimal-malloc.c
index 27549645d0..da36986269 100644
--- a/elf/dl-minimal-malloc.c
+++ b/elf/dl-minimal-malloc.c
@@ -26,6 +26,7 @@
 #include <string.h>
 #include <ldsodefs.h>
 #include <malloc/malloc-internal.h>
+#include <setvmaname.h>
 
 static void *alloc_ptr, *alloc_end, *alloc_last_block;
 
@@ -60,6 +61,7 @@ __minimal_malloc (size_t n)
                          MAP_ANON|MAP_PRIVATE, -1, 0);
       if (page == MAP_FAILED)
         return NULL;
+      __set_vma_name (page, nup, " glibc: loader malloc");
       if (page != alloc_end)
         alloc_ptr = page;
       alloc_end = page + nup;
diff --git a/elf/tst-decorate-maps.c b/elf/tst-decorate-maps.c
index bbb7972094..bf508489c4 100644
--- a/elf/tst-decorate-maps.c
+++ b/elf/tst-decorate-maps.c
@@ -32,15 +32,22 @@
 
 static pthread_barrier_t b;
 
+static int expected_n_arenas;
+
 static void *
 tf (void *closure)
 {
+  void *p = xmalloc (1024);
+
   /* Wait for the thread startup, so the thread stack is allocated.  */
   xpthread_barrier_wait (&b);
 
   /* Wait for the test to read the process mappings.  */
+
   xpthread_barrier_wait (&b);
 
+  free (p);
+
   return NULL;
 }
 
@@ -48,6 +55,9 @@ struct proc_maps_t
 {
   int n_def_threads;
   int n_user_threads;
+  int n_arenas;
+  int n_malloc_mmap;
+  int n_loader_malloc_mmap;
 };
 
 static struct proc_maps_t
@@ -69,6 +79,12 @@ read_proc_maps (void)
        r.n_def_threads++;
       else if (strstr (line, "[anon: glibc: pthread user stack:") != NULL)
        r.n_user_threads++;
+      else if (strstr (line, "[anon: glibc: malloc arena]") != NULL)
+       r.n_arenas++;
+      else if (strstr (line, "[anon: glibc: malloc]") != NULL)
+       r.n_malloc_mmap++;
+      else if (strstr (line, "[anon: glibc: loader malloc]") != NULL)
+       r.n_loader_malloc_mmap++;
     }
   free (line);
   xfclose (f);
@@ -90,6 +106,9 @@ do_test_threads (bool set_guard)
 
   xpthread_barrier_init (&b, NULL, num_threads + 1);
 
+  /* Issue a large malloc to trigger a mmap call.  */
+  void *p = xmalloc (256 * 1024);
+
   pthread_t thr[num_threads];
   {
     int i = 0;
@@ -128,6 +147,10 @@ do_test_threads (bool set_guard)
     struct proc_maps_t r = read_proc_maps ();
     TEST_COMPARE (r.n_def_threads, num_def_threads);
     TEST_COMPARE (r.n_user_threads, num_user_threads);
+    TEST_COMPARE (r.n_arenas, expected_n_arenas);
+    TEST_COMPARE (r.n_malloc_mmap, 1);
+    /* On some architectures the loader might use more than one page.  */
+    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
   }
 
   /* Let the threads finish.  */
@@ -140,8 +163,22 @@ do_test_threads (bool set_guard)
     struct proc_maps_t r = read_proc_maps ();
     TEST_COMPARE (r.n_def_threads, 0);
     TEST_COMPARE (r.n_user_threads, 0);
+    TEST_COMPARE (r.n_arenas, expected_n_arenas);
+    TEST_COMPARE (r.n_malloc_mmap, 1);
+    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
   }
+
+  free (p);
+}
+
+static void
+do_prepare (int argc, char *argv[])
+{
+  TEST_VERIFY_EXIT (argc == 2);
+  expected_n_arenas = strtol (argv[1], NULL, 10);
+  expected_n_arenas = expected_n_arenas - 1;
 }
+#define PREPARE do_prepare
 
 static int
 do_test (void)
diff --git a/malloc/arena.c b/malloc/arena.c
index 6f03955ff2..d1e214ac2e 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -17,6 +17,7 @@
    not, see <https://www.gnu.org/licenses/>.  */
 
 #include <stdbool.h>
+#include <setvmaname.h>
 
 #define TUNABLE_NAMESPACE malloc
 #include <elf/dl-tunables.h>
@@ -436,6 +437,9 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
       return 0;
     }
 
+  /* Only consider the actual usable range.  */
+  __set_vma_name (p2, size, " glibc: malloc arena");
+
   madvise_thp (p2, size);
 
   h = (heap_info *) p2;
diff --git a/malloc/malloc.c b/malloc/malloc.c
index d0bbbf3710..78a531bc7a 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -218,6 +218,7 @@
 #include <sys/sysinfo.h>
 
 #include <ldsodefs.h>
+#include <setvmaname.h>
 
 #include <unistd.h>
 #include <stdio.h>    /* needed for malloc_stats */
@@ -2428,6 +2429,8 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
   madvise_thp (mm, size);
 #endif
 
+  __set_vma_name (mm, size, " glibc: malloc");
+
   /* The offset to the start of the mmapped region is stored in the prev_size
      field of the chunk.  This allows us to adjust returned start address to
@@ -2513,6 +2516,8 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
   madvise_thp (mbrk, size);
 #endif
 
+  __set_vma_name (mbrk, size, " glibc: malloc");
+
   /* Record that we no longer have a contiguous sbrk region.  After the first
      time mmap is used as backup, we do not ever rely on contiguous space
      since this could incorrectly bridge regions.  */
diff --git a/nptl/Makefile b/nptl/Makefile
index ffa5722e48..d969419af7 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -699,6 +699,10 @@ tst-audit-threads-ENV = LD_AUDIT=$(objpfx)tst-audit-threads-mod1.so
 tst-setuid1-static-ENV = \
   LD_LIBRARY_PATH=$(ld-library-path):$(common-objpfx)elf:$(common-objpfx)nss
 
+tst-pthread-proc-maps-ENV = \
+  GLIBC_TUNABLES=glibc.malloc.arena_max=8:glibc.malloc.mmap_threshold=1024
+tst-pthread-proc-maps-ARGS = 8
+
 # The tests here better do not run in parallel.
 ifeq ($(run-built-tests),yes)
 ifneq ($(filter %tests,$(MAKECMDGOALS)),)
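As a usage note (not part of the patch): the decorations can also be checked from an ordinary application rather than through the glibc test driver. The sketch below mirrors the test's read_proc_maps logic; it assumes a kernel with anonymous VMA name support and the default glibc.malloc.mmap_threshold of 128 KiB, so a 256 KiB request is served by mmap and should appear as "[anon: glibc: malloc]".

/* Minimal sketch: count mmap-backed malloc mappings in /proc/self/maps.
   Illustrative only; not glibc test code.  */
#define _GNU_SOURCE 1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  /* Above the default mmap threshold, so served by a dedicated mmap.  */
  void *p = malloc (256 * 1024);

  FILE *f = fopen ("/proc/self/maps", "r");
  if (f == NULL)
    return 1;

  char *line = NULL;
  size_t len = 0;
  int n_malloc_mmap = 0;
  while (getline (&line, &len, f) != -1)
    if (strstr (line, "[anon: glibc: malloc]") != NULL)
      n_malloc_mmap++;
  printf ("mmap-backed malloc mappings: %d\n", n_malloc_mmap);

  free (line);
  fclose (f);
  free (p);
  return 0;
}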