
[v4] malloc: send freed small chunks to smallbin

Message ID YYU7Cu7CCrVtJDV7Es3bAL4ZTpH0Uxw2KOmSqKmLyTdrZrd8Tp5Z2vGmCdBZXGLuGwYD1x_hTT-8Buk_QcFZJinKpAPZeYrZiydWacvjj6E=@proton.me
State New

Commit Message

k4lizen Oct. 31, 2024, 6:47 p.m. UTC
Large chunks get added to the unsorted bin since
sorting them takes time. For small chunks, the
benefit of adding them to the unsorted bin is
non-existent; it actually hurts performance.

Splitting and malloc_consolidate still add small
chunks to the unsorted bin, but we can hint the
compiler that this is a relatively rare occurrence.
Benchmarking shows this change to be consistently
beneficial.

Signed-off-by: k4lizen <k4lizen@proton.me>
---
 malloc/malloc.c | 53 +++++++++++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 19 deletions(-)
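
For reference, the kind of workload this change targets can be sketched
with a small, hypothetical microbenchmark (not part of the patch and not
one of glibc's own benchtests): batches of same-sized small allocations
that are freed and reallocated repeatedly.  The 256-byte request size is
assumed to be above the default fastbin limit but within the smallbin
range, and freeing only every other chunk keeps freed chunks from
coalescing, so most frees reach the _int_free path changed here.

/* Hypothetical sketch: stress the free/malloc path for small chunks.
   Actual numbers depend on tcache and other tunables; this is only an
   illustration, not the benchmark used for the patch.  */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCHUNKS 1024
#define ROUNDS  100000
#define SMALL_SIZE 256   /* past fastbins, within smallbin range */

int
main (void)
{
  static void *ptrs[NCHUNKS];
  struct timespec start, end;

  for (int i = 0; i < NCHUNKS; i++)
    ptrs[i] = malloc (SMALL_SIZE);

  clock_gettime (CLOCK_MONOTONIC, &start);
  for (int r = 0; r < ROUNDS; r++)
    {
      /* Free every other chunk so freed chunks never border each other
         and therefore do not coalesce into larger (non-small) chunks.  */
      for (int i = 0; i < NCHUNKS; i += 2)
        free (ptrs[i]);
      for (int i = 0; i < NCHUNKS; i += 2)
        ptrs[i] = malloc (SMALL_SIZE);
    }
  clock_gettime (CLOCK_MONOTONIC, &end);

  printf ("elapsed: %.3f s\n",
          (end.tv_sec - start.tv_sec)
          + (end.tv_nsec - start.tv_nsec) / 1e9);
  return 0;
}

A patched and an unpatched glibc build could be compared by running such
a program under each, for example via the testrun.sh wrapper in the
glibc build tree.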

Comments

Carlos O'Donell Nov. 4, 2024, 2:30 p.m. UTC | #1
On 10/31/24 2:47 PM, k4lizen wrote:
> Large chunks get added to the unsorted bin since
> sorting them takes time. For small chunks, the
> benefit of adding them to the unsorted bin is
> non-existent; it actually hurts performance.
> 
> Splitting and malloc_consolidate still add small
> chunks to the unsorted bin, but we can hint the
> compiler that this is a relatively rare occurrence.
> Benchmarking shows this change to be consistently
> beneficial.
> 
> Signed-off-by: k4lizen <k4lizen@proton.me>

Please note that under the DCO you must use your real name:
https://sourceware.org/glibc/wiki/Contribution%20checklist

It is one of the consequences of the DCO, though pseudonyms can
be used with copyright assignment via the FSF.

Could you please confirm your real name?
Florian Weimer Nov. 4, 2024, 2:54 p.m. UTC | #2
* Carlos O'Donell:

> On 10/31/24 2:47 PM, k4lizen wrote:
>> Large chunks get added to the unsorted bin since
>> sorting them takes time. For small chunks, the
>> benefit of adding them to the unsorted bin is
>> non-existent; it actually hurts performance.
>> 
>> Splitting and malloc_consolidate still add small
>> chunks to the unsorted bin, but we can hint the
>> compiler that this is a relatively rare occurrence.
>> Benchmarking shows this change to be consistently
>> beneficial.
>> 
>> Signed-off-by: k4lizen <k4lizen@proton.me>
>
> Please note that under DCO you must use your real name:
> https://sourceware.org/glibc/wiki/Contribution%20checklist
>
> It is one of the consequences of DCO, though pseudonyms can
> be used with copyright attribution via the FSF.
>
> May you please confirm your real name?

Alternatively, you can start the copyright assignment process with the
FSF.  They will guard your identity.  The available forms are linked
from here: <https://sourceware.org/glibc/wiki/CopyrightFSForDisclaim>
If in doubt, use the request-assign.future form.

Thanks,
Florian

Patch

diff --git a/malloc/malloc.c b/malloc/malloc.c
index bcb6e5b83c..60b537992c 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4156,9 +4156,9 @@  _int_malloc (mstate av, size_t bytes)
 #endif
             }
 
-          /* place chunk in bin */
-
-          if (in_smallbin_range (size))
+          /* Place chunk in bin.  Only malloc_consolidate() and splitting can put
+             small chunks into the unsorted bin. */
+          if (__glibc_unlikely (in_smallbin_range (size)))
             {
               victim_index = smallbin_index (size);
               bck = bin_at (av, victim_index);
@@ -4723,23 +4723,39 @@  _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
       } else
 	clear_inuse_bit_at_offset(nextchunk, 0);
 
-      /*
-	Place the chunk in unsorted chunk list. Chunks are
-	not placed into regular bins until after they have
-	been given one chance to be used in malloc.
-      */
+      mchunkptr bck, fwd;
+
+      if (!in_smallbin_range (size))
+        {
+          /* Place large chunks in unsorted chunk list.  Large chunks are
+             not placed into regular bins until after they have
+             been given one chance to be used in malloc.
+
+             This branch is first in the if-statement to help branch
+             prediction on consecutive adjacent frees. */
+          bck = unsorted_chunks (av);
+          fwd = bck->fd;
+          if (__glibc_unlikely (fwd->bk != bck))
+            malloc_printerr ("free(): corrupted unsorted chunks");
+          p->fd_nextsize = NULL;
+          p->bk_nextsize = NULL;
+        }
+      else
+        {
+          /* Place small chunks directly in their smallbin, so they
+             don't pollute the unsorted bin. */
+          int chunk_index = smallbin_index (size);
+          bck = bin_at (av, chunk_index);
+          fwd = bck->fd;
+
+          if (__glibc_unlikely (fwd->bk != bck))
+            malloc_printerr ("free(): chunks in smallbin corrupted");
+
+          mark_bin (av, chunk_index);
+        }
 
-      mchunkptr bck = unsorted_chunks (av);
-      mchunkptr fwd = bck->fd;
-      if (__glibc_unlikely (fwd->bk != bck))
-	malloc_printerr ("free(): corrupted unsorted chunks");
-      p->fd = fwd;
       p->bk = bck;
-      if (!in_smallbin_range(size))
-	{
-	  p->fd_nextsize = NULL;
-	  p->bk_nextsize = NULL;
-	}
+      p->fd = fwd;
       bck->fd = p;
       fwd->bk = p;
 
@@ -4748,7 +4764,6 @@  _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
 
       check_free_chunk(av, p);
     }
-
   else
     {
       /* If the chunk borders the current high end of memory,