diff mbox series

middle-end/114563 - improve release_pages

Message ID 20240731140250.78E233858288@sourceware.org
State New
Headers show
Series middle-end/114563 - improve release_pages | expand

Commit Message

Richard Biener July 31, 2024, 2:02 p.m. UTC
The following improves release_pages when using the madvise path
by sorting the freelist so that more page entries become contiguous
and can possibly be released.  This populates the unused prev pointer so the reclaim
can then easily unlink from the freelist without re-ordering it.
The paths not having madvise do not keep the memory allocated, so
I left them untouched.

Re-bootstrap and regtest running on x86_64-unknown-linux-gnu.

I've CCed people who have recently worked on release_pages.  This doesn't
really address PR114563, but I thought I'd post this patch anyway - the
actual issue we run into for the PR is the linear search of
G.free_pages when that list becomes large but a requested allocation
cannot be served from it.

	PR middle-end/114563
	* ggc-page.cc (page_sort): New qsort comparator.
	(release_pages): Sort the free_pages list entries by their
	memory block virtual address to improve contiguous memory
	chunk release.
---
 gcc/ggc-page.cc | 68 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 20 deletions(-)
diff mbox series

Patch

diff --git a/gcc/ggc-page.cc b/gcc/ggc-page.cc
index 4245f843a29..c9d8a8cd8e9 100644
--- a/gcc/ggc-page.cc
+++ b/gcc/ggc-page.cc
@@ -1010,6 +1010,19 @@  free_page (page_entry *entry)
   G.free_pages = entry;
 }
 
+/* Comparison function to sort page_entry objects by virtual address.  */
+
+static int
+page_sort (const void *pa_, const void *pb_)
+{
+  const page_entry *pa = *(const page_entry * const *)pa_;
+  const page_entry *pb = *(const page_entry * const *)pb_;
+  if ((uintptr_t)pa->page < (uintptr_t)pb->page)
+    return -1;
+  else
+    return 1;
+}
+
 /* Release the free page cache to the system.  */
 
 static void
@@ -1022,7 +1035,7 @@  release_pages (void)
   char *start;
   size_t len;
   size_t mapped_len;
-  page_entry *next, *prev, *newprev;
+  page_entry *prev;
   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
 
   /* First free larger continuous areas to the OS.
@@ -1031,41 +1044,56 @@  release_pages (void)
      This does not always work because the free_pages list is only
      approximately sorted. */
 
-  p = G.free_pages;
+  auto_vec<page_entry *> pages;
   prev = NULL;
+  p = G.free_pages;
   while (p)
     {
+      p->prev = prev;
+      pages.safe_push (p);
+      prev = p;
+      p = p->next;
+    }
+  pages.qsort (page_sort);
+
+  for (unsigned i = 0; i < pages.length ();)
+    {
+      p = pages[i];
       start = p->page;
-      start_p = p;
+      unsigned start_i = i;
       len = 0;
       mapped_len = 0;
-      newprev = prev;
-      while (p && p->page == start + len)
+      while (i < pages.length () && pages[i]->page == start + len)
         {
+	  p = pages[i];
           len += p->bytes;
 	  if (!p->discarded)
-	      mapped_len += p->bytes;
-	  newprev = p;
-          p = p->next;
+	    mapped_len += p->bytes;
+	  ++i;
         }
       if (len >= free_unit)
         {
-          while (start_p != p)
-            {
-              next = start_p->next;
-              free (start_p);
-              start_p = next;
-            }
+	  for (unsigned j = start_i; j != i; ++j)
+	    {
+	      p = pages[j];
+	      if (!p->prev)
+		{
+		  G.free_pages = p->next;
+		  if (p->next)
+		    p->next->prev = NULL;
+		}
+	      else
+		{
+		  p->prev->next = p->next;
+		  if (p->next)
+		    p->next->prev = p->prev;
+		}
+	      free (pages[j]);
+	    }
           munmap (start, len);
-	  if (prev)
-	    prev->next = p;
-          else
-            G.free_pages = p;
           G.bytes_mapped -= mapped_len;
 	  n1 += len;
-	  continue;
         }
-      prev = newprev;
    }
 
   /* Now give back the fragmented pages to the OS, but keep the address