https://gcc.gnu.org/g:dc887145c0e9ae44e939dab3e0198346fe660793

commit r16-950-gdc887145c0e9ae44e939dab3e0198346fe660793
Author: Jakub Jelinek <ja...@redhat.com>
Date:   Thu May 29 09:58:30 2025 +0200

    ggc-page: Fix up build on non-USING_MMAP hosts [PR120464]
    
    The r16-852 "Use optimize free lists for alloc_pages" change broke the
    build on non-USING_MMAP hosts.
    I don't have access to one, so I've just added #undef USING_MMAP before
    the first use of that macro after the definitions in order to test
    those code paths.
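
    (A rough sketch of that testing hack; the exact preprocessor lines
    surrounding it in ggc-page.cc are an assumption here:

        #ifdef HAVE_MMAP_ANON
        # define USING_MMAP
        #endif
        #undef USING_MMAP   /* inserted for testing only */
        #ifndef USING_MMAP
        # define USING_MALLOC_PAGE_GROUPS
        #endif

    with USING_MMAP undefined, the USING_MALLOC_PAGE_GROUPS fallback paths
    get compiled and can at least be build-tested.)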
    
    There were two problems.  One was a missed G.free_pages to
    free_list->free_pages replacement in #ifdef USING_MALLOC_PAGE_GROUPS
    guarded code, which resulted in an obvious compile error.
    
    Once that was fixed, there was an ICE during the self-tests, and
    without the self-tests pretty much on any garbage collection.
    The problem is that the patch moved all of release_pages into the new
    do_release_pages and runs it for each freelist from the new
    release_pages wrapper.  The #ifdef USING_MALLOC_PAGE_GROUPS code had
    two loops, one that walked the entries in the freelist and freed those
    whose page group was unused, and another that walked all the groups
    (regardless of which freelist they belong to) and freed the unused
    ones.
    With the change, the first call to do_release_pages would free the
    entries with unused groups from the first freelist and then free all
    the unused groups; the second and following calls would then access
    the already freed groups, crashing there, and would also walk all the
    groups again looking for unused ones (but there are guaranteed to be
    none).
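
    To make the failure mode concrete, here is a minimal self-contained
    C++ sketch of that shape (invented toy types, not GCC code):

        #include <cstdlib>

        struct group { int in_use; group *next; };
        struct entry { group *grp; entry *next; };
        struct free_list { entry *entries; };

        static group *all_groups;       /* shared by every freelist */
        static free_list lists[2];

        /* Buggy shape: the per-freelist routine also frees the shared
           group list, which is only safe to do once.  */
        static void
        do_release (free_list *l)
        {
          /* Loop 1: unlink this list's entries whose group is unused.
             On the second call this reads groups freed below.  */
          for (entry **pp = &l->entries; *pp; )
            if ((*pp)->grp->in_use == 0)
              {
                entry *e = *pp;
                *pp = e->next;
                std::free (e);
              }
            else
              pp = &(*pp)->next;

          /* Loop 2: frees ALL unused groups, not just this list's.  */
          for (group **gp = &all_groups; *gp; )
            if ((*gp)->in_use == 0)
              {
                group *g = *gp;
                *gp = g->next;
                std::free (g);
              }
            else
              gp = &(*gp)->next;
        }

        int
        main ()
        {
          group *g = (group *) std::calloc (1, sizeof (group));
          all_groups = g;
          for (free_list &l : lists)
            {
              entry *e = (entry *) std::calloc (1, sizeof (entry));
              e->grp = g;
              l.entries = e;
            }
          do_release (&lists[0]);   /* loop 2 frees g */
          do_release (&lists[1]);   /* loop 1 reads freed g: crash */
        }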
    
    So, this patch fixes it by moving the unused group freeing to the
    caller, release_pages, after all the freelists have been processed,
    and, while at it, moves the statistics printout there as well; there
    is no need to print separate info for each freelist when previously we
    were emitting just one.
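
    In the same toy terms as the sketch above, the fixed shape is roughly
    this (again an illustration, not the actual GCC code):

        /* Only the per-list loop stays in the per-freelist routine; the
           shared group freeing and the statistics move to the caller.  */
        static void
        do_release (free_list *l, size_t &n1, size_t &n2)
        {
          (void) n1;    /* only updated on some configurations */
          (void) n2;
          for (entry **pp = &l->entries; *pp; )
            if ((*pp)->grp->in_use == 0)
              {
                entry *e = *pp;
                *pp = e->next;
                std::free (e);
              }
            else
              pp = &(*pp)->next;
        }

        static void
        release_pages ()
        {
          size_t n1 = 0, n2 = 0;
          for (free_list &l : lists)
            do_release (&l, n1, n2);
          /* Loop 2 now runs exactly once, after all freelists are
             drained, so no freed group is ever revisited.  */
          for (group **gp = &all_groups; *gp; )
            if ((*gp)->in_use == 0)
              {
                group *g = *gp;
                *gp = g->next;
                std::free (g);
              }
            else
              gp = &(*gp)->next;
          /* ... and the n1/n2 statistics get printed once here.  */
        }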
    
    2025-05-29  Jakub Jelinek  <ja...@redhat.com>
    
            PR bootstrap/120464
            * ggc-page.cc (struct ggc_globals): Fix up comment formatting.
            (find_free_list): Likewise.
            (alloc_page): For defined(USING_MALLOC_PAGE_GROUPS) use
            free_list->free_pages instead of G.free_pages.
            (do_release_pages): Add n1 and n2 reference arguments and mark
            them as used.  Move the defined(USING_MALLOC_PAGE_GROUPS) page
            group freeing and the dumping of statistics to release_pages.
            Formatting fixes.
            (release_pages): Adjust the call to do_release_pages.  Move the
            defined(USING_MALLOC_PAGE_GROUPS) page group freeing and the
            dumping of statistics here.
            (ggc_handle_finalizers): Fix up comment formatting and typo.

Diff:
---
 gcc/ggc-page.cc | 50 +++++++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 23 deletions(-)

diff --git a/gcc/ggc-page.cc b/gcc/ggc-page.cc
index 0f674c0d6d7c..ebd11978da35 100644
--- a/gcc/ggc-page.cc
+++ b/gcc/ggc-page.cc
@@ -426,7 +426,7 @@ static struct ggc_globals
   int dev_zero_fd;
 #endif
 
-  /* A cache of free system pages. Entry 0 is fallback.  */
+  /* A cache of free system pages.  Entry 0 is fallback.  */
   struct free_list free_lists[num_free_list];
 
 #ifdef USING_MALLOC_PAGE_GROUPS
@@ -787,7 +787,7 @@ find_free_list (size_t entry_size)
          return &G.free_lists[i];
        }
     }
-  /* Fallback. Does this happen?  */
+  /* Fallback.  Does this happen?  */
   if (GATHER_STATISTICS)
     G.stats.fallback++;
   return &G.free_lists[0];
@@ -955,7 +955,7 @@ alloc_page (unsigned order)
       /* If we allocated multiple pages, put the rest on the free list.  */
       if (multiple_pages)
        {
-         struct page_entry *e, *f = G.free_pages;
+         struct page_entry *e, *f = free_list->free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
@@ -1073,10 +1073,10 @@ free_page (page_entry *entry)
 /* Release the free page cache for FREE_LIST to the system.  */
 
 static void
-do_release_pages (struct free_list *free_list)
+do_release_pages (struct free_list *free_list, size_t &n1, size_t &n2)
 {
-  size_t n1 = 0;
-  size_t n2 = 0;
+  (void) n1;
+  (void) n2;
 #ifdef USING_MADVISE
   page_entry *p, *start_p;
   char *start;
@@ -1089,7 +1089,7 @@ do_release_pages (struct free_list *free_list)
      This allows other allocators to grab these areas if needed.
      This is only done on larger chunks to avoid fragmentation.
      This does not always work because the free_pages list is only
-     approximately sorted. */
+     approximately sorted.  */
 
   p = free_list->free_pages;
   prev = NULL;
@@ -1104,7 +1104,7 @@ do_release_pages (struct free_list *free_list)
         {
           len += p->bytes;
          if (!p->discarded)
-             mapped_len += p->bytes;
+           mapped_len += p->bytes;
          newprev = p;
           p = p->next;
         }
@@ -1129,7 +1129,7 @@ do_release_pages (struct free_list *free_list)
    }
 
   /* Now give back the fragmented pages to the OS, but keep the address
-     space to reuse it next time. */
+     space to reuse it next time.  */
 
   for (p = free_list->free_pages; p; )
     {
@@ -1149,10 +1149,10 @@ do_release_pages (struct free_list *free_list)
         }
       /* Give the page back to the kernel, but don't free the mapping.
          This avoids fragmentation in the virtual memory map of the
-        process. Next time we can reuse it by just touching it. */
+        process.  Next time we can reuse it by just touching it.  */
       madvise (start, len, MADV_DONTNEED);
       /* Don't count those pages as mapped to not touch the garbage collector
-         unnecessarily. */
+        unnecessarily.  */
       G.bytes_mapped -= len;
       n2 += len;
       while (start_p != p)
@@ -1195,7 +1195,6 @@ do_release_pages (struct free_list *free_list)
 #endif
 #ifdef USING_MALLOC_PAGE_GROUPS
   page_entry **pp, *p;
-  page_group **gp, *g;
 
   /* Remove all pages from free page groups from the list.  */
   pp = &free_list->free_pages;
@@ -1207,6 +1206,20 @@ do_release_pages (struct free_list *free_list)
       }
     else
       pp = &p->next;
+#endif
+}
+
+/* Release the free page cache to the system.  */
+
+static void
+release_pages ()
+{
+  size_t n1 = 0;
+  size_t n2 = 0;
+  for (int i = 0; i < num_free_list; i++)
+    do_release_pages (&G.free_lists[i], n1, n2);
+#ifdef USING_MALLOC_PAGE_GROUPS
+  page_group **gp, *g;
 
   /* Remove all free page groups, and release the storage.  */
   gp = &G.page_groups;
@@ -1232,15 +1245,6 @@ do_release_pages (struct free_list *free_list)
     }
 }
 
-/* Release the free page cache to the system.  */
-
-static void
-release_pages ()
-{
-  for (int i = 0; i < num_free_list; i++)
-    do_release_pages (&G.free_lists[i]);
-}
-
 /* This table provides a fast way to determine ceil(log_2(size)) for
    allocation requests.  The minimum allocation size is eight bytes.  */
 #define NUM_SIZE_LOOKUP 512
@@ -2008,9 +2012,9 @@ clear_marks (void)
     }
 }
 
-/* Check if any blocks with a registered finalizer have become unmarked. If so
+/* Check if any blocks with a registered finalizer have become unmarked.  If so
    run the finalizer and unregister it because the block is about to be freed.
-   Note that no garantee is made about what order finalizers will run in so
+   Note that no guarantee is made about what order finalizers will run in so
    touching other objects in gc memory is extremely unwise.  */
 
 static void
