A note on why this change is coming.

malloc.c (as it is today) mprotects an internal object back and forth
between RW and R to protect it.  This object is in the bss, it is not
allocated with mmap.  With the upcoming mimmutable change, the bss will
become immutable by default at program load time.  mimmutable even prevents
changing a RW object to R.
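
For illustration, the pattern in question looks roughly like this: a
page-aligned, page-padded object in the bss whose protection is opened
and closed around every write.  This is only a sketch; the real object
is malloc_readonly in malloc.c, the struct contents and PAGE_SZ here
are made up:

    #include <sys/mman.h>

    #define PAGE_SZ 4096            /* assumed page size, sketch only */

    /* read-mostly state in the bss, padded and aligned to a whole page */
    static union {
            struct {
                    int option_a;
                    int option_b;
            } o;
            unsigned char pad[PAGE_SZ];
    } opts __attribute__((aligned(PAGE_SZ)));

    static void
    opts_set(int a, int b)
    {
            /* open the window: flip the page back to RW */
            mprotect(&opts, sizeof(opts), PROT_READ | PROT_WRITE);
            opts.o.option_a = a;
            opts.o.option_b = b;
            /* and close it again: back to R */
            mprotect(&opts, sizeof(opts), PROT_READ);
    }

(Error checks omitted; malloc.c only does this when the object happens
to be page aligned.)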

So I have added an ELF section which marks non-immutable regions of the
address space, and placed this object there.  That solves the problem
at hand.
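
Mechanically that is just a section attribute on the object, so the
linker groups it into a region that is left mutable at load time.  A
hedged sketch, along the lines of how malloc_readonly is already
declared in malloc.c; the section name ".openbsd.mutable" is
illustrative and the real name and plumbing may differ:

    /* place the object in a dedicated ELF section instead of plain .bss */
    static union {
            struct malloc_readonly mopts;
            unsigned char _pad[MALLOC_PAGESIZE];
    } malloc_readonly
        __attribute__((section(".openbsd.mutable")))
        __attribute__((aligned(MALLOC_PAGESIZE)));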

However, this is leading otto to look at the life-cycle of this object,
to change when its protection is changed, and to consider that the code
can make the object immutable by itself, at the right time.
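
Concretely, once the options are frozen the code could lock the object
down itself, in the same spirit as the /* mimmutable(p, sz) */
placeholders the diff below leaves for the mmap'd regions, but applied
to the read-only options object.  A minimal fragment, assuming the
mimmutable(2) interface (int mimmutable(void *addr, size_t len)) and
reusing malloc.c's own wrterror() error path:

    #include <sys/mman.h>

    /* options are final: make the page read-only, then forbid any
     * further permission changes on it */
    if (mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ))
            wrterror(NULL, "malloc_init mprotect failed");
    if (mimmutable(&malloc_readonly, sizeof(malloc_readonly)))
            wrterror(NULL, "malloc_init mimmutable failed");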

This diff should be tested in isolation from the immutable changes.
The immutable changes will start coming into the tree soon...


Otto Moerbeek <o...@drijf.net> wrote:

> Hi,
> 
> Rearrange things so that we do not have to flip protection of r/o
> pages back and forth when switching from single-threaded to
> multi-threaded. Also saves work in many cases by not initializing pools
> until they are used: the pool used for MAP_CONCEAL pages is not used
> by very many processes and if you have just a few threads only a few
> pools will be set up.
> 
> The actual mimmutable calls are to be added later. I marked the places where
> they should end up. 
> 
> Please test, 
> 
>       -Otto
> 
> 
> Index: stdlib/malloc.c
> ===================================================================
> RCS file: /cvs/src/lib/libc/stdlib/malloc.c,v
> retrieving revision 1.274
> diff -u -p -r1.274 malloc.c
> --- stdlib/malloc.c   30 Jun 2022 17:15:48 -0000      1.274
> +++ stdlib/malloc.c   29 Sep 2022 09:42:57 -0000
> @@ -142,6 +142,7 @@ struct dir_info {
>       int malloc_junk;                /* junk fill? */
>       int mmap_flag;                  /* extra flag for mmap */
>       int mutex;
> +     int malloc_mt;                  /* multi-threaded mode? */
>                                       /* lists of free chunk info structs */
>       struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
>                                       /* lists of chunks with free slots */
> @@ -181,8 +182,6 @@ struct dir_info {
>  #endif /* MALLOC_STATS */
>       u_int32_t canary2;
>  };
> -#define DIR_INFO_RSZ ((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
> -                     ~MALLOC_PAGEMASK)
>  
>  static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);
>  
> @@ -208,7 +207,6 @@ struct malloc_readonly {
>                                       /* Main bookkeeping information */
>       struct dir_info *malloc_pool[_MALLOC_MUTEXES];
>       u_int   malloc_mutexes;         /* how much in actual use? */
> -     int     malloc_mt;              /* multi-threaded mode? */
>       int     malloc_freecheck;       /* Extensive double free check */
>       int     malloc_freeunmap;       /* mprotect free pages PROT_NONE? */
>       int     def_malloc_junk;        /* junk fill? */
> @@ -257,7 +255,7 @@ static void malloc_exit(void);
>  static inline void
>  _MALLOC_LEAVE(struct dir_info *d)
>  {
> -     if (mopts.malloc_mt) {
> +     if (d->malloc_mt) {
>               d->active--;
>               _MALLOC_UNLOCK(d->mutex);
>       }
> @@ -266,7 +264,7 @@ _MALLOC_LEAVE(struct dir_info *d)
>  static inline void
>  _MALLOC_ENTER(struct dir_info *d)
>  {
> -     if (mopts.malloc_mt) {
> +     if (d->malloc_mt) {
>               _MALLOC_LOCK(d->mutex);
>               d->active++;
>       }
> @@ -291,7 +289,7 @@ hash(void *p)
>  static inline struct dir_info *
>  getpool(void)
>  {
> -     if (!mopts.malloc_mt)
> +     if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
>               return mopts.malloc_pool[1];
>       else    /* first one reserved for special pool */
>               return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
> @@ -496,46 +494,22 @@ omalloc_init(void)
>  }
>  
>  static void
> -omalloc_poolinit(struct dir_info **dp, int mmap_flag)
> +omalloc_poolinit(struct dir_info *d, int mmap_flag)
>  {
> -     char *p;
> -     size_t d_avail, regioninfo_size;
> -     struct dir_info *d;
>       int i, j;
>  
> -     /*
> -      * Allocate dir_info with a guard page on either side. Also
> -      * randomise offset inside the page at which the dir_info
> -      * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
> -      */
> -     if ((p = MMAPNONE(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2), mmap_flag)) ==
> -         MAP_FAILED)
> -             wrterror(NULL, "malloc init mmap failed");
> -     mprotect(p + MALLOC_PAGESIZE, DIR_INFO_RSZ, PROT_READ | PROT_WRITE);
> -     d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
> -     d = (struct dir_info *)(p + MALLOC_PAGESIZE +
> -         (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
> -
> -     rbytes_init(d);
> -     d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
> -     regioninfo_size = d->regions_total * sizeof(struct region_info);
> -     d->r = MMAP(regioninfo_size, mmap_flag);
> -     if (d->r == MAP_FAILED) {
> -             d->regions_total = 0;
> -             wrterror(NULL, "malloc init mmap failed");
> -     }
> +     d->r = NULL;
> +     d->rbytesused = sizeof(d->rbytes);
> +     d->regions_free = d->regions_total = 0;
>       for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
>               LIST_INIT(&d->chunk_info_list[i]);
>               for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
>                       LIST_INIT(&d->chunk_dir[i][j]);
>       }
> -     STATS_ADD(d->malloc_used, regioninfo_size + 3 * MALLOC_PAGESIZE);
>       d->mmap_flag = mmap_flag;
>       d->malloc_junk = mopts.def_malloc_junk;
>       d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
>       d->canary2 = ~d->canary1;
> -
> -     *dp = d;
>  }
>  
>  static int
> @@ -550,7 +524,8 @@ omalloc_grow(struct dir_info *d)
>       if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
>               return 1;
>  
> -     newtotal = d->regions_total * 2;
> +     newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
> +         d->regions_total * 2;
>       newsize = PAGEROUND(newtotal * sizeof(struct region_info));
>       mask = newtotal - 1;
>  
> @@ -575,10 +550,12 @@ omalloc_grow(struct dir_info *d)
>               }
>       }
>  
> -     oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
> -     /* clear to avoid meta info ending up in the cache */
> -     unmap(d, d->r, oldpsz, oldpsz);
> -     d->regions_free += d->regions_total;
> +     if (d->regions_total > 0) {
> +             oldpsz = PAGEROUND(d->regions_total * sizeof(struct region_info));
> +             /* clear to avoid meta info ending up in the cache */
> +             unmap(d, d->r, oldpsz, oldpsz);
> +     }
> +     d->regions_free += newtotal - d->regions_total;
>       d->regions_total = newtotal;
>       d->r = p;
>       return 0;
> @@ -595,7 +572,7 @@ insert(struct dir_info *d, void *p, size
>       size_t mask;
>       void *q;
>  
> -     if (d->regions_free * 4 < d->regions_total) {
> +     if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
>               if (omalloc_grow(d))
>                       return 1;
>       }
> @@ -627,6 +604,8 @@ find(struct dir_info *d, void *p)
>       if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
>           d->canary1 != ~d->canary2)
>               wrterror(d, "internal struct corrupt");
> +     if (d->r == NULL)
> +             return NULL;
>       p = MASK_POINTER(p);
>       index = hash(p) & mask;
>       r = d->r[index].p;
> @@ -1299,18 +1278,47 @@ _malloc_init(int from_rthreads)
>               _MALLOC_UNLOCK(1);
>               return;
>       }
> -     if (!mopts.malloc_canary)
> +     if (!mopts.malloc_canary) {
> +             char *p;
> +             size_t sz, d_avail;
> +
>               omalloc_init();
> +             /*
> +              * Allocate dir_infos with a guard page on either side. Also
> +              * randomise offset inside the page at which the dir_infos
> +              * lay (subject to alignment by 1 << MALLOC_MINSHIFT)
> +              */
> +             sz = mopts.malloc_mutexes * sizeof(*d) + 2 * MALLOC_PAGESIZE;
> +             if ((p = MMAPNONE(sz, 0)) == MAP_FAILED)
> +                     wrterror(NULL, "malloc_init mmap failed");
> +             if (mprotect(p + MALLOC_PAGESIZE, mopts.malloc_mutexes * sizeof(*d),
> +                 PROT_READ | PROT_WRITE))
> +                     wrterror(NULL, "malloc_init mprotect failed");
> +             d_avail = (((mopts.malloc_mutexes * sizeof(*d) + MALLOC_PAGEMASK) &
> +                 ~MALLOC_PAGEMASK) - (mopts.malloc_mutexes * sizeof(*d))) >>
> +                 MALLOC_MINSHIFT;
> +             d = (struct dir_info *)(p + MALLOC_PAGESIZE +
> +                 (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
> +             STATS_ADD(d[1].malloc_used, sz);
> +             for (i = 0; i < mopts.malloc_mutexes; i++)
> +                     mopts.malloc_pool[i] = &d[i];
> +             mopts.internal_funcs = 1;
> +             if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
> +                     if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
> +                         PROT_READ))
> +                             wrterror(NULL, "malloc_init mprotect failed");
> +             }
> +             /* mimmutable(p, sz) */
> +     }
>  
>       nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
> -     if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
> -             mprotect(&malloc_readonly, sizeof(malloc_readonly),
> -                 PROT_READ | PROT_WRITE);
>       for (i = 0; i < nmutexes; i++) {
> -             if (mopts.malloc_pool[i])
> +             d = mopts.malloc_pool[i];
> +             d->malloc_mt = from_rthreads;
> +             if (d->canary1 == ~d->canary2)
>                       continue;
>               if (i == 0) {
> -                     omalloc_poolinit(&d, MAP_CONCEAL);
> +                     omalloc_poolinit(d, MAP_CONCEAL);
>                       d->malloc_junk = 2;
>                       d->bigcache_size = 0;
>                       for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
> @@ -1318,7 +1326,7 @@ _malloc_init(int from_rthreads)
>               } else {
>                       size_t sz = 0;
>  
> -                     omalloc_poolinit(&d, 0);
> +                     omalloc_poolinit(d, 0);
>                       d->malloc_junk = mopts.def_malloc_junk;
>                       d->bigcache_size = mopts.def_maxcache;
>                       for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
> @@ -1332,6 +1340,7 @@ _malloc_init(int from_rthreads)
>                               if (p == MAP_FAILED)
>                                       wrterror(NULL,
>                                           "malloc init mmap failed");
> +                             /* mimmutable (p, sz); */
>                               for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
>                                       d->smallcache[j].pages = p;
>                                       p = (char *)p + d->smallcache[j].max *
> @@ -1341,20 +1350,8 @@ _malloc_init(int from_rthreads)
>                       }
>               }
>               d->mutex = i;
> -             mopts.malloc_pool[i] = d;
>       }
>  
> -     if (from_rthreads)
> -             mopts.malloc_mt = 1;
> -     else
> -             mopts.internal_funcs = 1;
> -
> -     /*
> -      * Options have been set and will never be reset.
> -      * Prevent further tampering with them.
> -      */
> -     if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
> -             mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
>       _MALLOC_UNLOCK(1);
>  }
>  DEF_STRONG(_malloc_init);
> @@ -1419,7 +1416,7 @@ findpool(void *p, struct dir_info *argpo
>       if (r == NULL) {
>               u_int i, nmutexes;
>  
> -             nmutexes = mopts.malloc_mt ? mopts.malloc_mutexes : 2;
> +             nmutexes = mopts.malloc_pool[1]->malloc_mt ? mopts.malloc_mutexes : 2;
>               STATS_INC(pool->other_pool);
>               for (i = 1; i < nmutexes; i++) {
>                       u_int j = (argpool->mutex + i) & (nmutexes - 1);
> @@ -2331,7 +2328,7 @@ malloc_dump1(int fd, int poolno, struct 
>       dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
>       if (d == NULL)
>               return;
> -     dprintf(fd, "J=%d Fl=%x\n", d->malloc_junk, d->mmap_flag);
> +     dprintf(fd, "MT=%d J=%d Fl=%x\n", d->malloc_mt, d->malloc_junk, d->mmap_flag);
>       dprintf(fd, "Region slots free %zu/%zu\n",
>               d->regions_free, d->regions_total);
>       dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
> @@ -2420,9 +2417,9 @@ malloc_exit(void)
>       if (fd != -1) {
>               dprintf(fd, "******** Start dump %s *******\n", __progname);
>               dprintf(fd,
> -                 "MT=%d M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
> +                 "M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u "
>                   "G=%zu\n",
> -                 mopts.malloc_mt, mopts.malloc_mutexes,
> +                 mopts.malloc_mutexes,
>                   mopts.internal_funcs, mopts.malloc_freecheck,
>                   mopts.malloc_freeunmap, mopts.def_malloc_junk,
>                   mopts.malloc_realloc, mopts.malloc_xmalloc,
> 
