add glue code for mallocng merge

this includes both an implementation of reclaimed-gap donation from
ldso and a version of mallocng's glue.h with namespace-safe linkage to
underlying syscalls, integration with AT_RANDOM initialization, and
internal locking that's optimized out when the process is
single-threaded.
This commit is contained in:
Rich Felker 2020-06-29 17:41:24 -04:00
parent fdf8b2ad9c
commit 785752a595
3 changed files with 129 additions and 0 deletions

View File

@ -0,0 +1,13 @@
This directory is a skeleton for upcoming merge of musl's new malloc
implementation, mallocng. To use it, drop in copies of or symlinks to
the following files from mallocng:
- meta.h
- malloc.c
- realloc.c
- free.c
- aligned_alloc.c
- malloc_usable_size.c
and build with make variable MALLOC_DIR=mallocng in config.mak or on
make command line.

View File

@ -0,0 +1,39 @@
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <sys/mman.h>
#include <errno.h>
#include "meta.h"
/* Carve a reclaimed memory gap [base, base+len) into single-slot mallocng
 * groups and queue them on the active lists, so the allocator can hand the
 * space out. Reached via __malloc_donate with gaps ldso reclaims. */
static void donate(unsigned char *base, size_t len)
{
	uintptr_t a = (uintptr_t)base;
	uintptr_t b = a + len;
	/* Round a up and b down to UNIT alignment; only the aligned interior
	 * [a,b) is usable for groups. */
	a += -a & (UNIT-1);
	b -= b & (UNIT-1);
	/* Zero the entire donated range, including the unaligned edges. */
	memset(base, 0, len);
	/* Greedily carve off the largest size class that still fits, stepping
	 * down 4 classes at a time; the `continue` below also advances sc, so
	 * a too-small remainder just falls through to smaller classes. Stop
	 * once the aligned span is exhausted (b <= a) or sc reaches 0. */
	for (int sc=47; sc>0 && b>a; sc-=4) {
		if (b-a < (size_classes[sc]+1)*UNIT) continue;
		struct meta *m = alloc_meta();
		/* One-slot group: slot 0 starts out freed rather than avail,
		 * and last_idx==0 means index 0 is the highest slot. */
		m->avail_mask = 0;
		m->freed_mask = 1;
		m->mem = (void *)a;
		m->mem->meta = m;
		m->last_idx = 0;
		m->freeable = 0; /* donated memory must never be returned/unmapped */
		m->sizeclass = sc;
		m->maplen = 0; /* not an individually mmapped group */
		/* NOTE(review): these two bytes sit at the end of the UNIT-sized
		 * group header preceding slot 0 — presumably the slot's offset (0)
		 * and an out-of-band index marker (255); confirm against the field
		 * layout in mallocng's meta.h. */
		*((unsigned char *)m->mem+UNIT-4) = 0;
		*((unsigned char *)m->mem+UNIT-3) = 255;
		/* Clear the reserved trailer byte at the end of slot 0's storage. */
		m->mem->storage[size_classes[sc]*UNIT-4] = 0;
		queue(&ctx.active[sc], m);
		/* Advance past the group (header UNIT + slot) just carved off. */
		a += (size_classes[sc]+1)*UNIT;
	}
}
void __malloc_donate(char *start, char *end)
{
donate((void *)start, end-start);
}

View File

@ -0,0 +1,77 @@
#ifndef MALLOC_GLUE_H
#define MALLOC_GLUE_H
#include <stdint.h>
#include <sys/mman.h>
#include <pthread.h>
#include <unistd.h>
#include <elf.h>
#include <string.h>
#include "atomic.h"
#include "syscall.h"
#include "libc.h"
#include "lock.h"
#include "dynlink.h"
// Remap mallocng's internal global identifiers onto reserved (double
// underscore) names so they stay namespace-safe inside libc.
#define size_classes __malloc_size_classes
#define ctx __malloc_context
#define alloc_meta __malloc_alloc_meta
#define is_allzero __malloc_allzerop
#define dump_heap __dump_heap
// mallocng's assert usage: by default, fail hard via a_crash() rather than
// pulling in the stdio-based assert machinery.
#if USE_REAL_ASSERT
#include <assert.h>
#else
#undef assert
#define assert(x) do { if (!(x)) a_crash(); } while(0)
#endif
// Raw SYS_brk: returns the resulting break as a uintptr_t (no errno
// semantics), unlike the public brk() wrapper.
#define brk(p) ((uintptr_t)__syscall(SYS_brk, p))
// Route mapping calls to the namespace-safe internal syscall wrappers.
#define mmap __mmap
#define madvise __madvise
#define mremap __mremap
// NOTE(review): presumably, when a third-party allocator has interposed
// malloc but not aligned_alloc, mallocng's aligned_alloc must be disabled
// so allocations aren't mixed across allocators — confirm against dynlink.
#define DISABLE_ALIGNED_ALLOC (__malloc_replaced && !__aligned_alloc_replaced)
/* Produce a 64-bit secret for the allocator's hardening checks. Starts from
 * a weak fallback derived from this local's own stack address, then, if the
 * kernel supplied AT_RANDOM in the aux vector, overwrites it with 8 of those
 * random bytes (the second half; the first 8 are used elsewhere in libc). */
static inline uint64_t get_random_secret()
{
	uint64_t secret = (uintptr_t)&secret * 1103515245;
	size_t i = 0;
	while (libc.auxv[i]) {
		if (libc.auxv[i] == AT_RANDOM)
			memcpy(&secret, (char *)libc.auxv[i+1] + 8, sizeof secret);
		i += 2;
	}
	return secret;
}
// mallocng uses PAGESIZE; fall back to musl's PAGE_SIZE when the arch does
// not define a constant PAGESIZE.
#ifndef PAGESIZE
#define PAGESIZE PAGE_SIZE
#endif
// MT is nonzero only when the process may be multithreaded; the lock
// functions below use it to skip locking entirely in the single-threaded
// case (see commit message: locking optimized out when single-threaded).
#define MT (libc.need_locks)
// Tell mallocng that rdlock() takes an exclusive lock (it is the same lock
// as wrlock(), so no reader/writer upgrade is ever needed).
#define RDLOCK_IS_EXCLUSIVE 1
// The single malloc lock object; declared here, defined exactly once in
// some translation unit via LOCK_OBJ_DEF.
__attribute__((__visibility__("hidden")))
extern int __malloc_lock[1];
#define LOCK_OBJ_DEF \
int __malloc_lock[1];
// "Read" lock: actually exclusive (RDLOCK_IS_EXCLUSIVE); skipped entirely
// when the process is single-threaded.
static inline void rdlock()
{
	if (MT) LOCK(__malloc_lock);
}
// Write lock: same exclusive lock as rdlock, same single-thread bypass.
static inline void wrlock()
{
	if (MT) LOCK(__malloc_lock);
}
// Unconditional unlock, even if the matching lock was skipped under !MT.
// NOTE(review): assumes musl's UNLOCK is harmless on a lock that was never
// taken — confirm against lock.h.
static inline void unlock()
{
	UNLOCK(__malloc_lock);
}
// No-op: rdlock is already exclusive, so there is nothing to upgrade.
static inline void upgradelock()
{
}
#endif