Minor changes to jemalloc100 (the old allocator that currently only vax and a few similar ports use).

- Don't use TLS, and don't pretend to hash arenas out among threads to reduce lock contention; NetBSD uses thr_curcpu() to choose the arena (i.e. per-CPU arenas).
- In a single-threaded process, don't prepare "ncpus" worth of arenas; allocate only one.
- Use getpagesize(), since it caches the return value.
- Sprinkle branch hints.
- Make MALLOC_TRACE and MALLOC_DEBUG work again.
parent 61757a071b
commit bf5f78d57d
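One of the bullet points above is that MALLOC_TRACE works again: the UTRACE macro now calls utrace(2) directly with a "malloc" label instead of going through the removed xutrace() wrapper. Below is a rough, hedged sketch of what such a trace record looks like to ktrace/kdump; the my_* names are hypothetical and the field layout only mirrors the malloc_utrace_t in the diff, it is not the allocator's actual code.

```c
#include <sys/types.h>
#include <stdlib.h>

int	utrace(const char *, void *, size_t);	/* NetBSD utrace(2); assumption */

/* Illustrative record; the real allocator uses its own malloc_utrace_t. */
typedef struct {
	void	*p;	/* pointer passed in (realloc/free) */
	size_t	 s;	/* requested size */
	void	*r;	/* pointer returned */
} my_utrace_t;

static void
my_trace_malloc(size_t size, void *result)
{
	my_utrace_t ut;

	ut.p = NULL;
	ut.s = size;
	ut.r = result;
	/* The record shows up as a USER entry in "kdump -t u". */
	(void)utrace("malloc", &ut, sizeof(ut));
}

int
main(void)
{
	void *p = malloc(32);

	my_trace_malloc(32, p);
	free(p);
	return 0;
}
```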
@@ -1,4 +1,4 @@
-/* $NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $ */
+/* $NetBSD: jemalloc.c,v 1.57 2023/10/13 19:30:28 ad Exp $ */
 
 /*-
  * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>.
@@ -97,13 +97,6 @@
 
 /* LINTLIBRARY */
 
-#ifdef __NetBSD__
-# define xutrace(a, b)	utrace("malloc", (a), (b))
-# define __DECONST(x, y)	((x)__UNCONST(y))
-#else
-# define xutrace(a, b)	utrace((a), (b))
-#endif	/* __NetBSD__ */
-
 /*
  * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
  * defaults the A and J runtime options to off.  These settings are appropriate
@@ -117,21 +110,11 @@
 
 #include <sys/cdefs.h>
 /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
-__RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
+__RCSID("$NetBSD: jemalloc.c,v 1.57 2023/10/13 19:30:28 ad Exp $");
 
-#ifdef __FreeBSD__
-#include "libc_private.h"
-#ifdef MALLOC_DEBUG
-#  define _LOCK_DEBUG
-#endif
-#include "spinlock.h"
-#endif
 #include "namespace.h"
 #include <sys/mman.h>
 #include <sys/param.h>
-#ifdef __FreeBSD__
-#include <sys/stddef.h>
-#endif
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/sysctl.h>
@ -139,12 +122,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
#include <sys/uio.h>
|
||||
#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#include <machine/atomic.h>
|
||||
#include <machine/cpufunc.h>
|
||||
#include <machine/vmparam.h>
|
||||
#endif
|
||||
|
||||
#include <errno.h>
|
||||
#include <limits.h>
|
||||
#include <pthread.h>
|
||||
|
@ -158,17 +135,10 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
#include <strings.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#ifdef __NetBSD__
|
||||
# include <reentrant.h>
|
||||
# include "extern.h"
|
||||
#include <reentrant.h>
|
||||
#include "extern.h"
|
||||
|
||||
#define STRERROR_R(a, b, c) strerror_r_ss(a, b, c);
|
||||
#endif
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#define STRERROR_R(a, b, c) strerror_r(a, b, c);
|
||||
#include "un-namespace.h"
|
||||
#endif
|
||||
|
||||
/* MALLOC_STATS enables statistics calculation. */
|
||||
#ifndef MALLOC_PRODUCTION
|
||||
|
@ -260,7 +230,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
# define QUANTUM_2POW_MIN 4
|
||||
# define SIZEOF_PTR_2POW 2
|
||||
# define USE_BRK
|
||||
# define NO_TLS
|
||||
#endif
|
||||
#ifdef __sh__
|
||||
# define QUANTUM_2POW_MIN 4
|
||||
|
@ -271,9 +240,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
# define QUANTUM_2POW_MIN 4
|
||||
# define SIZEOF_PTR_2POW 2
|
||||
# define USE_BRK
|
||||
# ifdef __mc68010__
|
||||
# define NO_TLS
|
||||
# endif
|
||||
#endif
|
||||
#if defined(__mips__)
|
||||
# ifdef _LP64
|
||||
|
@ -284,9 +250,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
# endif
|
||||
# define QUANTUM_2POW_MIN 4
|
||||
# define USE_BRK
|
||||
# if defined(__mips__)
|
||||
# define NO_TLS
|
||||
# endif
|
||||
#endif
|
||||
#if defined(__riscv__)
|
||||
# ifdef _LP64
|
||||
|
@ -297,7 +260,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
# endif
|
||||
# define QUANTUM_2POW_MIN 4
|
||||
# define USE_BRK
|
||||
# define NO_TLS
|
||||
#endif
|
||||
#ifdef __hppa__
|
||||
# define QUANTUM_2POW_MIN 4
|
||||
|
@ -367,21 +329,6 @@ __RCSID("$NetBSD: jemalloc.c,v 1.56 2023/05/07 12:41:47 skrll Exp $");
|
|||
|
||||
/******************************************************************************/
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
/*
|
||||
* Mutexes based on spinlocks. We can't use normal pthread mutexes, because
|
||||
* they require malloc()ed memory.
|
||||
*/
|
||||
typedef struct {
|
||||
spinlock_t lock;
|
||||
} malloc_mutex_t;
|
||||
|
||||
/* Set to true once the allocator has been initialized. */
|
||||
static bool malloc_initialized = false;
|
||||
|
||||
/* Used to avoid initialization races. */
|
||||
static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
|
||||
#else
|
||||
#define malloc_mutex_t mutex_t
|
||||
|
||||
/* Set to true once the allocator has been initialized. */
|
||||
|
@ -391,7 +338,6 @@ static bool malloc_initialized = false;
|
|||
/* Used to avoid initialization races. */
|
||||
static mutex_t init_lock = MUTEX_INITIALIZER;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
|
@@ -774,71 +720,10 @@ static size_t base_mapped;
  * arenas array are necessarily used; arenas are created lazily as needed.
  */
 static arena_t		**arenas;
-static unsigned		narenas;
-static unsigned		next_arena;
 #ifdef _REENTRANT
 static malloc_mutex_t	arenas_mtx; /* Protects arenas initialization. */
 #endif
 
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-#ifndef NO_TLS
-static __attribute__((tls_model("initial-exec")))
-__thread arena_t	**arenas_map;
-#else
-static arena_t	**arenas_map;
-#endif
-
-#if !defined(NO_TLS) || !defined(_REENTRANT)
-# define	get_arenas_map()	(arenas_map)
-# define	set_arenas_map(x)	(arenas_map = x)
-#else /* NO_TLS && _REENTRANT */
-
-static thread_key_t arenas_map_key = -1;
-
-static inline arena_t **
-get_arenas_map(void)
-{
-	if (!__isthreaded)
-		return arenas_map;
-
-	if (arenas_map_key == -1) {
-		(void)thr_keycreate(&arenas_map_key, NULL);
-		if (arenas_map != NULL) {
-			thr_setspecific(arenas_map_key, arenas_map);
-			arenas_map = NULL;
-		}
-	}
-
-	return thr_getspecific(arenas_map_key);
-}
-
-static __inline void
-set_arenas_map(arena_t **a)
-{
-	if (!__isthreaded) {
-		arenas_map = a;
-		return;
-	}
-
-	if (arenas_map_key == -1) {
-#ifndef NO_TLS
-		(void)thr_keycreate(&arenas_map_key, NULL);
-#endif
-		if (arenas_map != NULL) {
-			_DIAGASSERT(arenas_map == a);
-			arenas_map = NULL;
-		}
-	}
-
-#ifndef NO_TLS
-	thr_setspecific(arenas_map_key, a);
-#endif
-}
-#endif /* NO_TLS && _REENTRANT */
-
 #ifdef MALLOC_STATS
 /* Chunk statistics. */
 static chunk_stats_t	stats_chunks;
@ -866,7 +751,6 @@ static bool opt_utrace = false;
|
|||
static bool opt_sysv = false;
|
||||
static bool opt_xmalloc = false;
|
||||
static bool opt_zero = false;
|
||||
static int32_t opt_narenas_lshift = 0;
|
||||
|
||||
typedef struct {
|
||||
void *p;
|
||||
|
@@ -874,13 +758,18 @@ typedef struct {
 	void	*r;
 } malloc_utrace_t;
 
+/* Sprinkle branch hints for the compiler and CPU. */
+#define	OPT(a)		__predict_false(opt_##a)
+#define	NOT_OPT(a)	__predict_true(!opt_##a)
+
 /* Trace malloc/free for ktrace/kdump. */
 #define	UTRACE(a, b, c)						\
-	if (opt_utrace) {					\
+	if (OPT(utrace)) {					\
 		malloc_utrace_t ut;				\
 		ut.p = a;					\
 		ut.s = b;					\
 		ut.r = c;					\
-		xutrace(&ut, sizeof(ut));			\
+		utrace("malloc", &ut, sizeof(ut));		\
 	}
 
 /******************************************************************************/
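A short aside on the new OPT()/NOT_OPT() wrappers: __predict_false()/__predict_true() are NetBSD's spellings of the compiler's branch-expectation builtin, so rarely enabled debug options stay off the predicted hot path. The following stand-alone sketch shows the same pattern; the option flags and the fill function are hypothetical, and the fallback macro definitions assume a GCC/Clang-style __builtin_expect.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the <sys/cdefs.h> hints, so the example compiles anywhere. */
#ifndef __predict_false
#define	__predict_false(x)	__builtin_expect((x) != 0, 0)
#define	__predict_true(x)	__builtin_expect((x) != 0, 1)
#endif

/* Hypothetical runtime options, normally parsed from MALLOC_OPTIONS. */
static bool opt_junk = false;
static bool opt_zero = false;

#define	OPT(a)		__predict_false(opt_##a)
#define	NOT_OPT(a)	__predict_true(!opt_##a)

static void
fill_new_allocation(unsigned char *p, size_t size)
{
	/* The common "options off" case falls straight through. */
	if (OPT(junk))
		for (size_t i = 0; i < size; i++) p[i] = 0xa5;
	else if (OPT(zero))
		for (size_t i = 0; i < size; i++) p[i] = 0;
}

int
main(void)
{
	unsigned char buf[16];

	fill_new_allocation(buf, sizeof(buf));
	printf("opt_junk=%d opt_zero=%d\n", opt_junk, opt_zero);
	return 0;
}
```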
@@ -920,8 +809,8 @@ static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
 static size_t	arena_salloc(const void *ptr);
 static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-static bool	arena_new(arena_t *arena);
-static arena_t	*arenas_extend(unsigned ind);
+static void	arena_new(arena_t *arena);
+static arena_t	*arenas_extend(void);
 static void	*huge_malloc(size_t size);
 static void	*huge_palloc(size_t alignment, size_t size);
 static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
@ -1216,8 +1105,8 @@ base_chunk_node_dealloc(chunk_node_t *node)
|
|||
static void
|
||||
stats_print(arena_t *arena)
|
||||
{
|
||||
unsigned i;
|
||||
int gap_start;
|
||||
const unsigned minusone = (unsigned)-1;
|
||||
unsigned i, gap_start;
|
||||
|
||||
malloc_printf(
|
||||
" allocated/mapped nmalloc ndalloc\n");
|
||||
|
@ -1236,12 +1125,12 @@ stats_print(arena_t *arena)
|
|||
|
||||
malloc_printf("bins: bin size regs pgs requests newruns"
|
||||
" reruns maxruns curruns\n");
|
||||
for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
|
||||
for (i = 0, gap_start = minusone; i < ntbins + nqbins + nsbins; i++) {
|
||||
if (arena->bins[i].stats.nrequests == 0) {
|
||||
if (gap_start == -1)
|
||||
if (gap_start == minusone)
|
||||
gap_start = i;
|
||||
} else {
|
||||
if (gap_start != -1) {
|
||||
if (gap_start != minusone) {
|
||||
if (i > gap_start + 1) {
|
||||
/* Gap of more than one size class. */
|
||||
malloc_printf("[%u..%u]\n",
|
||||
|
@ -1250,7 +1139,7 @@ stats_print(arena_t *arena)
|
|||
/* Gap of one size class. */
|
||||
malloc_printf("[%u]\n", gap_start);
|
||||
}
|
||||
gap_start = -1;
|
||||
gap_start = minusone;
|
||||
}
|
||||
malloc_printf(
|
||||
"%13u %1s %4u %4u %3u %9llu %9llu"
|
||||
|
@ -1267,7 +1156,7 @@ stats_print(arena_t *arena)
|
|||
arena->bins[i].stats.curruns);
|
||||
}
|
||||
}
|
||||
if (gap_start != -1) {
|
||||
if (gap_start != minusone) {
|
||||
if (i > gap_start + 1) {
|
||||
/* Gap of more than one size class. */
|
||||
malloc_printf("[%u..%u]\n", gap_start, i - 1);
|
||||
|
@ -1330,7 +1219,7 @@ pages_map_align(void *addr, size_t size, int align)
|
|||
STRERROR_R(errno, buf, sizeof(buf));
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in munmap(): ", buf, "\n");
|
||||
if (opt_abort)
|
||||
if (OPT(abort))
|
||||
abort();
|
||||
}
|
||||
ret = NULL;
|
||||
|
@ -1358,7 +1247,7 @@ pages_unmap(void *addr, size_t size)
|
|||
STRERROR_R(errno, buf, sizeof(buf));
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in munmap(): ", buf, "\n");
|
||||
if (opt_abort)
|
||||
if (OPT(abort))
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
@@ -1595,54 +1484,34 @@ chunk_dealloc(void *chunk, size_t size)
  */
 
 /*
- * Choose an arena based on a per-thread and (optimistically) per-CPU value.
- *
- * We maintain at least one block of arenas.  Usually there are more.
- * The blocks are $ncpu arenas in size.  Whole blocks are 'hashed'
- * amongst threads.  To accomplish this, next_arena advances only in
- * ncpu steps.
+ * Choose a per-CPU arena.
  */
 static __noinline arena_t *
 choose_arena_hard(void)
 {
-	unsigned i, curcpu;
-	arena_t **map;
 
-	/* Initialize the current block of arenas and advance to next. */
+	assert(arenas[0] != NULL);
+
 	malloc_mutex_lock(&arenas_mtx);
-	assert(next_arena % ncpus == 0);
-	assert(narenas % ncpus == 0);
-	map = &arenas[next_arena];
-	set_arenas_map(map);
-	for (i = 0; i < ncpus; i++) {
-		if (arenas[next_arena] == NULL)
-			arenas_extend(next_arena);
-		next_arena = (next_arena + 1) % narenas;
-	}
+	for (unsigned i = 1; i < ncpus; i++)
+		if (arenas[i] == NULL)
+			arenas[i] = arenas_extend();
 	malloc_mutex_unlock(&arenas_mtx);
 
 	/*
 	 * If we were unable to allocate an arena above, then default to
 	 * the first arena, which is always present.
 	 */
-	curcpu = thr_curcpu();
-	if (map[curcpu] != NULL)
-		return map[curcpu];
-	return arenas[0];
+	return arenas[thr_curcpu()];
 }
 
 static inline arena_t *
 choose_arena(void)
 {
-	unsigned curcpu;
-	arena_t **map;
+	arena_t *arena;
 
-	map = get_arenas_map();
-	curcpu = thr_curcpu();
-	if (__predict_true(map != NULL && map[curcpu] != NULL))
-		return map[curcpu];
+	/* NB: when libpthread is absent, thr_curcpu() always returns zero. */
+	arena = arenas[thr_curcpu()];
+	if (__predict_true(arena != NULL))
+		return arena;
 
 	return choose_arena_hard();
 }
 
 static inline int
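The rewritten selection logic splits into a lock-free fast path (one array load indexed by thr_curcpu()) and a locked slow path that lazily fills every per-CPU slot so later calls stay on the fast path. The sketch below shows that shape only; the my_* names are hypothetical, a plain pthread mutex stands in for malloc_mutex_t, and malloc() stands in for base_alloc().

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MY_NCPUS 4		/* the real code uses the detected ncpus */

typedef struct { unsigned index; } my_arena_t;

static my_arena_t      *my_arenas[MY_NCPUS];
static pthread_mutex_t  my_arenas_mtx = PTHREAD_MUTEX_INITIALIZER;

static unsigned my_curcpu(void) { return 0; }	/* stand-in for thr_curcpu() */

static my_arena_t *
my_arenas_extend(unsigned index)
{
	my_arena_t *a = malloc(sizeof(*a));

	if (a != NULL)
		a->index = index;
	return a;
}

/* Slow path: fill in any missing per-CPU arenas, then pick by CPU. */
static my_arena_t *
my_choose_arena_hard(void)
{
	pthread_mutex_lock(&my_arenas_mtx);
	for (unsigned i = 1; i < MY_NCPUS; i++)
		if (my_arenas[i] == NULL)
			my_arenas[i] = my_arenas_extend(i);
	pthread_mutex_unlock(&my_arenas_mtx);

	return my_arenas[my_curcpu()];
}

/* Fast path: one array load, no locking, no TLS lookup. */
static my_arena_t *
my_choose_arena(void)
{
	my_arena_t *a = my_arenas[my_curcpu()];

	return (a != NULL) ? a : my_choose_arena_hard();
}

int
main(void)
{
	my_arenas[0] = my_arenas_extend(0);	/* arena 0 is created eagerly */
	printf("using arena %u\n", my_choose_arena()->index);
	return 0;
}
```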
@ -1940,7 +1809,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
|
|||
*/
|
||||
RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
|
||||
|
||||
if (opt_hint == false) {
|
||||
if (NOT_OPT(hint)) {
|
||||
if (arena->spare != NULL) {
|
||||
chunk_dealloc((void *)arena->spare, chunksize);
|
||||
#ifdef MALLOC_STATS
|
||||
|
@ -2082,7 +1951,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
|
|||
* Tell the kernel that we don't need the data in this run, but only if
|
||||
* requested via runtime configuration.
|
||||
*/
|
||||
if (opt_hint)
|
||||
if (OPT(hint))
|
||||
madvise(run, size, MADV_FREE);
|
||||
|
||||
/* Try to coalesce with neighboring runs. */
|
||||
|
@ -2373,9 +2242,9 @@ arena_malloc(arena_t *arena, size_t size)
|
|||
|
||||
malloc_mutex_unlock(&arena->mtx);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ret, 0xa5, size);
|
||||
else if (opt_zero)
|
||||
else if (OPT(zero))
|
||||
memset(ret, 0, size);
|
||||
return (ret);
|
||||
}
|
||||
|
@ -2473,9 +2342,9 @@ arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
|
|||
#endif
|
||||
malloc_mutex_unlock(&arena->mtx);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ret, 0xa5, size);
|
||||
else if (opt_zero)
|
||||
else if (OPT(zero))
|
||||
memset(ret, 0, size);
|
||||
return (ret);
|
||||
}
|
||||
|
@ -2558,9 +2427,9 @@ arena_ralloc(void *ptr, size_t size, size_t oldsize)
|
|||
idalloc(ptr);
|
||||
return (ret);
|
||||
IN_PLACE:
|
||||
if (opt_junk && size < oldsize)
|
||||
if (OPT(junk) && size < oldsize)
|
||||
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
|
||||
else if (opt_zero && size > oldsize)
|
||||
else if (OPT(zero) && size > oldsize)
|
||||
memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
|
||||
return (ptr);
|
||||
}
|
||||
|
@ -2596,7 +2465,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
|
|||
bin = run->bin;
|
||||
size = bin->reg_size;
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ptr, 0x5a, size);
|
||||
|
||||
malloc_mutex_lock(&arena->mtx);
|
||||
|
@ -2652,7 +2521,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
|
|||
size = mapelm->npages << pagesize_2pow;
|
||||
assert((((uintptr_t)ptr) & pagesize_mask) == 0);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ptr, 0x5a, size);
|
||||
|
||||
malloc_mutex_lock(&arena->mtx);
|
||||
|
@@ -2666,7 +2535,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	malloc_mutex_unlock(&arena->mtx);
 }
 
-static bool
+static void
 arena_new(arena_t *arena)
 {
 	unsigned i;
@@ -2735,21 +2604,19 @@ arena_new(arena_t *arena)
 #ifdef MALLOC_DEBUG
 	arena->magic = ARENA_MAGIC;
 #endif
-
-	return (false);
 }
 
 /* Create a new arena and insert it into the arenas array at index ind. */
 static arena_t *
-arenas_extend(unsigned ind)
+arenas_extend(void)
 {
 	arena_t *ret;
 
 	/* Allocate enough space for trailing bins. */
 	ret = (arena_t *)base_alloc(sizeof(arena_t)
 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
-	if (ret != NULL && arena_new(ret) == false) {
-		arenas[ind] = ret;
+	if (ret != NULL) {
+		arena_new(ret);
 		return (ret);
 	}
 	/* Only reached if there is an OOM error. */
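The base_alloc() call above sizes a single block to hold the arena header plus its trailing array of bins, relying on the last bin being declared inside arena_t (hence the "- 1" in the multiplier). The following small illustration uses made-up types and counts; it only mirrors the sizing idea, not the real structures.

```c
#include <stdio.h>
#include <stdlib.h>

/* Made-up stand-ins for arena_bin_t / arena_t; only the sizing idea matters. */
typedef struct { unsigned reg_size; } my_bin_t;

typedef struct {
	unsigned	nbins;
	my_bin_t	bins[1];	/* first bin lives inside the header */
} my_arena_t;

static my_arena_t *
my_arena_alloc(unsigned nbins)
{
	/* One allocation: header plus (nbins - 1) additional trailing bins. */
	my_arena_t *a = malloc(sizeof(my_arena_t)
	    + sizeof(my_bin_t) * (nbins - 1));

	if (a != NULL)
		a->nbins = nbins;
	return a;
}

int
main(void)
{
	my_arena_t *a = my_arena_alloc(8);

	if (a != NULL) {
		a->bins[7].reg_size = 512;	/* last trailing bin is usable */
		printf("%u bins\n", a->nbins);
		free(a);
	}
	return 0;
}
```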
@ -2762,7 +2629,7 @@ arenas_extend(unsigned ind)
|
|||
*/
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error initializing arena\n", "", "");
|
||||
if (opt_abort)
|
||||
if (OPT(abort))
|
||||
abort();
|
||||
|
||||
return (arenas[0]);
|
||||
|
@ -2814,9 +2681,9 @@ huge_malloc(size_t size)
|
|||
#endif
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ret, 0xa5, csize);
|
||||
else if (opt_zero)
|
||||
else if (OPT(zero))
|
||||
memset(ret, 0, csize);
|
||||
|
||||
return (ret);
|
||||
|
@ -2894,9 +2761,9 @@ huge_palloc(size_t alignment, size_t size)
|
|||
#endif
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ret, 0xa5, chunk_size);
|
||||
else if (opt_zero)
|
||||
else if (OPT(zero))
|
||||
memset(ret, 0, chunk_size);
|
||||
|
||||
return (ret);
|
||||
|
@ -2910,10 +2777,10 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
|
|||
/* Avoid moving the allocation if the size class would not change. */
|
||||
if (oldsize > arena_maxclass &&
|
||||
CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
|
||||
if (opt_junk && size < oldsize) {
|
||||
if (OPT(junk) && size < oldsize) {
|
||||
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
|
||||
- size);
|
||||
} else if (opt_zero && size > oldsize) {
|
||||
} else if (OPT(zero) && size > oldsize) {
|
||||
memset((void *)((uintptr_t)ptr + oldsize), 0, size
|
||||
- oldsize);
|
||||
}
|
||||
|
@ -2946,7 +2813,7 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
|
|||
* tree before we acquire the mutex lock again.
|
||||
*/
|
||||
malloc_mutex_lock(&chunks_mtx);
|
||||
key.chunk = __DECONST(void *, ptr);
|
||||
key.chunk = __UNCONST(ptr);
|
||||
node = RB_FIND(chunk_tree_s, &huge, &key);
|
||||
assert(node != NULL);
|
||||
assert(node->chunk == ptr);
|
||||
|
@ -2986,10 +2853,10 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
|
|||
#endif
|
||||
malloc_mutex_unlock(&chunks_mtx);
|
||||
|
||||
if (opt_junk && size < oldsize) {
|
||||
if (OPT(junk) && size < oldsize) {
|
||||
memset((void *)((uintptr_t)newptr + size), 0x5a,
|
||||
newcsize - size);
|
||||
} else if (opt_zero && size > oldsize) {
|
||||
} else if (OPT(zero) && size > oldsize) {
|
||||
memset((void *)((uintptr_t)newptr + oldsize), 0,
|
||||
size - oldsize);
|
||||
}
|
||||
|
@ -3045,7 +2912,7 @@ huge_dalloc(void *ptr)
|
|||
|
||||
/* Unmap chunk. */
|
||||
#ifdef USE_BRK
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(node->chunk, 0x5a, node->size);
|
||||
#endif
|
||||
chunk_dealloc(node->chunk, node->size);
|
||||
|
@ -3184,7 +3051,7 @@ icalloc(size_t size)
|
|||
if (ret == NULL)
|
||||
return (NULL);
|
||||
|
||||
if (opt_junk)
|
||||
if (OPT(junk))
|
||||
memset(ret, 0, size);
|
||||
#ifdef USE_BRK
|
||||
else if ((uintptr_t)ret >= (uintptr_t)brk_base
|
||||
|
@ -3223,7 +3090,7 @@ isalloc(const void *ptr)
|
|||
malloc_mutex_lock(&chunks_mtx);
|
||||
|
||||
/* Extract from tree of huge allocations. */
|
||||
key.chunk = __DECONST(void *, ptr);
|
||||
key.chunk = __UNCONST(ptr);
|
||||
node = RB_FIND(chunk_tree_s, &huge, &key);
|
||||
assert(node != NULL);
|
||||
|
||||
|
@ -3273,7 +3140,7 @@ static void
|
|||
malloc_print_stats(void)
|
||||
{
|
||||
|
||||
if (opt_print_stats) {
|
||||
if (OPT(print_stats)) {
|
||||
char s[UMAX2S_BUFSIZE];
|
||||
_malloc_message("___ Begin malloc statistics ___\n", "", "",
|
||||
"");
|
||||
|
@ -3288,13 +3155,12 @@ malloc_print_stats(void)
|
|||
opt_abort ? "A" : "a",
|
||||
opt_junk ? "J" : "j",
|
||||
opt_hint ? "H" : "h");
|
||||
_malloc_message(opt_utrace ? "PU" : "Pu",
|
||||
_malloc_message(OPT(utrace) ? "PU" : "Pu",
|
||||
opt_sysv ? "V" : "v",
|
||||
opt_xmalloc ? "X" : "x",
|
||||
opt_zero ? "Z\n" : "z\n");
|
||||
|
||||
_malloc_message("CPUs: ", size_t2s(ncpus, s), "\n", "");
|
||||
_malloc_message("Max arenas: ", size_t2s(narenas, s), "\n", "");
|
||||
_malloc_message("Pointer size: ", size_t2s(sizeof(void *), s),
|
||||
"\n", "");
|
||||
_malloc_message("Quantum size: ", size_t2s(quantum, s), "\n", "");
|
||||
|
@ -3314,7 +3180,7 @@ malloc_print_stats(void)
|
|||
/* Calculate and print allocated/mapped stats. */
|
||||
|
||||
/* arenas. */
|
||||
for (i = 0, allocated = 0; i < narenas; i++) {
|
||||
for (i = 0, allocated = 0; i < ncpus; i++) {
|
||||
if (arenas[i] != NULL) {
|
||||
malloc_mutex_lock(&arenas[i]->mtx);
|
||||
allocated +=
|
||||
|
@ -3363,7 +3229,7 @@ malloc_print_stats(void)
|
|||
huge_allocated);
|
||||
|
||||
/* Print stats for each arena. */
|
||||
for (i = 0; i < narenas; i++) {
|
||||
for (i = 0; i < ncpus; i++) {
|
||||
arena = arenas[i];
|
||||
if (arena != NULL) {
|
||||
malloc_printf(
|
||||
|
@ -3380,15 +3246,14 @@ malloc_print_stats(void)
|
|||
}
|
||||
|
||||
/*
|
||||
* FreeBSD's pthreads implementation calls malloc(3), so the malloc
|
||||
* implementation has to take pains to avoid infinite recursion during
|
||||
* initialization.
|
||||
* libpthread might call malloc(3), so the malloc implementation has to take
|
||||
* pains to avoid infinite recursion during initialization.
|
||||
*/
|
||||
static inline bool
|
||||
malloc_init(void)
|
||||
{
|
||||
|
||||
if (malloc_initialized == false)
|
||||
if (__predict_false(malloc_initialized == false))
|
||||
return (malloc_init_hard());
|
||||
|
||||
return (false);
|
||||
|
@@ -3432,7 +3297,7 @@ malloc_init_hard(void)
 {
 	long result;
 
-	result = sysconf(_SC_PAGESIZE);
+	result = getpagesize();
 	assert(result != -1);
 	pagesize = (unsigned) result;
 
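On the getpagesize() swap: getpagesize() in NetBSD's libc caches the value after the first lookup, while sysconf(_SC_PAGESIZE) re-enters the generic sysconf machinery on every call. The snippet below is a hedged illustration of that caching pattern, not the actual libc implementation.

```c
#include <stdio.h>
#include <unistd.h>

/* Illustrative only: a page-size lookup that is answered from a cache
 * after the first call, roughly the behaviour getpagesize() relies on. */
static int
my_cached_pagesize(void)
{
	static int pagesize;	/* 0 until first use */

	if (pagesize == 0)
		pagesize = (int)sysconf(_SC_PAGESIZE);
	return pagesize;
}

int
main(void)
{
	/* Second and later calls never re-enter sysconf(). */
	printf("page size: %d\n", my_cached_pagesize());
	printf("page size: %d\n", my_cached_pagesize());
	return 0;
}
```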
@ -3450,7 +3315,7 @@ malloc_init_hard(void)
|
|||
switch (i) {
|
||||
case 0:
|
||||
if ((linklen = readlink("/etc/malloc.conf", buf,
|
||||
sizeof(buf) - 1)) != -1) {
|
||||
sizeof(buf) - 1)) != -1) {
|
||||
/*
|
||||
* Use the contents of the "/etc/malloc.conf"
|
||||
* symbolic link's name.
|
||||
|
@ -3479,10 +3344,10 @@ malloc_init_hard(void)
|
|||
break;
|
||||
case 2:
|
||||
if (_malloc_options != NULL) {
|
||||
/*
|
||||
* Use options that were compiled into the program.
|
||||
*/
|
||||
opts = _malloc_options;
|
||||
/*
|
||||
* Use options that were compiled into the program.
|
||||
*/
|
||||
opts = _malloc_options;
|
||||
} else {
|
||||
/* No configuration specified. */
|
||||
buf[0] = '\0';
|
||||
|
@ -3529,12 +3394,6 @@ malloc_init_hard(void)
|
|||
(int)(sizeof(size_t) << 3))
|
||||
opt_chunk_2pow++;
|
||||
break;
|
||||
case 'n':
|
||||
opt_narenas_lshift--;
|
||||
break;
|
||||
case 'N':
|
||||
opt_narenas_lshift++;
|
||||
break;
|
||||
case 'p':
|
||||
opt_print_stats = false;
|
||||
break;
|
||||
|
@ -3596,7 +3455,7 @@ malloc_init_hard(void)
|
|||
errno = serrno;
|
||||
|
||||
/* Take care to call atexit() only once. */
|
||||
if (opt_print_stats) {
|
||||
if (OPT(print_stats)) {
|
||||
/* Print statistics at exit. */
|
||||
atexit(malloc_print_stats);
|
||||
}
|
||||
|
@ -3604,13 +3463,13 @@ malloc_init_hard(void)
|
|||
/* Set variables according to the value of opt_small_max_2pow. */
|
||||
if (opt_small_max_2pow < opt_quantum_2pow)
|
||||
opt_small_max_2pow = opt_quantum_2pow;
|
||||
small_max = (1 << opt_small_max_2pow);
|
||||
small_max = (1U << opt_small_max_2pow);
|
||||
|
||||
/* Set bin-related variables. */
|
||||
bin_maxclass = (pagesize >> 1);
|
||||
assert(opt_quantum_2pow >= TINY_MIN_2POW);
|
||||
ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
|
||||
assert(ntbins <= opt_quantum_2pow);
|
||||
assert(ntbins <= (unsigned)opt_quantum_2pow);
|
||||
nqbins = (unsigned)(small_max >> opt_quantum_2pow);
|
||||
nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
|
||||
|
||||
|
@@ -3684,37 +3543,8 @@ malloc_init_hard(void)
 	base_chunk_nodes = NULL;
 	malloc_mutex_init(&base_mtx);
 
-	if (ncpus > 1) {
-		/*
-		 * For SMP systems, create four times as many arenas as there
-		 * are CPUs by default.
-		 */
-		opt_narenas_lshift += 2;
-	}
-
-	/* Determine how many arenas to use. */
-	narenas = ncpus;
-	if (opt_narenas_lshift > 0) {
-		if ((narenas << opt_narenas_lshift) > narenas)
-			narenas <<= opt_narenas_lshift;
-		/*
-		 * Make sure not to exceed the limits of what base_malloc()
-		 * can handle.
-		 */
-		if (narenas * sizeof(arena_t *) > chunksize)
-			narenas = (unsigned)(chunksize / sizeof(arena_t *));
-	} else if (opt_narenas_lshift < 0) {
-		if ((narenas << opt_narenas_lshift) < narenas)
-			narenas <<= opt_narenas_lshift;
-		/* Make sure there is at least one arena. */
-		if (narenas == 0)
-			narenas = 1;
-	}
-
-	next_arena = 0;
-
 	/* Allocate and initialize arenas. */
-	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * ncpus);
 	if (arenas == NULL) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
@@ -3723,14 +3553,13 @@ malloc_init_hard(void)
 	 * Zero the array.  In practice, this should always be pre-zeroed,
 	 * since it was just mmap()ed, but let's be sure.
 	 */
-	memset(arenas, 0, sizeof(arena_t *) * narenas);
+	memset(arenas, 0, sizeof(arena_t *) * ncpus);
 
 	/*
 	 * Initialize one arena here.  The rest are lazily created in
 	 * arena_choose_hard().
 	 */
-	arenas_extend(0);
-	if (arenas[0] == NULL) {
+	if ((arenas[0] = arenas_extend()) == NULL) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
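This is where the single-threaded case gets cheaper: only ncpus pointer slots are reserved and only arenas[0] is constructed eagerly; the other slots stay NULL until choose_arena_hard() runs, which an unthreaded process (where thr_curcpu() returns 0) never triggers. Here is a rough sketch of that initialization using standard calls in place of the allocator's internal helpers; all my_* names are hypothetical.

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct { unsigned index; } my_arena_t;	/* hypothetical arena */

static my_arena_t **my_arenas;
static unsigned     my_ncpus;

static my_arena_t *
my_arenas_extend(unsigned index)
{
	my_arena_t *a = malloc(sizeof(*a));	/* real code uses base_alloc() */

	if (a != NULL)
		a->index = index;
	return a;
}

static int
my_malloc_init(void)
{
	long n = sysconf(_SC_NPROCESSORS_CONF);	/* real code keeps ncpus around */

	my_ncpus = (n > 0) ? (unsigned)n : 1;

	/* One pointer slot per CPU; calloc() zeroes them like the memset(). */
	my_arenas = calloc(my_ncpus, sizeof(*my_arenas));
	if (my_arenas == NULL)
		return 1;

	/* Only arena 0 is built eagerly; the rest stay NULL until needed. */
	my_arenas[0] = my_arenas_extend(0);
	return (my_arenas[0] == NULL);
}

int
main(void)
{
	if (my_malloc_init() == 0)
		printf("%u arena slots, arena 0 ready\n", my_ncpus);
	return 0;
}
```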
@ -3755,13 +3584,13 @@ malloc(size_t size)
|
|||
{
|
||||
void *ret;
|
||||
|
||||
if (malloc_init()) {
|
||||
if (__predict_false(malloc_init())) {
|
||||
ret = NULL;
|
||||
goto RETURN;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
if (opt_sysv == false)
|
||||
if (__predict_false(size == 0)) {
|
||||
if (NOT_OPT(sysv))
|
||||
size = 1;
|
||||
else {
|
||||
ret = NULL;
|
||||
|
@ -3772,8 +3601,8 @@ malloc(size_t size)
|
|||
ret = imalloc(size);
|
||||
|
||||
RETURN:
|
||||
if (ret == NULL) {
|
||||
if (opt_xmalloc) {
|
||||
if (__predict_false(ret == NULL)) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in malloc(): out of memory\n", "",
|
||||
"");
|
||||
|
@ -3792,13 +3621,13 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
|
|||
int ret;
|
||||
void *result;
|
||||
|
||||
if (malloc_init())
|
||||
if (__predict_false(malloc_init()))
|
||||
result = NULL;
|
||||
else {
|
||||
/* Make sure that alignment is a large enough power of 2. */
|
||||
if (((alignment - 1) & alignment) != 0
|
||||
|| alignment < sizeof(void *)) {
|
||||
if (opt_xmalloc) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in posix_memalign(): "
|
||||
"invalid alignment\n", "", "");
|
||||
|
@ -3812,8 +3641,8 @@ posix_memalign(void **memptr, size_t alignment, size_t size)
|
|||
result = ipalloc(alignment, size);
|
||||
}
|
||||
|
||||
if (result == NULL) {
|
||||
if (opt_xmalloc) {
|
||||
if (__predict_false(result == NULL)) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in posix_memalign(): out of memory\n",
|
||||
"", "");
|
||||
|
@ -3837,15 +3666,15 @@ calloc(size_t num, size_t size)
|
|||
void *ret;
|
||||
size_t num_size;
|
||||
|
||||
if (malloc_init()) {
|
||||
if (__predict_false(malloc_init())) {
|
||||
num_size = 0;
|
||||
ret = NULL;
|
||||
goto RETURN;
|
||||
}
|
||||
|
||||
num_size = num * size;
|
||||
if (num_size == 0) {
|
||||
if ((opt_sysv == false) && ((num == 0) || (size == 0)))
|
||||
if (__predict_false(num_size == 0)) {
|
||||
if (NOT_OPT(sysv) && ((num == 0) || (size == 0)))
|
||||
num_size = 1;
|
||||
else {
|
||||
ret = NULL;
|
||||
|
@ -3868,7 +3697,7 @@ calloc(size_t num, size_t size)
|
|||
|
||||
RETURN:
|
||||
if (ret == NULL) {
|
||||
if (opt_xmalloc) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in calloc(): out of memory\n", "",
|
||||
"");
|
||||
|
@ -3886,8 +3715,8 @@ realloc(void *ptr, size_t size)
|
|||
{
|
||||
void *ret;
|
||||
|
||||
if (size == 0) {
|
||||
if (opt_sysv == false)
|
||||
if (__predict_false(size == 0)) {
|
||||
if (NOT_OPT(sysv))
|
||||
size = 1;
|
||||
else {
|
||||
if (ptr != NULL)
|
||||
|
@ -3897,13 +3726,13 @@ realloc(void *ptr, size_t size)
|
|||
}
|
||||
}
|
||||
|
||||
if (ptr != NULL) {
|
||||
if (__predict_true(ptr != NULL)) {
|
||||
assert(malloc_initialized);
|
||||
|
||||
ret = iralloc(ptr, size);
|
||||
|
||||
if (ret == NULL) {
|
||||
if (opt_xmalloc) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in realloc(): out of "
|
||||
"memory\n", "", "");
|
||||
|
@ -3912,13 +3741,13 @@ realloc(void *ptr, size_t size)
|
|||
errno = ENOMEM;
|
||||
}
|
||||
} else {
|
||||
if (malloc_init())
|
||||
if (__predict_false(malloc_init()))
|
||||
ret = NULL;
|
||||
else
|
||||
ret = imalloc(size);
|
||||
|
||||
if (ret == NULL) {
|
||||
if (opt_xmalloc) {
|
||||
if (OPT(xmalloc)) {
|
||||
_malloc_message(getprogname(),
|
||||
": (malloc) Error in realloc(): out of "
|
||||
"memory\n", "", "");
|
||||
|
@@ -3982,7 +3811,7 @@ _malloc_prefork(void)
 	/* Acquire all mutexes in a safe order. */
 	malloc_mutex_lock(&init_lock);
 	malloc_mutex_lock(&arenas_mtx);
-	for (i = 0; i < narenas; i++) {
+	for (i = 0; i < ncpus; i++) {
 		if (arenas[i] != NULL)
 			malloc_mutex_lock(&arenas[i]->mtx);
 	}
@@ -4005,7 +3834,7 @@ _malloc_postfork(void)
 	malloc_mutex_unlock(&base_mtx);
 	malloc_mutex_unlock(&chunks_mtx);
 
-	for (i = narenas; i-- > 0; ) {
+	for (i = ncpus; i-- > 0; ) {
 		if (arenas[i] != NULL)
 			malloc_mutex_unlock(&arenas[i]->mtx);
 	}
@@ -4025,7 +3854,7 @@ _malloc_postfork_child(void)
 	malloc_mutex_init(&base_mtx);
 	malloc_mutex_init(&chunks_mtx);
 
-	for (i = narenas; i-- > 0; ) {
+	for (i = ncpus; i-- > 0; ) {
 		if (arenas[i] != NULL)
 			malloc_mutex_init(&arenas[i]->mtx);
 	}
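These handlers exist because fork(2) duplicates the address space with whatever locks happen to be held, so the allocator locks everything before the fork, unlocks in the parent, and re-initializes in the child. The sketch below shows the same pattern generically with pthread_atfork(); it is independent of the allocator's malloc_mutex_t and uses a single lock to stand in for init_lock, arenas_mtx, and the per-arena locks.

```c
#include <sys/types.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&state_lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&state_lock); }
static void postfork_child(void)
{
	/* The child re-initializes rather than unlocking a copied lock. */
	(void)pthread_mutex_init(&state_lock, NULL);
}

int
main(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);

	pid_t pid = fork();
	if (pid == 0) {
		/* Child: the lock is usable because it was re-created. */
		pthread_mutex_lock(&state_lock);
		pthread_mutex_unlock(&state_lock);
		_exit(0);
	}
	printf("forked child %ld with consistent lock state\n", (long)pid);
	return 0;
}
```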