Fix race condition on reallocation of huge category.

We need to remove the old region from the tree before calling mremap(), since
if mremap() releases the old region, another thread may map it for the same
huge category allocation and insert it into the tree before we reacquire the
lock after mremap() returns.

Fixes PR/42876.
This commit is contained in:
enami 2010-03-04 22:48:31 +00:00
parent 7ff20f205b
commit 3df6d33667

View File

@ -1,4 +1,4 @@
/* $NetBSD: jemalloc.c,v 1.20 2009/02/12 03:11:01 lukem Exp $ */ /* $NetBSD: jemalloc.c,v 1.21 2010/03/04 22:48:31 enami Exp $ */
/*- /*-
* Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>. * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>.
@ -118,7 +118,7 @@
#include <sys/cdefs.h> #include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */ /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
__RCSID("$NetBSD: jemalloc.c,v 1.20 2009/02/12 03:11:01 lukem Exp $"); __RCSID("$NetBSD: jemalloc.c,v 1.21 2010/03/04 22:48:31 enami Exp $");
#ifdef __FreeBSD__ #ifdef __FreeBSD__
#include "libc_private.h" #include "libc_private.h"
@ -2856,25 +2856,38 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
/* size_t wrap-around */ /* size_t wrap-around */
return (NULL); return (NULL);
} }
/*
* Remove the old region from the tree now. If mremap()
* returns the region to the system, other thread may
* map it for same huge allocation and insert it to the
* tree before we acquire the mutex lock again.
*/
malloc_mutex_lock(&chunks_mtx);
key.chunk = __DECONST(void *, ptr);
/* LINTED */
node = RB_FIND(chunk_tree_s, &huge, &key);
assert(node != NULL);
assert(node->chunk == ptr);
assert(node->size == oldcsize);
RB_REMOVE(chunk_tree_s, &huge, node);
malloc_mutex_unlock(&chunks_mtx);
newptr = mremap(ptr, oldcsize, NULL, newcsize, newptr = mremap(ptr, oldcsize, NULL, newcsize,
MAP_ALIGNED(chunksize_2pow)); MAP_ALIGNED(chunksize_2pow));
if (newptr != MAP_FAILED) { if (newptr == MAP_FAILED) {
/* We still own the old region. */
malloc_mutex_lock(&chunks_mtx);
RB_INSERT(chunk_tree_s, &huge, node);
malloc_mutex_unlock(&chunks_mtx);
} else {
assert(CHUNK_ADDR2BASE(newptr) == newptr); assert(CHUNK_ADDR2BASE(newptr) == newptr);
/* update tree */ /* Insert new or resized old region. */
malloc_mutex_lock(&chunks_mtx); malloc_mutex_lock(&chunks_mtx);
key.chunk = __DECONST(void *, ptr);
/* LINTED */
node = RB_FIND(chunk_tree_s, &huge, &key);
assert(node != NULL);
assert(node->chunk == ptr);
assert(node->size == oldcsize);
node->size = newcsize; node->size = newcsize;
if (ptr != newptr) { node->chunk = newptr;
RB_REMOVE(chunk_tree_s, &huge, node); RB_INSERT(chunk_tree_s, &huge, node);
node->chunk = newptr;
RB_INSERT(chunk_tree_s, &huge, node);
}
#ifdef MALLOC_STATS #ifdef MALLOC_STATS
huge_nralloc++; huge_nralloc++;
huge_allocated += newcsize - oldcsize; huge_allocated += newcsize - oldcsize;