From 3df6d33667507224291c82eb2dc9b3bde1ed9f47 Mon Sep 17 00:00:00 2001
From: enami
Date: Thu, 4 Mar 2010 22:48:31 +0000
Subject: [PATCH] Fix race condition on reallocation of huge category.

We need to remove the old region from the tree before calling mremap(),
because if mremap() releases the old region, another thread may map it
for the same huge category allocation and insert it into the tree before
we reacquire the lock after mremap() returns.

Fixes PR/42876.
---
 lib/libc/stdlib/jemalloc.c | 43 +++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/lib/libc/stdlib/jemalloc.c b/lib/libc/stdlib/jemalloc.c
index 7216c52a0f84..d01327a7459f 100644
--- a/lib/libc/stdlib/jemalloc.c
+++ b/lib/libc/stdlib/jemalloc.c
@@ -1,4 +1,4 @@
-/* $NetBSD: jemalloc.c,v 1.20 2009/02/12 03:11:01 lukem Exp $ */
+/* $NetBSD: jemalloc.c,v 1.21 2010/03/04 22:48:31 enami Exp $ */
 
 /*-
  * Copyright (C) 2006,2007 Jason Evans .
@@ -118,7 +118,7 @@
 
 #include 
 /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
-__RCSID("$NetBSD: jemalloc.c,v 1.20 2009/02/12 03:11:01 lukem Exp $");
+__RCSID("$NetBSD: jemalloc.c,v 1.21 2010/03/04 22:48:31 enami Exp $");
 
 #ifdef __FreeBSD__
 #include "libc_private.h"
@@ -2856,25 +2856,38 @@ huge_ralloc(void *ptr, size_t size, size_t oldsize)
 			/* size_t wrap-around */
 			return (NULL);
 		}
+
+		/*
+		 * Remove the old region from the tree now.  If mremap()
+		 * returns the region to the system, other thread may
+		 * map it for same huge allocation and insert it to the
+		 * tree before we acquire the mutex lock again.
+		 */
+		malloc_mutex_lock(&chunks_mtx);
+		key.chunk = __DECONST(void *, ptr);
+		/* LINTED */
+		node = RB_FIND(chunk_tree_s, &huge, &key);
+		assert(node != NULL);
+		assert(node->chunk == ptr);
+		assert(node->size == oldcsize);
+		RB_REMOVE(chunk_tree_s, &huge, node);
+		malloc_mutex_unlock(&chunks_mtx);
+
 		newptr = mremap(ptr, oldcsize, NULL, newcsize,
 		    MAP_ALIGNED(chunksize_2pow));
-		if (newptr != MAP_FAILED) {
+		if (newptr == MAP_FAILED) {
+			/* We still own the old region. */
+			malloc_mutex_lock(&chunks_mtx);
+			RB_INSERT(chunk_tree_s, &huge, node);
+			malloc_mutex_unlock(&chunks_mtx);
+		} else {
 			assert(CHUNK_ADDR2BASE(newptr) == newptr);
 
-			/* update tree */
+			/* Insert new or resized old region. */
 			malloc_mutex_lock(&chunks_mtx);
-			key.chunk = __DECONST(void *, ptr);
-			/* LINTED */
-			node = RB_FIND(chunk_tree_s, &huge, &key);
-			assert(node != NULL);
-			assert(node->chunk == ptr);
-			assert(node->size == oldcsize);
 			node->size = newcsize;
-			if (ptr != newptr) {
-				RB_REMOVE(chunk_tree_s, &huge, node);
-				node->chunk = newptr;
-				RB_INSERT(chunk_tree_s, &huge, node);
-			}
+			node->chunk = newptr;
+			RB_INSERT(chunk_tree_s, &huge, node);
 #ifdef MALLOC_STATS
 			huge_nralloc++;
 			huge_allocated += newcsize - oldcsize;
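
The ordering is the whole point of the patch: the bookkeeping record must leave
the tree under the lock before the call that can release the mapping, and go
back in afterwards, so no other thread can ever observe a stale entry for an
address range it now owns.  Below is a minimal sketch of that ordering, not the
libc code: it uses a pthread mutex and a plain linked list in place of
chunks_mtx and the red-black `huge' tree, and realloc() as a stand-in for
NetBSD's five-argument mremap(), since realloc() may likewise free the old
block and hand back a different address.  The names registry, huge_resize and
resize_region are made up for the example.

/*
 * Illustrative sketch only: remove the record before the operation that
 * may release the region, reinsert it afterwards.
 */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct region {
	void		*addr;	/* start of the region */
	size_t		 size;	/* current size of the region */
	struct region	*next;
};

static struct region	*registry;	/* stands in for the `huge' tree */
static pthread_mutex_t	 registry_mtx = PTHREAD_MUTEX_INITIALIZER;

static struct region *
registry_remove(void *addr)
{
	struct region **rp, *r;

	for (rp = &registry; (r = *rp) != NULL; rp = &r->next) {
		if (r->addr == addr) {
			*rp = r->next;
			return (r);
		}
	}
	return (NULL);
}

static void
registry_insert(struct region *r)
{

	r->next = registry;
	registry = r;
}

/* Hypothetical resize primitive; like mremap(), it may free the old region. */
static void *
resize_region(void *addr, size_t oldsize, size_t newsize)
{

	(void)oldsize;
	return (realloc(addr, newsize));
}

void *
huge_resize(void *addr, size_t oldsize, size_t newsize)
{
	struct region *r;
	void *newaddr;

	/*
	 * Take our record out of the registry *before* the resize.  If
	 * resize_region() frees the old region, another thread can map the
	 * same range and insert its own record; a stale record left behind
	 * for that range would corrupt the registry.
	 */
	pthread_mutex_lock(&registry_mtx);
	r = registry_remove(addr);
	assert(r != NULL && r->size == oldsize);
	pthread_mutex_unlock(&registry_mtx);

	newaddr = resize_region(addr, oldsize, newsize);

	pthread_mutex_lock(&registry_mtx);
	if (newaddr == NULL) {
		/* Failure: we still own the old region; put it back as-is. */
		registry_insert(r);
	} else {
		/* Success: record the (possibly moved) region. */
		r->addr = newaddr;
		r->size = newsize;
		registry_insert(r);
	}
	pthread_mutex_unlock(&registry_mtx);
	return (newaddr);
}

The failure branch simply reinserts the untouched record, mirroring the
`newptr == MAP_FAILED' arm of the patch; the success branch mirrors the
`else' arm, where the node is updated and reinserted under the same lock.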