kernel memory guard for DEBUG kernels, proposed on tech-kern.

See kmem_alloc(9) for details.
ad 2009-03-29 10:51:53 +00:00
parent 393ca6e076
commit f51a17bccf
5 changed files with 346 additions and 7 deletions

share/man/man9/kmem_alloc.9

@@ -1,4 +1,4 @@
.\" $NetBSD: kmem_alloc.9,v 1.8 2008/12/29 15:57:01 wiz Exp $
.\" $NetBSD: kmem_alloc.9,v 1.9 2009/03/29 10:51:53 ad Exp $
.\"
.\" Copyright (c)2006 YAMAMOTO Takashi,
.\" All rights reserved.
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.\" ------------------------------------------------------------
.Dd December 29, 2008
.Dd March 29, 2009
.Dt KMEM_ALLOC 9
.Os
.\" ------------------------------------------------------------
@@ -40,6 +40,8 @@
.Fn kmem_alloc \
"size_t size" "km_flag_t kmflags"
.\" ------------------------------------------------------------
.Pp
.Cd "options DEBUG"
.Sh DESCRIPTION
.Fn kmem_alloc
allocates kernel wired memory.
@@ -118,6 +120,60 @@ For example:
}
.Ed
.\" ------------------------------------------------------------
.Sh OPTIONS
Kernels compiled with the
.Dv DEBUG
option perform CPU-intensive sanity checks on kmem operations,
and include the
.Dv kmguard
facility which can be enabled at runtime.
.Pp
.Dv kmguard
adds very high overhead runtime verification to kmem operations.
To enable it, boot the system with the
.Fl d
option, which causes the debugger to be entered early during the kernel
boot process.
Issue commands such as the following:
.Bd -literal
db\*[Gt] w kmem_guard_depth 0t30000
db\*[Gt] c
.Ed
.Pp
This instructs
.Dv kmguard
to queue up to 60000 (30000*2) pages of unmapped KVA to catch
use-after-free type errors.
When
.Fn kmem_free
is called, memory backing a freed item is unmapped and the kernel VA
space pushed onto a FIFO.
The VA space will not be reused until another 30000 items have been freed.
Until reused, the kernel will catch invalid accesses and panic with a page fault.
Limitations:
.Bl -bullet
.It
It has a severe impact on performance.
.It
It is best used on a 64-bit machine with lots of RAM.
.It
Allocations larger than PAGE_SIZE bypass the
.Dv kmguard
facility.
.El
.Pp
.Dv kmguard
tries to catch the following types of bugs:
.Bl -bullet
.It
Overflow at time of occurrence, by means of a guard page.
.It
Underflow at
.Fn kmem_free ,
by using a canary value.
.It
Invalid pointer or size passed, at
.Fn kmem_free .
.El
.Sh RETURN VALUES
On success,
.Fn kmem_alloc
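The failure modes described in the OPTIONS section above are easiest to see in a fragment of caller code. The sketch below is not part of the commit; the function name is invented for illustration, and it assumes a DEBUG kernel booted with kmem_guard_depth set as shown, so that a 96-byte allocation falls below kmem_guard_size:

/*
 * Hypothetical fragment, illustration only: two bugs that kmguard
 * converts from silent corruption into an immediate panic.
 */
#include <sys/kmem.h>

static void
kmguard_demo(void)
{
	char *p = kmem_alloc(96, KM_SLEEP);

	/*
	 * Overflow: the object is placed at the end of its page, so
	 * with a size that is a multiple of the alignment, the first
	 * byte past the object is on the unmapped guard page and the
	 * store faults at the moment it happens.
	 */
	p[96] = 0;

	kmem_free(p, 96);

	/*
	 * Use-after-free: kmem_free() unmapped the backing page and
	 * queued the VA, so this store faults as well.  (Each bug
	 * panics on its own; both appear here only for brevity.)
	 */
	p[0] = 0;
}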

sys/kern/subr_kmem.c

@@ -1,4 +1,4 @@
/* $NetBSD: subr_kmem.c,v 1.26 2009/02/18 13:04:59 yamt Exp $ */
/* $NetBSD: subr_kmem.c,v 1.27 2009/03/29 10:51:53 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -63,7 +63,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.26 2009/02/18 13:04:59 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.27 2009/03/29 10:51:53 ad Exp $");
#include <sys/param.h>
#include <sys/callback.h>
@@ -75,6 +75,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.26 2009/02/18 13:04:59 yamt Exp $");
#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_kmguard.h>
#include <lib/libkern/libkern.h>
@@ -98,10 +99,14 @@ static size_t kmem_cache_mask;
static int kmem_cache_shift;
#if defined(DEBUG)
int kmem_guard_depth;
size_t kmem_guard_size;
static struct uvm_kmguard kmem_guard;
static void *kmem_freecheck;
#define KMEM_POISON
#define KMEM_REDZONE
#define KMEM_SIZE
#define KMEM_GUARD
#endif /* defined(DEBUG) */
#if defined(KMEM_POISON)
@@ -186,7 +191,15 @@ kmem_alloc(size_t size, km_flag_t kmflags)
uint8_t *p;
KASSERT(!cpu_intr_p());
KASSERT((curlwp->l_pflag & LP_INTR) == 0);
KASSERT(!cpu_softintr_p());
KASSERT(size > 0);
#ifdef KMEM_GUARD
if (size <= kmem_guard_size) {
return uvm_kmguard_alloc(&kmem_guard, size,
(kmflags & KM_SLEEP) != 0);
}
#endif
size += REDZONE_SIZE + SIZE_SIZE;
if (size >= kmem_cache_min && size <= kmem_cache_max) {
@@ -239,12 +252,20 @@ kmem_free(void *p, size_t size)
kmem_cache_t *kc;
KASSERT(!cpu_intr_p());
KASSERT((curlwp->l_pflag & LP_INTR) == 0);
KASSERT(!cpu_softintr_p());
KASSERT(size > 0);
#ifdef KMEM_GUARD
	if (size <= kmem_guard_size) {
		uvm_kmguard_free(&kmem_guard, size, p);
		return;
	}
#endif
	size += SIZE_SIZE;
	p = (uint8_t *)p - SIZE_SIZE;
	kmem_size_check(p, size + REDZONE_SIZE);
FREECHECK_IN(&kmem_freecheck, p);
LOCKDEBUG_MEM_CHECK(p, size);
kmem_poison_check((char *)p + size,
@@ -268,6 +289,11 @@ kmem_init(void)
size_t sz;
int i;
#ifdef KMEM_GUARD
uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
kernel_map);
#endif
kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
kmem_backend_alloc, kmem_backend_free, NULL, KMEM_QCACHE_MAX,
VM_SLEEP, IPL_NONE);

sys/uvm/files.uvm

@@ -1,4 +1,4 @@
# $NetBSD: files.uvm,v 1.12 2008/11/19 18:36:10 ad Exp $
# $NetBSD: files.uvm,v 1.13 2009/03/29 10:51:53 ad Exp $
#
# UVM options
@@ -22,6 +22,7 @@ file uvm/uvm_glue.c
file uvm/uvm_init.c
file uvm/uvm_io.c
file uvm/uvm_km.c
file uvm/uvm_kmguard.c debug
file uvm/uvm_loan.c
file uvm/uvm_map.c
file uvm/uvm_meter.c

sys/uvm/uvm_kmguard.c (new file, 209 lines)

@@ -0,0 +1,209 @@
/* $NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* A simple memory allocator for debugging. It tries to catch:
*
* - Overflow, in realtime
* - Underflow, at free
* - Invalid pointer/size passed, at free
* - Use-after-free
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $");
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_kmguard.h>
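/*
 * the canary word is stored immediately before the returned object.
 * mixing the page's VA and the allocation size into it lets
 * uvm_kmguard_free() detect a wrong pointer or wrong size, as well
 * as an underflow that overwrote the word.  MAXSIZE reserves room
 * for that word within the single backing page.
 */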
#define CANARY(va, size) ((void *)((va) ^ 0x9deeba9 ^ (size)))
#define MAXSIZE (PAGE_SIZE - sizeof(void *))
void
uvm_kmguard_init(struct uvm_kmguard *kg, u_int *depth, size_t *size,
struct vm_map *map)
{
vaddr_t va;
/*
* if not enabled, we have nothing to do.
*/
if (*depth == 0) {
return;
}
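	/*
	 * the fifo is allocated in whole pages, so round the
	 * requested depth up to a full page's worth of entries.
	 */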
*depth = roundup((*depth), PAGE_SIZE / sizeof(void *));
KASSERT(*depth != 0);
/*
* allocate fifo.
*/
va = uvm_km_alloc(kernel_map, *depth * sizeof(void *), PAGE_SIZE,
UVM_KMF_WIRED | UVM_KMF_ZERO);
if (va == 0) {
*depth = 0;
*size = 0;
} else {
*size = MAXSIZE;
}
/*
* init object.
*/
kg->kg_map = map;
kg->kg_fifo = (void *)va;
kg->kg_depth = *depth;
kg->kg_rotor = 0;
printf("uvm_kmguard(%p): depth %d\n", kg, *depth);
}
void *
uvm_kmguard_alloc(struct uvm_kmguard *kg, size_t len, bool waitok)
{
struct vm_page *pg;
void **p;
vaddr_t va;
int flag;
/*
* can't handle >PAGE_SIZE allocations. let the caller handle it
* normally.
*/
if (len > MAXSIZE) {
return NULL;
}
/*
* allocate two pages of kernel VA, but do not map anything in yet.
*/
if (waitok) {
flag = UVM_KMF_WAITVA;
} else {
flag = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
}
va = vm_map_min(kg->kg_map);
if (__predict_false(uvm_map(kg->kg_map, &va, PAGE_SIZE*2, NULL,
UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_ALL,
UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flag
| UVM_FLAG_QUANTUM)) != 0)) {
return NULL;
}
/*
* allocate a single page and map in at the start of the two page
* block.
*/
for (;;) {
pg = uvm_pagealloc(NULL, va - vm_map_min(kg->kg_map), NULL, 0);
if (__predict_true(pg != NULL)) {
break;
}
if (waitok) {
uvm_wait("kmguard"); /* sleep here */
continue;
} else {
uvm_km_free(kg->kg_map, va, PAGE_SIZE*2,
UVM_KMF_VAONLY);
return NULL;
}
}
pg->flags &= ~PG_BUSY; /* new page */
UVM_PAGE_OWN(pg, NULL);
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
VM_PROT_READ | VM_PROT_WRITE | PMAP_KMPAGE);
pmap_update(pmap_kernel());
/*
* offset the returned pointer so that the unmapped guard page
* sits immediately after the returned object.
*/
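	/*
	 * e.g. with PAGE_SIZE 4096, ALIGNBYTES 7 and len 96, p lands
	 * at va + 4000, so the object ends exactly at the page
	 * boundary and p[96] is the first guard-page byte.  when len
	 * is not a multiple of the alignment, rounding down leaves a
	 * few bytes of slack, and an overflow smaller than that slack
	 * goes undetected.
	 */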
p = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
p[-1] = CANARY(va, len);
return (void *)p;
}
bool
uvm_kmguard_free(struct uvm_kmguard *kg, size_t len, void *p)
{
vaddr_t va;
u_int rotor;
void **c;
if (len > MAXSIZE) {
return false;
}
/*
* first, check that everything is as it should be.
*/
va = trunc_page((vaddr_t)p);
c = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
KASSERT(p == (void *)c);
KASSERT(c[-1] == CANARY(va, len));
KASSERT(pmap_extract(pmap_kernel(), va, NULL));
KASSERT(!pmap_extract(pmap_kernel(), va + PAGE_SIZE, NULL));
/*
* unmap and free the first page. the second page is never
* allocated.
*/
uvm_km_pgremove_intrsafe(kg->kg_map, va, va + PAGE_SIZE * 2);
pmap_kremove(va, PAGE_SIZE * 2);
pmap_update(pmap_kernel());
/*
* put the VA allocation into the list and swap an old one
* out to free. this behaves mostly like a fifo.
*/
rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
if (va != 0) {
uvm_km_free(kg->kg_map, va, PAGE_SIZE*2, UVM_KMF_VAONLY);
}
return true;
}
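The deferred-reuse behaviour implemented by the rotor/swap at the end of uvm_kmguard_free() can be modelled in a few lines of userland C. This is an illustration only; the names and the tiny DEPTH are invented for the example, while in the kernel the ring holds kmem_guard_depth entries and the values are kernel VAs:

#include <stdio.h>

#define DEPTH 4				/* stands in for kmem_guard_depth */

static unsigned long fifo[DEPTH];	/* zero means an empty slot */
static unsigned int rotor;

/* model of the swap: push a freed VA, get back the one it displaces */
static unsigned long
fifo_swap(unsigned long va)
{
	unsigned int slot = rotor++ % DEPTH;
	unsigned long old = fifo[slot];

	fifo[slot] = va;
	return old;			/* 0 until the ring wraps around */
}

int
main(void)
{
	/* a freed VA only comes back for reuse DEPTH frees later */
	for (unsigned long va = 0x1000; va <= 0x8000; va += 0x1000)
		printf("freed %#lx, released for reuse: %#lx\n",
		    va, fifo_swap(va));
	return 0;
}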

sys/uvm/uvm_kmguard.h (new file, 47 lines)

@@ -0,0 +1,47 @@
/* $NetBSD: uvm_kmguard.h,v 1.1 2009/03/29 10:51:53 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _UVM_KMGUARD_H_
#define _UVM_KMGUARD_H_
struct uvm_kmguard {
u_int kg_depth;
intptr_t *kg_fifo;
u_int kg_rotor;
struct vm_map *kg_map;
};
void uvm_kmguard_init(struct uvm_kmguard *, u_int *, size_t *,
struct vm_map *);
void *uvm_kmguard_alloc(struct uvm_kmguard *, size_t, bool);
bool uvm_kmguard_free(struct uvm_kmguard *, size_t, void *);
#endif /* _UVM_KMGUARD_H_ */
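The header above is the whole client interface. A consumer wires it in the way subr_kmem.c does in this commit: initialize once with pointers to its tunable depth and the resulting size cutoff, then try the guard first on both the alloc and free paths. A condensed sketch with hypothetical allocator names (the real hook-up is in the subr_kmem.c hunks above):

#include <uvm/uvm_kmguard.h>

static struct uvm_kmguard my_guard;	/* illustrative names throughout */
u_int my_guard_depth;			/* set from ddb before init */
size_t my_guard_size;			/* stays 0 if the guard is disabled */

void
my_allocator_init(void)
{
	/* no-op unless my_guard_depth was set non-zero at boot */
	uvm_kmguard_init(&my_guard, &my_guard_depth, &my_guard_size,
	    kernel_map);
}

void *
my_alloc(size_t size, bool waitok)
{
	if (size <= my_guard_size)
		return uvm_kmguard_alloc(&my_guard, size, waitok);
	/* ... normal allocation path ... */
	return NULL;
}

void
my_free(void *p, size_t size)
{
	if (size <= my_guard_size) {
		uvm_kmguard_free(&my_guard, size, p);
		return;
	}
	/* ... normal free path ... */
}

Because my_guard_size is zero until uvm_kmguard_init() succeeds, the size comparisons cost one branch when the guard is disabled, which is why the hooks can stay in the allocator unconditionally under DEBUG.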