Add support for detecting use-after-frees in KASAN. We poison each freed
buffer; any subsequent read or write to it is then detected as illegal.
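In outline: when an object is handed out, its requested size is marked valid
in KASAN's shadow memory and the redzone beyond it stays poisoned; when it is
freed, the whole object is poisoned again, so the instrumentation flags any
later access. The standalone C model below only sketches that bookkeeping:
the model_* helpers, the sizes, and the assert-based access check are
invented for illustration, whereas the kernel does the real work through
kasan_alloc() on actual shadow memory, as the diff shows.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define OBJ_SIZE  64	/* stand-in for pr_reqsize (illustrative) */
#define REDZ_SIZE 8	/* stand-in for the appended redzone (illustrative) */

static unsigned char object[OBJ_SIZE + REDZ_SIZE];
static bool shadow_valid[OBJ_SIZE + REDZ_SIZE];

/*
 * Model of kasan_alloc(p, size, size_with_redzone): the first `size`
 * bytes become valid, the rest up to `size_with_redzone` is poisoned.
 */
static void
model_kasan_alloc(size_t size, size_t size_with_redzone)
{
	for (size_t i = 0; i < size_with_redzone; i++)
		shadow_valid[i] = (i < size);
}

/* Model of an instrumented memory access: trap on poisoned bytes. */
static unsigned char
checked_read(size_t off)
{
	assert(shadow_valid[off] && "illegal access (freed or redzone)");
	return object[off];
}

int
main(void)
{
	/* pool_get(): object valid, redzone beyond it poisoned. */
	model_kasan_alloc(OBJ_SIZE, OBJ_SIZE + REDZ_SIZE);
	(void)checked_read(0);		/* fine */

	/* pool_put(): poison everything -- kasan_alloc(p, 0, ...) in the
	 * diff below -- so any later read or write is caught. */
	model_kasan_alloc(0, OBJ_SIZE + REDZ_SIZE);
	/* checked_read(0) would now fire the assertion. */
	return 0;
}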

 * Add POOL_CHECK_MAGIC, disabled under KASAN because KASAN already
   performs the same detection in a better way.

 * Register the size+redzone in the pool structure, to avoid recomputing
   it at each allocation and free.

 * Fix the CTOR/DTOR check in KLEAK: the fields are never NULL, because
   pool_cache_init() installs nullop when none is given, so compare
   against nullop instead (see the sketch after this list).
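A minimal userland sketch of that last fix, assuming a trimmed-down
pool_cache structure and a stand-in nullop, both invented for illustration:

#include <assert.h>
#include <stddef.h>

/*
 * Simplified stand-in for the kernel's nullop(), given the ctor
 * signature directly to avoid the cast used in the real macro.
 */
static int
nullop(void *arg, void *obj, int flags)
{
	(void)arg; (void)obj; (void)flags;
	return 0;
}

struct pc_model {			/* trimmed-down pool_cache */
	int (*pc_ctor)(void *, void *, int);
};

int
main(void)
{
	struct pc_model pc;

	/* pool_cache_init() substitutes nullop when no ctor is given. */
	pc.pc_ctor = nullop;

	assert(pc.pc_ctor != NULL);	/* old test: true even with no ctor */
	assert(pc.pc_ctor == nullop);	/* new pc_has_ctor() compares against
					   nullop and correctly sees "no ctor" */
	return 0;
}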
maxv 2018-12-16 21:03:35 +00:00
parent 931705a8c6
commit 7347d2a8bd
2 changed files with 65 additions and 22 deletions

sys/kern/subr_pool.c

@@ -1,7 +1,7 @@
-/* $NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $ */
+/* $NetBSD: subr_pool.c,v 1.229 2018/12/16 21:03:35 maxv Exp $ */
-/*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
+/*
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018
* The NetBSD Foundation, Inc.
* All rights reserved.
*
@@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.228 2018/12/02 21:00:13 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.229 2018/12/16 21:03:35 maxv Exp $");
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
@@ -100,10 +100,12 @@ static struct pool psppool;
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
+static void pool_cache_redzone_check(pool_cache_t, void *);
#else
-# define pool_redzone_init(pp, sz) /* NOTHING */
-# define pool_redzone_fill(pp, ptr) /* NOTHING */
-# define pool_redzone_check(pp, ptr) /* NOTHING */
+# define pool_redzone_init(pp, sz) __nothing
+# define pool_redzone_fill(pp, ptr) __nothing
+# define pool_redzone_check(pp, ptr) __nothing
+# define pool_cache_redzone_check(pc, ptr) __nothing
#endif
#ifdef KLEAK
@@ -114,6 +116,11 @@ static void pool_cache_kleak_fill(pool_cache_t, void *);
#define pool_cache_kleak_fill(pc, ptr) __nothing
#endif
+#define pc_has_ctor(pc) \
+ (pc->pc_ctor != (int (*)(void *, void *, int))nullop)
+#define pc_has_dtor(pc) \
+ (pc->pc_dtor != (void (*)(void *, void *))nullop)
static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);
@@ -170,8 +177,12 @@ struct pool_item_header {
#define ph_itemlist ph_u.phu_normal.phu_itemlist
#define ph_bitmap ph_u.phu_notouch.phu_bitmap
+#if defined(DIAGNOSTIC) && !defined(KASAN)
+#define POOL_CHECK_MAGIC
+#endif
struct pool_item {
-#ifdef DIAGNOSTIC
+#ifdef POOL_CHECK_MAGIC
u_int pi_magic;
#endif
#define PI_MAGIC 0xdeaddeadU
@@ -890,10 +901,12 @@ pool_get(struct pool *pp, int flags)
KASSERTMSG((pp->pr_nitems > 0),
"%s: [%s] nitems %u inconsistent on itemlist",
__func__, pp->pr_wchan, pp->pr_nitems);
+#ifdef POOL_CHECK_MAGIC
KASSERTMSG((pi->pi_magic == PI_MAGIC),
"%s: [%s] free list modified: "
"magic=%x; page %p; item addr %p", __func__,
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
+#endif
/*
* Remove from item list.
@@ -977,7 +990,7 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
if (pp->pr_roflags & PR_NOTOUCH) {
pr_item_notouch_put(pp, ph, v);
} else {
-#ifdef DIAGNOSTIC
+#ifdef POOL_CHECK_MAGIC
pi->pi_magic = PI_MAGIC;
#endif
#ifdef DEBUG
@@ -989,6 +1002,13 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
}
}
#endif
+if (pp->pr_redzone) {
+/*
+ * Mark the pool_item as valid. The rest is already
+ * invalid.
+ */
+kasan_alloc(pi, sizeof(*pi), sizeof(*pi));
+}
LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
}
@@ -1237,7 +1257,7 @@ pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
/* Insert on page list */
LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
-#ifdef DIAGNOSTIC
+#ifdef POOL_CHECK_MAGIC
pi->pi_magic = PI_MAGIC;
#endif
cp = (char *)cp + pp->pr_size;
@@ -1540,12 +1560,12 @@ pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
void (*pr)(const char *, ...))
{
struct pool_item_header *ph;
-struct pool_item *pi __diagused;
LIST_FOREACH(ph, pl, ph_pagelist) {
(*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
ph->ph_page, ph->ph_nmissing, ph->ph_time);
-#ifdef DIAGNOSTIC
+#ifdef POOL_CHECK_MAGIC
+struct pool_item *pi;
if (!(pp->pr_roflags & PR_NOTOUCH)) {
LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
if (pi->pi_magic != PI_MAGIC) {
@@ -1700,7 +1720,7 @@ pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
pi != NULL;
pi = LIST_NEXT(pi,pi_list), n++) {
-#ifdef DIAGNOSTIC
+#ifdef POOL_CHECK_MAGIC
if (pi->pi_magic != PI_MAGIC) {
if (label != NULL)
printf("%s: ", label);
@@ -2006,6 +2026,15 @@ pool_cache_reclaim(pool_cache_t pc)
static void
pool_cache_destruct_object1(pool_cache_t pc, void *object)
{
+if (pc->pc_pool.pr_redzone) {
+/*
+ * The object is marked as invalid. Temporarily mark it as
+ * valid for the destructor. pool_put below will re-mark it
+ * as invalid.
+ */
+kasan_alloc(object, pc->pc_pool.pr_reqsize,
+pc->pc_pool.pr_reqsize_with_redzone);
+}
(*pc->pc_dtor)(pc->pc_arg, object);
pool_put(&pc->pc_pool, object);
@@ -2463,7 +2492,7 @@ pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
int s;
KASSERT(object != NULL);
-pool_redzone_check(&pc->pc_pool, object);
+pool_cache_redzone_check(pc, object);
FREECHECK_IN(&pc->pc_freecheck, object);
/* Lock out interrupts and disable preemption. */
@@ -2697,6 +2726,9 @@ pool_allocator_free(struct pool *pp, void *v)
{
struct pool_allocator *pa = pp->pr_alloc;
+if (pp->pr_redzone) {
+kasan_alloc(v, pa->pa_pagesz, pa->pa_pagesz);
+}
(*pa->pa_free)(pp, v);
}
@@ -2753,7 +2785,7 @@ pool_kleak_fill(struct pool *pp, void *p)
static void
pool_cache_kleak_fill(pool_cache_t pc, void *p)
{
-if (__predict_false(pc->pc_ctor != NULL || pc->pc_dtor != NULL)) {
+if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
return;
}
pool_kleak_fill(&pc->pc_pool, p);
@@ -2804,6 +2836,7 @@ pool_redzone_init(struct pool *pp, size_t requested_size)
*/
if (pp->pr_size - requested_size >= redzsz) {
pp->pr_reqsize = requested_size;
+pp->pr_reqsize_with_redzone = requested_size + redzsz;
pp->pr_redzone = true;
return;
}
@@ -2817,6 +2850,7 @@ pool_redzone_init(struct pool *pp, size_t requested_size)
/* Ok, we can */
pp->pr_size = nsz;
pp->pr_reqsize = requested_size;
+pp->pr_reqsize_with_redzone = requested_size + redzsz;
pp->pr_redzone = true;
} else {
/* No space for a red zone... snif :'( */
@@ -2832,9 +2866,7 @@ pool_redzone_fill(struct pool *pp, void *p)
if (!pp->pr_redzone)
return;
#ifdef KASAN
-size_t size_with_redzone = pp->pr_reqsize;
-kasan_add_redzone(&size_with_redzone);
-kasan_alloc(p, pp->pr_reqsize, size_with_redzone);
+kasan_alloc(p, pp->pr_reqsize, pp->pr_reqsize_with_redzone);
#else
uint8_t *cp, pat;
const uint8_t *ep;
@@ -2863,9 +2895,7 @@ pool_redzone_check(struct pool *pp, void *p)
if (!pp->pr_redzone)
return;
#ifdef KASAN
-size_t size_with_redzone = pp->pr_reqsize;
-kasan_add_redzone(&size_with_redzone);
-kasan_free(p, size_with_redzone);
+kasan_alloc(p, 0, pp->pr_reqsize_with_redzone);
#else
uint8_t *cp, pat, expected;
const uint8_t *ep;
@@ -2892,6 +2922,18 @@ pool_redzone_check(struct pool *pp, void *p)
#endif
}
+static void
+pool_cache_redzone_check(pool_cache_t pc, void *p)
+{
+#ifdef KASAN
+/* If there is a ctor/dtor, leave the data as valid. */
+if (__predict_false(pc_has_ctor(pc) || pc_has_dtor(pc))) {
+return;
+}
+#endif
+pool_redzone_check(&pc->pc_pool, p);
+}
#endif /* POOL_REDZONE */

sys/sys/pool.h

@@ -1,4 +1,4 @@
-/* $NetBSD: pool.h,v 1.82 2017/12/16 03:13:29 mrg Exp $ */
+/* $NetBSD: pool.h,v 1.83 2018/12/16 21:03:35 maxv Exp $ */
/*-
* Copyright (c) 1997, 1998, 1999, 2000, 2007 The NetBSD Foundation, Inc.
@@ -195,6 +195,7 @@ struct pool {
void *pr_qcache;
bool pr_redzone;
size_t pr_reqsize;
+size_t pr_reqsize_with_redzone;
};
/*