Introduce POOL_REDZONE.

maxv 2015-07-28 12:32:44 +00:00
parent a0398aa0e8
commit 40e89e3a3c
2 changed files with 151 additions and 12 deletions
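In short: with POOL_REDZONE, every item a pool hands out carries a small guard region (a "red zone") just past the size the caller requested. pool_get() fills it with an address-derived byte pattern, and pool_put() re-checks that pattern and panics on a mismatch, so a write past the requested size is caught at free time instead of silently corrupting the next item. Per-item layout after this change (sketch, in terms of the fields the diff adds):

	|<------------------ pr_size ------------------>|
	|<------- pr_reqsize ------->|<--- red zone --->|
	+----------------------------+------------------+
	| caller-usable item         | pattern bytes    |
	+----------------------------+------------------+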

sys/kern/subr_pool.c

@@ -1,13 +1,14 @@
-/* $NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $ */
+/* $NetBSD: subr_pool.c,v 1.204 2015/07/28 12:32:44 maxv Exp $ */
/*-
-* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014
+* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
* The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
-* Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
+* Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
+* Maxime Villard.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +33,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.203 2014/06/13 19:09:07 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.204 2015/07/28 12:32:44 maxv Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -82,6 +83,17 @@ static struct pool phpool[PHPOOL_MAX];
static struct pool psppool;
#endif
#ifdef POOL_REDZONE
# define POOL_REDZONE_SIZE 2
static void pool_redzone_init(struct pool *, size_t);
static void pool_redzone_fill(struct pool *, void *);
static void pool_redzone_check(struct pool *, void *);
#else
# define pool_redzone_init(pp, sz) /* NOTHING */
# define pool_redzone_fill(pp, ptr) /* NOTHING */
# define pool_redzone_check(pp, ptr) /* NOTHING */
#endif
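Note the #else branch: with the option off, the three hooks expand to nothing, so the call sites added throughout this file need no #ifdef and cost nothing in a kernel built without POOL_REDZONE. A minimal, self-contained illustration of the same compile-out pattern (the option and function names here are hypothetical):

	#include <stdio.h>

	#ifdef MY_DEBUG_OPTION		/* hypothetical kernel option */
	static void
	debug_hook(const char *msg)
	{
		printf("hook: %s\n", msg);
	}
	#else
	# define debug_hook(msg) /* NOTHING */
	#endif

	int
	main(void)
	{
		debug_hook("only printed when built with -DMY_DEBUG_OPTION");
		return 0;
	}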
static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);
@@ -459,7 +471,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
const char *wchan, struct pool_allocator *palloc, int ipl)
{
struct pool *pp1;
-size_t trysize, phsize;
+size_t trysize, phsize, prsize;
int off, slack;
#ifdef DEBUG
@@ -506,13 +518,14 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
if (align == 0)
align = ALIGN(1);
-if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
-size = sizeof(struct pool_item);
+prsize = size;
+if ((flags & PR_NOTOUCH) == 0 && prsize < sizeof(struct pool_item))
+prsize = sizeof(struct pool_item);
-size = roundup(size, align);
+prsize = roundup(prsize, align);
#ifdef DIAGNOSTIC
-if (size > palloc->pa_pagesz)
-panic("pool_init: pool item size (%zu) too large", size);
+if (prsize > palloc->pa_pagesz)
+panic("pool_init: pool item size (%zu) too large", prsize);
#endif
/*
@@ -529,7 +542,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_maxpages = UINT_MAX;
pp->pr_roflags = flags;
pp->pr_flags = 0;
-pp->pr_size = size;
+pp->pr_size = prsize;
pp->pr_align = align;
pp->pr_wchan = wchan;
pp->pr_alloc = palloc;
@@ -544,6 +557,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
pp->pr_drain_hook = NULL;
pp->pr_drain_hook_arg = NULL;
pp->pr_freecheck = NULL;
pool_redzone_init(pp, size);
/*
* Decide whether to put the page header off page to avoid
@@ -935,6 +949,7 @@ pool_get(struct pool *pp, int flags)
mutex_exit(&pp->pr_lock);
KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
FREECHECK_OUT(&pp->pr_freecheck, v);
pool_redzone_fill(pp, v);
return (v);
}
@@ -948,6 +963,7 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
struct pool_item_header *ph;
KASSERT(mutex_owned(&pp->pr_lock));
pool_redzone_check(pp, v);
FREECHECK_IN(&pp->pr_freecheck, v);
LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
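The two hooks are symmetric: pool_get() fills the red zone right before handing the object out (after FREECHECK_OUT), and pool_do_put() checks it first thing on free (before FREECHECK_IN), before any freelist bookkeeping reuses the memory. Consumers need no change; a sketch of a typical pool user that now gets the checking for free (struct foo, the pool name, and the foo_* functions are hypothetical):

	#include <sys/param.h>
	#include <sys/pool.h>

	struct foo {
		int	f_refcnt;
		char	f_name[32];
	};

	static struct pool foo_pool;

	void
	foo_init(void)
	{
		pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
		    NULL, IPL_NONE);
	}

	struct foo *
	foo_alloc(void)
	{
		/* With POOL_REDZONE, the guard bytes are filled before this returns. */
		return pool_get(&foo_pool, PR_WAITOK);
	}

	void
	foo_free(struct foo *fp)
	{
		/* ...and verified here, before the item goes back on the free list. */
		pool_put(&foo_pool, fp);
	}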
@@ -2188,6 +2204,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
}
FREECHECK_OUT(&pc->pc_freecheck, object);
pool_redzone_fill(&pc->pc_pool, object);
return false;
}
@@ -2233,6 +2250,7 @@ pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
cc->cc_hits++;
splx(s);
FREECHECK_OUT(&pc->pc_freecheck, object);
pool_redzone_fill(&pc->pc_pool, object);
return object;
}
@@ -2376,6 +2394,7 @@ pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
int s;
KASSERT(object != NULL);
pool_redzone_check(&pc->pc_pool, object);
FREECHECK_IN(&pc->pc_freecheck, object);
/* Lock out interrupts and disable preemption. */
@@ -2597,6 +2616,120 @@ pool_page_free_meta(struct pool *pp, void *v)
vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}
#ifdef POOL_REDZONE
#if defined(_LP64)
# define PRIME 0x9e37fffffffc0000UL
#else /* defined(_LP64) */
# define PRIME 0x9e3779b1
#endif /* defined(_LP64) */
#define STATIC_BYTE 0xFE
CTASSERT(POOL_REDZONE_SIZE > 1);
static inline uint8_t
pool_pattern_generate(const void *p)
{
return (uint8_t)(((uintptr_t)p) * PRIME
>> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
}
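A note on pool_pattern_generate(): this is multiplicative hashing. The byte's own address is multiplied by a large odd constant (0x9e3779b1 is the classic constant close to 2^32 divided by the golden ratio; the _LP64 value is a 64-bit variant of it), and the shift keeps only the top byte of the product ((8 - 1) * 8 = 56 bits on LP64), where the mixing is best. Each red-zone byte thus expects a value derived from its own address, so a red zone copied wholesale from another item still fails the check. A self-contained userland sketch of the same function, printing the pattern for consecutive addresses (assumes a hosted C99 environment):

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	#if UINTPTR_MAX > 0xffffffffUL
	# define PRIME 0x9e37fffffffc0000UL
	#else
	# define PRIME 0x9e3779b1
	#endif

	static inline uint8_t
	pool_pattern_generate(const void *p)
	{
		/* Top byte of (address * PRIME); changes from one byte to the next. */
		return (uint8_t)(((uintptr_t)p) * PRIME
		    >> (sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT);
	}

	int
	main(void)
	{
		char buf[8];

		for (int i = 0; i < 8; i++)
			printf("%p -> 0x%02x\n", (void *)&buf[i],
			    pool_pattern_generate(&buf[i]));
		return 0;
	}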
static void
pool_redzone_init(struct pool *pp, size_t requested_size)
{
size_t nsz;
if (pp->pr_roflags & PR_NOTOUCH) {
pp->pr_reqsize = 0;
pp->pr_redzone = false;
return;
}
/*
* We may have extended the requested size earlier; check if
* there's naturally space in the padding for a red zone.
*/
if (pp->pr_size - requested_size >= POOL_REDZONE_SIZE) {
pp->pr_reqsize = requested_size;
pp->pr_redzone = true;
return;
}
/*
* No space in the natural padding; check whether we can extend
* the pool's item size a bit to make room for one.
*/
nsz = roundup(pp->pr_size + POOL_REDZONE_SIZE, pp->pr_align);
if (nsz <= pp->pr_alloc->pa_pagesz) {
/* Ok, we can */
pp->pr_size = nsz;
pp->pr_reqsize = requested_size;
pp->pr_redzone = true;
} else {
/* No space for a red zone... snif :'( */
pp->pr_reqsize = 0;
pp->pr_redzone = false;
printf("pool redzone disabled for '%s'\n", pp->pr_wchan);
}
}
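Two worked cases for the sizing logic above (illustrative numbers; they assume sizeof(struct pool_item) <= 30 and an effective alignment of 8):

	requested size 30:  pr_size = roundup(30, 8) = 32; 32 - 30 = 2 >= POOL_REDZONE_SIZE,
	                    so the red zone fits in the natural alignment padding
	requested size 32:  pr_size = 32, no padding left;
	                    the item grows: pr_size = roundup(32 + 2, 8) = 40

Only when even the grown size would exceed pa_pagesz is the red zone given up, with the printf above as the only trace.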
static void
pool_redzone_fill(struct pool *pp, void *p)
{
uint8_t *cp, pat;
const uint8_t *ep;
if (!pp->pr_redzone)
return;
cp = (uint8_t *)p + pp->pr_reqsize;
ep = cp + POOL_REDZONE_SIZE;
/*
* We really don't want the first byte of the red zone to be '\0';
* an off-by-one in a string may not be properly detected.
*/
pat = pool_pattern_generate(cp);
*cp = (pat == '\0') ? STATIC_BYTE: pat;
cp++;
while (cp < ep) {
*cp = pool_pattern_generate(cp);
cp++;
}
}
static void
pool_redzone_check(struct pool *pp, void *p)
{
uint8_t *cp, pat, expected;
const uint8_t *ep;
if (!pp->pr_redzone)
return;
cp = (uint8_t *)p + pp->pr_reqsize;
ep = cp + POOL_REDZONE_SIZE;
pat = pool_pattern_generate(cp);
expected = (pat == '\0') ? STATIC_BYTE: pat;
if (expected != *cp) {
panic("%s: %p: 0x%02x != 0x%02x\n",
__func__, cp, *cp, expected);
}
cp++;
while (cp < ep) {
expected = pool_pattern_generate(cp);
if (*cp != expected) {
panic("%s: %p: 0x%02x != 0x%02x\n",
__func__, cp, *cp, expected);
}
cp++;
}
}
#endif /* POOL_REDZONE */
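Taken together, fill and check make an overrun detectable at the pool_put() that follows it. A self-contained userland rendition of the pair, with a deliberate one-byte overrun (plain malloc stands in for the pool; where the kernel version panics, this prints):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>
	#include <limits.h>

	#define POOL_REDZONE_SIZE 2
	#define STATIC_BYTE 0xFE
	#if UINTPTR_MAX > 0xffffffffUL
	# define PRIME 0x9e37fffffffc0000UL
	#else
	# define PRIME 0x9e3779b1
	#endif

	static inline uint8_t
	pattern(const void *p)
	{
		return (uint8_t)(((uintptr_t)p) * PRIME
		    >> (sizeof(uintptr_t) - sizeof(uint8_t)) * CHAR_BIT);
	}

	static void
	redzone_fill(uint8_t *item, size_t reqsize)
	{
		uint8_t *cp = item + reqsize;
		const uint8_t *ep = cp + POOL_REDZONE_SIZE;
		uint8_t pat = pattern(cp);

		/* First byte must not be '\0' (see the comment in the commit). */
		*cp = (pat == '\0') ? STATIC_BYTE : pat;
		for (cp++; cp < ep; cp++)
			*cp = pattern(cp);
	}

	static int
	redzone_ok(const uint8_t *item, size_t reqsize)
	{
		const uint8_t *cp = item + reqsize;
		const uint8_t *ep = cp + POOL_REDZONE_SIZE;
		uint8_t pat = pattern(cp);

		if (*cp != ((pat == '\0') ? STATIC_BYTE : pat))
			return 0;
		for (cp++; cp < ep; cp++) {
			if (*cp != pattern(cp))
				return 0;
		}
		return 1;
	}

	int
	main(void)
	{
		size_t reqsize = 13;
		uint8_t *item = malloc(reqsize + POOL_REDZONE_SIZE);

		if (item == NULL)
			return 1;
		redzone_fill(item, reqsize);
		printf("before overrun: %s\n", redzone_ok(item, reqsize) ? "ok" : "CORRUPT");
		item[reqsize] ^= 0xff;	/* one-byte write past the requested size */
		printf("after overrun:  %s\n", redzone_ok(item, reqsize) ? "ok" : "CORRUPT");
		free(item);
		return 0;
	}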
#ifdef POOL_SUBPAGE
/* Sub-page allocator, for machines with large hardware pages. */
void *

sys/sys/pool.h

@@ -1,4 +1,4 @@
-/* $NetBSD: pool.h,v 1.77 2014/09/05 05:42:50 matt Exp $ */
+/* $NetBSD: pool.h,v 1.78 2015/07/28 12:32:44 maxv Exp $ */
/*-
* Copyright (c) 1997, 1998, 1999, 2000, 2007 The NetBSD Foundation, Inc.
@@ -189,6 +189,8 @@ struct pool {
*/
void *pr_freecheck;
void *pr_qcache;
bool pr_redzone;
size_t pr_reqsize;
};
/*
@@ -253,7 +255,11 @@ struct pool_cache {
unsigned int pc_nfull; /* full groups in cache */
unsigned int pc_npart; /* partial groups in cache */
unsigned int pc_refcnt; /* ref count for pagedaemon, etc */
/* Diagnostic aids. */
void *pc_freecheck;
bool pc_redzone;
size_t pc_reqsize;
/* CPU layer. */
pool_cache_cpu_t pc_cpu0 __aligned(CACHE_LINE_SIZE);
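pool.h grows the matching state: pr_redzone/pr_reqsize in struct pool, and pc_redzone/pc_reqsize among the pool_cache diagnostics. Note that the cache paths in this commit invoke the hooks on the embedded pool (&pc->pc_pool), so it is the pr_ fields that carry the live red-zone state. For completeness, a sketch of the cache-side consumer API, which picks up the same fill/verify behavior (struct foo and all foo_* names are hypothetical):

	#include <sys/param.h>
	#include <sys/pool.h>

	struct foo {
		int	f_refcnt;
	};

	static pool_cache_t foo_cache;

	void
	foo_init(void)
	{
		foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
		    "foocache", NULL, IPL_NONE, NULL, NULL, NULL);
	}

	struct foo *
	foo_alloc(void)
	{
		/* Red zone filled on both the hit path (pool_cache_get_paddr)
		 * and the miss path (pool_cache_get_slow), as added above. */
		return pool_cache_get(foo_cache, PR_WAITOK);
	}

	void
	foo_free(struct foo *fp)
	{
		/* Verified in pool_cache_put_paddr before the object is cached. */
		pool_cache_put(foo_cache, fp);
	}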