Bring down a fix from the "newlock" branch, slightly modified:

* In pool_prime_page(), assert that the object being placed onto the
  free list meets the alignment constraints (that "ioff" within the
  object is aligned to "align").
* In pool_init(), round up the object size to the alignment value (or
  ALIGN(1), if no special alignment is needed) so that the above invariant
  holds true.
This commit is contained in:
thorpej 2002-07-30 01:41:00 +00:00
parent 27174b9ec9
commit 4c9de392fd
1 changed file with 5 additions and 3 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: subr_pool.c,v 1.77 2002/07/11 17:18:48 matt Exp $ */
/* $NetBSD: subr_pool.c,v 1.78 2002/07/30 01:41:00 thorpej Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.77 2002/07/11 17:18:48 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.78 2002/07/30 01:41:00 thorpej Exp $");
#include "opt_pool.h"
#include "opt_poollog.h"
@ -415,7 +415,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
if (size < sizeof(struct pool_item))
size = sizeof(struct pool_item);
size = ALIGN(size);
size = roundup(size, align);
#ifdef DIAGNOSTIC
if (size > palloc->pa_pagesz)
panic("pool_init: pool item size (%lu) too large",
@ -1125,6 +1125,8 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
while (n--) {
pi = (struct pool_item *)cp;
KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
/* Insert on page list */
TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC