in the case of !PMAP_MAP_POOLPAGE, gather pool backend allocations into large chunks for kernel_map and kmem_map to ease kva fragmentation.

yamt 2005-01-01 21:08:02 +00:00
parent aa64686bba
commit a880e5e2b5
5 changed files with 262 additions and 47 deletions
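
The idea, as a rough standalone sketch (userspace C, not the NetBSD code; vac_get_page(), CHUNK_PAGES and the other names below are made up for illustration): instead of taking and returning one page worth of kernel virtual address space per pool page, address space is reserved in larger chunks and page-sized pieces are carved out of them, so the underlying map sees a few large entries instead of many single-page ones.

/*
 * Simplified illustration of the kva-cache idea: reserve address space
 * CHUNK_PAGES pages at a time and hand out page-sized pieces, attaching
 * backing store per page on demand.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define CHUNK_PAGES	32	/* cf. KM_VACACHE_SIZE = 32 * PAGE_SIZE */

static char *chunk;		/* current large reservation */
static size_t chunk_used;	/* bytes already handed out from it */
static size_t pagesz;

static void *
vac_get_page(void)
{
	char *va;

	if (chunk == NULL || chunk_used == CHUNK_PAGES * pagesz) {
		/* one big address-space reservation, no backing pages yet */
		chunk = mmap(NULL, CHUNK_PAGES * pagesz, PROT_NONE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (chunk == MAP_FAILED)
			return NULL;
		chunk_used = 0;
	}
	va = chunk + chunk_used;
	chunk_used += pagesz;
	/* make just this page usable, like pmap_kenter_pa() on one page */
	if (mprotect(va, pagesz, PROT_READ | PROT_WRITE) != 0)
		return NULL;
	return va;
}

int
main(void)
{
	char *a, *b;

	pagesz = (size_t)sysconf(_SC_PAGESIZE);
	a = vac_get_page();
	b = vac_get_page();
	printf("two pool pages, %td bytes apart in one reservation\n", b - a);
	return 0;
}

The real change does this with a pool (vmk_vacache) whose backend maps KM_VACACHE_SIZE of kva at a time via uvm_map(), as the uvm_km.c hunks below show.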

sys/kern/kern_malloc.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_malloc.c,v 1.92 2005/01/01 21:02:13 yamt Exp $ */
/* $NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $ */
/*
* Copyright (c) 1987, 1991, 1993
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.92 2005/01/01 21:02:13 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $");
#include "opt_lockdebug.h"
@@ -857,6 +857,7 @@ kmeminit(void)
kmem_map = uvm_km_suballoc(kernel_map, &kmb,
&kml, (vsize_t)(nkmempages << PAGE_SHIFT),
VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
uvm_km_vacache_init(kmem_map, "kvakmem", 0);
kmembase = (char *)kmb;
kmemlimit = (char *)kml;
#ifdef KMEMSTATS

sys/kern/subr_pool.c

@@ -1,4 +1,4 @@
/* $NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $ */
/* $NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $");
#include "opt_pool.h"
#include "opt_poollog.h"
@@ -82,6 +82,14 @@ static struct pool phpool[PHPOOL_MAX];
static struct pool psppool;
#endif
static void *pool_page_alloc_meta(struct pool *, int);
static void pool_page_free_meta(struct pool *, void *);
/* allocator for pool metadata */
static struct pool_allocator pool_allocator_meta = {
pool_page_alloc_meta, pool_page_free_meta
};
/* # of seconds to retain page after last use */
int pool_inactive_time = 10;
@@ -634,13 +642,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
* XXX LOCKING.
*/
if (phpool[0].pr_size == 0) {
struct pool_allocator *pa;
int idx;
#ifdef POOL_SUBPAGE
pa = &pool_allocator_kmem;
#else
pa = NULL;
#endif
for (idx = 0; idx < PHPOOL_MAX; idx++) {
static char phpool_names[PHPOOL_MAX][6+1+6+1];
int nelem;
@@ -655,14 +657,14 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
+ nelem * sizeof(uint16_t);
}
pool_init(&phpool[idx], sz, 0, 0, 0,
phpool_names[idx], pa);
phpool_names[idx], &pool_allocator_meta);
}
#ifdef POOL_SUBPAGE
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
PR_RECURSIVE, "psppool", &pool_allocator_kmem);
PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
0, "pcgpool", NULL);
0, "pcgpool", &pool_allocator_meta);
}
/* Insert into the list of all pools. */
@@ -2240,14 +2242,29 @@ pool_page_alloc(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
return ((void *) uvm_km_alloc_poolpage(waitok));
return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
}
void
pool_page_free(struct pool *pp, void *v)
{
uvm_km_free_poolpage((vaddr_t) v);
uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
}
static void *
pool_page_alloc_meta(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
}
static void
pool_page_free_meta(struct pool *pp, void *v)
{
uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
}
#ifdef POOL_SUBPAGE
@@ -2292,7 +2309,7 @@ pool_page_alloc_nointr(struct pool *pp, int flags)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
return ((void *) uvm_km_alloc_poolpage1(kernel_map,
return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
uvm.kernel_object, waitok));
}
@@ -2300,6 +2317,6 @@ void
pool_page_free_nointr(struct pool *pp, void *v)
{
uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */
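
For context, an ordinary kernel pool is unaffected by this change (illustrative sketch only; struct foo, foo_pool and "foopl" are made-up names): passing a NULL allocator still selects the default backend, which after this commit draws its pages from the kva cache on kmem_map, while the pool-internal pools above (phpool, psppool, pcgpool) now name pool_allocator_meta explicitly. Presumably that keeps pool metadata off the cached path, since the kva cache is itself implemented as a pool.

#include <sys/pool.h>

/* hypothetical client code, unchanged by this commit */
struct foo {
	int	f_val;
};

static struct pool foo_pool;

void
foo_init(void)
{

	/* NULL allocator: default backend, now the cached kmem_map path */
	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
}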

sys/uvm/uvm_extern.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.95 2005/01/01 21:02:13 yamt Exp $ */
/* $NetBSD: uvm_extern.h,v 1.96 2005/01/01 21:08:02 yamt Exp $ */
/*
*
@@ -609,6 +609,11 @@ vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *, vsize_t,
vaddr_t uvm_km_alloc_poolpage1(struct vm_map *,
struct uvm_object *, boolean_t);
void uvm_km_free_poolpage1(struct vm_map *, vaddr_t);
vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *,
struct uvm_object *, boolean_t);
void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t);
void uvm_km_vacache_init(struct vm_map *,
const char *, size_t);
extern __inline__ vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t sz, int flags)

sys/uvm/uvm_km.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $ */
/* $NetBSD: uvm_km.c,v 1.72 2005/01/01 21:08:02 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -134,7 +134,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.72 2005/01/01 21:08:02 yamt Exp $");
#include "opt_uvmhist.h"
@@ -142,6 +142,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $");
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <uvm/uvm.h>
@@ -158,6 +159,120 @@ struct vm_map *kernel_map = NULL;
static struct vm_map_kernel kernel_map_store;
static struct vm_map_entry kernel_first_mapent_store;
#if !defined(PMAP_MAP_POOLPAGE)
/*
* kva cache
*
* XXX maybe it's better to do this at the uvm_map layer.
*/
#define KM_VACACHE_SIZE (32 * PAGE_SIZE) /* XXX tune */
static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);
/* XXX */
#define KM_VACACHE_POOL_TO_MAP(pp) \
((struct vm_map *)((char *)(pp) - \
offsetof(struct vm_map_kernel, vmk_vacache)))
static void *
km_vacache_alloc(struct pool *pp, int flags)
{
vaddr_t va;
size_t size;
struct vm_map *map;
#if defined(DEBUG)
vaddr_t loopva;
#endif
size = pp->pr_alloc->pa_pagesz;
map = KM_VACACHE_POOL_TO_MAP(pp);
if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
((flags & PR_WAITOK) ? 0 : UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
return NULL;
#if defined(DEBUG)
for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
if (pmap_extract(pmap_kernel(), loopva, NULL))
panic("km_vacache_free: has mapping");
}
#endif
return (void *)va;
}
static void
km_vacache_free(struct pool *pp, void *v)
{
vaddr_t va = (vaddr_t)v;
size_t size = pp->pr_alloc->pa_pagesz;
struct vm_map *map;
#if defined(DEBUG)
vaddr_t loopva;
for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
if (pmap_extract(pmap_kernel(), loopva, NULL))
panic("km_vacache_free: has mapping");
}
#endif
map = KM_VACACHE_POOL_TO_MAP(pp);
uvm_unmap(map, va, va + size);
}
/*
* km_vacache_init: initialize kva cache.
*/
static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
struct vm_map_kernel *vmk;
struct pool *pp;
struct pool_allocator *pa;
KASSERT(VM_MAP_IS_KERNEL(map));
KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
vmk = vm_map_to_kernel(map);
pp = &vmk->vmk_vacache;
pa = &vmk->vmk_vacache_allocator;
memset(pa, 0, sizeof(*pa));
pa->pa_alloc = km_vacache_alloc;
pa->pa_free = km_vacache_free;
pa->pa_pagesz = (unsigned int)size;
pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
/* XXX for now.. */
pool_sethiwat(pp, 0);
}
void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
map->flags |= VM_MAP_VACACHE;
if (size == 0)
size = KM_VACACHE_SIZE;
km_vacache_init(map, name, size);
}
#else /* !defined(PMAP_MAP_POOLPAGE) */
void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
/* nothing */
}
#endif /* !defined(PMAP_MAP_POOLPAGE) */
/*
* uvm_km_init: init kernel maps and objects to reflect reality (i.e.
* KVM already allocated for text, data, bss, and static data structures).
@@ -215,6 +330,7 @@ uvm_km_init(start, end)
*/
kernel_map = &kernel_map_store.vmk_map;
uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}
/*
@@ -720,6 +836,55 @@ vaddr_t uvm_km_valloc_wait(struct vm_map *map, vsize_t sz)
*/
/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(map, obj, waitok)
struct vm_map *map;
struct uvm_object *obj;
boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
return uvm_km_alloc_poolpage1(map, obj, waitok);
#else
struct vm_page *pg;
struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
vaddr_t va;
int s = 0xdeadbeaf; /* XXX: gcc */
const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
if ((map->flags & VM_MAP_VACACHE) == 0)
return uvm_km_alloc_poolpage1(map, obj, waitok);
if (intrsafe)
s = splvm();
va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
if (intrsafe)
splx(s);
if (va == 0)
return 0;
KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
goto again;
} else {
if (intrsafe)
s = splvm();
pool_put(pp, (void *)va);
if (intrsafe)
splx(s);
return 0;
}
}
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
VM_PROT_READ|VM_PROT_WRITE);
pmap_update(pmap_kernel());
return va;
#endif /* PMAP_MAP_POOLPAGE */
}
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
struct vm_map *map;
@@ -745,22 +910,15 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
return (va);
#else
vaddr_t va;
int s;
int s = 0xdeadbeaf; /* XXX: gcc */
const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
/*
* NOTE: We may be called with a map that doens't require splvm
* protection (e.g. kernel_map). However, it does not hurt to
* go to splvm in this case (since unprocted maps will never be
* accessed in interrupt context).
*
* XXX We may want to consider changing the interface to this
* XXX function.
*/
s = splvm();
if (intrsafe)
s = splvm();
va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
splx(s);
if (intrsafe)
splx(s);
return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
@@ -771,6 +929,40 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
* => if the pmap specifies an alternate unmapping method, we use it.
*/
/* ARGSUSED */
void
uvm_km_free_poolpage_cache(map, addr)
struct vm_map *map;
vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
uvm_km_free_poolpage1(map, addr);
#else
struct pool *pp;
int s = 0xdeadbeaf; /* XXX: gcc */
const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
if ((map->flags & VM_MAP_VACACHE) == 0) {
uvm_km_free_poolpage1(map, addr);
return;
}
KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
pmap_update(pmap_kernel());
#endif
KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
pp = &vm_map_to_kernel(map)->vmk_vacache;
if (intrsafe)
s = splvm();
pool_put(pp, (void *)addr);
if (intrsafe)
splx(s);
#endif
}
/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
@@ -783,20 +975,13 @@ uvm_km_free_poolpage1(map, addr)
pa = PMAP_UNMAP_POOLPAGE(addr);
uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
int s;
int s = 0xdeadbeaf; /* XXX: gcc */
const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
/*
* NOTE: We may be called with a map that doens't require splvm
* protection (e.g. kernel_map). However, it does not hurt to
* go to splvm in this case (since unprocted maps will never be
* accessed in interrupt context).
*
* XXX We may want to consider changing the interface to this
* XXX function.
*/
s = splvm();
if (intrsafe)
s = splvm();
uvm_km_free(map, addr, PAGE_SIZE);
splx(s);
if (intrsafe)
splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
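
A side note on the KM_VACACHE_POOL_TO_MAP() macro introduced above: vmk_vacache is embedded in struct vm_map_kernel (see the uvm_map.h hunk below), so subtracting its offset from the pool pointer passed to the backend recovers the owning map. A minimal standalone demonstration of the same offsetof() pattern, with made-up types:

#include <assert.h>
#include <stddef.h>

struct inner {
	int	i_dummy;
};

struct outer {
	int		o_other;
	struct inner	o_member;	/* embedded, like vmk_vacache */
};

/* recover the enclosing structure from a pointer to the embedded member */
#define INNER_TO_OUTER(p) \
	((struct outer *)((char *)(p) - offsetof(struct outer, o_member)))

int
main(void)
{
	struct outer o;

	assert(INNER_TO_OUTER(&o.o_member) == &o);
	return 0;
}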

sys/uvm/uvm_map.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.h,v 1.41 2005/01/01 21:02:14 yamt Exp $ */
/* $NetBSD: uvm_map.h,v 1.42 2005/01/01 21:08:02 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -110,6 +110,7 @@
#endif /* _KERNEL */
#include <sys/tree.h>
#include <sys/pool.h>
#include <uvm/uvm_anon.h>
@@ -241,6 +242,11 @@ struct vm_map_kernel {
/* Freelist of map entry */
struct vm_map_entry *vmk_merged_entries;
/* Merged entries, kept for later splitting */
#if !defined(PMAP_MAP_POOLPAGE)
struct pool vmk_vacache; /* kva cache */
struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */
@@ -254,6 +260,7 @@ struct vm_map_kernel {
#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
#define VM_MAP_DYING 0x20 /* rw: map is being destroyed */
#define VM_MAP_TOPDOWN 0x40 /* ro: arrange map top-down */
#define VM_MAP_VACACHE 0x80 /* ro: use kva cache */
#ifdef _KERNEL
struct uvm_mapent_reservation {