Use a pool cache for L1 PT pages. When we can allocate a cached,
constructed L1 PT page, this saves us from having to copy the kernel
L1 PTEs into the user L1 PT page at fork time (it's already set up).

A simple test that repeats a rapid fork/exit cycle 10000 times shows a
1 second improvement on a 533MHz 21164A (12s to 11s).
thorpej 2000-12-07 05:59:07 +00:00
parent 7553470722
commit 5c7e0ada43
1 changed file with 105 additions and 36 deletions
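To see why the cache helps, here is a minimal user-space sketch of the constructed-object idea that the diff below implements with pool_cache(9). This is an illustration only, not the kernel API or part of the commit: NENTRIES, KERN_START, kernel_template, and the l1_* helpers are invented stand-ins for the real pmap structures.

/*
 * Illustrative sketch only -- not the NetBSD pool_cache(9) API and not
 * part of this commit.  NENTRIES, KERN_START, kernel_template and the
 * helper names are made-up stand-ins for the real pmap structures.
 */
#include <stdlib.h>
#include <string.h>

#define NENTRIES	1024		/* pretend L1 PTEs per page */
#define KERN_START	512		/* pretend first kernel L1 PTE index */

static unsigned long kernel_template[NENTRIES];	/* stands in for kernel_lev1map */

struct l1_page {
	struct l1_page *next;
	unsigned long pte[NENTRIES];
};

static struct l1_page *l1_cache;	/* freed pages, still constructed */

/* Constructor: runs only when backing storage is newly allocated, not on reuse. */
static void
l1_ctor(struct l1_page *pg)
{
	memset(pg->pte, 0, KERN_START * sizeof(pg->pte[0]));
	memcpy(&pg->pte[KERN_START], &kernel_template[KERN_START],
	    (NENTRIES - KERN_START) * sizeof(pg->pte[0]));
}

static struct l1_page *
l1_get(void)
{
	struct l1_page *pg;

	if ((pg = l1_cache) != NULL) {		/* hit: kernel PTEs already in place */
		l1_cache = pg->next;
		return (pg);
	}
	if ((pg = malloc(sizeof(*pg))) != NULL)
		l1_ctor(pg);			/* miss: pay the copy exactly once */
	return (pg);
}

static void
l1_put(struct l1_page *pg)
{
	/* Caller returns the page in constructed state: user PTEs cleared, kernel PTEs intact. */
	pg->next = l1_cache;
	l1_cache = pg;
}

With such a cache the copy of the kernel PTEs happens only on a genuine allocation; the pool_cache_invalidate() call added to pmap_growkernel() below plays the role of flushing the cached pages whenever the kernel mappings change, so stale pages are reconstructed on their next use.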


@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.151 2000/11/24 22:41:38 chs Exp $ */
/* $NetBSD: pmap.c,v 1.152 2000/12/07 05:59:07 thorpej Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
@@ -156,7 +156,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.151 2000/11/24 22:41:38 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.152 2000/12/07 05:59:07 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -277,6 +277,8 @@ TAILQ_HEAD(, pmap) pmap_all_pmaps;
* The pools from which pmap structures and sub-structures are allocated.
*/
struct pool pmap_pmap_pool;
struct pool pmap_l1pt_pool;
struct pool_cache pmap_l1pt_cache;
struct pool pmap_asn_pool;
struct pool pmap_asngen_pool;
struct pool pmap_pv_pool;
@@ -515,6 +517,11 @@ void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, long,
void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, long);
void pmap_l1pt_delref(pmap_t, pt_entry_t *, long);
void *pmap_l1pt_alloc(unsigned long, int, int);
void pmap_l1pt_free(void *, unsigned long, int);
int pmap_l1pt_ctor(void *, void *, int);
/*
* PV table management functions.
*/
@@ -945,6 +952,10 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
pmap_ncpuids = ncpuids;
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
pool_init(&pmap_l1pt_pool, PAGE_SIZE, 0, 0, 0, "l1ptpl",
0, pmap_l1pt_alloc, pmap_l1pt_free, M_VMPMAP);
pool_cache_init(&pmap_l1pt_cache, &pmap_l1pt_pool, pmap_l1pt_ctor,
NULL, NULL);
pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0,
"pmasnpl",
0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
@@ -3274,7 +3285,13 @@ pmap_physpage_alloc(int usage, paddr_t *pap)
struct pv_head *pvh;
paddr_t pa;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
/*
* Don't ask for a zero'd page in the L1PT case -- we will
* properly initialize it in the constructor.
*/
pg = uvm_pagealloc(NULL, 0, NULL, usage == PGU_L1PT ?
UVM_PGA_USERESERVE : UVM_PGA_USERESERVE|UVM_PGA_ZERO);
if (pg != NULL) {
pa = VM_PAGE_TO_PHYS(pg);
@@ -3445,8 +3462,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
l1idx = l1pte_index(va);
/* Update all the user pmaps. */
simple_lock(&pmap_all_pmaps_slock);
/* Update all the user pmaps. */
for (pm = TAILQ_FIRST(&pmap_all_pmaps);
pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) {
/* Skip the kernel pmap. */
@@ -3461,6 +3479,10 @@ pmap_growkernel(vaddr_t maxkvaddr)
pm->pm_lev1map[l1idx] = pte;
PMAP_UNLOCK(pm);
}
/* Invalidate the L1 PT cache. */
pool_cache_invalidate(&pmap_l1pt_cache);
simple_unlock(&pmap_all_pmaps_slock);
}
@@ -3504,9 +3526,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
int
pmap_lev1map_create(pmap_t pmap, long cpu_id)
{
paddr_t ptpa;
pt_entry_t pte;
int i;
pt_entry_t *l1pt;
#ifdef DIAGNOSTIC
if (pmap == pmap_kernel())
@@ -3516,32 +3536,11 @@ pmap_lev1map_create(pmap_t pmap, long cpu_id)
panic("pmap_lev1map_create: pmap uses non-reserved ASN");
#endif
/*
* Allocate a page for the level 1 table.
*/
if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE) {
/*
* Yow! No free pages! Try to steal a PT page from
* another pmap!
*/
if (pmap_ptpage_steal(pmap, PGU_L1PT, &ptpa) == FALSE)
return (KERN_RESOURCE_SHORTAGE);
}
pmap->pm_lev1map = (pt_entry_t *) ALPHA_PHYS_TO_K0SEG(ptpa);
l1pt = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
if (l1pt == NULL)
return (KERN_RESOURCE_SHORTAGE);
/*
* Initialize the new level 1 table by copying the
* kernel mappings into it.
*/
for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS);
i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++)
pmap->pm_lev1map[i] = kernel_lev1map[i];
/*
* Now, map the new virtual page table. NOTE: NO ASM!
*/
pte = ((ptpa >> PGSHIFT) << PG_SHIFT) | PG_V | PG_KRE | PG_KWE;
pmap->pm_lev1map[l1pte_index(VPTBASE)] = pte;
pmap->pm_lev1map = l1pt;
/*
* The page table base has changed; if the pmap was active,
@@ -3564,15 +3563,13 @@ pmap_lev1map_create(pmap_t pmap, long cpu_id)
void
pmap_lev1map_destroy(pmap_t pmap, long cpu_id)
{
paddr_t ptpa;
pt_entry_t *l1pt = pmap->pm_lev1map;
#ifdef DIAGNOSTIC
if (pmap == pmap_kernel())
panic("pmap_lev1map_destroy: got kernel pmap");
#endif
ptpa = ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap->pm_lev1map);
/*
* Go back to referencing the global kernel_lev1map.
*/
@@ -3603,7 +3600,79 @@ pmap_lev1map_destroy(pmap_t pmap, long cpu_id)
/*
* Free the old level 1 page table page.
*/
pmap_physpage_free(ptpa);
pool_cache_put(&pmap_l1pt_cache, l1pt);
}
/*
* pmap_l1pt_ctor:
*
* Pool cache constructor for L1 PT pages.
*/
int
pmap_l1pt_ctor(void *arg, void *object, int flags)
{
pt_entry_t *l1pt = object, pte;
int i;
/*
* Initialize the new level 1 table by zeroing the
* user portion and copying the kernel mappings into
* the kernel portion.
*/
for (i = 0; i < l1pte_index(VM_MIN_KERNEL_ADDRESS); i++)
l1pt[i] = 0;
for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS);
i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++)
l1pt[i] = kernel_lev1map[i];
/*
* Now, map the new virtual page table. NOTE: NO ASM!
*/
pte = ((ALPHA_K0SEG_TO_PHYS((vaddr_t) l1pt) >> PGSHIFT) << PG_SHIFT) |
PG_V | PG_KRE | PG_KWE;
l1pt[l1pte_index(VPTBASE)] = pte;
return (0);
}
/*
* pmap_l1pt_alloc:
*
* Page allocator for L1 PT pages.
*/
void *
pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
{
paddr_t ptpa;
/*
* Attempt to allocate a free page.
*/
if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE) {
#if 0
/*
* Yow! No free pages! Try to steal a PT page from
* another pmap!
*/
if (pmap_ptpage_steal(pmap, PGU_L1PT, &ptpa) == FALSE)
#endif
return (NULL);
}
return ((void *) ALPHA_PHYS_TO_K0SEG(ptpa));
}
/*
* pmap_l1pt_free:
*
* Page freer for L1 PT pages.
*/
void
pmap_l1pt_free(void *v, unsigned long sz, int mtype)
{
pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v));
}
/*