remove a special allocator for uareas, which is no longer necessary.

use pool_cache instead.
Author: yamt
Date: 2008-01-28 12:22:46 +00:00
Parent: cc7f05eb50
Commit: 52838e34f5
7 changed files with 85 additions and 121 deletions
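
For orientation, this is the shape of the pool_cache(9) API the commit
switches to. A minimal, kernel-only sketch, not code from the commit: the
example_* names are illustrative, and the real u-area cache (see the
uvm_glue.c hunks below) additionally passes a custom back-end allocator and
a constructor that wires each object's pages.

#include <sys/param.h>
#include <sys/pool.h>

static pool_cache_t example_cache;

void
example_init(void)
{
        /*
         * One cache per object type: USPACE-sized objects, default
         * alignment, PR_NOTOUCH so the pool keeps its free-list metadata
         * outside the objects, IPL_NONE because u-areas are never
         * allocated from interrupt context.
         */
        example_cache = pool_cache_init(USPACE, 0, 0, PR_NOTOUCH,
            "example", NULL, IPL_NONE, NULL, NULL, NULL);
}

vaddr_t
example_alloc(void)
{
        /* PR_WAITOK: may sleep; usually satisfied from a per-CPU cache. */
        return (vaddr_t)pool_cache_get(example_cache, PR_WAITOK);
}

void
example_free(vaddr_t va)
{
        /* Hand the still-constructed object back to the per-CPU cache. */
        pool_cache_put(example_cache, (void *)va);
}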

sys/kern/kern_exit.c

@@ -1,4 +1,4 @@
-/* $NetBSD: kern_exit.c,v 1.198 2008/01/02 11:48:49 ad Exp $ */
+/* $NetBSD: kern_exit.c,v 1.199 2008/01/28 12:22:46 yamt Exp $ */
/*-
* Copyright (c) 1998, 1999, 2006, 2007 The NetBSD Foundation, Inc.
@@ -74,7 +74,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.198 2008/01/02 11:48:49 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.199 2008/01/28 12:22:46 yamt Exp $");
#include "opt_ktrace.h"
#include "opt_perfctrs.h"
@@ -308,9 +308,6 @@ exit1(struct lwp *l, int rv)
if (p->p_emul->e_proc_exit)
(*p->p_emul->e_proc_exit)(p);
-/* Collect child u-areas. */
-uvm_uarea_drain(false);
/*
* Free the VM resources we're still holding on to.
* We must do this from a valid thread because doing
@@ -1001,11 +998,6 @@ proc_free(struct proc *p, struct rusage *ru)
rw_destroy(&p->p_reflock);
proc_free_mem(p);
-/*
- * Collect child u-areas.
- */
-uvm_uarea_drain(false);
}
/*

sys/sys/cpu_data.h

@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_data.h,v 1.15 2008/01/14 12:40:02 yamt Exp $ */
+/* $NetBSD: cpu_data.h,v 1.16 2008/01/28 12:22:46 yamt Exp $ */
/*-
* Copyright (c) 2004, 2006, 2007 The NetBSD Foundation, Inc.
@@ -88,9 +88,6 @@ struct cpu_data {
void *cpu_softcpu; /* soft interrupt table */
TAILQ_HEAD(,buf) cpu_biodone; /* finished block xfers */
u_int cpu_softints; /* pending (slow) softints */
-kmutex_t cpu_uarea_lock; /* uarea alloc lock */
-u_int cpu_uarea_cnt; /* count of free uareas */
-vaddr_t cpu_uarea_list; /* free uareas */
percpu_cpu_t cpu_percpu; /* per-cpu data */
};

sys/uvm/uvm_extern.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.143 2008/01/02 11:49:16 ad Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.144 2008/01/28 12:22:47 yamt Exp $ */
/*
*
@@ -590,7 +590,6 @@ void uvm_kick_scheduler(void);
void uvm_swapin(struct lwp *);
bool uvm_uarea_alloc(vaddr_t *);
void uvm_uarea_free(vaddr_t, struct cpu_info *);
-void uvm_uarea_drain(bool);
int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t);
void uvm_vsunlock(struct vmspace *, void *, size_t);
void uvm_lwp_hold(struct lwp *);

sys/uvm/uvm_glue.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.c,v 1.114 2008/01/02 11:49:16 ad Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.115 2008/01/28 12:22:47 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.114 2008/01/02 11:49:16 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.115 2008/01/28 12:22:47 yamt Exp $");
#include "opt_coredump.h"
#include "opt_kgdb.h"
@@ -285,9 +285,71 @@ void
uvm_cpu_attach(struct cpu_info *ci)
{
mutex_init(&ci->ci_data.cpu_uarea_lock, MUTEX_DEFAULT, IPL_NONE);
ci->ci_data.cpu_uarea_cnt = 0;
ci->ci_data.cpu_uarea_list = 0;
}
+static int
+uarea_swapin(vaddr_t addr)
+{
+return uvm_fault_wire(kernel_map, addr, addr + USPACE,
+VM_PROT_READ | VM_PROT_WRITE, 0);
+}
+static void
+uarea_swapout(vaddr_t addr)
+{
+uvm_fault_unwire(kernel_map, addr, addr + USPACE);
+}
+#ifndef USPACE_ALIGN
+#define USPACE_ALIGN 0
+#endif
+static pool_cache_t uvm_uarea_cache;
+static int
+uarea_ctor(void *arg, void *obj, int flags)
+{
+KASSERT((flags & PR_WAITOK) != 0);
+return uarea_swapin((vaddr_t)obj);
+}
+static void *
+uarea_poolpage_alloc(struct pool *pp, int flags)
+{
+return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
+USPACE_ALIGN, UVM_KMF_PAGEABLE |
+((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
+(UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
+}
+static void
+uarea_poolpage_free(struct pool *pp, void *addr)
+{
+uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
+UVM_KMF_PAGEABLE);
+}
+static struct pool_allocator uvm_uarea_allocator = {
+.pa_alloc = uarea_poolpage_alloc,
+.pa_free = uarea_poolpage_free,
+.pa_pagesz = USPACE,
+};
+void
+uvm_uarea_init(void)
+{
+uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0,
+#if USPACE_ALIGN == 0
+PR_NOALIGN |
+#endif
+PR_NOTOUCH,
+"uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
+}
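
A short note on the construction above: uarea_poolpage_alloc()/uarea_poolpage_free()
give the pool USPACE-sized chunks of pageable kernel VA, and uarea_ctor()
wires a chunk when it is first constructed, so every u-area sitting in the
cache is already wired and ready to hand out. PR_NOTOUCH tells the pool to
keep its free-list metadata outside the objects themselves (the memory is
pageable, so it should not be scribbled on while idle), and PR_NOALIGN,
applied only when USPACE_ALIGN is 0, tells the pool layer not to assume the
back-end pages are naturally aligned.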
/*
@@ -297,32 +359,9 @@ uvm_cpu_attach(struct cpu_info *ci)
bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{
-struct cpu_info *ci;
-vaddr_t uaddr;
-#ifndef USPACE_ALIGN
-#define USPACE_ALIGN 0
-#endif
-ci = curcpu();
-if (ci->ci_data.cpu_uarea_cnt > 0) {
-mutex_enter(&ci->ci_data.cpu_uarea_lock);
-if (ci->ci_data.cpu_uarea_cnt == 0) {
-mutex_exit(&ci->ci_data.cpu_uarea_lock);
-} else {
-uaddr = ci->ci_data.cpu_uarea_list;
-ci->ci_data.cpu_uarea_list = UAREA_NEXTFREE(uaddr);
-ci->ci_data.cpu_uarea_cnt--;
-mutex_exit(&ci->ci_data.cpu_uarea_lock);
-*uaddrp = uaddr;
-return true;
-}
-}
-*uaddrp = uvm_km_alloc(kernel_map, USPACE, USPACE_ALIGN,
-UVM_KMF_PAGEABLE);
-return false;
+*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
+return true;
}
/*
@@ -333,63 +372,7 @@ void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{
mutex_enter(&ci->ci_data.cpu_uarea_lock);
UAREA_NEXTFREE(uaddr) = ci->ci_data.cpu_uarea_list;
ci->ci_data.cpu_uarea_list = uaddr;
ci->ci_data.cpu_uarea_cnt++;
mutex_exit(&ci->ci_data.cpu_uarea_lock);
}
/*
* uvm_uarea_drain: return memory of u-areas over limit
* back to system
*
* => if asked to drain as much as possible, drain all cpus.
* => if asked to drain to low water mark, drain local cpu only.
*/
void
uvm_uarea_drain(bool empty)
{
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
vaddr_t uaddr, nuaddr;
int count;
if (empty) {
for (CPU_INFO_FOREACH(cii, ci)) {
mutex_enter(&ci->ci_data.cpu_uarea_lock);
count = ci->ci_data.cpu_uarea_cnt;
uaddr = ci->ci_data.cpu_uarea_list;
ci->ci_data.cpu_uarea_cnt = 0;
ci->ci_data.cpu_uarea_list = 0;
mutex_exit(&ci->ci_data.cpu_uarea_lock);
while (count != 0) {
nuaddr = UAREA_NEXTFREE(uaddr);
uvm_km_free(kernel_map, uaddr, USPACE,
UVM_KMF_PAGEABLE);
uaddr = nuaddr;
count--;
}
}
return;
}
ci = curcpu();
if (ci->ci_data.cpu_uarea_cnt > UVM_NUAREA_HIWAT) {
mutex_enter(&ci->ci_data.cpu_uarea_lock);
while (ci->ci_data.cpu_uarea_cnt > UVM_NUAREA_LOWAT) {
uaddr = ci->ci_data.cpu_uarea_list;
ci->ci_data.cpu_uarea_list = UAREA_NEXTFREE(uaddr);
ci->ci_data.cpu_uarea_cnt--;
mutex_exit(&ci->ci_data.cpu_uarea_lock);
uvm_km_free(kernel_map, uaddr, USPACE,
UVM_KMF_PAGEABLE);
mutex_enter(&ci->ci_data.cpu_uarea_lock);
}
mutex_exit(&ci->ci_data.cpu_uarea_lock);
}
pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}
/*
@@ -472,16 +455,12 @@ int swapdebug = 0;
void
uvm_swapin(struct lwp *l)
{
-vaddr_t addr;
int error;
/* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
KASSERT(l != curlwp);
-addr = USER_TO_UAREA(l->l_addr);
/* make L_INMEM true */
-error = uvm_fault_wire(kernel_map, addr, addr + USPACE,
-VM_PROT_READ | VM_PROT_WRITE, 0);
+error = uarea_swapin(USER_TO_UAREA(l->l_addr));
if (error) {
panic("uvm_swapin: rewiring stack failed: %d", error);
}
@@ -758,7 +737,6 @@ uvm_swapout_threads(void)
static void
uvm_swapout(struct lwp *l)
{
-vaddr_t addr;
struct proc *p = l->l_proc;
KASSERT(mutex_owned(&l->l_swaplock));
@@ -796,8 +774,7 @@ uvm_swapout(struct lwp *l)
/*
* Unwire the to-be-swapped process's user struct and kernel stack.
*/
-addr = USER_TO_UAREA(l->l_addr);
-uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */
+uarea_swapout(USER_TO_UAREA(l->l_addr));
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
}
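
Worth noting: the wiring and unwiring that uvm_swapin() and uvm_swapout()
used to open-code is now funneled through the new uarea_swapin() and
uarea_swapout() helpers, and uarea_swapin() is the same routine the pool
constructor calls, so there is a single place where u-area memory gets
wired.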

sys/uvm/uvm_glue.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.h,v 1.8 2005/12/11 12:25:29 christos Exp $ */
+/* $NetBSD: uvm_glue.h,v 1.9 2008/01/28 12:22:47 yamt Exp $ */
/*
*
@@ -45,6 +45,8 @@
void uvm_swapout_threads(void);
+void uvm_uarea_init(void);
#endif /* _KERNEL */
#endif /* _UVM_UVM_GLUE_H_ */

sys/uvm/uvm_init.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_init.c,v 1.31 2008/01/02 11:49:17 ad Exp $ */
+/* $NetBSD: uvm_init.c,v 1.32 2008/01/28 12:22:47 yamt Exp $ */
/*
*
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.31 2008/01/02 11:49:17 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.32 2008/01/28 12:22:47 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -177,6 +177,8 @@ uvm_init(void)
uvm_anon_init();
+uvm_uarea_init();
/*
* init readahead module
*/

sys/uvm/uvm_pdaemon.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.89 2008/01/02 11:49:19 ad Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.90 2008/01/28 12:22:47 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.89 2008/01/02 11:49:19 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.90 2008/01/28 12:22:47 yamt Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@@ -327,11 +327,6 @@ uvm_pageout(void *arg)
buf_drain(bufcnt << PAGE_SHIFT);
mutex_exit(&bufcache_lock);
-/*
- * free any cached u-areas we don't need
- */
-uvm_uarea_drain(true);
/*
* complete draining the pools.
*/
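
With the special allocator gone, there is nothing uarea-specific left for
the pagedaemon to do here: u-areas cached by pool_cache are reclaimed
through the generic pool-draining pass that immediately follows (the
"complete draining the pools" step above), which is what makes
uvm_uarea_drain() and its hand-tuned UVM_NUAREA_HIWAT/UVM_NUAREA_LOWAT
bookkeeping unnecessary.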