splimp() -> splvm()

thorpej 2001-01-14 02:10:01 +00:00
parent a624c70966
commit f4395a4eae
4 changed files with 20 additions and 20 deletions
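The change is mechanical: every splimp() that guarded UVM data structures reachable from interrupt-time malloc() becomes splvm(), with the surrounding save/restore bracket left untouched. A minimal standalone sketch of that bracket, with stub splvm()/splx() bodies standing in for the kernel's spl(9) primitives (which really raise and restore the CPU interrupt priority level):

#include <stdio.h>

/* Stubs standing in for the kernel's spl(9) primitives. */
static int cur_ipl;

static int
splvm(void)			/* mask interrupts that may enter the VM system */
{
	int s = cur_ipl;
	cur_ipl = 7;		/* hypothetical "VM" priority level */
	return s;		/* caller keeps the previous level */
}

static void
splx(int s)			/* restore the saved priority level */
{
	cur_ipl = s;
}

int
main(void)
{
	int s;

	s = splvm();		/* before this commit: s = splimp(); */
	/* ... critical section: kmem_map, page hash, free page queues ... */
	splx(s);

	printf("ipl back to %d\n", cur_ipl);
	return 0;
}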

sys/uvm/uvm_km.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.41 2000/11/27 04:36:40 nisimura Exp $ */
+/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -93,9 +93,9 @@
  * the vm system has several standard kernel submaps, including:
  *   kmem_map => contains only wired kernel memory for the kernel
  *               malloc.  *** access to kmem_map must be protected
- *               by splimp() because we are allowed to call malloc()
+ *               by splvm() because we are allowed to call malloc()
  *               at interrupt time ***
- *   mb_map => memory for large mbufs,  *** protected by splimp ***
+ *   mb_map => memory for large mbufs,  *** protected by splvm ***
  *   pager_map => used to map "buf" structures into kernel space
  *   exec_map => used during exec to handle exec args
  *   etc...
@@ -109,7 +109,7 @@
  *
  * most kernel private memory lives in kernel_object.  the only exception
  * to this is for memory that belongs to submaps that must be protected
- * by splimp().  each of these submaps has its own private kernel
+ * by splvm().  each of these submaps has its own private kernel
  * object (e.g. kmem_object, mb_object).
  *
  * note that just because a kernel object spans the entire kernel virtual
@@ -865,16 +865,16 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
 	int s;

 	/*
-	 * NOTE: We may be called with a map that doesn't require splimp
+	 * NOTE: We may be called with a map that doesn't require splvm
 	 * protection (e.g. kernel_map).  However, it does not hurt to
-	 * go to splimp in this case (since unprotected maps will never be
+	 * go to splvm in this case (since unprotected maps will never be
 	 * accessed in interrupt context).
 	 *
 	 * XXX We may want to consider changing the interface to this
 	 * XXX function.
 	 */
-	s = splimp();
+	s = splvm();
 	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
 	splx(s);
 	return (va);
@@ -902,16 +902,16 @@ uvm_km_free_poolpage1(map, addr)
 	int s;

 	/*
-	 * NOTE: We may be called with a map that doesn't require splimp
+	 * NOTE: We may be called with a map that doesn't require splvm
 	 * protection (e.g. kernel_map).  However, it does not hurt to
-	 * go to splimp in this case (since unprotected maps will never be
+	 * go to splvm in this case (since unprotected maps will never be
 	 * accessed in interrupt context).
 	 *
 	 * XXX We may want to consider changing the interface to this
 	 * XXX function.
 	 */
-	s = splimp();
+	s = splvm();
 	uvm_km_free(map, addr, PAGE_SIZE);
 	splx(s);
 #endif /* PMAP_UNMAP_POOLPAGE */
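The comments above note that raising the IPL on a map that needs no interrupt protection is merely harmless. Where it is needed (kmem_map), the order matters: the IPL must be raised before the lock is taken, otherwise an interrupt arriving mid-critical-section can call malloc() and spin on a lock held by the very code it preempted. A sketch of that reasoning, with illustrative stubs rather than the kernel's primitives:

/* Illustrative stubs: kmem_lock stands in for whatever lock protects
 * kmem_map, and splvm()/splx() for the real spl(9) primitives. */
static int kmem_lock;
static int ipl;

static int  splvm(void) { int s = ipl; ipl = 7; return s; }
static void splx(int s) { ipl = s; }

/* WRONG at base level: if a device interrupt fires between the two
 * kmem_lock operations and its handler calls malloc(), the handler
 * spins on kmem_lock forever -- the holder cannot run again until
 * the handler returns. */
static void
kmem_alloc_unsafe(void)
{
	kmem_lock = 1;
	/* ... allocate from kmem_map ... */
	kmem_lock = 0;
}

/* RIGHT: mask malloc()-capable interrupts first, unmask only after
 * the lock has been dropped. */
static void
kmem_alloc_safe(void)
{
	int s = splvm();
	kmem_lock = 1;
	/* ... allocate from kmem_map ... */
	kmem_lock = 0;
	splx(s);
}

int
main(void)
{
	kmem_alloc_unsafe();	/* safe only if no interrupt can malloc() */
	kmem_alloc_safe();
	return 0;
}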

sys/uvm/uvm_map.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.87 2000/12/13 08:06:11 enami Exp $ */
+/* $NetBSD: uvm_map.c,v 1.88 2001/01/14 02:10:01 thorpej Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -211,7 +211,7 @@ uvm_mapent_alloc(map)
 		me->flags = 0;
 		/* me can't be null, wait ok */
 	} else {
-		s = splimp();	/* protect kentry_free list with splimp */
+		s = splvm();	/* protect kentry_free list with splvm */
 		simple_lock(&uvm.kentry_lock);
 		me = uvm.kentry_free;
 		if (me) uvm.kentry_free = me->next;
@@ -246,7 +246,7 @@ uvm_mapent_free(me)
 	if ((me->flags & UVM_MAP_STATIC) == 0) {
 		pool_put(&uvm_map_entry_pool, me);
 	} else {
-		s = splimp();	/* protect kentry_free list with splimp */
+		s = splvm();	/* protect kentry_free list with splvm */
 		simple_lock(&uvm.kentry_lock);
 		me->next = uvm.kentry_free;
 		uvm.kentry_free = me;
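The uvm_map.c hunks wrap the static map-entry freelist (uvm.kentry_free) in the same splvm()-then-simple_lock() sequence. The list itself is a plain intrusive singly linked list; a standalone sketch of the push and pop being protected (the spl and lock calls are elided here, since the list manipulation is the point):

#include <stdio.h>
#include <stddef.h>

struct vm_map_entry {
	struct vm_map_entry *next;	/* freelist linkage */
	int flags;
};

static struct vm_map_entry *kentry_free;	/* head of the freelist */

/* Pop one entry, as uvm_mapent_alloc() does under splvm() + kentry_lock. */
static struct vm_map_entry *
kentry_pop(void)
{
	struct vm_map_entry *me = kentry_free;

	if (me != NULL)
		kentry_free = me->next;
	return me;
}

/* Push an entry back, as uvm_mapent_free() does under the same locks. */
static void
kentry_push(struct vm_map_entry *me)
{
	me->next = kentry_free;
	kentry_free = me;
}

int
main(void)
{
	static struct vm_map_entry pool[2];

	kentry_push(&pool[0]);
	kentry_push(&pool[1]);
	printf("popped %p then %p\n",
	    (void *)kentry_pop(), (void *)kentry_pop());
	return 0;
}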

sys/uvm/uvm_page.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.46 2000/12/01 09:54:42 chs Exp $ */
+/* $NetBSD: uvm_page.c,v 1.47 2001/01/14 02:10:02 thorpej Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -159,7 +159,7 @@ uvm_pageinsert(pg)
 #endif
 	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
 	simple_unlock(&uvm.hashlock);
@@ -186,7 +186,7 @@ uvm_pageremove(pg)
 	KASSERT(pg->flags & PG_TABLED);
 	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_REMOVE(buck, pg, hashq);
 	simple_unlock(&uvm.hashlock);
@@ -794,7 +794,7 @@ uvm_page_rehash()
 	 * now replace the old buckets with the new ones and rehash everything
 	 */
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	uvm.page_hash = newbuckets;
 	uvm.page_nhash = bucketcount;
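uvm_page.c applies the conversion to the object/offset page hash: every bucket operation runs under splvm() and uvm.hashlock. A standalone sketch of the bucket structure and a lookup, assuming a BSD-style <sys/queue.h> that provides TAILQ_FOREACH; the hash function is a stand-in, not UVM's uvm_pagehash():

#include <stdio.h>
#include <sys/queue.h>

struct vm_page {
	TAILQ_ENTRY(vm_page) hashq;	/* bucket linkage */
	void *uobject;			/* owning object */
	long offset;			/* offset within the object */
};

#define NHASH 64
TAILQ_HEAD(pglist, vm_page);
static struct pglist page_hash[NHASH];

/* Stand-in hash: mix the object pointer and the offset. */
static unsigned
pagehash(void *obj, long off)
{
	return (unsigned)(((unsigned long)obj >> 4) + off) % NHASH;
}

static struct vm_page *
pagelookup(void *obj, long off)
{
	struct pglist *buck = &page_hash[pagehash(obj, off)];
	struct vm_page *pg;

	/* In the kernel, this walk runs at splvm() under uvm.hashlock. */
	TAILQ_FOREACH(pg, buck, hashq)
		if (pg->uobject == obj && pg->offset == off)
			return pg;
	return NULL;
}

int
main(void)
{
	static int an_object;		/* stands in for a struct uvm_object */
	static struct vm_page pg;
	int i;

	for (i = 0; i < NHASH; i++)
		TAILQ_INIT(&page_hash[i]);

	pg.uobject = &an_object;
	pg.offset = 4096;
	TAILQ_INSERT_TAIL(&page_hash[pagehash(pg.uobject, pg.offset)],
	    &pg, hashq);

	printf("found: %s\n",
	    pagelookup(&an_object, 4096) != NULL ? "yes" : "no");
	return 0;
}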

sys/uvm/uvm_page_i.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page_i.h,v 1.14 2000/11/27 07:47:42 chs Exp $ */
+/* $NetBSD: uvm_page_i.h,v 1.15 2001/01/14 02:10:02 thorpej Exp $ */
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -92,7 +92,7 @@ uvm_lock_fpageq()
 {
 	int s;

-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.fpageqlock);
 	return (s);
 }
@@ -131,7 +131,7 @@ uvm_pagelookup(obj, off)
 	buck = &uvm.page_hash[uvm_pagehash(obj,off)];
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_FOREACH(pg, buck, hashq) {
 		if (pg->uobject == obj && pg->offset == off) {
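Finally, uvm_page_i.h converts uvm_lock_fpageq(), which bundles the IPL raise with the lock acquisition and returns the saved level for the caller to hand to the eventual splx(). The shape of that idiom, again with illustrative stubs rather than the kernel's primitives:

/* Illustrative stubs for the spl(9) and simple_lock primitives. */
static int ipl;
static int fpageq_locked;

static int  splvm(void) { int s = ipl; ipl = 7; return s; }
static void splx(int s) { ipl = s; }

/* Raise the IPL, take the lock, and return the saved level --
 * the shape of uvm_lock_fpageq(). */
static int
lock_fpageq(void)
{
	int s = splvm();

	fpageq_locked = 1;	/* simple_lock(&uvm.fpageqlock) in the kernel */
	return s;
}

/* The matching unlock drops the lock before restoring the IPL. */
static void
unlock_fpageq(int s)
{
	fpageq_locked = 0;	/* simple_unlock(&uvm.fpageqlock) */
	splx(s);
}

int
main(void)
{
	int s = lock_fpageq();

	/* ... manipulate the free page queues ... */
	unlock_fpageq(s);
	return 0;
}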