From f4395a4eae8c217f5bcefa31f8ed56d553cb5f88 Mon Sep 17 00:00:00 2001
From: thorpej
Date: Sun, 14 Jan 2001 02:10:01 +0000
Subject: [PATCH] splimp() -> splvm()

---
 sys/uvm/uvm_km.c     | 20 ++++++++++----------
 sys/uvm/uvm_map.c    |  6 +++---
 sys/uvm/uvm_page.c   |  8 ++++----
 sys/uvm/uvm_page_i.h |  6 +++---
 4 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 1c7d04d44c3e..be9bc4bb8732 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.41 2000/11/27 04:36:40 nisimura Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -93,9 +93,9 @@
  * the vm system has several standard kernel submaps, including:
  *   kmem_map => contains only wired kernel memory for the kernel
  *		malloc.   *** access to kmem_map must be protected
- *		by splimp() because we are allowed to call malloc()
+ *		by splvm() because we are allowed to call malloc()
  *		at interrupt time ***
- *   mb_map => memory for large mbufs,  *** protected by splimp ***
+ *   mb_map => memory for large mbufs,  *** protected by splvm ***
  *   pager_map => used to map "buf" structures into kernel space
  *   exec_map => used during exec to handle exec args
  *   etc...
@@ -109,7 +109,7 @@
  *
  * most kernel private memory lives in kernel_object.   the only exception
  * to this is for memory that belongs to submaps that must be protected
- * by splimp().  each of these submaps has their own private kernel
+ * by splvm().  each of these submaps has their own private kernel
  * object (e.g. kmem_object, mb_object).
  *
  * note that just because a kernel object spans the entire kernel virutal
@@ -865,16 +865,16 @@ uvm_km_alloc_poolpage1(map, obj, waitok)
 	int s;
 
 	/*
-	 * NOTE: We may be called with a map that doens't require splimp
+	 * NOTE: We may be called with a map that doens't require splvm
 	 * protection (e.g. kernel_map).  However, it does not hurt to
-	 * go to splimp in this case (since unprocted maps will never be
+	 * go to splvm in this case (since unprocted maps will never be
 	 * accessed in interrupt context).
 	 *
 	 * XXX We may want to consider changing the interface to this
 	 * XXX function.
 	 */
 
-	s = splimp();
+	s = splvm();
 	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
 	splx(s);
 	return (va);
@@ -902,16 +902,16 @@ uvm_km_free_poolpage1(map, addr)
 	int s;
 
 	/*
-	 * NOTE: We may be called with a map that doens't require splimp
+	 * NOTE: We may be called with a map that doens't require splvm
 	 * protection (e.g. kernel_map).  However, it does not hurt to
-	 * go to splimp in this case (since unprocted maps will never be
+	 * go to splvm in this case (since unprocted maps will never be
 	 * accessed in interrupt context).
 	 *
 	 * XXX We may want to consider changing the interface to this
 	 * XXX function.
 	 */
 
-	s = splimp();
+	s = splvm();
 	uvm_km_free(map, addr, PAGE_SIZE);
 	splx(s);
 #endif /* PMAP_UNMAP_POOLPAGE */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index f94d3902774c..7c919259e80b 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.87 2000/12/13 08:06:11 enami Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.88 2001/01/14 02:10:01 thorpej Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -211,7 +211,7 @@ uvm_mapent_alloc(map)
 		me->flags = 0;
 		/* me can't be null, wait ok */
 	} else {
-		s = splimp();	/* protect kentry_free list with splimp */
+		s = splvm();	/* protect kentry_free list with splvm */
 		simple_lock(&uvm.kentry_lock);
 		me = uvm.kentry_free;
 		if (me) uvm.kentry_free = me->next;
@@ -246,7 +246,7 @@ uvm_mapent_free(me)
 	if ((me->flags & UVM_MAP_STATIC) == 0) {
 		pool_put(&uvm_map_entry_pool, me);
 	} else {
-		s = splimp();	/* protect kentry_free list with splimp */
+		s = splvm();	/* protect kentry_free list with splvm */
 		simple_lock(&uvm.kentry_lock);
 		me->next = uvm.kentry_free;
 		uvm.kentry_free = me;
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 23837d9fe3c4..04b9315848bc 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.46 2000/12/01 09:54:42 chs Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.47 2001/01/14 02:10:02 thorpej Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -159,7 +159,7 @@ uvm_pageinsert(pg)
 #endif
 
 	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
 	simple_unlock(&uvm.hashlock);
@@ -186,7 +186,7 @@ uvm_pageremove(pg)
 	KASSERT(pg->flags & PG_TABLED);
 
 	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_REMOVE(buck, pg, hashq);
 	simple_unlock(&uvm.hashlock);
@@ -794,7 +794,7 @@ uvm_page_rehash()
 	 * now replace the old buckets with the new ones and rehash everything
 	 */
 
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	uvm.page_hash = newbuckets;
 	uvm.page_nhash = bucketcount;
diff --git a/sys/uvm/uvm_page_i.h b/sys/uvm/uvm_page_i.h
index 1384d794df27..b7a869cdadb7 100644
--- a/sys/uvm/uvm_page_i.h
+++ b/sys/uvm/uvm_page_i.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_i.h,v 1.14 2000/11/27 07:47:42 chs Exp $	*/
+/*	$NetBSD: uvm_page_i.h,v 1.15 2001/01/14 02:10:02 thorpej Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -92,7 +92,7 @@ uvm_lock_fpageq()
 {
 	int s;
 
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.fpageqlock);
 	return (s);
 }
@@ -131,7 +131,7 @@ uvm_pagelookup(obj, off)
 
 	buck = &uvm.page_hash[uvm_pagehash(obj,off)];
 
-	s = splimp();
+	s = splvm();
 	simple_lock(&uvm.hashlock);
 	TAILQ_FOREACH(pg, buck, hashq) {
 		if (pg->uobject == obj && pg->offset == off) {
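
Note on the idiom (not part of the commit): every hunk above makes the same mechanical
substitution inside one recurring critical-section pattern. The sketch below condenses
that pattern into a single function for illustration; uvm.hashlock, struct pglist, and
TAILQ_INSERT_TAIL(..., hashq) are taken from the hunks, while example_pageinsert() itself
is a hypothetical name, not a function in the NetBSD tree.

	#include <sys/param.h>
	#include <sys/systm.h>		/* splvm(), splx() (via machine/intr.h) */
	#include <uvm/uvm.h>		/* struct vm_page, struct pglist, uvm.hashlock */

	/*
	 * Hypothetical sketch of the pattern this commit renames: data that
	 * may be touched from interrupt context (kmem_map via malloc(), the
	 * page hash, the kentry_free list) is guarded by raising the system
	 * interrupt priority level and then taking a simple lock.  splvm()
	 * is the level that blocks interrupt-time VM activity; it replaces
	 * the historical splimp() throughout the hunks above.
	 */
	void
	example_pageinsert(struct vm_page *pg, struct pglist *buck)
	{
		int s;

		s = splvm();			/* 1: block VM interrupts (was splimp()) */
		simple_lock(&uvm.hashlock);	/* 2: then take the spinlock */
		TAILQ_INSERT_TAIL(buck, pg, hashq);
		simple_unlock(&uvm.hashlock);	/* 3: drop the lock... */
		splx(s);			/* 4: ...then restore the saved level */
	}

The ordering is the point of the idiom: the priority level is raised before the lock is
taken and restored only after it is released, so an interrupt handler on the same CPU can
never run, and spin forever, on a lock held by the code it interrupted.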