change uvm_winsize to uvm_winshift so that we can avoid division by a non-constant value.
chs 2001-03-19 00:29:03 +00:00
parent e47583af2c
commit c40daf0aed
3 changed files with 26 additions and 23 deletions
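
The rationale, in short: an expression like "offset / ubc_winsize" divides by a value that is only known at run time, so the compiler generally has to emit a real division, whereas "offset >> ubc_winshift" yields the same window index with a single cheap shift because the window size is a power of two. The stand-alone C sketch below is illustrative only and is not part of the commit; the names winshift, winsize, offset, idx_div and idx_shift are invented for the example, while UBC_WINSHIFT and ubc_winsize refer to the identifiers touched by the diff.

#include <stdio.h>

int
main(void)
{
	int winshift = 13;                        /* log2 of the window size, cf. UBC_WINSHIFT */
	unsigned long winsize = 1UL << winshift;  /* 8192, cf. ubc_winsize = 1 << ubc_winshift */
	unsigned long offset = 0x12345;

	/* old style: division by a non-constant value */
	unsigned long idx_div = offset / winsize;

	/* new style: the same index computed with a shift */
	unsigned long idx_shift = offset >> winshift;

	printf("div %lu, shift %lu\n", idx_div, idx_shift);  /* both print 9 */
	return 0;
}

The diff applies this substitution throughout uvm_bio.c and derives ubc_winsize once, in ubc_init(), as 1 << ubc_winshift for the places that still need the byte size.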

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: param.h,v 1.8 2001/02/03 17:18:30 bjh21 Exp $ */
+/* $NetBSD: param.h,v 1.9 2001/03/19 00:29:03 chs Exp $ */
 /*
  * Copyright (c) 1994,1995 Mark Brinicombe.
@@ -114,8 +114,8 @@
  * Reduce UBC KVM usage from its default (8Mb when I looked).
  * Also make sure the window size is at least the page size.
  */
-#ifndef UBC_WINSIZE
-#define UBC_WINSIZE NBPG
+#ifndef UBC_WINSHIFT
+#define UBC_WINSHIFT PAGE_SHIFT
 #endif
 #ifndef UBC_NWINS
 #define UBC_NWINS 32

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: param.h,v 1.122 2001/02/14 18:21:42 eeh Exp $ */
+/* $NetBSD: param.h,v 1.123 2001/03/19 00:29:04 chs Exp $ */
 /*-
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -256,8 +256,8 @@
  * Defaults for Unified Buffer Cache parameters.
  */
-#ifndef UBC_WINSIZE
-#define UBC_WINSIZE 8192
+#ifndef UBC_WINSHIFT
+#define UBC_WINSHIFT 13
 #endif
 #ifndef UBC_NWINS
 #define UBC_NWINS 1024

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_bio.c,v 1.10 2001/03/17 04:01:02 chs Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.11 2001/03/19 00:29:04 chs Exp $ */
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -66,7 +66,7 @@ static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));
 (((u_long)(offset)) >> PAGE_SHIFT)) & \
 ubc_object.hashmask)
-#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) / ubc_winsize) & \
+#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) >> ubc_winshift) & \
 (UBC_NQUEUES - 1)])
 struct ubc_map
@@ -105,7 +105,8 @@ struct uvm_pagerops ubc_pager =
 };
 int ubc_nwins = UBC_NWINS;
-int ubc_winsize = UBC_WINSIZE;
+int ubc_winshift = UBC_WINSHIFT;
+int ubc_winsize;
 #ifdef PMAP_PREFER
 int ubc_nqueues;
 boolean_t ubc_release_unmap = FALSE;
@@ -152,14 +153,15 @@ ubc_init(void)
 va = (vaddr_t)1L;
 #ifdef PMAP_PREFER
 PMAP_PREFER(0, &va);
-if (va < ubc_winsize) {
-va = ubc_winsize;
+ubc_nqueues = va >> ubc_winshift;
+if (ubc_nqueues == 0) {
+ubc_nqueues = 1;
 }
-ubc_nqueues = va / ubc_winsize;
 if (ubc_nqueues != 1) {
 ubc_release_unmap = TRUE;
 }
 #endif
+ubc_winsize = 1 << ubc_winshift;
 ubc_object.inactive = malloc(UBC_NQUEUES *
 sizeof(struct ubc_inactive_head),
 M_TEMP, M_NOWAIT);
@@ -181,7 +183,7 @@ ubc_init(void)
 }
 if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
-ubc_nwins * ubc_winsize, &ubc_object.uobj, 0, (vsize_t)va,
+ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
 panic("ubc_init: failed to map ubc_object\n");
@@ -193,7 +195,7 @@ ubc_init(void)
 /*
  * ubc_fault: fault routine for ubc mapping
  */
-static int
+int
 ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
 struct uvm_faultinfo *ufi;
 vaddr_t ign1;
@@ -208,7 +210,7 @@ ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
 struct ubc_map *umap;
 vaddr_t va, eva, ubc_offset, slot_offset;
 int i, error, rv, npages;
-struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
+struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg;
 UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
 /*
@@ -231,7 +233,7 @@ ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
 UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx at %d",
 va, ubc_offset, access_type,0);
-umap = &ubc_object.umap[ubc_offset / ubc_winsize];
+umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
 KASSERT(umap->refcount != 0);
 slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));
@@ -336,7 +338,7 @@ again:
  * local functions
  */
-static struct ubc_map *
+struct ubc_map *
 ubc_find_mapping(uobj, offset)
 struct uvm_object *uobj;
 voff_t offset;
@@ -412,7 +414,7 @@ again:
 umap, hash);
 va = (vaddr_t)(ubc_object.kva +
-(umap - ubc_object.umap) * ubc_winsize);
+((umap - ubc_object.umap) << ubc_winshift));
 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
 }
@@ -436,10 +438,11 @@ again:
 splx(s);
 UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
 umap, umap->refcount,
-ubc_object.kva + (umap - ubc_object.umap) * ubc_winsize,0);
+ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift),
+0);
 return ubc_object.kva +
-(umap - ubc_object.umap) * ubc_winsize + slot_offset;
+((umap - ubc_object.umap) << ubc_winshift) + slot_offset;
 }
@@ -458,7 +461,7 @@ ubc_release(va, wlen)
 s = splbio();
 simple_lock(&ubc_object.uobj.vmobjlock);
-umap = &ubc_object.umap[((char *)va - ubc_object.kva) / ubc_winsize];
+umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
 uobj = umap->uobj;
 KASSERT(uobj != NULL);
@@ -482,7 +485,7 @@
 */
 va = (vaddr_t)(ubc_object.kva +
-(umap - ubc_object.umap) * ubc_winsize);
+((umap - ubc_object.umap) << ubc_winshift));
 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
 LIST_REMOVE(umap, hash);
 umap->uobj = NULL;
@@ -535,7 +538,7 @@ ubc_flush(uobj, start, end)
 */
 va = (vaddr_t)(ubc_object.kva +
-(umap - ubc_object.umap) * ubc_winsize);
+((umap - ubc_object.umap) << ubc_winshift));
 pmap_remove(pmap_kernel(), va, va + ubc_winsize);
 LIST_REMOVE(umap, hash);