Support dynamic sizing of the page color bins.  We also support
dynamically re-coloring pages; as machine-dependent code discovers
the size of the system's caches, it may call uvm_page_recolor() with
the new number of colors to use.  If the new number of colors is
smaller than (or equal to) the current number of colors,
uvm_page_recolor() is a no-op.

The system defaults to one bucket if machine-dependent code does not
initialize uvmexp.ncolors before uvm_page_init() is called.

Note that the number of color bins should be initialized to something
reasonable as early as possible -- for many early memory allocations,
we live with the consequences of the page choice for the lifetime of
the boot.
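
As a rough, hypothetical sketch (not part of this change), machine-dependent
code that has just probed its cache might compute a color count and hand it
to UVM along the following lines.  The helper name and cache parameters are
invented; only uvmexp.ncolors, uvm_page_recolor(), and the power-of-two
requirement come from the interface described above.

/*
 * Hypothetical machine-dependent helper (assumes the usual UVM
 * headers): derive the page color count from the way size of a
 * physically-indexed cache, rounded up to a power of two.
 */
void
cpu_set_page_colors(u_long cache_size, u_long assoc)
{
	u_long waysize = cache_size / assoc;
	int ncolors = 1;

	while ((u_long)ncolors * PAGE_SIZE < waysize)
		ncolors <<= 1;

	/*
	 * Before uvm_page_init() runs, MD code would instead set
	 * uvmexp.ncolors = ncolors directly; later in boot,
	 * uvm_page_recolor() grows the bucket array and is a no-op
	 * when ncolors <= uvmexp.ncolors.
	 */
	uvm_page_recolor(ncolors);
}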
thorpej 2001-05-02 01:22:19 +00:00
parent 05dac31b1e
commit 31fafb678f
5 changed files with 134 additions and 39 deletions

sys/uvm/uvm_extern.h

@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.61 2001/05/01 19:36:56 thorpej Exp $ */
/* $NetBSD: uvm_extern.h,v 1.62 2001/05/02 01:22:19 thorpej Exp $ */
/*
*
@ -252,6 +252,9 @@ struct uvmexp {
int paging; /* number of pages in the process of being paged out */
int wired; /* number of wired pages */
int ncolors; /* number of page color buckets: must be p-o-2 */
int colormask; /* color bucket mask */
/*
* Adding anything before this line will break binary compatibility
* with top(1) on NetBSD 1.5.
@ -310,7 +313,6 @@ struct uvmexp {
aborted */
int colorhit; /* pagealloc where we got optimal color */
int colormiss; /* pagealloc where we didn't */
int ncolors; /* number of page color buckets */
/* fault subcounters */
int fltnoram; /* number of times fault was out of ram */

sys/uvm/uvm_page.c

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.59 2001/05/01 19:36:57 thorpej Exp $ */
/* $NetBSD: uvm_page.c,v 1.60 2001/05/02 01:22:20 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -125,6 +125,16 @@ static vaddr_t virtual_space_end;
static struct pglist uvm_bootbucket;
/*
* we allocate an initial number of page colors in uvm_page_init(),
* and remember them. We may re-color pages as cache sizes are
* discovered during the autoconfiguration phase. But we can never
* free the initial set of buckets, since they are allocated using
* uvm_pageboot_alloc().
*/
static boolean_t have_recolored_pages /* = FALSE */;
/*
* local prototypes
*/
@ -202,6 +212,19 @@ uvm_pageremove(pg)
pg->version++;
}
static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
int color, i;
for (color = 0; color < uvmexp.ncolors; color++) {
for (i = 0; i < PGFL_NQUEUES; i++) {
TAILQ_INIT(&pgfl->pgfl_buckets[
color].pgfl_queues[i]);
}
}
}
/*
* uvm_page_init: init the page system. called from uvm_init().
*
@ -212,24 +235,18 @@ void
uvm_page_init(kvm_startp, kvm_endp)
vaddr_t *kvm_startp, *kvm_endp;
{
vsize_t freepages, pagecount, n;
vsize_t freepages, pagecount, bucketcount, n;
struct pgflbucket *bucketarray;
vm_page_t pagearray;
int lcv, color, i;
int lcv, i;
paddr_t paddr;
/*
* init the page queues and page queue locks
* init the page queues and page queue locks, except the free
* list; we allocate that later (with the initial vm_page
* structures).
*/
uvmexp.ncolors = VM_PGCOLOR_BUCKETS;
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
for (color = 0; color < VM_PGCOLOR_BUCKETS; color++) {
for (i = 0; i < PGFL_NQUEUES; i++) {
TAILQ_INIT(&uvm.page_free[lcv].pgfl_buckets[
color].pgfl_queues[i]);
}
}
}
TAILQ_INIT(&uvm.page_active);
TAILQ_INIT(&uvm.page_inactive_swp);
TAILQ_INIT(&uvm.page_inactive_obj);
@ -274,6 +291,14 @@ uvm_page_init(kvm_startp, kvm_endp)
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
/*
* Let MD code initialize the number of colors, or default
* to 1 color if MD code doesn't care.
*/
if (uvmexp.ncolors == 0)
uvmexp.ncolors = 1;
uvmexp.colormask = uvmexp.ncolors - 1;
/*
* we now know we have (PAGE_SIZE * freepages) bytes of memory we can
* use. for each page of memory we use we need a vm_page structure.
@ -284,10 +309,21 @@ uvm_page_init(kvm_startp, kvm_endp)
* pages).
*/
bucketcount = uvmexp.ncolors * VM_NFREELIST;
pagecount = ((freepages + 1) << PAGE_SHIFT) /
(PAGE_SIZE + sizeof(struct vm_page));
pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
sizeof(struct vm_page));
bucketarray = (void *) uvm_pageboot_alloc((bucketcount *
sizeof(struct pgflbucket)) + (pagecount *
sizeof(struct vm_page)));
pagearray = (struct vm_page *)(bucketarray + bucketcount);
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
uvm.page_free[lcv].pgfl_buckets =
(bucketarray + (lcv * uvmexp.ncolors));
uvm_page_init_buckets(&uvm.page_free[lcv]);
}
memset(pagearray, 0, pagecount * sizeof(struct vm_page));
/*
@ -841,6 +877,76 @@ uvm_page_rehash()
return;
}
/*
* uvm_page_recolor: Recolor the pages if the new bucket count is
* larger than the old one.
*/
void
uvm_page_recolor(int newncolors)
{
struct pgflbucket *bucketarray, *oldbucketarray;
struct pgfreelist pgfl;
vm_page_t pg;
vsize_t bucketcount;
int s, lcv, color, i, ocolors;
if (newncolors <= uvmexp.ncolors)
return;
bucketcount = newncolors * VM_NFREELIST;
bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
M_VMPAGE, M_NOWAIT);
if (bucketarray == NULL) {
printf("WARNING: unable to allocate %ld page color buckets\n",
(long) bucketcount);
return;
}
s = uvm_lock_fpageq();
/* Make sure we should still do this. */
if (newncolors <= uvmexp.ncolors) {
uvm_unlock_fpageq(s);
free(bucketarray, M_VMPAGE);
return;
}
oldbucketarray = uvm.page_free[0].pgfl_buckets;
ocolors = uvmexp.ncolors;
uvmexp.ncolors = newncolors;
uvmexp.colormask = uvmexp.ncolors - 1;
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
uvm_page_init_buckets(&pgfl);
for (color = 0; color < ocolors; color++) {
for (i = 0; i < PGFL_NQUEUES; i++) {
while ((pg = TAILQ_FIRST(&uvm.page_free[
lcv].pgfl_buckets[color].pgfl_queues[i]))
!= NULL) {
TAILQ_REMOVE(&uvm.page_free[
lcv].pgfl_buckets[
color].pgfl_queues[i], pg, pageq);
TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
i], pg, pageq);
}
}
}
uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
}
if (have_recolored_pages) {
uvm_unlock_fpageq(s);
free(oldbucketarray, M_VMPAGE);
return;
}
have_recolored_pages = TRUE;
uvm_unlock_fpageq(s);
}
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
@ -891,7 +997,7 @@ uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
if ((pg = TAILQ_FIRST((freeq =
&pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
goto gotit;
color = (color + 1) & VM_PGCOLOR_MASK;
color = (color + 1) & uvmexp.colormask;
} while (color != trycolor);
return (NULL);
@ -1046,7 +1152,7 @@ uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
* We now know which color we actually allocated from; set
* the next color accordingly.
*/
uvm.page_free_nextcolor = (color + 1) & VM_PGCOLOR_MASK;
uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;
/*
* update allocation statistics and remember if we have to
@ -1420,7 +1526,7 @@ uvm_pageidlezero()
}
}
nextbucket = (nextbucket + 1) & VM_PGCOLOR_MASK;
nextbucket = (nextbucket + 1) & uvmexp.colormask;
} while (nextbucket != firstbucket);
uvm_unlock_fpageq(s);

sys/uvm/uvm_page.h

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.h,v 1.23 2001/05/01 03:01:18 thorpej Exp $ */
/* $NetBSD: uvm_page.h,v 1.24 2001/05/02 01:22:20 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -273,6 +273,7 @@ void uvm_page_own __P((struct vm_page *, char *));
boolean_t uvm_page_physget __P((paddr_t *));
#endif
void uvm_page_rehash __P((void));
void uvm_page_recolor __P((int));
void uvm_pageidlezero __P((void));
PAGE_INLINE int uvm_lock_fpageq __P((void));
@ -312,11 +313,9 @@ static int vm_physseg_find __P((paddr_t, int *));
/*
* Compute the page color bucket for a given page.
*
* The constants we use here come from <uvm/uvm_param.h>.
*/
#define VM_PGCOLOR_BUCKET(pg) \
(atop(VM_PAGE_TO_PHYS((pg))) & VM_PGCOLOR_MASK)
(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
/*
* when VM_PHYSSEG_MAX is 1, we can simplify these functions

sys/uvm/uvm_param.h

@ -1,4 +1,4 @@
/* $NetBSD: uvm_param.h,v 1.8 2001/04/29 04:23:21 thorpej Exp $ */
/* $NetBSD: uvm_param.h,v 1.9 2001/05/02 01:22:20 thorpej Exp $ */
/*
* Copyright (c) 1991, 1993
@ -101,18 +101,6 @@ typedef int boolean_t;
#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */
#endif /* _KERNEL */
/*
* Page coloring parameters. We let machine dependent code tell us how
* many buckets to create. If it doesn't, we assume it doesn't want to
* do coloring, so we disable it (set the number of buckets to 1).
*
* Note: the number of buckets must be a power of two.
*/
#ifndef VM_PGCOLOR_BUCKETS
#define VM_PGCOLOR_BUCKETS 1
#endif
#define VM_PGCOLOR_MASK (VM_PGCOLOR_BUCKETS - 1)
/*
* CTL_VM identifiers
*/

sys/uvm/uvm_pglist.h

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pglist.h,v 1.2 2001/04/29 04:23:21 thorpej Exp $ */
/* $NetBSD: uvm_pglist.h,v 1.3 2001/05/02 01:22:20 thorpej Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@ -58,7 +58,7 @@ struct pgflbucket {
};
struct pgfreelist {
struct pgflbucket pgfl_buckets[VM_PGCOLOR_BUCKETS];
struct pgflbucket *pgfl_buckets;
};
#endif /* _PGLIST_H_ */
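
For orientation (this snippet is illustrative, not part of the change): with
pgfl_buckets now a pointer, the bucket storage is a single flat array of
uvmexp.ncolors * VM_NFREELIST pgflbucket structures, and each free list's
pgfl_buckets points at its own ncolors-long slice, so a page's bucket is
selected by free list and color.

/*
 * Illustrative lookup only: how the flat array carved out in
 * uvm_page_init() / uvm_page_recolor() is indexed --
 *
 *	bucketarray[lcv * uvmexp.ncolors + color]
 *		== uvm.page_free[lcv].pgfl_buckets[color]
 */
static struct pgflbucket *
uvm_pgfl_bucket_of(struct pgfreelist *pgfl, struct vm_page *pg)
{
	/* The page's physical address determines its color, hence its bucket. */
	return (&pgfl->pgfl_buckets[VM_PGCOLOR_BUCKET(pg)]);
}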