Add another allocator to uvm_pglistalloc(), used in the case where no
alignment / boundary / nsegs restrictions apply.
This one doesn't insist on a contiguous range, and it honours the "waitok"
flag, so it succeeds in situations that were hopeless with the existing one.
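
For illustration only (not part of the diff below): a hypothetical bus_dma-style
caller with no special constraints could now allocate roughly like this and
sleep for free pages instead of failing outright ("sz", "avail_start",
"avail_end" and "waitok" are placeholder names, not identifiers from this
commit):

	/*
	 * Hypothetical sketch: gather sz bytes (assumed a multiple of
	 * PAGE_SIZE) worth of pages with no alignment or boundary
	 * constraint.  With nsegs == atop(sz), alignment == PAGE_SIZE and
	 * boundary == 0, the new non-contiguous allocator is chosen, and a
	 * nonzero waitok lets it sleep in uvm_wait() rather than return
	 * ENOMEM.
	 */
	struct pglist mlist;
	int error;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(sz, avail_start, avail_end, PAGE_SIZE, 0,
	    &mlist, atop(sz), waitok);
	if (error)
		return (error);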

(A solution that searches for a minimum number of contiguous ranges using a
best-fit or similar algorithm would be expensive to implement; I believe the
"either-or" done here reflects the current use by bus_dma quite well.)

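In code, that "either-or" amounts to the following dispatch, a condensed
sketch of what the diff below adds to uvm_pglistalloc() (size and low have
already been rounded to page granularity at this point):

	/*
	 * Condensed from the diff: fall back to the old contiguous
	 * allocator only when the caller really needs contiguity (fewer
	 * segments than pages), a larger alignment, or a boundary;
	 * otherwise use the new simple allocator, which may sleep when
	 * "waitok" is set.
	 */
	if ((nsegs < size / PAGE_SIZE) || (alignment != PAGE_SIZE)
	    || (boundary != 0))
		res = uvm_pglistalloc_contig(size, low, high, alignment,
		    boundary, rlist);
	else
		res = uvm_pglistalloc_simple(size, low, high, rlist, waitok);
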
Now agp memory allocation is robust for me. (tested on i810)
drochner 2002-05-29 19:20:11 +00:00
parent 4e357d1878
commit f452b252a8
1 changed file with 156 additions and 54 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_pglist.c,v 1.19 2001/11/10 07:37:01 lukem Exp $ */
/* $NetBSD: uvm_pglist.c,v 1.20 2002/05/29 19:20:11 drochner Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.19 2001/11/10 07:37:01 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.20 2002/05/29 19:20:11 drochner Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -84,34 +84,65 @@ u_long uvm_pglistalloc_npages;
* power-of-two boundary (relative to zero).
*/
int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
psize_t size;
paddr_t low, high, alignment, boundary;
static void uvm_pglist_add(struct vm_page *, struct pglist *);
static int uvm_pglistalloc_contig(psize_t, paddr_t, paddr_t, paddr_t, paddr_t,
struct pglist *);
static int uvm_pglistalloc_simple(psize_t, paddr_t, paddr_t,
struct pglist *, int);
static void
uvm_pglist_add(pg, rlist)
struct vm_page *pg;
struct pglist *rlist;
int nsegs, waitok;
{
paddr_t try, idxpa, lastidxpa;
int psi;
struct vm_page *pgs, *pg;
int s, tryidx, idx, pgflidx, end, error, free_list, color;
u_long pagemask;
int free_list, color, pgflidx;
#ifdef DEBUG
struct vm_page *tp;
#endif
KASSERT((alignment & (alignment - 1)) == 0);
KASSERT((boundary & (boundary - 1)) == 0);
#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif
/*
* Our allocations are always page granularity, so our alignment
* must be, too.
*/
free_list = uvm_page_lookup_freelist(pg);
color = VM_PGCOLOR_BUCKET(pg);
pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == pg)
break;
}
if (tp == NULL)
panic("uvm_pglistalloc: page not on freelist");
#endif
TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
color].pgfl_queues[pgflidx], pg, pageq);
uvmexp.free--;
if (pg->flags & PG_ZERO)
uvmexp.zeropages--;
pg->flags = PG_CLEAN;
pg->pqflags = 0;
pg->uobject = NULL;
pg->uanon = NULL;
TAILQ_INSERT_TAIL(rlist, pg, pageq);
STAT_INCR(uvm_pglistalloc_npages);
}
static int
uvm_pglistalloc_contig(size, low, high, alignment, boundary, rlist)
psize_t size;
paddr_t low, high, alignment, boundary;
struct pglist *rlist;
{
paddr_t try, idxpa, lastidxpa;
int psi;
struct vm_page *pgs;
int s, tryidx, idx, end, error;
u_long pagemask;
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
size = round_page(size);
try = roundup(low, alignment);
if (boundary != 0 && boundary < size)
return (EINVAL);
pagemask = ~(boundary - 1);
@@ -129,7 +160,7 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
goto out;
for (;; try += alignment) {
for (try = low;; try += alignment) {
if (try + size > high) {
/*
@@ -186,42 +217,12 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
}
}
#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif
/*
* we have a chunk of memory that conforms to the requested constraints.
*/
idx = tryidx;
while (idx < end) {
pg = &pgs[idx];
free_list = uvm_page_lookup_freelist(pg);
color = VM_PGCOLOR_BUCKET(pg);
pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
for (tp = TAILQ_FIRST(&uvm.page_free[
free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
tp != NULL;
tp = TAILQ_NEXT(tp, pageq)) {
if (tp == pg)
break;
}
if (tp == NULL)
panic("uvm_pglistalloc: page not on freelist");
#endif
TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
color].pgfl_queues[pgflidx], pg, pageq);
uvmexp.free--;
if (pg->flags & PG_ZERO)
uvmexp.zeropages--;
pg->flags = PG_CLEAN;
pg->pqflags = 0;
pg->uobject = NULL;
pg->uanon = NULL;
TAILQ_INSERT_TAIL(rlist, pg, pageq);
idx++;
STAT_INCR(uvm_pglistalloc_npages);
uvm_pglist_add(&pgs[idx++], rlist);
}
error = 0;
@@ -236,6 +237,107 @@ out:
return (error);
}
static int
uvm_pglistalloc_simple(size, low, high, rlist, waitok)
psize_t size;
paddr_t low, high;
struct pglist *rlist;
int waitok;
{
psize_t try;
int psi;
struct vm_page *pg;
int s, todo, idx, error;
/* Default to "lose". */
error = ENOMEM;
todo = size / PAGE_SIZE;
again:
/*
* Block all memory allocation and lock the free list.
*/
s = uvm_lock_fpageq();
/* Are there even any free pages? */
if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
goto out;
for (try = low; try < high; try += PAGE_SIZE) {
/*
* Make sure this is a managed physical page.
*/
if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
continue; /* managed? */
pg = &vm_physmem[psi].pgs[idx];
if (VM_PAGE_IS_FREE(pg) == 0)
continue;
uvm_pglist_add(pg, rlist);
if (--todo == 0) {
error = 0;
goto out;
}
}
out:
/*
* check to see if we need to generate some free pages waking
* the pagedaemon.
*/
UVM_KICK_PDAEMON();
uvm_unlock_fpageq(s);
if (error) {
if (waitok) {
/* XXX perhaps some time limitation? */
#ifdef DEBUG
printf("pglistalloc waiting\n");
#endif
uvm_wait("pglalloc");
goto again;
} else
uvm_pglistfree(rlist);
}
return (error);
}
int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
psize_t size;
paddr_t low, high, alignment, boundary;
struct pglist *rlist;
int nsegs, waitok;
{
int res;
KASSERT((alignment & (alignment - 1)) == 0);
KASSERT((boundary & (boundary - 1)) == 0);
/*
* Our allocations are always page granularity, so our alignment
* must be, too.
*/
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
size = round_page(size);
low = roundup(low, alignment);
if ((nsegs < size / PAGE_SIZE) || (alignment != PAGE_SIZE)
|| (boundary != 0))
res = uvm_pglistalloc_contig(size, low, high, alignment,
boundary, rlist);
else
res = uvm_pglistalloc_simple(size, low, high, rlist, waitok);
return (res);
}
/*
* uvm_pglistfree: free a list of pages
*