A bunch of changes:

- Free pte pages not in use anymore.
- Inline pmap_extract().
- Fix annoying page reference/modify bug. Fixes PR#7858 & PR#7859.
This commit is contained in:
ragge 1999-08-01 13:48:06 +00:00
parent 9ee533d1f6
commit 8f9615d62c
2 changed files with 252 additions and 158 deletions
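Editorial note, not part of the commit: of the three changes, the reference/modify fix (PR#7858/PR#7859) is the subtle one. pv_attr caches the PG_V (referenced) and PG_M (modified) bits of a physical page when its mappings are torn down, but the old test merged the hardware pte bits into the cache only when neither bit was cached yet, so a page with a cached reference bit could silently lose a later modify bit. A minimal sketch of the corrected test, as it appears in rensa() below (g[] points at the eight hardware ptes of one logical page):

	/* Merge hardware bits unless BOTH attribute bits are already cached. */
	if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))	/* old code: == 0 */
		pv->pv_attr |= g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];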

sys/arch/vax/include/pmap.h

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.36 1999/06/30 19:31:34 ragge Exp $ */
+/* $NetBSD: pmap.h,v 1.37 1999/08/01 13:48:07 ragge Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -43,12 +43,20 @@
*/
#ifndef PMAP_H
#define PMAP_H
#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>
struct pte;
/*
* Some constants to make life easier.
*/
#define LTOHPS (PGSHIFT - VAX_PGSHIFT)
#define LTOHPN (1 << LTOHPS)
#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
#define NPTEPGS (USRPTSIZE / (sizeof(struct pte) * LTOHPN))
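/*
 * (Editorial note, not in the commit: USRPTSIZE is the number of user
 * ptes that can ever be needed, and NPTEPGS sizes the new pm_refcnt[]
 * array below, one counter per page of user page-table space, so that
 * pmap_decpteref() in pmap.c can free a pte page once its last valid
 * mapping is gone.)
 */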
/*
* Pmap structure
@@ -57,13 +65,14 @@ struct pte;
typedef struct pmap {
vaddr_t pm_stack; /* Base of alloced p1 pte space */
int ref_count; /* reference count */
struct pte *pm_p0br; /* page 0 base register */
long pm_p0lr; /* page 0 length register */
struct pte *pm_p1br; /* page 1 base register */
long pm_p1lr; /* page 1 length register */
int pm_lock; /* Lock entry in MP environment */
struct pmap_statistics pm_stats; /* Some statistics */
u_char pm_refcnt[NPTEPGS]; /* Refcount per pte page */
} *pmap_t;
/*
@@ -72,24 +81,22 @@ typedef struct pmap {
*/
struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
struct pte *pv_pte; /* pte for this physical page */
struct pmap *pv_pmap; /* pmap this entry belongs to */
int pv_attr; /* write/modified bits */
};
/* ROUND_PAGE used before vm system is initialized */
#define ROUND_PAGE(x) (((uint)(x) + PGOFSET) & ~PGOFSET)
#define TRUNC_PAGE(x) ((uint)(x) & ~PGOFSET)
/* Mapping macros used when allocating SPT */
#define MAPVIRT(ptr, count) \
(vm_offset_t)ptr = virtual_avail; \
virtual_avail += (count) * VAX_NBPG;
#define MAPPHYS(ptr, count, perm) \
(vm_offset_t)ptr = avail_start + KERNBASE; \
avail_start += (count) * VAX_NBPG;
@@ -97,7 +104,7 @@ struct pv_entry {
extern struct pmap kernel_pmap_store;
#define pmap_kernel() (&kernel_pmap_store)
#endif /* _KERNEL */
@@ -105,21 +112,58 @@ extern struct pmap kernel_pmap_store;
* Real nice (fast) routines to get the virtual address of a physical page
* (and vice versa).
*/
#define PMAP_MAP_POOLPAGE(pa) ((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
#define PMAP_STEAL_MEMORY
/*
* This is by far the most used pmap routine. Make it inline.
*/
__inline static boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
paddr_t pa = 0;
int *pte, sva;
if (va & KERNBASE) {
pa = kvtophys(va); /* Is 0 if not mapped */
if (pap)
*pap = pa;
if (pa)
return (TRUE);
return (FALSE);
}
sva = PG_PFNUM(va);
if (va < 0x40000000) {
if (sva > (pmap->pm_p0lr & ~AST_MASK))
return FALSE;
pte = (int *)pmap->pm_p0br;
} else {
if (sva < pmap->pm_p1lr)
return FALSE;
pte = (int *)pmap->pm_p1br;
}
if (kvtopte(&pte[sva])->pg_pfn) {
if (pap)
*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
return (TRUE);
}
return (FALSE);
}
/* Routines that are best to define as macros */
#define pmap_phys_address(phys) ((u_int)(phys) << PGSHIFT)
#define pmap_unwire(pmap, v) /* no need */
#define pmap_copy(a,b,c,d,e) /* Dont do anything */
#define pmap_update() mtpr(0,PR_TBIA) /* Update buffers */
#define pmap_collect(pmap) /* No need so far */
#define pmap_remove(pmap, start, slut) pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_deactivate(p) /* Dont do anything */
#define pmap_reference(pmap) (pmap)->ref_count++
/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst) \
@@ -133,7 +177,7 @@ extern struct pmap kernel_pmap_store;
/* Prototypes */
void pmap_bootstrap __P((void));
vaddr_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_pinit __P((pmap_t));
#endif PMAP_H
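With pmap_extract() now a static inline in pmap.h, callers get the P0/P1/system-space translation without a function call. A hedged usage sketch follows; the calling code is illustrative only, not part of this commit, though the signature matches the inline above:

	paddr_t pa;

	/* Returns FALSE if va has no valid mapping in this pmap. */
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("unmapped kernel va");
	printf("pa = %lx\n", pa);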

sys/arch/vax/vax/pmap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.69 1999/07/10 22:04:59 ragge Exp $ */
+/* $NetBSD: pmap.c,v 1.70 1999/08/01 13:48:06 ragge Exp $ */
/*
* Copyright (c) 1994, 1998, 1999 Ludd, University of Luleå, Sweden.
* All rights reserved.
@@ -81,6 +81,7 @@ struct pmap kernel_pmap_store;
struct pte *Sysmap; /* System page table */
struct pv_entry *pv_table; /* array of entries, one per LOGICAL page */
int pventries;
void *scratch;
vaddr_t iospace;
@@ -108,6 +109,11 @@ volatile int recurse;
int startpmapdebug = 0;
#endif
#ifndef DEBUG
static inline
#endif
void pmap_decpteref __P((struct pmap *, struct pte *));
#ifndef PMAPDEBUG
static inline
#endif
@@ -118,6 +124,9 @@ vaddr_t virtual_avail, virtual_end; /* Available virtual memory */
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));
struct pv_entry *get_pventry __P((void));
void free_pventry __P((struct pv_entry *));
void more_pventries __P((void));
/*
* pmap_bootstrap().
@@ -327,40 +336,59 @@ pmap_init()
panic("pmap_init");
}
/*
* Decrement a reference to a pte page. If all references are gone,
* free the page.
*/
void
pmap_decpteref(pmap, pte)
struct pmap *pmap;
struct pte *pte;
{
paddr_t paddr;
int index;
if (pmap == pmap_kernel())
return;
index = ((vaddr_t)pte - (vaddr_t)pmap->pm_p0br) >> PGSHIFT;
pte = (struct pte *)trunc_page(pte);
#ifdef PMAPDEBUG
if (startpmapdebug)
printf("pmap_decpteref: pmap %p pte %p index %d refcnt %d\n",
pmap, pte, index, pmap->pm_refcnt[index]);
#endif
#ifdef DEBUG
if ((index < 0) || (index >= NPTEPGS))
panic("pmap_decpteref: bad index %d", index);
#endif
pmap->pm_refcnt[index]--;
#ifdef DEBUG
if (pmap->pm_refcnt[index] >= VAX_NBPG/sizeof(struct pte))
panic("pmap_decpteref");
#endif
if (pmap->pm_refcnt[index] == 0) {
paddr = kvtopte(pte)->pg_pfn << VAX_PGSHIFT;
uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
bzero(kvtopte(pte), sizeof(struct pte) * LTOHPN);
}
}
/*
* pmap_create() creates a pmap for a new task.
* If not already allocated, malloc space for one.
*/
#ifdef PMAP_NEW
struct pmap *
pmap_create()
{
struct pmap *pmap;
MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMPMAP, M_WAITOK);
bzero(pmap, sizeof(struct pmap));
pmap_pinit(pmap);
return(pmap);
}
#else
pmap_t
pmap_create(phys_size)
psize_t phys_size;
{
pmap_t pmap;
#ifdef PMAPDEBUG
if(startpmapdebug)printf("pmap_create: phys_size %x\n",(int)phys_size);
#endif
if (phys_size)
return NULL;
pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
bzero(pmap, sizeof(struct pmap));
pmap_pinit(pmap);
return (pmap);
}
#endif
/*
* Initialize a preallocated and zeroed pmap structure,
@@ -404,8 +432,10 @@ void
pmap_release(pmap)
struct pmap *pmap;
{
#ifdef DEBUG
vaddr_t saddr, eaddr;
paddr_t paddr;
int i;
#endif
#ifdef PMAPDEBUG
if(startpmapdebug)printf("pmap_release: pmap %p\n",pmap);
@@ -414,15 +444,18 @@ if(startpmapdebug)printf("pmap_release: pmap %p\n",pmap);
if (pmap->pm_p0br == 0)
return;
#ifdef DEBUG
for (i = 0; i < NPTEPGS; i++)
if (pmap->pm_refcnt[i])
panic("pmap_release: refcnt %d index %d",
pmap->pm_refcnt[i], i);
saddr = (vaddr_t)pmap->pm_p0br;
eaddr = saddr + USRPTSIZE * sizeof(struct pte);
-for (; saddr < eaddr; saddr += NBPG) {
-paddr = (kvtopte(saddr)->pg_pfn << VAX_PGSHIFT);
-if (paddr == 0)
-continue; /* page not mapped */
-bzero(kvtopte(saddr), sizeof(struct pte) * 8); /* XXX */
-uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
-}
+for (; saddr < eaddr; saddr += NBPG)
+if (kvtopte(saddr)->pg_pfn)
+panic("pmap_release: page mapped");
#endif
extent_free(ptemap, (u_long)pmap->pm_p0br,
USRPTSIZE * sizeof(struct pte), EX_WAITOK);
mtpr(0, PR_TBIA);
@@ -453,7 +486,7 @@ if(startpmapdebug)printf("pmap_destroy: pmap %p\n",pmap);
if (count == 0) {
pmap_release(pmap);
-free((caddr_t)pmap, M_VMPMAP);
+FREE((caddr_t)pmap, M_VMPMAP);
}
}
@@ -477,7 +510,7 @@ if (startpmapdebug)
RECURSESTART;
if (pv->pv_pte == ptp) {
g = (int *)pv->pv_pte;
-if ((pv->pv_attr & (PG_V|PG_M)) == 0)
+if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
pv->pv_attr |= g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
pv->pv_pte = 0;
pv->pv_pmap->pm_stats.resident_count--;
@@ -491,11 +524,11 @@ if (startpmapdebug)
pf = pl->pv_next;
pl->pv_next = pl->pv_next->pv_next;
g = (int *)pf->pv_pte;
-if ((pv->pv_attr & (PG_V|PG_M)) == 0)
+if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
pv->pv_attr |=
g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
pf->pv_pmap->pm_stats.resident_count--;
-FREE(pf, M_VMPVENT);
+free_pventry(pf);
splx(s);
RECURSEEND;
return;
@@ -504,7 +537,6 @@ if (startpmapdebug)
panic("rensa");
}
-#ifdef PMAP_NEW
/*
* New (real nice!) function that allocates memory in kernel space
* without tracking it in the MD code.
@@ -597,7 +629,6 @@ if(startpmapdebug)
}
mtpr(0, PR_TBIA);
}
-#endif
/*
* pmap_enter() is the main routine that puts in mappings for pages, or
@@ -614,7 +645,7 @@ pmap_enter(pmap, v, p, prot, wired, access_type)
boolean_t wired;
{
struct pv_entry *pv, *tmp;
-int i, s, newpte, oldpte, *patch;
+int i, s, newpte, oldpte, *patch, index = 0; /* XXX gcc */
#ifdef PMAPDEBUG
if (startpmapdebug)
@@ -650,15 +681,38 @@ if (startpmapdebug)
newpte = (p >> VAX_PGSHIFT) |
(prot & VM_PROT_WRITE ? PG_RW : PG_RO);
}
-/* Check for PTE page */
-if (kvtopte(&patch[i])->pg_pfn == 0) {
/*
* Check if a pte page must be mapped in.
*/
index = ((u_int)&patch[i] - (u_int)pmap->pm_p0br) >> PGSHIFT;
#ifdef DIAGNOSTIC
if ((index < 0) || (index >= NPTEPGS))
panic("pmap_enter: bad index %d", index);
#endif
if (pmap->pm_refcnt[index] == 0) {
vaddr_t ptaddr = trunc_page(&patch[i]);
paddr_t phys;
struct vm_page *pg;
#ifdef DEBUG
if (kvtopte(&patch[i])->pg_pfn)
panic("pmap_enter: refcnt == 0");
#endif
/*
* It seems to be legal to sleep here to wait for
* pages; at least some other ports do so.
*/
for (;;) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg != NULL)
break;
if (pmap == pmap_kernel())
panic("pmap_enter: no free pages");
else
uvm_wait("pmap_enter");
}
-pg = uvm_pagealloc(NULL, 0, NULL, 0);
-if (pg == NULL)
-panic("pmap_ptefault"); /* XXX */
phys = VM_PAGE_TO_PHYS(pg);
bzero((caddr_t)(phys|KERNBASE), NBPG);
pmap_kenter_pa(ptaddr, phys,
@@ -666,19 +720,6 @@ if (startpmapdebug)
}
}
-#if 0
-/*
- * Map in a pte page, if needed.
- */
-if (kvtopte(&patch[i])->pg_pfn == 0) {
-int g;
-g = pmap_ptefault(pmap, v);
-if (g)
-panic("pmap_enter: %d\n", g);
-}
-#endif
oldpte = patch[i] & ~(PG_V|PG_M);
/* No mapping change. Can this happen??? */
@@ -695,33 +736,20 @@ if (startpmapdebug)
/*
* Mapped before? Remove it then.
* This can be done more efficiently than pmap_page_protect().
*/
if (oldpte) {
RECURSEEND;
-pmap_page_protect(PHYS_TO_VM_PAGE((oldpte
-<< VAX_PGSHIFT)), 0);
+rensa(oldpte >> LTOHPS, (struct pte *)&patch[i]);
RECURSESTART;
}
} else if (pmap != pmap_kernel())
pmap->pm_refcnt[index]++; /* New mapping */
s = splimp();
if (pv->pv_pte == 0) {
pv->pv_pte = (struct pte *) & patch[i];
pv->pv_pmap = pmap;
} else {
/*
* We may recurse here if we're out of pv entries.
* Hopefully it will call pmap_kenter instead and
* otherwise we may get into trouble.
*/
RECURSEEND;
-MALLOC(tmp, struct pv_entry *, sizeof(struct pv_entry),
-M_VMPVENT, M_NOWAIT);
RECURSESTART;
-#ifdef DIAGNOSTIC
-if (tmp == 0)
-panic("pmap_enter: cannot alloc pv_entry");
-#endif
+tmp = get_pventry();
tmp->pv_pte = (struct pte *)&patch[i];
tmp->pv_pmap = pmap;
tmp->pv_next = pv->pv_next;
@@ -750,6 +778,13 @@ if (startpmapdebug)
patch[i+6] = newpte+6;
patch[i+7] = newpte+7;
RECURSEEND;
#ifdef DEBUG
if (pmap != pmap_kernel())
if (pmap->pm_refcnt[index] > VAX_NBPG/sizeof(struct pte))
panic("pmap_enter: refcnt %d", pmap->pm_refcnt[index]);
#endif
if (pventries < 10)
more_pventries();
}
void *
@@ -796,6 +831,7 @@ if(startpmapdebug)
return(virtuell+(count-pstart)+0x80000000);
}
+#if 0
boolean_t
pmap_extract(pmap, va, pap)
pmap_t pmap;
@@ -835,7 +871,7 @@ if(startpmapdebug)printf("pmap_extract: pmap %p, va %lx\n",pmap, va);
}
return (FALSE);
}
+#endif
/*
* Sets protection for a given region to prot. If prot == none then
* unmap region. pmap_remove is implemented as pmap_protect with
@@ -901,20 +937,16 @@ if(startpmapdebug) printf("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n"
panic("pmap_remove: ptd not even");
#endif
-if (prot == VM_PROT_NONE) {
-while (pts < ptd) {
-if (kvtopte(pts)->pg_pfn && *(int *)pts) {
+while (pts < ptd) {
+if (kvtopte(pts)->pg_pfn && *(int *)pts) {
+if (prot == VM_PROT_NONE) {
RECURSEEND;
if ((*(int *)pts & PG_SREF) == 0)
rensa(pts->pg_pfn >> LTOHPS, pts);
RECURSESTART;
bzero(pts, sizeof(struct pte) * LTOHPN);
}
pts += LTOHPN;
}
-} else {
-while (pts < ptd) {
-if (kvtopte(pts)->pg_pfn && *(int *)pts) {
+pmap_decpteref(pmap, pts);
+} else {
pts[0].pg_prot = pr;
pts[1].pg_prot = pr;
pts[2].pg_prot = pr;
@@ -924,8 +956,8 @@ if(startpmapdebug) printf("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n"
pts[6].pg_prot = pr;
pts[7].pg_prot = pr;
}
pts += LTOHPN;
}
RECURSEEND;
mtpr(0,PR_TBIA);
@@ -991,18 +1023,11 @@ if (startpmapdebug)
/*
* Checks if page is referenced; returns true or false depending on result.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_is_referenced(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-boolean_t
-pmap_is_referenced(pa)
-paddr_t pa;
-{
-#endif
struct pv_entry *pv;
pv = pv_table + (pa >> PGSHIFT);
@@ -1020,18 +1045,11 @@ pmap_is_referenced(pa)
/*
* Clears valid bit in all ptes referenced to this physical page.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_clear_reference(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-void
-pmap_clear_reference(pa)
-paddr_t pa;
-{
-#endif
struct pv_entry *pv;
pv = pv_table + (pa >> PGSHIFT);
@@ -1055,26 +1073,17 @@ pmap_clear_reference(pa)
pv->pv_pte[4].pg_v = pv->pv_pte[5].pg_v =
pv->pv_pte[6].pg_v = pv->pv_pte[7].pg_v = 0;
RECURSEEND;
-#ifdef PMAP_NEW
return TRUE; /* XXX */
-#endif
}
/*
* Checks if page is modified; returns true or false depending on result.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_is_modified(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-boolean_t
-pmap_is_modified(pa)
-paddr_t pa;
-{
-#endif
struct pv_entry *pv;
pv = pv_table + (pa >> PGSHIFT);
@@ -1083,8 +1092,13 @@ pmap_is_modified(pa)
printf("pmap_is_modified: pa %lx pv_entry %p ", pa, pv);
#endif
-if (pv->pv_attr & PG_M)
+if (pv->pv_attr & PG_M) {
#ifdef PMAPDEBUG
if (startpmapdebug)
printf("Yes: (0)\n");
#endif
return 1;
}
if (pv->pv_pte)
if ((pv->pv_pte[0].pg_m | pv->pv_pte[1].pg_m
@@ -1117,18 +1131,11 @@ pmap_is_modified(pa)
/*
* Clears modify bit in all ptes referenced to this physical page.
*/
-#ifdef PMAP_NEW
boolean_t
pmap_clear_modify(pg)
struct vm_page *pg;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
-#else
-void
-pmap_clear_modify(pa)
-paddr_t pa;
-{
-#endif
struct pv_entry *pv;
pv = pv_table + (pa >> PGSHIFT);
@@ -1150,9 +1157,7 @@ pmap_clear_modify(pa)
pv->pv_pte[2].pg_m = pv->pv_pte[3].pg_m =
pv->pv_pte[4].pg_m = pv->pv_pte[5].pg_m =
pv->pv_pte[6].pg_m = pv->pv_pte[7].pg_m = 0;
-#ifdef PMAP_NEW
return TRUE; /* XXX */
-#endif
}
/*
@@ -1160,7 +1165,6 @@ pmap_clear_modify(pa)
* Lower permission can only mean setting protection to either read-only
* or none; where none is unmapping of the page.
*/
-#ifdef PMAP_NEW
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
@@ -1178,18 +1182,6 @@ if(startpmapdebug) printf("pmap_page_protect: pg %p, prot %x, ",pg, prot);
if(startpmapdebug) printf("pa %lx\n",pa);
#endif
-#else
-void
-pmap_page_protect(pa, prot)
-paddr_t pa;
-vm_prot_t prot;
-{
-struct pte *pt;
-struct pv_entry *pv, *opv, *pl;
-int s, *g;
-#endif
pv = pv_table + (pa >> PGSHIFT);
if (pv->pv_pte == 0 && pv->pv_next == 0)
return;
@@ -1199,29 +1191,31 @@ pmap_page_protect(pa, prot)
RECURSESTART;
if (prot == VM_PROT_NONE) {
-g = (int *)pv->pv_pte;
s = splimp();
+g = (int *)pv->pv_pte;
if (g) {
-if ((pv->pv_attr & (PG_V|PG_M)) == 0)
+if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
pv->pv_attr |=
g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
bzero(g, sizeof(struct pte) * LTOHPN);
-pv->pv_pte = 0;
pv->pv_pmap->pm_stats.resident_count--;
+pmap_decpteref(pv->pv_pmap, pv->pv_pte);
+pv->pv_pte = 0;
}
pl = pv->pv_next;
pv->pv_pmap = 0;
pv->pv_next = 0;
while (pl) {
g = (int *)pl->pv_pte;
-if ((pv->pv_attr & (PG_V|PG_M)) == 0)
+if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
pv->pv_attr |=
g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
bzero(g, sizeof(struct pte) * LTOHPN);
pl->pv_pmap->pm_stats.resident_count--;
+pmap_decpteref(pl->pv_pmap, pl->pv_pte);
opv = pl;
pl = pl->pv_next;
-FREE(opv, M_VMPVENT);
+free_pventry(opv);
}
splx(s);
} else { /* read-only */
@@ -1274,3 +1268,59 @@ if(startpmapdebug) printf("pmap_activate: p %p\n", p);
}
mtpr(0, PR_TBIA);
}
struct pv_entry *pv_list;
struct pv_entry *
get_pventry()
{
struct pv_entry *tmp;
int s = splimp();
if (pventries == 0)
panic("get_pventry");
tmp = pv_list;
pv_list = tmp->pv_next;
pventries--;
splx(s);
return tmp;
}
void
free_pventry(pv)
struct pv_entry *pv;
{
int s = splimp();
pv->pv_next = pv_list;
pv_list = pv;
pventries++;
splx(s);
}
void
more_pventries()
{
struct vm_page *pg;
struct pv_entry *pv;
vaddr_t v;
int s, i, count;
pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == 0)
return;
v = VM_PAGE_TO_PHYS(pg) | KERNBASE;
pv = (struct pv_entry *)v;
count = NBPG/sizeof(struct pv_entry);
for (i = 0; i < count; i++)
pv[i].pv_next = &pv[i + 1];
s = splimp();
pv[count - 1].pv_next = pv_list;
pv_list = pv;
pventries += count;
splx(s);
}
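The pv_entry freelist above replaces the MALLOC/FREE pairs that rensa(), pmap_enter() and pmap_page_protect() used before: get_pventry() is a constant-time pop at splimp(), and pmap_enter() calls more_pventries() whenever pventries drops below 10, so the list should not be empty by the time get_pventry() would otherwise panic. A self-contained userland sketch of the same pattern, with generic names and plain malloc() standing in for uvm_pagealloc(); illustrative only, not the kernel code:

#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; };

static struct entry *freelist;
static int nentries;

static struct entry *
get_entry(void)
{
	struct entry *e = freelist;

	if (e == NULL)
		abort();	/* the kernel version panics here */
	freelist = e->next;
	nentries--;
	return (e);
}

static void
free_entry(struct entry *e)
{
	e->next = freelist;
	freelist = e;
	nentries++;
}

static void
more_entries(int n)	/* the kernel version carves up one fresh page */
{
	struct entry *chunk;
	int i;

	if ((chunk = malloc(n * sizeof(*chunk))) == NULL)
		return;		/* like uvm_pagealloc() failing: try again later */
	for (i = 0; i < n - 1; i++)
		chunk[i].next = &chunk[i + 1];
	chunk[n - 1].next = freelist;
	freelist = chunk;
	nentries += n;
}

int
main(void)
{
	struct entry *e;

	more_entries(16);
	e = get_entry();
	free_entry(e);
	printf("%d entries on the freelist\n", nentries);
	return (0);
}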