implement pmap_wired_count(). fix some places where we would forget to splx().

move the parts of pte.h that were used into pmap.c (since they were really
pmap-internal details) and delete ibm4xx/pte.h.  other misc cleanup.
chs 2005-01-16 21:35:58 +00:00
parent c306ebdd4c
commit bfbf7ea9b1
4 changed files with 76 additions and 151 deletions
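
For orientation before reading the diff: the core of the change is a per-pmap wired-page counter that pmap_enter(), pmap_remove_pv() and pmap_unwire() keep in sync, with the pv-list manipulation bracketed by splvm()/splx(). The sketch below is illustrative only (the type and function names are simplified stand-ins, not the committed NetBSD code); it shows the bookkeeping pattern the diff implements.

/*
 * Illustrative sketch (not the committed code): wired-count
 * bookkeeping in the style of the pmap.c changes below.
 */
struct pmap_statistics_sketch {
	long	resident_count;		/* pages currently mapped */
	long	wired_count;		/* pages entered with PMAP_WIRED */
};

struct pmap_sketch {
	struct pmap_statistics_sketch pm_stats;
};

/* pmap_wired_count() reduces to an accessor for the cached counter. */
#define	pmap_wired_count_sketch(pm)	((pm)->pm_stats.wired_count)

/* pmap_enter() path: count the mapping once when it is entered wired. */
static void
sketch_wire(struct pmap_sketch *pm)
{
	/* int s = splvm();   raise IPL around pv-list and stat updates */
	pm->pm_stats.wired_count++;
	/* splx(s);           and restore it on every exit path */
}

/*
 * pmap_remove_pv()/pmap_unwire() path: only decrement when the pv
 * entry was actually marked wired, so the counter cannot go negative.
 */
static void
sketch_unwire(struct pmap_sketch *pm, int pv_was_wired)
{
	if (pv_was_wired)
		pm->pm_stats.wired_count--;
}

The unwire side mirrors the diff's PV_ISWIRED() test: pmap_unwire() and pmap_remove_pv() only touch wired_count when the pv entry really carries the PV_WIRED bit.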

View File

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.29 2004/07/31 13:28:53 simonb Exp $ */
/* $NetBSD: pmap.c,v 1.30 2005/01/16 21:35:58 chs Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.29 2004/07/31 13:28:53 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30 2005/01/16 21:35:58 chs Exp $");
#include <sys/param.h>
#include <sys/malloc.h>
@@ -146,6 +146,8 @@ static char *pmap_attrib;
#define PV_WIRED 0x1
#define PV_WIRE(pv) ((pv)->pv_va |= PV_WIRED)
#define PV_UNWIRE(pv) ((pv)->pv_va &= ~PV_WIRED)
#define PV_ISWIRED(pv) ((pv)->pv_va & PV_WIRED)
#define PV_CMPVA(va,pv) (!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))
struct pv_entry {
@@ -167,9 +169,7 @@ static inline char *pa_to_attr(paddr_t);
static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);
static void pmap_pinit(pmap_t);
static void pmap_release(pmap_t);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, boolean_t);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
@@ -207,7 +207,8 @@ pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
if (!pm->pm_ptbl[seg]) {
/* Don't allocate a page to clear a non-existent mapping. */
if (!pte) return (0);
if (!pte)
return (0);
/* Allocate a page XXXX this will sleep! */
pm->pm_ptbl[seg] =
(uint *)uvm_km_zalloc(kernel_map, PAGE_SIZE);
@@ -503,8 +504,7 @@ extern void vm_page_free1 __P((struct vm_page *));
vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
vaddr_t
pmap_growkernel(maxkvaddr)
vaddr_t maxkvaddr;
pmap_growkernel(vaddr_t maxkvaddr)
{
int s;
int seg;
@@ -518,7 +518,8 @@ pmap_growkernel(maxkvaddr)
kbreak += PTMAP) {
seg = STIDX(kbreak);
if (pte_find(pm, kbreak)) continue;
if (pte_find(pm, kbreak))
continue;
if (uvm.page_init_done) {
pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
@@ -542,9 +543,11 @@ pmap_growkernel(maxkvaddr)
* Allocate and return a memory cell with no associated object.
*/
struct vm_page *
vm_page_alloc1()
vm_page_alloc1(void)
{
struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
struct vm_page *pg;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg) {
pg->wire_count = 1; /* no mappings yet */
pg->flags &= ~PG_BUSY; /* never busy */
@@ -561,8 +564,7 @@ vm_page_alloc1()
* Object and page must be locked prior to entry.
*/
void
vm_page_free1(mem)
struct vm_page *mem;
vm_page_free1(struct vm_page *mem)
{
#ifdef DIAGNOSTIC
if (mem->flags != (PG_CLEAN|PG_FAKE)) {
@@ -588,26 +590,10 @@ pmap_create(void)
{
struct pmap *pm;
pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
memset((caddr_t)pm, 0, sizeof *pm);
pmap_pinit(pm);
return pm;
}
/*
* Initialize a preallocated and zeroed pmap structure.
*/
void
pmap_pinit(struct pmap *pm)
{
int i;
/*
* Allocate some segment registers for this pmap.
*/
pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
memset(pm, 0, sizeof *pm);
pm->pm_refs = 1;
for (i = 0; i < STSZ; i++)
pm->pm_ptbl[i] = NULL;
return pm;
}
/*
@@ -626,30 +612,23 @@ pmap_reference(struct pmap *pm)
*/
void
pmap_destroy(struct pmap *pm)
{
if (--pm->pm_refs == 0) {
pmap_release(pm);
free((caddr_t)pm, M_VMPMAP);
}
}
/*
* Release any resources held by the given physical map.
* Called when a pmap initialized by pmap_pinit is being released.
*/
static void
pmap_release(struct pmap *pm)
{
int i;
if (--pm->pm_refs > 0) {
return;
}
KASSERT(pm->pm_stats.resident_count == 0);
KASSERT(pm->pm_stats.wired_count == 0);
for (i = 0; i < STSZ; i++)
if (pm->pm_ptbl[i]) {
uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
PAGE_SIZE);
pm->pm_ptbl[i] = NULL;
}
if (pm->pm_ctx) ctx_free(pm);
if (pm->pm_ctx)
ctx_free(pm);
free(pm, M_VMPMAP);
}
/*
@@ -721,7 +700,7 @@ pmap_copy_page(paddr_t src, paddr_t dst)
* This returns whether this is the first mapping of a page.
*/
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, boolean_t wired)
{
struct pv_entry *pv, *npv = NULL;
int s;
@@ -730,17 +709,7 @@ pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
return 0;
s = splvm();
pv = pa_to_pv(pa);
for (npv = pv; npv; npv = npv->pv_next)
if (npv->pv_va == va && npv->pv_pm == pm) {
printf("Duplicate pv: va %lx pm %p\n", va, pm);
#ifdef DDB
Debugger();
#endif
return (1);
}
if (!pv->pv_pm) {
/*
* No entries yet, use header as the first entry.
@@ -754,12 +723,14 @@ return (1);
* Place this entry after the header.
*/
npv = pool_get(&pv_pool, PR_WAITOK);
if (!npv) return (0);
npv->pv_va = va;
npv->pv_pm = pm;
npv->pv_next = pv->pv_next;
pv->pv_next = npv;
}
if (wired) {
PV_WIRE(pv);
}
splx(s);
return (1);
}
@@ -773,7 +744,8 @@ pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
* Remove from the PV table.
*/
pv = pa_to_pv(pa);
if (!pv) return;
if (!pv)
return;
/*
* If it is the first entry on the list, it is actually
@@ -782,6 +754,9 @@ pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
* the entry. In either case we free the now unused entry.
*/
if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
if (PV_ISWIRED(pv)) {
pm->pm_stats.wired_count--;
}
if ((npv = pv->pv_next)) {
*pv = *npv;
pool_put(&pv_pool, npv);
@@ -793,6 +768,9 @@ pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
break;
if (npv) {
pv->pv_next = npv->pv_next;
if (PV_ISWIRED(npv)) {
pm->pm_stats.wired_count--;
}
pool_put(&pv_pool, npv);
}
}
@@ -813,11 +791,8 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
*/
pmap_remove(pm, va, va + PAGE_SIZE);
if (flags & PMAP_WIRED) flags |= prot;
/* If it has no protections don't bother w/the rest */
if (!(flags & VM_PROT_ALL))
return (0);
if (flags & PMAP_WIRED)
flags |= prot;
managed = 0;
if (vm_physseg_find(atop(pa), NULL) != -1)
@@ -861,7 +836,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (pmap_initialized && managed) {
char *attr;
if (!pmap_enter_pv(pm, va, pa)) {
if (!pmap_enter_pv(pm, va, pa, flags & PMAP_WIRED)) {
/* Could not enter pv on a managed page */
return 1;
}
@@ -873,9 +848,9 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
panic("managed but no attr");
#endif
if (flags & VM_PROT_ALL)
*attr |= PTE_HI_REF;
*attr |= PMAP_ATTR_REF;
if (flags & VM_PROT_WRITE)
*attr |= PTE_HI_CHG;
*attr |= PMAP_ATTR_CHG;
}
s = splvm();
@@ -893,6 +868,9 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
__syncicache((void *)pa, PAGE_SIZE);
if (flags & PMAP_WIRED)
pm->pm_stats.wired_count++;
return 0;
}
@@ -901,20 +879,15 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
{
struct pv_entry *pv, *npv;
paddr_t pa;
int s = splvm();
if (pm == NULL) {
return;
}
int s;
if (!pmap_extract(pm, va, &pa)) {
return;
}
va |= PV_WIRED;
pv = pa_to_pv(pa);
if (!pv) return;
if (!pv)
return;
/*
* If it is the first entry on the list, it is actually
@@ -922,9 +895,13 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
* to the header. Otherwise we must search the list for
* the entry. In either case we free the now unused entry.
*/
s = splvm();
for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) {
if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
npv->pv_va &= ~PV_WIRED;
if (PV_ISWIRED(npv)) {
PV_UNWIRE(npv);
pm->pm_stats.wired_count--;
}
break;
}
}
@@ -1028,8 +1005,9 @@ pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
int seg = STIDX(va);
int ptn = PTIDX(va);
u_int pa = 0;
int s = splvm();
int s;
s = splvm();
if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
*pap = TTE_PA(pa) | (va & PGOFSET);
}
@@ -1075,25 +1053,25 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
}
boolean_t
check_attr(struct vm_page *pg, u_int mask, int clear)
pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
int s;
paddr_t pa;
char *attr;
int rv;
int s, rv;
/*
* First modify bits in cache.
*/
s = splvm();
pa = VM_PAGE_TO_PHYS(pg);
attr = pa_to_attr(pa);
if (attr == NULL)
return FALSE;
s = splvm();
rv = ((*attr & mask) != 0);
if (clear) {
*attr &= ~mask;
pmap_page_protect(pg, (mask == PTE_HI_CHG) ? VM_PROT_READ : 0);
pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
}
splx(s);
return rv;

View File

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.9 2004/08/31 01:06:12 simonb Exp $ */
/* $NetBSD: pmap.h,v 1.10 2005/01/16 21:35:59 chs Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@@ -69,7 +69,6 @@
#ifndef _IBM4XX_PMAP_H_
#define _IBM4XX_PMAP_H_
#include <powerpc/ibm4xx/pte.h>
#include <powerpc/ibm4xx/tlb.h>
#define KERNEL_PID 1 /* TLB PID to use for kernel translation */
@@ -152,19 +151,23 @@ typedef struct pmap *pmap_t;
extern struct pmap kernel_pmap_;
#define pmap_kernel() (&kernel_pmap_)
#define pmap_clear_modify(pg) (check_attr((pg), PTE_HI_CHG, 1))
#define pmap_clear_reference(pg) (check_attr((pg), PTE_HI_REF, 1))
#define pmap_is_modified(pg) (check_attr((pg), PTE_HI_CHG, 0))
#define pmap_is_referenced(pg) (check_attr((pg), PTE_HI_REF, 0))
#define PMAP_ATTR_REF 0x1
#define PMAP_ATTR_CHG 0x2
#define pmap_clear_modify(pg) (pmap_check_attr((pg), PMAP_ATTR_CHG, 1))
#define pmap_clear_reference(pg)(pmap_check_attr((pg), PMAP_ATTR_REF, 1))
#define pmap_is_modified(pg) (pmap_check_attr((pg), PMAP_ATTR_CHG, 0))
#define pmap_is_referenced(pg) (pmap_check_attr((pg), PMAP_ATTR_REF, 0))
#define pmap_phys_address(x) (x)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
void pmap_unwire(struct pmap *pm, vaddr_t va);
void pmap_bootstrap(u_int kernelstart, u_int kernelend);
boolean_t pmap_extract(struct pmap *, vaddr_t, paddr_t *);
boolean_t check_attr(struct vm_page *, u_int, int);
boolean_t pmap_check_attr(struct vm_page *, u_int, int);
void pmap_real_memory(paddr_t *, psize_t *);
int pmap_tlbmiss(vaddr_t va, int ctx);

View File

@@ -1,54 +0,0 @@
/* $NetBSD: pte.h,v 1.1 2001/06/13 06:01:49 simonb Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
* Copyright (C) 1995, 1996 TooLs GmbH.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IBM4XX_PTE_H_
#define _IBM4XX_PTE_H_
#include <powerpc/ibm4xx/tlb.h>
/*
* Page Table Entries
*/
#ifndef _LOCORE
struct pte {
u_int pte_hi;
u_int pte_lo;
};
typedef struct pte pte_t;
#endif /* _LOCORE */
/* Pmap mananged reference/modified bits */
#define PTE_HI_REF 0x00000020
#define PTE_HI_CHG 0x00000040
#endif /* _IBM4XX_PTE_H_ */

View File

@@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.6 2003/02/03 17:10:02 matt Exp $ */
/* $NetBSD: pte.h,v 1.7 2005/01/16 21:35:59 chs Exp $ */
#ifdef _KERNEL_OPT
#include "opt_ppcarch.h"
@@ -6,6 +6,4 @@
#ifdef PPC_OEA
#include <powerpc/oea/pte.h>
#elif defined(PPC_IBM4XX)
#include <powerpc/ibm4xx/pte.h>
#endif