Slight adjustment to last; move pmap_kenter_pa() to where the other
pmap_k* functions are.
thorpej 1999-07-28 01:17:01 +00:00
parent 31fa5c6f78
commit 9d4524778d
2 changed files with 50 additions and 72 deletions

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.75 1999/07/28 01:07:58 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.76 1999/07/28 01:17:01 thorpej Exp $ */
 /*
  *
@@ -599,9 +599,30 @@ pmap_unmap_ptes(pmap)
  * => should be faster than normal pmap enter function
  */
 
-/*
- * resides in pmap.h
- */
+void
+pmap_kenter_pa(va, pa, prot)
+	vaddr_t va;
+	paddr_t pa;
+	vm_prot_t prot;
+{
+	struct pmap *pm = pmap_kernel();
+	pt_entry_t *pte, opte;
+	int s;
+
+	s = splimp();
+	simple_lock(&pm->pm_obj.vmobjlock);
+	pm->pm_stats.resident_count++;
+	pm->pm_stats.wired_count++;
+	simple_unlock(&pm->pm_obj.vmobjlock);
+	splx(s);
+
+	pte = vtopte(va);
+	opte = *pte;
+	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+	    PG_V | pmap_pg_g;	/* zap! */
+	if (pmap_valid_entry(opte))
+		pmap_update_pg(va);
+}
 
 /*
  * pmap_kremove: remove a kernel mapping(s) without R/M (pv_entry) tracking
@@ -3542,38 +3563,6 @@ enter_now:
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-void
-pmap_kenter_pa(va, pa, prot)
-	vaddr_t va;
-	paddr_t pa;
-	vm_prot_t prot;
-{
-	struct pmap *pm = pmap_kernel();
-	pt_entry_t *pte, opte;
-	int s;
-
-	s = splimp();
-	simple_lock(&pm->pm_obj.vmobjlock);
-	pm->pm_stats.resident_count++;
-	pm->pm_stats.wired_count++;
-	simple_unlock(&pm->pm_obj.vmobjlock);
-	splx(s);
-
-	pte = vtopte(va);
-	opte = *pte;
-	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
-	    PG_V | pmap_pg_g;	/* zap! */
-	if (pmap_valid_entry(opte))
-		pmap_update_pg(va);
-}
-
 /*
  * pmap_growkernel: increase usage of KVM space
  *

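For context, the contract documented for pmap_kenter_pa() above is that the caller already owns the virtual address and that no pv_entry (referenced/modified) tracking is performed. As a hedged illustration of how a caller of that era might use the pmap_kenter_pa()/pmap_kremove() pair — this sketch is not part of the commit, and dev_pa is a hypothetical device physical address:

/*
 * Illustrative sketch only: map one page of a hypothetical device's
 * registers into kernel VA, use it, then unmap it.  uvm_km_valloc()
 * returns kernel VA with no backing pages, which satisfies the
 * "va is already allocated" precondition above.
 */
vaddr_t va = uvm_km_valloc(kernel_map, PAGE_SIZE);

pmap_kenter_pa(va, dev_pa, VM_PROT_READ | VM_PROT_WRITE);
/* ... access the device registers through va ... */
pmap_kremove(va, PAGE_SIZE);	/* no pv state to clean up */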
View File

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.41 1999/07/28 01:07:52 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.42 1999/07/28 01:17:03 thorpej Exp $ */
 /*
  *
@@ -578,9 +578,30 @@ pmap_unmap_ptes(pmap)
  * => should be faster than normal pmap enter function
  */
 
-/*
- * resides in pmap.h
- */
+void
+pmap_kenter_pa(va, pa, prot)
+	vaddr_t va;
+	paddr_t pa;
+	vm_prot_t prot;
+{
+	struct pmap *pm = pmap_kernel();
+	pt_entry_t *pte, opte;
+	int s;
+
+	s = splimp();
+	simple_lock(&pm->pm_obj.vmobjlock);
+	pm->pm_stats.resident_count++;
+	pm->pm_stats.wired_count++;
+	simple_unlock(&pm->pm_obj.vmobjlock);
+	splx(s);
+
+	pte = vtopte(va);
+	opte = *pte;
+	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+	    PG_V;	/* zap! */
+	if (pmap_valid_entry(opte))
+		pmap_update_pg(va);
+}
 
 /*
  * pmap_kremove: remove a kernel mapping(s) without R/M (pv_entry) tracking
@@ -3307,38 +3328,6 @@ enter_now:
 	cinv(ia, 0);
 }
 
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-void
-pmap_kenter_pa(va, pa, prot)
-	vaddr_t va;
-	paddr_t pa;
-	vm_prot_t prot;
-{
-	struct pmap *pm = pmap_kernel();
-	pt_entry_t *pte, opte;
-	int s;
-
-	s = splimp();
-	simple_lock(&pm->pm_obj.vmobjlock);
-	pm->pm_stats.resident_count++;
-	pm->pm_stats.wired_count++;
-	simple_unlock(&pm->pm_obj.vmobjlock);
-	splx(s);
-
-	pte = vtopte(va);
-	opte = *pte;
-	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
-	    PG_V;	/* zap! */
-	if (pmap_valid_entry(opte))
-		pmap_update_pg(va);
-}
-
 /*
  * pmap_growkernel: increase usage of KVM space
 *
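
The only functional difference between the two copies of the function is the PTE bit mask: the first (i386) file ORs in pmap_pg_g, a variable that holds PG_G on processors with global-page (PGE) support so that kernel mappings survive the TLB flush on context switch, and 0 otherwise; this second port's PTE format has no equivalent bit. A minimal sketch of that idea, assuming the i386 names cpu_feature, CPUID_PGE, and PG_G — the helper function itself is hypothetical (the real assignment happens during pmap bootstrap):

pt_entry_t pmap_pg_g = 0;	/* OR'd into every kernel PTE */

/*
 * Hypothetical helper (sketch only): enable global kernel PTEs
 * when the CPU advertises PGE in its feature flags.
 */
void
pmap_pg_g_init()
{

	if (cpu_feature & CPUID_PGE)
		pmap_pg_g = PG_G;	/* kernel mappings survive TLB flush */
}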