Some general code cleanup.

mycroft 1993-12-14 13:02:52 +00:00
parent fccc819b20
commit 13552373e2


@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.9 1993/12/14 05:31:25 mycroft Exp $
+ * $Id: pmap.c,v 1.10 1993/12/14 13:02:52 mycroft Exp $
*/
/*
@@ -184,6 +184,7 @@ boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, int));
/* XXX should be in a .h file somewhere */
+ #define PMAP_COPY_ON_WRITE(pa) pmap_changebit(pa, PG_RO, ~PG_RW)
#define PMAP_CLEAR_MODIFY(pa) pmap_changebit(pa, 0, ~PG_M)
#define PMAP_CLEAR_REFERENCE(pa) pmap_changebit(pa, 0, ~PG_U)
#define PMAP_IS_REFERENCED(pa) pmap_testbit(pa, PG_U)
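
The PMAP_* wrappers above all funnel into pmap_changebit(pa, setbits, maskbits); the call sites imply the convention "keep the bits in maskbits, then turn on setbits", so the new PMAP_COPY_ON_WRITE clears PG_RW and sets PG_RO, while the CLEAR macros pass 0 and just mask a bit off. A minimal userland sketch of that bit convention, with illustrative flag values standing in for the real i386 PTE bits:

#include <stdio.h>

/* Illustrative PTE flag values; the real ones live in the i386 pte.h. */
#define PG_V	0x001	/* valid */
#define PG_RO	0x000	/* "read-only" is the absence of PG_RW on the i386 */
#define PG_RW	0x002	/* writable */
#define PG_U	0x020	/* referenced ("used") */
#define PG_M	0x040	/* modified ("dirty") */

/* The pmap_changebit() convention implied by the macros above:
 * new PTE = (old PTE & maskbits) | setbits. */
static unsigned int
changebit(unsigned int pte, unsigned int setbits, unsigned int maskbits)
{
	return (pte & maskbits) | setbits;
}

int
main(void)
{
	unsigned int pte = PG_V | PG_RW | PG_U | PG_M;

	pte = changebit(pte, PG_RO, ~PG_RW);	/* PMAP_COPY_ON_WRITE */
	pte = changebit(pte, 0, ~PG_M);		/* PMAP_CLEAR_MODIFY */
	printf("pte = %#x: writable=%d modified=%d referenced=%d\n",
	    pte, !!(pte & PG_RW), !!(pte & PG_M), !!(pte & PG_U));
	return 0;
}
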
@@ -192,7 +193,7 @@ void pmap_changebit __P((vm_offset_t, int, int));
#define pmap_valid_page(pa) (pmap_initialized && pmap_page_index(pa) >= 0)
#if BSDVM_COMPAT
#include "msgbuf.h"
#include <sys/msgbuf.h>
/*
* All those kernel PT submaps that BSD is so fond of
@@ -210,8 +211,8 @@ struct pte *msgbufmap;
* and just syncs the pmap module with what has already been done.
* [We can't call it easily with mapping off since the kernel is not
* mapped with PA == VA, hence we would have to relocate every address
- * from the linked base (virtual) address 0xFE000000 to the actual
- * (physical) address starting relative to 0]
+ * from the linked base (virtual) address to the actual (physical)
+ * address starting relative to 0]
*/
struct pte *pmap_pte();
@@ -252,13 +253,13 @@ pmap_bootstrap(virtual_start)
* Create Kernel page directory table and page maps.
* [ currently done in locore. i have wild and crazy ideas -wfj ]
*/
- bzero(firstaddr, 4*NBPG);
+ bzero(firstaddr, (1+NKPDE)*NBPG);
kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
firstaddr += NBPG;
for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
- x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
+ x < i386_btod(VM_MIN_KERNEL_ADDRESS)+NKPDE; x++) {
struct pde *pde;
pde = kernel_pmap->pm_pdir + x;
*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
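
For each of the NKPDE kernel slots, the loop above stuffs a page-directory entry with the physical address of a page-table page (allocated contiguously after the directory) OR'd with its access flags; the low 12 bits of a page-aligned address are zero, so they are free to hold PG_V and PG_KW. A standalone sketch of that entry layout, with a hypothetical load address and illustrative NKPDE/PG_KW values:

#include <stdio.h>

#define NBPG	4096		/* i386 page size */
#define NKPDE	4		/* illustrative count of kernel PDEs */
#define PG_V	0x001		/* valid */
#define PG_KW	0x002		/* kernel read/write (illustrative value) */

int
main(void)
{
	/* Hypothetical page-aligned physical address of the first PT page. */
	unsigned int firstaddr = 0x00100000;
	int x;

	/*
	 * A PDE is the physical address of a page-table page with control
	 * flags OR'd into the low 12 bits, which are zero for any
	 * page-aligned address.
	 */
	for (x = 0; x < NKPDE; x++)
		printf("pde[%d] = %#010x\n", x,
		    (firstaddr + x * NBPG) | PG_V | PG_KW);
	return 0;
}
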
@@ -302,6 +303,7 @@ pmap_bootstrap(virtual_start)
isaphysmem = pmap_steal_memory(DMA_BOUNCE * NBPG);
#endif
/* undo temporary double mapping */
*(int *)PTD = 0;
tlbflush();
}
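
Locore leaves low physical memory mapped twice while paging is switched on: once at virtual 0, so the instruction fetch immediately after enabling paging still resolves, and once at the kernel's linked address. Clearing PTD slot 0 drops the alias, and the TLB flush discards the stale translation. A toy illustration of the two steps (mock names; the real PTD and tlbflush() are kernel-internal):

#include <stdio.h>

typedef unsigned int pd_entry_t;

/* Mock page directory; slot 0 holds the boot-time identity mapping. */
static pd_entry_t mock_PTD[1024];

static void
mock_tlbflush(void)
{
	/* The real tlbflush() reloads %cr3, discarding cached translations. */
	printf("TLB flushed\n");
}

int
main(void)
{
	mock_PTD[0] = 0x00000000 | 0x1;		/* hypothetical identity PDE */

	/*
	 * The kernel now runs entirely at its linked (high) address, so the
	 * alias at virtual 0 can go; the stale TLB entry must go with it.
	 */
	mock_PTD[0] = 0;
	mock_tlbflush();
	printf("PTD[0] = %#x\n", mock_PTD[0]);
	return 0;
}
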
@@ -359,23 +361,21 @@ pmap_init()
* specified memory.
*/
vm_offset_t
- pmap_map(virt, start, end, prot)
- vm_offset_t virt;
- vm_offset_t start;
- vm_offset_t end;
+ pmap_map(va, spa, epa, prot)
+ vm_offset_t va, spa, epa;
int prot;
{
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
printf("pmap_map(%x, %x, %x, %x)\n", va, spa, epa, prot);
#endif
- while (start < end) {
- pmap_enter(kernel_pmap, virt, start, prot, FALSE);
- virt += NBPG;
- start += NBPG;
+ while (spa < epa) {
+ pmap_enter(kernel_pmap, va, spa, prot, FALSE);
+ va += NBPG;
+ spa += NBPG;
}
- return virt;
+ return va;
}
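
pmap_map() wires [spa, epa) into kernel virtual space a page at a time and hands back the first virtual address past the new mapping, so boot code can chain calls, feeding each return value into the next. A self-contained sketch of that walk, with a mocked-up pmap_enter() and made-up addresses:

#include <stdio.h>

#define NBPG 4096
typedef unsigned long vm_offset_t;

/* Mock of the kernel's pmap_enter(): just log the mapping. */
static void
pmap_enter_mock(vm_offset_t va, vm_offset_t pa)
{
	printf("map va %#lx -> pa %#lx\n", va, pa);
}

/* Same page-at-a-time walk as pmap_map(): returns the next free va. */
static vm_offset_t
map_range(vm_offset_t va, vm_offset_t spa, vm_offset_t epa)
{
	while (spa < epa) {
		pmap_enter_mock(va, spa);
		va += NBPG;
		spa += NBPG;
	}
	return va;
}

int
main(void)
{
	vm_offset_t va = 0xfe400000UL;

	/* Chain two hypothetical ranges; each call starts where the last ended. */
	va = map_range(va, 0x100000UL, 0x103000UL);
	va = map_range(va, 0x200000UL, 0x201000UL);
	printf("next free va: %#lx\n", va);
	return 0;
}
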
/*
@@ -442,10 +442,10 @@ pmap_pinit(pmap)
pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
/* wire in kernel global address entries */
- bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPDE*4);
+ bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, NKPDE * sizeof(pd_entry_t));
/* install self-referential address mapping entry */
- *(int *)(pmap->pm_pdir+PTDPTDI) =
+ *(int *)(pmap->pm_pdir + PTDPTDI) =
(int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_KW;
pmap->pm_count = 1;
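
The "self-referential" entry installs the page directory's own physical address at slot PTDPTDI, which makes the directory act as a page table for itself: every PTE of the address space then shows up in one fixed 4MB virtual window, and the PTE for a given va can be located by pure arithmetic. A sketch of that address computation (the PTDPTDI value here is illustrative; the real index is in the i386 pmap headers):

#include <stdio.h>

#define PTDPTDI	0x3f6	/* illustrative slot; the real index is in pmap.h */

/*
 * With the directory doubling as a page table, all PTEs occupy the 4MB
 * window at PTDPTDI << 22, and the PTE for va sits at a fixed offset:
 * window base + (virtual page number) * sizeof(PTE).
 */
static unsigned long
vtopte(unsigned long va)
{
	return ((unsigned long)PTDPTDI << 22) + (va >> 12) * 4;
}

int
main(void)
{
	printf("PTE for va 0xfe000000 is at va %#lx\n", vtopte(0xfe000000UL));
	return 0;
}
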
@@ -495,8 +495,8 @@ pmap_release(pmap)
pg("pmap_release(%x)\n", pmap);
#endif
- /* sometimes 1, sometimes 0; could rearrange pmap_destroy */
#ifdef DIAGNOSTICx
+ /* sometimes 1, sometimes 0; could rearrange pmap_destroy */
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
@@ -512,16 +512,17 @@ pmap_reference(pmap)
pmap_t pmap;
{
- if (pmap == NULL)
- return;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_reference(%x)", pmap);
#endif
+ if (pmap != NULL) {
+ simple_lock(&pmap->pm_lock);
+ pmap->pm_count++;
+ simple_unlock(&pmap->pm_lock);
+ }
}
/*
@@ -575,8 +576,6 @@ pmap_remove(pmap, sva, eva)
continue;
pa = pmap_pte_pa(pte);
- if (!pmap_valid_page(pa))
- continue;
#ifdef DEBUG
remove_stats.removes++;
@@ -606,6 +605,9 @@ pmap_remove(pmap, sva, eva)
reduce wiring count on page table pages as references drop
#endif
+ if (!pmap_valid_page(pa))
+ continue;
/*
* Remove from the PV table (raise IPL since we
* may be called at interrupt time).
@@ -699,24 +701,6 @@ pmap_remove_all(pa)
splx(s);
}
- /*
- * Routine: pmap_copy_on_write
- * Function:
- * Remove write privileges from all
- * physical maps for this physical page.
- */
- void
- pmap_copy_on_write(pa)
- vm_offset_t pa;
- {
- #ifdef DEBUG
- if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
- printf("pmap_copy_on_write(%x)", pa);
- #endif
- pmap_changebit(pa, PG_RO, ~PG_RW);
- }
/*
* Set the physical protection on the
* specified range of this map as requested.
@@ -735,9 +719,6 @@ pmap_protect(pmap, sva, eva, prot)
printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
#endif
- if (pmap == NULL)
- return;
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1009,7 +990,7 @@ pmap_page_protect(phys, prot)
switch (prot) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
- pmap_copy_on_write(phys);
+ PMAP_COPY_ON_WRITE(phys);
break;
case VM_PROT_ALL:
break;
@@ -1146,12 +1127,11 @@ pmap_extract(pmap, va)
*
* This routine is only advisory and need not do anything.
*/
- void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
- pmap_t dst_pmap;
- pmap_t src_pmap;
- vm_offset_t dst_addr;
+ void
+ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap, src_pmap;
+ vm_offset_t dst_addr, src_addr;
vm_size_t len;
- vm_offset_t src_addr;
{
#ifdef DEBUG
@@ -1169,7 +1149,8 @@ void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
* Generally used to insure that a thread about
* to run will see a semantically correct world.
*/
- void pmap_update()
+ void
+ pmap_update()
{
#ifdef DEBUG
@@ -1202,10 +1183,9 @@ pmap_collect(pmap)
int s;
#ifdef DEBUG
int *pde;
- int opmapdebug;
printf("pmap_collect(%x) ", pmap);
#endif
if (pmap != kernel_pmap)
return;
@@ -1277,7 +1257,6 @@ pmap_copy_page(src, dst)
bcopy(CADDR1, CADDR2, NBPG);
}
/*
* Routine: pmap_pageable
* Function:
@@ -1333,10 +1312,10 @@ pmap_pageable(pmap, sva, eva, pageable)
pa = pmap_pte_pa(pte);
- #ifdef DEBUG
if (!pmap_valid_page(pa))
return;
+ #ifdef DEBUG
pv = pa_to_pvh(pa);
if (pv->pv_va != sva || pv->pv_next) {
pg("pmap_pageable: bad PT page va %x next %x\n",
@@ -1360,10 +1339,23 @@ pmap_pageable(pmap, sva, eva, pageable)
}
}
+ /*
+ * Routine: pmap_copy_on_write
+ * Function:
+ * Remove write privileges from all
+ * physical maps for this physical page.
+ */
+ void
+ pmap_copy_on_write(pa)
+ vm_offset_t pa;
+ {
+ PMAP_COPY_ON_WRITE(pa);
+ }
/*
* Clear the modify bits on the specified physical page.
*/
void
pmap_clear_modify(pa)
vm_offset_t pa;
@@ -1377,8 +1369,8 @@ pmap_clear_modify(pa)
*
* Clear the reference bit on the specified physical page.
*/
- void pmap_clear_reference(pa)
+ void
+ pmap_clear_reference(pa)
vm_offset_t pa;
{
@@ -1391,7 +1383,6 @@ void pmap_clear_reference(pa)
* Return whether or not the specified physical page is referenced
* by any physical maps.
*/
boolean_t
pmap_is_referenced(pa)
vm_offset_t pa;
@@ -1406,7 +1397,6 @@ pmap_is_referenced(pa)
* Return whether or not the specified physical page is modified
* by any physical maps.
*/
boolean_t
pmap_is_modified(pa)
vm_offset_t pa;
@@ -1431,13 +1421,13 @@ i386_protection_init()
{
protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] = 0;
- protection_codes[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =
- protection_codes[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =
- protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] = PG_RO;
- protection_codes[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =
- protection_codes[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =
- protection_codes[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =
- protection_codes[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] = PG_RW;
+ protection_codes[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =
+ protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_NONE] =
+ protection_codes[VM_PROT_NONE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RO;
+ protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_NONE] =
+ protection_codes[VM_PROT_WRITE | VM_PROT_NONE | VM_PROT_EXECUTE] =
+ protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_NONE] =
+ protection_codes[VM_PROT_WRITE | VM_PROT_READ | VM_PROT_EXECUTE] = PG_RW;
}
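
Both the old and new initializers fill the same eight-entry table: the index is the 3-bit VM_PROT mask, and every read/write/execute combination collapses to 0, PG_RO, or PG_RW, since the i386 PTE has no execute distinction. The reordering is purely cosmetic. A compact equivalent of that collapse, assuming the usual Mach VM_PROT bit values:

#include <stdio.h>

/* The conventional Mach VM_PROT bit values (defined in vm_prot.h). */
#define VM_PROT_NONE	0
#define VM_PROT_READ	1
#define VM_PROT_WRITE	2
#define VM_PROT_EXECUTE	4

#define PG_RO	0x000
#define PG_RW	0x002

static int protection_codes[8];

int
main(void)
{
	int prot;

	/* Same collapse the initializer performs: any write => PG_RW,
	 * read or execute alone => PG_RO, no access => 0. */
	for (prot = 0; prot < 8; prot++)
		protection_codes[prot] =
		    (prot & VM_PROT_WRITE) ? PG_RW :
		    (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) ? PG_RO : 0;

	for (prot = 0; prot < 8; prot++)
		printf("prot %d -> %#x\n", prot, protection_codes[prot]);
	return 0;
}
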
boolean_t