vm_offset_t -> vaddr_t or paddr_t, as appropriate.

Purge register qualifiers.
bcopy() -> memcpy(), bzero() -> memset()
No longer try to protect the kernel text with uvm_map_protect;
pmap_bootstrap has already done this.
Now use pmap_kenter_pa/pmap_kremove when dumping, instead
of pmap_enter/pmap_remove.
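
As background for the first item, vm_offset_t was historically used for both
virtual and physical addresses; vaddr_t and paddr_t make that distinction
explicit in prototypes and declarations. A minimal sketch, not taken from this
diff (pmap_extract is shown with its UVM-era signature, and the helper name is
hypothetical):

	/* Hypothetical helper, only to show the two address types. */
	static paddr_t
	kva_to_pa(vaddr_t va)
	{
		paddr_t pa;

		if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
			panic("kva_to_pa: unmapped VA");
		return (pa);
	}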
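
The bcopy()/bzero() conversion is not a plain rename: bcopy() takes
(src, dst, len) while memcpy() takes (dst, src, len), and bzero(p, len)
becomes memset(p, 0, len). A hypothetical call site, before and after
(not code from machdep.c):

	/* was: bzero(page, NBPG); */
	memset(page, 0, NBPG);
	/* was: bcopy(hdr, page, hdrlen); -- note the swapped src/dst order */
	memcpy(page, hdr, hdrlen);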
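
The dump-path change replaces the general pmap_enter()/pmap_remove() pair with
pmap_kenter_pa()/pmap_kremove(), which enter and remove unmanaged, wired
kernel mappings and are specified never to fail, a useful property while
taking a crash dump. A condensed sketch of the new per-page pattern, reusing
the identifiers from the dumpsys() hunk below (error handling omitted):

	/* Map the next physical page, uncached, at the scratch VA. */
	pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
	pmap_update(pmap_kernel());

	/* Write one page of the dump through that mapping. */
	error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);

	/* Drop the temporary mapping again. */
	pmap_kremove(vmmap, NBPG);
	pmap_update(pmap_kernel());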
fredette 2001-11-30 18:10:27 +00:00
parent bbb634ca9e
commit d925ceec5b
1 changed file with 17 additions and 27 deletions


@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.11 2001/09/10 21:19:40 chris Exp $ */
/* $NetBSD: machdep.c,v 1.12 2001/11/30 18:10:27 fredette Exp $ */
/*
* Copyright (c) 2001 Matthew Fredette.
@@ -208,7 +208,7 @@ int fputype;
caddr_t msgbufaddr;
/* Virtual page frame for /dev/mem (see mem.c) */
vm_offset_t vmmap;
vaddr_t vmmap;
/*
* safepri is a safe priority for sleep to set for a spin-wait
@@ -230,7 +230,7 @@ label_t *nofault;
static struct extent *dvmamap;
/* Our private scratch page for dumping the MMU. */
static vm_offset_t dumppage;
static vaddr_t dumppage;
static void identifycpu __P((void));
static void initcpu __P((void));
@@ -248,9 +248,9 @@ cpu_startup()
{
caddr_t v;
int sz, i;
vm_size_t size;
vsize_t size;
int base, residual;
vm_offset_t minaddr, maxaddr;
vaddr_t minaddr, maxaddr;
char pbuf[9];
/*
@@ -319,12 +319,12 @@ cpu_startup()
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != 0)
panic("startup: cannot allocate VM for buffers");
minaddr = (vm_offset_t)buffers;
minaddr = (vaddr_t)buffers;
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
bufpages = btoc(MAXBSIZE) * nbuf;
@@ -332,8 +332,8 @@ cpu_startup()
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
vm_size_t curbufsize;
vm_offset_t curbuf;
vsize_t curbufsize;
vaddr_t curbuf;
struct vm_page *pg;
/*
@@ -342,7 +342,7 @@ cpu_startup()
* for the first "residual" buffers, and then we allocate
* "base" pages for the rest.
*/
curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
while (curbufsize) {
@@ -384,15 +384,6 @@ cpu_startup()
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
/*
* Tell the VM system that writing to kernel text isn't allowed.
* If we don't, we might end up COW'ing the text segment!
*/
if (uvm_map_protect(kernel_map, (vm_offset_t) kernel_text,
m68k_trunc_page((vm_offset_t) etext),
UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
panic("can't protect kernel text");
/*
* Allocate a virtual page (for use by /dev/mem)
* This page is handed to pmap_enter() therefore
@@ -684,7 +675,7 @@ cpu_dumpconf()
/* Note: gdb looks for "dumppcb" in a kernel crash dump. */
struct pcb dumppcb;
extern vm_offset_t avail_start;
extern paddr_t avail_start;
/*
* Write a crash dump. The format while in swap is:
@@ -702,7 +693,7 @@ dumpsys()
cpu_kcore_hdr_t *chdr_p;
struct sun2_kcore_hdr *sh;
char *vaddr;
vm_offset_t paddr;
paddr_t paddr;
int psize, todo, chunk;
daddr_t blkno;
int error = 0;
@@ -741,7 +732,7 @@ dumpsys()
blkno = dumplo;
todo = dumpsize; /* pages */
vaddr = (char*)dumppage;
bzero(vaddr, NBPG);
memset(vaddr, 0, NBPG);
/* Set pointers to all three parts. */
kseg_p = (kcore_seg_t *)vaddr;
@@ -808,11 +799,10 @@ dumpsys()
do {
if ((todo & 0xf) == 0)
printf("\r%4d", todo);
pmap_enter(pmap_kernel(), vmmap, paddr | PMAP_NC,
VM_PROT_READ, VM_PROT_READ);
pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
pmap_update(pmap_kernel());
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
pmap_remove(pmap_kernel(), vmmap, vmmap + NBPG);
pmap_kremove(vmmap, NBPG);
pmap_update(pmap_kernel());
if (error)
goto fail;
@@ -862,7 +852,7 @@ cpu_exec_aout_makecmds(p, epp)
void isr_soft_request(level)
int level;
{
register u_char bit;
u_char bit;
if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
return;
@@ -874,7 +864,7 @@ void isr_soft_request(level)
void isr_soft_clear(level)
int level;
{
register u_char bit;
u_char bit;
if ((level < _IPL_SOFT_LEVEL_MIN) || (level > _IPL_SOFT_LEVEL_MAX))
return;