Support for UVM on VAXen.

ragge 1998-03-02 17:00:00 +00:00
parent 699af7acdd
commit 8e2874bf64
12 changed files with 456 additions and 218 deletions


@ -1,4 +1,4 @@
/* $NetBSD: kdb.c,v 1.11 1998/01/24 14:17:07 ragge Exp $ */
/* $NetBSD: kdb.c,v 1.12 1998/03/02 17:03:12 ragge Exp $ */
/*
* Copyright (c) 1996 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -13,8 +13,8 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed at Ludd, University of
* Lule}, Sweden and its contributors.
* This product includes software developed at Ludd, University of
* Lule}, Sweden and its contributors.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission
*
@ -46,6 +46,7 @@
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
@ -66,7 +67,7 @@
#include "locators.h"
#define b_forw b_hash.le_next
#define b_forw b_hash.le_next
/*
* Software status, per controller.
*/
@ -119,19 +120,19 @@ kdbprint(aux, name)
int
kdbmatch(parent, cf, aux)
struct device *parent;
struct cfdata *cf;
struct cfdata *cf;
void *aux;
{
struct bi_attach_args *ba = aux;
if (ba->ba_node->biic.bi_dtype != BIDT_KDB50)
return 0;
if (ba->ba_node->biic.bi_dtype != BIDT_KDB50)
return 0;
if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
return 0;
return 0;
return 1;
return 1;
}
void
@ -142,7 +143,7 @@ kdbattach(parent, self, aux)
struct kdb_softc *sc = (void *)self;
struct bi_attach_args *ba = aux;
struct mscp_attach_args ma;
extern struct ivec_dsp idsptch;
extern struct ivec_dsp idsptch;
volatile int i = 10000;
printf("\n");
@ -182,42 +183,48 @@ kdbgo(usc, bp)
struct kdb_softc *sc = (void *)usc;
struct mscp_softc *mi = sc->sc_softc;
struct mscp *mp = (void *)bp->b_actb;
struct pcb *pcb;
pt_entry_t *pte;
int pfnum, npf, o, i;
struct pcb *pcb;
pt_entry_t *pte;
int npf, o, i;
unsigned info = 0;
caddr_t addr;
caddr_t addr;
o = (int)bp->b_un.b_addr & PGOFSET;
npf = btoc(bp->b_bcount + o) + 1;
addr = bp->b_un.b_addr;
/*
* Get a pointer to the pte pointing out the first virtual address.
* Use different ways in kernel and user space.
*/
if ((bp->b_flags & B_PHYS) == 0) {
pte = kvtopte(addr);
} else {
pcb = &bp->b_proc->p_addr->u_pcb;
pte = uvtopte(addr, pcb);
}
/*
* Get a pointer to the pte pointing out the first virtual address.
* Use different ways in kernel and user space.
*/
if ((bp->b_flags & B_PHYS) == 0) {
pte = kvtopte(addr);
} else {
pcb = &bp->b_proc->p_addr->u_pcb;
pte = uvtopte(addr, pcb);
}
/*
* When we are doing DMA to user space, be sure that all pages
* we want to transfer to is mapped. WHY DO WE NEED THIS???
* SHOULDN'T THEY ALWAYS BE MAPPED WHEN DOING THIS???
*/
for (i = 0; i < (npf - 1); i++) {
if ((pte + i)->pg_pfn == 0) {
int rv;
rv = vm_fault(&bp->b_proc->p_vmspace->vm_map,
(unsigned)addr + i * NBPG,
VM_PROT_READ|VM_PROT_WRITE, FALSE);
if (rv)
panic("KDB DMA to nonexistent page, %d", rv);
}
}
/*
* When we are doing DMA to user space, be sure that all pages
* we want to transfer to is mapped. WHY DO WE NEED THIS???
* SHOULDN'T THEY ALWAYS BE MAPPED WHEN DOING THIS???
*/
for (i = 0; i < (npf - 1); i++) {
if ((pte + i)->pg_pfn == 0) {
int rv;
#if defined(UVM)
rv = uvm_fault(&bp->b_proc->p_vmspace->vm_map,
(unsigned)addr + i * NBPG, 0,
VM_PROT_READ|VM_PROT_WRITE);
#else
rv = vm_fault(&bp->b_proc->p_vmspace->vm_map,
(unsigned)addr + i * NBPG,
VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif
if (rv)
panic("KDB DMA to nonexistent page, %d", rv);
}
}
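The conditional just added is the pattern this commit repeats at every DMA prefault site (here, in disksubr.c and in vm_machdep.c): same map, address and protection, but uvm_fault() takes a fault-flags word (0 here) where Mach's vm_fault() took its change_wiring boolean, and the flags come before the protection. Condensed, with map and va standing in for the driver's values:

	/* Sketch of the call mapping; KERN_SUCCESS (0) means the page
	 * is now resident.  `map' and `va' are placeholders. */
	int rv;
#if defined(UVM)
	rv = uvm_fault(map, va, 0, VM_PROT_READ|VM_PROT_WRITE);
#else
	rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif
	if (rv)
		panic("DMA to nonexistent page, %d", rv);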
/*
* pte's for userspace isn't necessary positioned
* in consecutive physical pages. We check if they
@ -266,8 +273,6 @@ kdbintr(ctlr)
int ctlr;
{
struct kdb_softc *sc = kdb_cd.cd_devs[ctlr];
struct uba_softc *uh;
struct mscp_pack *ud;
if (sc->sc_kr->kdb_sa & MP_ERR) { /* ctlr fatal error */
kdbsaerror(&sc->sc_dev, 1);


@ -1,4 +1,4 @@
/* $NetBSD: macros.h,v 1.14 1998/01/18 22:06:02 ragge Exp $ */
/* $NetBSD: macros.h,v 1.15 1998/03/02 17:00:01 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -34,7 +34,7 @@
#if !defined(_VAX_MACROS_H_) && !defined(STANDALONE) && \
(!defined(_LOCORE) && defined(_VAX_INLINE_))
#define _VAX_MACROS_H_
#define _VAX_MACROS_H_
/* Here general macros are supposed to be stored */
@ -44,7 +44,7 @@ static __inline__ int ffs(int reg){
__asm__ __volatile ("ffs $0,$32,%1,%0
bneq 1f
mnegl $1,%0
1: incl %0"
1: incl %0"
: "&=r" (val)
: "r" (reg) );
return val;
@ -58,10 +58,10 @@ static __inline__ void _remque(void*p){
}
static __inline__ void _insque(void*p, void*q) {
__asm__ __volatile ("insque (%0), (%1)"
:
: "r" (p),"r" (q)
: "memory" );
__asm__ __volatile ("insque (%0), (%1)"
:
: "r" (p),"r" (q)
: "memory" );
}
static __inline__ void bcopy(const void*from, void*toe, u_int len) {
@ -71,11 +71,17 @@ static __inline__ void bcopy(const void*from, void*toe, u_int len) {
:"r0","r1","r2","r3","r4","r5");
}
void blkclr __P((void *, u_int));
static __inline__ void bzero(void*block, u_int len){
__asm__ __volatile ("movc5 $0,(%0),$0,%1,(%0)"
if (len > 65535)
blkclr(block, len);
else {
__asm__ __volatile ("movc5 $0,(%0),$0,%1,(%0)"
:
: "r" (block), "r" (len)
:"r0","r1","r2","r3","r4","r5");
}
}
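The length check added to bzero() is there because movc5 takes its length as a 16-bit word operand, so the inline can clear at most 65535 bytes; larger requests now go through blkclr(), whose looping VAX version lives in subr.s (also touched by this commit). A user-space model of that dispatch, with memset() standing in for the microcoded instruction:

#include <stddef.h>
#include <string.h>

#define MOVC5_MAX 65535u	/* movc5 length operand is one word */

/* Model of the fallback: clear in chunks the instruction can express,
 * then finish with the remainder. */
static void
blkclr_model(void *block, size_t len)
{
	char *p = block;

	while (len > MOVC5_MAX) {
		memset(p, 0, MOVC5_MAX);
		p += MOVC5_MAX;
		len -= MOVC5_MAX;
	}
	memset(p, 0, len);
}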
static __inline__ int bcmp(const void *b1, const void *b2, size_t len){
@ -121,13 +127,13 @@ static __inline__ int skpc(int mask, size_t size, u_char *cp){
return ret;
}
#define setrunqueue(p) \
#define setrunqueue(p) \
__asm__ __volatile("movl %0,r0;jsb Setrq":: "g"(p):"r0","r1","r2");
#define remrunqueue(p) \
#define remrunqueue(p) \
__asm__ __volatile("movl %0,r0;jsb Remrq":: "g"(p):"r0","r1","r2");
#define cpu_switch(p) \
#define cpu_switch(p) \
__asm__ __volatile("movl %0,r0;movpsl -(sp);jsb Swtch" \
::"g"(p):"r0","r1","r2","r3");
#endif /* _VAX_MACROS_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.15 1997/07/12 16:20:38 perry Exp $ */
/* $NetBSD: vmparam.h,v 1.16 1998/03/02 17:00:02 ragge Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -48,36 +48,33 @@
*/
/*
* Virtual address space arrangement. On 386, both user and kernel
* share the address space, not unlike the vax.
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. Immediately above the user stack
* resides the user structure, which is UPAGES long and contains the
* kernel stack.
* resides kernel.
*
*/
#define USRTEXT 0x400
#define USRSTACK 0x7fffe000 /* XXX */
#define USRTEXT 0x400
#define USRSTACK 0x7fffe000 /* XXX should be KERNBASE */
/*
* Virtual memory related constants, all in bytes
*/
#ifndef MAXTSIZ
#define MAXTSIZ (8*1024*1024) /* max text size */
#define MAXTSIZ (8*1024*1024) /* max text size */
#endif
#ifndef MAXDSIZ
#define MAXDSIZ (24*1024*1024) /* max data size */
#define MAXDSIZ (24*1024*1024) /* max data size */
#endif
#ifndef MAXSSIZ
#define MAXSSIZ (8*1024*1024) /* max stack size */
#ifndef MAXSSIZ
#define MAXSSIZ (8*1024*1024) /* max stack size */
#endif
#ifndef DFLDSIZ
#define DFLDSIZ (16*1024*1024) /* initial data size limit */
#define DFLDSIZ (16*1024*1024) /* initial data size limit */
#endif
#ifndef DFLSSIZ
#define DFLSSIZ (512*1024) /* initial stack size limit */
#ifndef DFLSSIZ
#define DFLSSIZ (512*1024) /* initial stack size limit */
#endif
/*
@ -86,7 +83,7 @@
* Note: This is just a hint, if we mmap() more than this the page
* table will be expanded. (at the cost of speed).
*/
#define MMAPSPACE (24*1024*1024)
#define MMAPSPACE (24*1024*1024)
/*
* Size of shared memory map
@ -99,19 +96,19 @@
/*
* Size of User Raw I/O map
*/
#define USRIOSIZE 300
#define USRIOSIZE 300
/*
* The time for a process to be blocked before being very swappable.
* This is a number of seconds which the system takes as being a non-trivial
* amount of real time. You probably shouldn't change this;
* amount of real time. You probably shouldn't change this;
* it is used in subtle ways (fractions and multiples of it are, that is, like
* half of a ``long time'', almost a long time, etc.)
* It is related to human patience and other factors which don't really
* change over time.
*/
#define MAXSLP 20
#define MAXSLP 20
/*
* A swapped in process is given a small amount of core without being bothered
@ -125,29 +122,39 @@
* so we loan each swapped in process memory worth 100$, or just admit
* that we don't consider it worthwhile and swap it out to disk which costs
* $30/mb or about $0.75.
* Update: memory prices have changed recently (9/96). At the current
* Update: memory prices have changed recently (9/96). At the current
* value of $6 per megabyte, we lend each swapped in process memory worth
* $0.15, or just admit that we don't consider it worthwhile and swap it out
* to disk which costs $0.20/MB, or just under half a cent.
*/
#define SAFERSS 8 /* nominal ``small'' resident set size
#define SAFERSS 8 /* nominal ``small'' resident set size
protected against replacement */
#define mapin(pte, v, pfnum, prot) \
#define mapin(pte, v, pfnum, prot) \
{(*(int *)(pte) = ((pfnum)<<PGSHIFT) | (prot)) ; }
#if defined(UVM)
#define MACHINE_NEW_NONCONTIG
#define VM_PHYSSEG_MAX 1
#define VM_PHYSSEG_NOADD
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH /* XXX */
struct pmap_physseg {
};
#endif
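These definitions opt the port into UVM's new non-contiguous page startup: a single physical segment (VM_PHYSSEG_MAX 1), no segments added after boot, and an empty struct pmap_physseg since this pmap keeps no per-segment data. The matching load of that one segment is in the pmap.c hunk later in this commit; the ranges are in clicks, and all of RAM is reported available:

	uvm_page_physload(avail_start >> CLSHIFT, avail_end >> CLSHIFT,
	    avail_start >> CLSHIFT, avail_end >> CLSHIFT);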
/*
* Mach derived constants
*/
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x7FFFE000)
#define VM_MAX_ADDRESS ((vm_offset_t)0xC0000000)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0x80000000)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)(VM_MIN_KERNEL_ADDRESS+\
(VM_KERNEL_PT_PAGES*0x10000)))
#define VM_MAXUSER_ADDRESS ((vm_offset_t)KERNBASE)
#define VM_MAX_ADDRESS ((vm_offset_t)KERNBASE)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNBASE)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)(0xC0000000))
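The replacement constants trade the old magic numbers for KERNBASE, which on the VAX is 0x80000000, the start of S0 (system) space. Read together they give, schematically:

	/* Assuming KERNBASE == 0x80000000 per the VAX address layout:
	 *   0x00000000 .. KERNBASE-1   user space (P0 and P1 regions)
	 *   KERNBASE   .. 0xBFFFFFFF   kernel (S0) space
	 */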
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
@ -155,5 +162,5 @@
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
/* pcb base */
#define pcbb(p) ((u_int)(p)->p_addr)
#define pcbb(p) ((u_int)(p)->p_addr)


@ -1,4 +1,4 @@
/* $NetBSD: uba.c,v 1.32 1998/01/24 14:16:33 ragge Exp $ */
/* $NetBSD: uba.c,v 1.33 1998/03/02 17:00:02 ragge Exp $ */
/*
* Copyright (c) 1996 Jonathan Stone.
* Copyright (c) 1994, 1996 Ludd, University of Lule}, Sweden.
@ -434,8 +434,13 @@ qba_attach(parent, self, aux)
* Map in the UBA page map into kernel space. On other UBAs,
* the map registers are in the bus IO space.
*/
#if defined(UVM)
(void)uvm_km_suballoc(kernel_map, &mini, &maxi,
QBAPAGES * sizeof(struct pte), FALSE, FALSE, NULL);
#else
(void)kmem_suballoc(kernel_map, &mini, &maxi,
QBAPAGES * sizeof(struct pte), FALSE);
#endif
pmap_map(mini, QBAMAP, QBAMAP + QBAPAGES * sizeof(struct pte),
VM_PROT_READ | VM_PROT_WRITE);
sc->uh_mr = (void *)mini;
@ -849,8 +854,13 @@ uba_attach(sc, iopagephys)
vm_offset_t iarea;
int i;
#if defined(UVM)
iarea = uvm_km_valloc(kernel_map,
NO_IVEC * sizeof(struct ivec_dsp));
#else
iarea = kmem_alloc(kernel_map,
NO_IVEC * sizeof(struct ivec_dsp));
#endif
sc->uh_idsp = (struct ivec_dsp *)iarea;
for (i = 0; i < NO_IVEC; i++) {
@ -866,7 +876,12 @@ uba_attach(sc, iopagephys)
* This is done with kmem_suballoc() but after that
* never used in the vm system. Is it OK to do so?
*/
#if defined(UVM)
(void)uvm_km_suballoc(kernel_map, &mini, &maxi, UBAIOPAGES * NBPG,
FALSE, FALSE, NULL);
#else
(void)kmem_suballoc(kernel_map, &mini, &maxi, UBAIOPAGES * NBPG, FALSE);
#endif
pmap_map(mini, iopagephys, iopagephys + UBAIOPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE);
sc->uh_iopage = (void *)mini;
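Taken together, the uba changes show the allocator substitutions used throughout this commit. A condensed map, recorded as used at these call sites rather than as a claim of drop-in equivalence (which kmem_alloc() replacement applies depends on whether the caller needs backed memory; compare the machdep.c hunks below):

	/*
	 * Mach VM call                   UVM call used in this commit
	 * kmem_alloc(map, sz)            uvm_km_alloc(map, sz) or
	 *                                uvm_km_valloc(map, sz)
	 * kmem_alloc_wait(map, sz)       uvm_km_valloc_wait(map, sz)
	 * kmem_free_wakeup(map, va, sz)  uvm_km_free_wakeup(map, va, sz)
	 * kmem_suballoc(map, &lo, &hi,   uvm_km_suballoc(map, &lo, &hi,
	 *     sz, pageable)                  sz, pageable, FALSE, NULL)
	 */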


@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.15 1998/01/18 22:09:13 ragge Exp $ */
/* $NetBSD: disksubr.c,v 1.16 1998/03/02 17:00:00 ragge Exp $ */
/*
* Copyright (c) 1982, 1986, 1988 Regents of the University of California.
@ -50,6 +50,7 @@
#include <machine/macros.h>
#include <machine/pte.h>
#include <machine/pcb.h>
#include <machine/cpu.h>
#include <arch/vax/mscp/mscp.h> /* For disk encoding scheme */
@ -237,10 +238,10 @@ void
disk_printtype(unit, type)
int unit, type;
{
printf(" drive %d: %c%c", unit, MSCP_MID_CHAR(2, type),
MSCP_MID_CHAR(1, type));
printf(" drive %d: %c%c", unit, (int)MSCP_MID_CHAR(2, type),
(int)MSCP_MID_CHAR(1, type));
if (MSCP_MID_ECH(0, type))
printf("%c", MSCP_MID_CHAR(0, type));
printf("%c", (int)MSCP_MID_CHAR(0, type));
printf("%d\n", MSCP_MID_NUM(type));
}
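The (int) casts added to the MSCP_MID_CHAR() arguments line up the values with what %c actually reads: printf() pulls an int off the variadic list for %c, and if the macro expands to an expression of a wider or unsigned type the explicit cast keeps that well-defined. A standalone illustration (the unsigned long is a hypothetical stand-in for such an expansion):

#include <stdio.h>

int
main(void)
{
	unsigned long c = 'R';	/* stand-in for a non-int expansion */

	printf("%c\n", (int)c);	/* %c consumes an int */
	return 0;
}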
@ -255,6 +256,7 @@ disk_reallymapin(bp, map, reg, flag)
struct pte *map;
int reg, flag;
{
struct proc *p;
volatile pt_entry_t *io;
pt_entry_t *pte;
struct pcb *pcb;
@ -264,6 +266,7 @@ disk_reallymapin(bp, map, reg, flag)
o = (int)bp->b_un.b_addr & PGOFSET;
npf = btoc(bp->b_bcount + o) + 1;
addr = bp->b_un.b_addr;
p = bp->b_proc;
/*
* Get a pointer to the pte pointing out the first virtual address.
@ -271,8 +274,10 @@ disk_reallymapin(bp, map, reg, flag)
*/
if ((bp->b_flags & B_PHYS) == 0) {
pte = kvtopte(addr);
if (p == 0)
p = &proc0;
} else {
pcb = &bp->b_proc->p_addr->u_pcb;
pcb = &p->p_addr->u_pcb;
pte = uvtopte(addr, pcb);
}
@ -284,9 +289,15 @@ disk_reallymapin(bp, map, reg, flag)
for (i = 0; i < (npf - 1); i++) {
if ((pte + i)->pg_pfn == 0) {
int rv;
rv = vm_fault(&bp->b_proc->p_vmspace->vm_map,
#if defined(UVM)
rv = uvm_fault(&p->p_vmspace->vm_map,
(unsigned)addr + i * NBPG, 0,
VM_PROT_READ|VM_PROT_WRITE);
#else
rv = vm_fault(&p->p_vmspace->vm_map,
(unsigned)addr + i * NBPG,
VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif
if (rv)
panic("DMA to nonexistent page, %d", rv);
}


@ -1,7 +1,7 @@
/* $NetBSD: machdep.c,v 1.52 1998/02/19 04:18:34 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.53 1998/03/02 17:00:00 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
* Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
* Copyright (c) 1993 Adam Glass
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
@ -64,6 +64,8 @@
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/ptrace.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
#ifdef SYSVMSG
#include <sys/msg.h>
#endif
@ -74,6 +76,8 @@
#include <sys/shm.h>
#endif
#include <dev/cons.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
@ -147,6 +151,12 @@ int nbuf = NBUF;
int nbuf = 0;
#endif
#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#endif
void
cpu_startup()
{
@ -185,31 +195,52 @@ cpu_startup()
*/
sz = (int) allocsys((caddr_t) 0);
#if defined(UVM)
if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#else
if ((v = (caddr_t) kmem_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#endif
if (allocsys(v) - v != sz)
panic("startup: table size inconsistency");
/*
* Now allocate buffers proper. They are different than the above in
* that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
size = MAXBSIZE * nbuf; /* # bytes for buffers */
#if defined(UVM)
/* allocate VM for buffers... area is not managed by VM system */
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
#else
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *) & buffers,
&maxaddr, size, TRUE);
#endif
minaddr = (vm_offset_t) buffers;
#if !defined(UVM)
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t) 0,
&minaddr, size, FALSE) != KERN_SUCCESS)
&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
#endif
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
bufpages = btoc(MAXBSIZE) * nbuf;
}
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
vm_size_t curbufsize;
vm_offset_t curbuf;
/* now allocate RAM for buffers */
for (i = 0 ; i < nbuf ; i++) {
vm_offset_t curbuf;
vm_size_t curbufsize;
#if defined(UVM)
struct vm_page *pg;
#endif
/*
* First <residual> buffers get (base+1) physical pages
@ -220,29 +251,58 @@ cpu_startup()
*/
curbuf = (vm_offset_t) buffers + i * MAXBSIZE;
curbufsize = CLBYTES * (i < residual ? base + 1 : base);
#if defined(UVM)
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL);
if (pg == NULL)
panic("cpu_startup: "
"not enough RAM for buffer cache");
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
curbuf += CLBYTES;
curbufsize -= CLBYTES;
}
#else
vm_map_pageable(buffer_map, curbuf, curbuf + curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
#endif
}
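Under UVM the buffer cache keeps its two-step construction: uvm_map() with UVM_PROT_NONE reserves a virtual range the VM system otherwise leaves unmanaged, and the loop above attaches RAM by hand, replacing the old vm_map_pageable() wiring. One iteration reduces to:

	/* An anonymous page off the free list, entered wired into the
	 * kernel pmap at the current buffer address. */
	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL);
	if (pg == NULL)
		panic("cpu_startup: not enough RAM for buffer cache");
	pmap_enter(kernel_map->pmap, curbuf, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_ALL, TRUE);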
/*
* Allocate a submap for exec arguments. This map effectively limits
* the number of processes exec'ing at any time.
*/
#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16 * NCARGS, TRUE, FALSE, NULL);
#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16 * NCARGS, TRUE);
#endif
/*
* Finally, allocate mbuf cluster submap.
*/
#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *) & mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE, FALSE, NULL);
#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *) & mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
#endif
#if VAX410 || VAX43
/*
* Allocate a submap for physio
*/
#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE, FALSE, NULL);
#else
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
#endif
#endif
/*
* Initialize callouts
*/
@ -252,7 +312,11 @@ cpu_startup()
callout[i - 1].c_next = &callout[i];
callout[i - 1].c_next = NULL;
#if defined(UVM)
printf("avail mem = %d\n", (int)ptoa(uvmexp.free));
#else
printf("avail mem = %d\n", (int)ptoa(cnt.v_free_count));
#endif
printf("Using %d buffers containing %d bytes of memory.\n",
nbuf, bufpages * CLBYTES);
@ -333,7 +397,9 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
#endif
valloc(buf, struct buf, nbuf);
return v;
}
@ -480,7 +546,11 @@ sendsig(catcher, sig, mask, code)
} else
cursp = syscf->sp;
if (cursp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
#if !defined(UVM)
(void) grow(p, cursp);
#else
(void) uvm_grow(p, cursp);
#endif
/* Set up positions for structs on stack */
sigctx = (struct sigcontext *) (cursp - sizeof(struct sigcontext));
@ -490,8 +560,13 @@ sendsig(catcher, sig, mask, code)
/* Place for pointer to arg list in sigreturn */
cursp = (unsigned)sigctx - 8;
#if defined(UVM)
if (uvm_useracc((caddr_t) cursp, sizeof(struct sigcontext) +
sizeof(struct trampframe), B_WRITE) == 0) {
#else
if (useracc((caddr_t) cursp, sizeof(struct sigcontext) +
sizeof(struct trampframe), B_WRITE) == 0) {
#endif
/*
* Process has trashed its stack; give it an illegal
* instruction to halt it in its tracks.


@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.10 1997/11/02 14:25:21 ragge Exp $ */
/* $NetBSD: mem.c,v 1.11 1998/03/02 17:00:00 ragge Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -37,7 +37,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)mem.c 8.3 (Berkeley) 1/12/94
* @(#)mem.c 8.3 (Berkeley) 1/12/94
*/
/*
@ -123,7 +123,11 @@ mmrw(dev, uio, flags)
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
#else
if (!kernacc((caddr_t)v, c,
#endif
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
error = uiomove((caddr_t)v, c, uio);


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.45 1998/01/31 12:17:34 ragge Exp $ */
/* $NetBSD: pmap.c,v 1.46 1998/03/02 17:00:01 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -42,6 +42,10 @@
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif
#include <machine/pte.h>
#include <machine/pcb.h>
#include <machine/mtpr.h>
@ -55,12 +59,12 @@
/*
* This code uses bitfield operators for most page table entries.
*/
#define PROTSHIFT 27
#define PROT_KW (PG_KW >> PROTSHIFT)
#define PROT_KR (PG_KR >> PROTSHIFT)
#define PROT_RW (PG_RW >> PROTSHIFT)
#define PROT_RO (PG_RO >> PROTSHIFT)
#define PROT_URKW (PG_URKW >> PROTSHIFT)
#define PROTSHIFT 27
#define PROT_KW (PG_KW >> PROTSHIFT)
#define PROT_KR (PG_KR >> PROTSHIFT)
#define PROT_RW (PG_RW >> PROTSHIFT)
#define PROT_RO (PG_RO >> PROTSHIFT)
#define PROT_URKW (PG_URKW >> PROTSHIFT)
struct pmap kernel_pmap_store;
@ -69,6 +73,9 @@ void *scratch;
vm_offset_t ptemapstart, ptemapend;
vm_map_t pte_map;
#if defined(UVM)
struct vm_map pte_map_store;
#endif
extern caddr_t msgbufaddr;
@ -121,7 +128,7 @@ pmap_bootstrap()
/* reverse mapping struct */
sysptsize += (avail_end >> PGSHIFT) * 2;
/* User Page table area. This may grow big */
#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / NBPG)
#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / NBPG)
sysptsize += ((USRPTSIZE * 4) / NBPG) * maxproc;
/* Kernel stacks per process */
sysptsize += UPAGES * maxproc;
@ -178,8 +185,13 @@ pmap_bootstrap()
blkclr((void *)istack, (avail_start + KERNBASE) - istack);
/* Set logical page size */
#if defined(UVM)
uvmexp.pagesize = CLBYTES;
uvm_setpagesize();
#else
PAGE_SIZE = CLBYTES;
vm_set_page_size();
#endif
/*
* We move SCB here from physical address 0 to an address
@ -227,10 +239,28 @@ pmap_bootstrap()
/*
* Now everything should be complete, start virtual memory.
*/
#if defined(UVM)
uvm_page_physload(avail_start >> CLSHIFT, avail_end >> CLSHIFT,
avail_start >> CLSHIFT, avail_end >> CLSHIFT);
#endif
mtpr(sysptsize, PR_SLR);
mtpr(1, PR_MAPEN);
}
#if defined(UVM)
/*
* How much virtual space does this kernel have?
* (After mapping kernel text, data, etc.)
*/
void
pmap_virtual_space(v_start, v_end)
vm_offset_t *v_start;
vm_offset_t *v_end;
{
*v_start = virtual_avail;
*v_end = virtual_end;
}
#endif
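pmap_virtual_space() is the query side of MACHINE_NEW_NONCONTIG: UVM's page-startup code asks the pmap for the kernel's usable virtual range instead of having the bounds passed down, which is why pmap_init() loses its two arguments in the next hunk. The two interface shapes, side by side:

#if defined(UVM)
void	pmap_init __P((void));
void	pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#else
void	pmap_init __P((vm_offset_t start, vm_offset_t end));
#endif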
/*
* pmap_init() is called as part of vm init after memory management
@ -238,13 +268,22 @@ pmap_bootstrap()
* Here we allocate virtual memory for user page tables.
*/
void
#if defined(UVM)
pmap_init()
#else
pmap_init(start, end)
vm_offset_t start, end;
#endif
{
/* reserve place on SPT for UPT */
#if !defined(UVM)
pte_map = kmem_suballoc(kernel_map, &ptemapstart, &ptemapend,
USRPTSIZE * 4 * maxproc, TRUE);
#else
pte_map = uvm_km_suballoc(kernel_map, &ptemapstart, &ptemapend,
USRPTSIZE * 4 * maxproc, TRUE, FALSE, &pte_map_store);
#endif
}
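Note the trailing &pte_map_store in the UVM branch: uvm_km_suballoc() can either allocate the submap structure itself or initialize caller-supplied static storage, and the latter avoids dynamic allocation for a map created this early in bootstrap. The NULL passed at the uba.c and machdep.c call sites selects the allocating form. Schematically:

	/* static storage: pte_map ends up pointing at pte_map_store */
	pte_map = uvm_km_suballoc(kernel_map, &lo, &hi, sz,
	    TRUE, FALSE, &pte_map_store);
	/* dynamic: UVM allocates the struct vm_map itself */
	exec_map = uvm_km_suballoc(kernel_map, &lo, &hi, sz,
	    TRUE, FALSE, NULL);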
@ -284,9 +323,12 @@ pmap_pinit(pmap)
* Allocate PTEs and stash them away in the pmap.
* XXX Ok to use kmem_alloc_wait() here?
*/
bytesiz = btoc(MAXTSIZ + MAXDSIZ + MMAPSPACE + MAXSSIZ) *
sizeof(struct pte);
bytesiz = USRPTSIZE * sizeof(struct pte);
#if defined(UVM)
pmap->pm_p0br = (void *)uvm_km_valloc_wait(pte_map, bytesiz);
#else
pmap->pm_p0br = (void *)kmem_alloc_wait(pte_map, bytesiz);
#endif
pmap->pm_p0lr = btoc(MAXTSIZ + MAXDSIZ + MMAPSPACE) | AST_PCB;
pmap->pm_p1br = (void *)pmap->pm_p0br + bytesiz - 0x800000;
pmap->pm_p1lr = (0x200000 - btoc(MAXSSIZ));
@ -314,8 +356,13 @@ if(startpmapdebug)printf("pmap_release: pmap %x\n",pmap);
#endif
if (pmap->pm_p0br)
#if defined(UVM)
uvm_km_free_wakeup(pte_map, (vm_offset_t)pmap->pm_p0br,
USRPTSIZE * sizeof(struct pte));
#else
kmem_free_wakeup(pte_map, (vm_offset_t)pmap->pm_p0br,
USRPTSIZE * sizeof(struct pte));
#endif
}
@ -392,9 +439,11 @@ printf("pmap_enter: pmap: %x,virt %x, phys %x, prot %x w %x\n",
if ((patch[i] & ~PG_M) == nypte)
return;
#ifdef DIAGNOSTIC
if ((patch[i] & PG_FRAME) &&
((patch[i] & PG_FRAME) != (nypte & PG_FRAME)))
#if defined(UVM)
pmap_page_protect((patch[i] & PG_FRAME) << PGSHIFT, 0);
#else
panic("pmap_enter: mapping onto old map");
#endif
@ -482,7 +531,7 @@ pmap_extract(pmap, va)
pmap_t pmap;
vm_offset_t va;
{
struct pte *pte;
struct pte *pte;
#ifdef PMAPDEBUG
if(startpmapdebug)printf("pmap_extract: pmap %x, va %x\n",pmap, va);
@ -492,19 +541,19 @@ if(startpmapdebug)printf("pmap_extract: pmap %x, va %x\n",pmap, va);
printf("Warning, pmap_extract va not aligned\n");
#endif
if (va < 0x40000000) {
pte = pmap->pm_p0br;
if ((va >> PGSHIFT) > (pmap->pm_p0lr & ~AST_MASK))
return 0;
} else if (va & KERNBASE) {
if (va < 0x40000000) {
pte = pmap->pm_p0br;
if ((va >> PGSHIFT) > (pmap->pm_p0lr & ~AST_MASK))
return 0;
} else if (va & KERNBASE) {
pte = Sysmap;
} else {
pte = pmap->pm_p1br;
if (POFF(va) < pmap->pm_p1lr)
return 0;
}
pte = pmap->pm_p1br;
if (POFF(va) < pmap->pm_p1lr)
return 0;
}
return (pte[POFF(va)].pg_pfn << PGSHIFT);
return (pte[POFF(va)].pg_pfn << PGSHIFT);
}
/*
@ -620,7 +669,7 @@ if(startpmapdebug) printf("pmap_protect: pmap %x, start %x, end %x, prot %x\n",
*/
boolean_t
pmap_is_referenced(pa)
vm_offset_t pa;
vm_offset_t pa;
{
struct pv_entry *pv;


@ -1,4 +1,4 @@
/* $NetBSD: subr.s,v 1.22 1998/01/18 22:06:01 ragge Exp $ */
/* $NetBSD: subr.s,v 1.23 1998/03/02 17:00:01 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -33,8 +33,9 @@
#include <machine/asm.h>
#include "assym.h"
#include "opt_uvm.h"
#define JSBENTRY(x) .globl x ; .align 2 ; x :
#define JSBENTRY(x) .globl x ; .align 2 ; x :
.text
@ -61,8 +62,8 @@ _eidsptch:
ENTRY(badaddr,0) # Called with addr,b/w/l
mfpr $0x12,r0
mtpr $0x1f,$0x12
movl 4(ap),r2 # First argument, the address
movl 8(ap),r1 # Sec arg, b,w,l
movl 4(ap),r2 # First argument, the address
movl 8(ap),r1 # Sec arg, b,w,l
pushl r0 # Save old IPL
clrl r3
movl $4f,_memtest # Set the return adress
@ -111,7 +112,7 @@ ENTRY(bzero,0)
_setjmp:.word 0
movl 4(ap), r0
movl 8(fp), (r0)
movl 12(fp), 4(r0)
movl 12(fp), 4(r0)
movl 16(fp), 8(r0)
addl3 fp,$28,12(r0)
clrl r0
@ -138,7 +139,7 @@ JSBENTRY(Setrq)
calls $1,_panic
setrq: .asciz "setrunqueue"
#endif
1: extzv $2,$6,P_PRIORITY(r0),r1 # get priority
1: extzv $2,$6,P_PRIORITY(r0),r1 # get priority
movaq _qs[r1],r2 # get address of queue
insque (r0),*4(r2) # put proc last in queue
bbss r1,_whichqs,1f # set queue bit.
@ -220,13 +221,21 @@ noque: .asciz "swtch"
ENTRY(cpu_exit,0)
movl 4(ap),r6 # Process pointer in r0
pushl P_VMSPACE(r6) # free current vm space
#if defined(UVM)
calls $1,_uvmspace_free
#else
calls $1,_vmspace_free
#endif
mtpr $0x18,$PR_IPL # Block almost everything
addl3 $512,_scratch,sp # Change stack, we will free it now
pushl $USPACE # stack size
pushl P_ADDR(r6) # pointer to stack space
pushl _kernel_map # the correct vm map
#if defined(UVM)
calls $3,_uvm_km_free
#else
calls $3,_kmem_free
#endif
clrl r0 # No process to switch from
bicl3 $0xc0000000,_scratch,r1
mtpr r1,$PR_PCBB
@ -247,6 +256,16 @@ _copyin:.word 0
1: clrl *pcbtrap
ret
ENTRY(kcopy,0)
moval 1f,*pcbtrap
movl 4(ap),r1
movl 8(ap),r2
movc3 12(ap),(r1), (r2)
clrl r1
1: clrl *pcbtrap
movl r1,r0
ret
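kcopy() is new in this commit: a copy between kernel addresses that is allowed to fault, protected by the same pcbtrap recovery used by copyin() and copyout(). Its C-level contract, roughly (a sketch of the observable behavior, not the VAX routine above):

/* Copy len bytes from `from' to `to'; returns 0 on success, nonzero
 * if an access fault occurred mid-copy (destination contents are then
 * unspecified). */
int	kcopy __P((const void *from, void *to, size_t len));

	/* typical use */
	if (kcopy(src, dst, len) != 0)
		return (EFAULT);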
_copystr: .globl _copystr
_copyinstr: .globl _copyinstr
_copyoutstr: .globl _copyoutstr
@ -333,7 +352,7 @@ ENTRY(suswintr,0)
ENTRY(fuswintr,0)
moval 1f,*pcbtrap
movl 4(ap),r0
movl 4(ap),r0
movzwl (r0),r1
1: clrl *pcbtrap
movl r1,r0
@ -352,16 +371,16 @@ pcbtrap: .long 0x800001fc; .globl pcbtrap # Safe place
* Copy/zero more than 64k of memory (as opposite of bcopy/bzero).
*/
ENTRY(blkcpy,R6)
movl 4(ap),r1
movl 8(ap),r3
movl 4(ap),r1
movl 8(ap),r3
movl 12(ap),r6
jbr 2f
1: subl2 r0,r6
movc3 r0,(r1),(r3)
2: movzwl $65535,r0
cmpl r6,r0
jgtr 1b
movc3 r6,(r1),(r3)
1: subl2 r0,r6
movc3 r0,(r1),(r3)
2: movzwl $65535,r0
cmpl r6,r0
jgtr 1b
movc3 r6,(r1),(r3)
ret
ENTRY(blkclr,R6)


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.33 1998/01/03 00:35:28 thorpej Exp $ */
/* $NetBSD: trap.c,v 1.34 1998/03/02 17:00:01 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -92,14 +92,14 @@ char *traptypes[]={
int no_traps = 18;
#define USERMODE(framep) ((((framep)->psl) & (PSL_U)) == PSL_U)
#define FAULTCHK \
if (p->p_addr->u_pcb.iftrap) { \
frame->pc = (unsigned)p->p_addr->u_pcb.iftrap; \
#define FAULTCHK \
if (p->p_addr->u_pcb.iftrap) { \
frame->pc = (unsigned)p->p_addr->u_pcb.iftrap; \
frame->psl &= ~PSL_FPD; \
frame->r0 = EFAULT;/* for copyin/out */ \
frame->r1 = -1; /* for fetch/store */ \
return; \
}
frame->r1 = -1; /* for fetch/store */ \
return; \
}
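FAULTCHK is the recovery half of the pcbtrap protocol: copyin(), copyout(), the new kcopy() and the fuswintr()/suswintr() pair (all in subr.s) arm pcb.iftrap with a resume address before touching a possibly-bad address, and a kernel-mode access fault with that slot set becomes an error return instead of a panic. arithflt() applies it as in the fault hunk below:

	if (rv != KERN_SUCCESS) {
		if (umode == 0) {
			FAULTCHK;	/* armed? resume the copy routine */
			panic("Segv in kernel mode: pc %x addr %x",
			    (u_int)frame->pc, (u_int)frame->code);
		}
		/* otherwise fall through and signal the process */
	}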
void
arithflt(frame)
@ -112,8 +112,13 @@ arithflt(frame)
u_quad_t oticks = 0;
vm_map_t map;
vm_prot_t ftype;
extern vm_map_t pte_map;
extern vm_map_t pte_map;
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
if ((umode = USERMODE(frame))) {
type |= T_USER;
oticks = p->p_sticks;
@ -166,7 +171,7 @@ fram:
case T_ACCFLT:
#ifdef TRAPDEBUG
if(faultdebug)printf("trap accflt type %x, code %x, pc %x, psl %x\n",
frame->trap, frame->code, frame->pc, frame->psl);
frame->trap, frame->code, frame->pc, frame->psl);
#endif
#ifdef DIAGNOSTIC
if (p == 0)
@ -189,11 +194,15 @@ if(faultdebug)printf("trap accflt type %x, code %x, pc %x, psl %x\n",
addr = trunc_page((unsigned)&pm->pm_p1br[
(frame->code & 0x3fffffff) >> PGSHIFT]);
}
#if defined(UVM)
rv = uvm_fault(pte_map, addr, 0,
VM_PROT_WRITE|VM_PROT_READ);
#else
rv = vm_fault(pte_map, addr,
VM_PROT_WRITE|VM_PROT_READ, FALSE);
#endif
if (rv != KERN_SUCCESS) {
sig = SIGSEGV;
break;
goto ufault;
} else
trapsig = 0;
}
@ -208,13 +217,20 @@ if(faultdebug)printf("trap accflt type %x, code %x, pc %x, psl %x\n",
else
ftype = VM_PROT_READ;
#if defined(UVM)
rv = uvm_fault(map, addr, 0, ftype);
#else
rv = vm_fault(map, addr, ftype, FALSE);
#endif
if (rv != KERN_SUCCESS) {
if (umode == 0) {
FAULTCHK;
panic("Segv in kernel mode: pc %x addr %x",
(u_int)frame->pc, (u_int)frame->code);
}
ufault: if (rv == KERN_RESOURCE_SHORTAGE)
printf("Pid %d killed: out of memory.\n",
p->p_pid);
sig = SIGSEGV;
} else
trapsig = 0;
@ -270,14 +286,14 @@ if(faultdebug)printf("trap accflt type %x, code %x, pc %x, psl %x\n",
postsig(sig);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqueue ourselves but before
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqueue ourselves but before
* we swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
* our priority.
*/
splstatclock();
setrunqueue(p);
mi_switch();
@ -288,14 +304,14 @@ if(faultdebug)printf("trap accflt type %x, code %x, pc %x, psl %x\n",
extern int psratio;
addupc_task(p, frame->pc, (int)(p->p_sticks-oticks) * psratio);
}
curpriority = p->p_priority;
curpriority = p->p_priority;
}
void
setregs(p, pack, stack)
struct proc *p;
struct proc *p;
struct exec_package *pack;
u_long stack;
u_long stack;
{
struct trapframe *exptr;
@ -317,10 +333,15 @@ syscall(frame)
#ifdef TRAPDEBUG
if(startsysc)printf("trap syscall %s pc %x, psl %x, sp %x, pid %d, frame %x\n",
syscallnames[frame->code], frame->pc, frame->psl,frame->sp,
syscallnames[frame->code], frame->pc, frame->psl,frame->sp,
curproc->p_pid,frame);
#endif
#if defined(UVM)
uvmexp.syscalls++;
#else
cnt.v_syscall++;
#endif
exptr = p->p_addr->u_pcb.framep = frame;
callp = p->p_emul->e_sysent;
nsys = p->p_emul->e_nsysent;
@ -362,8 +383,8 @@ if(startsysc)printf("trap syscall %s pc %x, psl %x, sp %x, pid %d, frame %x\n",
#ifdef TRAPDEBUG
if(startsysc)
printf("retur %s pc %x, psl %x, sp %x, pid %d, v{rde %d r0 %d, r1 %d, frame %x\n",
syscallnames[exptr->code], exptr->pc, exptr->psl,exptr->sp,
curproc->p_pid,err,rval[0],rval[1],exptr);
syscallnames[exptr->code], exptr->pc, exptr->psl,exptr->sp,
curproc->p_pid,err,rval[0],rval[1],exptr);
#endif
bad:
@ -391,14 +412,14 @@ bad:
postsig(sig);
p->p_priority = p->p_usrpri;
if (want_resched) {
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqueue ourselves but before
/*
* Since we are curproc, clock will normally just change
* our priority without moving us from one queue to another
* (since the running process is not on a queue.)
* If that happened after we setrunqueue ourselves but before
* we swtch()'ed, we might not be on the queue indicated by
* our priority.
*/
* our priority.
*/
splstatclock();
setrunqueue(p);
mi_switch();


@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.37 1998/01/03 00:37:31 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.38 1998/03/02 17:00:01 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -94,24 +94,22 @@ void
cpu_fork(p1, p2)
struct proc *p1, *p2;
{
struct pte *pt;
struct pcb *nyproc;
struct trapframe *tf;
struct pmap *pmap, *opmap;
extern vm_map_t pte_map;
nyproc = &p2->p_addr->u_pcb;
tf = p1->p_addr->u_pcb.framep;
opmap = p1->p_vmspace->vm_map.pmap;
pmap = p2->p_vmspace->vm_map.pmap;
#ifdef notyet
/* Mark page invalid */
p2pte = kvtopte((u_int *)p2->p_addr + 2 * NBPG);
*p2pte = 0;
#endif
pt = kvtopte((u_int)p2->p_addr + NBPG);
pt->pg_v = 0;
/*
* Activate address space for the new process. The PTEs have
* Activate address space for the new process. The PTEs have
* already been allocated by way of pmap_create().
*/
pmap_activate(p2);
@ -161,7 +159,7 @@ cpu_set_kpc(p, pc)
kc->cf.ca_pc = (unsigned)&sret;
kc->cf.ca_argno = 1;
kc->cf.ca_arg1 = (unsigned)p;
kc->tf.r11 = boothowto; /* If we have old init */
kc->tf.r11 = boothowto; /* If we have old init */
kc->tf.psl = 0x3c00000;
nyproc->framep = (void *)&kc->tf;
@ -218,7 +216,7 @@ extern struct emul emul_ultrix;
* 4.3BSD Reno programs have an 1K header first in the executable
* file, containing a.out header. Otherwise programs are identical.
*
* from: exec_aout.c,v 1.9 1994/01/28 23:46:59 jtc Exp $
* from: exec_aout.c,v 1.9 1994/01/28 23:46:59 jtc Exp $
*/
int
@ -301,14 +299,14 @@ cpu_coredump(p, vp, cred, chdr)
if (error)
return error;
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&state, sizeof(state),
(off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&state, sizeof(state),
(off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p);
if (!error)
chdr->c_nseg++;
if (!error)
chdr->c_nseg++;
return error;
return error;
}
/*
@ -322,6 +320,7 @@ void
cpu_swapin(p)
struct proc *p;
{
struct pte *pt;
u_int uarea, i, *j, rv;
uarea = (u_int)p->p_addr;
@ -329,16 +328,20 @@ cpu_swapin(p)
for (i = uarea;i < uarea + USPACE;i += PAGE_SIZE) {
j = (u_int *)kvtopte(i);
if ((*j & PG_V) == 0) {
#if defined(UVM)
rv = uvm_fault(kernel_map, i, 0,
VM_PROT_WRITE|VM_PROT_READ);
#else
rv = vm_fault(kernel_map, i,
VM_PROT_WRITE|VM_PROT_READ, FALSE);
#endif
if (rv != KERN_SUCCESS)
panic("cpu_swapin: rv %d",rv);
}
}
#ifdef notyet
j = (u_int *)kvtopte(uarea + 2 * NBPG);
*j = 0; /* Set kernel stack red zone */
#endif
pt = kvtopte(uarea + NBPG);
pt->pg_v = 0; /* Set kernel stack red zone */
}
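The red-zone code replaces the old #ifdef notyet sketch: cpu_fork() clears the valid bit on the page above p_addr, so a runaway kernel stack hits an invalid page and faults instead of silently running into the pcb below it. cpu_swapin() has to redo it in this order, first faulting the whole u-area back in and then knocking the guard page out again, because the wholesale fault-in would otherwise revalidate it. The guard itself is two lines:

	/* invalidate the guard page above the u-area (as in cpu_fork()
	 * and cpu_swapin() above) */
	pt = kvtopte((u_int)p->p_addr + NBPG);
	pt->pg_v = 0;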
#if VAX410 || VAX43
@ -349,32 +352,36 @@ cpu_swapin(p)
*/
void
vmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
struct buf *bp;
vm_size_t len;
{
vm_offset_t faddr, taddr, off, pa;
pmap_t fmap, tmap;
vm_offset_t faddr, taddr, off, pa;
pmap_t fmap, tmap;
if ((vax_boardtype != VAX_BTYP_43) && (vax_boardtype != VAX_BTYP_410))
return;
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
taddr = kmem_alloc_wait(phys_map, len);
bp->b_data = (caddr_t)(taddr + off);
fmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
tmap = vm_map_pmap(phys_map);
len = len >> PGSHIFT;
while (len--) {
pa = pmap_extract(fmap, faddr);
if (pa == 0)
panic("vmapbuf: null page frame for %x", (u_int)faddr);
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
#if defined(UVM)
taddr = uvm_km_valloc_wait(phys_map, len);
#else
taddr = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(taddr + off);
fmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
tmap = vm_map_pmap(phys_map);
len = len >> PGSHIFT;
while (len--) {
pa = pmap_extract(fmap, faddr);
if (pa == 0)
panic("vmapbuf: null page frame for %x", (u_int)faddr);
pmap_enter(tmap, taddr, pa & ~(NBPG - 1),
VM_PROT_READ|VM_PROT_WRITE, TRUE);
faddr += NBPG;
taddr += NBPG;
}
pmap_enter(tmap, taddr, pa & ~(NBPG - 1),
VM_PROT_READ|VM_PROT_WRITE, TRUE);
faddr += NBPG;
taddr += NBPG;
}
}
/*
@ -383,18 +390,22 @@ vmapbuf(bp, len)
*/
void
vunmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
struct buf *bp;
vm_size_t len;
{
vm_offset_t addr, off;
vm_offset_t addr, off;
if ((vax_boardtype != VAX_BTYP_43) && (vax_boardtype != VAX_BTYP_410))
return;
addr = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
kmem_free_wakeup(phys_map, addr, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
addr = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
#if defined(UVM)
uvm_km_free_wakeup(phys_map, addr, len);
#else
kmem_free_wakeup(phys_map, addr, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}
#endif
