add real vmapbuf/vunmapbuf routines. needed for VS4000 SCSI support.

matt 2000-03-07 00:05:59 +00:00
parent e42cc6e32e
commit e8c7c2f842
4 changed files with 72 additions and 7 deletions
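
For context before the diffs: vmapbuf() and vunmapbuf() are the machine-dependent hooks the MI physio path uses to make a user buffer addressable in kernel virtual space for the duration of a raw-device transfer, which is what the VS4000 SCSI support needs; the no-op #defines removed from param.h below were only placeholders. A rough, illustrative sketch of the kind of sequence the MI code performs around these hooks (p, bp, todo, flags, error and strategy are placeholder names for the process, the struct buf, the chunk length, the B_READ/B_WRITE flags, the return value and the driver strategy routine; error handling is omitted):

	/* wire the user pages so they stay resident for the transfer */
	uvm_vslock(p, bp->b_data, todo,
	    (flags & B_READ) ? VM_PROT_WRITE : VM_PROT_READ);
	vmapbuf(bp, todo);		/* bp->b_data now points into phys_map */
	(*strategy)(bp);		/* driver sees a kernel-virtual buffer */
	error = biowait(bp);		/* sleep until the transfer completes */
	vunmapbuf(bp, todo);		/* restore bp->b_data from b_saveaddr */
	uvm_vsunlock(p, bp->b_data, todo);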

param.h

@@ -1,4 +1,4 @@
-/* $NetBSD: param.h,v 1.41 2000/02/11 19:30:30 thorpej Exp $ */
+/* $NetBSD: param.h,v 1.42 2000/03/07 00:05:59 matt Exp $ */
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
@@ -73,6 +73,7 @@
 #define VAX_PGSHIFT 9
 #define VAX_NBPG (1 << VAX_PGSHIFT)
 #define VAX_PGOFSET (VAX_NBPG - 1)
+#define VAX_NPTEPG (VAX_NBPG / 4)
 #define KERNBASE 0x80000000 /* start of kernel virtual */
@@ -204,9 +205,6 @@
 #define spl6() splx(0x16)
 #define spl7() splx(0x17)
-#define vmapbuf(p,q)
-#define vunmapbuf(p,q)
 /* Prototype needed for delay() */
 #ifndef _LOCORE
 void delay __P((int));

vmparam.h

@@ -1,4 +1,4 @@
-/* $NetBSD: vmparam.h,v 1.31 2000/03/04 07:27:49 matt Exp $ */
+/* $NetBSD: vmparam.h,v 1.32 2000/03/07 00:05:59 matt Exp $ */
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -130,4 +130,7 @@ struct pmap_physseg {
 #define VM_MIN_KERNEL_ADDRESS ((vaddr_t)KERNBASE)
 #define VM_MAX_KERNEL_ADDRESS ((vaddr_t)(0xC0000000))
+#define USRIOSIZE (8 * VAX_NPTEPG) /* 512MB */
+#define VM_PHYS_SIZE (USRIOSIZE*VAX_NBPG)
 #endif
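
A quick sanity check on these numbers (my arithmetic, not part of the change): with VAX_PGSHIFT = 9 from param.h above, VAX_NBPG is 512 bytes and VAX_NPTEPG is 512 / 4 = 128, so USRIOSIZE comes to 8 * 128 = 1024 VAX pages and VM_PHYS_SIZE to 1024 * 512 bytes = 512KB of kernel virtual space reserved for the physio submap (the /* 512MB */ comment looks like it should read 512KB).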

machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.94 2000/03/04 07:27:49 matt Exp $ */
+/* $NetBSD: machdep.c,v 1.95 2000/03/07 00:05:59 matt Exp $ */
 /*
  * Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
@@ -133,6 +133,7 @@ static struct map iomap[IOMAPSZ];
 vm_map_t exec_map = NULL;
 vm_map_t mb_map = NULL;
+vm_map_t phys_map = NULL;
 #ifdef DEBUG
 int iospace_inited = 0;
@@ -234,6 +235,13 @@ cpu_startup()
 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 	    NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
+	/*
+	 * Allocate a submap for physio. This map effectively limits the
+	 * number of processes doing physio at any one time.
+	 */
+	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+	    VM_PHYS_SIZE, 0, FALSE, NULL);
 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 	printf("avail memory = %s\n", pbuf);
 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
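
The comment in this hunk is the key design note. vmapbuf(), added to vm_machdep.c below, carves its kernel addresses out of phys_map with uvm_km_valloc_wait(), which sleeps when the submap has no free space, and vunmapbuf() returns them with uvm_km_free_wakeup(), which wakes those sleepers; VM_PHYS_SIZE is therefore the throttle on concurrent physio. Assuming the usual 64KB MAXPHYS, the 512KB submap holds roughly eight maximal-sized transfers mapped at once before further callers have to wait.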

vm_machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.56 2000/01/20 22:19:00 sommerfeld Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.57 2000/03/07 00:05:59 matt Exp $ */
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -308,3 +308,59 @@ iounaccess(vaddr, npgs)
 		pte[i] = 0;
 	mtpr(0, PR_TBIA);
 }
+
+extern vm_map_t phys_map;
+
+/*
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
+ */
+void
+vmapbuf(bp, len)
+	struct buf *bp;
+	vsize_t len;
+{
+	vaddr_t faddr, taddr, off;
+	paddr_t pa;
+	struct proc *p;
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vmapbuf");
+	p = bp->b_proc;
+	faddr = trunc_page(bp->b_saveaddr = bp->b_data);
+	off = (vaddr_t)bp->b_data - faddr;
+	len = round_page(off + len);
+	taddr = uvm_km_valloc_wait(phys_map, len);
+	bp->b_data = (caddr_t)(taddr + off);
+	len = atop(len);
+	while (len--) {
+		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
+		    &pa) == FALSE)
+			panic("vmapbuf: null page frame");
+		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
+		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+		faddr += PAGE_SIZE;
+		taddr += PAGE_SIZE;
+	}
+}
+
+/*
+ * Unmap a previously-mapped user I/O request.
+ */
+void
+vunmapbuf(bp, len)
+	struct buf *bp;
+	vsize_t len;
+{
+	vaddr_t addr, off;
+
+	if ((bp->b_flags & B_PHYS) == 0)
+		panic("vunmapbuf");
+	addr = trunc_page(bp->b_data);
+	off = (vaddr_t)bp->b_data - addr;
+	len = round_page(off + len);
+	uvm_km_free_wakeup(phys_map, addr, len);
+	bp->b_data = bp->b_saveaddr;
+	bp->b_saveaddr = NULL;
+}
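
To make the address arithmetic in vmapbuf() concrete, a made-up example assuming the 4KB VM page size the VAX port presents to UVM: for a user buffer at 0x00012345 with len 0x2000, faddr = trunc_page(0x00012345) = 0x00012000 and off = 0x345, so len rounds up to round_page(0x2345) = 0x3000 and atop(len) gives three pages to look up with pmap_extract() in the user pmap and enter wired into phys_map. bp->b_data becomes taddr + 0x345, preserving the offset within the first page, while b_saveaddr keeps the original user address so vunmapbuf() can restore it after uvm_km_free_wakeup() releases the kernel virtual addresses.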