Major rewriting, optimization and simplifying of the pmap code:

- Map in all physical memory first in system space. This reduces
  pmap_copy_page() to 3 instructions and pmap_zero_page() to 2.
- Have fixed-position user page tables. Makes the pv_table smaller
  and all reverse references faster (and simpler).
- Remove the wiring code. Nobody even knows what a DR32 is anymore.
- Simulate the page reference bit by setting the page invalid, as suggested
  by Rich Draves in a paper at the 1991 Mach Usenix Symposium.

This reduced the time spent in the pmap module to 70-75% of its
previous value, and made process startup _much_ faster.
This commit is contained in:
ragge 1997-11-02 14:25:19 +00:00
parent d96a1a939d
commit ae27edc757
4 changed files with 300 additions and 692 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: param.h,v 1.26 1997/10/19 20:48:47 ragge Exp $ */
/* $NetBSD: param.h,v 1.27 1997/11/02 14:25:19 ragge Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
@ -142,7 +142,7 @@
/* clicks to bytes */
#define ctob(x) ((x) << PGSHIFT)
#define btoc(x) (((x) + PGOFSET) >> PGSHIFT)
#define btoc(x) (((unsigned)(x) + PGOFSET) >> PGSHIFT)
#define btop(x) (((unsigned)(x)) >> PGSHIFT)
/* bytes to disk blocks */

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.19 1997/07/06 22:38:29 ragge Exp $ */
/* $NetBSD: pmap.h,v 1.20 1997/11/02 14:25:20 ragge Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@ -48,23 +48,16 @@
#include <machine/mtpr.h>
#define VAX_PAGE_SIZE NBPG
#define VAX_SEG_SIZE NBSEG
/*
* Pmap structure
*
* p0br == PR_P0BR in user struct, p0br is also == SBR in pmap_kernel()
* p1br is the same for stack space, stack is base of alloced pte mem
* Pmap structure
* pm_stack holds lowest allocated memory for the process stack.
* pm_pcb is a pointer to the corresponding pcb.
*/
typedef struct pmap {
vm_offset_t pm_stack; /* Base of alloced p1 pte space */
struct pcb *pm_pcb; /* Pointer to PCB for this pmap */
int ref_count; /* reference count */
struct pmap_statistics stats; /* statistics */
simple_lock_data_t lock; /* lock on pmap */
} *pmap_t;
/*
@ -74,15 +67,9 @@ typedef struct pmap {
typedef struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
struct pmap *pv_pmap;/* if not NULL, pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
int pv_flags; /* flags */
struct pte *pv_pte; /* pte for this physical page */
} *pv_entry_t;
#define PV_REF 0x00000001 /* Simulated phys ref bit */
#define PHYS_TO_PV(phys_page) (&pv_table[((phys_page)>>PAGE_SHIFT)])
/* ROUND_PAGE used before vm system is initialized */
#define ROUND_PAGE(x) (((uint)(x) + PAGE_SIZE-1)& ~(PAGE_SIZE - 1))
#define TRUNC_PAGE(x) ((uint)(x) & ~(PAGE_SIZE - 1))
@ -93,15 +80,10 @@ typedef struct pv_entry {
virtual_avail += (count) * NBPG;
#define MAPPHYS(ptr, count, perm) \
pmap_map(virtual_avail, avail_start, avail_start + \
(count) * NBPG, perm); \
(vm_offset_t)ptr = virtual_avail; \
virtual_avail += (count) * NBPG; \
(vm_offset_t)ptr = avail_start + KERNBASE; \
avail_start += (count) * NBPG;
#ifdef _KERNEL
#define pa_index(pa) atop(pa)
#define pa_to_pvh(pa) (&pv_table[atop(pa)])
extern struct pmap kernel_pmap_store;
@ -117,10 +99,21 @@ extern struct pmap kernel_pmap_store;
#define pmap_reference(pmap) if(pmap) (pmap)->ref_count++
#define pmap_pinit(pmap) (pmap)->ref_count=1;
#define pmap_phys_address(phys) ((u_int)(phys)<<PAGE_SHIFT)
#define pmap_is_referenced(phys) (FALSE)
#define pmap_clear_reference(pa) pmap_page_protect(pa, VM_PROT_NONE)
#define pmap_change_wiring(pmap, v, w) /* no need */
#define pmap_remove(pmap, start, slut) pmap_protect(pmap, start, slut, 0)
/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst) \
asm("addl3 $0x80000000,%0,r0;addl3 $0x80000000,%1,r1; \
movc3 $1024,(r0),(r1)" \
:: "r"(src),"r"(dst):"r0","r1","r2","r3","r4","r5");
#define pmap_zero_page(phys) \
asm("addl3 $0x80000000,%0,r0;movc5 $0,(r0),$0,$1024,(r0)" \
:: "r"(phys): "r0","r1","r2","r3","r4","r5");
/* Prototypes */
void pmap_bootstrap __P((void));
void pmap_expandp0 __P((struct pmap *, int));
void pmap_expandp1 __P((struct pmap *));
#endif PMAP_H

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.9 1996/04/08 18:32:48 ragge Exp $ */
/* $NetBSD: mem.c,v 1.10 1997/11/02 14:25:21 ragge Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -57,7 +57,7 @@
#include <vm/vm.h>
extern unsigned int vmmap, avail_end;
extern unsigned int avail_end;
caddr_t zeropage;
int mmopen __P((dev_t, int, int));
@ -93,23 +93,11 @@ mmrw(dev, uio, flags)
struct uio *uio;
int flags;
{
register vm_offset_t o, v;
register vm_offset_t v;
register int c;
register struct iovec *iov;
int error = 0;
static int physlock;
if (minor(dev) == 0) {
/* lock against other uses of shared vmmap */
while (physlock > 0) {
physlock++;
error = tsleep((caddr_t)&physlock, PZERO | PCATCH,
"mmrw", 0);
if (error)
return (error);
}
physlock = 1;
}
while (uio->uio_resid > 0 && error == 0) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
@ -125,18 +113,11 @@ mmrw(dev, uio, flags)
case 0:
v = uio->uio_offset;
if (v < 0 || v >= avail_end) {
error = EFAULT;
goto unlock;
return (EFAULT);
}
pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
trunc_page(v), uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE, TRUE);
o = uio->uio_offset & PAGE_MASK;
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
(vm_offset_t)vmmap + PAGE_SIZE);
c = min(iov->iov_len, MAXPHYS);
error = uiomove((caddr_t)v + KERNBASE, c, uio);
continue;
/* minor device 1 is kernel memory */
case 1:
@ -179,12 +160,6 @@ mmrw(dev, uio, flags)
uio->uio_offset += c;
uio->uio_resid -= c;
}
if (minor(dev) == 0) {
unlock:
if (physlock > 1)
wakeup((caddr_t)&physlock);
physlock = 0;
}
return (error);
}

File diff suppressed because it is too large Load Diff