Generally update the comment above the vmapbuf() implementations.

thorpej 1999-05-26 22:07:36 +00:00
parent 6b655611b1
commit a2d06a4721
18 changed files with 76 additions and 147 deletions
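For context, all eighteen files converge on the pattern the new comment describes. The sketch below is a hypothetical composite in the style of the m68k/i386 implementations, not a verbatim copy of any file in this commit; it assumes the 1999-era interfaces (uvm_km_valloc_wait(), pmap_extract() returning a physical address, and pmap_enter() taking a trailing access_type argument). Because physio() has already called uvm_vslock() on the user buffer, the pages are resident and wired, so vmapbuf() only has to double-map them into phys_map and can pass 0 for access_type rather than simulating a fault.

/*
 * Hypothetical sketch only -- not the committed code.  Map a user I/O
 * request into kernel virtual address space.  The pages are already
 * locked by uvm_vslock(), so no access_type is passed to pmap_enter().
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>

#include <vm/vm.h>
#include <uvm/uvm_extern.h>

extern vm_map_t phys_map;

void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);

	/* Allocate kernel VA in phys_map for the double mapping. */
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);

	/*
	 * uvm_vslock() has already faulted the pages in and wired them,
	 * so pmap_extract() is expected to succeed and access_type
	 * (the last pmap_enter() argument) can simply be 0.
	 */
	while (len > 0) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, pa,
		    VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}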

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.48 1999/05/26 00:37:40 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.49 1999/05/26 22:07:36 thorpej Exp $ */
 /*
  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -29,7 +29,7 @@
 #include <sys/cdefs.h>		/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.48 1999/05/26 00:37:40 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.49 1999/05/26 22:07:36 thorpej Exp $");
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -349,6 +349,8 @@ extern vm_map_t phys_map;
 /*
  * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.44 1999/05/14 02:11:59 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.45 1999/05/26 22:07:37 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -344,12 +344,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.43 1999/05/26 00:40:20 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.44 1999/05/26 22:07:38 thorpej Exp $ */
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -312,24 +312,10 @@ pagemove(from, to, size)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space. Requests fall into
- * one of five catagories:
- *
- *    B_PHYS|B_UAREA: User u-area swap.
- *                    Address is relative to start of u-area (p_addr).
- *    B_PHYS|B_PAGET: User page table swap.
- *                    Address is a kernel VA in usrpt (Usrptmap).
- *    B_PHYS|B_DIRTY: Dirty page push.
- *                    Address is a VA in proc2's address space.
- *    B_PHYS|B_PGIN:  Kernel pagein of user pages.
- *                    Address is VA in user's address space.
- *    B_PHYS:         User "raw" IO request.
- *                    Address is VA in user's address space.
- *
- * All requests are (re)mapped into kernel VA space via the useriomap
- * (a name with only slightly more meaning than "kernelmap")
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)
 	struct buf *bp;

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.25 1999/05/14 02:11:59 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.26 1999/05/26 22:07:38 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -342,12 +342,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.48 1999/05/13 21:58:33 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.49 1999/05/26 22:07:38 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -298,12 +298,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.78 1999/05/13 21:58:34 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.79 1999/05/26 22:07:38 thorpej Exp $ */
 /*-
  * Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@@ -375,26 +375,11 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space. Requests fall into
- * one of five catagories:
- *
- *    B_PHYS|B_UAREA: User u-area swap.
- *                    Address is relative to start of u-area (p_addr).
- *    B_PHYS|B_PAGET: User page table swap.
- *                    Address is a kernel VA in usrpt (Usrptmap).
- *    B_PHYS|B_DIRTY: Dirty page push.
- *                    Address is a VA in proc2's address space.
- *    B_PHYS|B_PGIN:  Kernel pagein of user pages.
- *                    Address is VA in user's address space.
- *    B_PHYS:         User "raw" IO request.
- *                    Address is VA in user's address space.
- *
- * All requests are (re)mapped into kernel VA space via the phys_map
- * (a name with only slightly more meaning than "kernel_map")
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 #if defined(PMAP_NEW)
 void
 vmapbuf(bp, len)
 	struct buf *bp;
@@ -432,9 +417,7 @@ vmapbuf(bp, len)
 		len -= PAGE_SIZE;
 	}
 }
 #else /* PMAP_NEW */
 void
 vmapbuf(bp, len)
 	struct buf *bp;
@@ -462,8 +445,7 @@ vmapbuf(bp, len)
 		len -= PAGE_SIZE;
 	} while (len);
 }
-#endif
+#endif /* PMAP_NEW */
 /*
  * Free the io map PTEs associated with this IO operation.

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.38 1999/05/14 02:12:00 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.39 1999/05/26 22:07:39 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -296,12 +296,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.37 1999/05/14 02:12:00 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.38 1999/05/26 22:07:39 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -43,7 +43,7 @@
  */
 #include <sys/cdefs.h>		/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.37 1999/05/14 02:12:00 nisimura Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.38 1999/05/26 22:07:39 thorpej Exp $");
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -299,10 +299,9 @@ pagemove(from, to, size)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * Called by physio() in kern/kern_physio.c for raw device I/O
- * between user address and device driver bypassing filesystem cache.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.22 1999/05/14 02:12:00 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.23 1999/05/26 22:07:39 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -366,12 +366,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.10 1999/05/13 21:58:34 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.11 1999/05/26 22:07:39 thorpej Exp $ */
 /*
  * This file was taken from mvme68k/mvme68k/vm_machdep.c
@@ -377,12 +377,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.32 1999/05/13 21:58:35 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.33 1999/05/26 22:07:40 thorpej Exp $ */
 /*-
  * Copyright (c) 1996 Matthias Pfaller.
@@ -341,22 +341,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space. Requests fall into
- * one of five catagories:
- *
- *    B_PHYS|B_UAREA: User u-area swap.
- *                    Address is relative to start of u-area (p_addr).
- *    B_PHYS|B_PAGET: User page table swap.
- *                    Address is a kernel VA in usrpt (Usrptmap).
- *    B_PHYS|B_DIRTY: Dirty page push.
- *                    Address is a VA in proc2's address space.
- *    B_PHYS|B_PGIN:  Kernel pagein of user pages.
- *                    Address is VA in user's address space.
- *    B_PHYS:         User "raw" IO request.
- *                    Address is VA in user's address space.
- *
- * All requests are (re)mapped into kernel VA space via the useriomap
- * (a name with only slightly more meaning than "kernelmap")
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 #if defined(PMAP_NEW)
 void
@@ -397,9 +384,7 @@ vmapbuf(bp, len)
 		len -= PAGE_SIZE;
 	}
 }
 #else /* PMAP_NEW */
 void
 vmapbuf(bp, len)
 	struct buf *bp;
@@ -427,7 +412,7 @@ vmapbuf(bp, len)
 		len -= PAGE_SIZE;
 	} while (len);
 }
-#endif
+#endif /* PMAP_NEW */
 /*
  * Free the io map PTEs associated with this IO operation.

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.5 1999/03/26 23:41:34 mycroft Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.6 1999/05/26 22:07:40 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -233,21 +233,9 @@ pagemove(from, to, size)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space. Requests fall into
- * one of five catagories:
- *
- *    B_PHYS|B_UAREA: User u-area swap.
- *                    Address is relative to start of u-area (p_addr).
- *    B_PHYS|B_PAGET: User page table swap.
- *                    Address is a kernel VA in usrpt (Usrptmap).
- *    B_PHYS|B_DIRTY: Dirty page push.
- *                    Address is a VA in proc2's address space.
- *    B_PHYS|B_PGIN:  Kernel pagein of user pages.
- *                    Address is VA in user's address space.
- *    B_PHYS:         User "raw" IO request.
- *                    Address is VA in user's address space.
- *
- * All requests are (re)mapped into kernel VA space via the phys_map
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 vmapbuf(bp)
 	register struct buf *bp;

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.10 1999/05/13 21:58:35 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.11 1999/05/26 22:07:40 thorpej Exp $ */
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -225,7 +225,9 @@ cpu_coredump(p, vp, cred, chdr)
 }
 /*
- * Map an IO request into kernel virtual address space.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.45 1999/05/13 21:58:35 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.46 1999/05/26 22:07:41 thorpej Exp $ */
 /*
  * Copyright (c) 1996
@@ -97,7 +97,9 @@ pagemove(from, to, size)
 /*
- * Map an IO request into kernel virtual address space.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.16 1999/05/13 21:58:36 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.17 1999/05/26 22:07:41 thorpej Exp $ */
 /*
  * Copyright (c) 1996
@@ -108,7 +108,9 @@ pagemove(from, to, size)
 }
 /*
- * Map an IO request into kernel virtual address space.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.51 1999/05/14 02:12:00 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.52 1999/05/26 22:07:41 thorpej Exp $ */
 /*
  * Copyright (c) 1994, 1995 Gordon W. Ross
@@ -336,11 +336,9 @@ pagemove(from, to, len)
 }
 /*
- * Map a user-space I/O request into kernel virtual address space.
- * NB: We have DVMA, and therefore need no separate phys_map.
- *
- * This routine has user context and can sleep
- * (called only by physio).
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.49 1999/05/13 21:58:36 thorpej Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.50 1999/05/26 22:07:41 thorpej Exp $ */
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -283,6 +283,10 @@ cpu_swapin(p)
 #if VAX410 || VAX43
 /*
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
+ *
  * vmapbuf()/vunmapbuf() only used on some vaxstations without
  * any busadapter with MMU.
  * XXX - This must be reworked to be effective.

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vm_machdep.c,v 1.21 1999/05/14 02:11:59 nisimura Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.22 1999/05/26 22:07:42 thorpej Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
@@ -296,12 +296,9 @@ kvtop(addr)
 extern vm_map_t phys_map;
 /*
- * Map an IO request into kernel virtual address space.
- *
- * XXX we allocate KVA space by using kmem_alloc_wait which we know
- * allocates space without backing physical memory. This implementation
- * is a total crock, the multiple mappings of these physical pages should
- * be reflected in the higher-level VM structures to avoid problems.
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
  */
 void
 vmapbuf(bp, len)