Remove emap support. Unfortunately it never got to a state where it would be
used and usable, due to reliability issues and limited & complicated MD support.

Going forward, we need to concentrate on interfaces which do not map anything
into the kernel in the first place (such as the direct map or KVA-less I/O),
rather than making those mappings cheaper to do.
jdolecek 2018-05-19 11:39:37 +00:00
parent 1f4255bff3
commit 482e5d893a
9 changed files with 26 additions and 542 deletions
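
For context, the generic fallback this change adopts (visible in the sys_pipe.c hunks below) is a plain pmap_kenter_pa()/pmap_kremove() loop on the kernel pmap. The sketch that follows is illustrative only and not code from this commit; the helper names and the kva/pgs/npages parameters are assumptions, standing in for values the caller would obtain from e.g. uvm_km_alloc() and uvm_loan().

/*
 * Illustrative sketch: enter loaned pages into a kernel VA window,
 * then tear the window down again, mirroring the sys_pipe.c change.
 */
#include <sys/param.h>
#include <uvm/uvm.h>

static void
ephemeral_map(vaddr_t kva, struct vm_page **pgs, u_int npages)
{
	for (u_int i = 0; i < npages; i++, kva += PAGE_SIZE)
		pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[i]),
		    VM_PROT_READ, 0);
	pmap_update(pmap_kernel());	/* make the mappings visible */
}

static void
ephemeral_unmap(vaddr_t kva, u_int npages)
{
	pmap_kremove(kva, ptoa(npages));	/* drop the mappings */
	pmap_update(pmap_kernel());	/* flush stale TLB entries */
}

Compared with the removed emap path, this always pays for the TLB shootdown up front instead of deferring it behind a generation counter.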

View File: pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.77 2018/05/08 17:20:44 maxv Exp $ */
/* $NetBSD: pmap.h,v 1.78 2018/05/19 11:39:37 jdolecek Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -366,8 +366,6 @@ void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void pmap_tlb_shootnow(void);
void pmap_tlb_intr(void);
#define __HAVE_PMAP_EMAP
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
#define PMAP_FORK /* turn on pmap_fork interface */

View File: pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.289 2018/03/04 23:25:35 jdolecek Exp $ */
/* $NetBSD: pmap.c,v 1.290 2018/05/19 11:39:37 jdolecek Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@ -170,7 +170,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.289 2018/03/04 23:25:35 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.290 2018/05/19 11:39:37 jdolecek Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -1016,67 +1016,6 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
}
}
void
pmap_emap_enter(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
pt_entry_t *pte, npte;
KASSERT((prot & ~VM_PROT_ALL) == 0);
pte = (va < VM_MIN_KERNEL_ADDRESS) ? vtopte(va) : kvtopte(va);
#ifdef DOM0OPS
if (pa < pmap_pa_start || pa >= pmap_pa_end) {
npte = pa;
} else
#endif
npte = pmap_pa2pte(pa);
npte |= protection_codes[prot] | PG_V;
pmap_pte_set(pte, npte);
pmap_pte_flush();
}
/*
* pmap_emap_sync: perform TLB flush or pmap load, if it was deferred.
*/
void
pmap_emap_sync(bool canload)
{
struct cpu_info *ci = curcpu();
struct pmap *pmap;
KASSERT(kpreempt_disabled());
if (__predict_true(ci->ci_want_pmapload && canload)) {
/*
* XXX: Hint for pmap_reactivate(), which might suggest to
* not perform TLB flush, if state has not changed.
*/
pmap = vm_map_pmap(&curlwp->l_proc->p_vmspace->vm_map);
if (__predict_false(pmap == ci->ci_pmap)) {
kcpuset_atomic_clear(pmap->pm_cpus, cpu_index(ci));
}
pmap_load();
KASSERT(ci->ci_want_pmapload == 0);
} else {
tlbflush();
}
}
void
pmap_emap_remove(vaddr_t sva, vsize_t len)
{
pt_entry_t *pte;
vaddr_t va, eva = sva + len;
for (va = sva; va < eva; va += PAGE_SIZE) {
pte = (va < VM_MIN_KERNEL_ADDRESS) ? vtopte(va) : kvtopte(va);
pmap_pte_set(pte, 0);
}
pmap_pte_flush();
}
__strict_weak_alias(pmap_kenter_ma, pmap_kenter_pa);
#if defined(__x86_64__)
@ -2925,9 +2864,7 @@ pmap_reactivate(struct pmap *pmap)
*/
kcpuset_atomic_set(pmap->pm_cpus, cid);
u_int gen = uvm_emap_gen_return();
tlbflush();
uvm_emap_update(gen);
}
}
@ -3027,9 +2964,7 @@ pmap_load(void)
lldt(pmap->pm_ldt_sel);
u_int gen = uvm_emap_gen_return();
cpu_load_pmap(pmap, oldpmap);
uvm_emap_update(gen);
ci->ci_want_pmapload = 0;

View File: kern_synch.c

@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.314 2018/02/16 07:04:51 ozaki-r Exp $ */
/* $NetBSD: kern_synch.c,v 1.315 2018/05/19 11:39:37 jdolecek Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.314 2018/02/16 07:04:51 ozaki-r Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.315 2018/05/19 11:39:37 jdolecek Exp $");
#include "opt_kstack.h"
#include "opt_perfctrs.h"
@ -730,7 +730,6 @@ mi_switch(lwp_t *l)
* Restore VM context and IPL.
*/
pmap_activate(l);
uvm_emap_switch(l);
pcu_switchpoint(l);
if (prevlwp != NULL) {

View File: sys_pipe.c

@ -1,4 +1,4 @@
/* $NetBSD: sys_pipe.c,v 1.144 2018/04/20 19:02:18 jdolecek Exp $ */
/* $NetBSD: sys_pipe.c,v 1.145 2018/05/19 11:39:37 jdolecek Exp $ */
/*-
* Copyright (c) 2003, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.144 2018/04/20 19:02:18 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.145 2018/05/19 11:39:37 jdolecek Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -92,8 +92,6 @@ __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.144 2018/04/20 19:02:18 jdolecek Exp
#include <sys/atomic.h>
#include <sys/pipe.h>
#include <uvm/uvm_extern.h>
/*
* Use this to disable direct I/O and decrease the code size:
* #define PIPE_NODIRECT
@ -102,6 +100,10 @@ __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.144 2018/04/20 19:02:18 jdolecek Exp
/* XXX Disabled for now; rare hangs switching between direct/buffered */
#define PIPE_NODIRECT
#ifndef PIPE_NODIRECT
#include <uvm/uvm.h>
#endif
static int pipe_read(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int pipe_write(file_t *, off_t *, struct uio *, kauth_cred_t, int);
static int pipe_close(file_t *);
@ -509,7 +511,6 @@ again:
* Direct copy, bypassing a kernel buffer.
*/
void *va;
u_int gen;
KASSERT(rpipe->pipe_state & PIPE_DIRECTW);
@ -518,15 +519,8 @@ again:
size = uio->uio_resid;
va = (char *)rmap->kva + rmap->pos;
gen = rmap->egen;
mutex_exit(lock);
/*
* Consume emap and read the data from loaned pages.
*/
uvm_emap_consume(gen);
error = uiomove(va, size, uio);
mutex_enter(lock);
if (error)
break;
@ -660,7 +654,6 @@ pipe_loan_free(struct pipe *wpipe)
struct pipemapping * const wmap = &wpipe->pipe_map;
const vsize_t len = ptoa(wmap->npages);
uvm_emap_remove(wmap->kva, len); /* XXX */
uvm_km_free(kernel_map, wmap->kva, len, UVM_KMF_VAONLY);
wmap->kva = 0;
atomic_add_int(&amountpipekva, -len);
@ -746,10 +739,12 @@ pipe_direct_write(file_t *fp, struct pipe *wpipe, struct uio *uio)
return (ENOMEM); /* so that caller fallback to ordinary write */
}
/* Enter the loaned pages to KVA, produce new emap generation number. */
uvm_emap_enter(wmap->kva + ptoa(starting_color), pgs, npages,
VM_PROT_READ);
wmap->egen = uvm_emap_produce();
/* Enter the loaned pages to kva */
vaddr_t kva = wpipe->pipe_map.kva;
for (int j = 0; j < npages; j++, kva += PAGE_SIZE) {
pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ, 0);
}
pmap_update(pmap_kernel());
/* Now we can put the pipe in direct write mode */
wmap->pos = bpos + ptoa(starting_color);
@ -791,7 +786,8 @@ pipe_direct_write(file_t *fp, struct pipe *wpipe, struct uio *uio)
mutex_exit(lock);
if (pgs != NULL) {
/* XXX: uvm_emap_remove */
pmap_kremove(wpipe->pipe_map.kva, blen);
pmap_update(pmap_kernel());
uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
}
if (error || amountpipekva > maxpipekva)
@ -813,7 +809,7 @@ pipe_direct_write(file_t *fp, struct pipe *wpipe, struct uio *uio)
return (error);
}
bcnt -= wpipe->cnt;
bcnt -= wmap->cnt;
}
uio->uio_resid -= bcnt;
@ -918,7 +914,7 @@ pipe_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
*/
if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
(fp->f_flag & FNONBLOCK) == 0 &&
(wmap->kva || (amountpipekva < limitpipekva))) {
(wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
error = pipe_direct_write(fp, wpipe, uio);
/*

View File: pipe.h

@ -1,4 +1,4 @@
/* $NetBSD: pipe.h,v 1.33 2016/01/22 23:38:45 dholland Exp $ */
/* $NetBSD: pipe.h,v 1.34 2018/05/19 11:39:37 jdolecek Exp $ */
/*
* Copyright (c) 1996 John S. Dyson
@ -82,7 +82,6 @@ struct pipemapping {
voff_t pos; /* current position within page */
int npages; /* how many pages allocated */
struct vm_page **pgs; /* pointers to the pages */
u_int egen; /* emap generation number */
};
/*

View File: files.uvm

@ -1,4 +1,4 @@
# $NetBSD: files.uvm,v 1.28 2016/12/23 07:15:28 cherry Exp $
# $NetBSD: files.uvm,v 1.29 2018/05/19 11:39:37 jdolecek Exp $
#
# UVM options
@ -25,7 +25,6 @@ file uvm/uvm_aobj.c uvm
file uvm/uvm_bio.c uvm
file uvm/uvm_coredump.c coredump
file uvm/uvm_device.c uvm
file uvm/uvm_emap.c uvm
file uvm/uvm_fault.c uvm
file uvm/uvm_glue.c uvm
file uvm/uvm_init.c uvm

View File: uvm_emap.c

@ -1,410 +0,0 @@
/* $NetBSD: uvm_emap.c,v 1.13 2018/04/20 19:02:18 jdolecek Exp $ */
/*-
* Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Mindaugas Rasiukevicius and Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* UVM ephemeral mapping interface.
*/
/*
* Overview:
*
* On multiprocessor systems, frequent use of pmap_kenter_pa/pmap_kremove
* for ephemeral mappings is not desirable, because it likely involves
* TLB flush IPIs, since pmap_kernel() is shared among all LWPs.
* This interface can be used instead, to reduce the number of IPIs.
*
* For a single-page mapping, PMAP_DIRECT_MAP is likely a better choice
* if available. (__HAVE_DIRECT_MAP)
*/
/*
* How to use:
*
* Map pages at the address:
*
* uvm_emap_enter(va, pgs, npages, VM_PROT_READ);
* gen = uvm_emap_produce();
*
* Read pages via the mapping:
*
* uvm_emap_consume(gen);
* some_access(va);
*
* After finishing using the mapping:
*
* uvm_emap_remove(va, len);
*/
/*
* Notes for pmap developers:
*
* Generic (more expensive) stubs are implemented for architectures which
* do not support emap.
*
* Note that uvm_emap_update() is called from the lower pmap(9) layer, while
* the other functions call into pmap(9). The typical update pattern in pmap is:
*
* u_int gen = uvm_emap_gen_return();
* tlbflush();
* uvm_emap_update();
*
* It is also used from IPI context, therefore functions must be safe.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_emap.c,v 1.13 2018/04/20 19:02:18 jdolecek Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/vmem.h>
#include <sys/types.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
/* XXX: Arbitrary. */
#ifdef _LP64
#define UVM_EMAP_SIZE (128 * 1024 * 1024) /* 128 MB */
#else
#define UVM_EMAP_SIZE (32 * 1024 * 1024) /* 32 MB */
#endif
static u_int _uvm_emap_gen[COHERENCY_UNIT - sizeof(u_int)]
__aligned(COHERENCY_UNIT);
#define uvm_emap_gen (_uvm_emap_gen[0])
u_int uvm_emap_size = UVM_EMAP_SIZE;
static vaddr_t uvm_emap_va;
static vmem_t * uvm_emap_vmem;
/*
* uvm_emap_init: initialize subsystem.
*/
void
uvm_emap_sysinit(void)
{
struct uvm_cpu *ucpu;
/* size_t qmax; */
u_int i;
uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE);
#if 0
qmax = 16 * PAGE_SIZE;
uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0,
UVM_KMF_VAONLY | UVM_KMF_WAITVA);
if (uvm_emap_va == 0) {
panic("uvm_emap_init: KVA allocation failed");
}
uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size,
PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE);
if (uvm_emap_vmem == NULL) {
panic("uvm_emap_init: vmem creation failed");
}
#else
uvm_emap_va = 0;
uvm_emap_vmem = NULL;
#endif
/* Initial generation value is 1. */
uvm_emap_gen = 1;
for (i = 0; i < maxcpus; i++) {
ucpu = uvm.cpus[i];
if (ucpu != NULL) {
ucpu->emap_gen = 1;
}
}
}
/*
* uvm_emap_alloc: allocate a window.
*/
vaddr_t
uvm_emap_alloc(vsize_t size, bool waitok)
{
vmem_addr_t addr;
KASSERT(size > 0);
KASSERT(round_page(size) == size);
if (vmem_alloc(uvm_emap_vmem, size,
VM_INSTANTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP), &addr) == 0)
return (vaddr_t)addr;
return (vaddr_t)0;
}
/*
* uvm_emap_free: free a window.
*/
void
uvm_emap_free(vaddr_t va, size_t size)
{
KASSERT(va >= uvm_emap_va);
KASSERT(size <= uvm_emap_size);
KASSERT(va + size <= uvm_emap_va + uvm_emap_size);
vmem_free(uvm_emap_vmem, va, size);
}
#ifdef __HAVE_PMAP_EMAP
/*
* uvm_emap_enter: enter a new mapping, without TLB flush.
*/
void
uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages, vm_prot_t prot)
{
paddr_t pa;
u_int n;
for (n = 0; n < npages; n++, va += PAGE_SIZE) {
pa = VM_PAGE_TO_PHYS(pgs[n]);
pmap_emap_enter(va, pa, prot);
}
}
/*
* uvm_emap_remove: remove a mapping.
*/
void
uvm_emap_remove(vaddr_t sva, vsize_t len)
{
pmap_emap_remove(sva, len);
}
/*
* uvm_emap_gen_return: get the global generation number.
*
* => can be called from IPI handler, therefore function must be safe.
*/
u_int
uvm_emap_gen_return(void)
{
u_int gen;
gen = uvm_emap_gen;
if (__predict_false(gen == UVM_EMAP_INACTIVE)) {
/*
* Instead of looping, just increment it on our side.
* Another thread could race and increment it again,
* but without any negative effect.
*/
gen = atomic_inc_uint_nv(&uvm_emap_gen);
}
KASSERT(gen != UVM_EMAP_INACTIVE);
return gen;
}
/*
* uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility,
* perform TLB flush and thus update the local view. Main purpose is
* to handle kernel preemption, while emap is in use.
*
* => called from mi_switch(), when LWP returns after block or preempt.
*/
void
uvm_emap_switch(lwp_t *l)
{
struct uvm_cpu *ucpu;
u_int curgen, gen;
KASSERT(kpreempt_disabled());
/* If LWP did not use emap, then nothing to do. */
if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) {
return;
}
/*
* No need to synchronise if generation number of current CPU is
* newer than the number of this LWP.
*
* This test assumes two's complement arithmetic and allows
* ~2B missed updates before it will produce bad results.
*/
ucpu = curcpu()->ci_data.cpu_uvm;
curgen = ucpu->emap_gen;
gen = l->l_emap_gen;
if (__predict_true((signed int)(curgen - gen) >= 0)) {
return;
}
/*
* See comments in uvm_emap_consume() about memory
* barriers and race conditions.
*/
curgen = uvm_emap_gen_return();
pmap_emap_sync(false);
ucpu->emap_gen = curgen;
}
/*
* uvm_emap_consume: update the current CPU and LWP to the given generation
* of the emap. In a case of LWP migration to a different CPU after block
* or preempt, uvm_emap_switch() will synchronise.
*
* => may be called from both interrupt and thread context.
*/
void
uvm_emap_consume(u_int gen)
{
struct cpu_info *ci;
struct uvm_cpu *ucpu;
lwp_t *l = curlwp;
u_int curgen;
if (gen == UVM_EMAP_INACTIVE) {
return;
}
/*
* No need to synchronise if generation number of current CPU is
* newer than the number of this LWP.
*
* This test assumes two's complement arithmetic and allows
* ~2B missed updates before it will produce bad results.
*/
kpreempt_disable();
ci = l->l_cpu;
ucpu = ci->ci_data.cpu_uvm;
if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) {
l->l_emap_gen = ucpu->emap_gen;
kpreempt_enable();
return;
}
/*
* Record the current generation _before_ issuing the TLB flush.
* No need for a memory barrier before, as reading a stale value
* for uvm_emap_gen is not a problem.
*
* pmap_emap_sync() must implicitly perform a full memory barrier,
* which prevents us from fetching a value from after the TLB flush
* has occurred (which would be bad).
*
* We can race with an interrupt on the current CPU updating the
* counter to a newer value. This could cause us to set a stale
* value into ucpu->emap_gen, overwriting a newer update from the
* interrupt. However, it does not matter since:
* (1) Interrupts always run to completion or block.
* (2) Interrupts will only ever install a newer value and,
* (3) We will roll the value forward later.
*/
curgen = uvm_emap_gen_return();
pmap_emap_sync(true);
ucpu->emap_gen = curgen;
l->l_emap_gen = curgen;
KASSERT((signed int)(curgen - gen) >= 0);
kpreempt_enable();
}
/*
* uvm_emap_produce: increment emap generation counter.
*
* => pmap updates must be globally visible.
* => caller must have already entered mappings.
* => may be called from both interrupt and thread context.
*/
u_int
uvm_emap_produce(void)
{
u_int gen;
again:
gen = atomic_inc_uint_nv(&uvm_emap_gen);
if (__predict_false(gen == UVM_EMAP_INACTIVE)) {
goto again;
}
return gen;
}
/*
* uvm_emap_update: update global emap generation number for current CPU.
*
* Function is called by MD code (eg. pmap) to take advantage of TLB flushes
* initiated for other reasons, that sync the emap as a side effect. Note
* update should be performed before the actual TLB flush, to avoid race
* with newly generated number.
*
* => can be called from IPI handler, therefore function must be safe.
* => should be called _after_ TLB flush.
* => emap generation number should be taken _before_ TLB flush.
* => must be called with preemption disabled.
*/
void
uvm_emap_update(u_int gen)
{
struct uvm_cpu *ucpu;
/*
* See comments in uvm_emap_consume() about memory barriers and
* race conditions. Store is atomic if emap_gen size is word.
*/
CTASSERT(sizeof(ucpu->emap_gen) == sizeof(int));
/* XXX: KASSERT(kpreempt_disabled()); */
ucpu = curcpu()->ci_data.cpu_uvm;
ucpu->emap_gen = gen;
}
#else
/*
* Stubs for architectures which do not support emap.
*/
void
uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages, vm_prot_t prot)
{
paddr_t pa;
u_int n;
for (n = 0; n < npages; n++, va += PAGE_SIZE) {
pa = VM_PAGE_TO_PHYS(pgs[n]);
pmap_kenter_pa(va, pa, prot, 0);
}
pmap_update(pmap_kernel());
}
void
uvm_emap_remove(vaddr_t sva, vsize_t len)
{
pmap_kremove(sva, len);
pmap_update(pmap_kernel());
}
#endif
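
The generation checks in the removed code above, such as (signed int)(curgen - gen) >= 0, rely on the wraparound-safe signed-difference idiom that the comments note assumes two's complement arithmetic. The following standalone snippet is not part of the commit; the function name is chosen here purely for exposition of that comparison.

#include <stdbool.h>
#include <stdio.h>

/*
 * "cur is at least as new as gen", correct even across unsigned
 * wraparound, provided the two values are within ~2^31 increments
 * of each other.
 */
static bool
gen_is_current(unsigned cur, unsigned gen)
{
	return (int)(cur - gen) >= 0;
}

int
main(void)
{
	/* Ordinary case: current CPU generation is newer. */
	printf("%d\n", gen_is_current(10, 7));				/* 1 */
	/* Wraparound case: 0x00000002 is newer than 0xfffffffe. */
	printf("%d\n", gen_is_current(0x00000002u, 0xfffffffeu));	/* 1 */
	/* Stale case: the CPU is behind the LWP, a flush is needed. */
	printf("%d\n", gen_is_current(5, 9));				/* 0 */
	return 0;
}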

View File: uvm_extern.h

@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.211 2018/05/08 19:33:57 christos Exp $ */
/* $NetBSD: uvm_extern.h,v 1.212 2018/05/19 11:39:37 jdolecek Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -618,32 +618,6 @@ int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
void ubc_zerorange(struct uvm_object *, off_t, size_t, int);
void ubc_purge(struct uvm_object *);
/* uvm_emap.c */
void uvm_emap_sysinit(void);
#ifdef __HAVE_PMAP_EMAP
void uvm_emap_switch(lwp_t *);
#else
#define uvm_emap_switch(l)
#endif
u_int uvm_emap_gen_return(void);
void uvm_emap_update(u_int);
vaddr_t uvm_emap_alloc(vsize_t, bool);
void uvm_emap_free(vaddr_t, size_t);
void uvm_emap_enter(vaddr_t, struct vm_page **, u_int,
vm_prot_t);
void uvm_emap_remove(vaddr_t, vsize_t);
#ifdef __HAVE_PMAP_EMAP
void uvm_emap_consume(u_int);
u_int uvm_emap_produce(void);
#else
#define uvm_emap_consume(x)
#define uvm_emap_produce() UVM_EMAP_INACTIVE
#endif
/* uvm_fault.c */
#define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0)
int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int);

View File: uvm_init.c

@ -1,4 +1,4 @@
/* $NetBSD: uvm_init.c,v 1.48 2016/12/23 07:15:28 cherry Exp $ */
/* $NetBSD: uvm_init.c,v 1.49 2018/05/19 11:39:37 jdolecek Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.48 2016/12/23 07:15:28 cherry Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.49 2018/05/19 11:39:37 jdolecek Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -166,12 +166,6 @@ uvm_init(void)
uvm_loan_init();
/*
* Init emap subsystem.
*/
uvm_emap_sysinit();
/*
* The VM system is now up! Now that kmem is up we can resize the
* <obj,off> => <page> hash table for general use and enable paging