Provide pmap_enter_ma(), pmap_extract_ma(), pmap_kenter_ma() in all x86

kernels, and use them in the bus_space(9) implementation instead of ugly
Xen #ifdef-age.  In a non-Xen kernel, the _ma() functions either call or
alias the equivalent _pa() functions.

Reviewed on port-xen@netbsd.org and port-i386@netbsd.org.  Passes
rmind@'s and bouyer@'s inspection.  Tested on i386 and on Xen DOMU /
DOM0.
This commit is contained in:
dyoung 2010-05-10 18:46:58 +00:00
parent 6ea92ed1bd
commit 19265b4bb9
5 changed files with 288 additions and 129 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.29 2010/02/09 22:51:13 jym Exp $ */
/* $NetBSD: pmap.h,v 1.30 2010/05/10 18:46:58 dyoung Exp $ */
/*
*
@ -228,6 +228,12 @@ void pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void pmap_emap_remove(vaddr_t, vsize_t);
void pmap_emap_sync(bool);
void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
pd_entry_t * const **);
void pmap_unmap_ptes(struct pmap *, struct pmap *);
int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
void pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
@ -248,6 +254,12 @@ bool pmap_pageidlezero(paddr_t);
* inline functions
*/
/*
 * pmap_pdes_valid: check that every page-directory level covering "va"
 * holds a valid entry.  Thin wrapper that inverts pmap_pdes_invalid();
 * on success *lastpde is filled in by the callee.
 */
__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{

	return !pmap_pdes_invalid(va, pdes, lastpde);
}
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
@ -401,17 +413,17 @@ xpmap_update (pt_entry_t *pte, pt_entry_t npte)
/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XEN */
/* pmap functions with machine addresses */
void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
vm_prot_t, u_int, int);
bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XEN */
/*
* Hooks for the pool allocator.
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_space.c,v 1.28 2010/04/28 20:27:36 dyoung Exp $ */
/* $NetBSD: bus_space.c,v 1.29 2010/05/10 18:46:58 dyoung Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.28 2010/04/28 20:27:36 dyoung Exp $");
__KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.29 2010/05/10 18:46:58 dyoung Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -48,9 +48,6 @@ __KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.28 2010/04/28 20:27:36 dyoung Exp $"
#ifdef XEN
#include <xen/hypervisor.h>
#include <xen/xenpmap.h>
#define pmap_extract(a, b, c) pmap_extract_ma(a, b, c)
#endif
/*
@ -339,11 +336,7 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size,
*bshp = (bus_space_handle_t)(sva + (bpa & PGOFSET));
for (va = sva; pa != endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
#ifdef XEN
pmap_kenter_ma(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
#else
pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
#endif /* XEN */
}
pmap_update(pmap_kernel());
@ -395,7 +388,7 @@ _x86_memio_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
}
#endif
if (pmap_extract(pmap_kernel(), va, &bpa) == FALSE) {
if (pmap_extract_ma(pmap_kernel(), va, &bpa) == FALSE) {
panic("_x86_memio_unmap:"
" wrong virtual address");
}
@ -447,7 +440,7 @@ bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
panic("x86_memio_unmap: overflow");
#endif
(void) pmap_extract(pmap_kernel(), va, &bpa);
(void) pmap_extract_ma(pmap_kernel(), va, &bpa);
bpa += (bsh & PGOFSET);
pmap_kremove(va, endva - va);

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.108 2010/05/04 23:27:14 jym Exp $ */
/* $NetBSD: pmap.c,v 1.109 2010/05/10 18:46:58 dyoung Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@ -149,7 +149,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.108 2010/05/04 23:27:14 jym Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.109 2010/05/10 18:46:58 dyoung Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -531,6 +531,8 @@ static struct pool_cache pmap_pv_cache;
static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *early_zero_pte;
static char *csrcp, *cdstp, *zerop, *ptpp, *early_zerop;
int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
/*
* pool and cache that PDPs are allocated from
*/
@ -580,8 +582,6 @@ static void pmap_free_ptp(struct pmap *, struct vm_page *,
pd_entry_t * const *);
static bool pmap_is_curpmap(struct pmap *);
static bool pmap_is_active(struct pmap *, struct cpu_info *, bool);
static void pmap_map_ptes(struct pmap *, struct pmap **,
pt_entry_t **, pd_entry_t * const **);
static bool pmap_remove_pte(struct pmap *, struct vm_page *,
pt_entry_t *, vaddr_t,
struct pv_entry **);
@ -589,13 +589,8 @@ static pt_entry_t pmap_remove_ptes(struct pmap *, struct vm_page *,
vaddr_t, vaddr_t, vaddr_t,
struct pv_entry **);
static void pmap_unmap_ptes(struct pmap *, struct pmap *);
static void pmap_unmap_apdp(void);
static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
static int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *,
pd_entry_t *);
#define pmap_pdes_valid(va, pdes, lastpde) \
(pmap_pdes_invalid((va), (pdes), (lastpde)) == 0)
static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
long *);
@ -781,7 +776,7 @@ pmap_reference(struct pmap *pmap)
* => must be undone with pmap_unmap_ptes before returning
*/
static void
void
pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
{
@ -914,7 +909,7 @@ pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
* pmap_unmap_ptes: unlock the PTE mapping of "pmap"
*/
static void
void
pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
{
@ -1122,47 +1117,7 @@ pmap_emap_remove(vaddr_t sva, vsize_t len)
}
}
#ifdef XEN
/*
* pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
*
* => no need to lock anything, assume va is already allocated
* => should be faster than normal pmap enter function
* => we expect a MACHINE address
*/
void
pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
{
pt_entry_t *pte, opte, npte;
if (va < VM_MIN_KERNEL_ADDRESS)
pte = vtopte(va);
else
pte = kvtopte(va);
npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
PG_V | PG_k;
if (flags & PMAP_NOCACHE)
npte |= PG_N;
if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
npte |= PG_NX;
opte = pmap_pte_testset (pte, npte); /* zap! */
if (pmap_valid_entry(opte)) {
#if defined(MULTIPROCESSOR)
kpreempt_disable();
pmap_tlb_shootdown(pmap_kernel(), va, 0, opte);
kpreempt_enable();
#else
/* Don't bother deferring in the single CPU case. */
pmap_update_pg(va);
#endif
}
}
#endif /* XEN */
__weak_alias(pmap_kenter_ma, pmap_kenter_pa);
#if defined(__x86_64__)
/*
@ -2883,7 +2838,7 @@ pmap_deactivate(struct lwp *l)
* some misc. functions
*/
static int
int
pmap_pdes_invalid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
int i;
@ -2977,39 +2932,9 @@ vtophys(vaddr_t va)
return (0);
}
#ifdef XEN
/*
* pmap_extract_ma: extract a MA for the given VA
*/
__weak_alias(pmap_extract_ma, pmap_extract);
bool
pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
pt_entry_t *ptes, pte;
pd_entry_t pde;
pd_entry_t * const *pdes;
struct pmap *pmap2;
kpreempt_disable();
pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
if (!pmap_pdes_valid(va, pdes, &pde)) {
pmap_unmap_ptes(pmap, pmap2);
kpreempt_enable();
return false;
}
pte = ptes[pl1_i(va)];
pmap_unmap_ptes(pmap, pmap2);
kpreempt_enable();
if (__predict_true((pte & PG_V) != 0)) {
if (pap != NULL)
*pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
return true;
}
return false;
}
#ifdef XEN
/*
* vtomach: virtual address to machine address. For use by
@ -3028,8 +2953,6 @@ vtomach(vaddr_t va)
#endif /* XEN */
/*
* pmap_virtual_space: used during bootup [pmap_steal_memory] to
* determine the bounds of the kernel virtual addess space.
@ -3985,24 +3908,25 @@ pmap_unwire(struct pmap *pmap, vaddr_t va)
* defined as macro in pmap.h
*/
__weak_alias(pmap_enter, pmap_enter_default);
/*
 * pmap_enter_default: the native (non-Xen) pmap_enter body, which
 * pmap_enter is weak-aliased to.  On native x86 the machine address
 * equals the physical address, so "pa" is passed for both the MA and
 * PA arguments of pmap_enter_ma(); the trailing 0 fills the domid
 * slot, which is only meaningful under Xen.
 */
int
pmap_enter_default(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
u_int flags)
{
return pmap_enter_ma(pmap, va, pa, pa, prot, flags, 0);
}
/*
* pmap_enter: enter a mapping into a pmap
*
* => must be done "now" ... no lazy-evaluation
* => we set pmap => pv_head locking
*/
#ifdef XEN
int
pmap_enter_ma(struct pmap *pmap, vaddr_t va, paddr_t ma, paddr_t pa,
vm_prot_t prot, u_int flags, int domid)
{
#else /* XEN */
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
u_int flags)
{
paddr_t ma = pa;
#endif /* XEN */
pt_entry_t *ptes, opte, npte;
pt_entry_t *ptep;
pd_entry_t * const *pdes;
@ -4214,22 +4138,6 @@ out2:
return error;
}
#ifdef XEN
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
paddr_t ma;
if (__predict_false(pa < pmap_pa_start || pmap_pa_end <= pa)) {
ma = pa; /* XXX hack */
} else {
ma = xpmap_ptom(pa);
}
return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
}
#endif /* XEN */
static bool
pmap_get_physpage(vaddr_t va, int level, paddr_t *paddrp)
{

View File

@ -1,4 +1,4 @@
# $NetBSD: files.xen,v 1.106 2009/11/03 05:23:28 dyoung Exp $
# $NetBSD: files.xen,v 1.107 2010/05/10 18:46:58 dyoung Exp $
# NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp
# NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp
@ -106,6 +106,7 @@ file arch/xen/x86/hypervisor_machdep.c
# file arch/x86/x86/mtrr_i686.c mtrr
file arch/x86/x86/syscall.c
file arch/xen/x86/x86_xpmap.c
file arch/xen/x86/xen_pmap.c
file arch/xen/x86/xen_intr.c
file arch/xen/x86/xenfunc.c

245
sys/arch/xen/x86/xen_pmap.c Normal file
View File

@ -0,0 +1,245 @@
/*
* Copyright (c) 2007 Manuel Bouyer.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright 2001 (c) Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.1 2010/05/10 18:46:59 dyoung Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#if !defined(__x86_64__)
#include "opt_kstack_dr0.h"
#endif /* !defined(__x86_64__) */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/xcall.h>
#include <uvm/uvm.h>
#include <dev/isa/isareg.h>
#include <machine/specialreg.h>
#include <machine/gdt.h>
#include <machine/isa_machdep.h>
#include <machine/cpuvar.h>
#include <x86/pmap.h>
#include <x86/pmap_pv.h>
#include <x86/i82489reg.h>
#include <x86/i82489var.h>
#ifdef XEN
#include <xen/xen3-public/xen.h>
#include <xen/hypervisor.h>
#endif
/* flag to be used for kernel mappings: PG_u on Xen/amd64, 0 otherwise */
#if defined(XEN) && defined(__x86_64__)
#define PG_k PG_u
#else
#define PG_k 0
#endif
extern paddr_t pmap_pa_start; /* PA of first physical page for this domain */
extern paddr_t pmap_pa_end; /* PA of last physical page for this domain */
/*
 * pmap_enter: Xen front end for entering a mapping.
 *
 * Translates the pseudo-physical address to a machine address with
 * xpmap_ptom() when it lies inside this domain's physical range
 * [pmap_pa_start, pmap_pa_end), then defers to pmap_enter_ma() with
 * DOMID_SELF.  Out-of-range addresses are passed through untranslated.
 */
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	paddr_t ma;

	if (__predict_true(pmap_pa_start <= pa && pa < pmap_pa_end))
		ma = xpmap_ptom(pa);
	else
		ma = pa;	/* XXX hack */

	return pmap_enter_ma(pmap, va, ma, pa, prot, flags, DOMID_SELF);
}
/*
* pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
*
* => no need to lock anything, assume va is already allocated
* => should be faster than normal pmap enter function
* => we expect a MACHINE address
*/
/*
 * pmap_kenter_ma: enter a kernel mapping of a MACHINE address without
 * R/M (pv_entry) tracking.  No locking: the VA is assumed to be already
 * allocated to the caller.
 */
void
pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
{
pt_entry_t *pte, opte, npte;
/*
 * Locate the PTE for "va".  NOTE(review): presumably vtopte() serves
 * low (pre-kernel) VAs and kvtopte() kernel VAs -- confirm against
 * their definitions in pmap.h.
 */
if (va < VM_MIN_KERNEL_ADDRESS)
pte = vtopte(va);
else
pte = kvtopte(va);
/* Build the new PTE directly from the machine address. */
npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
PG_V | PG_k;
if (flags & PMAP_NOCACHE)
npte |= PG_N;	/* caller asked for an uncached mapping */
/* Mark no-execute when the CPU has NX and exec permission wasn't asked. */
if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
npte |= PG_NX;
/* Atomically install the new PTE, fetching the old one. */
opte = pmap_pte_testset (pte, npte); /* zap! */
/* Only a previously-valid entry can be stale in a TLB. */
if (pmap_valid_entry(opte)) {
#if defined(MULTIPROCESSOR)
kpreempt_disable();
pmap_tlb_shootdown(pmap_kernel(), va, 0, opte);
kpreempt_enable();
#else
/* Don't bother deferring in the single CPU case. */
pmap_update_pg(va);
#endif
}
}
/*
* pmap_extract_ma: extract a MA for the given VA
*/
/*
 * pmap_extract_ma: extract the machine address mapped at "va" in "pmap".
 *
 * => returns true and stores the MA through "pap" (if non-NULL) when a
 *    valid mapping exists; returns false otherwise
 */
bool
pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
pt_entry_t *ptes, pte;
pd_entry_t pde;
pd_entry_t * const *pdes;
struct pmap *pmap2;
/* Map the target pmap's page tables; no preemption across the window. */
kpreempt_disable();
pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
/* An invalid page-directory level means there can be no mapping. */
if (!pmap_pdes_valid(va, pdes, &pde)) {
pmap_unmap_ptes(pmap, pmap2);
kpreempt_enable();
return false;
}
/* Snapshot the PTE before dropping the page-table mapping. */
pte = ptes[pl1_i(va)];
pmap_unmap_ptes(pmap, pmap2);
kpreempt_enable();
if (__predict_true((pte & PG_V) != 0)) {
if (pap != NULL)
/* Machine frame bits plus the offset within the page. */
*pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
return true;
}
return false;
}