x86: Split most of pmap.h into pmap_private.h or vmparam.h.

This way pmap.h contains only the MD definition of the MI pmap(9)
API, which loads of things in the kernel rely on, so changing x86
pmap internals no longer requires recompiling the entire kernel
every time.

Callers needing these internals must now use machine/pmap_private.h.
Note: This is not simply x86/pmap_private.h, because the content has
three parts:

1. CPU-specific (different for i386/amd64) definitions used by...

2. common definitions, including Xenisms like xpmap_ptetomach,
   further used by...

3. more CPU-specific inlines for pmap_pte_* operations

So {amd64,i386}/pmap_private.h defines part 1, includes
x86/pmap_private.h for part 2, and then defines part 3.  Maybe part 3
should be split out into a new pmap_pte.h to reduce this trouble.
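
Concretely, the layering looks like this (a condensed sketch of the
amd64 header added here; the i386 one is parallel -- see the full
diffs below):

	/* amd64/pmap_private.h, abridged */
	#ifndef _AMD64_PMAP_PRIVATE_H_
	#define _AMD64_PMAP_PRIVATE_H_

	/* 1. CPU-specific definitions: L4 slots, VA_SIGN_*, NK* limits */
	#define PDIR_SLOT_USERLIM	255
	/* ... */

	/* 2. common x86 definitions, including Xenisms like xpmap_ptetomach */
	#define _MACHINE_PMAP_PRIVATE_H_X86
	#include <x86/pmap_private.h>
	#undef _MACHINE_PMAP_PRIVATE_H_X86

	/* 3. CPU-specific pmap_pte_* operations, native and XENPV flavours */
	#ifndef XENPV
	#define pmap_pte_set(p, n)	do { *(p) = (n); } while (0)
	/* ... */
	#else
	/* Xen flavours queue updates via xpq_queue_pte_update() */
	#endif

	#endif	/* _AMD64_PMAP_PRIVATE_H_ */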

No functional change intended, other than that some .c files must
now include machine/pmap_private.h where previously uvm/uvm_pmap.h
polluted the namespace with pmap internals.
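
For example, amd64's gdt.c (diff below) now pulls the internals in
explicitly:

	#include <machine/gdt.h>
	#include <machine/pmap_private.h>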

Note: This migrates part of i386/pmap.h into i386/vmparam.h --
specifically the parts that are needed for several constants defined
in vmparam.h:

VM_MAXUSER_ADDRESS
VM_MAX_ADDRESS
VM_MAX_KERNEL_ADDRESS
VM_MIN_KERNEL_ADDRESS

Since i386 needs PDP_SIZE in vmparam.h, I added it there on amd64
too, just to keep things parallel.
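
For context, those i386 constants are built from the page-directory
slot macros being moved; roughly (paraphrased here, not the verbatim
vmparam.h definitions):

	#define VM_MAXUSER_ADDRESS	((vaddr_t)(PDIR_SLOT_PTE << L2_SHIFT))
	#define VM_MIN_KERNEL_ADDRESS	((vaddr_t)(PDIR_SLOT_KERN << L2_SHIFT))

which is why PDIR_SLOT_PTE, PDIR_SLOT_KERN, and (under PAE) PDP_SIZE
must be visible to vmparam.h itself.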
Author: riastradh
Date:   2022-08-20 23:48:50 +00:00
Parent: c45e9f40fe
Commit: 8f18579d5d
36 changed files with 1138 additions and 1002 deletions

sys/arch/amd64/amd64/gdt.c

@@ -1,4 +1,4 @@
/* $NetBSD: gdt.c,v 1.47 2019/03/09 08:42:25 maxv Exp $ */
/* $NetBSD: gdt.c,v 1.48 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.47 2019/03/09 08:42:25 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.48 2022/08/20 23:48:50 riastradh Exp $");
#include "opt_multiprocessor.h"
#include "opt_xen.h"
@@ -52,6 +52,7 @@ __KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.47 2019/03/09 08:42:25 maxv Exp $");
#include <uvm/uvm.h>
#include <machine/gdt.h>
#include <machine/pmap_private.h>
#ifdef XENPV
#include <xen/hypervisor.h>

sys/arch/amd64/amd64/genassym.cf

@@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.87 2022/06/12 11:36:42 bouyer Exp $
# $NetBSD: genassym.cf,v 1.88 2022/08/20 23:48:50 riastradh Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -93,6 +93,7 @@ include <uvm/uvm.h>
include <machine/trap.h>
include <machine/pmap.h>
include <machine/pmap_private.h>
include <machine/vmparam.h>
include <machine/intr.h>
include <machine/types.h>

sys/arch/amd64/amd64/machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.362 2022/08/20 23:15:36 riastradh Exp $ */
/* $NetBSD: machdep.c,v 1.363 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.362 2022/08/20 23:15:36 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.363 2022/08/20 23:48:50 riastradh Exp $");
#include "opt_modular.h"
#include "opt_user_ldt.h"
@@ -182,12 +182,12 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.362 2022/08/20 23:15:36 riastradh Exp
#include <x86/dbregs.h>
#include <machine/mtrr.h>
#include <machine/mpbiosvar.h>
#include <machine/pmap_private.h>
#include <x86/bootspace.h>
#include <x86/cputypes.h>
#include <x86/cpuvar.h>
#include <x86/machdep.h>
#include <x86/x86/tsc.h>
#include <dev/isa/isareg.h>

sys/arch/amd64/include/pmap.h

@@ -1,316 +1,4 @@
/* $NetBSD: pmap.h,v 1.67 2022/08/20 23:18:20 riastradh Exp $ */
/* $NetBSD: pmap.h,v 1.68 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _AMD64_PMAP_H_
#define _AMD64_PMAP_H_
#ifdef __x86_64__
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#include "opt_kasan.h"
#include "opt_kmsan.h"
#include "opt_kubsan.h"
#endif
#include <sys/atomic.h>
#include <machine/pte.h>
#include <machine/segments.h>
#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif
#include <uvm/uvm_object.h>
#ifdef XENPV
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif
/*
* Mask to get rid of the sign-extended part of addresses.
*/
#define VA_SIGN_MASK 0xffff000000000000
#define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK)
/* XXXfvdl this one's not right. */
#define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK)
#ifdef KASAN
#define L4_SLOT_KASAN 256
#define NL4_SLOT_KASAN 32
#endif
#ifdef KMSAN
#define L4_SLOT_KMSAN 256
#define NL4_SLOT_KMSAN 4
#endif
#define NL4_SLOT_DIRECT 32
#ifndef XENPV
#define L4_SLOT_PTE slotspace.area[SLAREA_PTE].sslot
#else
#define L4_SLOT_PTE 509
#endif
#define L4_SLOT_KERN slotspace.area[SLAREA_MAIN].sslot
#define L4_SLOT_KERNBASE 511 /* pl4_i(KERNBASE) */
#define PDIR_SLOT_USERLIM 255
#define PDIR_SLOT_KERN L4_SLOT_KERN
#define PDIR_SLOT_PTE L4_SLOT_PTE
/*
* The following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE: the base VA of the linear PTE mappings
* PDP_BASE: the base VA of the recursive mapping of the PTD
*/
#ifndef XENPV
extern pt_entry_t *pte_base;
#define PTE_BASE pte_base
#else
#define PTE_BASE ((pt_entry_t *)VA_SIGN_NEG((L4_SLOT_PTE * NBPD_L4)))
#endif
#define L1_BASE PTE_BASE
#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))
#define PDP_BASE L4_BASE
#if defined(KMSAN)
#define NKL4_MAX_ENTRIES (unsigned long)1 /* 512GB only */
#else
#define NKL4_MAX_ENTRIES (unsigned long)64
#endif
#define NKL3_MAX_ENTRIES (unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES (unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * 512)
#define NKL4_KIMG_ENTRIES 1
#define NKL3_KIMG_ENTRIES 1
#if defined(KUBSAN) || defined(KMSAN)
#define NKL2_KIMG_ENTRIES 64 /* really big kernel */
#else
#define NKL2_KIMG_ENTRIES 48
#endif
/*
* Since kva space is below the kernel in its entirety, we start off
* with zero entries on each level.
*/
#define NKL4_START_ENTRIES 0
#define NKL3_START_ENTRIES 0
#define NKL2_START_ENTRIES 0
#define NKL1_START_ENTRIES 0
#define PTP_MASK_INITIALIZER { L1_MASK, L2_MASK, L3_MASK, L4_MASK }
#define PTP_FRAME_INITIALIZER { L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER { NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER { L2_BASE, L3_BASE, L4_BASE }
#define PTP_LEVELS 4
/*
* PTE_AVL usage: we make use of the ignored bits of the PTE
*/
#define PTE_WIRED PTE_AVL1 /* Wired Mapping */
#define PTE_PVLIST PTE_AVL2 /* Mapping has entry on pvlist */
#define PTE_X 0 /* Dummy */
/* XXX To be deleted. */
#define PG_W PTE_WIRED
#define PG_PVLIST PTE_PVLIST
#define PG_X PTE_X
void svs_pmap_sync(struct pmap *, int);
void svs_ldt_sync(struct pmap *);
void svs_lwp_switch(struct lwp *, struct lwp *);
void svs_pdir_switch(struct pmap *);
void svs_init(void);
extern bool svs_enabled;
extern bool svs_pcid;
#include <x86/pmap.h>
#ifndef XENPV
#define pmap_pa2pte(a) (a)
#define pmap_pte2pa(a) ((a) & PTE_FRAME)
#define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
#define pmap_pte_cas(p, o, n) atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_ulong((volatile unsigned long *)p, ~(b))
#define pmap_pte_flush() /* nothing */
#else
extern kmutex_t pte_lock;
static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
return (pt_entry_t)xpmap_ptom_masked(pa);
}
static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
return xpmap_mtop_masked(pte & PTE_FRAME);
}
static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
int s = splvm();
xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
splx(s);
}
static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *ptep;
if (opte == o) {
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
xpq_flush_queue();
}
mutex_exit(&pte_lock);
return opte;
}
static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *pte;
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
xpq_flush_queue();
mutex_exit(&pte_lock);
return opte;
}
static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
(*pte) & ~bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_flush(void)
{
int s = splvm();
xpq_flush_queue();
splx(s);
}
#endif
#ifdef __HAVE_DIRECT_MAP
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
int (*process)(void *, size_t, void *), void *arg)
{
vaddr_t va = PMAP_DIRECT_MAP(pa);
return process((void *)(va + pgoff), len, arg);
}
#endif /* __HAVE_DIRECT_MAP */
void pmap_changeprot_local(vaddr_t, vm_prot_t);
#else /* !__x86_64__ */
#include <i386/pmap.h>
#endif /* __x86_64__ */
#endif /* _AMD64_PMAP_H_ */

sys/arch/amd64/include/pmap_private.h

@@ -0,0 +1,316 @@
/* $NetBSD: pmap_private.h,v 1.1 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _AMD64_PMAP_PRIVATE_H_
#define _AMD64_PMAP_PRIVATE_H_
#ifdef __x86_64__
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#include "opt_kasan.h"
#include "opt_kmsan.h"
#include "opt_kubsan.h"
#endif
#include <sys/atomic.h>
#include <machine/pte.h>
#include <machine/segments.h>
#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif
#include <uvm/uvm_object.h>
#ifdef XENPV
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif
/*
* Mask to get rid of the sign-extended part of addresses.
*/
#define VA_SIGN_MASK 0xffff000000000000
#define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK)
/* XXXfvdl this one's not right. */
#define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK)
#ifdef KASAN
#define L4_SLOT_KASAN 256
#define NL4_SLOT_KASAN 32
#endif
#ifdef KMSAN
#define L4_SLOT_KMSAN 256
#define NL4_SLOT_KMSAN 4
#endif
#define NL4_SLOT_DIRECT 32
#ifndef XENPV
#define L4_SLOT_PTE slotspace.area[SLAREA_PTE].sslot
#else
#define L4_SLOT_PTE 509
#endif
#define L4_SLOT_KERN slotspace.area[SLAREA_MAIN].sslot
#define L4_SLOT_KERNBASE 511 /* pl4_i(KERNBASE) */
#define PDIR_SLOT_USERLIM 255
#define PDIR_SLOT_KERN L4_SLOT_KERN
#define PDIR_SLOT_PTE L4_SLOT_PTE
/*
* The following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE: the base VA of the linear PTE mappings
* PDP_BASE: the base VA of the recursive mapping of the PTD
*/
#ifndef XENPV
extern pt_entry_t *pte_base;
#define PTE_BASE pte_base
#else
#define PTE_BASE ((pt_entry_t *)VA_SIGN_NEG((L4_SLOT_PTE * NBPD_L4)))
#endif
#define L1_BASE PTE_BASE
#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))
#define PDP_BASE L4_BASE
#if defined(KMSAN)
#define NKL4_MAX_ENTRIES (unsigned long)1 /* 512GB only */
#else
#define NKL4_MAX_ENTRIES (unsigned long)64
#endif
#define NKL3_MAX_ENTRIES (unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES (unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * 512)
#define NKL4_KIMG_ENTRIES 1
#define NKL3_KIMG_ENTRIES 1
#if defined(KUBSAN) || defined(KMSAN)
#define NKL2_KIMG_ENTRIES 64 /* really big kernel */
#else
#define NKL2_KIMG_ENTRIES 48
#endif
/*
* Since kva space is below the kernel in its entirety, we start off
* with zero entries on each level.
*/
#define NKL4_START_ENTRIES 0
#define NKL3_START_ENTRIES 0
#define NKL2_START_ENTRIES 0
#define NKL1_START_ENTRIES 0
#define PTP_MASK_INITIALIZER { L1_MASK, L2_MASK, L3_MASK, L4_MASK }
#define PTP_FRAME_INITIALIZER { L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER { NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER { L2_BASE, L3_BASE, L4_BASE }
/*
* PTE_AVL usage: we make use of the ignored bits of the PTE
*/
#define PTE_WIRED PTE_AVL1 /* Wired Mapping */
#define PTE_PVLIST PTE_AVL2 /* Mapping has entry on pvlist */
#define PTE_X 0 /* Dummy */
/* XXX To be deleted. */
#define PG_W PTE_WIRED
#define PG_PVLIST PTE_PVLIST
#define PG_X PTE_X
void svs_pmap_sync(struct pmap *, int);
void svs_ldt_sync(struct pmap *);
void svs_lwp_switch(struct lwp *, struct lwp *);
void svs_pdir_switch(struct pmap *);
void svs_init(void);
extern bool svs_enabled;
extern bool svs_pcid;
#define _MACHINE_PMAP_PRIVATE_H_X86
#include <x86/pmap_private.h>
#undef _MACHINE_PMAP_PRIVATE_H_X86
#ifndef XENPV
#define pmap_pa2pte(a) (a)
#define pmap_pte2pa(a) ((a) & PTE_FRAME)
#define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
#define pmap_pte_cas(p, o, n) atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_ulong((volatile unsigned long *)p, ~(b))
#define pmap_pte_flush() /* nothing */
#else
extern kmutex_t pte_lock;
static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
return (pt_entry_t)xpmap_ptom_masked(pa);
}
static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
return xpmap_mtop_masked(pte & PTE_FRAME);
}
static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
int s = splvm();
xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
splx(s);
}
static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *ptep;
if (opte == o) {
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
xpq_flush_queue();
}
mutex_exit(&pte_lock);
return opte;
}
static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *pte;
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
xpq_flush_queue();
mutex_exit(&pte_lock);
return opte;
}
static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
(*pte) & ~bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_flush(void)
{
int s = splvm();
xpq_flush_queue();
splx(s);
}
#endif
#ifdef __HAVE_DIRECT_MAP
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
int (*process)(void *, size_t, void *), void *arg)
{
vaddr_t va = PMAP_DIRECT_MAP(pa);
return process((void *)(va + pgoff), len, arg);
}
#endif /* __HAVE_DIRECT_MAP */
void pmap_changeprot_local(vaddr_t, vm_prot_t);
#else /* !__x86_64__ */
#include <i386/pmap_private.h>
#endif /* __x86_64__ */
#endif /* _AMD64_PMAP_PRIVATE_H_ */

sys/arch/amd64/include/vmparam.h

@@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.54 2020/11/26 20:50:45 christos Exp $ */
/* $NetBSD: vmparam.h,v 1.55 2022/08/20 23:48:50 riastradh Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@@ -150,6 +150,8 @@ extern vaddr_t vm_max_kernel_address;
#define VM_MAX_KERNEL_ADDRESS vm_max_kernel_address
#endif
#define PDP_SIZE 1
/*
* The address to which unspecified mapping requests default
*/

sys/arch/i386/i386/dumpsys.c

@@ -1,4 +1,4 @@
/* $NetBSD: dumpsys.c,v 1.16 2011/12/12 19:03:09 mrg Exp $ */
/* $NetBSD: dumpsys.c,v 1.17 2022/08/20 23:48:50 riastradh Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008 The NetBSD Foundation, Inc.
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dumpsys.c,v 1.16 2011/12/12 19:03:09 mrg Exp $");
__KERNEL_RCSID(0, "$NetBSD: dumpsys.c,v 1.17 2022/08/20 23:48:50 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -81,6 +81,7 @@ __KERNEL_RCSID(0, "$NetBSD: dumpsys.c,v 1.16 2011/12/12 19:03:09 mrg Exp $");
#include <sys/exec_aout.h>
#include <machine/kcore.h>
#include <machine/pmap_private.h>
#include <uvm/uvm_extern.h>

sys/arch/i386/i386/gdt.c

@@ -1,4 +1,4 @@
/* $NetBSD: gdt.c,v 1.72 2021/04/30 13:54:26 christos Exp $ */
/* $NetBSD: gdt.c,v 1.73 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.72 2021/04/30 13:54:26 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.73 2022/08/20 23:48:50 riastradh Exp $");
#include "opt_multiprocessor.h"
#include "opt_xen.h"
@@ -44,6 +44,7 @@ __KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.72 2021/04/30 13:54:26 christos Exp $");
#include <uvm/uvm.h>
#include <machine/gdt.h>
#include <machine/pmap_private.h>
#define NSLOTS(sz) \
(((sz) - DYNSEL_START) / sizeof(union descriptor))

sys/arch/i386/i386/genassym.cf

@@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.124 2022/06/12 11:36:42 bouyer Exp $
# $NetBSD: genassym.cf,v 1.125 2022/08/20 23:48:50 riastradh Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -92,6 +92,7 @@ include <uvm/uvm.h>
include <machine/trap.h>
include <machine/pmap.h>
include <machine/pmap_private.h>
include <machine/vmparam.h>
include <machine/intr.h>
include <machine/types.h>

sys/arch/i386/i386/machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.836 2022/08/20 23:15:36 riastradh Exp $ */
/* $NetBSD: machdep.c,v 1.837 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009, 2017
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.836 2022/08/20 23:15:36 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.837 2022/08/20 23:48:50 riastradh Exp $");
#include "opt_beep.h"
#include "opt_compat_freebsd.h"
@@ -134,6 +134,7 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.836 2022/08/20 23:15:36 riastradh Exp
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/mtrr.h>
#include <machine/pmap_private.h>
#include <x86/x86/tsc.h>
#include <x86/bootspace.h>

sys/arch/i386/i386/trap.c

@@ -1,5 +1,5 @@
/* $NetBSD: trap.c,v 1.307 2020/09/05 07:26:37 maxv Exp $ */
/* $NetBSD: trap.c,v 1.308 2022/08/20 23:48:50 riastradh Exp $ */
/*-
* Copyright (c) 1998, 2000, 2005, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.307 2020/09/05 07:26:37 maxv Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.308 2022/08/20 23:48:50 riastradh Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@@ -100,6 +100,7 @@ __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.307 2020/09/05 07:26:37 maxv Exp $");
#include <machine/trap.h>
#include <machine/userret.h>
#include <machine/db_machdep.h>
#include <machine/pmap_private.h>
#include "mca.h"
#if NMCA > 0

sys/arch/i386/include/pmap.h

@@ -1,403 +1,4 @@
/* $NetBSD: pmap.h,v 1.127 2022/08/20 23:18:20 riastradh Exp $ */
/* $NetBSD: pmap.h,v 1.128 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif
#include <sys/atomic.h>
#include <i386/pte.h>
#include <machine/segments.h>
#if defined(_KERNEL)
#include <machine/cpufunc.h>
#endif
#include <uvm/uvm_object.h>
#ifdef XENPV
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif /* XENPV */
/*
* see pte.h for a description of i386 MMU terminology and hardware
* interface.
*
* a pmap describes a processes' 4GB virtual address space. when PAE
* is not in use, this virtual address space can be broken up into 1024 4MB
* regions which are described by PDEs in the PDP. the PDEs are defined as
* follows:
*
* (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
* (the following assumes that KERNBASE is 0xc0000000)
*
* PDE#s VA range usage
* 0->766 0x0 -> 0xbfc00000 user address space
* 767 0xbfc00000-> recursive mapping of PDP (used for
* 0xc0000000 linear mapping of PTPs)
* 768->1023 0xc0000000-> kernel address space (constant
* 0xffc00000 across all pmap's/processes)
* <end>
*
*
* note: a recursive PDP mapping provides a way to map all the PTEs for
* a 4GB address space into a linear chunk of virtual memory. in other
* words, the PTE for page 0 is the first int mapped into the 4MB recursive
* area. the PTE for page 1 is the second int. the very last int in the
* 4MB range is the PTE that maps VA 0xfffff000 (the last page in a 4GB
* address).
*
* all pmap's PD's must have the same values in slots 768->1023 so that
* the kernel is always mapped in every process. these values are loaded
* into the PD at pmap creation time.
*
* at any one time only one pmap can be active on a processor. this is
* the pmap whose PDP is pointed to by processor register %cr3. this pmap
* will have all its PTEs mapped into memory at the recursive mapping
* point (slot #767 as show above). when the pmap code wants to find the
* PTE for a virtual address, all it has to do is the following:
*
* address of PTE = (767 * 4MB) + (VA / PAGE_SIZE) * sizeof(pt_entry_t)
* = 0xbfc00000 + (VA / 4096) * 4
*
* what happens if the pmap layer is asked to perform an operation
* on a pmap that is not the one which is currently active? in that
* case we temporarily load this pmap, perform the operation, and mark
* the currently active one as pending lazy reload.
*
* the following figure shows the effects of the recursive PDP mapping:
*
* PDP (%cr3)
* +----+
* | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
* | |
* | |
* | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
* | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
* | |
* +----+
*
* note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
*
* starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
* PTP:
*
* PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
* +----+
* | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
* | |
* | |
* | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
* | 768| -> maps contents of first kernel PTP
* | |
* |1023|
* +----+
*
* note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
* defined as "PDP_BASE".... within that mapping there are two
* defines:
* "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
* which points back to itself.
*
* - PAE support -
* ---------------
*
* PAE adds another layer of indirection during address translation, breaking
* up the translation process in 3 different levels:
* - L3 page directory, containing 4 * 64-bits addresses (index determined by
* bits [31:30] from the virtual address). This breaks up the address space
* in 4 1GB regions.
* - the PD (L2), containing 512 64-bits addresses, breaking each L3 region
* in 512 * 2MB regions.
* - the PT (L1), also containing 512 64-bits addresses (at L1, the size of
* the pages is still 4K).
*
* The kernel virtual space is mapped by the last entry in the L3 page,
* the first 3 entries mapping the user VA space.
*
* Because the L3 has only 4 entries of 1GB each, we can't use recursive
* mappings at this level for PDP_PDE (this would eat up 2 of the 4GB
* virtual space). There are also restrictions imposed by Xen on the
* last entry of the L3 PD (reference count to this page cannot be
* bigger than 1), which makes it hard to use one L3 page per pmap to
* switch between pmaps using %cr3.
*
* As such, each CPU gets its own L3 page that is always loaded into its %cr3
* (ci_pae_l3_pd in the associated cpu_info struct). We claim that the VM has
* only a 2-level PTP (similar to the non-PAE case). L2 PD is now 4 contiguous
* pages long (corresponding to the 4 entries of the L3), and the different
* index/slots (like PDP_PDE) are adapted accordingly.
*
* Kernel space remains in L3[3], L3[0-2] maps the user VA space. Switching
* between pmaps consists in modifying the first 3 entries of the CPU's L3 page.
*
* PTE_BASE will need 4 entries in the L2 PD pages to map the L2 pages
* recursively.
*
* In addition, for Xen, we can't recursively map L3[3] (Xen wants the ref
* count on this page to be exactly one), so we use a shadow PD page for
* the last L2 PD. The shadow page could be static too, but to make pm_pdir[]
* contiguous we'll allocate/copy one page per pmap.
*/
/*
* Mask to get rid of the sign-extended part of addresses.
*/
#define VA_SIGN_MASK 0
#define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK)
/*
* XXXfvdl this one's not right.
*/
#define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK)
/*
* the following defines identify the slots used as described above.
*/
#ifdef PAE
#define L2_SLOT_PTE (KERNBASE/NBPD_L2-4) /* 1532: for recursive PDP map */
#define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 1536: start of kernel space */
#else /* PAE */
#define L2_SLOT_PTE (KERNBASE/NBPD_L2-1) /* 767: for recursive PDP map */
#define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 768: start of kernel space */
#endif /* PAE */
#define L2_SLOT_KERNBASE L2_SLOT_KERN
#define PDIR_SLOT_KERN L2_SLOT_KERN
#define PDIR_SLOT_PTE L2_SLOT_PTE
/*
* the following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE: the base VA of the linear PTE mappings
* PDP_BASE: the base VA of the recursive mapping of the PDP
* PDP_PDE: the VA of the PDE that points back to the PDP
*/
#define PTE_BASE ((pt_entry_t *) (PDIR_SLOT_PTE * NBPD_L2))
#define L1_BASE PTE_BASE
#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L2_SLOT_PTE * NBPD_L1))
#define PDP_PDE (L2_BASE + PDIR_SLOT_PTE)
#define PDP_BASE L2_BASE
/* largest value (-1 for APTP space) */
#define NKL2_MAX_ENTRIES (NTOPLEVEL_PDES - (KERNBASE/NBPD_L2) - 1)
#define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * NPDPG)
#define NKL2_KIMG_ENTRIES 0 /* XXX unused */
#define NKL2_START_ENTRIES 0 /* XXX computed on runtime */
#define NKL1_START_ENTRIES 0 /* XXX unused */
#ifndef XENPV
#define NTOPLEVEL_PDES (PAGE_SIZE * PDP_SIZE / (sizeof (pd_entry_t)))
#else /* !XENPV */
#ifdef PAE
#define NTOPLEVEL_PDES 1964 /* 1964-2047 reserved by Xen */
#else /* PAE */
#define NTOPLEVEL_PDES 1008 /* 1008-1023 reserved by Xen */
#endif /* PAE */
#endif /* !XENPV */
#define NPDPG (PAGE_SIZE / sizeof (pd_entry_t))
#define PTP_MASK_INITIALIZER { L1_MASK, L2_MASK }
#define PTP_FRAME_INITIALIZER { L1_FRAME, L2_FRAME }
#define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT }
#define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES }
#define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES }
#define NBPD_INITIALIZER { NBPD_L1, NBPD_L2 }
#define PDES_INITIALIZER { L2_BASE }
#define PTP_LEVELS 2
/*
* PTE_AVL usage: we make use of the ignored bits of the PTE
*/
#define PTE_WIRED PTE_AVL1 /* Wired Mapping */
#define PTE_PVLIST PTE_AVL2 /* Mapping has entry on pvlist */
#define PTE_X PTE_AVL3 /* Executable */
/* XXX To be deleted. */
#define PG_W PTE_WIRED
#define PG_PVLIST PTE_PVLIST
#define PG_X PTE_X
#include <x86/pmap.h>
#ifndef XENPV
#define pmap_pa2pte(a) (a)
#define pmap_pte2pa(a) ((a) & PTE_FRAME)
#define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
#define pmap_pte_flush() /* nothing */
#ifdef PAE
#define pmap_pte_cas(p, o, n) atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_64((volatile uint64_t *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_64((volatile uint64_t *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_64((volatile uint64_t *)p, ~(b))
#else /* PAE */
#define pmap_pte_cas(p, o, n) atomic_cas_32((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_ulong((volatile unsigned long *)p, ~(b))
#endif /* PAE */
#else /* XENPV */
extern kmutex_t pte_lock;
static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
return (pt_entry_t)xpmap_ptom_masked(pa);
}
static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
return xpmap_mtop_masked(pte & PTE_FRAME);
}
static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
int s = splvm();
xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
splx(s);
}
static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *ptep;
if (opte == o) {
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
xpq_flush_queue();
}
mutex_exit(&pte_lock);
return opte;
}
static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *pte;
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
npte);
xpq_flush_queue();
mutex_exit(&pte_lock);
return opte;
}
static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
(*pte) & ~bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_flush(void)
{
int s = splvm();
xpq_flush_queue();
splx(s);
}
#endif
struct vm_map;
struct trapframe;
struct pcb;
int pmap_exec_fixup(struct vm_map *, struct trapframe *, struct pcb *);
#endif /* _I386_PMAP_H_ */

sys/arch/i386/include/pmap_private.h

@@ -0,0 +1,370 @@
/* $NetBSD: pmap_private.h,v 1.1 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _I386_PMAP_PRIVATE_H_
#define _I386_PMAP_PRIVATE_H_
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif
#include <sys/atomic.h>
#include <i386/pte.h>
#include <i386/vmparam.h>
#include <machine/segments.h>
#if defined(_KERNEL)
#include <machine/cpufunc.h>
#endif
#include <uvm/uvm_object.h>
#ifdef XENPV
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif /* XENPV */
/*
* see pte.h for a description of i386 MMU terminology and hardware
* interface.
*
* a pmap describes a processes' 4GB virtual address space. when PAE
* is not in use, this virtual address space can be broken up into 1024 4MB
* regions which are described by PDEs in the PDP. the PDEs are defined as
* follows:
*
* (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
* (the following assumes that KERNBASE is 0xc0000000)
*
* PDE#s VA range usage
* 0->766 0x0 -> 0xbfc00000 user address space
* 767 0xbfc00000-> recursive mapping of PDP (used for
* 0xc0000000 linear mapping of PTPs)
* 768->1023 0xc0000000-> kernel address space (constant
* 0xffc00000 across all pmap's/processes)
* <end>
*
*
* note: a recursive PDP mapping provides a way to map all the PTEs for
* a 4GB address space into a linear chunk of virtual memory. in other
* words, the PTE for page 0 is the first int mapped into the 4MB recursive
* area. the PTE for page 1 is the second int. the very last int in the
* 4MB range is the PTE that maps VA 0xfffff000 (the last page in a 4GB
* address).
*
* all pmap's PD's must have the same values in slots 768->1023 so that
* the kernel is always mapped in every process. these values are loaded
* into the PD at pmap creation time.
*
* at any one time only one pmap can be active on a processor. this is
* the pmap whose PDP is pointed to by processor register %cr3. this pmap
* will have all its PTEs mapped into memory at the recursive mapping
* point (slot #767 as show above). when the pmap code wants to find the
* PTE for a virtual address, all it has to do is the following:
*
* address of PTE = (767 * 4MB) + (VA / PAGE_SIZE) * sizeof(pt_entry_t)
* = 0xbfc00000 + (VA / 4096) * 4
*
* what happens if the pmap layer is asked to perform an operation
* on a pmap that is not the one which is currently active? in that
* case we temporarily load this pmap, perform the operation, and mark
* the currently active one as pending lazy reload.
*
* the following figure shows the effects of the recursive PDP mapping:
*
* PDP (%cr3)
* +----+
* | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
* | |
* | |
* | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
* | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
* | |
* +----+
*
* note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
*
* starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
* PTP:
*
* PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
* +----+
* | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
* | |
* | |
* | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
* | 768| -> maps contents of first kernel PTP
* | |
* |1023|
* +----+
*
* note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
* defined as "PDP_BASE".... within that mapping there are two
* defines:
* "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
* which points back to itself.
*
* - PAE support -
* ---------------
*
* PAE adds another layer of indirection during address translation, breaking
* up the translation process in 3 different levels:
* - L3 page directory, containing 4 * 64-bits addresses (index determined by
* bits [31:30] from the virtual address). This breaks up the address space
* in 4 1GB regions.
* - the PD (L2), containing 512 64-bits addresses, breaking each L3 region
* in 512 * 2MB regions.
* - the PT (L1), also containing 512 64-bits addresses (at L1, the size of
* the pages is still 4K).
*
* The kernel virtual space is mapped by the last entry in the L3 page,
* the first 3 entries mapping the user VA space.
*
* Because the L3 has only 4 entries of 1GB each, we can't use recursive
* mappings at this level for PDP_PDE (this would eat up 2 of the 4GB
* virtual space). There are also restrictions imposed by Xen on the
* last entry of the L3 PD (reference count to this page cannot be
* bigger than 1), which makes it hard to use one L3 page per pmap to
* switch between pmaps using %cr3.
*
* As such, each CPU gets its own L3 page that is always loaded into its %cr3
* (ci_pae_l3_pd in the associated cpu_info struct). We claim that the VM has
* only a 2-level PTP (similar to the non-PAE case). L2 PD is now 4 contiguous
* pages long (corresponding to the 4 entries of the L3), and the different
* index/slots (like PDP_PDE) are adapted accordingly.
*
* Kernel space remains in L3[3], L3[0-2] maps the user VA space. Switching
* between pmaps consists in modifying the first 3 entries of the CPU's L3 page.
*
* PTE_BASE will need 4 entries in the L2 PD pages to map the L2 pages
* recursively.
*
* In addition, for Xen, we can't recursively map L3[3] (Xen wants the ref
* count on this page to be exactly one), so we use a shadow PD page for
* the last L2 PD. The shadow page could be static too, but to make pm_pdir[]
* contiguous we'll allocate/copy one page per pmap.
*/
/*
* Mask to get rid of the sign-extended part of addresses.
*/
#define VA_SIGN_MASK 0
#define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK)
/*
* XXXfvdl this one's not right.
*/
#define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK)
/*
* the following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE: the base VA of the linear PTE mappings
* PDP_BASE: the base VA of the recursive mapping of the PDP
* PDP_PDE: the VA of the PDE that points back to the PDP
*/
#define PTE_BASE ((pt_entry_t *) (PDIR_SLOT_PTE * NBPD_L2))
#define L1_BASE PTE_BASE
#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L2_SLOT_PTE * NBPD_L1))
#define PDP_PDE (L2_BASE + PDIR_SLOT_PTE)
#define PDP_BASE L2_BASE
#define NPDPG (PAGE_SIZE / sizeof (pd_entry_t))
#define PTP_MASK_INITIALIZER { L1_MASK, L2_MASK }
#define PTP_FRAME_INITIALIZER { L1_FRAME, L2_FRAME }
#define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT }
#define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES }
#define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES }
#define NBPD_INITIALIZER { NBPD_L1, NBPD_L2 }
#define PDES_INITIALIZER { L2_BASE }
/*
* PTE_AVL usage: we make use of the ignored bits of the PTE
*/
#define PTE_WIRED PTE_AVL1 /* Wired Mapping */
#define PTE_PVLIST PTE_AVL2 /* Mapping has entry on pvlist */
#define PTE_X PTE_AVL3 /* Executable */
/* XXX To be deleted. */
#define PG_W PTE_WIRED
#define PG_PVLIST PTE_PVLIST
#define PG_X PTE_X
#define _MACHINE_PMAP_PRIVATE_H_X86
#include <x86/pmap_private.h>
#undef _MACHINE_PMAP_PRIVATE_H_X86
#ifndef XENPV
#define pmap_pa2pte(a) (a)
#define pmap_pte2pa(a) ((a) & PTE_FRAME)
#define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
#define pmap_pte_flush() /* nothing */
#ifdef PAE
#define pmap_pte_cas(p, o, n) atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_64((volatile uint64_t *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_64((volatile uint64_t *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_64((volatile uint64_t *)p, ~(b))
#else /* PAE */
#define pmap_pte_cas(p, o, n) atomic_cas_32((p), (o), (n))
#define pmap_pte_testset(p, n) \
atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b) \
atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b) \
atomic_and_ulong((volatile unsigned long *)p, ~(b))
#endif /* PAE */
#else /* XENPV */
extern kmutex_t pte_lock;
static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
return (pt_entry_t)xpmap_ptom_masked(pa);
}
static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
return xpmap_mtop_masked(pte & PTE_FRAME);
}
static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
int s = splvm();
xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
splx(s);
}
static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *ptep;
if (opte == o) {
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
xpq_flush_queue();
}
mutex_exit(&pte_lock);
return opte;
}
static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
pt_entry_t opte;
mutex_enter(&pte_lock);
opte = *pte;
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
npte);
xpq_flush_queue();
mutex_exit(&pte_lock);
return opte;
}
static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
mutex_enter(&pte_lock);
xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
(*pte) & ~bits);
xpq_flush_queue();
mutex_exit(&pte_lock);
}
static __inline void
pmap_pte_flush(void)
{
int s = splvm();
xpq_flush_queue();
splx(s);
}
#endif
struct vm_map;
struct trapframe;
struct pcb;
int pmap_exec_fixup(struct vm_map *, struct trapframe *, struct pcb *);
#endif /* _I386_PMAP_PRIVATE_H_ */

sys/arch/i386/include/vmparam.h

@@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.86 2019/02/11 14:59:32 cherry Exp $ */
/* $NetBSD: vmparam.h,v 1.87 2022/08/20 23:48:50 riastradh Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@@ -94,6 +94,48 @@
*/
#define USRIOSIZE 300
/*
* See pmap_private.h for details.
*/
#ifdef PAE
#define L2_SLOT_PTE (KERNBASE/NBPD_L2-4) /* 1532: for recursive PDP map */
#define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 1536: start of kernel space */
#else /* PAE */
#define L2_SLOT_PTE (KERNBASE/NBPD_L2-1) /* 767: for recursive PDP map */
#define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 768: start of kernel space */
#endif /* PAE */
#define L2_SLOT_KERNBASE L2_SLOT_KERN
#define PDIR_SLOT_KERN L2_SLOT_KERN
#define PDIR_SLOT_PTE L2_SLOT_PTE
/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif
/* largest value (-1 for APTP space) */
#define NKL2_MAX_ENTRIES (NTOPLEVEL_PDES - (KERNBASE/NBPD_L2) - 1)
#define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * NPDPG)
#define NKL2_KIMG_ENTRIES 0 /* XXX unused */
#define NKL2_START_ENTRIES 0 /* XXX computed on runtime */
#define NKL1_START_ENTRIES 0 /* XXX unused */
#ifndef XENPV
#define NTOPLEVEL_PDES (PAGE_SIZE * PDP_SIZE / (sizeof (pd_entry_t)))
#else /* !XENPV */
#ifdef PAE
#define NTOPLEVEL_PDES 1964 /* 1964-2047 reserved by Xen */
#else /* PAE */
#define NTOPLEVEL_PDES 1008 /* 1008-1023 reserved by Xen */
#endif /* PAE */
#endif /* !XENPV */
/*
* Mach derived constants
*/

sys/arch/x86/acpi/acpi_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: acpi_machdep.c,v 1.32 2021/05/12 23:22:33 thorpej Exp $ */
/* $NetBSD: acpi_machdep.c,v 1.33 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@@ -40,7 +40,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_machdep.c,v 1.32 2021/05/12 23:22:33 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: acpi_machdep.c,v 1.33 2022/08/20 23:48:50 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -64,6 +64,7 @@ __KERNEL_RCSID(0, "$NetBSD: acpi_machdep.c,v 1.32 2021/05/12 23:22:33 thorpej Ex
#include <machine/i82093reg.h>
#include <machine/i82093var.h>
#include <machine/pic.h>
#include <machine/pmap_private.h>
#include <x86/efi.h>

sys/arch/x86/include/pmap.h

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.132 2022/08/20 23:18:51 riastradh Exp $ */
/* $NetBSD: pmap.h,v 1.133 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,87 +67,12 @@
#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_
/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif
#if defined(_KERNEL)
#include <sys/kcpuset.h>
#include <sys/rwlock.h>
#include <x86/pmap_pv.h>
#include <uvm/pmap/pmap_pvt.h>
#define SLAREA_USER 0
#define SLAREA_PTE 1
#define SLAREA_MAIN 2
#define SLAREA_PCPU 3
#define SLAREA_DMAP 4
#define SLAREA_HYPV 5
#define SLAREA_ASAN 6
#define SLAREA_MSAN 7
#define SLAREA_KERN 8
#define SLSPACE_NAREAS 9
struct slotspace {
struct {
size_t sslot; /* start slot */
size_t nslot; /* # of slots */
bool active; /* area is active */
} area[SLSPACE_NAREAS];
};
extern struct slotspace slotspace;
#include <x86/gdt.h>
struct pcpu_entry {
uint8_t gdt[MAXGDTSIZ];
uint8_t ldt[MAX_USERLDT_SIZE];
uint8_t idt[PAGE_SIZE];
uint8_t tss[PAGE_SIZE];
uint8_t ist0[PAGE_SIZE];
uint8_t ist1[PAGE_SIZE];
uint8_t ist2[PAGE_SIZE];
uint8_t ist3[PAGE_SIZE];
uint8_t rsp0[2 * PAGE_SIZE];
} __packed;
struct pcpu_area {
#ifdef SVS
uint8_t utls[PAGE_SIZE];
#endif
uint8_t ldt[PAGE_SIZE];
struct pcpu_entry ent[MAXCPUS];
} __packed;
extern struct pcpu_area *pcpuarea;
#define PMAP_PCID_KERN 0
#define PMAP_PCID_USER 1
/*
* pmap data structures: see pmap.c for details of locking.
*/
/*
* we maintain a list of all non-kernel pmaps
*/
LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
/*
* linked list of all non-kernel pmaps
*/
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock; /* protects pmaps */
/*
* pool_cache(9) that pmaps are allocated from
*/
extern struct pool_cache pmap_cache;
#include <uvm/uvm_object.h>
/*
* the pmap structure
@@ -210,38 +135,10 @@ struct pmap {
krwlock_t pm_dummy_lock; /* ugly hack for abusing uvm_object */
};
/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
/*
* MD flags that we use for pmap_enter and pmap_kenter_pa:
*/
/*
* global kernel variables
*/
/*
* PDPpaddr is the physical address of the kernel's PDP.
* - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
* value associated to the kernel process, proc0.
* - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
* the L3 PD, it cannot be considered as the equivalent of a %cr3 any more.
* - Xen: it corresponds to the PFN of the kernel's PDP.
*/
extern u_long PDPpaddr;
extern pd_entry_t pmap_pg_g; /* do we support PTE_G? */
extern pd_entry_t pmap_pg_nx; /* do we support PTE_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];
/*
* macros
*/
@@ -257,7 +154,6 @@ extern long nkptp[PTP_LEVELS];
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn) (x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn) x86_mmap_flags(ppn)
#define pmap_valid_entry(E) ((E) & PTE_P) /* is PDE or PTE valid? */
#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT (64 - PGSHIFT)
@ -294,47 +190,8 @@ void pmap_pv_init(void);
void pmap_pv_track(paddr_t, psize_t);
void pmap_pv_untrack(paddr_t, psize_t);
void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
pd_entry_t * const **);
void pmap_unmap_ptes(struct pmap *, struct pmap *);
bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
int *lastlvl);
u_int x86_mmap_flags(paddr_t);
bool pmap_is_curpmap(struct pmap *);
void pmap_ept_transform(struct pmap *);
#ifndef __HAVE_DIRECT_MAP
void pmap_vpage_cpu_init(struct cpu_info *);
#endif
vaddr_t slotspace_rand(int, size_t, size_t, size_t, vaddr_t);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
typedef enum tlbwhy {
TLBSHOOT_REMOVE_ALL,
TLBSHOOT_KENTER,
TLBSHOOT_KREMOVE,
TLBSHOOT_FREE_PTP,
TLBSHOOT_REMOVE_PTE,
TLBSHOOT_SYNC_PV,
TLBSHOOT_WRITE_PROTECT,
TLBSHOOT_ENTER,
TLBSHOOT_NVMM,
TLBSHOOT_BUS_DMA,
TLBSHOOT_BUS_SPACE,
TLBSHOOT__MAX,
} tlbwhy_t;
void pmap_tlb_init(void);
void pmap_tlb_cpu_init(struct cpu_info *);
void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void pmap_tlb_shootnow(void);
void pmap_tlb_intr(void);
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
#define PMAP_FORK /* turn on pmap_fork interface */
@ -342,17 +199,6 @@ void pmap_tlb_intr(void);
* inline functions
*/
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
*/
__inline static void __unused
pmap_update_pg(vaddr_t va)
{
invlpg(va);
}
/*
* pmap_page_protect: change the protection of all recorded mappings
* of a managed page
@ -411,77 +257,10 @@ pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
}
}
/*
* various address inlines
*
* vtopte: return a pointer to the PTE mapping a VA, works only for
* user and PT addresses
*
* kvtopte: return a pointer to the PTE mapping a kernel VA
*/
#include <lib/libkern/libkern.h>
static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{
KASSERT(va < VM_MIN_KERNEL_ADDRESS);
return (PTE_BASE + pl1_i(va));
}
static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
pd_entry_t *pde;
KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
pde = L2_BASE + pl2_i(va);
if (*pde & PTE_PS)
return ((pt_entry_t *)pde);
return (PTE_BASE + pl1_i(va));
}
paddr_t vtophys(vaddr_t);
vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void pmap_cpu_init_late(struct cpu_info *);
#ifdef XENPV
#include <sys/bitops.h>
#define XPTE_MASK L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT (L1_SHIFT - ilog2(sizeof(pt_entry_t)))
/* PTE access inline functions */
/*
* Get the machine address of the PTE pointed to.
* We use the hardware MMU to fetch the value, so this works only for levels 1-3.
*/
static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
pt_entry_t *up_pte;
vaddr_t va = (vaddr_t) pte;
va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
up_pte = (pt_entry_t *) va;
return (paddr_t) (((*up_pte) & PTE_FRAME) + (((vaddr_t) pte) & (~PTE_FRAME & ~VA_SIGN_MASK)));
}
/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XENPV */
/* pmap functions with machine addresses */
void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
@ -495,12 +274,6 @@ paddr_t pmap_get_physpage(void);
*/
#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU 510
#define PMAP_PCPU_BASE (VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif
#ifdef __HAVE_DIRECT_MAP
extern vaddr_t pmap_direct_base;
@ -520,8 +293,6 @@ extern vaddr_t pmap_direct_end;
#endif /* __HAVE_DIRECT_MAP */
void svs_quad_copy(void *, void *, long);
#define __HAVE_VM_PAGE_MD
#define VM_MDPAGE_INIT(pg) \
memset(&(pg)->mdpage, 0, sizeof((pg)->mdpage)); \


@ -0,0 +1,316 @@
/* $NetBSD: pmap_private.h,v 1.1 2022/08/20 23:48:50 riastradh Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _X86_PMAP_PRIVATE_H_
#define _X86_PMAP_PRIVATE_H_
#ifndef _MACHINE_PMAP_PRIVATE_H_X86
#error Include machine/pmap_private.h, not x86/pmap_private.h.
#endif
#ifdef _KERNEL_OPT
#include "opt_svs.h"
#endif
#include <sys/param.h>
#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <uvm/uvm_pmap.h>
struct pmap;
#define SLAREA_USER 0
#define SLAREA_PTE 1
#define SLAREA_MAIN 2
#define SLAREA_PCPU 3
#define SLAREA_DMAP 4
#define SLAREA_HYPV 5
#define SLAREA_ASAN 6
#define SLAREA_MSAN 7
#define SLAREA_KERN 8
#define SLSPACE_NAREAS 9
struct slotspace {
struct {
size_t sslot; /* start slot */
size_t nslot; /* # of slots */
bool active; /* area is active */
} area[SLSPACE_NAREAS];
};
extern struct slotspace slotspace;
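/*
* Hypothetical illustration with made-up numbers: a direct-map area
* spanning 16 slots starting at slot 456 would be recorded as
*
*   slotspace.area[SLAREA_DMAP].sslot = 456;
*   slotspace.area[SLAREA_DMAP].nslot = 16;
*   slotspace.area[SLAREA_DMAP].active = true;
*/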
#include <x86/gdt.h>
struct pcpu_entry {
uint8_t gdt[MAXGDTSIZ];
uint8_t ldt[MAX_USERLDT_SIZE];
uint8_t idt[PAGE_SIZE];
uint8_t tss[PAGE_SIZE];
uint8_t ist0[PAGE_SIZE];
uint8_t ist1[PAGE_SIZE];
uint8_t ist2[PAGE_SIZE];
uint8_t ist3[PAGE_SIZE];
uint8_t rsp0[2 * PAGE_SIZE];
} __packed;
struct pcpu_area {
#ifdef SVS
uint8_t utls[PAGE_SIZE];
#endif
uint8_t ldt[PAGE_SIZE];
struct pcpu_entry ent[MAXCPUS];
} __packed;
extern struct pcpu_area *pcpuarea;
#define PMAP_PCID_KERN 0
#define PMAP_PCID_USER 1
/*
* pmap data structures: see pmap.c for details of locking.
*/
/*
* we maintain a list of all non-kernel pmaps
*/
LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
/*
* linked list of all non-kernel pmaps
*/
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock; /* protects pmaps */
/*
* pool_cache(9) that pmaps are allocated from
*/
extern struct pool_cache pmap_cache;
/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
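/*
* Hypothetical usage sketch, assuming pl2_i() is supplied by the MD
* pte/pmap headers: the physical address of the PDE covering va is
*
*   paddr_t pdepa = pmap_pdirpa(pmap, pl2_i(va));
*
* Under PAE, l2tol3() first selects one of the PDP_SIZE (4) PDP pages
* and l2tol2() the entry within it.
*/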
/*
* global kernel variables
*/
/*
* PDPpaddr is the physical address of the kernel's PDP.
* - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
* value associated with the kernel process, proc0.
* - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
* the L3 PD, it can no longer be considered the equivalent of a %cr3.
* - Xen: it corresponds to the PFN of the kernel's PDP.
*/
extern u_long PDPpaddr;
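/*
* Illustrative sketch, not from the original sources: on i386 non-PAE
* and amd64, activating the kernel pmap amounts to lcr3(PDPpaddr)
* (lcr3() is in <machine/cpufunc.h>); under Xen, PDPpaddr is a PFN
* and must be translated to a machine address before comparable use.
*/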
extern pd_entry_t pmap_pg_g; /* do we support PTE_G? */
extern pd_entry_t pmap_pg_nx; /* do we support PTE_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];
#define pmap_valid_entry(E) ((E) & PTE_P) /* is PDE or PTE valid? */
void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
pd_entry_t * const **);
void pmap_unmap_ptes(struct pmap *, struct pmap *);
bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
int *lastlvl);
bool pmap_is_curpmap(struct pmap *);
void pmap_ept_transform(struct pmap *);
#ifndef __HAVE_DIRECT_MAP
void pmap_vpage_cpu_init(struct cpu_info *);
#endif
vaddr_t slotspace_rand(int, size_t, size_t, size_t, vaddr_t);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
typedef enum tlbwhy {
TLBSHOOT_REMOVE_ALL,
TLBSHOOT_KENTER,
TLBSHOOT_KREMOVE,
TLBSHOOT_FREE_PTP,
TLBSHOOT_REMOVE_PTE,
TLBSHOOT_SYNC_PV,
TLBSHOOT_WRITE_PROTECT,
TLBSHOOT_ENTER,
TLBSHOOT_NVMM,
TLBSHOOT_BUS_DMA,
TLBSHOOT_BUS_SPACE,
TLBSHOOT__MAX,
} tlbwhy_t;
void pmap_tlb_init(void);
void pmap_tlb_cpu_init(struct cpu_info *);
void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void pmap_tlb_shootnow(void);
void pmap_tlb_intr(void);
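/*
* Hypothetical usage sketch: after clearing a kernel PTE whose old
* value was opte, a caller queues an invalidation of va on all CPUs
* and then waits for it to complete:
*
*   pmap_tlb_shootdown(pmap_kernel(), va, opte, TLBSHOOT_KREMOVE);
*   pmap_tlb_shootnow();
*/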
/*
* inline functions
*/
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
*/
__inline static void __unused
pmap_update_pg(vaddr_t va)
{
invlpg(va);
}
/*
* various address inlines
*
* vtopte: return a pointer to the PTE mapping a VA, works only for
* user and PT addresses
*
* kvtopte: return a pointer to the PTE mapping a kernel VA
*/
#include <lib/libkern/libkern.h>
static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{
KASSERT(va < VM_MIN_KERNEL_ADDRESS);
return (PTE_BASE + pl1_i(va));
}
static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
pd_entry_t *pde;
KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
pde = L2_BASE + pl2_i(va);
if (*pde & PTE_PS)
return ((pt_entry_t *)pde);
return (PTE_BASE + pl1_i(va));
}
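/*
* Hypothetical illustration, for 4KB mappings only (a PTE_PS
* superpage needs different frame masking): a kernel VA can be
* translated along the lines of
*
*   pt_entry_t pte = *kvtopte(va);
*   paddr_t pa = pmap_valid_entry(pte) ?
*       (pte & PTE_FRAME) | (va & PAGE_MASK) : 0;
*/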
#ifdef XENPV
#include <sys/bitops.h>
#define XPTE_MASK L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT (L1_SHIFT - ilog2(sizeof(pt_entry_t)))
/* PTE access inline functions */
/*
* Get the machine address of the PTE pointed to.
* We use the hardware MMU to fetch the value, so this works only for levels 1-3.
*/
static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
pt_entry_t *up_pte;
vaddr_t va = (vaddr_t) pte;
va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
up_pte = (pt_entry_t *) va;
return (paddr_t) (((*up_pte) & PTE_FRAME) + (((vaddr_t) pte) & (~PTE_FRAME & ~VA_SIGN_MASK)));
}
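/*
* Informally, the above works because the recursive PTE slot maps all
* page-table pages at PTE_BASE: remapping pte's address through that
* window and reading up_pte yields the machine frame backing the page
* that holds pte; adding pte's offset within that page then gives its
* machine address.
*/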
/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XENPV */
#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU 510
#define PMAP_PCPU_BASE (VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif
void svs_quad_copy(void *, void *, long);
#endif /* _X86_PMAP_PRIVATE_H_ */
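The guard at the top of this new header implies an inclusion protocol: each port's machine/pmap_private.h defines _MACHINE_PMAP_PRIVATE_H_X86 and then pulls in the shared x86 header. A minimal hypothetical wrapper (file path and layout assumed, not part of the commit) would look like:

/* sys/arch/amd64/include/pmap_private.h -- hypothetical sketch */
#ifndef _AMD64_PMAP_PRIVATE_H_
#define _AMD64_PMAP_PRIVATE_H_

#define _MACHINE_PMAP_PRIVATE_H_X86

/* MD definitions consumed by the shared x86 code go here. */

#include <x86/pmap_private.h>

/* MD inlines building on the shared definitions follow here. */

#endif /* _AMD64_PMAP_PRIVATE_H_ */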


@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.88 2022/08/13 06:59:56 skrll Exp $ */
/* $NetBSD: bus_dma.c,v 1.89 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2007, 2020 The NetBSD Foundation, Inc.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.88 2022/08/13 06:59:56 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.89 2022/08/20 23:48:51 riastradh Exp $");
/*
* The following is included because _bus_dma_uiomove is derived from
@ -106,6 +106,7 @@ __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.88 2022/08/13 06:59:56 skrll Exp $");
#ifdef MPBIOS
#include <machine/mpbiosvar.h>
#endif
#include <machine/pmap_private.h>
#if NISA > 0
#include <dev/isa/isareg.h>


@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.204 2022/08/14 07:49:33 mlelstv Exp $ */
/* $NetBSD: cpu.c,v 1.205 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2000-2020 NetBSD Foundation, Inc.
@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.204 2022/08/14 07:49:33 mlelstv Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.205 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_ddb.h"
#include "opt_mpbios.h" /* for MPDEBUG */
@ -106,6 +106,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.204 2022/08/14 07:49:33 mlelstv Exp $");
#include <machine/mtrr.h>
#include <machine/pio.h>
#include <machine/cpu_counter.h>
#include <machine/pmap_private.h>
#include <x86/fpu.h>


@ -1,4 +1,4 @@
/* $NetBSD: db_memrw.c,v 1.13 2022/08/20 23:15:37 riastradh Exp $ */
/* $NetBSD: db_memrw.c,v 1.14 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 1996, 2000 The NetBSD Foundation, Inc.
@ -53,13 +53,14 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_memrw.c,v 1.13 2022/08/20 23:15:37 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: db_memrw.c,v 1.14 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <machine/db_machdep.h>
#include <machine/pmap_private.h>
#include <x86/bootspace.h>


@ -1,4 +1,4 @@
/* $NetBSD: idt.c,v 1.16 2022/02/13 19:21:21 riastradh Exp $ */
/* $NetBSD: idt.c,v 1.17 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2009 The NetBSD Foundation, Inc.
@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: idt.c,v 1.16 2022/02/13 19:21:21 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: idt.c,v 1.17 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_pcpu_idt.h"
@ -77,6 +77,7 @@ __KERNEL_RCSID(0, "$NetBSD: idt.c,v 1.16 2022/02/13 19:21:21 riastradh Exp $");
#include <uvm/uvm.h>
#include <machine/pmap_private.h>
#include <machine/segments.h>
/*


@ -1,4 +1,4 @@
/* $NetBSD: lapic.c,v 1.87 2022/04/26 05:29:15 msaitoh Exp $ */
/* $NetBSD: lapic.c,v 1.88 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 2000, 2008, 2020 The NetBSD Foundation, Inc.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lapic.c,v 1.87 2022/04/26 05:29:15 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: lapic.c,v 1.88 2022/08/20 23:48:51 riastradh Exp $");
#include "acpica.h"
#include "ioapic.h"
@ -64,6 +64,7 @@ __KERNEL_RCSID(0, "$NetBSD: lapic.c,v 1.87 2022/04/26 05:29:15 msaitoh Exp $");
#include <machine/mpacpi.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>
#include <machine/pmap_private.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <x86/x86/tsc.h>


@ -1,4 +1,4 @@
/* $NetBSD: patch.c,v 1.52 2022/08/20 23:15:37 riastradh Exp $ */
/* $NetBSD: patch.c,v 1.53 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.52 2022/08/20 23:15:37 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.53 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_lockdebug.h"
#ifdef i386
@ -51,6 +51,7 @@ __KERNEL_RCSID(0, "$NetBSD: patch.c,v 1.52 2022/08/20 23:15:37 riastradh Exp $")
#include <uvm/uvm.h>
#include <machine/pmap.h>
#include <machine/pmap_private.h>
#include <x86/bootspace.h>
#include <x86/cpuvar.h>


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.418 2022/08/20 23:18:51 riastradh Exp $ */
/* $NetBSD: pmap.c,v 1.419 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.418 2022/08/20 23:18:51 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.419 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -166,6 +166,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.418 2022/08/20 23:18:51 riastradh Exp $")
#include <machine/isa_machdep.h>
#include <machine/cpuvar.h>
#include <machine/cputypes.h>
#include <machine/pmap_private.h>
#include <x86/bootspace.h>
#include <x86/pat.h>


@ -1,4 +1,4 @@
/* $NetBSD: svs.c,v 1.40 2021/10/07 12:52:27 msaitoh Exp $ */
/* $NetBSD: svs.c,v 1.41 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.40 2021/10/07 12:52:27 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.41 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_svs.h"
#include "opt_user_ldt.h"
@ -45,9 +45,11 @@ __KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.40 2021/10/07 12:52:27 msaitoh Exp $");
#include <sys/reboot.h>
#include <x86/cputypes.h>
#include <machine/cpuvar.h>
#include <machine/frameasm.h>
#include <machine/gdt.h>
#include <machine/pmap_private.h>
#include <uvm/uvm.h>
#include <uvm/uvm_page.h>


@ -1,4 +1,4 @@
/* $NetBSD: x86_machdep.c,v 1.151 2022/08/20 23:15:37 riastradh Exp $ */
/* $NetBSD: x86_machdep.c,v 1.152 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.151 2022/08/20 23:15:37 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.152 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_modular.h"
#include "opt_physmem.h"
@ -69,6 +69,7 @@ __KERNEL_RCSID(0, "$NetBSD: x86_machdep.c,v 1.151 2022/08/20 23:15:37 riastradh
#include <dev/mm.h>
#include <machine/bootinfo.h>
#include <machine/pmap_private.h>
#include <machine/vmparam.h>
#include <uvm/uvm_extern.h>


@ -1,4 +1,4 @@
/* $NetBSD: x86_tlb.c,v 1.19 2021/10/07 12:52:27 msaitoh Exp $ */
/* $NetBSD: x86_tlb.c,v 1.20 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 2008-2020 The NetBSD Foundation, Inc.
@ -40,7 +40,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_tlb.c,v 1.19 2021/10/07 12:52:27 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_tlb.c,v 1.20 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -52,6 +52,8 @@ __KERNEL_RCSID(0, "$NetBSD: x86_tlb.c,v 1.19 2021/10/07 12:52:27 msaitoh Exp $")
#include <uvm/uvm.h>
#include <machine/cpuvar.h>
#include <machine/pmap_private.h>
#ifdef XENPV
#include <xen/xenpmap.h>
#endif /* XENPV */


@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.141 2021/08/07 16:19:08 thorpej Exp $ */
/* $NetBSD: cpu.c,v 1.142 2022/08/20 23:48:51 riastradh Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.141 2021/08/07 16:19:08 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.142 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@ -93,6 +93,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.141 2021/08/07 16:19:08 thorpej Exp $");
#include <machine/cpufunc.h>
#include <machine/cpuvar.h>
#include <machine/pmap.h>
#include <machine/pmap_private.h>
#include <machine/vmparam.h>
#include <machine/mpbiosvar.h>
#include <machine/pcb.h>


@ -1,4 +1,4 @@
/* $NetBSD: hypervisor_machdep.c,v 1.43 2022/05/31 18:01:22 bouyer Exp $ */
/* $NetBSD: hypervisor_machdep.c,v 1.44 2022/08/20 23:48:51 riastradh Exp $ */
/*
*
@ -54,7 +54,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.43 2022/05/31 18:01:22 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.44 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -66,6 +66,7 @@ __KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.43 2022/05/31 18:01:22 bouy
#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <machine/pmap_private.h>
#include <x86/machdep.h>
#include <x86/cpuvar.h>


@ -1,4 +1,4 @@
/* $NetBSD: x86_xpmap.c,v 1.91 2022/05/11 16:22:46 bouyer Exp $ */
/* $NetBSD: x86_xpmap.c,v 1.92 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2017 The NetBSD Foundation, Inc.
@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.91 2022/05/11 16:22:46 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.92 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_xen.h"
#include "opt_ddb.h"
@ -110,6 +110,7 @@ __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.91 2022/05/11 16:22:46 bouyer Exp $"
#include <uvm/uvm.h>
#include <machine/gdt.h>
#include <machine/pmap_private.h>
#include <xen/xenfunc.h>


@ -1,4 +1,4 @@
/* $NetBSD: xen_bus_dma.c,v 1.32 2020/05/06 19:50:26 bouyer Exp $ */
/* $NetBSD: xen_bus_dma.c,v 1.33 2022/08/20 23:48:51 riastradh Exp $ */
/* NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */
/*-
@ -32,16 +32,17 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.32 2020/05/06 19:50:26 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.33 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <machine/bus_private.h>
#include <machine/pmap_private.h>
#include <uvm/uvm.h>


@ -1,4 +1,4 @@
/* $NetBSD: xen_pmap.c,v 1.39 2020/09/06 02:18:53 riastradh Exp $ */
/* $NetBSD: xen_pmap.c,v 1.40 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@ -101,7 +101,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.39 2020/09/06 02:18:53 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.40 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
@ -124,6 +124,7 @@ __KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.39 2020/09/06 02:18:53 riastradh Exp
#include <machine/gdt.h>
#include <machine/isa_machdep.h>
#include <machine/cpuvar.h>
#include <machine/pmap_private.h>
#include <x86/pmap_pv.h>


@ -1,4 +1,4 @@
/* $NetBSD: xenfunc.c,v 1.28 2020/05/06 19:47:05 bouyer Exp $ */
/* $NetBSD: xenfunc.c,v 1.29 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2004 Christian Limpach.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.28 2020/05/06 19:47:05 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.29 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
@ -35,6 +35,7 @@ __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.28 2020/05/06 19:47:05 bouyer Exp $");
#include <machine/intr.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <machine/pmap_private.h>
#include <xen/xen.h>
#include <xen/hypervisor.h>
//#include <xen/evtchn.h>


@ -1,4 +1,4 @@
/* $NetBSD: xen_machdep.c,v 1.26 2020/06/04 08:24:44 msaitoh Exp $ */
/* $NetBSD: xen_machdep.c,v 1.27 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@ -53,7 +53,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.26 2020/06/04 08:24:44 msaitoh Exp $");
__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.27 2022/08/20 23:48:51 riastradh Exp $");
#include "opt_xen.h"
@ -75,6 +75,8 @@ __KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.26 2020/06/04 08:24:44 msaitoh Exp
#include <xen/shutdown_xenbus.h>
#include <xen/include/public/version.h>
#include <machine/pmap_private.h>
#define DPRINTK(x) printk x
#if 0
#define DPRINTK(x)


@ -1,4 +1,4 @@
/* $NetBSD: nvmm_x86_svm.c,v 1.83 2021/03/26 15:59:53 reinoud Exp $ */
/* $NetBSD: nvmm_x86_svm.c,v 1.84 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.83 2021/03/26 15:59:53 reinoud Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.84 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -48,6 +48,7 @@ __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.83 2021/03/26 15:59:53 reinoud Ex
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>
#include <machine/pmap_private.h>
#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>


@ -1,4 +1,4 @@
/* $NetBSD: nvmm_x86_vmx.c,v 1.83 2022/05/13 19:34:47 tnn Exp $ */
/* $NetBSD: nvmm_x86_vmx.c,v 1.84 2022/08/20 23:48:51 riastradh Exp $ */
/*
* Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.83 2022/05/13 19:34:47 tnn Exp $");
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.84 2022/08/20 23:48:51 riastradh Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -49,6 +49,7 @@ __KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.83 2022/05/13 19:34:47 tnn Exp $"
#include <x86/cpu_counter.h>
#include <machine/cpuvar.h>
#include <machine/pmap_private.h>
#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>