Provide MI PMAP support on AARCH64

skrll 2022-11-03 09:04:56 +00:00
parent daa7d68ea3
commit a856f9893d
10 changed files with 1529 additions and 147 deletions


@ -1,4 +1,4 @@
# $NetBSD: Makefile.inc,v 1.130 2022/10/29 08:52:47 jmcneill Exp $
# $NetBSD: Makefile.inc,v 1.131 2022/11/03 09:04:56 skrll Exp $
#
# etc.evbarm/Makefile.inc -- evbarm-specific etc Makefile targets
#
@ -31,7 +31,9 @@ KERNEL_SETS.armv6hf+= RPI2
KERNEL_SETS.armv7+= GENERIC
KERNEL_SETS.armv7hf+= GENERIC
KERNEL_SETS.arm64+= GENERIC64
KERNEL_SETS.arm64+= GENERIC64_PMAPMI
.else
IMAGEENDIAN= le
# little endian boards
@ -65,7 +67,9 @@ KERNEL_SETS.armv6hf+= RPI2
KERNEL_SETS.armv7+= GENERIC
KERNEL_SETS.armv7hf+= GENERIC
KERNEL_SETS.arm64+= GENERIC64
KERNEL_SETS.arm64+= GENERIC64_PMAPMI
.endif
IMAGE.rel= ${RELEASEDIR}/${RELEASEMACHINEDIR}


@ -0,0 +1,780 @@
/* $NetBSD: pmap_machdep.c,v 1.1 2022/11/03 09:04:56 skrll Exp $ */
/*-
* Copyright (c) 2022 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Nick Hudson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_arm_debug.h"
#include "opt_efi.h"
#include "opt_multiprocessor.h"
#include "opt_uvmhist.h"
#define __PMAP_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.1 2022/11/03 09:04:56 skrll Exp $");
#include <sys/param.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/pmap/pmap_pvt.h>
#include <aarch64/cpufunc.h>
#include <arm/locore.h>
#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...) printf(__VA_ARGS__)
#else
#define VPRINTF(...) __nothing
#endif
/* Set to LX_BLKPAG_GP if supported. */
uint64_t pmap_attr_gp = 0;
/*
* Misc variables
*/
vaddr_t virtual_avail;
vaddr_t virtual_end;
bool pmap_devmap_bootstrap_done = false;
paddr_t
vtophys(vaddr_t va)
{
paddr_t pa;
if (pmap_extract(pmap_kernel(), va, &pa) == false)
return 0;
return pa;
}
bool
pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp)
{
paddr_t pa;
bool coherency = false;
if (pm == pmap_kernel()) {
if (pmap_md_direct_mapped_vaddr_p(va)) {
pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
goto done;
}
if (pmap_md_io_vaddr_p(va))
panic("pmap_extract: io address %#"PRIxVADDR"", va);
if (va >= pmap_limits.virtual_end)
panic("%s: illegal kernel mapped address %#"PRIxVADDR,
__func__, va);
}
kpreempt_disable();
const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
pt_entry_t pte;
if (ptep == NULL || !pte_valid_p(pte = *ptep)) {
kpreempt_enable();
return false;
}
kpreempt_enable();
pa = pte_to_paddr(pte) | (va & PGOFSET);
switch (pte & LX_BLKPAG_ATTR_MASK) {
case LX_BLKPAG_ATTR_NORMAL_NC:
case LX_BLKPAG_ATTR_DEVICE_MEM:
case LX_BLKPAG_ATTR_DEVICE_MEM_NP:
coherency = true;
break;
}
done:
if (pap != NULL) {
*pap = pa;
}
if (coherentp != NULL) {
*coherentp = coherency;
}
return true;
}
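A hedged usage sketch (editor's illustration, not part of this commit): a bus_dma-style caller can use pmap_extract_coherency() to decide whether a kernel mapping needs explicit cache maintenance before device DMA. The variable va, the cpu_dcache_wb_range() call and the PAGE_SIZE length are assumptions made for the example only.

	paddr_t pa;
	bool coherent;

	if (pmap_extract_coherency(pmap_kernel(), va, &pa, &coherent) &&
	    !coherent) {
		/* normal write-back mapping: clean it before the device reads */
		cpu_dcache_wb_range(va, PAGE_SIZE);
	}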
bool
pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, bool user)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
KASSERT(!user || (pm != pmap_kernel()));
UVMHIST_LOG(pmaphist, " pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
(uintptr_t)pm, va, ftype, user);
UVMHIST_LOG(pmaphist, " ti=%#jx pai=%#jx asid=%#jx",
(uintptr_t)cpu_tlb_info(curcpu()),
(uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
(uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
kpreempt_disable();
bool fixed = false;
pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
if (ptep == NULL) {
UVMHIST_LOG(pmaphist, "... no ptep", 0, 0, 0, 0);
goto done;
}
const pt_entry_t opte = *ptep;
if (!l3pte_valid(opte)) {
UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
opte, va, 0, 0);
goto done;
}
const paddr_t pa = l3pte_pa(opte);
struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
if (pg == NULL) {
UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
goto done;
}
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
UVMHIST_LOG(pmaphist, " pg=%#jx, opte=%#jx, ptep=%#jx", (uintptr_t)pg,
opte, (uintptr_t)ptep, 0);
if ((ftype & VM_PROT_WRITE) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) {
/*
* This looks like a good candidate for "page modified"
* emulation...
*/
pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
/*
* Enable write permissions for the page by setting the Access Flag.
*/
// XXXNH LX_BLKPAG_OS_0?
const pt_entry_t npte = opte | LX_BLKPAG_AF | LX_BLKPAG_OS_0;
atomic_swap_64(ptep, npte);
dsb(ishst);
fixed = true;
UVMHIST_LOG(pmaphist, " <-- done (mod emul: changed pte "
"from %#jx to %#jx)", opte, npte, 0, 0);
} else if ((ftype & VM_PROT_READ) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO) {
/*
* This looks like a good candidate for "page referenced"
* emulation.
*/
pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
/*
 * Mark the page referenced and make it accessible again by
 * setting the Access Flag.
 */
const pt_entry_t npte = opte | LX_BLKPAG_AF;
atomic_swap_64(ptep, npte);
dsb(ishst);
fixed = true;
UVMHIST_LOG(pmaphist, " <-- done (ref emul: changed pte "
"from %#jx to %#jx)", opte, npte, 0, 0);
}
done:
kpreempt_enable();
return fixed;
}
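For context, a hedged sketch of how a trap handler is expected to drive the referenced/modified emulation above before falling back to the full fault path; the map, va, ftype and user variables and the surrounding error handling are assumptions, not taken from this commit.

	/* let ref/mod emulation fix the fault cheaply if it can */
	if (pmap_fault_fixup(vm_map_pmap(map), trunc_page(va), ftype, user))
		return;		/* fixed up; retry the faulting instruction */

	/* otherwise take the normal path through uvm_fault() */
	const int error = uvm_fault(map, trunc_page(va), ftype);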
void
pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "pm %#jx sva %#jx eva %#jx",
(uintptr_t)pm, sva, eva, 0);
KASSERT((sva & PAGE_MASK) == 0);
KASSERT((eva & PAGE_MASK) == 0);
pmap_lock(pm);
for (vaddr_t va = sva; va < eva; va += PAGE_SIZE) {
pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
if (ptep == NULL)
continue;
pt_entry_t opte = *ptep;
if (!l3pte_valid(opte)) {
UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
opte, va, 0, 0);
goto done;
}
if (l3pte_readable(opte)) {
cpu_icache_sync_range(va, PAGE_SIZE);
} else {
/*
* change to accessible temporarily
* to do cpu_icache_sync_range()
*/
struct pmap_asid_info * const pai = PMAP_PAI(pm,
    cpu_tlb_info(curcpu()));
atomic_swap_64(ptep, opte | LX_BLKPAG_AF);
// tlb_invalidate_addr does the dsb(ishst);
tlb_invalidate_addr(pai->pai_asid, va);
cpu_icache_sync_range(va, PAGE_SIZE);
atomic_swap_64(ptep, opte);
tlb_invalidate_addr(pai->pai_asid, va);
}
}
done:
pmap_unlock(pm);
}
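A minimal, hedged example of the intended use (not from this commit): after new instructions have been written into a process's address space, the affected range of that pmap is synced with page-aligned bounds, matching the KASSERTs above. The lwp/process lookup and the va/len variables are assumptions for illustration.

	struct proc * const p = l->l_proc;
	pmap_t const pm = vm_map_pmap(&p->p_vmspace->vm_map);

	pmap_icache_sync_range(pm, trunc_page(va), round_page(va + len));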
struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
/*
* Any managed page works for us.
*/
return uvm_pagealloc(NULL, 0, NULL, flags);
}
vaddr_t
pmap_md_map_poolpage(paddr_t pa, size_t len)
{
struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
const vaddr_t va = pmap_md_direct_map_paddr(pa);
KASSERT(cold || pg != NULL);
if (pg != NULL) {
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
const pv_entry_t pv = &mdpg->mdpg_first;
const vaddr_t last_va = trunc_page(pv->pv_va);
KASSERT(len == PAGE_SIZE || last_va == pa);
KASSERT(pv->pv_pmap == NULL);
KASSERT(pv->pv_next == NULL);
KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
pv->pv_va = va;
}
return va;
}
paddr_t
pmap_md_unmap_poolpage(vaddr_t va, size_t len)
{
KASSERT(len == PAGE_SIZE);
KASSERT(pmap_md_direct_mapped_vaddr_p(va));
const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg);
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
const pv_entry_t pv = &mdpg->mdpg_first;
/* Note last mapped address for future color check */
pv->pv_va = va;
KASSERT(pv->pv_pmap == NULL);
KASSERT(pv->pv_next == NULL);
return pa;
}
bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
if (!AARCH64_KVA_P(va))
return false;
paddr_t pa = AARCH64_KVA_TO_PA(va);
if (physical_start <= pa && pa < physical_end)
return true;
return false;
}
paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
return AARCH64_KVA_TO_PA(va);
}
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
return AARCH64_PA_TO_KVA(pa);
}
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
if (pmap_devmap_find_va(va, PAGE_SIZE)) {
return true;
}
return false;
}
static void
pmap_md_grow(pmap_pdetab_t *ptb, vaddr_t va, vsize_t vshift,
vsize_t *remaining)
{
KASSERT((va & (NBSEG - 1)) == 0);
const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
const vsize_t vinc = 1UL << vshift;
for (size_t i = (va >> vshift) & pdetab_mask;
i < PMAP_PDETABSIZE; i++, va += vinc) {
pd_entry_t * const pde_p =
&ptb->pde_pde[(va >> vshift) & pdetab_mask];
vaddr_t pdeva;
if (pte_pde_valid_p(*pde_p)) {
const paddr_t pa = pte_pde_to_paddr(*pde_p);
pdeva = pmap_md_direct_map_paddr(pa);
} else {
/*
* uvm_pageboot_alloc() returns a direct mapped address
*/
pdeva = uvm_pageboot_alloc(Ln_TABLE_SIZE);
paddr_t pdepa = AARCH64_KVA_TO_PA(pdeva);
*pde_p = pte_pde_pdetab(pdepa, true);
memset((void *)pdeva, 0, PAGE_SIZE);
}
if (vshift > SEGSHIFT) {
pmap_md_grow((pmap_pdetab_t *)pdeva, va,
vshift - SEGLENGTH, remaining);
} else {
if (*remaining > vinc)
*remaining -= vinc;
else
*remaining = 0;
}
if (*remaining == 0)
return;
}
}
void
pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
{
pmap_t pm = pmap_kernel();
/*
* Initialise the kernel pmap object
*/
curcpu()->ci_pmap_cur = pm;
virtual_avail = vstart;
virtual_end = vend;
aarch64_tlbi_all();
pm->pm_l0_pa = __SHIFTOUT(reg_ttbr1_el1_read(), TTBR_BADDR);
pm->pm_pdetab = (pmap_pdetab_t *)AARCH64_PA_TO_KVA(pm->pm_l0_pa);
VPRINTF("common ");
pmap_bootstrap_common();
VPRINTF("tlb0 ");
pmap_tlb_info_init(&pmap_tlb0_info);
#ifdef MULTIPROCESSOR
VPRINTF("kcpusets ");
kcpuset_create(&pm->pm_onproc, true);
kcpuset_create(&pm->pm_active, true);
KASSERT(pm->pm_onproc != NULL);
KASSERT(pm->pm_active != NULL);
kcpuset_set(pm->pm_onproc, cpu_number());
kcpuset_set(pm->pm_active, cpu_number());
#endif
VPRINTF("nkmempages ");
/*
* Compute the number of pages kmem_arena will have. This will also
* be called by uvm_km_bootstrap later, but that doesn't matter
*/
kmeminit_nkmempages();
/* Get size of buffer cache and set an upper limit */
buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
vsize_t bufsz = buf_memcalc();
buf_setvalimit(bufsz);
vsize_t kvmsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
bufsz + 16 * NCARGS + pager_map_size) +
/*(maxproc * UPAGES) + */nkmempages * NBPG;
#ifdef SYSVSHM
kvmsize += shminfo.shmall;
#endif
/* Calculate VA address space and roundup to NBSEG tables */
kvmsize = roundup(kvmsize, NBSEG);
/*
* Initialize `FYI' variables. Note we're relying on
* the fact that BSEARCH sorts the vm_physmem[] array
* for us. Must do this before uvm_pageboot_alloc()
* can be called.
*/
pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
/*
* Update the naive settings in pmap_limits to the actual KVA range.
*/
pmap_limits.virtual_start = vstart;
pmap_limits.virtual_end = vend;
VPRINTF("\nlimits: %" PRIxVADDR " - %" PRIxVADDR "\n", vstart, vend);
const vaddr_t kvmstart = vstart;
pmap_curmaxkvaddr = vstart + kvmsize;
VPRINTF("kva : %" PRIxVADDR " - %" PRIxVADDR "\n", kvmstart,
pmap_curmaxkvaddr);
pmap_md_grow(pmap_kernel()->pm_pdetab, kvmstart, XSEGSHIFT, &kvmsize);
#if defined(EFI_RUNTIME)
vaddr_t efi_l0va = uvm_pageboot_alloc(Ln_TABLE_SIZE);
KASSERT((efi_l0va & PAGE_MASK) == 0);
pmap_t efipm = pmap_efirt();
efipm->pm_l0_pa = AARCH64_KVA_TO_PA(efi_l0va);
efipm->pm_pdetab = (pmap_pdetab_t *)efi_l0va;
#endif
pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
&pool_allocator_nointr, IPL_NONE);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
#ifdef KASAN
NULL,
#else
&pmap_pv_page_allocator,
#endif
IPL_NONE);
pmap_pvlist_lock_init(/*arm_dcache_align*/ 128);
VPRINTF("done\n");
}
void
pmap_md_xtab_activate(pmap_t pm, struct lwp *l)
{
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, " (pm=%#jx l=%#jx)", (uintptr_t)pm, (uintptr_t)l, 0, 0);
/*
* Assume that TTBR1 has only global mappings and TTBR0 only
* has non-global mappings. To prevent speculation from doing
* evil things we disable translation table walks using TTBR0
* before setting the CONTEXTIDR (ASID) or new TTBR0 value.
* Once both are set, table walks are reenabled.
*/
const uint64_t old_tcrel1 = reg_tcr_el1_read();
reg_tcr_el1_write(old_tcrel1 | TCR_EPD0);
isb();
struct cpu_info * const ci = curcpu();
struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
const uint64_t ttbr =
__SHIFTIN(pai->pai_asid, TTBR_ASID) |
__SHIFTIN(pm->pm_l0_pa, TTBR_BADDR);
cpu_set_ttbr0(ttbr);
if (pm != pmap_kernel()) {
reg_tcr_el1_write(old_tcrel1 & ~TCR_EPD0);
}
UVMHIST_LOG(maphist, " pm %#jx pm->pm_l0 %016jx pm->pm_l0_pa %016jx asid %ju... done",
(uintptr_t)pm, (uintptr_t)pm->pm_pdetab, (uintptr_t)pm->pm_l0_pa,
(uintptr_t)pai->pai_asid);
KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u",
ci->ci_pmap_asid_cur, pai->pai_asid);
ci->ci_pmap_cur = pm;
}
void
pmap_md_xtab_deactivate(pmap_t pm)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
struct cpu_info * const ci = curcpu();
/*
* Disable translation table walks from TTBR0 while no pmap has been
* activated.
*/
const uint64_t old_tcrel1 = reg_tcr_el1_read();
reg_tcr_el1_write(old_tcrel1 | TCR_EPD0);
isb();
cpu_set_ttbr0(0);
ci->ci_pmap_cur = pmap_kernel();
KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u",
ci->ci_pmap_asid_cur);
}
#if defined(EFI_RUNTIME)
void
pmap_md_activate_efirt(void)
{
kpreempt_disable();
pmap_md_xtab_activate(pmap_efirt(), NULL);
}
void
pmap_md_deactivate_efirt(void)
{
pmap_md_xtab_deactivate(pmap_efirt());
kpreempt_enable();
}
#endif
void
pmap_md_pdetab_init(struct pmap *pm)
{
KASSERT(pm != NULL);
pmap_extract(pmap_kernel(), (vaddr_t)pm->pm_pdetab, &pm->pm_l0_pa);
}
void
pmap_md_pdetab_destroy(struct pmap *pm)
{
KASSERT(pm != NULL);
}
void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
//XXXNH
}
bool
pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
{
return true;
}
pd_entry_t *
pmap_l0table(struct pmap *pm)
{
return pm->pm_pdetab->pde_pde;
}
static const struct pmap_devmap *pmap_devmap_table;
vaddr_t virtual_devmap_addr;
#define L1_BLK_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
#define L2_BLK_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L2_OFFSET) == 0 && (size) >= L2_SIZE)
static vsize_t
pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size,
vm_prot_t prot, u_int flags)
{
pt_entry_t attr;
psize_t blocksize;
vsize_t resid = round_page(size);
vsize_t mapped = 0;
while (resid > 0) {
if (L1_BLK_MAPPABLE_P(va, pa, resid)) {
blocksize = L1_SIZE;
attr = L1_BLOCK;
} else if (L2_BLK_MAPPABLE_P(va, pa, resid)) {
blocksize = L2_SIZE;
attr = L2_BLOCK;
} else {
blocksize = L3_SIZE;
attr = L3_PAGE;
}
pt_entry_t pte = pte_make_kenter_pa(pa, NULL, prot, flags);
pte &= ~LX_TYPE;
attr |= pte;
pmapboot_enter(va, pa, blocksize, blocksize, attr, NULL);
va += blocksize;
pa += blocksize;
resid -= blocksize;
mapped += blocksize;
}
return mapped;
}
void
pmap_devmap_register(const struct pmap_devmap *table)
{
pmap_devmap_table = table;
}
void
pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table)
{
vaddr_t va;
int i;
pmap_devmap_register(table);
VPRINTF("%s:\n", __func__);
for (i = 0; table[i].pd_size != 0; i++) {
VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
table[i].pd_pa,
table[i].pd_pa + table[i].pd_size - 1,
table[i].pd_va);
va = table[i].pd_va;
KASSERT((VM_KERNEL_IO_ADDRESS <= va) &&
(va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE)));
/* update and check virtual_devmap_addr */
if ((virtual_devmap_addr == 0) ||
(virtual_devmap_addr > va)) {
virtual_devmap_addr = va;
}
pmap_map_chunk(
table[i].pd_va,
table[i].pd_pa,
table[i].pd_size,
table[i].pd_prot,
table[i].pd_flags);
pmap_devmap_bootstrap_done = true;
}
}
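To illustrate how a port feeds this table (a hedged sketch, not part of the commit): the field names come from struct pmap_devmap in <aarch64/pmap.h>, but the device address, the choice of L2_SIZE and the l0pt argument are assumptions made up for the example.

	static const struct pmap_devmap example_devmap[] = {
		{
			/* hypothetical UART, one block inside the kernel I/O window */
			.pd_va    = VM_KERNEL_IO_ADDRESS,
			.pd_pa    = 0x09000000,
			.pd_size  = L2_SIZE,
			.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
			.pd_flags = PMAP_DEV
		},
		DEVMAP_ENTRY_END
	};

	/* called from early MD bootstrap, before pmap_bootstrap() */
	pmap_devmap_bootstrap((vaddr_t)l0pt, example_devmap);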
const struct pmap_devmap *
pmap_devmap_find_va(vaddr_t va, vsize_t size)
{
if (pmap_devmap_table == NULL)
return NULL;
const vaddr_t endva = va + size;
for (size_t i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if ((va >= pmap_devmap_table[i].pd_va) &&
(endva <= pmap_devmap_table[i].pd_va +
pmap_devmap_table[i].pd_size)) {
return &pmap_devmap_table[i];
}
}
return NULL;
}
const struct pmap_devmap *
pmap_devmap_find_pa(paddr_t pa, psize_t size)
{
if (pmap_devmap_table == NULL)
return NULL;
const paddr_t endpa = pa + size;
for (size_t i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
if (pa >= pmap_devmap_table[i].pd_pa &&
(endpa <= pmap_devmap_table[i].pd_pa +
pmap_devmap_table[i].pd_size))
return (&pmap_devmap_table[i]);
}
return NULL;
}
#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
/* nothing */
}
#endif /* MULTIPROCESSOR */


@ -1,4 +1,4 @@
# $NetBSD: files.aarch64,v 1.40 2022/10/28 07:16:34 skrll Exp $
# $NetBSD: files.aarch64,v 1.41 2022/11/03 09:04:56 skrll Exp $
defflag opt_cpuoptions.h AARCH64_ALIGNMENT_CHECK
defflag opt_cpuoptions.h AARCH64_EL0_STACK_ALIGNMENT_CHECK
@ -55,6 +55,10 @@ defflag opt_pmap.h PMAPCOUNTERS PMAP_STEAL_MEMORY
PMAP_NEED_ALLOC_POOLPAGE
defflag opt_pmapboot.h PMAPBOOT_DEBUG
# MI PMAP flags
#
defflag opt_pmap.h PMAP_MI
# MI support
file dev/cons.c
@ -113,11 +117,14 @@ file arch/aarch64/aarch64/vm_machdep.c
# pmap
file arch/aarch64/aarch64/aarch64_tlb.c
file arch/aarch64/aarch64/pmap.c
file arch/aarch64/aarch64/pmap.c !pmap_mi
file arch/aarch64/aarch64/pmapboot.c
file arch/aarch64/aarch64/pmap_page.S
file uvm/pmap/pmap_tlb.c
file arch/aarch64/aarch64/pmap_machdep.c pmap_mi
file uvm/pmap/pmap.c pmap_mi
file uvm/pmap/pmap_pvt.c
file uvm/pmap/pmap_segtab.c pmap_mi
file uvm/pmap/pmap_tlb.c
# EFI runtime (machdep)
file arch/aarch64/aarch64/efi_machdep.c efi_runtime


@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.47 2022/06/25 13:24:34 jmcneill Exp $ */
/* $NetBSD: cpu.h,v 1.48 2022/11/03 09:04:56 skrll Exp $ */
/*-
* Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
@ -39,6 +39,7 @@
#ifdef _KERNEL_OPT
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#endif
#include <sys/param.h>
@ -137,6 +138,12 @@ struct cpu_info {
int ci_kfpu_spl;
#if defined(PMAP_MI)
struct pmap_tlb_info *ci_tlb_info;
struct pmap *ci_pmap_lastuser;
struct pmap *ci_pmap_cur;
#endif
/* ASID of current pmap */
tlb_asid_t ci_pmap_asid_cur;
@ -190,6 +197,7 @@ static __inline struct cpu_info *lwp_getcpu(struct lwp *);
#define setsoftast(ci) (cpu_signotify((ci)->ci_onproc))
#undef curlwp
#define curlwp (aarch64_curlwp())
#define curpcb ((struct pcb *)lwp_getpcb(curlwp))
void cpu_signotify(struct lwp *l);
void cpu_need_proftick(struct lwp *l);


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.56 2022/10/29 08:29:28 skrll Exp $ */
/* $NetBSD: pmap.h,v 1.57 2022/11/03 09:04:56 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@ -37,6 +37,7 @@
#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_kasan.h"
#include "opt_pmap.h"
#endif
#include <sys/types.h>
@ -48,15 +49,6 @@
#include <aarch64/armreg.h>
#include <aarch64/pte.h>
#define PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL
#define PMAP_STEAL_MEMORY
#define __HAVE_VM_PAGE_MD
#define __HAVE_PMAP_PV_TRACK 1
#define PMAP_HWPAGEWALKER 1
#define PMAP_TLB_MAX 1
#if PMAP_TLB_MAX > 1
#define PMAP_TLB_NEED_SHOOTDOWN 1
@ -92,96 +84,6 @@ pmap_md_tlb_asid_max(void)
#define KERNEL_PID 0 /* The kernel uses ASID 0 */
#ifndef KASAN
#define PMAP_MAP_POOLPAGE(pa) AARCH64_PA_TO_KVA(pa)
#define PMAP_UNMAP_POOLPAGE(va) AARCH64_KVA_TO_PA(va)
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
int (*process)(void *, size_t, void *), void *arg)
{
vaddr_t va = AARCH64_PA_TO_KVA(pa);
return process((void *)(va + pgoff), len, arg);
}
#endif
struct pmap {
kmutex_t pm_lock;
struct pool *pm_pvpool;
pd_entry_t *pm_l0table; /* L0 table: 512G*512 */
paddr_t pm_l0table_pa;
LIST_HEAD(, vm_page) pm_vmlist; /* for L[0123] tables */
LIST_HEAD(, pv_entry) pm_pvlist; /* all pv of this process */
struct pmap_statistics pm_stats;
unsigned int pm_refcnt;
unsigned int pm_idlepdp;
kcpuset_t *pm_onproc;
kcpuset_t *pm_active;
struct pmap_asid_info pm_pai[PMAP_TLB_MAX];
bool pm_activated;
};
static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
return pm->pm_l0table_pa;
}
/*
* should be kept <=32 bytes sized to reduce memory consumption & cache misses,
* but it doesn't...
*/
struct pv_entry {
struct pv_entry *pv_next;
struct pmap *pv_pmap;
vaddr_t pv_va; /* for embedded entry (pp_pv) also includes flags */
void *pv_ptep; /* pointer for fast pte lookup */
LIST_ENTRY(pv_entry) pv_proc; /* belonging to the process */
};
struct pmap_page {
kmutex_t pp_pvlock;
struct pv_entry pp_pv;
};
/* try to keep vm_page at or under 128 bytes to reduce cache misses */
struct vm_page_md {
struct pmap_page mdpg_pp;
};
/* for page descriptor page only */
#define mdpg_ptep_parent mdpg_pp.pp_pv.pv_ptep
#define VM_MDPAGE_INIT(pg) \
do { \
PMAP_PAGE_INIT(&(pg)->mdpage.mdpg_pp); \
} while (/*CONSTCOND*/ 0)
#define PMAP_PAGE_INIT(pp) \
do { \
mutex_init(&(pp)->pp_pvlock, MUTEX_NODEBUG, IPL_NONE); \
(pp)->pp_pv.pv_next = NULL; \
(pp)->pp_pv.pv_pmap = NULL; \
(pp)->pp_pv.pv_va = 0; \
(pp)->pp_pv.pv_ptep = NULL; \
} while (/*CONSTCOND*/ 0)
/* saved permission bit for referenced/modified emulation */
#define LX_BLKPAG_OS_READ LX_BLKPAG_OS_0
#define LX_BLKPAG_OS_WRITE LX_BLKPAG_OS_1
#define LX_BLKPAG_OS_WIRED LX_BLKPAG_OS_2
#define LX_BLKPAG_OS_BOOT LX_BLKPAG_OS_3
#define LX_BLKPAG_OS_RWMASK (LX_BLKPAG_OS_WRITE | LX_BLKPAG_OS_READ)
#define PMAP_PTE_OS0 "read"
#define PMAP_PTE_OS1 "write"
#define PMAP_PTE_OS2 "wired"
#define PMAP_PTE_OS3 "boot"
/* memory attributes are configured MAIR_EL1 in locore */
#define LX_BLKPAG_ATTR_NORMAL_WB __SHIFTIN(0, LX_BLKPAG_ATTR_INDX)
@ -219,30 +121,13 @@ struct vm_page_md {
#define l3pte_index(v) (((vaddr_t)(v) & L3_ADDR_BITS) >> L3_SHIFT)
#define l3pte_valid(pde) lxpde_valid(pde)
#define l3pte_is_page(pde) (((pde) & LX_TYPE) == L3_TYPE_PAG)
/* l3pte contains always page entries */
static inline uint64_t
pte_value(pt_entry_t pte)
{
return pte;
}
static inline bool
pte_valid_p(pt_entry_t pte)
{
return l3pte_valid(pte);
}
pd_entry_t *pmap_l0table(struct pmap *);
void pmap_bootstrap(vaddr_t, vaddr_t);
bool pmap_fault_fixup(struct pmap *, vaddr_t, vm_prot_t, bool user);
/* for ddb */
bool pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);
pt_entry_t *kvtopte(vaddr_t);
void pmap_db_pmap_print(struct pmap *, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_mdpg_print(struct vm_page *, void (*)(const char *, ...) __printflike(1, 2));
pd_entry_t *pmap_l0table(struct pmap *);
/* change attribute of kernel segment */
static inline pt_entry_t
@ -276,19 +161,6 @@ pmap_kvattr(pt_entry_t *ptep, vm_prot_t prot)
return opte;
}
/* pmapboot.c */
pd_entry_t *pmapboot_pagealloc(void);
void pmapboot_enter(vaddr_t, paddr_t, psize_t, psize_t, pt_entry_t,
void (*pr)(const char *, ...) __printflike(1, 2));
void pmapboot_enter_range(vaddr_t, paddr_t, psize_t, pt_entry_t,
void (*)(const char *, ...) __printflike(1, 2));
int pmapboot_protect(vaddr_t, vaddr_t, vm_prot_t);
/* Hooks for the pool allocator */
paddr_t vtophys(vaddr_t);
#define VTOPHYS_FAILED ((paddr_t)-1L) /* POOL_PADDR_INVALID */
#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
/* devmap */
struct pmap_devmap {
vaddr_t pd_va; /* virtual address */
@ -325,6 +197,9 @@ paddr_t pmap_devmap_vtophys(paddr_t);
}
#define DEVMAP_ENTRY_END { 0 }
/* Hooks for the pool allocator */
paddr_t vtophys(vaddr_t);
/* mmap cookie and flags */
#define AARCH64_MMAP_FLAG_SHIFT (64 - PGSHIFT)
#define AARCH64_MMAP_FLAG_MASK 0xf
@ -383,6 +258,145 @@ aarch64_mmap_flags(paddr_t mdpgno)
#define pmap_phys_address(pa) aarch64_ptob((pa))
#define pmap_mmap_flags(ppn) aarch64_mmap_flags((ppn))
void pmap_bootstrap(vaddr_t, vaddr_t);
bool pmap_fault_fixup(struct pmap *, vaddr_t, vm_prot_t, bool user);
pd_entry_t *pmapboot_pagealloc(void);
void pmapboot_enter(vaddr_t, paddr_t, psize_t, psize_t, pt_entry_t,
void (*pr)(const char *, ...) __printflike(1, 2));
void pmapboot_enter_range(vaddr_t, paddr_t, psize_t, pt_entry_t,
void (*)(const char *, ...) __printflike(1, 2));
int pmapboot_protect(vaddr_t, vaddr_t, vm_prot_t);
#if defined(DDB)
void pmap_db_pte_print(pt_entry_t, int, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_pteinfo(vaddr_t, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_ttbrdump(bool, vaddr_t, void (*)(const char *, ...) __printflike(1, 2));
#endif
#define LX_BLKPAG_OS_WIRED LX_BLKPAG_OS_2
#define LX_BLKPAG_OS_BOOT LX_BLKPAG_OS_3
#define PMAP_PTE_OS2 "wired"
#define PMAP_PTE_OS3 "boot"
#if defined(PMAP_MI)
#include <aarch64/pmap_machdep.h>
#else
#define PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL
#define PMAP_STEAL_MEMORY
#define __HAVE_VM_PAGE_MD
#define __HAVE_PMAP_PV_TRACK 1
struct pmap {
kmutex_t pm_lock;
struct pool *pm_pvpool;
pd_entry_t *pm_l0table; /* L0 table: 512G*512 */
paddr_t pm_l0table_pa;
LIST_HEAD(, vm_page) pm_vmlist; /* for L[0123] tables */
LIST_HEAD(, pv_entry) pm_pvlist; /* all pv of this process */
struct pmap_statistics pm_stats;
unsigned int pm_refcnt;
unsigned int pm_idlepdp;
kcpuset_t *pm_onproc;
kcpuset_t *pm_active;
struct pmap_asid_info pm_pai[PMAP_TLB_MAX];
bool pm_activated;
};
static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
return pm->pm_l0table_pa;
}
/*
* should be kept <=32 bytes sized to reduce memory consumption & cache misses,
* but it doesn't...
*/
struct pv_entry {
struct pv_entry *pv_next;
struct pmap *pv_pmap;
vaddr_t pv_va; /* for embedded entry (pp_pv) also includes flags */
void *pv_ptep; /* pointer for fast pte lookup */
LIST_ENTRY(pv_entry) pv_proc; /* belonging to the process */
};
struct pmap_page {
kmutex_t pp_pvlock;
struct pv_entry pp_pv;
};
/* try to keep vm_page at or under 128 bytes to reduce cache misses */
struct vm_page_md {
struct pmap_page mdpg_pp;
};
/* for page descriptor page only */
#define mdpg_ptep_parent mdpg_pp.pp_pv.pv_ptep
#define VM_MDPAGE_INIT(pg) \
do { \
PMAP_PAGE_INIT(&(pg)->mdpage.mdpg_pp); \
} while (/*CONSTCOND*/ 0)
#define PMAP_PAGE_INIT(pp) \
do { \
mutex_init(&(pp)->pp_pvlock, MUTEX_NODEBUG, IPL_NONE); \
(pp)->pp_pv.pv_next = NULL; \
(pp)->pp_pv.pv_pmap = NULL; \
(pp)->pp_pv.pv_va = 0; \
(pp)->pp_pv.pv_ptep = NULL; \
} while (/*CONSTCOND*/ 0)
/* saved permission bit for referenced/modified emulation */
#define LX_BLKPAG_OS_READ LX_BLKPAG_OS_0
#define LX_BLKPAG_OS_WRITE LX_BLKPAG_OS_1
#define LX_BLKPAG_OS_RWMASK (LX_BLKPAG_OS_WRITE | LX_BLKPAG_OS_READ)
#define PMAP_PTE_OS0 "read"
#define PMAP_PTE_OS1 "write"
#define VTOPHYS_FAILED ((paddr_t)-1L) /* POOL_PADDR_INVALID */
#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
#ifndef KASAN
#define PMAP_MAP_POOLPAGE(pa) AARCH64_PA_TO_KVA(pa)
#define PMAP_UNMAP_POOLPAGE(va) AARCH64_KVA_TO_PA(va)
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
int (*process)(void *, size_t, void *), void *arg)
{
vaddr_t va = AARCH64_PA_TO_KVA(pa);
return process((void *)(va + pgoff), len, arg);
}
#endif
/* l3pte contains always page entries */
static inline uint64_t
pte_value(pt_entry_t pte)
{
return pte;
}
static inline bool
pte_valid_p(pt_entry_t pte)
{
return l3pte_valid(pte);
}
pt_entry_t *kvtopte(vaddr_t);
#define pmap_update(pmap) ((void)0)
#define pmap_copy(dp,sp,d,l,s) ((void)0)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
@ -394,7 +408,6 @@ void pmap_activate_efirt(void);
void pmap_deactivate_efirt(void);
void pmap_procwr(struct proc *, vaddr_t, int);
bool pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);
void pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
void pmap_pv_init(void);
@ -404,6 +417,12 @@ void pmap_pv_protect(paddr_t, vm_prot_t);
#define PMAP_MAPSIZE1 L2_SIZE
/* for ddb */
void pmap_db_pmap_print(struct pmap *, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_mdpg_print(struct vm_page *, void (*)(const char *, ...) __printflike(1, 2));
#endif /* !PMAP_MI */
#endif /* _KERNEL */
#elif defined(__arm__)


@ -0,0 +1,519 @@
/* $NetBSD: pmap_machdep.h,v 1.1 2022/11/03 09:04:56 skrll Exp $ */
/*-
* Copyright (c) 2022 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Nick Hudson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _AARCH64_PMAP_MACHDEP_H_
#define _AARCH64_PMAP_MACHDEP_H_
#include <arm/cpufunc.h>
#define PMAP_HWPAGEWALKER 1
#define PMAP_PDETABSIZE (PAGE_SIZE / sizeof(pd_entry_t))
#define PMAP_SEGTABSIZE NSEGPG
#define PMAP_INVALID_PDETAB_ADDRESS ((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define PMAP_INVALID_SEGTAB_ADDRESS ((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define NPTEPG (PAGE_SIZE / sizeof(pt_entry_t))
#define NPDEPG (PAGE_SIZE / sizeof(pd_entry_t))
#define PTPSHIFT 3
#define PTPLENGTH (PGSHIFT - PTPSHIFT)
#define SEGSHIFT (PGSHIFT + PTPLENGTH) /* LOG2(NBSEG) */
#define NBSEG (1 << SEGSHIFT) /* bytes/segment */
#define SEGOFSET (NBSEG - 1) /* byte offset into segment */
#define SEGLENGTH (PGSHIFT - 3)
#define XSEGSHIFT (SEGSHIFT + SEGLENGTH + SEGLENGTH) /* LOG2(NBXSEG) */
#define NBXSEG (1UL << XSEGSHIFT) /* bytes/xsegment */
#define XSEGOFSET (NBXSEG - 1) /* byte offset into xsegment */
#define XSEGLENGTH (PGSHIFT - 3)
#define NXSEGPG (1 << XSEGLENGTH)
#define NSEGPG (1 << SEGLENGTH)
#ifndef __BSD_PTENTRY_T__
#define __BSD_PTENTRY_T__
#define PRIxPTE PRIx64
#endif /* __BSD_PTENTRY_T__ */
#define KERNEL_PID 0
#define __HAVE_PMAP_PV_TRACK
#define __HAVE_PMAP_MD
/* XXX temporary */
#define __HAVE_UNLOCKED_PMAP
#define PMAP_PAGE_INIT(pp) \
do { \
(pp)->pp_md.mdpg_first.pv_next = NULL; \
(pp)->pp_md.mdpg_first.pv_pmap = NULL; \
(pp)->pp_md.mdpg_first.pv_va = 0; \
(pp)->pp_md.mdpg_attrs = 0; \
VM_PAGEMD_PVLIST_LOCK_INIT(&(pp)->pp_md); \
} while (/* CONSTCOND */ 0)
struct pmap_md {
paddr_t pmd_l0_pa;
};
#define pm_l0_pa pm_md.pmd_l0_pa
void pmap_md_pdetab_init(struct pmap *);
void pmap_md_pdetab_destroy(struct pmap *);
vaddr_t pmap_md_map_poolpage(paddr_t, size_t);
paddr_t pmap_md_unmap_poolpage(vaddr_t, size_t);
struct vm_page *pmap_md_alloc_poolpage(int);
bool pmap_md_kernel_vaddr_p(vaddr_t);
paddr_t pmap_md_kernel_vaddr_to_paddr(vaddr_t);
bool pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t pmap_md_direct_map_paddr(paddr_t);
bool pmap_md_io_vaddr_p(vaddr_t);
void pmap_md_activate_efirt(void);
void pmap_md_deactivate_efirt(void);
void pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>
#include <uvm/pmap/tlb.h>
#include <uvm/uvm_page.h>
#define POOL_VTOPHYS(va) vtophys((vaddr_t)(va))
struct pmap_page {
struct vm_page_md pp_md;
};
#define PMAP_PAGE_TO_MD(ppage) (&((ppage)->pp_md))
#define PVLIST_EMPTY_P(pg) VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(pg))
#define LX_BLKPAG_OS_MODIFIED LX_BLKPAG_OS_0
#define PMAP_PTE_OS0 "modified"
#define PMAP_PTE_OS1 "(unk)"
static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
return pm->pm_l0_pa;
}
#if defined(__PMAP_PRIVATE)
#include <uvm/uvm_physseg.h>
struct vm_page_md;
void pmap_md_icache_sync_all(void);
void pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void pmap_md_vca_clean(struct vm_page_md *, int);
void pmap_md_vca_remove(struct vm_page_md *, vaddr_t, bool, bool);
bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
void pmap_md_xtab_activate(pmap_t, struct lwp *);
void pmap_md_xtab_deactivate(pmap_t);
vaddr_t pmap_md_direct_map_paddr(paddr_t);
#ifdef MULTIPROCESSOR
#define PMAP_NO_PV_UNCACHED
#endif
static inline void
pmap_md_init(void)
{
// nothing
}
static inline bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
// TLB not walked and so not called.
return false;
}
static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
return false;
}
static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
return 0;
}
static inline pt_entry_t *
pmap_md_nptep(pt_entry_t *ptep)
{
return ptep + 1;
}
#endif /* __PMAP_PRIVATE */
#ifdef __PMAP_PRIVATE
static __inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
return l3pte_pa(pte);
}
static inline bool
pte_valid_p(pt_entry_t pte)
{
return l3pte_valid(pte);
}
static inline void
pmap_md_clean_page(struct vm_page_md *md, bool is_src)
{
}
static inline bool
pte_modified_p(pt_entry_t pte)
{
return (pte & LX_BLKPAG_OS_MODIFIED) != 0;
}
static inline bool
pte_wired_p(pt_entry_t pte)
{
return (pte & LX_BLKPAG_OS_WIRED) != 0;
}
static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
return pte | LX_BLKPAG_OS_WIRED;
}
static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
return pte & ~LX_BLKPAG_OS_WIRED;
}
static inline uint64_t
pte_value(pt_entry_t pte)
{
return pte;
}
static inline bool
pte_cached_p(pt_entry_t pte)
{
return ((pte & LX_BLKPAG_ATTR_MASK) == LX_BLKPAG_ATTR_NORMAL_WB);
}
static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
return false;
}
static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
/* Not valid entry */
return kernel_p ? 0 : 0;
}
static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t prot)
{
return (pte & ~LX_BLKPAG_AP)
| (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
}
static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
return pte & ~LX_BLKPAG_AF;
}
static inline pt_entry_t
pte_cached_change(pt_entry_t pte, bool cached)
{
pte &= ~LX_BLKPAG_ATTR_MASK;
pte |= (cached ? LX_BLKPAG_ATTR_NORMAL_WB : LX_BLKPAG_ATTR_NORMAL_NC);
return pte;
}
static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
*ptep = pte;
dsb(ishst);
/*
* if this mapping is going to be used by userland then the eret *can* act
* as the isb, but might not (apple m1).
*
* if this mapping is kernel then the isb is always needed (for some micro-
* architectures)
*/
isb();
}
static inline pd_entry_t
pte_invalid_pde(void)
{
return 0;
}
static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}
static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}
static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
return lxpde_valid(pde);
}
static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
return lxpde_pa(pde);
}
static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
opde = atomic_cas_64(pdep, opde, npde);
dsb(ishst);
#else
*pdep = npde;
#endif
return opde;
}
static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{
*pdep = npde;
}
static inline pt_entry_t
pte_memattr(u_int flags)
{
switch (flags & (PMAP_DEV_MASK | PMAP_CACHE_MASK)) {
case PMAP_DEV_NP ... PMAP_DEV_NP | PMAP_CACHE_MASK:
/* Device-nGnRnE */
return LX_BLKPAG_ATTR_DEVICE_MEM_NP;
case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
/* Device-nGnRE */
return LX_BLKPAG_ATTR_DEVICE_MEM;
case PMAP_NOCACHE:
case PMAP_NOCACHE_OVR:
case PMAP_WRITE_COMBINE:
/* only no-cache */
return LX_BLKPAG_ATTR_NORMAL_NC;
case PMAP_WRITE_BACK:
case 0:
default:
return LX_BLKPAG_ATTR_NORMAL_WB;
}
}
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
u_int flags)
{
KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);
pt_entry_t pte = pa
| LX_VALID
#ifdef MULTIPROCESSOR
| LX_BLKPAG_SH_IS
#endif
| L3_TYPE_PAG
| LX_BLKPAG_AF
| LX_BLKPAG_UXN | LX_BLKPAG_PXN
| (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW)
| LX_BLKPAG_OS_WIRED;
if (prot & VM_PROT_EXECUTE)
pte &= ~LX_BLKPAG_PXN;
pte &= ~LX_BLKPAG_ATTR_MASK;
pte |= pte_memattr(flags);
return pte;
}
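A hedged usage note (editor's sketch): the flags argument consumed by pte_memattr() above is the same u_int that callers hand to the standard pmap entry points, so a driver mapping a device register uncached would do something like the following; va and pa are assumptions for the example.

	/* PMAP_DEV selects the Device-nGnRE attribute via pte_memattr() */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_DEV);
	pmap_update(pmap_kernel());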
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);
pt_entry_t npte = pa
| LX_VALID
#ifdef MULTIPROCESSOR
| LX_BLKPAG_SH_IS
#endif
| L3_TYPE_PAG
| LX_BLKPAG_AF
| LX_BLKPAG_NG /* | LX_BLKPAG_APUSER */
| LX_BLKPAG_UXN | LX_BLKPAG_PXN
| (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
if (prot & VM_PROT_EXECUTE)
npte &= ~LX_BLKPAG_PXN;
npte &= ~LX_BLKPAG_ATTR_MASK;
npte |= pte_memattr(flags);
return npte;
}
static inline pt_entry_t
pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
u_int flags, bool is_kernel_pmap_p)
{
KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);
pt_entry_t npte = pa
| LX_VALID
#ifdef MULTIPROCESSOR
| LX_BLKPAG_SH_IS
#endif
| L3_TYPE_PAG
| LX_BLKPAG_UXN | LX_BLKPAG_PXN
| (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
if ((prot & VM_PROT_WRITE) != 0 &&
((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
/*
* This is a writable mapping, and the page's mod state
* indicates it has already been modified. No need for
* modified emulation.
*/
npte |= LX_BLKPAG_AF;
} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
/*
* - The access type indicates that we don't need to do
* referenced emulation.
* OR
* - The physical page has already been referenced so no need
* to re-do referenced emulation here.
*/
npte |= LX_BLKPAG_AF;
}
if (prot & VM_PROT_EXECUTE)
npte &= (is_kernel_pmap_p ? ~LX_BLKPAG_PXN : ~LX_BLKPAG_UXN);
npte &= ~LX_BLKPAG_ATTR_MASK;
npte |= pte_memattr(flags);
/*
* Make sure userland mappings get the right permissions
*/
if (!is_kernel_pmap_p) {
npte |= LX_BLKPAG_NG | LX_BLKPAG_APUSER;
}
return npte;
}
#endif /* __PMAP_PRIVATE */
#endif /* _AARCH64_PMAP_MACHDEP_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: types.h,v 1.20 2021/10/10 07:15:25 skrll Exp $ */
/* $NetBSD: types.h,v 1.21 2022/11/03 09:04:56 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@ -34,6 +34,10 @@
#ifdef __aarch64__
#ifdef _KERNEL_OPT
#include "opt_pmap.h"
#endif
#include <sys/cdefs.h>
#include <sys/featuretest.h>
#include <arm/int_types.h>
@ -117,6 +121,11 @@ typedef __uint64_t __register_t;
#define __HAVE_RAS
#endif
#if defined(PMAP_MI)
/* XXX temporary */
#define __HAVE_UNLOCKED_PMAP
#endif
#elif defined(__arm__)
#include <arm/types.h>


@ -0,0 +1,12 @@
#
include "arch/evbarm/conf/GENERIC64"
options PMAP_MI
#options VERBOSE_INIT_ARM # verbose bootstrapping messages
#options UVMHIST # kernhist for uvm/pmap subsystems
#options UVMHIST_PRINT,KERNHIST_DELAY=0
#options LOCKDEBUG
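A hedged usage note: with this configuration in the tree, the kernel should build like any other evbarm config. The exact build.sh invocation below is an assumption for illustration and is not part of the commit.

	# cross-build only the new kernel (unprivileged update build assumed)
	./build.sh -U -u -m evbarm -a aarch64 kernel=GENERIC64_PMAPMI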


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.73 2022/11/02 08:05:17 skrll Exp $ */
/* $NetBSD: pmap.c,v 1.74 2022/11/03 09:04:57 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@ -67,7 +67,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2022/11/02 08:05:17 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.74 2022/11/03 09:04:57 skrll Exp $");
/*
* Manages physical address maps.
@ -658,6 +658,33 @@ pmap_bootstrap_common(void)
TAILQ_INIT(&pm->pm_segtab_list);
#endif
#if defined(EFI_RUNTIME)
const pmap_t efipm = pmap_efirt();
struct pmap_asid_info * const efipai = PMAP_PAI(efipm, cpu_tlb_info(ci));
rw_init(&efipm->pm_obj_lock);
uvm_obj_init(&efipm->pm_uobject, &pmap_pager, false, 1);
uvm_obj_setlock(&efipm->pm_uobject, &efipm->pm_obj_lock);
efipai->pai_asid = KERNEL_PID;
TAILQ_INIT(&efipm->pm_ppg_list);
#if defined(PMAP_HWPAGEWALKER)
TAILQ_INIT(&efipm->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
TAILQ_INIT(&efipm->pm_segtab_list);
#endif
#endif
/*
* Initialize the segtab lock.
*/
mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
pmap_tlb_miss_lock_init();
}
@ -672,11 +699,6 @@ pmap_init(void)
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
/*
* Initialize the segtab lock.
*/
mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
/*
* Set a low water mark on the pv_entry pool, so that we are
* more likely to have these around even in extreme memory


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.24 2022/10/27 06:19:56 skrll Exp $ */
/* $NetBSD: pmap.h,v 1.25 2022/11/03 09:04:57 skrll Exp $ */
/*
* Copyright (c) 1992, 1993
@ -71,6 +71,8 @@
* @(#)pmap.h 8.1 (Berkeley) 6/10/93
*/
#include "opt_efi.h"
#ifndef _UVM_PMAP_PMAP_H_
#define _UVM_PMAP_PMAP_H_