Support __HAVE_PMAP_PV_TRACK in sys/uvm/pmap based pmaps (aka common pmap)

skrll 2020-12-20 16:38:25 +00:00
parent cdb6e12087
commit 31d27c3628
11 changed files with 187 additions and 109 deletions
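For context, a minimal sketch (not part of the diff below) of how the pv-tracking support added here is meant to be consumed: a driver registers an unmanaged physical range with the common pmap and can later revoke every mapping of it. This assumes the pmap_pv_track()/pmap_pv_untrack() registration routines from <uvm/pmap/pmap_pvt.h>; the address range and function names are hypothetical.

/*
 * Sketch only -- not part of this commit.  Assumes the pmap_pv_track()/
 * pmap_pv_untrack() interface from <uvm/pmap/pmap_pvt.h>; the address
 * range and function names are made up.
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <uvm/pmap/pmap_pvt.h>

#define EXAMPLE_DEV_PA		0x10000000UL	/* hypothetical device memory */
#define EXAMPLE_DEV_SIZE	(4 * PAGE_SIZE)

static void
example_attach(void)
{
	/* Register the range so the common pmap keeps pv entries for it. */
	pmap_pv_track(EXAMPLE_DEV_PA, EXAMPLE_DEV_SIZE);
}

static void
example_detach(void)
{
	paddr_t pa;

	/* Tear down every mapping of each page, then stop tracking it. */
	for (pa = EXAMPLE_DEV_PA; pa < EXAMPLE_DEV_PA + EXAMPLE_DEV_SIZE;
	    pa += PAGE_SIZE)
		pmap_pv_protect(pa, VM_PROT_NONE);
	pmap_pv_untrack(EXAMPLE_DEV_PA, EXAMPLE_DEV_SIZE);
}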

@@ -1,4 +1,4 @@
/* $NetBSD: pmap_coldfire.h,v 1.3 2020/08/07 07:19:45 skrll Exp $ */
/* $NetBSD: pmap_coldfire.h,v 1.4 2020/12/20 16:38:25 skrll Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -92,7 +92,7 @@ vaddr_t pmap_kvptefill(vaddr_t, vaddr_t, pt_entry_t);
#endif
#endif
void pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
void pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
vaddr_t pmap_bootstrap(vaddr_t, vaddr_t, phys_ram_seg_t *, size_t);
bool pmap_extract(struct pmap *, vaddr_t, paddr_t *);
@@ -114,7 +114,7 @@ vtophys(vaddr_t va)
* Virtual Cache Alias helper routines. Not a problem for Booke CPUs.
*/
static inline bool
pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *nptep)
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
{
return false;
}
@@ -126,7 +126,7 @@ pmap_md_vca_remove(struct vm_page *pg, vaddr_t va)
}
static inline void
pmap_md_vca_clean(struct vm_page *pg, vaddr_t va, int op)
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.74 2020/08/17 03:19:35 mrg Exp $ */
/* $NetBSD: pmap.h,v 1.75 2020/12/20 16:38:25 skrll Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -93,6 +93,7 @@ typedef uint32_t pt_entry_t;
#define KERNEL_PID 0
#if defined(__PMAP_PRIVATE)
struct vm_page_md;
#include <mips/locore.h>
#include <mips/cache.h>
@@ -122,9 +123,9 @@ typedef uint32_t pt_entry_t;
void pmap_md_init(void);
void pmap_md_icache_sync_all(void);
void pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
bool pmap_md_vca_add(struct vm_page *, vaddr_t, pt_entry_t *);
void pmap_md_vca_clean(struct vm_page *, int);
void pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void pmap_md_vca_clean(struct vm_page_md *, int);
void pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
bool pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
@@ -178,6 +179,7 @@ struct tlbmask {
#include <uvm/uvm_pmap.h>
#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>
@@ -270,5 +272,13 @@ int sbmips_cca_for_pa(paddr_t);
#define PMAP_CCA_FOR_PA(pa) sbmips_cca_for_pa(pa)
#endif
#ifdef __HAVE_PMAP_PV_TRACK
struct pmap_page {
struct vm_page_md pp_md;
};
#define PMAP_PAGE_TO_MD(ppage) (&((ppage)->pp_md))
#endif
#endif /* _KERNEL */
#endif /* _MIPS_PMAP_H_ */
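The struct pmap_page wrapper above is what lets PMAP_PAGE_TO_MD() line up with VM_PAGE_TO_MD(): both hand back a struct vm_page_md, which is all the common pmap needs. A sketch of the resulting lookup, restating what pmap_enter() does in the uvm/pmap/pmap.c hunks further down; the helper itself is hypothetical and not part of this commit.

/*
 * Sketch only (hypothetical helper, not in the tree).  Managed pages and
 * pv-tracked pages both resolve to a struct vm_page_md *.
 */
static struct vm_page_md *
example_pa_to_md(paddr_t pa)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);

	if (pg != NULL)
		return VM_PAGE_TO_MD(pg);	/* UVM-managed page */

#ifdef __HAVE_PMAP_PV_TRACK
	struct pmap_page * const pp = pmap_pv_tracked(pa);

	if (pp != NULL)
		return PMAP_PAGE_TO_MD(pp);	/* unmanaged but pv-tracked */
#endif

	return NULL;				/* unmanaged, not tracked */
}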

@@ -1,4 +1,4 @@
/* $NetBSD: pmap_machdep.c,v 1.32 2020/12/20 15:59:28 skrll Exp $ */
/* $NetBSD: pmap_machdep.c,v 1.33 2020/12/20 16:38:25 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.32 2020/12/20 15:59:28 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.33 2020/12/20 16:38:25 skrll Exp $");
/*
* Manages physical address maps.
@@ -170,17 +170,19 @@ u_int pmap_page_cache_alias_mask;
#define pmap_md_cache_indexof(x) (((vaddr_t)(x)) & pmap_page_cache_alias_mask)
static register_t
pmap_md_map_ephemeral_page(struct vm_page *pg, bool locked_p, int prot,
pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot,
pt_entry_t *old_pte_p)
{
KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
const paddr_t pa = VM_PAGE_TO_PHYS(pg);
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
pv_entry_t pv = &mdpg->mdpg_first;
register_t va = 0;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
UVMHIST_LOG(pmaphist, "(pg=%p, prot=%d, ptep=%p)",
pg, prot, old_pte_p, 0);
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, prot=%d, ptep=%#jx)",
(uintptr_t)pg, prot, (uintptr_t)old_pte_p, 0);
KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
@@ -260,15 +262,16 @@ pmap_md_map_ephemeral_page(struct vm_page *pg, bool locked_p, int prot,
}
static void
pmap_md_unmap_ephemeral_page(struct vm_page *pg, bool locked_p, register_t va,
pt_entry_t old_pte)
pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p,
register_t va, pt_entry_t old_pte)
{
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
pv_entry_t pv = &mdpg->mdpg_first;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
UVMHIST_LOG(pmaphist, "(pg=%p, va=%#lx, pte=%#"PRIxPTE")",
pg, va, pte_value(old_pte), 0);
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(pg=%#jx, va=%#lx, pte=%#"PRIxPTE")",
(uintptr_t)VM_MD_TO_PAGE(mdpg), va, pte_value(old_pte), 0);
KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
@@ -305,17 +308,17 @@ pmap_md_unmap_ephemeral_page(struct vm_page *pg, bool locked_p, register_t va,
}
static void
pmap_md_vca_page_wbinv(struct vm_page *pg, bool locked_p)
pmap_md_vca_page_wbinv(struct vm_page_md *mdpg, bool locked_p)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
pt_entry_t pte;
const register_t va = pmap_md_map_ephemeral_page(pg, locked_p,
const register_t va = pmap_md_map_ephemeral_page(mdpg, locked_p,
VM_PROT_READ, &pte);
mips_dcache_wbinv_range(va, PAGE_SIZE);
pmap_md_unmap_ephemeral_page(pg, locked_p, va, pte);
pmap_md_unmap_ephemeral_page(mdpg, locked_p, va, pte);
}
bool
@@ -609,20 +612,21 @@ pmap_zero_page(paddr_t dst_pa)
{
pt_entry_t dst_pte;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
UVMHIST_LOG(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
PMAP_COUNT(zeroed_pages);
struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
mips_pagezero(dst_va);
pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
@@ -635,39 +639,41 @@ pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
{
pt_entry_t src_pte, dst_pte;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
UVMHIST_LOG(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa, 0, 0);
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa,
0, 0);
PMAP_COUNT(copied_pages);
struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);
const register_t src_va = pmap_md_map_ephemeral_page(src_pg, false,
struct vm_page_md * const src_mdpg = VM_PAGE_TO_MD(src_pg);
struct vm_page_md * const dst_mdpg = VM_PAGE_TO_MD(dst_pg);
const register_t src_va = pmap_md_map_ephemeral_page(src_mdpg, false,
VM_PROT_READ, &src_pte);
KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(dst_pg)));
KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(dst_pg)));
const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
KASSERT(VM_PAGEMD_PVLIST_EMPTY_P(dst_mdpg));
KASSERT(!VM_PAGEMD_EXECPAGE_P(dst_mdpg));
const register_t dst_va = pmap_md_map_ephemeral_page(dst_mdpg, false,
VM_PROT_READ|VM_PROT_WRITE, &dst_pte);
mips_pagecopy(dst_va, src_va);
pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
pmap_md_unmap_ephemeral_page(src_pg, false, src_va, src_pte);
pmap_md_unmap_ephemeral_page(dst_mdpg, false, dst_va, dst_pte);
pmap_md_unmap_ephemeral_page(src_mdpg, false, src_va, src_pte);
UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
void
pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
struct mips_options * const opts = &mips_options;
if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
return;
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
/*
* If onproc is empty, we could do a
* pmap_page_protect(pg, VM_PROT_NONE) and remove all
@@ -679,16 +685,19 @@ pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
if (VM_PAGEMD_CACHED_P(mdpg)) {
/* This was probably mapped cached by UBC so flush it */
pt_entry_t pte;
const register_t tva = pmap_md_map_ephemeral_page(pg, false,
VM_PROT_READ, &pte);
const register_t tva = pmap_md_map_ephemeral_page(mdpg,
false, VM_PROT_READ, &pte);
UVMHIST_LOG(pmaphist, " va %#"PRIxVADDR, tva, 0, 0, 0);
mips_dcache_wbinv_range(tva, PAGE_SIZE);
mips_icache_sync_range(tva, PAGE_SIZE);
pmap_md_unmap_ephemeral_page(pg, false, tva, pte);
pmap_md_unmap_ephemeral_page(mdpg, false, tva, pte);
}
} else {
KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
PAGE_SIZE);
}
@@ -738,7 +747,7 @@ pmap_md_map_poolpage(paddr_t pa, size_t len)
*/
if (MIPS_CACHE_VIRTUAL_ALIAS
&& mips_cache_badalias(last_va, va)) {
pmap_md_vca_page_wbinv(pg, false);
pmap_md_vca_page_wbinv(mdpg, false);
}
pv->pv_va = va;
@@ -910,10 +919,9 @@ tlb_walk(void *ctx, tlb_walkfunc_t func)
}
bool
pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *ptep)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
return false;
@@ -979,8 +987,8 @@ pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
* share the same cache index again.
*/
if (mips_cache_badalias(pv->pv_va, va)) {
pmap_page_cache(pg, false);
pmap_md_vca_page_wbinv(pg, true);
pmap_page_cache(mdpg, false);
pmap_md_vca_page_wbinv(mdpg, true);
*ptep = pte_cached_change(*ptep, false);
PMAP_COUNT(page_cache_evictions);
}
@@ -993,17 +1001,17 @@ pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
}
void
pmap_md_vca_clean(struct vm_page *pg, int op)
pmap_md_vca_clean(struct vm_page_md *mdpg, int op)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
return;
UVMHIST_LOG(pmaphist, "(pg=%p, op=%d)", pg, op, 0, 0);
KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(VM_PAGE_TO_MD(pg)));
UVMHIST_LOG(pmaphist, "(mdpg=%#jx, op=%d)", (uintptr_t)mdpg, op, 0, 0);
KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
if (op == PMAP_WB || op == PMAP_WBINV) {
pmap_md_vca_page_wbinv(pg, true);
pmap_md_vca_page_wbinv(mdpg, true);
} else if (op == PMAP_INV) {
KASSERT(op == PMAP_INV && false);
//mips_dcache_inv_range_index(va, PAGE_SIZE);
@@ -1040,7 +1048,7 @@ pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty, bool last)
break;
}
if (pv0 == NULL)
pmap_page_cache(pg, true);
pmap_page_cache(mdpg, true);
VM_PAGEMD_PVLIST_UNLOCK(mdpg);
#endif
}

@@ -1,4 +1,4 @@
/* $NetBSD: booke_pmap.c,v 1.29 2020/07/06 10:09:23 rin Exp $ */
/* $NetBSD: booke_pmap.c,v 1.30 2020/12/20 16:38:25 skrll Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -37,7 +37,7 @@
#define __PMAP_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.29 2020/07/06 10:09:23 rin Exp $");
__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.30 2020/12/20 16:38:25 skrll Exp $");
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
@@ -87,8 +87,12 @@ pmap_procwr(struct proc *p, vaddr_t va, size_t len)
}
void
pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
struct vm_page * const pg = VM_MD_TO_PAGE(mdpg);
/*
* If onproc is empty, we could do a
* pmap_page_protect(pg, VM_PROT_NONE) and remove all

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.23 2020/08/07 07:19:45 skrll Exp $ */
/* $NetBSD: pmap.h,v 1.24 2020/12/20 16:38:25 skrll Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -104,7 +104,7 @@ vaddr_t pmap_kvptefill(vaddr_t, vaddr_t, pt_entry_t);
#endif
#endif
void pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
void pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
vaddr_t pmap_bootstrap(vaddr_t, vaddr_t, phys_ram_seg_t *, size_t);
bool pmap_extract(struct pmap *, vaddr_t, paddr_t *);
@@ -126,19 +126,19 @@ vtophys(vaddr_t va)
* Virtual Cache Alias helper routines. Not a problem for Booke CPUs.
*/
static __inline bool
pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *nptep)
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
{
return false;
}
static __inline void
pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty)
pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va, bool dirty)
{
}
static __inline void
pmap_md_vca_clean(struct vm_page *pg, vaddr_t va, int op)
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.7 2020/11/15 08:09:56 skrll Exp $ */
/* $NetBSD: pmap.h,v 1.8 2020/12/20 16:38:25 skrll Exp $ */
/*
* Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
@@ -120,7 +120,7 @@ paddr_t pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t pmap_md_direct_map_paddr(paddr_t);
void pmap_md_init(void);
bool pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
//void pmap_md_page_syncicache(struct vm_page *, const kcpuset_t *);
void pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
void pmap_md_pdetab_activate(struct pmap *);
void pmap_md_pdetab_init(struct pmap *);
@@ -133,7 +133,7 @@ extern vaddr_t pmap_direct_end;
#ifdef __PMAP_PRIVATE
static inline void
pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *kc)
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *kc)
{
__asm __volatile("fence\trw,rw; fence.i");
}
@@ -142,19 +142,19 @@ pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *kc)
* Virtual Cache Alias helper routines. Not a problem for RISCV CPUs.
*/
static inline bool
pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *nptep)
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
{
return false;
}
static inline void
pmap_md_vca_remove(struct vm_page *pg, vaddr_t va)
pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va)
{
}
static inline void
pmap_md_vca_clean(struct vm_page *pg, vaddr_t va, int op)
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}

@@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.14 2020/11/14 13:05:14 skrll Exp $ */
/* $NetBSD: trap.c,v 1.15 2020/12/20 16:38:26 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
#define __PMAP_PRIVATE
#define __UFETCHSTORE_PRIVATE
__RCSID("$NetBSD: trap.c,v 1.14 2020/11/14 13:05:14 skrll Exp $");
__RCSID("$NetBSD: trap.c,v 1.15 2020/12/20 16:38:26 skrll Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -276,7 +276,8 @@ trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause,
pmap_tlb_update_addr(pmap, addr, npte, 0);
if (attr & VM_PAGEMD_EXECPAGE)
pmap_md_page_syncicache(pg, curcpu()->ci_data.cpu_kcpuset);
pmap_md_page_syncicache(VM_PAGE_TO_MD(pg),
curcpu()->ci_data.cpu_kcpuset);
return true;
}

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.57 2020/10/08 14:02:40 skrll Exp $ */
/* $NetBSD: pmap.c,v 1.58 2020/12/20 16:38:26 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.57 2020/10/08 14:02:40 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.58 2020/12/20 16:38:26 skrll Exp $");
/*
* Manages physical address maps.
@@ -111,6 +111,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.57 2020/10/08 14:02:40 skrll Exp $");
#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>
#include <uvm/pmap/pmap_pvt.h>
#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
&& !defined(PMAP_NO_PV_UNCACHED)
@@ -148,6 +149,7 @@ PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
@@ -246,10 +248,10 @@ u_int pmap_page_colormask;
(pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
/* Forward function declarations */
void pmap_page_remove(struct vm_page *);
void pmap_page_remove(struct vm_page_md *);
static void pmap_pvlist_check(struct vm_page_md *);
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);
void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);
/*
* PV table management functions.
@@ -430,7 +432,7 @@ pmap_page_syncicache(struct vm_page *pg)
pmap_pvlist_check(mdpg);
VM_PAGEMD_PVLIST_UNLOCK(mdpg);
kpreempt_disable();
pmap_md_page_syncicache(pg, onproc);
pmap_md_page_syncicache(mdpg, onproc);
kpreempt_enable();
#ifdef MULTIPROCESSOR
kcpuset_destroy(onproc);
@@ -746,17 +748,25 @@ pmap_activate(struct lwp *l)
* Reflects back modify bits to the pager.
*/
void
pmap_page_remove(struct vm_page *pg)
pmap_page_remove(struct vm_page_md *mdpg)
{
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
kpreempt_disable();
VM_PAGEMD_PVLIST_LOCK(mdpg);
pmap_pvlist_check(mdpg);
struct vm_page * const pg =
VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmapexechist, "pg %#jx (pa %#jx) [page removed]: "
"execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
if (pg) {
UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
"execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
VM_PAGE_TO_PHYS(pg), 0);
} else {
UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
0, 0);
}
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
#else
@@ -862,6 +872,28 @@ pmap_page_remove(struct vm_page *pg)
UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
#ifdef __HAVE_PMAP_PV_TRACK
/*
* pmap_pv_protect: change protection of an unmanaged pv-tracked page from
* all pmaps that map it
*/
void
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
/* the only case is remove at the moment */
KASSERT(prot == VM_PROT_NONE);
struct pmap_page *pp;
pp = pmap_pv_tracked(pa);
if (pp == NULL)
panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
pa);
struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
pmap_page_remove(mdpg);
}
#endif
/*
* Make a previously active pmap (vmspace) inactive.
@@ -1068,7 +1100,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
/* remove_all */
default:
pmap_page_remove(pg);
pmap_page_remove(mdpg);
}
UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
@@ -1167,13 +1199,16 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
* Change all mappings of a managed page to cached/uncached.
*/
void
pmap_page_cache(struct vm_page *pg, bool cached)
pmap_page_cache(struct vm_page_md *mdpg, bool cached)
{
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
#ifdef UVMHIST
const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
#endif
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)",
(uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0);
UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
(uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
KASSERT(kpreempt_disabled());
KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
@@ -1255,7 +1290,13 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
if (pg) {
struct vm_page_md *mdpp = NULL;
#ifdef __HAVE_PMAP_PV_TRACK
struct pmap_page *pp = pmap_pv_tracked(pa);
mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
#endif
if (mdpg) {
/* Set page referenced/modified status based on flags */
if (flags & VM_PROT_WRITE) {
pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
@@ -1271,6 +1312,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
#endif
PMAP_COUNT(managed_mappings);
} else if (mdpp) {
#ifdef __HAVE_PMAP_PV_TRACK
pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
PMAP_COUNT(pvtracked_mappings);
#endif
} else {
/*
* Assumption: if it is not part of our managed memory
@@ -1281,7 +1328,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
PMAP_COUNT(unmanaged_mappings);
}
pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags,
KASSERTMSG(mdpg == NULL || mdpp == NULL, "mdpg %p mdpp %p", mdpg, mdpp);
struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
pt_entry_t npte = pte_make_enter(pa, md, prot, flags,
is_kernel_pmap_p);
kpreempt_disable();
@@ -1314,8 +1364,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
}
/* Done after case that may sleep/return. */
if (pg)
pmap_enter_pv(pmap, va, pg, &npte, 0);
if (md)
pmap_enter_pv(pmap, va, pa, md, &npte, 0);
/*
* Now validate mapping with desired protection/wiring.
@@ -1407,7 +1457,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
if (pg != NULL && (flags & PMAP_KMPAGE) == 0
&& pmap_md_virtual_cache_aliasing_p()) {
pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
}
#endif
@@ -1809,18 +1859,19 @@ pmap_pvlist_check(struct vm_page_md *mdpg)
* physical to virtual map table.
*/
void
pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, pt_entry_t *nptep,
u_int flags)
pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
pt_entry_t *nptep, u_int flags)
{
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
pv_entry_t pv, npv, apv;
#ifdef UVMHIST
bool first = false;
struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
NULL;
#endif
UVMHIST_FUNC(__func__);
UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
(uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
(uintptr_t)pmap, va, (uintptr_t)pg, pa);
UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
(uintptr_t)nptep, pte_value(*nptep), 0, 0);
@@ -1849,14 +1900,14 @@ again:
// If the new mapping has an incompatible color the last
// mapping of this page, clean the page before using it.
if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
pmap_md_vca_clean(pg, PMAP_WBINV);
pmap_md_vca_clean(mdpg, PMAP_WBINV);
}
#endif
pv->pv_pmap = pmap;
pv->pv_va = va | flags;
} else {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
if (pmap_md_vca_add(pg, va, nptep)) {
if (pmap_md_vca_add(mdpg, va, nptep)) {
goto again;
}
#endif
@@ -1869,9 +1920,6 @@ again:
* we are only changing the protection bits.
*/
#ifdef PARANOIADIAG
const paddr_t pa = VM_PAGE_TO_PHYS(pg);
#endif
for (npv = pv; npv; npv = npv->pv_next) {
if (pmap == npv->pv_pmap
&& va == trunc_page(npv->pv_va)) {

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.17 2020/08/20 05:54:32 mrg Exp $ */
/* $NetBSD: pmap.h,v 1.18 2020/12/20 16:38:26 skrll Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -195,9 +195,12 @@ bool pmap_page_clear_attributes(struct vm_page_md *, u_int);
void pmap_page_set_attributes(struct vm_page_md *, u_int);
void pmap_pvlist_lock_init(size_t);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
void pmap_page_cache(struct vm_page *, bool cached);
void pmap_page_cache(struct vm_page_md *, bool cached);
#endif
#ifdef __HAVE_PMAP_PV_TRACK
void pmap_pv_protect(paddr_t, vm_prot_t);
#endif
#define PMAP_WB 0
#define PMAP_WBINV 1

@@ -1,4 +1,4 @@
/* $NetBSD: vmpagemd.h,v 1.16 2019/12/30 18:28:06 ad Exp $ */
/* $NetBSD: vmpagemd.h,v 1.17 2020/12/20 16:38:26 skrll Exp $ */
/*-
* Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -67,14 +67,16 @@ typedef struct pv_entry {
#ifndef _MODULE
#define VM_PAGEMD_REFERENCED __BIT(0) /* page has been referenced */
#define VM_PAGEMD_MODIFIED __BIT(1) /* page has been modified */
#define VM_PAGEMD_POOLPAGE __BIT(2) /* page is used as a poolpage */
#define VM_PAGEMD_EXECPAGE __BIT(3) /* page is exec mapped */
#define VM_PAGEMD_VMPAGE __BIT(0) /* page is vm managed */
#define VM_PAGEMD_REFERENCED __BIT(1) /* page has been referenced */
#define VM_PAGEMD_MODIFIED __BIT(2) /* page has been modified */
#define VM_PAGEMD_POOLPAGE __BIT(3) /* page is used as a poolpage */
#define VM_PAGEMD_EXECPAGE __BIT(4) /* page is exec mapped */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
#define VM_PAGEMD_UNCACHED __BIT(4) /* page is mapped uncached */
#define VM_PAGEMD_UNCACHED __BIT(5) /* page is mapped uncached */
#endif
#define VM_PAGEMD_VMPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_VMPAGE) != 0)
#define VM_PAGEMD_REFERENCED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
#define VM_PAGEMD_MODIFIED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
#define VM_PAGEMD_POOLPAGE_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
@@ -141,6 +143,7 @@ pmap_pvlist_unlock(struct vm_page_md *mdpg)
static __inline bool
pmap_pvlist_locked_p(struct vm_page_md *mdpg)
{
return mutex_owned(pmap_pvlist_lock_addr(mdpg));
}
#endif /* _KERNEL */
@@ -150,7 +153,7 @@ do { \
(pg)->mdpage.mdpg_first.pv_next = NULL; \
(pg)->mdpage.mdpg_first.pv_pmap = NULL; \
(pg)->mdpage.mdpg_first.pv_va = VM_PAGE_TO_PHYS(pg); \
(pg)->mdpage.mdpg_attrs = 0; \
(pg)->mdpage.mdpg_attrs = VM_PAGEMD_VMPAGE; \
VM_PAGEMD_PVLIST_LOCK_INIT(&(pg)->mdpage); \
} while (/* CONSTCOND */ 0)
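The new VM_PAGEMD_VMPAGE attribute is set only when a page's mdpage is initialized (the do/while block above), so MD code can tell whether a struct vm_page_md actually sits inside a struct vm_page before converting back with VM_MD_TO_PAGE() from uvm_page.h below. A sketch of that check-then-convert idiom; the helper name is made up.

/*
 * Sketch only (hypothetical helper).  Only vm-managed pages carry
 * VM_PAGEMD_VMPAGE; a pv-tracked page has no struct vm_page behind it.
 */
static struct vm_page *
example_md_to_page(struct vm_page_md *mdpg)
{
	if (!VM_PAGEMD_VMPAGE_P(mdpg))
		return NULL;		/* pv-tracked: no struct vm_page */
	return VM_MD_TO_PAGE(mdpg);	/* container_of() back to the page */
}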

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.h,v 1.108 2020/12/20 15:50:44 skrll Exp $ */
/* $NetBSD: uvm_page.h,v 1.109 2020/12/20 16:38:26 skrll Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -406,6 +406,7 @@ int uvm_direct_process(struct vm_page **, u_int, voff_t, vsize_t,
#ifdef __HAVE_VM_PAGE_MD
#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
#define VM_MD_TO_PAGE(md) (container_of((md), struct vm_page, mdpage))
#endif
/*