Expose pmap_pdp_cache publicly to x86/xen pmap. Provide suspend/resume
callbacks for Xen pmap. Turn the internal callbacks of pmap_pdp_cache static.

XXX the implementation of pool_cache_invalidate(9) is still wrong, and IMHO
this needs fixing before -6. See
http://mail-index.netbsd.org/tech-kern/2011/11/18/msg011924.html
This commit is contained in:
parent 6894cdda89
commit 6bfeabc65a
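For orientation before the diff, a minimal sketch of the interface this commit introduces, distilled from the hunks below. The example caller at the end is hypothetical (its name and body are illustrative only, not code from the tree):

#include <sys/pool.h>			/* struct pool_cache */

/* Now exported by pmap.h instead of being static to pmap.c. */
extern struct pool_cache pmap_pdp_cache;

#ifdef XEN
/*
 * New suspend/resume hooks: pmap_xen_suspend() flushes the APDP PDEs and,
 * under PAE, unmaps the recursive entries and invalidates pmap_pdp_cache;
 * pmap_xen_resume() re-enters the recursive entries under PAE.
 */
void	pmap_xen_suspend(void);
void	pmap_xen_resume(void);

/*
 * Hypothetical caller, modeled on the xen_machdep.c hunks below: the
 * suspend path calls the pmap hooks instead of open-coding
 * pmap_unmap_all_apdp_pdes() and the PAE recursive-entry handling.
 */
static void
example_save_restore(void)		/* illustrative name, not in the tree */
{
	pmap_xen_suspend();		/* before the domain is saved */
	/* ... domain is suspended and later restored by Xen ... */
	pmap_xen_resume();		/* after the domain comes back */
}
#endif /* XEN */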
pmap.h

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.45 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: pmap.h,v 1.46 2011/11/20 19:41:27 jym Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -124,6 +124,11 @@ LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
 extern struct pmap_head pmaps;
 extern kmutex_t pmaps_lock; /* protects pmaps */
 
+/*
+ * pool_cache(9) that PDPs are allocated from
+ */
+extern struct pool_cache pmap_pdp_cache;
+
 /*
  * the pmap structure
  *
@@ -261,8 +266,6 @@ u_int x86_mmap_flags(paddr_t);
 
 bool pmap_is_curpmap(struct pmap *);
 
-void pmap_invalidate_pool_caches(void);
-
 vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
 
 typedef enum tlbwhy {
@@ -411,15 +414,7 @@ vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
 void pmap_cpu_init_late(struct cpu_info *);
 bool sse2_idlezero_page(void *);
 
-
 #ifdef XEN
-
-void pmap_unmap_all_apdp_pdes(void);
-#ifdef PAE
-void pmap_map_recursive_entries(void);
-void pmap_unmap_recursive_entries(void);
-#endif /* PAE */
-
 #include <sys/bitops.h>
 
 #define XPTE_MASK L1_FRAME
@@ -468,9 +463,17 @@ xpmap_update (pt_entry_t *pte, pt_entry_t npte)
 paddr_t vtomach(vaddr_t);
 #define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
 
+void pmap_xen_resume(void);
+void pmap_xen_suspend(void);
+
 void pmap_apte_flush(struct pmap *);
 void pmap_unmap_apdp(void);
 
+#ifdef PAE
+void pmap_map_recursive_entries(void);
+void pmap_unmap_recursive_entries(void);
+#endif /* PAE */
+
 #endif /* XEN */
 
 /* pmap functions with machine addresses */
pmap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.141 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $ */
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.141 2011/11/08 17:16:52 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -508,17 +508,14 @@ static char *csrcp, *cdstp, *zerop, *ptpp, *early_zerop;
 
 int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
 
-/*
- * pool and cache that PDPs are allocated from
- */
-
-static struct pool_cache pmap_pdp_cache;
-int pmap_pdp_ctor(void *, void *, int);
-void pmap_pdp_dtor(void *, void *);
+/* PDP pool_cache(9) and its callbacks */
+struct pool_cache pmap_pdp_cache;
+static int pmap_pdp_ctor(void *, void *, int);
+static void pmap_pdp_dtor(void *, void *);
 #ifdef PAE
 /* need to allocate items of 4 pages */
-void *pmap_pdp_alloc(struct pool *, int);
-void pmap_pdp_free(struct pool *, void *);
+static void *pmap_pdp_alloc(struct pool *, int);
+static void pmap_pdp_free(struct pool *, void *);
 static struct pool_allocator pmap_pdp_allocator = {
 	.pa_alloc = pmap_pdp_alloc,
 	.pa_free = pmap_pdp_free,
@@ -2014,7 +2011,7 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t * const *pdes)
 /*
  * pmap_pdp_ctor: constructor for the PDP cache.
  */
-int
+static int
 pmap_pdp_ctor(void *arg, void *v, int flags)
 {
 	pd_entry_t *pdir = v;
@@ -2121,7 +2118,7 @@ pmap_pdp_ctor(void *arg, void *v, int flags)
  * pmap_pdp_dtor: destructor for the PDP cache.
  */
-void
+static void
 pmap_pdp_dtor(void *arg, void *v)
 {
 #ifdef XEN
@@ -2152,7 +2149,7 @@ pmap_pdp_dtor(void *arg, void *v)
 
 /* pmap_pdp_alloc: Allocate a page for the pdp memory pool. */
-void *
+static void *
 pmap_pdp_alloc(struct pool *pp, int flags)
 {
 	return (void *)uvm_km_alloc(kernel_map,
@@ -2165,7 +2162,7 @@ pmap_pdp_alloc(struct pool *pp, int flags)
  * pmap_pdp_free: free a PDP
 */
-void
+static void
 pmap_pdp_free(struct pool *pp, void *v)
 {
 	uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE * PDP_SIZE,
@@ -4466,19 +4463,3 @@ x86_mmap_flags(paddr_t mdpgno)
 
 	return pflag;
 }
-
-/*
- * Invalidates pool_cache(9) used by pmap(9).
- */
-void
-pmap_invalidate_pool_caches(void)
-{
-#ifdef XEN
-	/*
-	 * We must invalidate all shadow pages found inside the pmap_pdp_cache.
-	 * They are technically considered by Xen as L2 pages, although they
-	 * are not currently found inside pmaps list.
-	 */
-	pool_cache_invalidate(&pmap_pdp_cache);
-#endif
-}
xen_pmap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: xen_pmap.c,v 1.8 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: xen_pmap.c,v 1.9 2011/11/20 19:41:27 jym Exp $ */
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.8 2011/11/08 17:16:52 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.9 2011/11/20 19:41:27 jym Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -434,13 +434,12 @@ pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
 
 /*
  * Flush all APDP entries found in pmaps
- * Required during Xen save/restore operations, as it does not
+ * Required during Xen save/restore operations, as Xen does not
  * handle alternative recursive mappings properly
  */
 void
-pmap_unmap_all_apdp_pdes(void)
+pmap_xen_suspend(void)
 {
-
 	int i;
 	int s;
 	struct pmap *pm;
@@ -470,6 +469,17 @@ pmap_unmap_all_apdp_pdes(void)
 
 	splx(s);
 
+#ifdef PAE
+	pmap_unmap_recursive_entries();
+#endif
 }
 
+void
+pmap_xen_resume(void)
+{
+#ifdef PAE
+	pmap_map_recursive_entries();
+#endif
+}
+
 #ifdef PAE
@@ -486,12 +496,10 @@ pmap_unmap_all_apdp_pdes(void)
 void
 pmap_map_recursive_entries(void)
 {
-
 	int i;
 	struct pmap *pm;
 
 	mutex_enter(&pmaps_lock);
-
 	LIST_FOREACH(pm, &pmaps, pm_list) {
 		for (i = 0; i < PDP_SIZE; i++) {
 			xpq_queue_pte_update(
@@ -499,7 +507,6 @@ pmap_map_recursive_entries(void)
 			    xpmap_ptom((pm)->pm_pdirpa[i]) | PG_V);
 		}
 	}
 
 	mutex_exit(&pmaps_lock);
-
 	for (i = 0; i < PDP_SIZE; i++) {
@@ -514,21 +521,24 @@ pmap_map_recursive_entries(void)
 void
 pmap_unmap_recursive_entries(void)
 {
-
 	int i;
 	struct pmap *pm;
 
-	pmap_invalidate_pool_caches();
+	/*
+	 * Invalidate pmap_pdp_cache as it contains L2-pinned objects with
+	 * recursive entries.
+	 * XXX jym@ : find a way to drain per-CPU caches to. pool_cache_inv
+	 * does not do that.
+	 */
+	pool_cache_invalidate(&pmap_pdp_cache);
 
 	mutex_enter(&pmaps_lock);
-
 	LIST_FOREACH(pm, &pmaps, pm_list) {
 		for (i = 0; i < PDP_SIZE; i++) {
 			xpq_queue_pte_update(
 			    xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + i)), 0);
 		}
 	}
 
 	mutex_exit(&pmaps_lock);
-
 	/* do it for pmap_kernel() too! */
xen_machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: xen_machdep.c,v 1.8 2011/09/20 00:12:24 jym Exp $ */
+/* $NetBSD: xen_machdep.c,v 1.9 2011/11/20 19:41:27 jym Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -53,7 +53,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.8 2011/09/20 00:12:24 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.9 2011/11/20 19:41:27 jym Exp $");
 
 #include "opt_xen.h"
 
@@ -285,16 +285,7 @@ xen_prepare_suspend(void)
 {
 	kpreempt_disable();
 
-	/*
-	 * Xen lazy evaluation of recursive mappings requires
-	 * to flush the APDP entries
-	 */
-	pmap_unmap_all_apdp_pdes();
-
-#ifdef PAE
-	pmap_unmap_recursive_entries();
-#endif
-
+	pmap_xen_suspend();
 	xen_suspendclocks();
 
 	/*
@@ -330,9 +321,7 @@ xen_prepare_resume(void)
 		HYPERVISOR_crash();
 	}
 
-#ifdef PAE
-	pmap_map_recursive_entries();
-#endif
+	pmap_xen_resume();
 
 	if (xen_start_info.nr_pages != physmem) {
 		/*