Move the per-cpu l3 page allocation code to a separate MD function. Avoids code duplication for xen PAE

This commit is contained in:
cherry 2011-12-30 17:57:49 +00:00
parent d12f2f3b2f
commit 1f285b3396
4 changed files with 47 additions and 34 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.714 2011/11/29 11:12:26 martin Exp $ */
/* $NetBSD: machdep.c,v 1.715 2011/12/30 17:57:49 cherry Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.714 2011/11/29 11:12:26 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.715 2011/12/30 17:57:49 cherry Exp $");
#include "opt_beep.h"
#include "opt_compat_ibcs2.h"
@ -1917,3 +1917,38 @@ mm_md_open(dev_t dev, int flag, int mode, struct lwp *l)
}
return 0;
}
#ifdef PAE
/*
 * cpu_alloc_l3_page: allocate and map the per-CPU PAE L3 page directory.
 *
 * Allocates one physical page below the 4GB boundary (%cr3 is only 32
 * bits wide under PAE), maps it into kernel VA, and records the physical
 * and virtual addresses in ci->ci_pae_l3_pdirpa and ci->ci_pae_l3_pdir.
 * Panics if either the physical or virtual allocation fails.
 */
void
cpu_alloc_l3_page(struct cpu_info *ci)
{
	int ret;
	struct pglist pg;
	struct vm_page *vmap;

	KASSERT(ci != NULL);
	/*
	 * Allocate a page for the per-CPU L3 PD. cr3 being 32 bits, PA must
	 * reside below the 4GB boundary.
	 */
	ret = uvm_pglistalloc(PAGE_SIZE, 0, 0x100000000ULL, 32, 0, &pg, 1, 0);
	vmap = TAILQ_FIRST(&pg);

	if (ret != 0 || vmap == NULL)
		panic("%s: failed to allocate L3 pglist for CPU %d (ret %d)\n",
		    __func__, cpu_index(ci), ret);

	ci->ci_pae_l3_pdirpa = vmap->phys_addr;

	/*
	 * ci_pae_l3_pdir is declared pd_entry_t *, so cast the VA to that
	 * type (the original (paddr_t *) cast was a type mismatch).
	 */
	ci->ci_pae_l3_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE,
	    0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);

	if (ci->ci_pae_l3_pdir == NULL)
		panic("%s: failed to allocate L3 PD for CPU %d\n",
		    __func__, cpu_index(ci));

	/* Enter the mapping and make it visible before any use. */
	pmap_kenter_pa((vaddr_t)ci->ci_pae_l3_pdir, ci->ci_pae_l3_pdirpa,
	    VM_PROT_READ | VM_PROT_WRITE, 0);

	pmap_update(pmap_kernel());
}
#endif /* PAE */

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.177 2011/07/26 12:56:39 yamt Exp $ */
/* $NetBSD: cpu.h,v 1.178 2011/12/30 17:57:49 cherry Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -86,6 +86,10 @@ cpu_set_curpri(int pri)
#define CLKF_INTR(frame) (curcpu()->ci_idepth > 0)
#define LWP_PC(l) ((l)->l_md.md_regs->tf_eip)
#ifdef PAE
void cpu_alloc_l3_page(struct cpu_info *);
#endif /* PAE */
#endif /* _KERNEL */
#endif /* !_I386_CPU_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.44 2011/12/07 15:47:42 cegger Exp $ */
/* $NetBSD: cpu.h,v 1.45 2011/12/30 17:57:49 cherry Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -177,7 +177,7 @@ struct cpu_info {
#endif
#ifdef PAE
paddr_t ci_pae_l3_pdirpa; /* PA of L3 PD */
uint32_t ci_pae_l3_pdirpa; /* PA of L3 PD */
pd_entry_t * ci_pae_l3_pdir; /* VA pointer to L3 PD */
#endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $ */
/* $NetBSD: pmap.c,v 1.149 2011/12/30 17:57:49 cherry Exp $ */
/*-
* Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@ -171,7 +171,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.148 2011/12/30 16:55:21 cherry Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.149 2011/12/30 17:57:49 cherry Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@ -1663,33 +1663,7 @@ pmap_cpu_init_late(struct cpu_info *ci)
return;
#ifdef PAE
int ret;
struct pglist pg;
struct vm_page *vmap;
/*
* Allocate a page for the per-CPU L3 PD. cr3 being 32 bits, PA must
* reside below the 4GB boundary.
*/
ret = uvm_pglistalloc(PAGE_SIZE, 0, 0x100000000ULL, 32, 0, &pg, 1, 0);
vmap = TAILQ_FIRST(&pg);
if (ret != 0 || vmap == NULL)
panic("%s: failed to allocate L3 pglist for CPU %d (ret %d)\n",
__func__, cpu_index(ci), ret);
ci->ci_pae_l3_pdirpa = vmap->phys_addr;
ci->ci_pae_l3_pdir = (paddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
if (ci->ci_pae_l3_pdir == NULL)
panic("%s: failed to allocate L3 PD for CPU %d\n",
__func__, cpu_index(ci));
pmap_kenter_pa((vaddr_t)ci->ci_pae_l3_pdir, ci->ci_pae_l3_pdirpa,
VM_PROT_READ | VM_PROT_WRITE, 0);
pmap_update(pmap_kernel());
cpu_alloc_l3_page(ci);
#endif
}
#endif