Introduce locking primitives for Xen PTE operations, and Xen helper calls for MP-related MMU ops

cherry 2011-08-10 09:50:37 +00:00
parent 1a57823234
commit 1f0a8a809d
2 changed files with 174 additions and 6 deletions
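
In brief: the new xpq_queue_lock()/xpq_queue_unlock() pair serializes the shared mmu_update queue, and the xen_*_invlpg/tlbflush helpers issue cross-vCPU flushes. A minimal sketch of the intended calling pattern, mirroring xen_set_ldt() in the diff below (example_pte_update is a hypothetical caller, not part of this commit):

void
example_pte_update(paddr_t ma, pt_entry_t val)
{
	int s = splvm();		/* block interrupts that may queue updates */

	xpq_queue_lock();		/* serialize the shared xpq_queue[] */
	xpq_queue_pte_update(ma, val);	/* enqueue; may auto-flush if the queue fills */
	xpq_flush_queue();		/* push pending updates to the hypervisor */
	xpq_queue_unlock();
	splx(s);
}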

xenpmap.h

@ -1,4 +1,4 @@
-/* $NetBSD: xenpmap.h,v 1.27 2011/04/29 22:45:41 jym Exp $ */
+/* $NetBSD: xenpmap.h,v 1.28 2011/08/10 09:50:37 cherry Exp $ */
/*
*
@ -36,6 +36,8 @@
#define INVALID_P2M_ENTRY (~0UL)
void xpq_queue_lock(void);
void xpq_queue_unlock(void);
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
@ -46,6 +48,13 @@ void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int xpq_update_foreign(paddr_t, pt_entry_t, int);
void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, uint32_t);
void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t);
void xen_mcast_tlbflush(uint32_t);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, uint32_t);
void xen_bcast_invlpg(vaddr_t);
#define xpq_queue_pin_l1_table(pa) \
xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
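
A hedged usage sketch for the new multicast helpers: the uint32_t cpumask is handed through to the hypervisor's vcpumask argument, i.e. one bit per vCPU, and the helpers assert that xpq_lock is held (example_remote_invlpg and the chosen vCPU IDs are illustrative assumptions):

void
example_remote_invlpg(vaddr_t va)
{
	uint32_t mask = (1U << 0) | (1U << 2);	/* vCPUs 0 and 2 (assumed IDs) */
	int s = splvm();

	xpq_queue_lock();		/* the helpers KASSERT this lock is held */
	xen_mcast_invlpg(va, mask);	/* INVLPG va on the selected vCPUs */
	xpq_queue_unlock();
	splx(s);
}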

x86_xpmap.c

@ -1,4 +1,4 @@
-/* $NetBSD: x86_xpmap.c,v 1.28 2011/06/15 20:50:02 rmind Exp $ */
+/* $NetBSD: x86_xpmap.c,v 1.29 2011/08/10 09:50:37 cherry Exp $ */
/*
* Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
@ -69,7 +69,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.28 2011/06/15 20:50:02 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.29 2011/08/10 09:50:37 cherry Exp $");
#include "opt_xen.h"
#include "opt_ddb.h"
@ -77,6 +77,7 @@ __KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.28 2011/06/15 20:50:02 rmind Exp $")
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/simplelock.h>
#include <uvm/uvm.h>
@ -152,7 +153,9 @@ xen_set_ldt(vaddr_t base, uint32_t entries)
pmap_pte_clearbits(ptp, PG_RW);
}
s = splvm();
xpq_queue_lock();
xpq_queue_set_ldt(base, entries);
xpq_queue_unlock();
splx(s);
}
@ -163,12 +166,27 @@ void xpq_debug_dump(void);
#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;
static struct simplelock xpq_lock = SIMPLELOCK_INITIALIZER;
void
xpq_queue_lock(void)
{
simple_lock(&xpq_lock);
}
void
xpq_queue_unlock(void)
{
simple_unlock(&xpq_lock);
}
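A note on the lock's semantics (my reading; the commit itself does not state it): simple_lock() is the legacy NetBSD spin lock, so holders must not sleep, and on kernels built without MULTIPROCESSOR or LOCKDEBUG the operations compile away to no-ops.

/*
 * Hold discipline (assumed): raise spl before taking xpq_lock, as
 * xen_set_ldt() and xpq_flush_cache() do, so the queue is never
 * re-entered from an interrupt while the spin lock is held.
 */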
/* Must be called with xpq_lock held */
void
xpq_flush_queue(void)
{
int i, ok, ret;
KASSERT(simple_lock_held(&xpq_lock));
XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
for (i = 0; i < xpq_idx; i++)
XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
@ -187,10 +205,12 @@ xpq_flush_queue(void)
xpq_idx = 0;
}
/* Must be called with xpq_lock held */
static inline void
xpq_increment_idx(void)
{
KASSERT(simple_lock_held(&xpq_lock));
xpq_idx++;
if (__predict_false(xpq_idx == XPQUEUE_SIZE))
xpq_flush_queue();
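For context on what xpq_lock protects: updates accumulate in xpq_queue[] and are handed to the hypervisor in one batched call, either here when the queue fills or at an explicit flush. A worked example under the XPQUEUE_SIZE of 2048 defined above:

/*
 * Worked example: 2049 xpq_queue_pte_update() calls made under a
 * single lock hold trigger exactly two hypercalls: one automatic
 * flush when xpq_idx reaches XPQUEUE_SIZE (2048 entries), and one
 * at the caller's final xpq_flush_queue() (the remaining entry).
 */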
@ -201,6 +221,7 @@ xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
"\n", (int64_t)ma, (int64_t)pa));
KASSERT(simple_lock_held(&xpq_lock));
xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
xpq_increment_idx();
@ -214,6 +235,7 @@ xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{
KASSERT((ptr & 3) == 0);
KASSERT(simple_lock_held(&xpq_lock));
xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
xpq_queue[xpq_idx].val = val;
xpq_increment_idx();
@ -226,6 +248,7 @@ void
xpq_queue_pt_switch(paddr_t pa)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
@ -240,6 +263,8 @@ void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
@ -256,6 +281,8 @@ void
xpq_queue_unpin_table(paddr_t pa)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
@ -269,6 +296,8 @@ void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_set_ldt\n"));
@ -284,6 +313,8 @@ void
xpq_queue_tlb_flush(void)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_tlb_flush\n"));
@ -296,20 +327,25 @@ void
xpq_flush_cache(void)
{
struct mmuext_op op;
-int s = splvm();
+int s = splvm(), err;
xpq_queue_lock();
xpq_flush_queue();
XENPRINTK2(("xpq_queue_flush_cache\n"));
op.cmd = MMUEXT_FLUSH_CACHE;
-if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
+if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
+printf("errno == %d\n", err);
panic("xpq_flush_cache");
+}
-splx(s);
+xpq_queue_unlock();
+splx(s); /* XXX: removeme */
}
void
xpq_queue_invlpg(vaddr_t va)
{
struct mmuext_op op;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
@ -319,11 +355,134 @@ xpq_queue_invlpg(vaddr_t va)
panic("xpq_queue_invlpg");
}
void
xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
{
mmuext_op_t op;
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
op.cmd = MMUEXT_INVLPG_MULTI;
op.arg1.linear_addr = va;
op.arg2.vcpumask = &cpumask;
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
panic("xpq_queue_invlpg_all");
}
return;
}
void
xen_bcast_invlpg(vaddr_t va)
{
mmuext_op_t op;
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
op.cmd = MMUEXT_INVLPG_ALL;
op.arg1.linear_addr = va;
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
panic("xpq_queue_invlpg_all");
}
return;
}
/* This is a synchronous call. */
void
xen_mcast_tlbflush(uint32_t cpumask)
{
mmuext_op_t op;
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
op.cmd = MMUEXT_TLB_FLUSH_MULTI;
op.arg2.vcpumask = &cpumask;
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
panic("xpq_queue_invlpg_all");
}
return;
}
/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
mmuext_op_t op;
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
op.cmd = MMUEXT_TLB_FLUSH_ALL;
if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
panic("xpq_queue_invlpg_all");
}
return;
}
/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
KASSERT(eva > sva);
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
/* Round addresses down to a page boundary */
sva &= ~PAGE_MASK;
eva &= ~PAGE_MASK;
for ( ; sva <= eva; sva += PAGE_SIZE) {
xen_mcast_invlpg(sva, cpumask);
}
return;
}
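A worked example of the truncation and loop bounds above, assuming a PAGE_SIZE of 0x1000:

/*
 * Example: sva = 0x12345, eva = 0x14001 truncate to 0x12000 and
 * 0x14000, so INVLPG_MULTI is issued for 0x12000, 0x13000 and
 * 0x14000 -- the '<=' keeps the page containing eva in the flush.
 */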
/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
KASSERT(eva > sva);
KASSERT(simple_lock_held(&xpq_lock));
/* Flush pending page updates */
xpq_flush_queue();
/* Round addresses down to a page boundary */
sva &= ~PAGE_MASK;
eva &= ~PAGE_MASK;
for ( ; sva <= eva; sva += PAGE_SIZE) {
xen_bcast_invlpg(sva);
}
return;
}
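Putting the range helpers together, a sketch of how MP shootdown code might select between multicast and broadcast (example_shootdown and the all-ones broadcast convention are assumptions, not code from this commit):

void
example_shootdown(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
	int s = splvm();

	xpq_queue_lock();
	if (cpumask == ~0U)
		xen_vcpu_bcast_invlpg(sva, eva);	  /* every vCPU */
	else
		xen_vcpu_mcast_invlpg(sva, eva, cpumask); /* selected vCPUs */
	xpq_queue_unlock();
	splx(s);
}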
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
mmu_update_t op;
int ok;
KASSERT(simple_lock_held(&xpq_lock));
xpq_flush_queue();
op.ptr = ptr;