XXX: workaround we'd like to remove when pmap / uvm locking is cleaned up:
- rename pseg_get() and pseg_set() to pseg_get_real() and pseg_set_real(). - if USE_LOCKSAFE_PSEG_GETSET is defined, which it currently is by default, define pseg_[gs]et() in terms of functions that take a new pseg_lock mutex at IPL_VM while calling into the real functions. This seems to avoid the pseg_set() crashes we've seen: 1 - spare needed, when pseg_get() just worked for this pmap 2 - the 2nd ldxa via ASI_PHYS_CACHED in pseg_set() loads garbage into %o4, and causes the 3rd ldxa to fault
This commit is contained in:
parent
a39bec69a6
commit
599646785c
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: db_interface.c,v 1.123 2010/03/06 08:08:29 mrg Exp $ */
|
||||
/* $NetBSD: db_interface.c,v 1.124 2010/03/10 06:57:22 mrg Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1996-2002 Eduardo Horvath. All rights reserved.
|
||||
|
@ -34,7 +34,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.123 2010/03/06 08:08:29 mrg Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.124 2010/03/10 06:57:22 mrg Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_multiprocessor.h"
|
||||
|
@ -650,7 +650,8 @@ db_pload_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif)
|
|||
}
|
||||
}
|
||||
|
||||
int64_t pseg_get(struct pmap *, vaddr_t);
|
||||
/* XXX no locking; shouldn't matter */
|
||||
int64_t pseg_get_real(struct pmap *, vaddr_t);
|
||||
|
||||
void
|
||||
db_dump_pmap(struct pmap *pm)
|
||||
|
@ -706,7 +707,7 @@ db_pmap_kernel(db_expr_t addr, bool have_addr, db_expr_t count, const char *modi
|
|||
if (have_addr) {
|
||||
/* lookup an entry for this VA */
|
||||
|
||||
if ((data = pseg_get(pmap_kernel(), (vaddr_t)addr))) {
|
||||
if ((data = pseg_get_real(pmap_kernel(), (vaddr_t)addr))) {
|
||||
db_printf("pmap_kernel(%p)->pm_segs[%lx][%lx][%lx]=>%qx\n",
|
||||
(void *)(uintptr_t)addr, (u_long)va_to_seg(addr),
|
||||
(u_long)va_to_dir(addr), (u_long)va_to_pte(addr),
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: locore.s,v 1.326 2010/03/08 08:59:06 mrg Exp $ */
|
||||
/* $NetBSD: locore.s,v 1.327 2010/03/10 06:57:22 mrg Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2006-2010 Matthew R. Green
|
||||
|
@ -6568,14 +6568,15 @@ ENTRY(pmap_copy_page_phys)
|
|||
retl
|
||||
mov %o4, %g1 ! Restore g1
|
||||
#endif
|
||||
|
||||
/*
|
||||
* extern int64_t pseg_get(struct pmap *pm, vaddr_t addr);
|
||||
* extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr);
|
||||
*
|
||||
* Return TTE at addr in pmap. Uses physical addressing only.
|
||||
 * pmap->pm_physaddr must be the physical address of pm_segs
|
||||
*
|
||||
*/
|
||||
ENTRY(pseg_get)
|
||||
ENTRY(pseg_get_real)
|
||||
! flushw ! Make sure we don't have stack probs & lose hibits of %o
|
||||
ldx [%o0 + PM_PHYS], %o2 ! pmap->pm_segs
|
||||
|
||||
|
@ -6637,13 +6638,13 @@ ENTRY(pseg_get)
|
|||
/*
|
||||
* In 32-bit mode:
|
||||
*
|
||||
* extern int pseg_set(struct pmap* %o0, vaddr_t addr %o1, int64_t tte %o2:%o3,
|
||||
* paddr_t spare %o4:%o5);
|
||||
* extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
|
||||
* int64_t tte %o2:%o3, paddr_t spare %o4:%o5);
|
||||
*
|
||||
* In 64-bit mode:
|
||||
*
|
||||
* extern int pseg_set(struct pmap* %o0, vaddr_t addr %o1, int64_t tte %o2,
|
||||
* paddr_t spare %o3);
|
||||
* extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
|
||||
* int64_t tte %o2, paddr_t spare %o3);
|
||||
*
|
||||
* Set a pseg entry to a particular TTE value. Return values are:
|
||||
*
|
||||
|
@ -6673,7 +6674,7 @@ ENTRY(pseg_get)
|
|||
* The counters are 32 bit or 64 bit wide, depending on the kernel type we are
|
||||
* running!
|
||||
*/
|
||||
ENTRY(pseg_set)
|
||||
ENTRY(pseg_set_real)
|
||||
#ifndef _LP64
|
||||
sllx %o4, 32, %o4 ! Put args into 64-bit format
|
||||
sllx %o2, 32, %o2 ! Shift to high 32-bits
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: pmap.c,v 1.258 2010/03/08 08:59:06 mrg Exp $ */
|
||||
/* $NetBSD: pmap.c,v 1.259 2010/03/10 06:57:22 mrg Exp $ */
|
||||
/*
|
||||
*
|
||||
* Copyright (C) 1996-1999 Eduardo Horvath.
|
||||
|
@ -26,7 +26,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.258 2010/03/08 08:59:06 mrg Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.259 2010/03/10 06:57:22 mrg Exp $");
|
||||
|
||||
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
|
||||
#define HWREF
|
||||
|
@ -79,8 +79,8 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.258 2010/03/08 08:59:06 mrg Exp $");
|
|||
paddr_t cpu0paddr; /* contigious phys memory preallocated for cpus */
|
||||
|
||||
/* These routines are in assembly to allow access thru physical mappings */
|
||||
extern int64_t pseg_get(struct pmap *, vaddr_t);
|
||||
extern int pseg_set(struct pmap *, vaddr_t, int64_t, paddr_t);
|
||||
extern int64_t pseg_get_real(struct pmap *, vaddr_t);
|
||||
extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
|
||||
|
||||
/*
|
||||
* Diatribe on ref/mod counting:
|
||||
|
@ -349,6 +349,53 @@ struct page_size_map page_size_map[] = {
|
|||
PSMAP_ENTRY(0, 0),
|
||||
};
|
||||
|
||||
/*
|
||||
* This probably shouldn't be necessary, but it stops USIII machines from
|
||||
* breaking in general, and not just for MULTIPROCESSOR.
|
||||
*/
|
||||
#define USE_LOCKSAFE_PSEG_GETSET
|
||||
#if defined(USE_LOCKSAFE_PSEG_GETSET)
|
||||
|
||||
static kmutex_t pseg_lock;
|
||||
|
||||
static __inline__ int64_t
|
||||
pseg_get_locksafe(struct pmap *pm, vaddr_t va)
|
||||
{
|
||||
int64_t rv;
|
||||
bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
|
||||
|
||||
if (__predict_true(took_lock))
|
||||
mutex_enter(&pseg_lock);
|
||||
rv = pseg_get_real(pm, va);
|
||||
if (__predict_true(took_lock))
|
||||
mutex_exit(&pseg_lock);
|
||||
return rv;
|
||||
}
|
||||
|
||||
static __inline__ int
|
||||
pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
|
||||
{
|
||||
int rv;
|
||||
bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
|
||||
|
||||
if (__predict_true(took_lock))
|
||||
mutex_enter(&pseg_lock);
|
||||
rv = pseg_set_real(pm, va, data, ptp);
|
||||
if (__predict_true(took_lock))
|
||||
mutex_exit(&pseg_lock);
|
||||
return rv;
|
||||
}
|
||||
|
||||
#define pseg_get(pm, va) pseg_get_locksafe(pm, va)
|
||||
#define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp)
|
||||
|
||||
#else /* USE_LOCKSAFE_PSEG_GETSET */
|
||||
|
||||
#define pseg_get(pm, va) pseg_get_real(pm, va)
|
||||
#define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp)
|
||||
|
||||
#endif /* USE_LOCKSAFE_PSEG_GETSET */
|
||||
|
||||
/*
|
||||
* Enter a TTE into the kernel pmap only. Don't do anything else.
|
||||
*
|
||||
|
@ -1247,6 +1294,9 @@ pmap_init(void)
|
|||
vm_num_phys = avail_end - avail_start;
|
||||
|
||||
mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
|
||||
#if defined(USE_LOCKSAFE_PSEG_GETSET)
|
||||
mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM);
|
||||
#endif
|
||||
lock_available = true;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue