* ppc4xx_tlb_reserve(): allocate "reserved" TLB entries dynamically

* ppc4xx_tlb_mapiodev(): resolve pa to va from reserved TLB entries

OK by matt@

XXX we'll keep TLB_NRESERVED defined until we fix explora to use the new API
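For context, a minimal usage sketch (hypothetical, not part of this commit): a board's early MD code could pin an uncached window over its on-chip peripherals before pmap_bootstrap() runs, and later recover the kernel VA of a register block inside that window. The board name, addresses, window size and include path below are made-up examples; only ppc4xx_tlb_reserve(), ppc4xx_tlb_mapiodev() and TLB_I come from this change.

/* Hypothetical example, not part of the diff below. */
#include <sys/param.h>
#include <sys/types.h>

#include <powerpc/ibm4xx/tlb.h>		/* assumed include path */

#define EXAMPLE_OPB_PA		0xef600000UL	/* example peripheral bus base (PA) */
#define EXAMPLE_OPB_VA		0xef600000UL	/* example kernel VA window (1:1) */
#define EXAMPLE_OPB_SIZE	(1024 * 1024)	/* rounded to a supported TLB size */

/* Called from early board setup, before pmap_bootstrap(). */
void
exampleboard_tlb_setup(void)
{
	/*
	 * Pin one reserved, cache-inhibited TLB entry over the
	 * peripheral window.  The flags argument supplies extra TLBLO
	 * attribute bits; TLB_WR is added by ppc4xx_tlb_reserve()
	 * itself, and the size is rounded up internally.
	 */
	ppc4xx_tlb_reserve(EXAMPLE_OPB_PA, EXAMPLE_OPB_VA,
	    EXAMPLE_OPB_SIZE, TLB_I);
}

/* Called later, e.g. from bus_space map code, for a device at `pa'. */
void *
exampleboard_mapiodev(paddr_t pa, psize_t len)
{
	/* Returns NULL if no reserved entry covers [pa, pa + len). */
	return ppc4xx_tlb_mapiodev(pa, len);
}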
freza 2006-08-31 22:13:51 +00:00
parent 037fe176f0
commit ca97defaa7
2 changed files with 151 additions and 20 deletions

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.41 2006/07/12 06:22:17 simonb Exp $ */
/* $NetBSD: pmap.c,v 1.42 2006/08/31 22:13:51 freza Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.41 2006/07/12 06:22:17 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.42 2006/08/31 22:13:51 freza Exp $");
#include <sys/param.h>
#include <sys/malloc.h>
@@ -96,12 +96,14 @@ caddr_t kernmap;
#define MINCTX 2
#define NUMCTX 256
volatile struct pmap *ctxbusy[NUMCTX];
#define TLBF_USED 0x1
#define TLBF_REF 0x2
#define TLBF_LOCKED 0x4
#define TLB_LOCKED(i) (tlb_info[(i)].ti_flags & TLBF_LOCKED)
typedef struct tlb_info_s {
char ti_flags;
char ti_ctx; /* TLB_PID associated with the entry */
@@ -110,7 +112,10 @@ typedef struct tlb_info_s {
volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy cause it's cheap */
volatile int tlbnext = TLB_NRESERVED;
volatile int tlbnext;
static int tlb_nreserved = 0;
static int pmap_bootstrap_done = 0;
/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
@@ -151,6 +156,18 @@ struct pv_entry {
struct pmap *pv_pm;
};
/* Each index corresponds to TLB_SIZE_* value. */
static size_t tlbsize[] = {
1024, /* TLB_SIZE_1K */
4096, /* TLB_SIZE_4K */
16384, /* TLB_SIZE_16K */
65536, /* TLB_SIZE_64K */
262144, /* TLB_SIZE_256K */
1048576, /* TLB_SIZE_1M */
4194304, /* TLB_SIZE_4M */
16777216, /* TLB_SIZE_16M */
};
struct pv_entry *pv_table;
static struct pool pv_pool;
@@ -167,6 +184,8 @@ static inline int pte_enter(struct pmap *, vaddr_t, u_int);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, boolean_t);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
static int ppc4xx_tlb_size_mask(size_t, int *, int *);
inline struct pv_entry *
pa_to_pv(paddr_t pa)
@@ -248,6 +267,12 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
int cnt, i;
u_int s, e, sz;
/* XXXfreza: compat, we used to statically reserve 4 entries. */
if (tlb_nreserved == 0)
tlb_nreserved = TLB_NRESERVED;
tlbnext = tlb_nreserved;
/*
* Allocate the kernel page table at the end of
* kernel space so it's in the locked TTE.
@@ -391,11 +416,12 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
pmap_kernel()->pm_ctx = KERNEL_PID;
nextavail = avail->start;
evcnt_attach_static(&tlbmiss_ev);
evcnt_attach_static(&tlbhit_ev);
evcnt_attach_static(&tlbflush_ev);
evcnt_attach_static(&tlbenter_ev);
pmap_bootstrap_done = 1;
}
/*
@@ -1185,7 +1211,7 @@ ppc4xx_tlb_flush(vaddr_t va, int pid)
if (!pid)
return;
__asm("mfpid %1;" /* Save PID */
__asm( "mfpid %1;" /* Save PID */
"mfmsr %2;" /* Save MSR */
"li %0,0;" /* Now clear MSR */
"mtmsr %0;"
@@ -1244,7 +1270,7 @@ ppc4xx_tlb_find_victim(void)
for (;;) {
if (++tlbnext >= NTLB)
tlbnext = TLB_NRESERVED;
tlbnext = tlb_nreserved;
flags = tlb_info[tlbnext].ti_flags;
if (!(flags & TLBF_USED) ||
(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
@@ -1287,7 +1313,7 @@ ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
idx = ppc4xx_tlb_find_victim();
#ifdef DIAGNOSTIC
if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
if ((idx < tlb_nreserved) || (idx >= NTLB)) {
panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
}
#endif
@@ -1323,7 +1349,7 @@ ppc4xx_tlb_init(void)
int i;
/* Mark reserved TLB entries */
for (i = 0; i < TLB_NRESERVED; i++) {
for (i = 0; i < tlb_nreserved; i++) {
tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
tlb_info[i].ti_ctx = KERNEL_PID;
}
@@ -1340,6 +1366,109 @@ ppc4xx_tlb_init(void)
:: "K"(SPR_ZPR), "r" (0x1b000000));
}
/*
* ppc4xx_tlb_size_mask:
*
* Roundup size to supported page size, return TLBHI mask and real size.
*/
static int
ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
{
int i;
for (i = 0; i < __arraycount(tlbsize); i++)
if (size <= tlbsize[i]) {
*mask = (i << TLB_SIZE_SHFT);
*rsiz = tlbsize[i];
return (0);
}
return (EINVAL);
}
/*
* ppc4xx_tlb_mapiodev:
*
* Lookup virtual address of mapping previously entered via
* ppc4xx_tlb_reserve. Search TLB directly so that we don't
* need to waste extra storage for reserved mappings. Note
* that reading TLBHI also sets PID, but all reserved mappings
* use KERNEL_PID, so the side effect is nil.
*/
void *
ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
{
paddr_t pa;
vaddr_t va;
u_int lo, hi, sz;
int i;
/* tlb_nreserved is only allowed to grow, so this is safe. */
for (i = 0; i < tlb_nreserved; i++) {
__asm volatile (
" tlbre %0,%2,1 \n" /* TLBLO */
" tlbre %1,%2,0 \n" /* TLBHI */
: "=&r" (lo), "=&r" (hi)
: "r" (i));
KASSERT(hi & TLB_VALID);
KASSERT(mfspr(SPR_PID) == KERNEL_PID);
pa = (lo & TLB_RPN_MASK);
if (base < pa)
continue;
sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
if ((base + len) > (pa + sz))
continue;
va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); /* sz = 2^n */
return (void *)(va);
}
return (NULL);
}
/*
* ppc4xx_tlb_reserve:
*
* Map physical range to kernel virtual chunk via reserved TLB entry.
*/
void
ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
{
u_int lo, hi;
int szmask, rsize;
/* Called before pmap_bootstrap(), va outside kernel space. */
KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
KASSERT(! pmap_bootstrap_done);
KASSERT(tlb_nreserved < NTLB);
/* Resolve size. */
if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
tlb_nreserved, size);
/* Real size will be power of two >= 1024, so this is OK. */
pa &= ~(rsize - 1); /* RPN */
va &= ~(rsize - 1); /* EPN */
lo = pa | TLB_WR | flags;
hi = va | TLB_VALID | szmask | KERNEL_PID;
#ifdef PPC_4XX_NOCACHE
lo |= TLB_I;
#endif
__asm volatile(
" tlbwe %1,%0,1 \n" /* write TLBLO */
" tlbwe %2,%0,0 \n" /* write TLBHI */
" sync \n"
" isync \n"
: : "r" (tlb_nreserved), "r" (lo), "r" (hi));
tlb_nreserved++;
}
/*
* We should pass the ctx in from trap code.
@@ -1388,7 +1517,7 @@ ctx_flush(int cnum)
int i;
/* We gotta steal this context */
for (i = TLB_NRESERVED; i < NTLB; i++) {
for (i = tlb_nreserved; i < NTLB; i++) {
if (tlb_info[i].ti_ctx == cnum) {
/* Can't steal ctx if it has a locked entry. */
if (TLB_LOCKED(i)) {
@@ -1403,7 +1532,7 @@ ctx_flush(int cnum)
return (1);
}
#ifdef DIAGNOSTIC
if (i < TLB_NRESERVED)
if (i < tlb_nreserved)
panic("TLB entry %d not locked", i);
#endif
/* Invalidate particular TLB entry regardless of locked status */

tlb.h

@@ -1,4 +1,4 @@
/* $NetBSD: tlb.h,v 1.2 2006/07/12 06:22:17 simonb Exp $ */
/* $NetBSD: tlb.h,v 1.3 2006/08/31 22:13:51 freza Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@@ -58,14 +58,14 @@
#define TLB_SIZE_4M 6
#define TLB_SIZE_16M 7
#define TLB_PG_1K (TLB_SIZE_1K<<TLB_SIZE_SHFT)
#define TLB_PG_4K (TLB_SIZE_4K<<TLB_SIZE_SHFT)
#define TLB_PG_16K (TLB_SIZE_16K<<TLB_SIZE_SHFT)
#define TLB_PG_64K (TLB_SIZE_64K<<TLB_SIZE_SHFT)
#define TLB_PG_256K (TLB_SIZE_256K<<TLB_SIZE_SHFT)
#define TLB_PG_1M (TLB_SIZE_1M<<TLB_SIZE_SHFT)
#define TLB_PG_4M (TLB_SIZE_4M<<TLB_SIZE_SHFT)
#define TLB_PG_16M (TLB_SIZE_16M<<TLB_SIZE_SHFT)
#define TLB_PG_1K (TLB_SIZE_1K << TLB_SIZE_SHFT)
#define TLB_PG_4K (TLB_SIZE_4K << TLB_SIZE_SHFT)
#define TLB_PG_16K (TLB_SIZE_16K << TLB_SIZE_SHFT)
#define TLB_PG_64K (TLB_SIZE_64K << TLB_SIZE_SHFT)
#define TLB_PG_256K (TLB_SIZE_256K << TLB_SIZE_SHFT)
#define TLB_PG_1M (TLB_SIZE_1M << TLB_SIZE_SHFT)
#define TLB_PG_4M (TLB_SIZE_4M << TLB_SIZE_SHFT)
#define TLB_PG_16M (TLB_SIZE_16M << TLB_SIZE_SHFT)
/* TLBLO entries */
#define TLB_RPN_MASK 0xfffffc00 /* Real Page Number mask */
@@ -102,11 +102,13 @@ void ppc4xx_tlb_flush(vaddr_t, int);
void ppc4xx_tlb_flush_all(void);
void ppc4xx_tlb_init(void);
int ppc4xx_tlb_new_pid(struct pmap *);
void ppc4xx_tlb_reserve(paddr_t, vaddr_t, size_t, int);
void *ppc4xx_tlb_mapiodev(paddr_t, psize_t);
#endif
#define TLB_PID_INVALID 0xFFFF
#define TLB_NRESERVED 4 /* Reserve 4 TLB entries for kernel */
#define TLB_NRESERVED 4 /* XXXfreza: kill. */
#endif /* _IBM4XX_TLB_H_ */