More PPC64 changes. (latent for now).

This commit is contained in:
matt 2003-11-21 22:57:14 +00:00
parent 4e755ddba8
commit 99f7a6b7db
5 changed files with 174 additions and 48 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.5 2003/08/24 17:52:34 chs Exp $ */
/* $NetBSD: pmap.h,v 1.6 2003/11/21 22:57:14 matt Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -41,19 +41,32 @@
* Pmap stuff
*/
struct pmap {
#ifdef PPC_OEA64
struct steg *pm_steg_table; /* segment table pointer */
/* XXX need way to track exec pages */
#endif
#ifdef PPC_OEA
register_t pm_sr[16]; /* segments used in this pmap */
int pm_exec[16]; /* counts of exec mappings */
#endif
register_t pm_vsid; /* VSID bits */
int pm_refs; /* ref count */
struct pmap_statistics pm_stats; /* pmap statistics */
unsigned int pm_evictions; /* pvo's not in page table */
#ifdef PPC_OEA64
unsigned int pm_ste_evictions; /* STEs evicted from the segment table -- TODO confirm against pmap_ste_spill() */
#endif
};
typedef struct pmap *pmap_t;
#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>
#ifdef PPC_OEA
extern register_t iosrtable[];
#endif
extern int pmap_use_altivec;
extern struct pmap kernel_pmap_;
#define pmap_kernel() (&kernel_pmap_)
@ -83,6 +96,10 @@ void pmap_real_memory (paddr_t *, psize_t *);
void pmap_pinit (struct pmap *);
boolean_t pmap_pageidlezero (paddr_t);
void pmap_syncicache (paddr_t, psize_t);
#ifdef PPC_OEA64
vaddr_t pmap_setusr (vaddr_t);
vaddr_t pmap_unsetusr (void);
#endif
#define PMAP_NEED_PROCWR
void pmap_procwr(struct proc *, vaddr_t, size_t);
@ -101,9 +118,11 @@ static __inline paddr_t vtophys (vaddr_t);
* VA==PA all at once. But pmap_copy_page() and pmap_zero_page() will have
* this problem, too.
*/
#ifndef PPC_OEA64
#define PMAP_MAP_POOLPAGE(pa) (pa)
#define PMAP_UNMAP_POOLPAGE(pa) (pa)
#define POOL_VTOPHYS(va) vtophys((vaddr_t) va)
#endif
static __inline paddr_t
vtophys(vaddr_t va)

View File

@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.4 2003/11/21 17:40:48 matt Exp $ */
/* $NetBSD: pte.h,v 1.5 2003/11/21 22:57:14 matt Exp $ */
/*-
* Copyright (C) 2003 Matt Thomas
@ -133,6 +133,10 @@ struct steg {
/* Low Word */
#define STE_VSID (~0xfffL) /* Virtual Segment ID */
#define STE_VSID_SHFT 12
#define STE_VSID_WIDTH 52
#define SR_VSID_SHFT STE_VSID_SHFT /* compatibility with PPC_OEA */
#define SR_VSID_WIDTH STE_VSID_WIDTH /* compatibility with PPC_OEA */
#define SR_KEY_LEN 9 /* 64 groups of 8 segment entries */
#else /* !defined(PPC_OEA64) */
@ -145,8 +149,12 @@ struct steg {
#define SR_SUKEY 0x40000000 /* Supervisor protection key */
#define SR_PRKEY 0x20000000 /* User protection key */
#define SR_NOEXEC 0x10000000 /* No-execute protection bit */
#define SR_VSID 0x00ffffff /* Virtual segment ID */
#define SR_VSID_SHFT 0 /* Starts at LSB */
#define SR_VSID_WIDTH 24 /* Goes for 24 bits */
#endif /* PPC_OEA64 */
/* Virtual segment ID */
#define SR_VSID (((1L << SR_VSID_WIDTH) - 1) << SR_VSID_SHFT)
#endif /* _POWERPC_OEA_PTE_H_ */

View File

@ -114,18 +114,32 @@
*/
#if 0
/*
* Move the SR# to the top 4 bits to make the lower 20 bits entirely random
* Move the SR# to the top bits to make the lower bits entirely random
* so to give better PTE distribution.
*/
#define VSID_MAKE(sr, hash) (((sr) << (ADDR_SR_SHFT-4))|((hash) & 0xfffff))
#define VSID_TO_SR(vsid) (((vsid) >> (ADDR_SR_SHFT-4)) & 0xF)
#define VSID_TO_HASH(vsid) ((vsid) & 0xfffff)
#define VSID_SR_INCREMENT 0x00100000
#define VSID__KEYSHFT (SR_VSID_WIDTH - SR_KEY_LEN)
#define VSID_SR_INCREMENT (1L << VSID__KEYSHFT)
#define VSID__HASHMASK (VSID_SR_INCREMENT - 1)
#define VSID_MAKE(sr, hash) \
(( \
(((sr) << VSID__KEYSHFT) | ((hash) & VSID__HASHMASK)) \
<< SR_VSID_SHFT) & SR_VSID)
#define VSID_TO_SR(vsid) \
(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__KEYSHFT))
#define VSID_TO_HASH(vsid) \
((((vsid) & SR_VSID) >> SR_VSID_SHFT) & VSID__HASHMASK)
#else
#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid) ((vsid) & 0xF)
#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
#define VSID_SR_INCREMENT 0x00000001
#define VSID__HASHSHFT (SR_KEY_LEN)
#define VSID_SR_INCREMENT (1L << 0)
#define VSID__KEYMASK ((1L << VSID__HASHSHFT) - 1)
#define VSID_MAKE(sr, hash) \
(( \
(((hash) << VSID__HASHSHFT) | ((sr) & VSID__KEYMASK)) \
<< SR_VSID_SHFT) & SR_VSID)
#define VSID_TO_SR(vsid) \
(((vsid) >> SR_VSID_SHFT) & VSID__KEYMASK)
#define VSID_TO_HASH(vsid) \
(((vsid) & SR_VSID) >> (SR_VSID_SHFT + VSID__HASHSHFT))
#endif
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.17 2003/11/21 18:09:27 matt Exp $ */
/* $NetBSD: pmap.c,v 1.18 2003/11/21 22:57:14 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@ -67,8 +67,9 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.17 2003/11/21 18:09:27 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.18 2003/11/21 22:57:14 matt Exp $");
#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
@ -92,11 +93,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.17 2003/11/21 18:09:27 matt Exp $");
#include <machine/powerpc.h>
#include <powerpc/spr.h>
#include <powerpc/oea/sr_601.h>
#if __NetBSD_Version__ > 105010000
#include <powerpc/oea/bat.h>
#else
#include <powerpc/bat.h>
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define STATIC
@ -415,6 +412,7 @@ extern struct evcnt pmap_evcnt_idlezeroed_pages;
#define MFSRIN(va) mfsrin(va)
#define MFTB() mfrtcltbl()
#ifndef PPC_OEA64
static __inline register_t
mfsrin(vaddr_t va)
{
@ -422,6 +420,7 @@ mfsrin(vaddr_t va)
__asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
return sr;
}
#endif /* PPC_OEA64 */
static __inline register_t
pmap_interrupts_off(void)
@ -478,7 +477,45 @@ tlbia(void)
static __inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID);
#ifdef PPC_OEA64
#if 0
const struct ste *ste;
register_t hash;
int i;
hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;
/*
* Try the primary group first
*/
ste = pm->pm_stes[hash].stes;
for (i = 0; i < 8; i++, ste++) {
if ((ste->ste_hi & STE_V) &&
(addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
return ste;
}
/*
* Then the secondary group.
*/
ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
for (i = 0; i < 8; i++, ste++) {
if ((ste->ste_hi & STE_V) &&
(addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
return addr;
}
return NULL;
#else
/*
* Rather than searching the STE groups for the VSID, we know
* how we generate that from the ESID and so do that.
*/
return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#else
return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#endif
}
static __inline register_t
@ -506,15 +543,21 @@ pmap_pte_to_va(volatile const struct pte *pt)
if (pt->pte_hi & PTE_HID)
ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
/* PPC Bits 10-19 */
/* PPC Bits 10-19 PPC64 Bits 42-51 */
va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
va <<= ADDR_PIDX_SHFT;
/* PPC Bits 4-9 */
/* PPC Bits 4-9 PPC64 Bits 36-41 */
va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
#ifdef PPC_OEA64
/* PPC64 Bits 0-35 */
/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#endif
#ifdef PPC_OEA
/* PPC Bits 0-3 */
va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif
return va;
}
@ -726,7 +769,7 @@ pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
int i;
#if defined(DEBUG)
DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%lx 0x%lx\n",
DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%x 0x%x\n",
ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
#endif
/*
@ -1082,8 +1125,8 @@ pmap_create(void)
pmap_pinit(pm);
DPRINTFN(CREATE,("pmap_create: pm %p:\n"
"\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n"
"\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n", pm,
"\t%06x %06x %06x %06x %06x %06x %06x %06x\n"
"\t%06x %06x %06x %06x %06x %06x %06x %06x\n", pm,
pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
@ -1134,15 +1177,14 @@ pmap_pinit(pmap_t pm)
hash &= ~(VSID_NBPW-1);
hash |= i;
}
/*
* Make sure clear out SR_KEY_LEN bits because we put our
* our data in those bits (to identify the segment).
*/
hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN);
hash &= PTE_VSID >> PTE_VSID_SHFT;
pmap_vsid_bitmap[n] |= mask;
pm->pm_vsid = hash;
#ifndef PPC_OEA64
for (i = 0; i < 16; i++)
pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
SR_NOEXEC;
#endif
return;
}
panic("pmap_pinit: out of segments");
@ -1181,7 +1223,7 @@ pmap_release(pmap_t pm)
if (pm->pm_sr[0] == 0)
panic("pmap_release");
idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1);
idx = VSID_TO_HASH(pm->pm_vsid) & (NPMAPS-1);
mask = 1 << (idx % VSID_NBPW);
idx /= VSID_NBPW;
pmap_vsid_bitmap[idx] &= ~mask;
@ -1408,13 +1450,13 @@ pmap_pvo_check(const struct pvo_entry *pvo)
}
if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
printf("pmap_pvo_check: pvo %p: pte_hi differ: "
"%#lx/%#lx\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
"%#x/%#x\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
failed = 1;
}
if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
(PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
printf("pmap_pvo_check: pvo %p: pte_lo differ: "
"%#lx/%#lx\n", pvo,
"%#x/%#x\n", pvo,
pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN),
pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN));
failed = 1;
@ -1473,9 +1515,9 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
~(PTE_REF|PTE_CHG)) == 0 &&
va < VM_MIN_KERNEL_ADDRESS) {
printf("pmap_pvo_enter: pvo %p: dup %#lx/%#lx\n",
printf("pmap_pvo_enter: pvo %p: dup %#x/%#lx\n",
pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
printf("pmap_pvo_enter: pte_hi=%#lx sr=%#lx\n",
printf("pmap_pvo_enter: pte_hi=%#x sr=%#x\n",
pvo->pvo_pte.pte_hi,
pm->pm_sr[va >> ADDR_SR_SHFT]);
pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
@ -1659,16 +1701,19 @@ STATIC void
pvo_set_exec(struct pvo_entry *pvo)
{
struct pmap *pm = pvo->pvo_pmap;
int sr;
if (pm == pmap_kernel() || PVO_ISEXECUTABLE(pvo)) {
return;
}
pvo->pvo_vaddr |= PVO_EXECUTABLE;
sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (pm->pm_exec[sr]++ == 0) {
pm->pm_sr[sr] &= ~SR_NOEXEC;
#ifdef PPC_OEA
{
int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (pm->pm_exec[sr]++ == 0) {
pm->pm_sr[sr] &= ~SR_NOEXEC;
}
}
#endif
}
/*
@ -1680,16 +1725,19 @@ STATIC void
pvo_clear_exec(struct pvo_entry *pvo)
{
struct pmap *pm = pvo->pvo_pmap;
int sr;
if (pm == pmap_kernel() || !PVO_ISEXECUTABLE(pvo)) {
return;
}
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (--pm->pm_exec[sr] == 0) {
pm->pm_sr[sr] |= SR_NOEXEC;
#ifdef PPC_OEA
{
int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
if (--pm->pm_exec[sr] == 0) {
pm->pm_sr[sr] |= SR_NOEXEC;
}
}
#endif
}
/*
@ -2340,8 +2388,10 @@ pmap_print_mmuregs(void)
{
int i;
u_int cpuvers;
#ifndef PPC_OEA64
vaddr_t addr;
register_t soft_sr[16];
#endif
struct bat soft_ibat[4];
struct bat soft_dbat[4];
register_t sdr1;
@ -2349,12 +2399,13 @@ pmap_print_mmuregs(void)
cpuvers = MFPVR() >> 16;
__asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
#ifndef PPC_OEA64
addr = 0;
for (i=0; i<16; i++) {
soft_sr[i] = MFSRIN(addr);
addr += (1 << ADDR_SR_SHFT);
}
#endif
/* read iBAT (601: uBAT) registers */
__asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
@ -2379,7 +2430,8 @@ pmap_print_mmuregs(void)
__asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
}
printf("SDR1:\t%#lx\n", sdr1);
printf("SDR1:\t0x%lx\n", (long) sdr1);
#ifndef PPC_OEA64
printf("SR[]:\t");
for (i=0; i<4; i++)
printf("0x%08lx, ", soft_sr[i]);
@ -2393,6 +2445,7 @@ pmap_print_mmuregs(void)
for ( ; i<16; i++)
printf("0x%08lx, ", soft_sr[i]);
printf("\n");
#endif
printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
for (i=0; i<4; i++) {
@ -3012,6 +3065,7 @@ pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
/*
* Initialize kernel pmap and hardware.
*/
#ifndef PPC_OEA64
for (i = 0; i < 16; i++) {
pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT;
__asm __volatile ("mtsrin %0,%1"
@ -3033,7 +3087,8 @@ pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
:: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
}
}
#endif /* !PPC_OEA64 */
__asm __volatile ("sync; mtsdr1 %0; isync"
:: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
tlbia();

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.95 2003/11/06 08:49:13 he Exp $ */
/* $NetBSD: trap.c,v 1.96 2003/11/21 22:57:14 matt Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.95 2003/11/06 08:49:13 he Exp $");
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.96 2003/11/21 22:57:14 matt Exp $");
#include "opt_altivec.h"
#include "opt_ddb.h"
@ -131,6 +131,16 @@ trap(struct trapframe *frame)
va |= pcb->pcb_umapsr << ADDR_SR_SHFT;
map = &p->p_vmspace->vm_map;
/* KERNEL_PROC_LOCK(l); */
#ifdef PPC_OEA64
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map),
trunc_page(va), FALSE)) {
/* KERNEL_PROC_UNLOCK(l); */
KERNEL_UNLOCK();
return;
}
#endif
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_evictions > 0 &&
@ -209,6 +219,16 @@ trap(struct trapframe *frame)
* has some evicted pte's.
*/
map = &p->p_vmspace->vm_map;
#ifdef PPC_OEA64
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map), trunc_page(frame->dar),
FALSE)) {
KERNEL_PROC_UNLOCK(l);
break;
}
#endif
if ((frame->dsisr & DSISR_NOTFOUND) &&
vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->dar),
@ -276,7 +296,17 @@ trap(struct trapframe *frame)
* has some evicted pte's.
*/
map = &p->p_vmspace->vm_map;
if (pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0),
#ifdef PPC_OEA64
if (vm_map_pmap(map)->pm_ste_evictions > 0 &&
pmap_ste_spill(vm_map_pmap(map), trunc_page(frame->srr0),
TRUE)) {
KERNEL_PROC_UNLOCK(l);
break;
}
#endif
if (vm_map_pmap(map)->pm_evictions > 0 &&
pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0),
TRUE)) {
KERNEL_PROC_UNLOCK(l);
break;