* convert ibm4xx-based evbppc from reserved-TLB entry allocation to the
  recently introduced ppc4xx_tlb_reserve() API (a usage sketch follows below).
* ibm405gp UART0 used to be linearly mapped. Its VA happened to fall inside
  the kernel segment, allowing multiple entries in the TLB to match the same
  VA. The 405 core treats this as a "programming error" with "undefined
  behaviour", so we now avoid mapping peripherals in the kernel segment.
* Some boards used to map a hardwired RAM size. We now use the real size as
  passed in by the boot firmware.
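
Below is a minimal, user-space sketch of the new call pattern. The
ppc4xx_tlb_reserve(pa, va, size, flags) argument order is inferred from the
call sites in the machdep.c hunk further down; the typedefs, flag values,
stub body, and the 32 MB mem_size are illustrative stand-ins rather than the
kernel's own definitions.

/*
 * Sketch only: mirrors the initppc() change in this commit.  All of RAM
 * is linear-mapped in 16 MB chunks, then the console device is mapped at
 * the first VA past the end of RAM instead of at its physical address.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t vaddr_t;		/* stand-ins for the kernel types */
typedef uint32_t paddr_t;

#define TLB_PG_SIZE	(16*1024*1024)
#define TLB_EX		0x1		/* illustrative flag values only */
#define TLB_I		0x2
#define TLB_G		0x4

/* Stub that just reports the reservation the kernel would wire down. */
static void
ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
{
	printf("reserve: pa 0x%08lx -> va 0x%08lx, %lu MB, flags 0x%x\n",
	    (unsigned long)pa, (unsigned long)va,
	    (unsigned long)(size >> 20), (unsigned)flags);
}

int
main(void)
{
	uint32_t mem_size = 32 * 1024 * 1024;	/* hypothetical board RAM */
	vaddr_t va;

	/* Linear map whole physmem. */
	for (va = 0; va < mem_size; va += TLB_PG_SIZE)
		ppc4xx_tlb_reserve(va, va, TLB_PG_SIZE, TLB_EX);

	/* Map console just after RAM; PA taken from the diff (ibm405gp UART0 space). */
	ppc4xx_tlb_reserve(0xef000000, va, TLB_PG_SIZE, TLB_I | TLB_G);
	return 0;
}

The second call is what keeps the console VA out of the linear-mapped range,
avoiding the double-match hazard described above.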
kiyohara 2006-10-16 18:14:37 +00:00
parent 471efec80f
commit d7e6f0e206
3 changed files with 19 additions and 104 deletions

machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.29 2006/07/13 07:36:04 simonb Exp $ */
/* $NetBSD: machdep.c,v 1.30 2006/10/16 18:14:37 kiyohara Exp $ */
/*
* Copyright 2001, 2002 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.29 2006/07/13 07:36:04 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.30 2006/10/16 18:14:37 kiyohara Exp $");
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
@@ -115,6 +115,9 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.29 2006/07/13 07:36:04 simonb Exp $");
#include <ddb/db_extern.h>
#endif
#define TLB_PG_SIZE (16*1024*1024)
/*
* Global variables used here and there
*/
@@ -171,6 +174,7 @@ initppc(u_int startkernel, u_int endkernel, char *args, void *info_block)
#ifdef IPKDB
extern int ipkdblow, ipkdbsize;
#endif
vaddr_t va;
int exc, dbcr0;
struct cpu_info * const ci = curcpu();
@@ -191,6 +195,13 @@ initppc(u_int startkernel, u_int endkernel, char *args, void *info_block)
availmemr[0].start = startkernel;
availmemr[0].size = board_data.mem_size - availmemr[0].start;
/* Linear map whole physmem */
for (va = 0; va < board_data.mem_size; va += TLB_PG_SIZE)
ppc4xx_tlb_reserve(va, va, TLB_PG_SIZE, TLB_EX);
/* Map console just after RAM */
ppc4xx_tlb_reserve(0xef000000, va, TLB_PG_SIZE, TLB_I | TLB_G);
/*
* Initialize lwp0 and current pcb and pmap pointers.
*/
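
Since each reserved entry in the loop above covers a fixed 16 MB page, the
wired-entry cost scales with the firmware-reported RAM size. A rough
arithmetic sketch (the memory sizes are hypothetical, and the 64-entry figure
for the 405 core's unified TLB is quoted from memory):

#include <stdio.h>

#define TLB_PG_SIZE	(16u * 1024 * 1024)

/*
 * Wired entries the initppc() loop consumes: one 16 MB entry per started
 * 16 MB of RAM (the loop rounds up), plus one entry for the console.
 */
static unsigned
reserved_entries(unsigned mem_size)
{
	return (mem_size + TLB_PG_SIZE - 1) / TLB_PG_SIZE + 1;
}

int
main(void)
{
	unsigned sizes[] = { 32u << 20, 64u << 20, 128u << 20 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3u MB RAM -> %u wired entries (of 64 in the 405 UTLB)\n",
		    sizes[i] >> 20, reserved_entries(sizes[i]));
	return 0;
}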

walnut_start.S

@@ -1,4 +1,4 @@
/* $NetBSD: walnut_start.S,v 1.13 2006/06/30 17:54:51 freza Exp $ */
/* $NetBSD: walnut_start.S,v 1.14 2006/10/16 18:14:37 kiyohara Exp $ */
/* $OpenBSD: locore.S,v 1.4 1997/01/26 09:06:38 rahnds Exp $ */
/*
@@ -192,59 +192,11 @@ __start_cpu0:
stw %r8,0(%r7)
#endif
/*
* Set up TLB entry to cover kernel addresses.
*
* XXX: Skip TLB 0 for now, due to unresolved TLB 0 replacement
* and hard hangs
*/
li %r0,1
/* Set kernel MMU context. */
li %r0,KERNEL_PID
mtpid %r0
sync
li %r0,0
#ifdef PPC_4XX_NOCACHE
li %r4,TLB_EX|TLB_WR|TLB_I /* |TLB_W */
#else
li %r4,TLB_EX|TLB_WR /* |TLB_W */
#endif
li %r3,TLB_VALID|TLB_PG_16M
tlbwe %r4,%r0,1 /* Load the data(Low) portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag(High) portion of the entry */
#if 1
/* Damn. Have to be able to access all real memory.... Hardcode for 32M for now. */
li %r0,1
lis %r4,0x01000000@h
ori %r3,%r4,0
#ifdef PPC_4XX_NOCACHE
addi %r4,%r4,TLB_EX|TLB_WR|TLB_I /* |TLB_W */
#else
addi %r4,%r4,TLB_EX|TLB_WR /* |TLB_W */
#endif
addi %r3,%r3,TLB_VALID|TLB_PG_16M
tlbwe %r4,%r0,1 /* Load the data(Low) portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag(High) portion of the entry */
#endif
/* set up a TLB mapping to cover uart0 */
lis %r3,0xef000000@h /* Load the virtual address */
ori %r4,%r3,0 /* Load the physical address */
clrrwi %r4,%r4,10 /* Mask off the real page number */
/* write, execute, cache inhibit, guarded */
ori %r4,%r4,(TLB_WR|TLB_EX|TLB_I|TLB_G)
clrrwi %r3,%r3,10 /* Mask off the effective page number */
ori %r3,%r3,(TLB_VALID|TLB_PG_16M)
li %r0,2
tlbwe %r4,%r0,1 /* Load the data portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag portion of the entry */
/* END of TLB setup */
INIT_CPUINFO(8,1,9,0)
mr %r4,%r8
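
For contrast, the mappings that the removed start-up code (here and in the
identical locore.S hunk below) wired by hand can be summarized as data. The
addresses and flag names are read off the removed lines; the struct, the flag
values, and the implied zero-offset/VA==PA entries are a reconstruction, not
kernel definitions:

#include <stdio.h>
#include <stdint.h>

#define TLB_EX	0x1			/* illustrative flag values only */
#define TLB_WR	0x2
#define TLB_I	0x4
#define TLB_G	0x8

struct wired_map {
	uint32_t va, pa;
	unsigned size;
	unsigned flags;
	const char *what;
};

/*
 * The removed code wrote three 16 MB entries with tlbwe: two covering a
 * hardcoded 32 MB of RAM ("Hardcode for 32M for now"), and one mapping
 * UART0 at VA == PA, the aliasing hazard named in the commit message.
 * The RAM entries also get TLB_I when built with PPC_4XX_NOCACHE.
 */
static const struct wired_map old_map[] = {
	{ 0x00000000, 0x00000000, 16u << 20, TLB_EX | TLB_WR, "RAM 0-16 MB" },
	{ 0x01000000, 0x01000000, 16u << 20, TLB_EX | TLB_WR, "RAM 16-32 MB (hardwired)" },
	{ 0xef000000, 0xef000000, 16u << 20, TLB_WR | TLB_EX | TLB_I | TLB_G, "UART0, VA == PA" },
};

int
main(void)
{
	size_t i;

	for (i = 0; i < sizeof(old_map) / sizeof(old_map[0]); i++)
		printf("va 0x%08lx -> pa 0x%08lx, %2u MB, flags 0x%x  %s\n",
		    (unsigned long)old_map[i].va, (unsigned long)old_map[i].pa,
		    old_map[i].size >> 20, old_map[i].flags, old_map[i].what);
	return 0;
}

The new initppc() loop replaces the first two rows with as many entries as
board_data.mem_size actually needs, and gives the UART a VA outside that
range.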

locore.S

@@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.2 2005/12/11 12:18:43 christos Exp $ */
/* $NetBSD: locore.S,v 1.3 2006/10/16 18:14:37 kiyohara Exp $ */
/* $OpenBSD: locore.S,v 1.4 1997/01/26 09:06:38 rahnds Exp $ */
/*
@@ -210,59 +210,11 @@ __start_cpu0:
stw %r8,0(%r7)
#endif
/*
* Set up TLB entry to cover kernel addresses.
*
* XXX: Skip TLB 0 for now, due to unresolved TLB 0 replacement
* and hard hangs
*/
li %r0,1
/* Set kernel MMU context. */
li %r0,KERNEL_PID
mtpid %r0
sync
li %r0,0
#ifdef PPC_4XX_NOCACHE
li %r4,TLB_EX|TLB_WR|TLB_I /* |TLB_W */
#else
li %r4,TLB_EX|TLB_WR /* |TLB_W */
#endif
li %r3,TLB_VALID|TLB_PG_16M
tlbwe %r4,%r0,1 /* Load the data(Low) portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag(High) portion of the entry */
#if 1
/* Damn. Have to be able to access all real memory.... Hardcode for 32M for now. */
li %r0,1
lis %r4,0x01000000@h
ori %r3,%r4,0
#ifdef PPC_4XX_NOCACHE
addi %r4,%r4,TLB_EX|TLB_WR|TLB_I /* |TLB_W */
#else
addi %r4,%r4,TLB_EX|TLB_WR /* |TLB_W */
#endif
addi %r3,%r3,TLB_VALID|TLB_PG_16M
tlbwe %r4,%r0,1 /* Load the data(Low) portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag(High) portion of the entry */
#endif
/* set up a TLB mapping to cover uart0 */
lis %r3,0xef000000@h /* Load the virtual address */
ori %r4,%r3,0 /* Load the physical address */
clrrwi %r4,%r4,10 /* Mask off the real page number */
/* write, execute, cache inhibit, guarded */
ori %r4,%r4,(TLB_WR|TLB_EX|TLB_I|TLB_G)
clrrwi %r3,%r3,10 /* Mask off the effective page number */
ori %r3,%r3,(TLB_VALID|TLB_PG_16M)
li %r0,2
tlbwe %r4,%r0,1 /* Load the data portion of the entry */
tlbwe %r3,%r0,0 /* Load the tag portion of the entry */
/* END of TLB setup */
INIT_CPUINFO(8,1,9,0)
mr %r4,%r8