Implement r5k indexed writeback-invalidate, and fix usage of Page_Invalidate_S.

Originally written by rafal@ back in April 2003.  Field-tested by many
people since.

(I am not committing the pmap hack at this time; although pmap changes are
necessary to fully address the r5k panic/coma problems, the implementation
needs further thought.)
commit 07b6e1e982
parent d1eab478d8
sekiya 2004-12-13 08:39:21 +00:00
2 changed files with 34 additions and 14 deletions
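
Background for the summary above (editorial gloss, not part of the commit
message): MIPS CACHE instructions come in "Hit" and "Index" flavors.  A Hit
op takes a virtual address and acts on a line only if that address is
currently cached; an Index op selects a line purely by the address bits that
form the cache index, so the address it is given never needs to be mapped.
The new *_range_index hook relies on that property.  A minimal sketch of the
aliasing idea, assuming a direct-mapped, power-of-two-sized secondary cache
(the helper name and constant are illustrative, not from the commit):

	#include <stdint.h>

	#define KSEG0_BASE	0x80000000UL	/* MIPS unmapped, cached segment */

	/*
	 * Map an arbitrary (possibly unmapped) address to the KSEG0 address
	 * that selects the same secondary-cache line, so an Index-type CACHE
	 * op can be issued without risking a TLB miss on the original address.
	 */
	static inline uintptr_t
	sdcache_index_alias(uintptr_t va, uintptr_t sdcache_size)
	{
		return KSEG0_BASE | (va & (sdcache_size - 1));
	}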

cache.c

@@ -1,4 +1,4 @@
-/* $NetBSD: cache.c,v 1.24 2003/12/21 07:59:25 nisimura Exp $ */
+/* $NetBSD: cache.c,v 1.25 2004/12/13 08:39:21 sekiya Exp $ */
 /*
  * Copyright 2001, 2002 Wasabi Systems, Inc.
@@ -68,7 +68,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.24 2003/12/21 07:59:25 nisimura Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.25 2004/12/13 08:39:21 sekiya Exp $");
 #include "opt_cputype.h"
 #include "opt_mips_cache.h"
@@ -738,12 +738,13 @@ primary_cache_is_2way:
 	case MIPS_R5000:
 #endif
 	case MIPS_RM5200:
+		mips_sdcache_write_through = 1;
 		mips_cache_ops.mco_sdcache_wbinv_all =
 		    r5k_sdcache_wbinv_all;
 		mips_cache_ops.mco_sdcache_wbinv_range =
 		    r5k_sdcache_wbinv_range;
 		mips_cache_ops.mco_sdcache_wbinv_range_index =
-		    r5k_sdcache_wbinv_rangeall;	/* XXX? */
+		    r5k_sdcache_wbinv_range_index;
 		mips_cache_ops.mco_sdcache_inv_range =
 		    r5k_sdcache_wbinv_range;
 		mips_cache_ops.mco_sdcache_wb_range =
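
A note on the newly added mips_sdcache_write_through line (our reading of the
change, not text from the commit): the R5000-family secondary cache offers no
hit-writeback CACHE op, only the page-granularity R5K_Page_Invalidate_S, so
the S-cache has to be operated write-through.  No line is then ever dirty,
invalidation can never discard data, and mco_sdcache_inv_range can safely
reuse r5k_sdcache_wbinv_range.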

cache_r5k.c

@@ -1,4 +1,4 @@
-/* $NetBSD: cache_r5k.c,v 1.9 2003/07/15 02:43:37 lukem Exp $ */
+/* $NetBSD: cache_r5k.c,v 1.10 2004/12/13 08:39:21 sekiya Exp $ */
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache_r5k.c,v 1.9 2003/07/15 02:43:37 lukem Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache_r5k.c,v 1.10 2004/12/13 08:39:21 sekiya Exp $");
 #include <sys/param.h>
@@ -603,20 +603,22 @@ __asm(".set mips3");
 void
 r5k_sdcache_wbinv_all(void)
 {
-	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
-	vaddr_t eva = va + mips_sdcache_size;
-
-	while (va < eva) {
-		cache_op_r4k_line(va, R5K_Page_Invalidate_S);
-		va += (128 * 32);
-	}
+
+	r5k_sdcache_wbinv_range(MIPS_PHYS_TO_KSEG0(0), mips_sdcache_size);
 }
 
-/* XXX: want wbinv_range_index here instead? */
 void
-r5k_sdcache_wbinv_rangeall(vaddr_t va, vsize_t size)
+r5k_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
 {
 
-	r5k_sdcache_wbinv_all();
+	/*
+	 * Since we're doing Index ops, we expect to not be able
+	 * to access the address we've been given.  So, get the
+	 * bits that determine the cache index, and make a KSEG0
+	 * address out of them.
+	 */
+	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
+
+	r5k_sdcache_wbinv_range(va, size);
 }
#define round_page(x) (((x) + (128 * 32 - 1)) & ~(128 * 32 - 1))
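
As a worked example of the masking in r5k_sdcache_wbinv_range_index() above
(the cache size and address are invented for illustration): with a 1MB
secondary cache, only the low 20 bits of an address select a line, so an
index flush aimed at physical address 0x01234000 operates on KSEG0 address
0x80034000.  A self-contained, host-runnable check:

	#include <assert.h>
	#include <stdint.h>

	#define MIPS_KSEG0_START	0x80000000UL

	int
	main(void)
	{
		uint32_t sdcache_size = 0x100000;	/* assumed 1MB L2 */
		uint32_t pa = 0x01234000;		/* arbitrary, possibly unmapped */

		/* Same index computation as r5k_sdcache_wbinv_range_index(). */
		uint32_t va = MIPS_KSEG0_START | (pa & (sdcache_size - 1));

		assert(va == 0x80034000);
		return 0;
	}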
@@ -625,13 +627,30 @@ r5k_sdcache_wbinv_rangeall(vaddr_t va, vsize_t size)
 void
 r5k_sdcache_wbinv_range(vaddr_t va, vsize_t size)
 {
+	uint32_t ostatus, taglo;
 	vaddr_t eva = round_page(va + size);
 
 	va = trunc_page(va);
 
+	__asm __volatile(
+		".set noreorder		\n\t"
+		".set noat		\n\t"
+		"mfc0 %0, $12		\n\t"
+		"mtc0 $0, $12		\n\t"
+		".set reorder		\n\t"
+		".set at"
+		: "=r"(ostatus));
+
+	__asm __volatile("mfc0 %0, $28" : "=r"(taglo));
+	__asm __volatile("mtc0 $0, $28");
+
 	while (va < eva) {
 		cache_op_r4k_line(va, R5K_Page_Invalidate_S);
 		va += (128 * 32);
 	}
+
+	__asm __volatile("mtc0 %0, $12; nop" :: "r"(ostatus));
+	__asm __volatile("mtc0 %0, $28; nop" :: "r"(taglo));
 }
 
 void
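
For readers unfamiliar with the coprocessor-0 registers in the hunk above
(editorial annotation; the commit itself does not explain them): $12 is the
Status register, so the first mtc0 masks interrupts for the duration of the
flush, and $28 is TagLo, whose contents the page-invalidate op writes into
the cache tags as we read it, so it is zeroed to mark lines invalid; both
registers are restored afterwards.  Each R5K_Page_Invalidate_S op covers one
page of the secondary cache, 128 lines of 32 bytes = 4096 bytes, which is why
the loop advances va by (128 * 32) and why round_page()/trunc_page() align to
that boundary.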