Add a workaround to handle virtual aliases, which may cause data corruption
on R5000/Rm52xx machines:
- Add a new global variable mips_cache_virtual_alias in mips/cache.c,
  which indicates that the VIPT caches on the CPU can cause virtual
  aliases and that software support is required to handle them
  (i.e. the CPU has no VCED/VCEI exceptions); see the sketch after
  this list.
- Add cache flush/invalidate ops around KSEG0 accesses which might
  cause virtual aliases when mips_cache_virtual_alias is true.
  (Note that checking mips_sdcache_line_size isn't valid for
  R5000/Rm52xx because only R4000/R4400 with an L2 cache have
  VCED/VCEI.)
- Remove the global variable mips_sdcache_forceinv, which is now
  superseded by the new mips_cache_virtual_alias.
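
The following sketch condenses how the new flag is meant to work; it is
not a verbatim copy of the diff below, and the two helper functions are
hypothetical names for logic that really lives inline in
mips_config_cache_prehistoric() (cache.c) and mmrw() (mem.c):

/*
 * Sketch only.  A primary VIPT cache larger than one page can hold the
 * same physical line under two different virtual indexes, and without
 * VCED/VCEI exceptions the hardware won't report the conflict, so
 * software has to flush around aliasing accesses itself.
 */
int mips_cache_virtual_alias;	/* software must handle virtual aliases */

static void
mips_cache_virtual_alias_detect(void)	/* hypothetical helper */
{
	if (mips_picache_size > PAGE_SIZE ||
	    mips_pdcache_size > PAGE_SIZE)
		mips_cache_virtual_alias = 1;

	/* R4000/R4400 report aliases via VCED/VCEI, so no help needed. */
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4000)
		mips_cache_virtual_alias = 0;
}

/* Typical use around a cached KSEG0 access (cf. the mem.c hunk below). */
static void
kseg0_touch(vaddr_t v, size_t c)	/* hypothetical helper */
{
	/* ... read or write c bytes through the KSEG0 window at v ... */
#if defined(MIPS3_PLUS)
	if (mips_cache_virtual_alias)
		mips_dcache_wbinv_range(v, c);
#endif
}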

While here, also change some R4000/R4400 cache ops:
- Don't override mips_cache_alias_mask and mips_cache_prefer_mask with
  values based on MIPS3_MAX_PCACHE_SIZE for R4000/R4400 with an L2
  cache, because it's still worthwhile to reduce VCED/VCEI exceptions.
- Flush the dcache in pmap_zero_page(9) unconditionally on all
  MIPS_HAS_R4K_MMU CPUs and remove the cache flush code from
  cpu_lwp_fork() in vm_machdep.c; see the sketch after this list.
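
For reference, here is the resulting pmap_zero_page() flow in one piece,
condensed from the pmap.c hunks below (DEBUG output and the physical
address range check are left out, and an ANSI prototype is used for
brevity):

void
pmap_zero_page(paddr_t phys)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(phys);
#if defined(MIPS3_PLUS)
	pv_entry_t pv;

	if (mips_cache_virtual_alias) {
		/* Purge a cached mapping with a conflicting VAC index. */
		pv = pa_to_pvh(phys);
		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
	}
#endif

	mips_pagezero((caddr_t)va);

#if defined(MIPS3_PLUS)
	/*
	 * Now unconditional on R4K-style MMUs (VCED on the kernel stack
	 * is not allowed), which is why cpu_lwp_fork() no longer needs
	 * its own flush of the new USPACE area.
	 */
	if (MIPS_HAS_R4K_MMU)
		mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif
}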

Thanks to Markus W Kilbinger for testing patches on port-cobalt/port-mips.


XXX This fix is just a workaround because it doesn't handle all possible
XXX virtual aliases. As discussed on port-mips, the real fix for virtual
XXX aliases is probably to change MI UVM to cope with VIPT caches
XXX (all VA mappings of the same PA must have the same VAC index, etc.).
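
The index comparison that the workaround keys on is sketched below.  The
macro body is an assumption about how mips_cache_indexof() is defined in
<mips/cache.h> (it is not part of this diff), and the cache geometry in
the comment is a made-up example:

/*
 * Two mappings alias in a VIPT cache when they select different cache
 * indexes for the same physical page.  The workaround flushes one of
 * the conflicting mappings; a UVM-level fix would instead hand out
 * only VAs whose indexes match (page coloring).
 */
#define mips_cache_indexof(x)	(((vaddr_t)(x)) & mips_cache_alias_mask)

/*
 * Example with assumed numbers: a 16KB direct-mapped VIPT dcache and
 * 4KB pages give
 *	mips_cache_alias_mask = (16384 - 1) & ~(4096 - 1) = 0x3000
 * so VA bits 12-13 pick the cache color, and two mappings of the same
 * physical page that differ in those bits land on different lines.
 */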
Author: tsutsui
Date:   2005-03-26 09:51:02 +00:00
Parent: 5067941219
Commit: a8d2e55834
6 changed files with 135 additions and 44 deletions

cache.h

@ -1,4 +1,4 @@
/* $NetBSD: cache.h,v 1.7 2005/03/01 04:23:44 sekiya Exp $ */
/* $NetBSD: cache.h,v 1.8 2005/03/26 09:51:02 tsutsui Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -185,8 +185,6 @@ extern int mips_sdcache_write_through;
extern int mips_scache_unified;
extern u_int mips_sdcache_forceinv; /* force pmap to invalidate for r5ksc */
/* TERTIARY CACHE VARIABLES */
extern u_int mips_tcache_size; /* always unified */
extern u_int mips_tcache_line_size;
@ -201,6 +199,8 @@ extern u_int mips_dcache_align_mask;
extern u_int mips_cache_alias_mask;
extern u_int mips_cache_prefer_mask;
extern int mips_cache_virtual_alias;
/*
* XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
*/

pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.46 2005/01/17 04:54:14 atatat Exp $ */
/* $NetBSD: pmap.h,v 1.47 2005/03/26 09:51:02 tsutsui Exp $ */
/*
* Copyright (c) 1992, 1993
@ -180,8 +180,10 @@ void pmap_prefer(vaddr_t, vaddr_t *, int);
/*
* Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
*/
#define PMAP_MAP_POOLPAGE(pa) MIPS_PHYS_TO_KSEG0((pa))
#define PMAP_UNMAP_POOLPAGE(va) MIPS_KSEG0_TO_PHYS((va))
vaddr_t mips_pmap_map_poolpage(paddr_t);
paddr_t mips_pmap_unmap_poolpage(vaddr_t);
#define PMAP_MAP_POOLPAGE(pa) mips_pmap_map_poolpage(pa)
#define PMAP_UNMAP_POOLPAGE(va) mips_pmap_unmap_poolpage(va)
/*
* Other hooks for the pool allocator.

cache.c

@ -1,4 +1,4 @@
/* $NetBSD: cache.c,v 1.26 2005/03/01 04:23:44 sekiya Exp $ */
/* $NetBSD: cache.c,v 1.27 2005/03/26 09:51:02 tsutsui Exp $ */
/*
* Copyright 2001, 2002 Wasabi Systems, Inc.
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.26 2005/03/01 04:23:44 sekiya Exp $");
__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.27 2005/03/26 09:51:02 tsutsui Exp $");
#include "opt_cputype.h"
#include "opt_mips_cache.h"
@ -129,8 +129,6 @@ int mips_sdcache_write_through;
int mips_scache_unified;
u_int mips_sdcache_forceinv = 0;
/* TERTIARY CACHE VARIABLES */
u_int mips_tcache_size; /* always unified */
u_int mips_tcache_line_size;
@ -154,6 +152,8 @@ u_int mips_dcache_align_mask;
u_int mips_cache_alias_mask; /* for virtually-indexed caches */
u_int mips_cache_prefer_mask;
int mips_cache_virtual_alias;
struct mips_cache_ops mips_cache_ops;
#ifdef MIPS1
@ -421,6 +421,11 @@ mips_config_cache_prehistoric(void)
mips3_get_cache_config(csizebase);
if (mips_picache_size > PAGE_SIZE ||
mips_pdcache_size > PAGE_SIZE)
/* no VCE support if there is no L2 cache */
mips_cache_virtual_alias = 1;
switch (mips_picache_line_size) {
case 16:
mips_cache_ops.mco_icache_sync_all =
@ -494,6 +499,10 @@ primary_cache_is_2way:
mips3_get_cache_config(csizebase);
if (mips_picache_size > PAGE_SIZE ||
mips_pdcache_size > PAGE_SIZE)
mips_cache_virtual_alias = 1;
switch (mips_picache_line_size) {
case 32:
mips_cache_ops.mco_icache_sync_all =
@ -590,6 +599,7 @@ primary_cache_is_2way:
~(PAGE_SIZE - 1);
mips_cache_prefer_mask =
max(mips_pdcache_size, mips_picache_size) - 1;
mips_cache_virtual_alias = 1;
/* cache ops */
mips_cache_ops.mco_icache_sync_all =
r5900_icache_sync_all_64;
@ -619,6 +629,8 @@ primary_cache_is_2way:
mips4_get_cache_config(csizebase);
/* VCE is handled by hardware */
mips_cache_ops.mco_icache_sync_all =
r10k_icache_sync_all;
mips_cache_ops.mco_icache_sync_range =
@ -669,14 +681,22 @@ primary_cache_is_2way:
switch (MIPS_PRID_IMPL(cpu_id)) {
#if defined(MIPS3) || defined(MIPS4)
case MIPS_R4000:
#if 0
/*
* R4000/R4400 always detects virtual alias as if
* primary cache size is 32KB. Actual primary cache size
* is ignored wrt VCED/VCEI.
*/
/*
* XXX
* It's still better to avoid virtual alias even with VCE,
* isn't it?
*/
mips_cache_alias_mask =
(MIPS3_MAX_PCACHE_SIZE - 1) & ~(PAGE_SIZE - 1);
mips_cache_prefer_mask = MIPS3_MAX_PCACHE_SIZE - 1;
#endif
mips_cache_virtual_alias = 0;
/* FALLTHROUGH */
case MIPS_R4600:
#ifdef ENABLE_MIPS_R4700

mem.c

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.29 2003/08/07 16:28:33 agc Exp $ */
/* $NetBSD: mem.c,v 1.30 2005/03/26 09:51:02 tsutsui Exp $ */
/*
* Copyright (c) 1982, 1986, 1990, 1993
@ -76,8 +76,11 @@
* Memory special file
*/
#include "opt_cputype.h"
#include "opt_mips_cache.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.29 2003/08/07 16:28:33 agc Exp $");
__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.30 2005/03/26 09:51:02 tsutsui Exp $");
#include <sys/param.h>
#include <sys/conf.h>
@ -89,6 +92,8 @@ __KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.29 2003/08/07 16:28:33 agc Exp $");
#include <machine/cpu.h>
#include <mips/cache.h>
#include <uvm/uvm_extern.h>
extern paddr_t avail_end;
@ -142,6 +147,10 @@ mmrw(dev, uio, flags)
return (EFAULT);
v += MIPS_KSEG0_START;
error = uiomove((void *)v, c, uio);
#if defined(MIPS3_PLUS)
if (mips_cache_virtual_alias)
mips_dcache_wbinv_range(v, c);
#endif
continue;
case DEV_KMEM:
@ -156,6 +165,10 @@ mmrw(dev, uio, flags)
uio->uio_rw == UIO_READ ? B_READ : B_WRITE)))
return (EFAULT);
error = uiomove((void *)v, c, uio);
#if defined(MIPS3_PLUS)
if (mips_cache_virtual_alias)
mips_dcache_wbinv_range(v, c);
#endif
continue;
case DEV_NULL:

pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.157 2005/03/01 04:23:44 sekiya Exp $ */
/* $NetBSD: pmap.c,v 1.158 2005/03/26 09:51:02 tsutsui Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@ -74,7 +74,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.157 2005/03/01 04:23:44 sekiya Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.158 2005/03/26 09:51:02 tsutsui Exp $");
/*
* Manages physical address maps.
@ -641,10 +641,10 @@ pmap_destroy(pmap)
* were being accessed by KSEG0 (cached) addresses and
* may cause cache coherency problems when the page
* is reused with KSEG2 (mapped) addresses. This may
* cause problems on machines without secondary caches.
* cause problems on machines without VCED/VCEI.
*/
if (MIPS_HAS_R4K_MMU)
mips_dcache_wbinv_range((vaddr_t) pte,
if (mips_cache_virtual_alias)
mips_dcache_inv_range((vaddr_t)pte,
PAGE_SIZE);
#endif /* MIPS3_PLUS */
uvm_pagefree(PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS(pte)));
@ -1577,6 +1577,11 @@ void
pmap_zero_page(phys)
paddr_t phys;
{
vaddr_t va;
#if defined(MIPS3_PLUS)
pv_entry_t pv;
#endif
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_zero_page(%lx)\n", (u_long)phys);
@ -1585,8 +1590,18 @@ pmap_zero_page(phys)
if (! (phys < MIPS_MAX_MEM_ADDR))
printf("pmap_zero_page(%lx) nonphys\n", (u_long)phys);
#endif
va = MIPS_PHYS_TO_KSEG0(phys);
mips_pagezero((caddr_t)MIPS_PHYS_TO_KSEG0(phys));
#if defined(MIPS3_PLUS) /* XXX mmu XXX */
if (mips_cache_virtual_alias) {
pv = pa_to_pvh(phys);
if ((pv->pv_flags & PV_UNCACHED) == 0 &&
mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
}
#endif
mips_pagezero((caddr_t)va);
#if defined(MIPS3_PLUS) /* XXX mmu XXX */
/*
@ -1598,9 +1613,8 @@ pmap_zero_page(phys)
*
* XXXJRT This is totally disgusting.
*/
if (MIPS_HAS_R4K_MMU &&
( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv) ) )
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(phys), NBPG);
if (MIPS_HAS_R4K_MMU) /* XXX VCED on kernel stack is not allowed */
mips_dcache_wbinv_range(va, PAGE_SIZE);
#endif /* MIPS3_PLUS */
}
@ -1636,11 +1650,12 @@ pmap_copy_page(src, dst)
* It would probably be better to map the destination as a
* write-through no allocate to reduce cache thrash.
*/
if (MIPS_HAS_R4K_MMU &&
( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv)) ) {
if (mips_cache_virtual_alias) {
/*XXX FIXME Not very sophisticated */
mips_flushcache_allpvh(src);
/* mips_flushcache_allpvh(dst); */
#if 0
mips_flushcache_allpvh(dst);
#endif
}
#endif /* MIPS3_PLUS */
@ -1659,10 +1674,9 @@ pmap_copy_page(src, dst)
*
* XXXJRT -- This is totally disgusting.
*/
if (MIPS_HAS_R4K_MMU &&
( (mips_sdcache_line_size == 0) || (mips_sdcache_forceinv)) ) {
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(src), NBPG);
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(dst), NBPG);
if (mips_cache_virtual_alias) {
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(src), PAGE_SIZE);
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(dst), PAGE_SIZE);
}
#endif /* MIPS3_PLUS */
}
@ -1891,7 +1905,7 @@ again:
pv->pv_next = NULL;
} else {
#if defined(MIPS3_PLUS) /* XXX mmu XXX */
if (MIPS_HAS_R4K_MMU && mips_sdcache_line_size == 0) {
if (mips_cache_virtual_alias) {
/*
* There is at least one other VA mapping this page.
* Check if they are cache index compatible.
@ -2103,12 +2117,27 @@ void *
pmap_pv_page_alloc(struct pool *pp, int flags)
{
struct vm_page *pg;
paddr_t phys;
#if defined(MIPS3_PLUS)
pv_entry_t pv;
#endif
vaddr_t va;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL) {
return NULL;
}
return ((void *)MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)));
phys = VM_PAGE_TO_PHYS(pg);
va = MIPS_PHYS_TO_KSEG0(phys);
#if defined(MIPS3_PLUS)
if (mips_cache_virtual_alias) {
pv = pa_to_pvh(phys);
if ((pv->pv_flags & PV_UNCACHED) == 0 &&
mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
}
#endif
return ((void *)va);
}
/*
@ -2119,6 +2148,11 @@ pmap_pv_page_alloc(struct pool *pp, int flags)
void
pmap_pv_page_free(struct pool *pp, void *v)
{
#ifdef MIPS3_PLUS
if (mips_cache_virtual_alias)
mips_dcache_inv_range((vaddr_t)v, PAGE_SIZE);
#endif
uvm_pagefree(PHYS_TO_VM_PAGE(MIPS_KSEG0_TO_PHYS((vaddr_t)v)));
}
@ -2162,6 +2196,40 @@ pmap_prefer(foff, vap, td)
}
#endif /* MIPS3_PLUS */
vaddr_t
mips_pmap_map_poolpage(paddr_t pa)
{
vaddr_t va;
#if defined(MIPS3_PLUS)
pv_entry_t pv;
#endif
va = MIPS_PHYS_TO_KSEG0(pa);
#if defined(MIPS3_PLUS)
if (mips_cache_virtual_alias) {
pv = pa_to_pvh(pa);
if ((pv->pv_flags & PV_UNCACHED) == 0 &&
mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
}
#endif
return va;
}
paddr_t
mips_pmap_unmap_poolpage(vaddr_t va)
{
paddr_t pa;
pa = MIPS_KSEG0_TO_PHYS(va);
#if defined(MIPS3_PLUS)
if (mips_cache_virtual_alias) {
mips_dcache_inv_range(va, PAGE_SIZE);
}
#endif
return pa;
}
/******************** page table page management ********************/
/* TO BE DONE */

vm_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.105 2005/01/01 03:25:46 simonb Exp $ */
/* $NetBSD: vm_machdep.c,v 1.106 2005/03/26 09:51:02 tsutsui Exp $ */
/*
* Copyright (c) 1992, 1993
@ -79,7 +79,7 @@
#include "opt_ddb.h"
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.105 2005/01/01 03:25:46 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.106 2005/03/26 09:51:02 tsutsui Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -131,18 +131,6 @@ cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
pt_entry_t *pte;
int i, x;
#ifdef MIPS3_PLUS
/*
* To eliminate virtual aliases created by pmap_zero_page(),
* this cache flush operation is necessary.
* VCED on kernel stack is not allowed.
* XXXJRT Confirm that this is necessry, and/or fix
* XXXJRT pmap_zero_page().
*/
if (CPUISMIPS3 && mips_sdcache_line_size)
mips_dcache_wbinv_range((vaddr_t) l2->l_addr, USPACE);
#endif
#ifdef DIAGNOSTIC
/*
* If l1 != curlwp && l1 == &lwp0, we're creating a kernel thread.