Merge the thorpej-mips-cache branch onto the trunk.  This is an
overhaul of how caches are handled for NetBSD's MIPS ports.
thorpej 2001-11-14 18:26:21 +00:00
parent af66038f73
commit bd15cfaed8
14 changed files with 3366 additions and 21 deletions

@@ -0,0 +1,235 @@
/* $NetBSD: cache.h,v 1.2 2001/11/14 18:26:21 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cache operations.
*
* We define the following primitives:
*
* --- Instruction cache synchronization (mandatory):
*
* icache_sync_all Synchronize I-cache
*
* icache_sync_range Synchronize I-cache range
*
* icache_sync_range_index (index ops)
*
* --- Primary data cache (mandatory):
*
* pdcache_wbinv_all Write-back Invalidate primary D-cache
*
* pdcache_wbinv_range Write-back Invalidate primary D-cache range
*
* pdcache_wbinv_range_index (index ops)
*
* pdcache_inv_range Invalidate primary D-cache range
*
* pdcache_wb_range Write-back primary D-cache range
*
* --- Secondary data cache (optional):
*
* sdcache_wbinv_all Write-back Invalidate secondary D-cache
*
* sdcache_wbinv_range Write-back Invalidate secondary D-cache range
*
* sdcache_wbinv_range_index (index ops)
*
* sdcache_inv_range Invalidate secondary D-cache range
*
* sdcache_wb_range Write-back secondary D-cache range
*
* There are some rules that must be followed:
*
* I-cache Synch (all or range):
* The goal is to synchronize the instruction stream,
* so you may need to write back dirty data cache
* blocks first. If a range is requested, and you
* can't synchronize just a range, you have to hit
* the whole thing.
*
* D-cache Write-back Invalidate range:
* If you can't WB-Inv a range, you must WB-Inv the
* entire D-cache.
*
* D-cache Invalidate:
* If you can't Inv the D-cache without doing a
* Write-back, YOU MUST PANIC. This is to catch
* errors in calling code. Callers must be aware
* of this scenario, and must handle it appropriately
* (consider the bus_dma(9) operations).
*
* D-cache Write-back:
* If you can't Write-back without doing an invalidate,
* that's fine. Then treat this as a WB-Inv. Skipping
* the invalidate is merely an optimization.
*
* All operations:
* Valid virtual addresses must be passed to the
* cache operation.
*
* Finally, these primitives are grouped together in reasonable
* ways. For all operations described here, first the primary
* cache is frobbed, then the secondary cache is frobbed, if the
* operation for the secondary cache exists.
*
* mips_icache_sync_all Synchronize I-cache
*
* mips_icache_sync_range Synchronize I-cache range
*
* mips_icache_sync_range_index (index ops)
*
* mips_dcache_wbinv_all Write-back Invalidate D-cache
*
* mips_dcache_wbinv_range Write-back Invalidate D-cache range
*
* mips_dcache_wbinv_range_index (index ops)
*
* mips_dcache_inv_range Invalidate D-cache range
*
* mips_dcache_wb_range Write-back D-cache range
*/
struct mips_cache_ops {
void (*mco_icache_sync_all)(void);
void (*mco_icache_sync_range)(vaddr_t, vsize_t);
void (*mco_icache_sync_range_index)(vaddr_t, vsize_t);
void (*mco_pdcache_wbinv_all)(void);
void (*mco_pdcache_wbinv_range)(vaddr_t, vsize_t);
void (*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
void (*mco_pdcache_inv_range)(vaddr_t, vsize_t);
void (*mco_pdcache_wb_range)(vaddr_t, vsize_t);
void (*mco_sdcache_wbinv_all)(void);
void (*mco_sdcache_wbinv_range)(vaddr_t, vsize_t);
void (*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
void (*mco_sdcache_inv_range)(vaddr_t, vsize_t);
void (*mco_sdcache_wb_range)(vaddr_t, vsize_t);
};
#ifdef _KERNEL
extern struct mips_cache_ops mips_cache_ops;
/* PRIMARY CACHE VARIABLES */
extern int mips_picache_size;
extern int mips_picache_line_size;
extern int mips_picache_ways;
extern int mips_picache_way_size;
extern int mips_picache_way_mask;
extern int mips_pdcache_size; /* and unified */
extern int mips_pdcache_line_size;
extern int mips_pdcache_ways;
extern int mips_pdcache_way_size;
extern int mips_pdcache_way_mask;
extern int mips_pdcache_write_through;
extern int mips_pcache_unified;
/* SECONDARY CACHE VARIABLES */
extern int mips_sicache_size;
extern int mips_sicache_line_size;
extern int mips_sicache_ways;
extern int mips_sicache_way_size;
extern int mips_sicache_way_mask;
extern int mips_sdcache_size; /* and unified */
extern int mips_sdcache_line_size;
extern int mips_sdcache_ways;
extern int mips_sdcache_way_size;
extern int mips_sdcache_way_mask;
extern int mips_sdcache_write_through;
extern int mips_scache_unified;
/* TERTIARY CACHE VARIABLES */
extern int mips_tcache_size; /* always unified */
extern int mips_tcache_line_size;
extern int mips_tcache_ways;
extern int mips_tcache_way_size;
extern int mips_tcache_way_mask;
extern int mips_tcache_write_through;
extern int mips_cache_alias_mask;
extern int mips_cache_prefer_mask;
/*
* XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
*/
#define mips_cache_indexof(x) (((vaddr_t)(x)) & mips_cache_alias_mask)
#define __mco_noargs(x) \
do { \
(*mips_cache_ops.mco_p ## x )(); \
if (*mips_cache_ops.mco_s ## x ) \
(*mips_cache_ops.mco_s ## x )(); \
} while (/*CONSTCOND*/0)
#define __mco_2args(x, a, b) \
do { \
(*mips_cache_ops.mco_p ## x )((a), (b)); \
if (*mips_cache_ops.mco_s ## x ) \
(*mips_cache_ops.mco_s ## x )((a), (b)); \
} while (/*CONSTCOND*/0)
#define mips_icache_sync_all() \
(*mips_cache_ops.mco_icache_sync_all)()
#define mips_icache_sync_range(v, s) \
(*mips_cache_ops.mco_icache_sync_range)((v), (s))
#define mips_icache_sync_range_index(v, s) \
(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))
#define mips_dcache_wbinv_all() \
__mco_noargs(dcache_wbinv_all)
#define mips_dcache_wbinv_range(v, s) \
__mco_2args(dcache_wbinv_range, (v), (s))
#define mips_dcache_wbinv_range_index(v, s) \
__mco_2args(dcache_wbinv_range_index, (v), (s))
#define mips_dcache_inv_range(v, s) \
__mco_2args(dcache_inv_range, (v), (s))
#define mips_dcache_wb_range(v, s) \
__mco_2args(dcache_wb_range, (v), (s))
void mips_config_cache(void);
#endif /* _KERNEL */
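
As a worked illustration of the calling rules documented in cache.h above, the sketch below shows how a hypothetical driver-level helper might use the grouped mips_dcache_* wrappers, which frob the primary cache and, when present, the secondary cache. The helper and its arguments are illustrative only; real NetBSD drivers normally reach these primitives through the bus_dma(9) sync operations rather than calling them directly.

void
example_dma_sync(vaddr_t buf, vsize_t len, int device_will_write)
{

	if (device_will_write) {
		/*
		 * Device is about to DMA into the buffer: make sure the
		 * CPU holds no stale or dirty lines for it.  Per the rules
		 * above, mips_dcache_inv_range() panics if the hardware
		 * cannot invalidate without writing back, so it may only
		 * be used where that is known to be safe; otherwise use
		 * mips_dcache_wbinv_range().
		 */
		mips_dcache_inv_range(buf, len);
	} else {
		/*
		 * The CPU filled the buffer and the device is about to
		 * read it: push any dirty lines out to memory.
		 */
		mips_dcache_wb_range(buf, len);
	}
}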

@@ -1,11 +1,10 @@
/* $NetBSD: locore.h,v 1.1 2001/10/16 16:31:35 uch Exp $ */
/* $NetBSD: cache_r3k.h,v 1.2 2001/11/14 18:26:21 thorpej Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -17,16 +16,16 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
@@ -36,13 +35,24 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LOCORE
extern void r5900_init(void);
/*
* Cache definitions/operations for R3000-style caches.
*/
extern void r5900_FlushCache(void);
extern void r5900_FlushDCache(vaddr_t, vaddr_t);
extern void r5900_FlushICache(vaddr_t, vaddr_t);
extern void r5900_HitFlushDCache(vaddr_t, vsize_t);
extern void r5900_InvalidateDCache(vaddr_t, vsize_t);
#endif /* !_LOCORE */
#ifdef _KERNEL
void r3k_icache_sync_all(void);
void r3k_icache_sync_range(vaddr_t, vsize_t);
void r3k_pdcache_wbinv_all(void);
void r3k_pdcache_inv_range(vaddr_t, vsize_t);
void r3k_pdcache_wb_range(vaddr_t, vsize_t);
void r3k_picache_do_inv(vaddr_t, vaddr_t);
void r3k_pdcache_do_inv(vaddr_t, vaddr_t);
int r3k_picache_size(void);
int r3k_pdcache_size(void);
#endif /* _KERNEL */
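
The prototype list above has no write-back-invalidate range primitive because an R3000-style D-cache is write-through: a write-back is a no-op and wbinv degenerates to a plain invalidate. Accordingly, mips_config_cache() (later in this commit) is expected to fill the generic operation table for these CPUs roughly as follows:

	mips_cache_ops.mco_pdcache_wbinv_all = r3k_pdcache_wbinv_all;
	mips_cache_ops.mco_pdcache_wbinv_range = r3k_pdcache_inv_range;
	mips_cache_ops.mco_pdcache_inv_range = r3k_pdcache_inv_range;
	mips_cache_ops.mco_pdcache_wb_range = r3k_pdcache_wb_range;	/* no-op */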

@@ -0,0 +1,214 @@
/* $NetBSD: cache_r4k.h,v 1.2 2001/11/14 18:26:21 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cache definitions/operations for R4000-style caches.
*/
#define CACHE_R4K_I 0
#define CACHE_R4K_D 1
#define CACHE_R4K_SI 2
#define CACHE_R4K_SD 3
#define CACHEOP_R4K_INDEX_INV (0 << 2) /* I, SI */
#define CACHEOP_R4K_INDEX_WB_INV (0 << 2) /* D, SD */
#define CACHEOP_R4K_INDEX_LOAD_TAG (1 << 2) /* all */
#define CACHEOP_R4K_INDEX_STORE_TAG (2 << 2) /* all */
#define CACHEOP_R4K_CREATE_DIRTY_EXCL (3 << 2) /* D, SD */
#define CACHEOP_R4K_HIT_INV (4 << 2) /* all */
#define CACHEOP_R4K_HIT_WB_INV (5 << 2) /* D, SD */
#define CACHEOP_R4K_FILL (5 << 2) /* I */
#define CACHEOP_R4K_HIT_WB (6 << 2) /* I, D, SD */
#define CACHEOP_R4K_HIT_SET_VIRTUAL (7 << 2) /* SI, SD */
#ifdef _KERNEL
/*
* cache_op_r4k_line:
*
* Perform the specified cache operation on a single line.
*/
#define cache_op_r4k_line(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"cache %1, 0(%0) \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
/*
* cache_r4k_op_32lines_16:
*
* Perform the specified cache operation on 32 16-byte
* cache lines.
*/
#define cache_r4k_op_32lines_16(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \
"cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \
"cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \
"cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \
"cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \
"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \
"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \
"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \
"cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \
"cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \
"cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \
"cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \
"cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \
"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \
"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \
"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
/*
* cache_r4k_op_32lines_32:
*
* Perform the specified cache operation on 32 32-byte
* cache lines.
*/
#define cache_r4k_op_32lines_32(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t" \
"cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t" \
"cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t" \
"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t" \
"cache %1, 0x100(%0); cache %1, 0x120(%0); \n\t" \
"cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t" \
"cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t" \
"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t" \
"cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t" \
"cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t" \
"cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t" \
"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t" \
"cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t" \
"cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t" \
"cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t" \
"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
/*
* cache_r4k_op_16lines_32_2way:
*
* Perform the specified cache operation on 16 32-byte
* cache lines, 2-ways.
*/
#define cache_r4k_op_16lines_32_2way(va1, va2, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \
"cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \
"cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \
"cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \
"cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \
"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \
"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \
"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \
"cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t" \
"cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t" \
"cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t" \
"cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t" \
"cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t" \
"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t" \
"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t" \
"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t" \
".set reorder" \
: \
: "r" (va1), "r" (va2), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
void r4k_icache_sync_all_16(void);
void r4k_icache_sync_range_16(vaddr_t, vsize_t);
void r4k_icache_sync_range_index_16(vaddr_t, vsize_t);
void r4k_pdcache_wbinv_all_16(void);
void r4k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void r4k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);
void r4k_pdcache_inv_range_16(vaddr_t, vsize_t);
void r4k_pdcache_wb_range_16(vaddr_t, vsize_t);
void r5k_icache_sync_all_32(void);
void r5k_icache_sync_range_32(vaddr_t, vsize_t);
void r5k_icache_sync_range_index_32(vaddr_t, vsize_t);
void r5k_pdcache_wbinv_all_32(void);
void r4600v1_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void r4600v2_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void r5k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void r5k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);
void r4600v1_pdcache_inv_range_32(vaddr_t, vsize_t);
void r4600v2_pdcache_inv_range_32(vaddr_t, vsize_t);
void r5k_pdcache_inv_range_32(vaddr_t, vsize_t);
void r4600v1_pdcache_wb_range_32(vaddr_t, vsize_t);
void r4600v2_pdcache_wb_range_32(vaddr_t, vsize_t);
void r5k_pdcache_wb_range_32(vaddr_t, vsize_t);
void r4k_sdcache_wbinv_all_32(void);
void r4k_sdcache_wbinv_range_32(vaddr_t, vsize_t);
void r4k_sdcache_wbinv_range_index_32(vaddr_t, vsize_t);
void r4k_sdcache_inv_range_32(vaddr_t, vsize_t);
void r4k_sdcache_wb_range_32(vaddr_t, vsize_t);
void r4k_sdcache_wbinv_all_generic(void);
void r4k_sdcache_wbinv_range_generic(vaddr_t, vsize_t);
void r4k_sdcache_wbinv_range_index_generic(vaddr_t, vsize_t);
void r4k_sdcache_inv_range_generic(vaddr_t, vsize_t);
void r4k_sdcache_wb_range_generic(vaddr_t, vsize_t);
#endif /* _KERNEL */
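
For reference, the op argument these macros pass to the MIPS cache instruction is simply the OR of a cache selector (bits 1:0, the CACHE_R4K_* defines) and an operation code (bits 4:2, the CACHEOP_R4K_* defines). A few values as they are used later in this commit work out to:

	CACHE_R4K_I  | CACHEOP_R4K_INDEX_INV    == 0 | (0 << 2) == 0x00
	CACHE_R4K_D  | CACHEOP_R4K_HIT_WB_INV   == 1 | (5 << 2) == 0x15
	CACHE_R4K_SD | CACHEOP_R4K_INDEX_WB_INV == 3 | (0 << 2) == 0x03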

@@ -0,0 +1,147 @@
/* $NetBSD: cache_r5900.h,v 1.2 2001/11/14 18:26:21 thorpej Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define CACHE_R5900_SIZE_I 16384
#define CACHE_R5900_SIZE_D 8192
#define CACHE_R5900_LSIZE_I 64
#define CACHE_R5900_LSIZE_D 64
#define CACHEOP_R5900_IINV_I 0x07 /* INDEX INVALIDATE */
#define CACHEOP_R5900_HINV_I 0x0b /* HIT INVALIDATE */
#define CACHEOP_R5900_IWBINV_D 0x14 /* INDEX WRITE BACK INVALIDATE */
#define CACHEOP_R5900_ILTG_D 0x10 /* INDEX LOAD TAG */
#define CACHEOP_R5900_ISTG_D 0x12 /* INDEX STORE TAG */
#define CACHEOP_R5900_IINV_D 0x16 /* INDEX INVALIDATE */
#define CACHEOP_R5900_HINV_D 0x1a /* HIT INVALIDATE */
#define CACHEOP_R5900_HWBINV_D 0x18 /* HIT WRITEBACK INVALIDATE */
#define CACHEOP_R5900_ILDT_D 0x11 /* INDEX LOAD DATA */
#define CACHEOP_R5900_ISDT_D 0x13 /* INDEX STORE DATA */
#define CACHEOP_R5900_HWB_D 0x1c /* HIT WRITEBACK W/O INVALIDATE */
#ifdef _KERNEL
#define cache_op_r5900_line_64(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 0(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
#define cache_r5900_op_4lines_64(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 0(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 64(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 128(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 192(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
#define cache_r5900_op_4lines_64_2way(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 0(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 1(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 64(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 65(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 128(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 129(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 192(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
"cache %1, 193(%0) \n\t" \
"sync.l \n\t" \
"sync.p \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
void r5900_icache_sync_all_64(void);
void r5900_icache_sync_range_64(vaddr_t, vsize_t);
void r5900_icache_sync_range_index_64(vaddr_t, vsize_t);
void r5900_pdcache_wbinv_all_64(void);
void r5900_pdcache_wbinv_range_64(vaddr_t, vsize_t);
void r5900_pdcache_wbinv_range_index_64(vaddr_t, vsize_t);
void r5900_pdcache_inv_range_64(vaddr_t, vsize_t);
void r5900_pdcache_wb_range_64(vaddr_t, vsize_t);
#endif /* _KERNEL */
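
To show how these building blocks compose, here is a minimal sketch of a hit-type range operation in the style the r5900_pdcache_*_64() routines presumably follow (their actual implementations are in cache_r5900.c, which is not part of this excerpt); the function name is illustrative. The sync.l/sync.p pairs the macros above already emit keep the R5900 pipelines drained around each cache op, so the sketch needs no extra barriers.

static void
example_r5900_dcache_wbinv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = (va + size + 63) & ~63;	/* round end up to a line */

	va &= ~63;				/* truncate start to a line */
	while ((eva - va) >= (4 * 64)) {
		cache_r5900_op_4lines_64(va, CACHEOP_R5900_HWBINV_D);
		va += (4 * 64);
	}
	while (va < eva) {
		cache_op_r5900_line_64(va, CACHEOP_R5900_HWBINV_D);
		va += 64;
	}
}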

@@ -0,0 +1,178 @@
/* $NetBSD: cache_tx39.h,v 1.2 2001/11/14 18:26:22 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi; and by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Cache definitions/operations for TX3900-style caches.
*
* XXX THIS IS NOT YET COMPLETE.
*/
#define CACHE_TX39_I 0
#define CACHE_TX39_D 1
#define CACHEOP_TX3900_INDEX_INV (0 << 2) /* I */
#define CACHEOP_TX3900_ILRUC (1 << 2) /* I, D */
#define CACHEOP_TX3900_ILCKC (2 << 2) /* D */
#define CACHEOP_TX3900_HIT_INV (4 << 2) /* D */
#define CACHEOP_TX3920_INDEX_INV CACHEOP_TX3900_INDEX_INV
#define CACHEOP_TX3920_INDEX_WB_INV (0 << 2) /* D */
#define CACHEOP_TX3920_ILRUC CACHEOP_TX3900_ILRUC
#define CACHEOP_TX3920_INDEX_LOAD_TAG (3 << 2) /* I, D */
#define CACHEOP_TX3920_HIT_INV (4 << 2) /* I, D */
#define CACHEOP_TX3920_HIT_WB_INV (5 << 2) /* D */
#define CACHEOP_TX3920_HIT_WB (6 << 2) /* D */
#define CACHEOP_TX3920_ISTTAG (7 << 2) /* I, D */
#ifdef _KERNEL
#ifndef _LOCORE
/*
* cache_op_tx39_line:
*
* Perform the specified cache operation on a single line.
*/
#define cache_op_tx39_line(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
".set push \n\t" \
".set mips3 \n\t" \
"cache %1, 0(%0) \n\t" \
".set pop \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
/*
* cache_tx39_op_32lines_4:
*
* Perform the specified cache operation on 32 4-byte
* cache lines.
*/
#define cache_tx39_op_32lines_4(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
".set push \n\t" \
".set mips3 \n\t" \
"cache %1, 0x00(%0); cache %1, 0x04(%0); \n\t" \
"cache %1, 0x08(%0); cache %1, 0x0c(%0); \n\t" \
"cache %1, 0x10(%0); cache %1, 0x14(%0); \n\t" \
"cache %1, 0x18(%0); cache %1, 0x1c(%0); \n\t" \
"cache %1, 0x20(%0); cache %1, 0x24(%0); \n\t" \
"cache %1, 0x28(%0); cache %1, 0x2c(%0); \n\t" \
"cache %1, 0x30(%0); cache %1, 0x34(%0); \n\t" \
"cache %1, 0x38(%0); cache %1, 0x3c(%0); \n\t" \
"cache %1, 0x40(%0); cache %1, 0x44(%0); \n\t" \
"cache %1, 0x48(%0); cache %1, 0x4c(%0); \n\t" \
"cache %1, 0x50(%0); cache %1, 0x54(%0); \n\t" \
"cache %1, 0x58(%0); cache %1, 0x5c(%0); \n\t" \
"cache %1, 0x60(%0); cache %1, 0x64(%0); \n\t" \
"cache %1, 0x68(%0); cache %1, 0x6c(%0); \n\t" \
"cache %1, 0x70(%0); cache %1, 0x74(%0); \n\t" \
"cache %1, 0x78(%0); cache %1, 0x7c(%0); \n\t" \
".set pop \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
/*
* cache_tx39_op_32lines_16:
*
* Perform the specified cache operation on 32 16-byte
* cache lines.
*/
#define cache_tx39_op_32lines_16(va, op) \
do { \
__asm __volatile( \
".set noreorder \n\t" \
".set push \n\t" \
".set mips3 \n\t" \
"cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \
"cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \
"cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \
"cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \
"cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \
"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \
"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \
"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \
"cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \
"cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \
"cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \
"cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \
"cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \
"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \
"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \
"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \
".set pop \n\t" \
".set reorder" \
: \
: "r" (va), "i" (op) \
: "memory"); \
} while (/*CONSTCOND*/0)
void tx3900_icache_sync_all_16(void);
void tx3900_icache_sync_range_16(vaddr_t, vsize_t);
void tx3900_pdcache_wbinv_all_4(void);
void tx3900_pdcache_inv_range_4(vaddr_t, vsize_t);
void tx3900_pdcache_wb_range_4(vaddr_t, vsize_t);
void tx3920_icache_sync_all_16wb(void);
void tx3920_icache_sync_range_16wt(vaddr_t, vsize_t);
void tx3920_icache_sync_range_16wb(vaddr_t, vsize_t);
void tx3920_pdcache_wbinv_all_16wt(void);
void tx3920_pdcache_wbinv_all_16wb(void);
void tx3920_pdcache_wbinv_range_16wb(vaddr_t, vsize_t);
void tx3920_pdcache_inv_range_16(vaddr_t, vsize_t);
void tx3920_pdcache_wb_range_16wt(vaddr_t, vsize_t);
void tx3920_pdcache_wb_range_16wb(vaddr_t, vsize_t);
void tx3900_icache_do_inv_index_16(vaddr_t, vsize_t);
void tx3920_icache_do_inv_16(vaddr_t, vsize_t);
#endif /* !_LOCORE */
#endif /* _KERNEL */

sys/arch/mips/mips/cache.c

@@ -0,0 +1,583 @@
/* $NetBSD: cache.c,v 1.2 2001/11/14 18:26:22 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_cputype.h"
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <mips/cache.h>
#include <mips/locore.h>
#ifdef MIPS1
#include <mips/cache_r3k.h>
#endif
#ifdef MIPS3
#include <mips/cache_r4k.h> /* includes r5k */
#endif
/* PRIMARY CACHE VARIABLES */
int mips_picache_size;
int mips_picache_line_size;
int mips_picache_ways;
int mips_picache_way_size;
int mips_picache_way_mask;
int mips_pdcache_size; /* and unified */
int mips_pdcache_line_size;
int mips_pdcache_ways;
int mips_pdcache_way_size;
int mips_pdcache_way_mask;
int mips_pdcache_write_through;
int mips_pcache_unified;
/* SECONDARY CACHE VARIABLES */
int mips_sicache_size;
int mips_sicache_line_size;
int mips_sicache_ways;
int mips_sicache_way_size;
int mips_sicache_way_mask;
int mips_sdcache_size; /* and unified */
int mips_sdcache_line_size;
int mips_sdcache_ways;
int mips_sdcache_way_size;
int mips_sdcache_way_mask;
int mips_sdcache_write_through;
int mips_scache_unified;
/* TERTIARY CACHE VARIABLES */
int mips_tcache_size; /* always unified */
int mips_tcache_line_size;
int mips_tcache_ways;
int mips_tcache_way_size;
int mips_tcache_way_mask;
int mips_tcache_write_through;
int mips_cache_alias_mask; /* for virtually-indexed caches */
int mips_cache_prefer_mask;
struct mips_cache_ops mips_cache_ops;
#ifdef MIPS1
#ifdef ENABLE_MIPS_TX3900
#include <mips/cache_tx39.h>
void tx3900_get_cache_config(void);
void tx3920_get_cache_config(void);
#endif /* ENABLE_MIPS_TX3900 */
#endif /* MIPS1 */
#ifdef MIPS3
#ifdef MIPS3_5900
#include <mips/cache_r5900.h>
#endif /* MIPS3_5900 */
#endif /* MIPS3 */
#ifdef MIPS3
void mips3_get_cache_config(int);
#endif /* MIPS3 */
/*
* mips_config_cache:
*
* Configure the cache for the system.
*
* XXX DOES NOT HANDLE SPLIT SECONDARY CACHES.
*/
void
mips_config_cache(void)
{
#ifdef MIPS3
int csizebase = MIPS3_CONFIG_C_DEFBASE;
#endif
KASSERT(PAGE_SIZE != 0);
/*
* Configure primary caches.
*/
switch (MIPS_PRID_IMPL(cpu_id)) {
#ifdef MIPS1
case MIPS_R2000:
case MIPS_R3000:
mips_picache_size = r3k_picache_size();
mips_pdcache_size = r3k_pdcache_size();
mips_picache_line_size = 4;
mips_pdcache_line_size = 4;
mips_picache_ways = 1;
mips_pdcache_ways = 1;
mips_pdcache_write_through = 1;
mips_cache_ops.mco_icache_sync_all =
r3k_icache_sync_all;
mips_cache_ops.mco_icache_sync_range =
r3k_icache_sync_range;
mips_cache_ops.mco_icache_sync_range_index =
mips_cache_ops.mco_icache_sync_range;
mips_cache_ops.mco_pdcache_wbinv_all =
r3k_pdcache_wbinv_all;
mips_cache_ops.mco_pdcache_wbinv_range =
r3k_pdcache_inv_range;
mips_cache_ops.mco_pdcache_wbinv_range_index =
mips_cache_ops.mco_pdcache_wbinv_range;
mips_cache_ops.mco_pdcache_inv_range =
r3k_pdcache_inv_range;
mips_cache_ops.mco_pdcache_wb_range =
r3k_pdcache_wb_range;
uvmexp.ncolors = atop(mips_pdcache_size);
break;
#ifdef ENABLE_MIPS_TX3900
case MIPS_TX3900:
switch (MIPS_PRID_REV_MAJ(cpu_id)) {
case 1: /* TX3912 */
mips_picache_ways = 1;
mips_picache_line_size = 16;
mips_pdcache_line_size = 4;
tx3900_get_cache_config();
mips_pdcache_write_through = 1;
mips_cache_ops.mco_icache_sync_all =
tx3900_icache_sync_all_16;
mips_cache_ops.mco_icache_sync_range =
tx3900_icache_sync_range_16;
mips_cache_ops.mco_icache_sync_range_index =
tx3900_icache_sync_range_16;
mips_cache_ops.mco_pdcache_wbinv_all =
tx3900_pdcache_wbinv_all_4;
mips_cache_ops.mco_pdcache_wbinv_range =
tx3900_pdcache_inv_range_4;
mips_cache_ops.mco_pdcache_wbinv_range_index =
tx3900_pdcache_inv_range_4;
mips_cache_ops.mco_pdcache_inv_range =
tx3900_pdcache_inv_range_4;
mips_cache_ops.mco_pdcache_wb_range =
tx3900_pdcache_wb_range_4;
break;
case 3: /* TX3922 */
mips_picache_ways = 2;
mips_picache_line_size = 16;
mips_pdcache_line_size = 16;
tx3920_get_cache_config();
mips_cache_ops.mco_icache_sync_all =
mips_pdcache_write_through ?
tx3900_icache_sync_all_16 :
tx3920_icache_sync_all_16wb;
mips_cache_ops.mco_icache_sync_range =
mips_pdcache_write_through ?
tx3920_icache_sync_range_16wt :
tx3920_icache_sync_range_16wb;
mips_cache_ops.mco_icache_sync_range_index =
mips_cache_ops.mco_icache_sync_range;
mips_cache_ops.mco_pdcache_wbinv_all =
mips_pdcache_write_through ?
tx3920_pdcache_wbinv_all_16wt :
tx3920_pdcache_wbinv_all_16wb;
mips_cache_ops.mco_pdcache_wbinv_range =
mips_pdcache_write_through ?
tx3920_pdcache_inv_range_16 :
tx3920_pdcache_wbinv_range_16wb;
mips_cache_ops.mco_pdcache_wbinv_range_index =
mips_cache_ops.mco_pdcache_wbinv_range;
mips_cache_ops.mco_pdcache_inv_range =
tx3920_pdcache_inv_range_16;
mips_cache_ops.mco_pdcache_wb_range =
mips_pdcache_write_through ?
tx3920_pdcache_wb_range_16wt :
tx3920_pdcache_wb_range_16wb;
break;
default:
panic("mips_config_cache: unsupported TX3900");
}
mips_pdcache_ways = 2;
tx3900_get_cache_config();
uvmexp.ncolors = atop(mips_pdcache_size) / mips_pdcache_ways;
break;
#endif /* ENABLE_MIPS_TX3900 */
#endif /* MIPS1 */
#ifdef MIPS3
case MIPS_R4000:
case MIPS_R4100:
case MIPS_R4300:
mips_picache_ways = 1;
mips_pdcache_ways = 1;
mips_sdcache_ways = 1;
mips3_get_cache_config(csizebase);
switch (mips_picache_line_size) {
case 16:
mips_cache_ops.mco_icache_sync_all =
r4k_icache_sync_all_16;
mips_cache_ops.mco_icache_sync_range =
r4k_icache_sync_range_16;
mips_cache_ops.mco_icache_sync_range_index =
r4k_icache_sync_range_index_16;
break;
default:
panic("r4k picache line size %d",
mips_picache_line_size);
}
switch (mips_pdcache_line_size) {
case 16:
mips_cache_ops.mco_pdcache_wbinv_all =
r4k_pdcache_wbinv_all_16;
mips_cache_ops.mco_pdcache_wbinv_range =
r4k_pdcache_wbinv_range_16;
mips_cache_ops.mco_pdcache_wbinv_range_index =
r4k_pdcache_wbinv_range_index_16;
mips_cache_ops.mco_pdcache_inv_range =
r4k_pdcache_inv_range_16;
mips_cache_ops.mco_pdcache_wb_range =
r4k_pdcache_wb_range_16;
break;
default:
panic("r4k pdcache line size %d",
mips_pdcache_line_size);
}
/* Virtually-indexed cache; no use for colors. */
break;
case MIPS_R4600:
#ifdef ENABLE_MIPS_R4700
case MIPS_R4700:
#endif
#ifndef ENABLE_MIPS_R3NKK
case MIPS_R5000:
#endif
case MIPS_RM5200:
mips_picache_ways = 2;
mips_pdcache_ways = 2;
mips3_get_cache_config(csizebase);
switch (mips_picache_line_size) {
case 32:
mips_cache_ops.mco_icache_sync_all =
r5k_icache_sync_all_32;
mips_cache_ops.mco_icache_sync_range =
r5k_icache_sync_range_32;
mips_cache_ops.mco_icache_sync_range_index =
r5k_icache_sync_range_index_32;
break;
default:
panic("r5k picache line size %d",
mips_picache_line_size);
}
switch (mips_pdcache_line_size) {
case 32:
mips_cache_ops.mco_pdcache_wbinv_all =
r5k_pdcache_wbinv_all_32;
mips_cache_ops.mco_pdcache_wbinv_range =
r5k_pdcache_wbinv_range_32;
mips_cache_ops.mco_pdcache_wbinv_range_index =
r5k_pdcache_wbinv_range_index_32;
mips_cache_ops.mco_pdcache_inv_range =
r5k_pdcache_inv_range_32;
mips_cache_ops.mco_pdcache_wb_range =
r5k_pdcache_wb_range_32;
break;
default:
panic("r5k pdcache line size %d",
mips_pdcache_line_size);
}
/*
* Deal with R4600 chip bugs.
*/
if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 &&
MIPS_PRID_REV_MAJ(cpu_id) == 1) {
KASSERT(mips_pdcache_line_size == 32);
mips_cache_ops.mco_pdcache_wbinv_range =
r4600v1_pdcache_wbinv_range_32;
mips_cache_ops.mco_pdcache_inv_range =
r4600v1_pdcache_inv_range_32;
mips_cache_ops.mco_pdcache_wb_range =
r4600v1_pdcache_wb_range_32;
} else if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 &&
MIPS_PRID_REV_MAJ(cpu_id) == 2) {
KASSERT(mips_pdcache_line_size == 32);
mips_cache_ops.mco_pdcache_wbinv_range =
r4600v2_pdcache_wbinv_range_32;
mips_cache_ops.mco_pdcache_inv_range =
r4600v2_pdcache_inv_range_32;
mips_cache_ops.mco_pdcache_wb_range =
r4600v2_pdcache_wb_range_32;
}
/* Virtually-indexed cache; no use for colors. */
break;
#ifdef MIPS3_5900
case MIPS_R5900:
/* cache spec */
mips_picache_ways = 2;
mips_pdcache_ways = 2;
mips_picache_size = CACHE_R5900_SIZE_I;
mips_picache_line_size = CACHE_R5900_LSIZE_I;
mips_pdcache_size = CACHE_R5900_SIZE_D;
mips_pdcache_line_size = CACHE_R5900_LSIZE_D;
mips_cache_alias_mask =
((mips_pdcache_size / mips_pdcache_ways) - 1) &
~(PAGE_SIZE - 1);
mips_cache_prefer_mask =
max(mips_pdcache_size, mips_picache_size) - 1;
/* cache ops */
mips_cache_ops.mco_icache_sync_all =
r5900_icache_sync_all_64;
mips_cache_ops.mco_icache_sync_range =
r5900_icache_sync_range_64;
mips_cache_ops.mco_icache_sync_range_index =
r5900_icache_sync_range_index_64;
mips_cache_ops.mco_pdcache_wbinv_all =
r5900_pdcache_wbinv_all_64;
mips_cache_ops.mco_pdcache_wbinv_range =
r5900_pdcache_wbinv_range_64;
mips_cache_ops.mco_pdcache_wbinv_range_index =
r5900_pdcache_wbinv_range_index_64;
mips_cache_ops.mco_pdcache_inv_range =
r5900_pdcache_inv_range_64;
mips_cache_ops.mco_pdcache_wb_range =
r5900_pdcache_wb_range_64;
break;
#endif /* MIPS3_5900 */
#endif /* MIPS3 */
default:
panic("can't handle primary cache on impl 0x%x\n",
MIPS_PRID_IMPL(cpu_id));
}
/*
* Compute the "way mask" for each cache.
*/
if (mips_picache_size) {
KASSERT(mips_picache_ways != 0);
mips_picache_way_size = (mips_picache_size / mips_picache_ways);
mips_picache_way_mask = mips_picache_way_size - 1;
}
if (mips_pdcache_size) {
KASSERT(mips_pdcache_ways != 0);
mips_pdcache_way_size = (mips_pdcache_size / mips_pdcache_ways);
mips_pdcache_way_mask = mips_pdcache_way_size - 1;
}
if (mips_sdcache_line_size == 0)
return;
/*
* Configure the secondary cache.
*/
switch (MIPS_PRID_IMPL(cpu_id)) {
#ifdef MIPS3
case MIPS_R4000:
case MIPS_R4100:
case MIPS_R4300:
case MIPS_R4600:
#ifdef ENABLE_MIPS_R4700
case MIPS_R4700:
#endif
#ifndef ENABLE_MIPS_R3NKK
case MIPS_R5000:
#endif
case MIPS_RM5200:
switch (mips_sdcache_ways) {
case 1:
switch (mips_sdcache_line_size) {
case 32:
mips_cache_ops.mco_sdcache_wbinv_all =
r4k_sdcache_wbinv_all_32;
mips_cache_ops.mco_sdcache_wbinv_range =
r4k_sdcache_wbinv_range_32;
mips_cache_ops.mco_sdcache_wbinv_range_index =
r4k_sdcache_wbinv_range_index_32;
mips_cache_ops.mco_sdcache_inv_range =
r4k_sdcache_inv_range_32;
mips_cache_ops.mco_sdcache_wb_range =
r4k_sdcache_wb_range_32;
break;
case 16:
case 64:
case 128:
mips_cache_ops.mco_sdcache_wbinv_all =
r4k_sdcache_wbinv_all_generic;
mips_cache_ops.mco_sdcache_wbinv_range =
r4k_sdcache_wbinv_range_generic;
mips_cache_ops.mco_sdcache_wbinv_range_index =
r4k_sdcache_wbinv_range_index_generic;
mips_cache_ops.mco_sdcache_inv_range =
r4k_sdcache_inv_range_generic;
mips_cache_ops.mco_sdcache_wb_range =
r4k_sdcache_wb_range_generic;
break;
default:
panic("r4k sdcache %d way line size %d\n",
mips_sdcache_ways, mips_sdcache_line_size);
}
break;
default:
panic("r4k sdcache %d way line size %d\n",
mips_sdcache_ways, mips_sdcache_line_size);
}
break;
#endif /* MIPS3 */
default:
panic("can't handle secondary cache on impl 0x%x\n",
MIPS_PRID_IMPL(cpu_id));
}
/*
* Compute the "way mask" for each secondary cache.
*/
if (mips_sdcache_size) {
KASSERT(mips_sdcache_ways != 0);
mips_sdcache_way_size = (mips_sdcache_size / mips_sdcache_ways);
mips_sdcache_way_mask = mips_sdcache_way_size - 1;
}
}
#ifdef MIPS1
#ifdef ENABLE_MIPS_TX3900
/*
* tx3900_get_cache_config:
*
* Fetch cache size information for the TX3900.
*/
void
tx3900_get_cache_config(void)
{
uint32_t config;
config = tx3900_cp0_config_read();
mips_picache_size = R3900_C_SIZE_MIN <<
((config & R3900_CONFIG_ICS_MASK) >> R3900_CONFIG_ICS_SHIFT);
mips_pdcache_size = R3900_C_SIZE_MIN <<
((config & R3900_CONFIG_DCS_MASK) >> R3900_CONFIG_DCS_SHIFT);
}
/*
* tx3920_get_cache_config:
*
* Fetch cache size information for the TX3920.
*/
void
tx3920_get_cache_config(void)
{
/* Size is the same as TX3900. */
tx3900_get_cache_config();
/* Now determine write-through/write-back mode. */
if ((tx3900_cp0_config_read() & R3900_CONFIG_WBON) == 0)
mips_pdcache_write_through = 1;
}
#endif /* ENABLE_MIPS_TX3900 */
#endif /* MIPS1 */
#ifdef MIPS3
/*
* mips3_get_cache_config:
*
* Fetch the cache config information for a MIPS-3 or MIPS-4
* processor (virtually-indexed cache).
*
* NOTE: Fetching the size of the secondary cache is something
* that platform specific code has to do. We'd appreciate it
* if they initialized the size before now.
*
* ALSO NOTE: The number of ways in the cache must already be
* initialized.
*/
void
mips3_get_cache_config(int csizebase)
{
uint32_t config = mips3_cp0_config_read();
mips_picache_size = MIPS3_CONFIG_CACHE_SIZE(config,
MIPS3_CONFIG_IC_MASK, csizebase, MIPS3_CONFIG_IC_SHIFT);
mips_picache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config,
MIPS3_CONFIG_IB);
mips_pdcache_size = MIPS3_CONFIG_CACHE_SIZE(config,
MIPS3_CONFIG_DC_MASK, csizebase, MIPS3_CONFIG_DC_SHIFT);
mips_pdcache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config,
MIPS3_CONFIG_DB);
mips_cache_alias_mask =
((mips_pdcache_size / mips_pdcache_ways) - 1) & ~(PAGE_SIZE - 1);
mips_cache_prefer_mask =
max(mips_pdcache_size, mips_picache_size) - 1;
if ((config & MIPS3_CONFIG_SC) == 0) {
mips_sdcache_line_size = MIPS3_CONFIG_CACHE_L2_LSIZE(config);
if ((config & MIPS3_CONFIG_SS) == 0)
mips_scache_unified = 1;
}
}
#endif /* MIPS3 */
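
As a worked example of the virtual-alias computation in mips3_get_cache_config() above, take a hypothetical 16KB direct-mapped primary D-cache with 4KB pages (illustrative numbers, not read from any particular CPU):

	mips_cache_alias_mask  = ((16384 / 1) - 1) & ~(4096 - 1)
	                       = 0x3fff & ~0x0fff = 0x3000
	mips_cache_prefer_mask = max(16384, 16384) - 1 = 0x3fff
	mips_cache_indexof(va) = va & 0x3000

In other words, virtual address bits 13:12 select the cache color, and two virtual mappings of the same physical page only share cache lines safely when those bits agree.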

@@ -0,0 +1,112 @@
/* $NetBSD: cache_r3k.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <mips/cache.h>
#include <mips/cache_r3k.h>
/*
* Cache operations for R3000-style caches:
*
* - Direct-mapped
* - Write-through
* - Physically indexed, physically tagged
*/
#define round_line(x) (((x) + 31) & ~31)
#define trunc_line(x) ((x) & ~31)
void
r3k_icache_sync_all(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_picache_size;
r3k_picache_do_inv(va, eva);
}
void
r3k_icache_sync_range(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
if ((eva - va) >= mips_picache_size) {
r3k_icache_sync_all();
return;
}
r3k_picache_do_inv(va, eva);
}
void
r3k_pdcache_wbinv_all(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
/* Cache is write-through. */
r3k_pdcache_do_inv(va, eva);
}
void
r3k_pdcache_inv_range(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
if ((eva - va) >= mips_pdcache_size) {
r3k_pdcache_wbinv_all();
return;
}
r3k_pdcache_do_inv(va, eva);
}
void
r3k_pdcache_wb_range(vaddr_t va, vsize_t size)
{
/* Cache is write-through. */
}
#undef round_line
#undef trunc_line
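
As a worked example of the round_line()/trunc_line() idiom above (the same pattern reappears with other line sizes in cache_r4k.c): invalidating 8 bytes at an arbitrary address expands to the enclosing 32-byte-aligned range, here a single line:

	va = 0x80012345, size = 8
	trunc_line(va)        = 0x80012345 & ~31        = 0x80012340
	round_line(va + size) = (0x8001234d + 31) & ~31 = 0x80012360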

@@ -0,0 +1,225 @@
/* $NetBSD: cache_r3k_subr.S,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Digital Equipment Corporation and Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Copyright (C) 1989 Digital Equipment Corporation.
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby granted,
* provided that the above copyright notice appears in all copies.
* Digital Equipment Corporation makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
* v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
* from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
* v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
* from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
* v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
*
* @(#)locore.s 8.5 (Berkeley) 1/4/94
*/
#include <mips/asm.h>
#include <mips/cpuregs.h>
.set noreorder
/*
* r3k_size_cache:
*
* This works by writing marker values at each possible cache
* boundary (from 256KB down to 512 bytes).  Since the cache is
* direct-mapped, addresses that are multiples of the cache size
* alias to cache location zero, so the value found there afterwards
* indicates the size.
*/
LEAF_NOPROFILE(r3k_size_cache)
la v0, 1f
or v0, MIPS_KSEG1_START
j v0 # run uncached
mfc0 v1, MIPS_COP_0_STATUS
1:
mtc0 a0, MIPS_COP_0_STATUS # disable interrupts,
nop # isolating cache
nop
nop
nop
li t1, 512 # minimum cache size
li t2, 256*1024 # maximum cache size
2: sw t2, MIPS_KSEG0_START(t2)
bne t2, t1, 2b
srl t2, 1 # BDSLOT
lw v0, MIPS_KSEG0_START(zero) # magic! cache size is there
li t1, MIPS_KSEG0_START
addu t2, t1, v0
3: sw zero, 0(t1) # clear parity
sw zero, 4(t1)
sw zero, 8(t1)
sw zero, 12(t1)
sb zero, 0(t1) # invalidate
sb zero, 4(t1)
sb zero, 8(t1)
addu t1, 16
bne t1, t2, 3b
sb zero, -4(t1) # BDSLOT
# 4 cycles to wait for pipeline to drain.
nop
nop
nop
nop
mtc0 v1, MIPS_COP_0_STATUS
nop
nop
nop
nop
j ra
nop
END(r3k_size_cache)
/*
* r3k_picache_size:
*
* Determine the size of the R3000 I-cache.
*/
LEAF_NOPROFILE(r3k_picache_size)
li a0, MIPS1_ISOL_CACHES|MIPS1_SWAP_CACHES
j _C_LABEL(r3k_size_cache)
nop
END(r3k_picache_size)
/*
* r3k_pdcache_size:
*
* Determine the size of the R3000 D-cache.
*/
LEAF_NOPROFILE(r3k_pdcache_size)
li a0, MIPS1_ISOL_CACHES
j _C_LABEL(r3k_size_cache)
nop
END(r3k_pdcache_size)
/*
* r3k_picache_do_inv:
*
* Invalidate a range from the R3000 I-cache.
*
* Arguments: a0 starting address
* a1 ending address
*
* Addresses must already be cache-line aligned.
*/
LEAF(r3k_picache_do_inv)
mfc0 t0, MIPS_COP_0_STATUS # Save SR.
mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts.
la v1, 1f
or v1, MIPS_KSEG1_START # Run uncached.
j v1
nop
1:
# Swap the caches (so we store to I$) and isolate them from memory.
li v1, MIPS_SR_ISOL_CACHES | MIPS_SR_SWAP_CACHES
mtc0 v1, MIPS_COP_0_STATUS
nop
nop
1:
addu a0, a0, 4
bne a0, a1, 1b
sb zero, -4(a0)
# 4 cycles to wait for pipeline to drain.
nop
nop
mtc0 t0, MIPS_COP_0_STATUS # Restore SR.
nop
nop
j ra # Jumps back to cached address.
nop
END(r3k_picache_do_inv)
/*
* r3k_pdcache_do_inv:
*
* Invalidate a range from the R3000 D-cache.
*
* Arguments: a0 starting address
* a1 ending address
*
* Addresses must already be cache-line aligned.
*/
LEAF(r3k_pdcache_do_inv)
mfc0 t0, MIPS_COP_0_STATUS # Save SR.
mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts.
nop
# Isolate the caches from memory.
li v1, MIPS_SR_ISOL_CACHES
mtc0 v1, MIPS_COP_0_STATUS
nop
nop
1:
sb zero, 0(a0)
sb zero, 4(a0)
sb zero, 8(a0)
sb zero, 12(a0)
sb zero, 16(a0)
sb zero, 20(a0)
sb zero, 24(a0)
addu a0, 32
bltu a0, a1, 1b
sb zero, -4(a0)
# 4 cycles to wait for pipeline to drain.
nop
nop
mtc0 t0, MIPS_COP_0_STATUS # Restore SR.
nop
nop
j ra # Jumps back to cached address.
nop
END(r3k_pdcache_do_inv)
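
The sizing trick in r3k_size_cache() above may be easier to follow as C. The sketch below is illustrative only: it behaves like the real routine only while the caches are isolated (and, for the I-cache, swapped) via the status register, which is exactly why the real code runs uncached and is written in assembly; the function name is hypothetical.

static int
example_r3k_size_cache(void)
{
	volatile int *kseg0 = (volatile int *)MIPS_KSEG0_START;
	int size;

	/*
	 * With a direct-mapped cache isolated from memory, a store at
	 * offset N lands in cache word (N mod cachesize).  Writing each
	 * candidate size from 256KB down to 512 bytes means the last
	 * store that aliases word 0 is the one whose offset equals the
	 * true cache size, so that value is what word 0 holds at the end.
	 */
	for (size = 256 * 1024; size >= 512; size >>= 1)
		kseg0[size / sizeof(int)] = size;

	return (kseg0[0]);
}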

@@ -0,0 +1,408 @@
/* $NetBSD: cache_r4k.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <mips/cache.h>
#include <mips/cache_r4k.h>
/*
* Cache operations for R4000/R4400-style caches:
*
* - Direct-mapped
* - Write-back
* - Virtually indexed, physically tagged
*
* XXX Does not handle split secondary caches.
*/
#define round_line(x) (((x) + 15) & ~15)
#define trunc_line(x) ((x) & ~15)
__asm(".set mips3");
void
r4k_icache_sync_all_16(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_picache_size;
mips_dcache_wbinv_all();
__asm __volatile("sync");
while (va < eva) {
cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += (32 * 16);
}
}
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
mips_dcache_wb_range(va, (eva - va));
__asm __volatile("sync");
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
va += 16;
}
}
void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
vaddr_t eva;
eva = round_line(va + size);
va = trunc_line(va);
mips_dcache_wbinv_range_index(va, (eva - va));
__asm __volatile("sync");
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += 16;
}
}
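
To make the index-op conversion above concrete: with a hypothetical 16KB direct-mapped primary I-cache, mips_picache_way_mask is 0x3fff, and an arbitrary (possibly unmapped) address is reduced to its cache-index bits and re-expressed as an always-accessible KSEG0 address:

	va                         = 0x7f012345
	va & mips_picache_way_mask = 0x00002345
	MIPS_PHYS_TO_KSEG0(0x2345) = 0x80002345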
void
r4k_pdcache_wbinv_all_16(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
while (va < eva) {
cache_r4k_op_32lines_16(va,
CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += (32 * 16);
}
}
void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va,
CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += 16;
}
}
void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
vaddr_t eva;
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));
eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va,
CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += 16;
}
}
void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += 16;
}
}
void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += (32 * 16);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += 16;
}
}
#undef round_line
#undef trunc_line
#define round_line(x) (((x) + 31) & ~31)
#define trunc_line(x) ((x) & ~31)
void
r4k_sdcache_wbinv_all_32(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_sdcache_size;
while (va < eva) {
cache_r4k_op_32lines_32(va,
CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
va += (32 * 32);
}
}
void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va,
CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
va += 32;
}
}
void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
vaddr_t eva;
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va,
CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
va += 32;
}
}
void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
va += 32;
}
}
void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
va += 32;
}
}
#undef round_line
#undef trunc_line
#define round_line(x) (((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define trunc_line(x) ((x) & ~(mips_sdcache_line_size - 1))
void
r4k_sdcache_wbinv_all_generic(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_sdcache_size;
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
va += mips_sdcache_line_size;
}
}
void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
va += mips_sdcache_line_size;
}
}
void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
vaddr_t eva;
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
eva = round_line(va + size);
va = trunc_line(va);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
va += mips_sdcache_line_size;
}
}
void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
va += mips_sdcache_line_size;
}
}
void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
va += mips_sdcache_line_size;
}
}
#undef round_line
#undef trunc_line

@ -0,0 +1,229 @@
/* $NetBSD: cache_r5900.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <mips/cache.h>
#include <mips/cache_r5900.h>
#include <mips/locore.h>
#include <mips/r5900/cpuregs.h>
/*
* Cache operations for R5900-style caches:
* I-cache 16KB/64B 2-way assoc.
* D-cache 8KB/64B 2-way assoc.
* No L2-cache.
* and a sync.p/sync.l is needed after/before each CACHE instruction.
*
*/
#define round_line(x) (((x) + 63) & ~63)
#define trunc_line(x) ((x) & ~63)
void
r5900_icache_sync_all_64(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + (CACHE_R5900_SIZE_I >> 1); /* 2way */
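/*
 * Note: only half of the total I-cache size is walked here; the
 * 2-way index op used below touches both ways at each index, so one
 * way's worth of indices covers the whole cache.
 */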
int s;
mips_dcache_wbinv_all();
s = _intr_suspend();
while (va < eva) {
cache_r5900_op_4lines_64_2way(va, CACHEOP_R5900_IINV_I);
va += (4 * 64);
}
_intr_resume(s);
}
void
r5900_icache_sync_range_64(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
int s;
va = trunc_line(va);
mips_dcache_wb_range(va, (eva - va));
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64(va, CACHEOP_R5900_HINV_I);
va += (4 * 64);
}
while (va < eva) {
cache_op_r5900_line_64(va, CACHEOP_R5900_HINV_I);
va += 64;
}
_intr_resume(s);
}
void
r5900_icache_sync_range_index_64(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
int s;
va = trunc_line(va);
mips_dcache_wbinv_range_index(va, (eva - va));
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64_2way(va, CACHEOP_R5900_IINV_I);
va += (4 * 64);
}
while (va < eva) {
/* way 0 */
cache_op_r5900_line_64(va, CACHEOP_R5900_IINV_I);
/* way 1 */
cache_op_r5900_line_64(va + 1, CACHEOP_R5900_IINV_I);
va += 64;
}
_intr_resume(s);
}
void
r5900_pdcache_wbinv_all_64(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + (CACHE_R5900_SIZE_D >> 1); /* 2way */
int s;
s = _intr_suspend();
while (va < eva) {
cache_r5900_op_4lines_64_2way(va, CACHEOP_R5900_IWBINV_D);
va += (4 * 64);
}
_intr_resume(s);
}
void
r5900_pdcache_wbinv_range_64(vaddr_t va, vsize_t size)
{
vaddr_t eva;
int s;
eva = round_line(va + size);
va = trunc_line(va);
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64(va, CACHEOP_R5900_HWBINV_D);
va += (4 * 64);
}
while (va < eva) {
cache_op_r5900_line_64(va, CACHEOP_R5900_HWBINV_D);
va += 64;
}
_intr_resume(s);
}
void
r5900_pdcache_wbinv_range_index_64(vaddr_t va, vsize_t size)
{
vaddr_t eva;
int s;
eva = round_line(va + size);
va = trunc_line(va);
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64_2way(va, CACHEOP_R5900_IWBINV_D);
va += (4 * 64);
}
while (va < eva) {
/* way 0 */
cache_op_r5900_line_64(va, CACHEOP_R5900_IWBINV_D);
/* way 1 */
cache_op_r5900_line_64(va + 1, CACHEOP_R5900_IWBINV_D);
va += 64;
}
_intr_resume(s);
}
void
r5900_pdcache_inv_range_64(vaddr_t va, vsize_t size)
{
vaddr_t eva;
int s;
eva = round_line(va + size);
va = trunc_line(va);
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64(va, CACHEOP_R5900_HINV_D);
va += (4 * 64);
}
while (va < eva) {
cache_op_r5900_line_64(va, CACHEOP_R5900_HINV_D);
va += 64;
}
_intr_resume(s);
}
void
r5900_pdcache_wb_range_64(vaddr_t va, vsize_t size)
{
vaddr_t eva;
int s;
eva = round_line(va + size);
va = trunc_line(va);
s = _intr_suspend();
while ((eva - va) >= (4 * 64)) {
cache_r5900_op_4lines_64(va, CACHEOP_R5900_HWB_D);
va += (4 * 64);
}
while (va < eva) {
cache_op_r5900_line_64(va, CACHEOP_R5900_HWB_D);
va += 64;
}
_intr_resume(s);
}

@ -0,0 +1,450 @@
/* $NetBSD: cache_r5k.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <mips/cache.h>
#include <mips/cache_r4k.h>
#include <mips/locore.h>
/*
* Cache operations for R5000-style caches:
*
* - 2-way set-associative
* - Write-back
* - Virtually indexed, physically tagged
*
* Since the R4600 is so similar (2-way set-associative, 32b/l),
* we handle that here, too. Note for R4600, we have to work
* around some chip bugs. From the v1.7 errata:
*
* 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
* Hit_Invalidate_D and Create_Dirty_Excl_D should only be
* executed if there is no other dcache activity. If the dcache is
* accessed for another instruction immediately preceding when these
* cache instructions are executing, it is possible that the dcache
* tag match outputs used by these cache instructions will be
* incorrect. These cache instructions should be preceded by at least
* four instructions that are not any kind of load or store
* instruction.
*
* ...and from the v2.0 errata:
*
* The CACHE instructions Hit_Writeback_Inv_D, Hit_Writeback_D,
* Hit_Invalidate_D and Create_Dirty_Exclusive_D will only operate
* correctly if the internal data cache refill buffer is empty. These
* CACHE instructions should be separated from any potential data cache
* miss by a load instruction to an uncached address to empty the response
* buffer.
*
* XXX Does not handle split secondary caches.
*/
#define round_line(x) (((x) + 31) & ~31)
#define trunc_line(x) ((x) & ~31)
__asm(".set mips3");
void
r5k_icache_sync_all_32(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_picache_size;
/*
* Since we're hitting the whole thing, we don't have to
* worry about the 2 different "ways".
*/
mips_dcache_wbinv_all();
__asm __volatile("sync");
while (va < eva) {
cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += (32 * 32);
}
}
void
r5k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
mips_dcache_wb_range(va, (eva - va));
__asm __volatile("sync");
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
va += 32;
}
}
void
r5k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
vaddr_t w2va, eva;
eva = round_line(va + size);
va = trunc_line(va);
mips_dcache_wbinv_range_index(va, (eva - va));
__asm __volatile("sync");
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
eva = round_line(va + size);
va = trunc_line(va);
w2va = va + mips_picache_way_size;
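/*
 * w2va is the alias for the same cache index in the second way; the
 * 2-way macro and the paired line ops below issue Index ops on both
 * ways for every index in the range.
 */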
while ((eva - va) >= (16 * 32)) {
cache_r4k_op_16lines_32_2way(va, w2va,
CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += (16 * 32);
w2va += (16 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
cache_op_r4k_line(w2va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
va += 32;
w2va += 32;
}
}
void
r5k_pdcache_wbinv_all_32(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
/*
* Since we're hitting the whole thing, we don't have to
* worry about the 2 different "ways".
*/
while (va < eva) {
cache_r4k_op_32lines_32(va,
CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += (32 * 32);
}
}
void
r4600v1_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
/*
* This is pathetically slow, but the chip bug is pretty
* nasty, and we hope that not too many v1.x R4600s are
* around.
*/
va = trunc_line(va);
/*
* To make this a little less painful, just hit the entire
* cache if we have a range >= the cache size.
*/
if ((eva - va) >= mips_pdcache_size) {
r5k_pdcache_wbinv_all_32();
return;
}
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
while (va < eva) {
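/*
 * Per the v1.7 errata item quoted above, pad with four
 * non-load/store instructions before each Hit op.
 */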
__asm __volatile("nop; nop; nop; nop;");
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r4600v2_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
va = trunc_line(va);
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
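/*
 * Per the v2.0 errata quoted above, an uncached dummy load (from
 * KSEG1) is issued before each burst of Hit ops to ensure the
 * data cache refill buffer is empty.
 */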
while ((eva - va) >= (32 * 32)) {
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
cache_r4k_op_32lines_32(va,
CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += (32 * 32);
}
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r5k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va,
CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
va += 32;
}
}
void
r5k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
vaddr_t w2va, eva;
/*
* Since we're doing Index ops, we expect to not be able
* to access the address we've been given. So, get the
* bits that determine the cache index, and make a KSEG0
* address out of them.
*/
va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask);
eva = round_line(va + size);
va = trunc_line(va);
w2va = va + mips_pdcache_way_size;
while ((eva - va) >= (16 * 32)) {
cache_r4k_op_16lines_32_2way(va, w2va,
CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += (16 * 32);
w2va += (16 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
cache_op_r4k_line(w2va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
va += 32;
w2va += 32;
}
}
void
r4600v1_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
/*
* This is pathetically slow, but the chip bug is pretty
* nasty, and we hope that not too many v1.x R4600s are
* around.
*/
va = trunc_line(va);
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
while (va < eva) {
__asm __volatile("nop; nop; nop; nop;");
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r4600v2_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
va = trunc_line(va);
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
/*
* Between blasts of big cache chunks, give interrupts
* a chance to get through.
*/
while ((eva - va) >= (32 * 32)) {
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += (32 * 32);
}
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r5k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
va += 32;
}
}
void
r4600v1_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
/*
* This is pathetically slow, but the chip bug is pretty
* nasty, and we hope that not too many v1.x R4600s are
* around.
*/
va = trunc_line(va);
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
while (va < eva) {
__asm __volatile("nop; nop; nop; nop;");
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r4600v2_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
uint32_t ostatus;
va = trunc_line(va);
ostatus = mips_cp0_status_read();
mips_cp0_status_write(ostatus & ~MIPS_SR_INT_IE);
/*
* Between blasts of big cache chunks, give interrupts
* a chance to get through.
*/
while ((eva - va) >= (32 * 32)) {
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += (32 * 32);
}
(void) *(__volatile int *)MIPS_PHYS_TO_KSEG1(0);
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += 32;
}
mips_cp0_status_write(ostatus);
}
void
r5k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 32)) {
cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += (32 * 32);
}
while (va < eva) {
cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
va += 32;
}
}
#undef round_line
#undef trunc_line

@ -0,0 +1,282 @@
/* $NetBSD: cache_tx39.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <mips/cache.h>
#include <mips/cache_tx39.h>
#include <mips/locore.h>
/*
* Cache operations for TX3900/TX3920-style caches:
*
* - I-cache direct-mapped (TX3900) or 2-way set-associative (TX3920)
* - D-cache 2-way set-associative
* - Write-through (TX3900, TX3920) or write-back (TX3920)
* - Physically indexed, physically tagged
*
* XXX THIS IS NOT YET COMPLETE.
*/
#define round_line(x) (((x) + 15) & ~15)
#define trunc_line(x) ((x) & ~15)
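/*
 * Naming convention used below: the "wt" variants serve the
 * write-through configurations, the "wb" variants the TX3920
 * write-back configuration; the numeric suffix is the line size
 * the routine assumes.
 */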
void
tx3900_icache_sync_all_16(void)
{
tx3900_icache_do_inv_index_16(MIPS_PHYS_TO_KSEG0(0),
MIPS_PHYS_TO_KSEG0(mips_picache_size));
}
void
tx3900_icache_sync_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
if ((eva - va) >= mips_picache_size) {
/* Just hit the whole thing. */
va = MIPS_PHYS_TO_KSEG0(0);
eva = MIPS_PHYS_TO_KSEG0(mips_picache_size);
}
tx3900_icache_do_inv_index_16(va, eva);
}
#undef round_line
#undef trunc_line
#define round_line(x) (((x) + 3) & ~3)
#define trunc_line(x) ((x) & ~3)
static int tx3900_dummy_buffer[R3900_C_SIZE_MAX / sizeof(int)];
void
tx3900_pdcache_wbinv_all_4(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
__volatile int *p;
/*
* No Index Invalidate for the TX3900 -- have to execute a
* series of load instructions from the dummy buffer, instead.
*/
p = tx3900_dummy_buffer;
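/*
 * Each iteration of the loop below issues 32 word-sized loads --
 * 32 lines at the 4-byte D-cache line size -- matching the
 * 32 * 4 byte step of va.
 */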
while (va < eva) {
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
(void) *p++; (void) *p++; (void) *p++; (void) *p++;
va += (32 * 4);
}
}
void
tx3900_pdcache_inv_range_4(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 4)) {
cache_tx39_op_32lines_4(va,
CACHE_TX39_D|CACHEOP_TX3900_HIT_INV);
va += (32 * 4);
}
while (va < eva) {
cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3900_HIT_INV);
va += 4;
}
}
void
tx3900_pdcache_wb_range_4(vaddr_t va, vsize_t size)
{
/* Cache is write-through. */
}
#undef round_line
#undef trunc_line
#define round_line(x) (((x) + 15) & ~15)
#define trunc_line(x) ((x) & ~15)
void
tx3920_icache_sync_all_16wb(void)
{
mips_dcache_wbinv_all();
__asm __volatile("sync");
tx3920_icache_do_inv_16(MIPS_PHYS_TO_KSEG0(0),
MIPS_PHYS_TO_KSEG0(mips_picache_size));
}
void
tx3920_icache_sync_range_16wt(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
tx3920_icache_do_inv_16(va, eva);
}
void
tx3920_icache_sync_range_16wb(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
mips_dcache_wb_range(va, (eva - va));
__asm __volatile("sync");
tx3920_icache_do_inv_16(va, eva);
}
void
tx3920_pdcache_wbinv_all_16wt(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
/*
* Since we're hitting the whole thing, we don't have to
* worry about the 2 different "ways".
*/
while (va < eva) {
cache_tx39_op_32lines_16(va,
CACHE_TX39_D|CACHEOP_TX3920_INDEX_INV);
va += (32 * 16);
}
}
void
tx3920_pdcache_wbinv_all_16wb(void)
{
vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
vaddr_t eva = va + mips_pdcache_size;
/*
* Since we're hitting the whole thing, we don't have to
* worry about the 2 different "ways".
*/
while (va < eva) {
cache_tx39_op_32lines_16(va,
CACHE_TX39_D|CACHEOP_TX3920_INDEX_WB_INV);
va += (32 * 16);
}
}
void
tx3920_pdcache_wbinv_range_16wb(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_tx39_op_32lines_16(va,
CACHE_TX39_D|CACHEOP_TX3920_HIT_WB_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3920_HIT_WB_INV);
va += 16;
}
}
void
tx3920_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_tx39_op_32lines_16(va,
CACHE_TX39_D|CACHEOP_TX3920_HIT_INV);
va += (32 * 16);
}
while (va < eva) {
cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3920_HIT_INV);
va += 16;
}
}
void
tx3920_pdcache_wb_range_16wt(vaddr_t va, vsize_t size)
{
/* Cache is write-through. */
}
void
tx3920_pdcache_wb_range_16wb(vaddr_t va, vsize_t size)
{
vaddr_t eva = round_line(va + size);
va = trunc_line(va);
while ((eva - va) >= (32 * 16)) {
cache_tx39_op_32lines_16(va,
CACHE_TX39_D|CACHEOP_TX3920_HIT_WB);
va += (32 * 16);
}
while (va < eva) {
cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3920_HIT_WB);
va += 16;
}
}

@ -0,0 +1,142 @@
/* $NetBSD: cache_tx39_subr.S,v 1.2 2001/11/14 18:26:23 thorpej Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by UCHIYAMA Yasushi; and by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* XXX THIS IS NOT YET COMPLETE.
*/
#include <mips/asm.h>
#include <mips/cpuregs.h>
#include <mips/r3900regs.h>
#include <mips/cache_tx39.h>
.set noreorder
/*
* tx3900_icache_do_inv_index_16:
*
* Do an Index Invalidate of the I-cache for the specified
* range.
*
* Arguments: a0 starting address
* a1 ending address
*
* Addresses must already be cache-line aligned.
*/
LEAF(tx3900_icache_do_inv_index_16)
mfc0 t0, MIPS_COP_0_STATUS # Save SR.
nop
mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts.
nop
# Disable the I-cache.
li t1, ~R3900_CONFIG_ICE
mfc0 t2, R3900_COP_0_CONFIG
and t1, t1, t2
nop
mtc0 t1, R3900_COP_0_CONFIG
# Break the I-stream.
j 1f
nop
.set push
.set mips3
1:
cache CACHE_TX39_I|CACHEOP_TX3900_INDEX_INV, 0(a0)
addiu a0, a0, 16
bltu a0, a1, 1b
nop
.set pop
# Re-enable I-cache.
nop
mtc0 t2, R3900_COP_0_CONFIG
nop
mtc0 t0, MIPS_COP_0_STATUS # Restore SR.
j ra
nop
END(tx3900_icache_do_inv_index_16)
/*
* tx3920_icache_do_inv_16:
*
* Do a Hit Invalidate of the I-cache for the specified
* range.
*
* Arguments: a0 starting address
* a1 ending address
*
* Addresses must already be cache-line aligned.
*/
LEAF(tx3920_icache_do_inv_16)
mfc0 t0, MIPS_COP_0_STATUS # Save SR.
nop
mtc0 zero, MIPS_COP_0_STATUS # Disable interrupts.
nop
# Disable the I-cache.
li t1, ~R3900_CONFIG_ICE
mfc0 t2, R3900_COP_0_CONFIG
and t1, t1, t2
nop
mtc0 t1, R3900_COP_0_CONFIG
# Break the I-stream.
j 1f
nop
.set push
.set mips3
1:
cache CACHE_TX39_I|CACHEOP_TX3920_HIT_INV, 0(a0)
addiu a0, a0, 16
bltu a0, a1, 1b
nop
.set pop
# Re-enable I-cache.
nop
mtc0 t2, R3900_COP_0_CONFIG
nop
mtc0 t0, MIPS_COP_0_STATUS # Restore SR.
j ra
nop
END(tx3920_icache_do_inv_16)

@ -0,0 +1,130 @@
/* $NetBSD: ip22_cache.S,v 1.2 2001/11/14 18:26:24 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Code to manipulate the L2 SysAD cache on some (R4600/R5000-based)
* SGI IP-22 (Indy) systems.
*/
#include <mips/asm.h>
#include <mips/cpuregs.h>
.set noreorder
.set mips3
#define PROLOGUE \
mfc0 t0, MIPS_COP_0_STATUS ; \
nop ; \
nop ; \
nop ; \
li v0, ~MIPS_SR_INT_IE /* ints off */ ; \
and t1, v0, t0 ; \
or t1, MIPS3_SR_KX /* enable 64-bit */ ; \
mtc0 t1, MIPS_COP_0_STATUS ; \
nop ; \
nop ; \
nop ; \
nop
#define EPILOGUE \
mtc0 t0, MIPS_COP_0_STATUS ; \
nop ; \
nop ; \
nop ; \
nop
/*
* ip22_sdcache_do_wbinv:
*
* Write-back and invalidate the cache lines [a0..a1].
*/
LEAF_NOPROFILE(ip22_sdcache_do_wbinv)
PROLOGUE
/*
* Translate the cache indices into the magic cache
* flush space.
*/
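/*
 * The flush-space base below is a 64-bit address; this is why the
 * PROLOGUE above sets MIPS3_SR_KX to enable 64-bit addressing.
 */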
dli v0, 0x9000000080000000 /* base of cache flush space */
or a0, v0 /* first */
or a1, v0 /* last */
/*
* Flush the cache by performing a store into the
* magic cache flush space.
*/
1: sw zero, 0(a0)
bne a0, a1, 1b
daddu a0, 32
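/* The daddu above sits in the bne delay slot; each store steps 32 bytes (assumed to be the SysAD line granularity). */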
EPILOGUE
j ra
nop
END(ip22_sdcache_do_wbinv)
LEAF_NOPROFILE(ip22_sdcache_enable)
PROLOGUE
li a0, 0x1
dsll a0, 31
lui a1, 0x9000
dsll32 a1, 0
or a0, a1, a0
sb zero, 0(a0)
EPILOGUE
j ra
nop
END(ip22_sdcache_enable)
LEAF_NOPROFILE(ip22_sdcache_disable)
PROLOGUE
li a0, 0x1
dsll a0, 31
lui a1, 0x9000
dsll32 a1, 0
or a0, a1, a0
sh zero, 0(a0)
EPILOGUE
j ra
nop
END(ip22_sdcache_disable)