Remove the cache_flush() trampoline; it's no longer directly cross-called.

Instead, implement the MP parts in terms of cross-callable vcache_flush_range()
function.
This commit is contained in:
pk 2004-04-17 23:45:40 +00:00
parent 35b9f3ec72
commit 38bdc6fab2
5 changed files with 129 additions and 136 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.66 2004/01/04 11:33:31 jdolecek Exp $ */
/* $NetBSD: cpu.h,v 1.67 2004/04/17 23:45:40 pk Exp $ */
/*
* Copyright (c) 1992, 1993
@ -218,8 +218,6 @@ void zs_kgdb_init(void);
#endif
/* fb.c */
void fb_unblank(void);
/* cache.c */
void cache_flush(caddr_t, u_int);
/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);

View File

@ -1,4 +1,4 @@
/* $NetBSD: cache.c,v 1.85 2004/04/17 10:13:13 pk Exp $ */
/* $NetBSD: cache.c,v 1.86 2004/04/17 23:45:40 pk Exp $ */
/*
* Copyright (c) 1996
@ -59,7 +59,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.85 2004/04/17 10:13:13 pk Exp $");
__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.86 2004/04/17 23:45:40 pk Exp $");
#include "opt_multiprocessor.h"
#include "opt_sparc_arch.h"
@ -339,15 +339,6 @@ turbosparc_cache_enable()
#endif /* SUN4M || SUN4D */
/* XXX - should inline */
/*
 * cache_flush(): generic entry point for flushing a virtual address
 * range.  Trampolines to the per-CPU flush function, supplying the
 * current MMU context as the third argument.
 * (This commit removes the trampoline; callers now use the
 * cpuinfo.cache_flush pointer directly.)
 */
void
cache_flush(base, len)
caddr_t base;	/* start of virtual range to flush */
u_int len;	/* length of the range in bytes */
{
/* Dispatch to the CPU-model-specific flush with the live context. */
cpuinfo.cache_flush(base, len, getcontext());
}
/*
* Note: on the sun4 & sun4c, the cache flush functions ignore the `ctx'
* parameter. This can be done since the pmap operations that need
@ -503,10 +494,9 @@ sun4_vcache_flush_page_hw(va, ctx)
#define CACHE_FLUSH_MAGIC (CACHEINFO.c_totalsize / PAGE_SIZE)
void
sun4_cache_flush(base, len, ctx)
sun4_cache_flush(base, len)
caddr_t base;
u_int len;
int ctx;
{
int i, ls, baseoff;
char *p;
@ -540,7 +530,7 @@ sun4_cache_flush(base, len, ctx)
cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
#endif
if (i < CACHE_FLUSH_MAGIC) {
if (__predict_true(i < CACHE_FLUSH_MAGIC)) {
/* cache_flush_page, for i pages */
p = (char *)((int)base & ~baseoff);
if (CACHEINFO.c_hwflush) {
@ -557,19 +547,20 @@ sun4_cache_flush(base, len, ctx)
baseoff = (u_int)base & SGOFSET;
i = (baseoff + len + SGOFSET) >> SGSHIFT;
if (i == 1)
sun4_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), ctx);
else {
if (HASSUN4_MMU3L) {
baseoff = (u_int)base & RGOFSET;
i = (baseoff + len + RGOFSET) >> RGSHIFT;
if (i == 1)
sun4_vcache_flush_region(VA_VREG(base), ctx);
else
sun4_vcache_flush_context(ctx);
} else
sun4_vcache_flush_context(ctx);
if (__predict_true(i == 1)) {
sun4_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), 0);
return;
}
if (HASSUN4_MMU3L) {
baseoff = (u_int)base & RGOFSET;
i = (baseoff + len + RGOFSET) >> RGSHIFT;
if (i == 1)
sun4_vcache_flush_region(VA_VREG(base), 0);
else
sun4_vcache_flush_context(0);
} else
sun4_vcache_flush_context(0);
}
@ -694,7 +685,7 @@ srmmu_vcache_flush_page(va, ctx)
* functions will not always cross flush it in the MP case (because the
* context may not be active on this CPU), we flush the TLB entry now.
*/
if (cpuinfo.cpu_type == CPUTYP_HS_MBUS)
/*if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) -- more work than it's worth */
sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
#endif
@ -711,56 +702,60 @@ srmmu_cache_flush_all()
srmmu_vcache_flush_context(0);
}
/*
 * Flush the virtual cache for a range of addresses [va, va+len) in
 * context `ctx', one cache line at a time.
 *
 * This is the cross-callable helper used by srmmu_cache_flush() on MP
 * systems: it temporarily switches to the target context (with traps
 * disabled so nothing can fault or reschedule while we are borrowing
 * the context register) and restores the original context afterwards.
 */
void
srmmu_vcache_flush_range(int va, int len, int ctx)
{
int i, ls, offset;
char *p;
int octx;	/* context to restore on exit */
/* Compute # of cache lines covered by this range */
ls = CACHEINFO.c_linesize;
offset = va & (ls - 1);
/* round up: partial first/last lines still count as whole lines */
i = (offset + len + ls - 1) >> CACHEINFO.c_l2linesize;
/* align the starting pointer down to a line boundary */
p = (char *)(va & -ls);
octx = getcontext4m();
trapoff();
setcontext4m(ctx);
/* flush each covered line via the I/D cache line-flush ASI */
for (; --i >= 0; p += ls)
sta(p, ASI_IDCACHELFP, 0);
#if defined(MULTIPROCESSOR)
if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
/*
 * See hypersparc comment in srmmu_vcache_flush_page().
 */
/* also drop the TLB entries for every page touched by the range */
offset = va & PGOFSET;
i = (offset + len + PGOFSET) >> PGSHIFT;
va = va & ~PGOFSET;
for (; --i >= 0; va += PAGE_SIZE)
sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
}
#endif
setcontext4m(octx);
trapon();
return;
}
/*
* Flush a range of virtual addresses (in the current context).
* The first byte is at (base&~PGOFSET) and the last one is just
* before byte (base+len).
*
* We choose the best of (context,segment,page) here.
*/
#define CACHE_FLUSH_MAGIC (CACHEINFO.c_totalsize / PAGE_SIZE)
void
srmmu_cache_flush(base, len, ctx)
srmmu_cache_flush(base, len)
caddr_t base;
u_int len;
int ctx;
{
int i, ls, baseoff;
char *p;
int ctx = getcontext4m();
int i, baseoff;
if (len < PAGE_SIZE) {
int octx;
/* less than a page, flush just the covered cache lines */
ls = CACHEINFO.c_linesize;
baseoff = (int)base & (ls - 1);
i = (baseoff + len + ls - 1) >> CACHEINFO.c_l2linesize;
p = (char *)((int)base & -ls);
octx = getcontext4m();
trapoff();
setcontext4m(ctx);
for (; --i >= 0; p += ls)
sta(p, ASI_IDCACHELFP, 0);
#if defined(MULTIPROCESSOR)
if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
/*
* See hypersparc comment in srmmu_vcache_flush_page().
* Just flush both possibly touched pages
* from the TLB.
*/
int va = (int)base & ~0xfff;
sta(va | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
sta((va+4096) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0);
}
#endif
setcontext4m(octx);
trapon();
return;
}
/*
* Figure out how much must be flushed.
* Figure out the most efficient way to flush.
*
* If we need to do CACHE_FLUSH_MAGIC pages, we can do a segment
* in the same number of loop iterations. We can also do the whole
@ -775,62 +770,54 @@ srmmu_cache_flush(base, len, ctx)
* segments), but I did not want to debug that now and it is
* not clear it would help much.
*
* (XXX the magic number 16 is now wrong, must review policy)
*/
baseoff = (int)base & PGOFSET;
i = (baseoff + len + PGOFSET) >> PGSHIFT;
cachestats.cs_nraflush++;
#ifdef notyet
cachestats.cs_ra[min(i, MAXCACHERANGE)]++;
#endif
if (i < CACHE_FLUSH_MAGIC) {
int octx;
/* cache_flush_page, for i pages */
p = (char *)((int)base & ~baseoff);
ls = CACHEINFO.c_linesize;
i <<= PGSHIFT - CACHEINFO.c_l2linesize;
octx = getcontext4m();
trapoff();
setcontext4m(ctx);
for (; --i >= 0; p += ls)
sta(p, ASI_IDCACHELFP, 0);
if (__predict_true(len < CACHEINFO.c_totalsize)) {
#if defined(MULTIPROCESSOR)
if (cpuinfo.cpu_type == CPUTYP_HS_MBUS) {
/* Just flush the segment(s) from the TLB */
/* XXX - assumes CACHE_FLUSH_MAGIC <= NBPSG */
int va = (int)base & ~SGOFSET;
sta(va | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0);
sta((va+NBPSG) | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0);
}
FXCALL3(cpuinfo.sp_vcache_flush_range,
cpuinfo.ft_vcache_flush_range,
(int)base, len, ctx, CPUSET_ALL);
#else
cpuinfo.sp_vcache_flush_range((int)base, len, ctx);
#endif
setcontext4m(octx);
trapon();
return;
}
cachestats.cs_nraflush++;
baseoff = (u_int)base & SGOFSET;
i = (baseoff + len + SGOFSET) >> SGSHIFT;
if (i == 1)
if (__predict_true(i == 1)) {
#if defined(MULTIPROCESSOR)
FXCALL3(cpuinfo.sp_vcache_flush_segment,
cpuinfo.ft_vcache_flush_segment,
VA_VREG(base), VA_VSEG(base), ctx, CPUSET_ALL);
#else
srmmu_vcache_flush_segment(VA_VREG(base), VA_VSEG(base), ctx);
else {
baseoff = (u_int)base & RGOFSET;
i = (baseoff + len + RGOFSET) >> RGSHIFT;
p = (char *)VA_VREG(base);
while (i--) {
srmmu_vcache_flush_region((int)p, ctx);
p += NBPRG;
}
#endif
return;
}
baseoff = (u_int)base & RGOFSET;
i = (baseoff + len + RGOFSET) >> RGSHIFT;
while (i--) {
#if defined(MULTIPROCESSOR)
FXCALL2(cpuinfo.sp_vcache_flush_region,
cpuinfo.ft_vcache_flush_region,
VA_VREG(base), ctx, CPUSET_ALL);
#else
srmmu_vcache_flush_region(VA_VREG(base), ctx);
#endif
base += NBPRG;
}
}
/*
 * Run-time tunable threshold used by ms1_cache_flush() below.
 * NOTE(review): set to 0 here; exact semantics of the threshold are not
 * visible in this hunk -- confirm against ms1_cache_flush().
 */
int ms1_cacheflush_magic = 0;
#define MS1_CACHEFLUSH_MAGIC ms1_cacheflush_magic
void
ms1_cache_flush(base, len, ctx)
ms1_cache_flush(base, len)
caddr_t base;
u_int len;
int ctx;
{
/*
* Although physically tagged, we still need to flush the
@ -924,10 +911,9 @@ cypress_cache_flush_all()
void
viking_cache_flush(base, len, ctx)
viking_cache_flush(base, len)
caddr_t base;
u_int len;
int ctx;
{
}
@ -1078,14 +1064,4 @@ smp_vcache_flush_context(ctx)
FXCALL1(cpuinfo.sp_vcache_flush_context, cpuinfo.ft_vcache_flush_context,
ctx, CPUSET_ALL);
}
/*
 * MP wrapper: broadcast a cache_flush of [va, va+size) in context `ctx'
 * to all CPUs via a cross call.
 * (This commit removes it; the MP path is now built on the
 * cross-callable vcache_flush_range() instead.)
 */
void
smp_cache_flush(va, size, ctx)
caddr_t va;	/* start of virtual range to flush */
u_int size;	/* length of the range in bytes */
int ctx;	/* MMU context the range belongs to */
{
/* run the single-processor flush on every CPU in the system */
XCALL3(cpuinfo.sp_cache_flush, va, size, ctx, CPUSET_ALL);
}
#endif /* MULTIPROCESSOR */

View File

@ -1,4 +1,4 @@
/* $NetBSD: cache.h,v 1.30 2004/04/17 10:13:13 pk Exp $ */
/* $NetBSD: cache.h,v 1.31 2004/04/17 23:45:40 pk Exp $ */
/*
* Copyright (c) 1996
@ -169,13 +169,14 @@ void sun4_vcache_flush_region(int, int); /* flush region in cur ctx */
void sun4_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void sun4_vcache_flush_page(int va, int); /* flush page in cur ctx */
void sun4_vcache_flush_page_hw(int va, int); /* flush page in cur ctx */
void sun4_cache_flush(caddr_t, u_int, int); /* flush region */
void sun4_cache_flush(caddr_t, u_int); /* flush range */
void srmmu_vcache_flush_context(int); /* flush current context */
void srmmu_vcache_flush_region(int, int); /* flush region in cur ctx */
void srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void srmmu_vcache_flush_page(int va, int); /* flush page in cur ctx */
void srmmu_cache_flush(caddr_t, u_int, int); /* flush region */
void srmmu_vcache_flush_range(int, int, int);
void srmmu_cache_flush(caddr_t, u_int); /* flush range */
/* `Fast trap' versions for use in cross-call cache flushes on MP systems */
#if defined(MULTIPROCESSOR)
@ -183,15 +184,17 @@ void ft_srmmu_vcache_flush_context(int); /* flush current context */
void ft_srmmu_vcache_flush_region(int, int); /* flush region in cur ctx */
void ft_srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void ft_srmmu_vcache_flush_page(int va, int);/* flush page in cur ctx */
void ft_srmmu_vcache_flush_range(int, int, int);/* flush range in cur ctx */
#else
#define ft_srmmu_vcache_flush_context 0
#define ft_srmmu_vcache_flush_region 0
#define ft_srmmu_vcache_flush_segment 0
#define ft_srmmu_vcache_flush_page 0
#define ft_srmmu_vcache_flush_range 0
#endif /* MULTIPROCESSOR */
void ms1_cache_flush(caddr_t, u_int, int);
void viking_cache_flush(caddr_t, u_int, int);
void ms1_cache_flush(caddr_t, u_int);
void viking_cache_flush(caddr_t, u_int);
void viking_pcache_flush_page(paddr_t, int);
void srmmu_pcache_flush_line(int, int);
void hypersparc_pure_vcache_flush(void);
@ -207,7 +210,8 @@ extern void sparc_noop(void);
#define noop_vcache_flush_region (void (*)(int,int))sparc_noop
#define noop_vcache_flush_segment (void (*)(int,int,int))sparc_noop
#define noop_vcache_flush_page (void (*)(int,int))sparc_noop
#define noop_cache_flush (void (*)(caddr_t,u_int,int))sparc_noop
#define noop_vcache_flush_range (void (*)(int,int,int))sparc_noop
#define noop_cache_flush (void (*)(caddr_t,u_int))sparc_noop
#define noop_pcache_flush_page (void (*)(paddr_t,int))sparc_noop
#define noop_pure_vcache_flush (void (*)(void))sparc_noop
#define noop_cache_flush_all (void (*)(void))sparc_noop
@ -220,13 +224,13 @@ void smp_vcache_flush_context(int); /* flush current context */
void smp_vcache_flush_region(int,int); /* flush region in cur ctx */
void smp_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void smp_vcache_flush_page(int va,int); /* flush page in cur ctx */
void smp_cache_flush(caddr_t, u_int, int); /* flush region */
#define cache_flush_page(va,ctx) cpuinfo.vcache_flush_page(va,ctx)
#define cache_flush_segment(vr,vs,ctx) cpuinfo.vcache_flush_segment(vr,vs,ctx)
#define cache_flush_region(vr,ctx) cpuinfo.vcache_flush_region(vr,ctx)
#define cache_flush_context(ctx) cpuinfo.vcache_flush_context(ctx)
#define cache_flush(va,len) cpuinfo.cache_flush(va,len)
#define pcache_flush_page(pa,flag) cpuinfo.pcache_flush_page(pa,flag)

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.184 2004/04/17 10:01:11 pk Exp $ */
/* $NetBSD: cpu.c,v 1.185 2004/04/17 23:45:40 pk Exp $ */
/*
* Copyright (c) 1996
@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.184 2004/04/17 10:01:11 pk Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.185 2004/04/17 23:45:40 pk Exp $");
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
@ -529,7 +529,6 @@ cpu_attach(struct cpu_softc *sc, int node, int mid)
continue;
#define SET_CACHE_FUNC(x) \
if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
SET_CACHE_FUNC(cache_flush);
SET_CACHE_FUNC(vcache_flush_page);
SET_CACHE_FUNC(vcache_flush_segment);
SET_CACHE_FUNC(vcache_flush_region);
@ -1061,6 +1060,7 @@ struct module_info module_sun4 = {
sun4_vcache_flush_segment, NULL,
sun4_vcache_flush_region, NULL,
sun4_vcache_flush_context, NULL,
NULL, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
@ -1087,7 +1087,7 @@ getcacheinfo_sun4(sc, node)
ci->c_nlines = 0;
/* Override cache flush functions */
sc->sp_cache_flush = noop_cache_flush;
sc->cache_flush = noop_cache_flush;
sc->sp_vcache_flush_page = noop_vcache_flush_page;
sc->sp_vcache_flush_segment = noop_vcache_flush_segment;
sc->sp_vcache_flush_region = noop_vcache_flush_region;
@ -1186,6 +1186,7 @@ struct module_info module_sun4c = {
sun4_vcache_flush_segment, NULL,
sun4_vcache_flush_region, NULL,
sun4_vcache_flush_context, NULL,
NULL, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
@ -1393,6 +1394,7 @@ struct module_info module_ms1 = {
noop_vcache_flush_segment, NULL,
noop_vcache_flush_region, NULL,
noop_vcache_flush_context, NULL,
noop_vcache_flush_range, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
ms1_cache_flush_all,
@ -1438,6 +1440,7 @@ struct module_info module_ms2 = { /* UNTESTED */
srmmu_vcache_flush_segment, NULL,
srmmu_vcache_flush_region, NULL,
srmmu_vcache_flush_context, NULL,
srmmu_vcache_flush_range, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
@ -1464,6 +1467,7 @@ struct module_info module_swift = {
srmmu_vcache_flush_segment, NULL,
srmmu_vcache_flush_region, NULL,
srmmu_vcache_flush_context, NULL,
srmmu_vcache_flush_range, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
@ -1507,6 +1511,7 @@ struct module_info module_hypersparc = {
srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
noop_pcache_flush_page,
hypersparc_pure_vcache_flush,
hypersparc_cache_flush_all,
@ -1573,6 +1578,7 @@ struct module_info module_cypress = {
srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
noop_pcache_flush_page,
noop_pure_vcache_flush,
cypress_cache_flush_all,
@ -1600,6 +1606,7 @@ struct module_info module_turbosparc = {
srmmu_vcache_flush_segment, NULL,
srmmu_vcache_flush_region, NULL,
srmmu_vcache_flush_context, NULL,
srmmu_vcache_flush_range, NULL,
noop_pcache_flush_page,
noop_pure_vcache_flush,
srmmu_cache_flush_all,
@ -1635,7 +1642,7 @@ cpumatch_turbosparc(sc, mp, node)
sc->mmu_enable = 0;
sc->cache_enable = 0;
sc->get_syncflt = 0;
sc->sp_cache_flush = 0;
sc->cache_flush = 0;
sc->sp_vcache_flush_page = 0;
sc->sp_vcache_flush_segment = 0;
sc->sp_vcache_flush_region = 0;
@ -1677,6 +1684,7 @@ struct module_info module_viking = {
noop_vcache_flush_segment, NULL,
noop_vcache_flush_region, NULL,
noop_vcache_flush_context, NULL,
noop_vcache_flush_range, NULL,
viking_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
@ -1872,6 +1880,7 @@ struct module_info module_viking_sun4d = {
noop_vcache_flush_segment, NULL,
noop_vcache_flush_region, NULL,
noop_vcache_flush_context, NULL,
noop_vcache_flush_range, NULL,
viking_pcache_flush_page,
noop_pure_vcache_flush,
noop_cache_flush_all,
@ -2048,15 +2057,17 @@ getcpuinfo(sc, node)
MPCOPY(cache_enable);
MPCOPY(get_syncflt);
MPCOPY(get_asyncflt);
MPCOPY(sp_cache_flush);
MPCOPY(cache_flush);
MPCOPY(sp_vcache_flush_page);
MPCOPY(sp_vcache_flush_segment);
MPCOPY(sp_vcache_flush_region);
MPCOPY(sp_vcache_flush_context);
MPCOPY(sp_vcache_flush_range);
MPCOPY(ft_vcache_flush_page);
MPCOPY(ft_vcache_flush_segment);
MPCOPY(ft_vcache_flush_region);
MPCOPY(ft_vcache_flush_context);
MPCOPY(ft_vcache_flush_range);
MPCOPY(pcache_flush_page);
MPCOPY(pure_vcache_flush);
MPCOPY(cache_flush_all);
@ -2068,7 +2079,6 @@ getcpuinfo(sc, node)
* Use the single-processor cache flush functions until
* all CPUs are initialized.
*/
sc->cache_flush = sc->sp_cache_flush;
sc->vcache_flush_page = sc->sp_vcache_flush_page;
sc->vcache_flush_segment = sc->sp_vcache_flush_segment;
sc->vcache_flush_region = sc->sp_vcache_flush_region;

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpuvar.h,v 1.61 2004/04/17 11:50:23 pk Exp $ */
/* $NetBSD: cpuvar.h,v 1.62 2004/04/17 23:45:40 pk Exp $ */
/*
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@ struct module_info {
void (*get_syncflt)(void);
int (*get_asyncflt)(u_int *, u_int *);
void (*sp_cache_flush)(caddr_t, u_int, int);
void (*cache_flush)(caddr_t, u_int);
void (*sp_vcache_flush_page)(int, int);
void (*ft_vcache_flush_page)(int, int);
void (*sp_vcache_flush_segment)(int, int, int);
@ -84,6 +84,8 @@ struct module_info {
void (*ft_vcache_flush_region)(int, int);
void (*sp_vcache_flush_context)(int);
void (*ft_vcache_flush_context)(int);
void (*sp_vcache_flush_range)(int, int, int);
void (*ft_vcache_flush_range)(int, int, int);
void (*pcache_flush_page)(paddr_t, int);
void (*pure_vcache_flush)(void);
void (*cache_flush_all)(void);
@ -230,8 +232,7 @@ struct cpu_info {
* all processor modules.
* The `ft_' versions are fast trap cache flush handlers.
*/
void (*cache_flush)(caddr_t, u_int, int);
void (*sp_cache_flush)(caddr_t, u_int, int);
void (*cache_flush)(caddr_t, u_int);
void (*vcache_flush_page)(int, int);
void (*sp_vcache_flush_page)(int, int);
void (*ft_vcache_flush_page)(int, int);
@ -245,6 +246,10 @@ struct cpu_info {
void (*sp_vcache_flush_context)(int);
void (*ft_vcache_flush_context)(int);
/* These are helpers for (*cache_flush)() */
void (*sp_vcache_flush_range)(int, int, int);
void (*ft_vcache_flush_range)(int, int, int);
void (*pcache_flush_page)(paddr_t, int);
void (*pure_vcache_flush)(void);
void (*cache_flush_all)(void);