2020-12-14 17:02:33 +03:00
|
|
|
/*
|
|
|
|
* Flush the host cpu caches.
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
|
|
* See the COPYING file in the top-level directory.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu/osdep.h"
|
|
|
|
#include "qemu/cacheflush.h"
|
2022-02-08 23:08:55 +03:00
|
|
|
#include "qemu/cacheinfo.h"
|
2020-12-12 19:46:34 +03:00
|
|
|
#include "qemu/bitops.h"
|
2020-12-14 17:02:33 +03:00
|
|
|
|
|
|
|
|
|
|
|
#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
|
|
|
|
|
|
|
|
/* Caches are coherent and do not require flushing; the flush_idcache_range
   symbol is provided inline (as a no-op) by the header. */
|
|
|
|
|
2020-12-12 19:46:34 +03:00
|
|
|
#elif defined(__aarch64__)
|
|
|
|
|
|
|
|
#ifdef CONFIG_DARWIN
|
|
|
|
/* Apple does not expose CTR_EL0, so we must use system interfaces. */
|
|
|
|
extern void sys_icache_invalidate(void *start, size_t len);
|
|
|
|
extern void sys_dcache_flush(void *start, size_t len);
|
|
|
|
/*
 * Make [rx, rx+len) fetchable as up-to-date instructions after the same
 * bytes were written through the [rw, rw+len) mapping.  Apple hides
 * CTR_EL0, so delegate to the libSystem cache routines declared above.
 *
 * Order matters: the data cache must be flushed through the writable
 * view before the stale instruction cache lines for the executable
 * view are invalidated.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* Push dirty data out through the writable mapping first... */
    sys_dcache_flush((void *)rw, len);
    /* ...then discard stale instructions at the executable mapping. */
    sys_icache_invalidate((void *)rx, len);
}
|
|
|
|
#else
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO: unify this with cacheinfo.c.
|
|
|
|
* We want to save the whole contents of CTR_EL0, so that we
|
|
|
|
* have more than the linesize, but also IDC and DIC.
|
|
|
|
*/
|
2021-01-15 10:56:56 +03:00
|
|
|
/*
 * Snapshot of CTR_EL0 (the Cache Type Register), captured once at load
 * time by the constructor below.  flush_idcache_range() reads the I/D
 * cache line sizes and the IDC/DIC coherence bits from this saved value
 * instead of re-reading the register on every call.
 */
static uint64_t save_ctr_el0;
static void __attribute__((constructor)) init_ctr_el0(void)
{
    /* Read CTR_EL0 into save_ctr_el0. */
    asm volatile("mrs\t%0, ctr_el0" : "=r"(save_ctr_el0));
}
|
|
|
|
|
|
|
|
/*
 * This is a copy of gcc's __aarch64_sync_cache_range, modified
 * to fit this three-operand interface.
 *
 * Make [rx, rx+len) fetchable as up-to-date instructions, given that
 * the same bytes were written through the [rw, rw+len) mapping.
 * Either maintenance loop may be skipped when CTR_EL0 advertises the
 * corresponding coherence feature (IDC / DIC).
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* CTR_EL0 feature bits -- see the Arm ARM description of CTR_EL0. */
    const unsigned CTR_IDC = 1u << 28;
    const unsigned CTR_DIC = 1u << 29;
    const uint64_t ctr_el0 = save_ctr_el0;
    /* Line sizes are encoded as log2 of the line size in words (4 bytes). */
    const uintptr_t icache_lsize = 4 << extract64(ctr_el0, 0, 4);
    const uintptr_t dcache_lsize = 4 << extract64(ctr_el0, 16, 4);
    uintptr_t p;

    /*
     * If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification
     * is not required for instruction to data coherence.
     */
    if (!(ctr_el0 & CTR_IDC)) {
        /*
         * Loop over the address range, clearing one cache line at once.
         * Data cache must be flushed to unification first to make sure
         * the instruction cache fetches the updated data.
         * p starts rounded down to a data cache line boundary.
         */
        for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
            asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
        }
        /* Make the cleans visible to the inner shareable domain. */
        asm volatile("dsb\tish" : : : "memory");
    }

    /*
     * If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
     * of Unification is not required for instruction to data coherence.
     */
    if (!(ctr_el0 & CTR_DIC)) {
        for (p = rx & -icache_lsize; p < rx + len; p += icache_lsize) {
            asm volatile("ic\tivau, %0" : : "r"(p) : "memory");
        }
        /* Wait for the invalidates to complete. */
        asm volatile ("dsb\tish" : : : "memory");
    }

    /* Synchronize the pipeline: discard any already-fetched instructions. */
    asm volatile("isb" : : : "memory");
}
|
|
|
|
#endif /* CONFIG_DARWIN */
|
|
|
|
|
2020-12-14 17:02:33 +03:00
|
|
|
#elif defined(__mips__)
|
|
|
|
|
|
|
|
#ifdef __OpenBSD__
|
|
|
|
#include <machine/sysarch.h>
|
|
|
|
#else
|
|
|
|
#include <sys/cachectl.h>
|
|
|
|
#endif
|
|
|
|
|
2020-12-12 19:38:21 +03:00
|
|
|
/*
 * Make [rx, rx+len) coherent for execution after writes through the
 * [rw, rw+len) mapping, using the kernel cacheflush() interface.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* With a split mapping, clean data through the writable view first. */
    if (rx != rw) {
        cacheflush((void *)rw, len, DCACHE);
    }
    /*
     * NOTE(review): when rx == rw only ICACHE is requested here --
     * presumably the kernel also writes back the data cache in that
     * case; confirm against the platform's cacheflush documentation.
     */
    cacheflush((void *)rx, len, ICACHE);
}
|
|
|
|
|
|
|
|
#elif defined(__powerpc__)
|
|
|
|
|
2020-12-12 19:38:21 +03:00
|
|
|
/*
 * Make [rx, rx+len) coherent for execution after writes through the
 * [rw, rw+len) mapping.  Uses the cache line sizes probed elsewhere
 * (qemu_dcache_linesize / qemu_icache_linesize, see qemu/cacheinfo.h).
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    uintptr_t p, b, e;
    size_t dsize = qemu_dcache_linesize;
    size_t isize = qemu_icache_linesize;

    /* Round the RW range out to whole data cache lines and clean each. */
    b = rw & ~(dsize - 1);
    e = (rw + len + dsize - 1) & ~(dsize - 1);
    for (p = b; p < e; p += dsize) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    /* Ensure the data cache stores have completed. */
    asm volatile ("sync" : : : "memory");

    /* Round the RX range out to whole insn cache lines and invalidate. */
    b = rx & ~(isize - 1);
    e = (rx + len + isize - 1) & ~(isize - 1);
    for (p = b; p < e; p += isize) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    /* Ensure the invalidates have completed, then discard prefetch. */
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
|
|
|
|
|
|
|
|
#elif defined(__sparc__)
|
|
|
|
|
2020-12-12 19:38:21 +03:00
|
|
|
/*
 * Make [rx, rx+len) coherent for execution after writes through the
 * [rw, rw+len) mapping, using the sparc "flush" instruction.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* No additional data flush to the RW virtual address required. */
    /* Walk the executable range one 8-byte doubleword at a time,
       rounding both ends out to 8-byte alignment. */
    uintptr_t p, end = (rx + len + 7) & -8;
    for (p = rx & -8; p < end; p += 8) {
        __asm__ __volatile__("flush\t%0" : : "r" (p));
    }
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2020-12-12 19:38:21 +03:00
|
|
|
/*
 * Generic fallback: let the compiler runtime synchronize the caches
 * via __builtin___clear_cache.  When the writable (rw) and executable
 * (rx) views are distinct mappings, both views must be handled.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    char *exec_ptr = (char *)rx;
    char *write_ptr = (char *)rw;

    if (write_ptr != exec_ptr) {
        __builtin___clear_cache(write_ptr, write_ptr + len);
    }
    __builtin___clear_cache(exec_ptr, exec_ptr + len);
}
|
|
|
|
|
|
|
|
#endif
|