/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"

#include <sys/resource.h>

#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen_native.h"
#include "qemu/bitmap.h"

#include "sysemu/runstate.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"

#include <xenevtchn.h>
#include <xengnttab.h>

#if HOST_LONG_BITS == 32
# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
#else
# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
#endif

/*
 * This is the size of the virtual address space reserved for QEMU that
 * will not be used by the MapCache.
 * Empirical tests showed that QEMU uses about 75MB more than
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * MiB)

typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint32_t lock;
#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
    uint8_t flags;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
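    /* True if this locked mapping was created for DMA, kept for debugging. */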
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int bucket_shift;
    unsigned long bucket_size;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;
static MapCache *mapcache_grants;
static xengnttab_handle *xen_region_gnttabdev;

static inline void mapcache_lock(MapCache *mc)
{
    qemu_mutex_lock(&mc->lock);
}

static inline void mapcache_unlock(MapCache *mc)
{
    qemu_mutex_unlock(&mc->lock);
}
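
/*
 * Return 1 if all bits in [nr, nr + size) are set in the bitmap, i.e.
 * every page in the tested range was successfully mapped.
 */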
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);

    return res >= nr + size;
}

static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
                                           void *opaque,
                                           unsigned int bucket_shift,
                                           unsigned long max_size)
{
    unsigned long size;
    MapCache *mc;

    assert(bucket_shift >= XC_PAGE_SHIFT);

    mc = g_new0(MapCache, 1);

    mc->phys_offset_to_gaddr = f;
    mc->opaque = opaque;
    qemu_mutex_init(&mc->lock);

    QTAILQ_INIT(&mc->locked_entries);

    mc->bucket_shift = bucket_shift;
    mc->bucket_size = 1UL << bucket_shift;
    mc->max_mcache_size = max_size;
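
    /*
     * Number of buckets needed to cover max_mcache_size, rounding the
     * page count up to a whole number of buckets.
     */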
    mc->nr_buckets =
        (((mc->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
         (bucket_shift - XC_PAGE_SHIFT));

    size = mc->nr_buckets * sizeof(MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    trace_xen_map_cache_init(mc->nr_buckets, size);
    mc->entry = g_malloc0(size);
    return mc;
}

void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    struct rlimit rlimit_as;
    unsigned long max_mcache_size;
    unsigned int bucket_shift;

    xen_region_gnttabdev = xengnttab_open(NULL, 0);
    if (xen_region_gnttabdev == NULL) {
        error_report("mapcache: Failed to open gnttab device");
        exit(EXIT_FAILURE);
    }
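
    /* 64KiB buckets on 32-bit hosts, 1MiB buckets on 64-bit hosts. */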
    if (HOST_LONG_BITS == 32) {
        bucket_shift = 16;
    } else {
        bucket_shift = 20;
    }

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            warn_report("QEMU's maximum size of virtual"
                        " memory is not infinity");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            max_mcache_size = rlimit_as.rlim_max - NON_MCACHE_MEMORY_SIZE;
        } else {
            max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    mapcache = xen_map_cache_init_single(f, opaque,
                                         bucket_shift,
                                         max_mcache_size);

    /*
     * Grant mappings must use XC_PAGE_SIZE granularity since we can't
     * map anything beyond the number of pages granted to us.
     */
    mapcache_grants = xen_map_cache_init_single(f, opaque,
                                                XC_PAGE_SHIFT,
                                                max_mcache_size);

    setrlimit(RLIMIT_AS, &rlimit_as);
}

static void xen_remap_bucket(MapCache *mc,
                             MapCacheEntry *entry,
                             void *vaddr,
                             hwaddr size,
                             hwaddr address_index,
                             bool dummy,
                             bool grant,
                             bool is_write,
                             ram_addr_t ram_offset)
{
    uint8_t *vaddr_base;
    g_autofree uint32_t *refs = NULL;
    g_autofree xen_pfn_t *pfns = NULL;
    g_autofree int *err = NULL;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    if (grant) {
        refs = g_new0(uint32_t, nb_pfn);
    } else {
        pfns = g_new0(xen_pfn_t, nb_pfn);
    }
    err = g_new0(int, nb_pfn);

    if (entry->vaddr_base != NULL) {
        if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
            ram_block_notify_remove(entry->vaddr_base, entry->size,
                                    entry->size);
        }

        /*
         * If an entry is being replaced by another mapping and we're using
         * the MAP_FIXED flag for it - there is a possibility of a race for
         * the vaddr address with another thread doing an mmap call itself
         * (see man 2 mmap). To avoid that we skip explicit unmapping here
         * and allow the kernel to destroy the previous mappings by replacing
         * them in the mmap call later.
         *
         * Non-identical replacements are therefore not allowed.
         */
        assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size));

        if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;
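
    /*
     * Build the list of grant references (grant case) or guest frame
     * numbers (foreign-mapping case) covering this bucket.
     */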
    if (grant) {
        hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);

        for (i = 0; i < nb_pfn; i++) {
            refs[i] = grant_base + i;
        }
    } else {
        for (i = 0; i < nb_pfn; i++) {
            pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
        }
    }

    entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;

    if (!dummy) {
        if (grant) {
            int prot = PROT_READ;

            if (is_write) {
                prot |= PROT_WRITE;
            }

            entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
            assert(vaddr == NULL);
            vaddr_base = xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
                                                         nb_pfn,
                                                         xen_domid, refs,
                                                         prot);
        } else {
            /*
             * If the caller has requested the mapping at a specific address,
             * use MAP_FIXED to make sure it's honored.
             *
             * We don't yet support upgrading mappings from RO to RW, so to
             * handle models using ordinary address_space_rw(), foreign
             * mappings ignore is_write and are always mapped RW.
             */
            vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
                                               PROT_READ | PROT_WRITE,
                                               vaddr ? MAP_FIXED : 0,
                                               nb_pfn, pfns, err);
        }
        if (vaddr_base == NULL) {
            perror(grant ? "xengnttab_map_domain_grant_refs"
                         : "xenforeignmemory_map2");
            exit(-1);
        }
    } else {
        /*
         * We create dummy mappings where we are unable to create a foreign
         * mapping immediately due to certain circumstances (e.g. on resume).
         */
        vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE,
                          MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0),
                          -1, 0);
        if (vaddr_base == MAP_FAILED) {
            perror("mmap");
            exit(-1);
        }
    }

    if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
        ram_block_notify_add(vaddr_base, size, size);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = g_new0(unsigned long,
                                  BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    if (dummy) {
        entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY;
    } else {
        entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY);
    }
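
    /* Mark only the pages that were mapped without error as valid. */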
    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }
}

static uint8_t *xen_map_cache_unlocked(MapCache *mc,
                                       hwaddr phys_addr, hwaddr size,
                                       ram_addr_t ram_offset,
                                       uint8_t lock, bool dma,
                                       bool grant, bool is_write)
{
    MapCacheEntry *entry, *pentry = NULL,
                  *free_entry = NULL, *free_pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated G_GNUC_UNUSED = false;
    bool dummy = false;

tryagain:
    address_index = phys_addr >> mc->bucket_shift;
    address_offset = phys_addr & (mc->bucket_size - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }
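
    /*
     * Fast path: an unlocked, size-less request that hits the same bucket
     * as the previous lookup can reuse the cached last_entry.
     */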
    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mc->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(
            mc->last_entry->vaddr_base + address_offset
        );
        return mc->last_entry->vaddr_base + address_offset;
    }

    /* cache_size is always a multiple of mc->bucket_size */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % mc->bucket_size) {
            cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
        }
    } else {
        cache_size = mc->bucket_size;
    }

    entry = &mc->entry[address_index % mc->nr_buckets];
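
    /*
     * Walk the bucket's chain looking for an entry that matches this
     * address_index/cache_size pair, remembering the first unlocked entry
     * so it can be reused instead of appending a duplicate.
     */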
    while (entry && (lock || entry->lock) && entry->vaddr_base &&
            (entry->paddr_index != address_index ||
             entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                        test_bit_size >> XC_PAGE_SHIFT,
                        entry->valid_mapping))) {
        if (!free_entry && !entry->lock) {
            free_entry = entry;
            free_pentry = pentry;
        }
        pentry = entry;
        entry = entry->next;
    }
    if (!entry && free_entry) {
        entry = free_entry;
        pentry = free_pentry;
    }
    if (!entry) {
        entry = g_new0(MapCacheEntry, 1);
        pentry->next = entry;
        xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
                         grant, is_write, ram_offset);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
            entry->size != cache_size ||
            !test_bits(address_offset >> XC_PAGE_SHIFT,
                       test_bit_size >> XC_PAGE_SHIFT,
                       entry->valid_mapping)) {
            xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
                             grant, is_write, ram_offset);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mc->last_entry = NULL;
#ifdef XEN_COMPAT_PHYSMAP
        if (!translated && mc->phys_offset_to_gaddr) {
            phys_addr = mc->phys_offset_to_gaddr(phys_addr, size);
            translated = true;
            goto tryagain;
        }
#endif
        if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) {
            dummy = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mc->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_new0(MapCacheRev, 1);
        entry->lock++;
        if (entry->lock == 0) {
            error_report("mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p",
                         entry->paddr_index, entry->vaddr_base);
            abort();
        }
        reventry->dma = dma;
        reventry->vaddr_req = mc->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mc->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mc->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(
        mc->last_entry->vaddr_base + address_offset
    );
    return mc->last_entry->vaddr_base + address_offset;
}

uint8_t *xen_map_cache(MemoryRegion *mr,
                       hwaddr phys_addr, hwaddr size,
                       ram_addr_t ram_addr_offset,
                       uint8_t lock, bool dma,
                       bool is_write)
{
    bool grant = xen_mr_is_grants(mr);
    MapCache *mc = grant ? mapcache_grants : mapcache;
    uint8_t *p;

    if (grant && !lock) {
        /*
         * Grants are only supported via address_space_map(). Anything
         * else is considered a user/guest error.
         *
         * QEMU generally doesn't expect these mappings to ever fail, so
         * if this happens we report an error message and abort().
         */
        error_report("Tried to access a grant reference without mapping it.");
        abort();
    }

    mapcache_lock(mc);
    p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset,
                               lock, dma, grant, is_write);
    mapcache_unlock(mc);
    return p;
}

static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock(mc);
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        trace_xen_ram_addr_from_mapcache_not_found(ptr);
        mapcache_unlock(mc);
        return RAM_ADDR_INVALID;
    }

    entry = &mc->entry[paddr_index % mc->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_ram_addr_from_mapcache_not_in_cache(ptr);
        raddr = RAM_ADDR_INVALID;
    } else {
        raddr = (reventry->paddr_index << mc->bucket_shift) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock(mc);
    return raddr;
}
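
/*
 * Translate a mapped pointer back to its guest RAM address, checking the
 * regular mapcache first and falling back to the grants mapcache.
 */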
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    ram_addr_t addr;

    addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
    if (addr == RAM_ADDR_INVALID) {
        addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
    }

    return addr;
}

static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
                                                    uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;
    int rc;

    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        trace_xen_invalidate_map_cache_entry_unlocked_not_found(buffer);
        QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
            trace_xen_invalidate_map_cache_entry_unlocked_found(
                reventry->paddr_index,
                reventry->vaddr_req
            );
        }
        return;
    }
    QTAILQ_REMOVE(&mc->locked_entries, reventry, next);
    g_free(reventry);

    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == paddr_index) {
        mc->last_entry = NULL;
    }

    entry = &mc->entry[paddr_index % mc->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_invalidate_map_cache_entry_unlocked_miss(buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0) {
        return;
    }
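
    /* Last reference dropped: tear down the actual mapping. */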
    ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
    if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) {
        rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base,
                             entry->size >> mc->bucket_shift);
    } else {
        rc = munmap(entry->vaddr_base, entry->size);
    }

    if (rc) {
        perror("unmap fails");
        exit(-1);
    }

    g_free(entry->valid_mapping);
    if (pentry) {
        pentry->next = entry->next;
        g_free(entry);
    } else {
        /*
         * Invalidate mapping but keep entry->next pointing to the rest
         * of the list.
         *
         * Note that lock is already zero here, otherwise we don't unmap.
         */
        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->valid_mapping = NULL;
        entry->flags = 0;
        entry->size = 0;
    }
}
typedef struct XenMapCacheData {
    Coroutine *co;
    uint8_t *buffer;
} XenMapCacheData;

static void xen_invalidate_map_cache_entry_single(MapCache *mc, uint8_t *buffer)
{
    mapcache_lock(mc);
    xen_invalidate_map_cache_entry_unlocked(mc, buffer);
    mapcache_unlock(mc);
}

/* Invalidate in both the foreign-mapping cache and the grant cache. */
static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
{
    xen_invalidate_map_cache_entry_single(mapcache, buffer);
    xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
}
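
/*
 * The buffer is looked up in both caches because the caller does not know
 * whether it came from a foreign mapping or a grant mapping; the lookup
 * in the wrong cache simply misses (see the _not_found tracepoint in
 * xen_invalidate_map_cache_entry_unlocked()).
 */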
static void xen_invalidate_map_cache_entry_bh(void *opaque)
{
    XenMapCacheData *data = opaque;

    xen_invalidate_map_cache_entry_all(data->buffer);
    aio_co_wake(data->co);
}

void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    if (qemu_in_coroutine()) {
        XenMapCacheData data = {
            .co = qemu_coroutine_self(),
            .buffer = buffer,
        };
        aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                                xen_invalidate_map_cache_entry_bh, &data);
        qemu_coroutine_yield();
    } else {
        xen_invalidate_map_cache_entry_all(buffer);
    }
}
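
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * defer-to-bottom-half shape used above works for any coroutine that
 * must call main-loop-only code:
 *
 *     typedef struct { Coroutine *co; ... } Data;
 *
 *     static void bh_fn(void *opaque)
 *     {
 *         Data *d = opaque;
 *         do_main_loop_only_work(d);   // hypothetical
 *         aio_co_wake(d->co);          // resume the waiting coroutine
 *     }
 *
 *     // in the coroutine:
 *     Data d = { .co = qemu_coroutine_self(), ... };
 *     aio_bh_schedule_oneshot(qemu_get_current_aio_context(), bh_fn, &d);
 *     qemu_coroutine_yield();          // woken by bh_fn()
 */
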
static void xen_invalidate_map_cache_single(MapCache *mc)
{
    unsigned long i;
    MapCacheRev *reventry;

    mapcache_lock(mc);

    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        /*
         * Warn about locked DMA mappings that are still outstanding:
         * invalidation cannot remove them, so the DMA may fail later.
         */
        trace_xen_invalidate_map_cache(reventry->paddr_index,
                                       reventry->vaddr_req);
    }

    for (i = 0; i < mc->nr_buckets; i++) {
        MapCacheEntry *entry = &mc->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mc->last_entry = NULL;

    mapcache_unlock(mc);
}

void xen_invalidate_map_cache(void)
{
    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    xen_invalidate_map_cache_single(mapcache);
    xen_invalidate_map_cache_single(mapcache_grants);
}
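
/*
 * Note that entries with lock > 0 deliberately survive the flush above;
 * bdrv_drain_all() is run first so that, ideally, no locked DMA mapping
 * is still in flight when Xen asks us to drop the cache.
 */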

static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
                                                 hwaddr old_phys_addr,
                                                 hwaddr new_phys_addr,
                                                 hwaddr size)
{
    MapCacheEntry *entry;
    hwaddr address_index, address_offset;
    hwaddr test_bit_size, cache_size = size;

    address_index = old_phys_addr >> mc->bucket_shift;
    address_offset = old_phys_addr & (mc->bucket_size - 1);

    assert(size);
    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1));
    if (test_bit_size % XC_PAGE_SIZE) {
        test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
    }
    cache_size = size + address_offset;
    if (cache_size % mc->bucket_size) {
        cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
    }
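
    /*
     * Worked example of the rounding above (assuming a 1 MiB bucket_size
     * and 4 KiB XC_PAGE_SIZE; the bucket size is configurable):
     * old_phys_addr = 0x101800 and size = 0x2000 give
     * address_offset = 0x1800, test_bit_size = 0x2000 + 0x800 = 0x2800,
     * rounded up to 0x3000; and cache_size = 0x2000 + 0x1800 = 0x3800,
     * rounded up to a full 0x100000 bucket.
     */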

    entry = &mc->entry[address_index % mc->nr_buckets];
    while (entry && !(entry->paddr_index == address_index &&
                      entry->size == cache_size)) {
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_replace_cache_entry_unlocked(old_phys_addr);
        return NULL;
    }

    /* Grant mappings are not expected here; only foreign mappings can
     * be replaced. */
    assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);

    address_index = new_phys_addr >> mc->bucket_shift;
    address_offset = new_phys_addr & (mc->bucket_size - 1);

    trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);

    xen_remap_bucket(mc, entry, entry->vaddr_base,
                     cache_size, address_index, false,
                     false, false, old_phys_addr);
    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        trace_xen_replace_cache_entry_unlocked_could_not_update_entry(
            old_phys_addr
        );
        return NULL;
    }

    return entry->vaddr_base + address_offset;
}

uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
                                 hwaddr new_phys_addr,
                                 hwaddr size)
{
    uint8_t *p;

    mapcache_lock(mapcache);
    p = xen_replace_cache_entry_unlocked(mapcache, old_phys_addr,
                                         new_phys_addr, size);
    mapcache_unlock(mapcache);
    return p;
}