From 9288e803e61e8d56d1c6c6aa8beb58596fb84ed9 Mon Sep 17 00:00:00 2001
From: Jason Andryuk <jandryuk@gmail.com>
Date: Fri, 10 Dec 2021 14:34:34 -0500
Subject: [PATCH 1/2] xen-hvm: Allow disabling buffer_io_timer

commit f37f29d31488 "xen: slightly simplify bufioreq handling" hard
coded setting req.count = 1 during initial field setup before the main
loop.  This missed a subtlety: an early exit from the loop, when there
are no ioreqs to process, previously left req.count == 0 as the return
value, and handle_buffered_io() would then remove
state->buffered_io_timer.  Instead, handle_buffered_iopage() now
effectively always returns true and handle_buffered_io() always
re-arms the timer.

Restore the disabling of the timer by introducing a new handled_ioreq
boolean and using it as the return value.  The named variable shows
the intent of the code more clearly.

Signed-off-by: Jason Andryuk <jandryuk@gmail.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-Id: <20211210193434.75566-1-jandryuk@gmail.com>
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
 hw/i386/xen/xen-hvm.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index 482be95415..cf8e500514 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -1087,10 +1087,11 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
     }
 }
 
-static int handle_buffered_iopage(XenIOState *state)
+static bool handle_buffered_iopage(XenIOState *state)
 {
     buffered_iopage_t *buf_page = state->buffered_io_page;
     buf_ioreq_t *buf_req = NULL;
+    bool handled_ioreq = false;
     ioreq_t req;
     int qw;
 
@@ -1144,9 +1145,10 @@ static int handle_buffered_iopage(XenIOState *state)
         assert(!req.data_is_ptr);
 
         qatomic_add(&buf_page->read_pointer, qw + 1);
+        handled_ioreq = true;
     }
 
-    return req.count;
+    return handled_ioreq;
 }
 
 static void handle_buffered_io(void *opaque)

From a021a2dd8b790437d27db95774969349632f856a Mon Sep 17 00:00:00 2001
From: Ross Lagerwall <ross.lagerwall@citrix.com>
Date: Mon, 24 Jan 2022 10:44:50 +0000
Subject: [PATCH 2/2] xen-mapcache: Avoid entry->lock overflow

In some cases, a particular mapcache entry may be mapped 256 times,
causing the lock field to wrap to 0.  For example, this may happen
when using emulated NVME and the guest submits a large scatter-gather
write.  At this point, the entry may be remapped, causing QEMU to
write the wrong data or crash (since the remap is not atomic).

Avoid this overflow by widening the lock field to a uint32_t, and also
detect the overflow and abort rather than continuing regardless.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Message-Id: <20220124104450.152481-1-ross.lagerwall@citrix.com>
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
 hw/i386/xen/xen-mapcache.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index bd47c3d672..f2ef977963 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -52,7 +52,7 @@ typedef struct MapCacheEntry {
     hwaddr paddr_index;
     uint8_t *vaddr_base;
     unsigned long *valid_mapping;
-    uint8_t lock;
+    uint32_t lock;
 #define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
     uint8_t flags;
     hwaddr size;
@@ -355,6 +355,12 @@ tryagain:
     if (lock) {
         MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
         entry->lock++;
+        if (entry->lock == 0) {
+            fprintf(stderr,
+                    "mapcache entry lock overflow: "TARGET_FMT_plx" -> %p\n",
+                    entry->paddr_index, entry->vaddr_base);
+            abort();
+        }
         reventry->dma = dma;
         reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
         reventry->paddr_index = mapcache->last_entry->paddr_index;
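
Note (patch 1/2): the hunks above do not show the caller that consumes
the return value.  For context, handle_buffered_io() uses it roughly as
sketched below; this is a paraphrased sketch of the function named in
the commit message, not necessarily the exact upstream code, and the
BUFFER_IO_MAX_DELAY constant is assumed from the same file:

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        /* Work was done: re-arm the timer and keep polling the page. */
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        /* Nothing handled: stop the timer until the next event. */
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

Before this patch, the first branch was effectively always taken
because req.count was hard-coded to 1, so the timer was never deleted.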
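
Note (patch 2/2): the overflow is plain C unsigned wrap-around: with
the old uint8_t lock, the 256th nested mapping of an entry brings the
counter back to 0, making the entry look unlocked while it may still be
in use.  A minimal standalone program (illustrative only, not QEMU
code) demonstrating the wrap and the post-increment check the patch
adds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t lock = 0;

    /* One increment per nested mapping: the 256th wraps back to 0. */
    for (int i = 0; i < 256; i++) {
        lock++;
    }
    printf("uint8_t lock after 256 increments: %u\n", (unsigned)lock); /* 0 */

    /* The patch's detection pattern: an unsigned counter that has just
     * been incremented can only read 0 if it wrapped. */
    uint32_t wide_lock = UINT32_MAX;
    wide_lock++;
    if (wide_lock == 0) {
        printf("overflow detected; the patch calls abort() here\n");
    }
    return 0;
}

Widening the field to uint32_t makes a wrap practically unreachable,
and the check turns the remaining theoretical case into a clean abort
instead of silent data corruption.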