/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/southbridge/piix.h"
#include "hw/irq.h"
#include "hw/hw.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-x86.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/range.h"
|
2019-08-12 08:23:59 +03:00
|
|
|
#include "sysemu/runstate.h"
|
2019-08-12 08:23:57 +03:00
|
|
|
#include "sysemu/sysemu.h"
|
2020-05-08 13:02:22 +03:00
|
|
|
#include "sysemu/xen.h"
|
2012-12-17 21:20:04 +04:00
|
|
|
#include "sysemu/xen-mapcache.h"
|
2017-04-06 02:21:30 +03:00
|
|
|
#include "trace.h"
|
2010-08-31 19:41:25 +04:00
|
|
|
|
2010-09-06 23:07:14 +04:00
|
|
|
#include <xen/hvm/ioreq.h>
|
2011-07-20 12:17:43 +04:00
|
|
|
#include <xen/hvm/e820.h>
|
2010-09-06 23:07:14 +04:00
|
|
|
|
2014-05-07 20:16:43 +04:00
|
|
|
//#define DEBUG_XEN_HVM
|
2010-09-06 23:07:14 +04:00
|
|
|
|
2014-05-07 20:16:43 +04:00
|
|
|
#ifdef DEBUG_XEN_HVM
|
2010-09-06 23:07:14 +04:00
|
|
|
#define DPRINTF(fmt, ...) \
|
|
|
|
do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
|
|
|
|
#else
|
|
|
|
#define DPRINTF(fmt, ...) \
|
|
|
|
do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed. This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
 * needs to be included before this block and hw/xen/xen_common.h needs to
 * be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif
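
/* Per-vCPU accessors for the shared ioreq page. */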
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY 100
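
/*
 * A XenPhysmap tracks a RAM region (in practice the VGA framebuffer) that
 * has been relocated in guest-physical space, so the mapping can be undone
 * and re-established across migration.
 */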
typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

static QLIST_HEAD(, XenPhysmap) xen_physmap;

typedef struct XenPciDevice {
    PCIDevice *pci_dev;
    uint32_t sbdf;
    QLIST_ENTRY(XenPciDevice) entry;
} XenPciDevice;
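
/*
 * Per-domain device-model state: the ioreq server id, the shared and
 * buffered ioreq pages, event channels, and the listeners registered
 * with QEMU's memory and device core.
 */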
typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    xenforeignmemory_resource_handle *fres;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn remote and local ports for buffered io */
    evtchn_port_t bufioreq_remote_port;
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    QLIST_HEAD(, XenPciDevice) dev_list;
    DeviceListener device_listener;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;
    /* Buffer used by xen_sync_dirty_bitmap */
    unsigned long *dirty_bitmap;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;
/* Xen-specific functions for the PIIX PCI bridge */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
            xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v);
        }
    }
}
int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}
/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}
/* Memory Ops */
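
/*
 * Register guest RAM with QEMU's memory model: low RAM below 4G (minus
 * the VGA hole at 0xa0000-0xc0000) plus, if configured, RAM above 4G.
 */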
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 PC_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /* Handle the machine opt max-ram-below-4g. It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }
    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space, it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
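
/*
 * Populate guest physical memory in Xen for RAM blocks allocated outside
 * the main "xen.ram" region (which the toolstack populates itself).
 */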
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}
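
/* Find the physmap entry, if any, covering the given guest address. */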
static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}
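
/* Translate a RAM block offset back to the guest physical address it maps. */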
static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
    hwaddr addr = phys_offset & TARGET_PAGE_MASK;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}
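
/*
 * When built for older Xen (XEN_COMPAT_PHYSMAP), persist each physmap entry
 * to xenstore so the mapping can be recreated after migration; otherwise
 * this is a no-op.
 */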
#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif
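
/*
 * Relocate a RAM region (the framebuffer) in the guest physical address
 * space and record the mapping in the physmap list.
 */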
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof(XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now when we have a physmap entry we can replace a dummy mapping with
         * a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    nr_pages = size >> TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}
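
/* Undo xen_add_to_physmap(): move the pages back and drop the tracking entry. */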
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}
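
/*
 * Shared add/remove path for the memory listener: (un)register the section
 * with the ioreq server, then update the physmap or read-only tracking
 * for RAM sections.
 */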
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}
static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}
static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}
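/*
 * DeviceListener callbacks: track PCI devices as they are realized and
 * unrealized so their config-space accesses can be routed to QEMU.
 */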
static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}
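
/*
 * Fetch the dirty-VRAM bitmap for the tracked region from Xen and flag the
 * corresponding framebuffer pages dirty in QEMU.
 */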
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, state->dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = state->dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}
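
/* The listeners that plug the handlers above into QEMU's core. */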
static MemoryListener xen_memory_listener = {
    .name = "xen-memory",
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .name = "xen-io",
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};
/* Fetch the pending ioreq for the given vCPU from the shared memory page. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a pending port notification and return the
 * ioreq of the corresponding vCPU, or NULL if there is none.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}
/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
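
/* Service a port I/O request, with either inline or indirect data. */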
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
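
/* Service an MMIO (copy) request; data may be inline or in guest memory. */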
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
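
/*
 * Service a PCI config-space ioreq: the SBDF is carried in the upper 32
 * bits of req->addr and the register offset in the lower bits.
 */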
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}
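
/*
 * vmport ioreqs carry the guest's general-purpose registers in a shared
 * page; copy them into the vCPU state around the emulated port access.
 */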
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}
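/* Dispatch a single ioreq to the handler matching its type. */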
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE)
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG:
            cpu_ioreq_config(state, req);
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
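
/*
 * Drain the ring of buffered ioreqs; quad-word requests occupy two
 * consecutive slots. Returns true if at least one request was handled.
 */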
static bool handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    bool handled_ioreq = false;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return false;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        qatomic_add(&buf_page->read_pointer, qw + 1);
        handled_ioreq = true;
    }

    return handled_ioreq;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
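
/*
 * Main event-channel handler: drain buffered ioreqs, then service the
 * synchronous ioreq of the notifying vCPU and post the response.
 */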
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}
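
/*
 * Called when the VM starts running: arm the buffered-io timer, build
 * the vcpu-index -> CPUState lookup table used when dispatching
 * requests, and hook the ioreq event channel fd into the main loop.
 */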
static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}
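
/*
 * VM change-state hook: prepare the main loop when the machine starts
 * running, and mirror the runstate into the ioreq server so that Xen
 * only dispatches I/O requests while QEMU can service them.
 */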
static void xen_hvm_change_state_handler(void *opaque, bool running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}
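
/*
 * Exit notifier: destroy the ioreq server, unmap its resource if one
 * was mapped, and close the event channel and xenstore handles.
 */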
static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);
    if (state->fres != NULL) {
        xenforeignmemory_unmap_resource(xen_fmem, state->fres);
    }

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}
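
/*
 * With XEN_COMPAT_PHYSMAP, physical address space remappings set up by
 * an earlier incarnation of the device model are recorded under
 * /local/domain/0/device-model/<domid>/physmap in xenstore; rebuild the
 * in-memory xen_physmap list from those entries.
 */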
#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif
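
/*
 * Wakeup notifier: clearing the ACPI S-state parameter asks Xen to
 * bring the domain back out of suspend.
 */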
static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
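
/*
 * Map the ioreq server pages.  Newer Xen exposes them as a single
 * two-frame mappable resource (frame 0 is the buffered ioreq page and
 * frame 1 the synchronous one, as the QEMU_BUILD_BUG_ON()s below
 * assert); older hypervisors fail with EOPNOTSUPP, in which case each
 * page is looked up and foreign-mapped by pfn individually.
 */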
static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (state->fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + TARGET_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}
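
/*
 * Top-level HVM glue: open the Xen control interfaces, create and map
 * the ioreq server, bind one event channel per possible vcpu plus the
 * buffered-io channel, initialise guest RAM and the memory/io/device
 * listeners, and bring up the backend core.  Any failure here is fatal.
 */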
void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int i, rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif
    xen_ram_init(pcms, ms->ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    xen_bus_init();

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}
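
/*
 * Ask Xen to power off or reboot the domain: prefer the device-model
 * shutdown operation where available, and fall back to
 * xc_domain_shutdown() on older hypervisors that return ENOTTY.
 */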
void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            perror("xendevicemodel_shutdown failed");
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}
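
/*
 * Inform Xen of guest pages that QEMU itself has dirtied while a
 * migration is in flight, rounding [start, start + length) out to whole
 * target pages; a zero length is treated as a single page.
 */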
void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length);

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}
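
/*
 * QMP handler behind the xen-set-global-dirty-log command.  A sketch of
 * the wire format (enable is the only argument):
 *   { "execute": "xen-set-global-dirty-log",
 *     "arguments": { "enable": true } }
 */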
void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
    } else {
        memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
    }
}