Merge remote-tracking branch 'qemu-kvm/uq/master' into staging

* qemu-kvm/uq/master:
  kvm: Rename irqchip_inject_ioctl to irq_set_ioctl
  kvm: Stop flushing coalesced MMIO on vmexit
  VGA: Flush coalesced MMIO on related MMIO/PIO accesses
  memory: Flush coalesced MMIO on mapping and state changes
  memory: Fold memory_region_update_topology into memory_region_transaction_commit
  memory: Use transaction_begin/commit also for single-step operations
  memory: Flush coalesced MMIO on selected region access
  kvm-all.c: Move init of irqchip_inject_ioctl out of kvm_irqchip_create()
  update-linux-headers.sh: Don't hard code list of architectures
Anthony Liguori 2012-09-17 10:20:27 -05:00
commit 31e165f177
9 changed files with 125 additions and 53 deletions
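
Taken together, these patches move the coalesced MMIO flush out of the hot vmexit path and attach it to the regions that actually need it. A minimal sketch of the resulting device-side pattern is shown below; FooState, foo_mmio_ops, foo_init_mmio and the "foo-mmio" name are hypothetical and only illustrate the API used in the diffs that follow.

#include "memory.h"

typedef struct FooState {
    MemoryRegion mmio;    /* ordering-sensitive control registers */
} FooState;

static uint64_t foo_mmio_read(void *opaque, target_phys_addr_t addr,
                              unsigned size)
{
    /* By the time this runs, pending coalesced writes have been flushed,
     * because flush_coalesced is set on the region below. */
    return 0;
}

static void foo_mmio_write(void *opaque, target_phys_addr_t addr,
                           uint64_t val, unsigned size)
{
    /* Handle the register write. */
}

static const MemoryRegionOps foo_mmio_ops = {
    .read = foo_mmio_read,
    .write = foo_mmio_write,
};

static void foo_init_mmio(FooState *s)
{
    memory_region_init_io(&s->mmio, &foo_mmio_ops, s, "foo-mmio", 0x1000);
    /* kvm_cpu_exec() no longer flushes on every exit, so a region whose
     * handlers must see coalesced writes in order asks for a flush here. */
    memory_region_set_flush_coalesced(&s->mmio);
}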

hw/cirrus_vga.c

@@ -2441,6 +2441,8 @@ static uint32_t cirrus_vga_ioport_read(void *opaque, uint32_t addr)
VGACommonState *s = &c->vga;
int val, index;
qemu_flush_coalesced_mmio_buffer();
if (vga_ioport_invalid(s, addr)) {
val = 0xff;
} else {
@@ -2534,6 +2536,8 @@ static void cirrus_vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
VGACommonState *s = &c->vga;
int index;
qemu_flush_coalesced_mmio_buffer();
/* check port range access depending on color/monochrome mode */
if (vga_ioport_invalid(s, addr)) {
return;
@@ -2854,6 +2858,7 @@ static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci,
/* I/O handler for LFB */
memory_region_init_io(&s->cirrus_linear_io, &cirrus_linear_io_ops, s,
"cirrus-linear-io", VGA_RAM_SIZE);
memory_region_set_flush_coalesced(&s->cirrus_linear_io);
/* I/O handler for LFB */
memory_region_init_io(&s->cirrus_linear_bitblt_io,
@@ -2861,10 +2866,12 @@ static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci,
s,
"cirrus-bitblt-mmio",
0x400000);
memory_region_set_flush_coalesced(&s->cirrus_linear_bitblt_io);
/* I/O handler for memory-mapped I/O */
memory_region_init_io(&s->cirrus_mmio_io, &cirrus_mmio_io_ops, s,
"cirrus-mmio", CIRRUS_PNPMMIO_SIZE);
memory_region_set_flush_coalesced(&s->cirrus_mmio_io);
s->real_vram_size =
(s->device_id == CIRRUS_ID_CLGD5446) ? 4096 * 1024 : 2048 * 1024;

hw/qxl.c

@@ -1910,6 +1910,7 @@ static int qxl_init_common(PCIQXLDevice *qxl)
if (qxl->id == 0) {
vga_dirty_log_start(&qxl->vga);
}
memory_region_set_flush_coalesced(&qxl->io_bar);
pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX,

hw/vga-isa-mm.c

@@ -107,6 +107,7 @@ static void vga_mm_init(ISAVGAMMState *s, target_phys_addr_t vram_base,
s_ioport_ctrl = g_malloc(sizeof(*s_ioport_ctrl));
memory_region_init_io(s_ioport_ctrl, &vga_mm_ctrl_ops, s,
"vga-mm-ctrl", 0x100000);
memory_region_set_flush_coalesced(s_ioport_ctrl);
vga_io_memory = g_malloc(sizeof(*vga_io_memory));
/* XXX: endianness? */

hw/vga.c

@@ -361,6 +361,8 @@ uint32_t vga_ioport_read(void *opaque, uint32_t addr)
VGACommonState *s = opaque;
int val, index;
qemu_flush_coalesced_mmio_buffer();
if (vga_ioport_invalid(s, addr)) {
val = 0xff;
} else {
@@ -453,6 +455,8 @@ void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val)
VGACommonState *s = opaque;
int index;
qemu_flush_coalesced_mmio_buffer();
/* check port range access depending on color/monochrome mode */
if (vga_ioport_invalid(s, addr)) {
return;
@@ -2338,6 +2342,7 @@ MemoryRegion *vga_init_io(VGACommonState *s,
vga_mem = g_malloc(sizeof(*vga_mem));
memory_region_init_io(vga_mem, &vga_mem_ops, s,
"vga-lowmem", 0x20000);
memory_region_set_flush_coalesced(vga_mem);
return vga_mem;
}

hw/vmware_vga.c

@@ -1186,6 +1186,7 @@ static int pci_vmsvga_initfn(PCIDevice *dev)
memory_region_init_io(&s->io_bar, &vmsvga_io_ops, &s->chip,
"vmsvga-io", 0x10);
memory_region_set_flush_coalesced(&s->io_bar);
pci_register_bar(&s->card, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar);
vmsvga_init(&s->chip, pci_address_space(dev),

kvm-all.c

@@ -92,7 +92,7 @@ struct KVMState
/* The man page (and posix) say ioctl numbers are signed int, but
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
unsigned irqchip_inject_ioctl;
unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
@@ -870,13 +870,13 @@ int kvm_set_irq(KVMState *s, int irq, int level)
event.level = level;
event.irq = irq;
ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
if (ret < 0) {
perror("kvm_set_irq");
abort();
}
return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1237,10 +1237,6 @@ static int kvm_irqchip_create(KVMState *s)
return ret;
}
s->irqchip_inject_ioctl = KVM_IRQ_LINE;
if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
}
kvm_kernel_irqchip = true;
/* If we have an in-kernel IRQ chip then we must have asynchronous
* interrupt delivery (though the reverse is not necessarily true)
@@ -1389,6 +1385,11 @@ int kvm_init(void)
s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
s->irq_set_ioctl = KVM_IRQ_LINE;
if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
}
ret = kvm_arch_init(s);
if (ret < 0) {
goto err;
@@ -1576,8 +1577,6 @@ int kvm_cpu_exec(CPUArchState *env)
qemu_mutex_lock_iothread();
kvm_arch_post_run(env, run);
kvm_flush_coalesced_mmio_buffer();
if (run_ret < 0) {
if (run_ret == -EINTR || run_ret == -EAGAIN) {
DPRINTF("io window exit\n");
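
For callers of kvm_set_irq(), the rename does not change the return convention visible in the hunk above: plain KVM_IRQ_LINE always yields 1, while KVM_IRQ_LINE_STATUS returns the status the kernel reports. A short sketch (pulse_gsi is a hypothetical helper, not part of the patch):

static void pulse_gsi(KVMState *s, int gsi)
{
    int status = kvm_set_irq(s, gsi, 1);    /* assert the line */
    kvm_set_irq(s, gsi, 0);                 /* deassert it */
    (void)status;   /* 1 for KVM_IRQ_LINE; kernel-reported status otherwise */
}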

memory.c

@@ -24,7 +24,6 @@
#include "exec-obsolete.h"
unsigned memory_region_transaction_depth = 0;
static bool memory_region_update_pending = false;
static bool global_dirty_log = false;
static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
@@ -311,6 +310,9 @@ static void memory_region_read_accessor(void *opaque,
MemoryRegion *mr = opaque;
uint64_t tmp;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
tmp = mr->ops->read(mr->opaque, addr, size);
*value |= (tmp & mask) << shift;
}
@@ -325,6 +327,9 @@ static void memory_region_write_accessor(void *opaque,
MemoryRegion *mr = opaque;
uint64_t tmp;
if (mr->flush_coalesced_mmio) {
qemu_flush_coalesced_mmio_buffer();
}
tmp = (*value >> shift) & mask;
mr->ops->write(mr->opaque, addr, tmp, size);
}
@@ -726,33 +731,9 @@ static void address_space_update_topology(AddressSpace *as)
address_space_update_ioeventfds(as);
}
static void memory_region_update_topology(MemoryRegion *mr)
{
if (memory_region_transaction_depth) {
memory_region_update_pending |= !mr || mr->enabled;
return;
}
if (mr && !mr->enabled) {
return;
}
MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
if (address_space_memory.root) {
address_space_update_topology(&address_space_memory);
}
if (address_space_io.root) {
address_space_update_topology(&address_space_io);
}
MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
memory_region_update_pending = false;
}
void memory_region_transaction_begin(void)
{
qemu_flush_coalesced_mmio_buffer();
++memory_region_transaction_depth;
}
@@ -760,8 +741,17 @@ void memory_region_transaction_commit(void)
{
assert(memory_region_transaction_depth);
--memory_region_transaction_depth;
if (!memory_region_transaction_depth && memory_region_update_pending) {
memory_region_update_topology(NULL);
if (!memory_region_transaction_depth) {
MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
if (address_space_memory.root) {
address_space_update_topology(&address_space_memory);
}
if (address_space_io.root) {
address_space_update_topology(&address_space_io);
}
MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
}
}
@@ -826,6 +816,7 @@ void memory_region_init(MemoryRegion *mr,
mr->dirty_log_mask = 0;
mr->ioeventfd_nb = 0;
mr->ioeventfds = NULL;
mr->flush_coalesced_mmio = false;
}
static bool memory_region_access_valid(MemoryRegion *mr,
@@ -1069,8 +1060,9 @@ void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
uint8_t mask = 1 << client;
memory_region_transaction_begin();
mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
@@ -1103,16 +1095,18 @@ void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
if (mr->readonly != readonly) {
memory_region_transaction_begin();
mr->readonly = readonly;
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
}
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
if (mr->readable != readable) {
memory_region_transaction_begin();
mr->readable = readable;
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
}
@@ -1176,12 +1170,16 @@ void memory_region_add_coalescing(MemoryRegion *mr,
cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
memory_region_update_coalesced_range(mr);
memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
CoalescedMemoryRange *cmr;
qemu_flush_coalesced_mmio_buffer();
mr->flush_coalesced_mmio = false;
while (!QTAILQ_EMPTY(&mr->coalesced)) {
cmr = QTAILQ_FIRST(&mr->coalesced);
QTAILQ_REMOVE(&mr->coalesced, cmr, link);
@@ -1190,6 +1188,19 @@ void memory_region_clear_coalescing(MemoryRegion *mr)
memory_region_update_coalesced_range(mr);
}
void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
mr->flush_coalesced_mmio = true;
}
void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
qemu_flush_coalesced_mmio_buffer();
if (QTAILQ_EMPTY(&mr->coalesced)) {
mr->flush_coalesced_mmio = false;
}
}
void memory_region_add_eventfd(MemoryRegion *mr,
target_phys_addr_t addr,
unsigned size,
@@ -1206,6 +1217,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
};
unsigned i;
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
break;
@@ -1217,7 +1229,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
mr->ioeventfds[i] = mrfd;
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
void memory_region_del_eventfd(MemoryRegion *mr,
@@ -1236,6 +1248,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
};
unsigned i;
memory_region_transaction_begin();
for (i = 0; i < mr->ioeventfd_nb; ++i) {
if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
break;
@@ -1247,7 +1260,7 @@ void memory_region_del_eventfd(MemoryRegion *mr,
--mr->ioeventfd_nb;
mr->ioeventfds = g_realloc(mr->ioeventfds,
sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
@@ -1256,6 +1269,8 @@ static void memory_region_add_subregion_common(MemoryRegion *mr,
{
MemoryRegion *other;
memory_region_transaction_begin();
assert(!subregion->parent);
subregion->parent = mr;
subregion->addr = offset;
@@ -1288,7 +1303,7 @@ static void memory_region_add_subregion_common(MemoryRegion *mr,
}
QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
@@ -1314,10 +1329,11 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
void memory_region_del_subregion(MemoryRegion *mr,
MemoryRegion *subregion)
{
memory_region_transaction_begin();
assert(subregion->parent == mr);
subregion->parent = NULL;
QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
memory_region_update_topology(mr);
memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
@@ -1325,8 +1341,9 @@ void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
if (enabled == mr->enabled) {
return;
}
memory_region_transaction_begin();
mr->enabled = enabled;
memory_region_update_topology(NULL);
memory_region_transaction_commit();
}
void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
@@ -1352,16 +1369,15 @@ void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
target_phys_addr_t old_offset = mr->alias_offset;
assert(mr->alias);
mr->alias_offset = offset;
if (offset == old_offset || !mr->parent) {
if (offset == mr->alias_offset) {
return;
}
memory_region_update_topology(mr);
memory_region_transaction_begin();
mr->alias_offset = offset;
memory_region_transaction_commit();
}
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
@@ -1493,14 +1509,16 @@ void memory_listener_unregister(MemoryListener *listener)
void set_system_memory_map(MemoryRegion *mr)
{
memory_region_transaction_begin();
address_space_memory.root = mr;
memory_region_update_topology(NULL);
memory_region_transaction_commit();
}
void set_system_io_map(MemoryRegion *mr)
{
memory_region_transaction_begin();
address_space_io.root = mr;
memory_region_update_topology(NULL);
memory_region_transaction_commit();
}
uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
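
With memory_region_update_topology() folded into memory_region_transaction_commit(), every single-step mutator above is now a begin/mutate/commit triple, so nested callers pay for only one topology rebuild. A brief sketch of the batching this enables (remap_bars and its regions are hypothetical):

static void remap_bars(MemoryRegion *sysmem, MemoryRegion *bar0,
                       MemoryRegion *bar1)
{
    memory_region_transaction_begin();      /* also flushes coalesced MMIO */
    memory_region_del_subregion(sysmem, bar0);
    memory_region_add_subregion(sysmem, 0xfe000000, bar0);
    memory_region_set_enabled(bar1, true);
    memory_region_transaction_commit();     /* one listener begin/commit,
                                               one topology update */
}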

memory.h

@@ -133,6 +133,7 @@ struct MemoryRegion {
bool enabled;
bool rom_device;
bool warning_printed; /* For reservations */
bool flush_coalesced_mmio;
MemoryRegion *alias;
target_phys_addr_t alias_offset;
unsigned priority;
@@ -520,6 +521,31 @@ void memory_region_add_coalescing(MemoryRegion *mr,
*/
void memory_region_clear_coalescing(MemoryRegion *mr);
/**
* memory_region_set_flush_coalesced: Enforce memory coalescing flush before
* accesses.
*
* Ensure that pending coalesced MMIO requests are flushed before the memory
* region is accessed. This property is automatically enabled for all regions
* passed to memory_region_set_coalescing() and memory_region_add_coalescing().
*
* @mr: the memory region to be updated.
*/
void memory_region_set_flush_coalesced(MemoryRegion *mr);
/**
* memory_region_clear_flush_coalesced: Disable memory coalescing flush before
* accesses.
*
* Clear the automatic coalesced MMIO flushing enabled via
* memory_region_set_flush_coalesced. Note that this service has no effect on
* memory regions that have MMIO coalescing enabled for themselves. For them,
* automatic flushing will stop once coalescing is disabled.
*
* @mr: the memory region to be updated.
*/
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
* memory_region_add_eventfd: Request an eventfd to be triggered when a word
* is written to a location.
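
To make the flush-coalesced documentation above concrete: clearing the flush property only takes effect once the region no longer has coalescing enabled for itself. A short sketch (tune_coalescing and mr are hypothetical):

static void tune_coalescing(MemoryRegion *mr)
{
    memory_region_add_coalescing(mr, 0, 0x1000);  /* implicitly sets flush */
    memory_region_clear_flush_coalesced(mr);      /* flag stays set: mr still
                                                     has coalescing enabled */
    memory_region_clear_coalescing(mr);           /* drops coalescing and the
                                                     flush property with it */
}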

scripts/update-linux-headers.sh

@@ -28,7 +28,21 @@ if [ -z "$output" ]; then
output="$PWD"
fi
for arch in x86 powerpc s390; do
# This will pick up non-directories too (eg "Kconfig") but we will
# ignore them in the next loop.
ARCHLIST=$(cd "$linux/arch" && echo *)
for arch in $ARCHLIST; do
# Discard anything which isn't a KVM-supporting architecture
if ! [ -e "$linux/arch/$arch/include/asm/kvm.h" ]; then
continue
fi
# Blacklist architectures which have KVM headers but are actually dead
if [ "$arch" = "ia64" ]; then
continue
fi
make -C "$linux" INSTALL_HDR_PATH="$tmpdir" SRCARCH=$arch headers_install
rm -rf "$output/linux-headers/asm-$arch"