Merge remote-tracking branch 'qemu-kvm/memory/page_desc' into staging

* qemu-kvm/memory/page_desc: (22 commits)
  Remove cpu_get_physical_page_desc()
  sparc: avoid cpu_get_physical_page_desc()
  virtio-balloon: avoid cpu_get_physical_page_desc()
  vhost: avoid cpu_get_physical_page_desc()
  kvm: avoid cpu_get_physical_page_desc()
  memory: remove CPUPhysMemoryClient
  xen: convert to MemoryListener API
  memory: temporarily add memory_region_get_ram_addr()
  xen, vga: add API for registering the framebuffer
  vhost: convert to MemoryListener API
  kvm: convert to MemoryListener API
  kvm: switch kvm slots to use host virtual address instead of ram_addr_t
  memory: add API for observing updates to the physical memory map
  memory: replace cpu_physical_sync_dirty_bitmap() with a memory API
  framebuffer: drop use of cpu_physical_sync_dirty_bitmap()
  loader: remove calls to cpu_get_physical_page_desc()
  framebuffer: drop use of cpu_get_physical_page_desc()
  memory: introduce memory_region_find()
  memory: add memory_region_is_logging()
  memory: add memory_region_is_rom()
  ...
commit f3c6a169a3
Author: Anthony Liguori
Date:   2012-01-03 14:39:05 -06:00

27 changed files with 702 additions and 414 deletions
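
The thread running through the series is the replacement of the push-style CPUPhysMemoryClient with the MemoryListener interface added to memory.h below. As a rough sketch of what a converted component looks like (the MyDevice type and the my_* callback names are hypothetical; only the MemoryListener fields and memory_listener_register() come from this series — and note that in this version the core invokes every callback unconditionally, so even unused hooks get stub implementations, as the vhost and kvm conversions do):

    #include "memory.h"

    typedef struct MyDevice {
        MemoryListener listener;  /* embedded so container_of() recovers the device */
        /* ... device state ... */
    } MyDevice;

    static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
    {
        MyDevice *d = container_of(l, MyDevice, listener);
        /* s->offset_within_address_space and s->size describe the new
           guest-visible mapping; s->mr tells us whether it is RAM-backed. */
        (void)d;
    }

    static void my_region_del(MemoryListener *l, MemoryRegionSection *s)
    {
        /* mapping dropped from the guest-visible map */
    }

    /* no-op stubs for the dirty-logging hooks this sketch does not use */
    static void my_log_start(MemoryListener *l, MemoryRegionSection *s) { }
    static void my_log_stop(MemoryListener *l, MemoryRegionSection *s) { }
    static void my_log_sync(MemoryListener *l, MemoryRegionSection *s) { }
    static void my_log_global_start(MemoryListener *l) { }
    static void my_log_global_stop(MemoryListener *l) { }

    static void my_device_init(MyDevice *d)
    {
        d->listener = (MemoryListener) {
            .region_add       = my_region_add,
            .region_del       = my_region_del,
            .log_start        = my_log_start,
            .log_stop         = my_log_stop,
            .log_sync         = my_log_sync,
            .log_global_start = my_log_global_start,
            .log_global_stop  = my_log_global_stop,
        };
        /* Registration replays every existing range as a region_add call. */
        memory_listener_register(&d->listener);
    }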


@@ -41,6 +41,7 @@
 #include "net.h"
 #include "gdbstub.h"
 #include "hw/smbios.h"
+#include "exec-memory.h"
 #ifdef TARGET_SPARC
 int graphic_width = 1024;
@@ -263,10 +264,7 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         return 0;
     }
-    if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) {
-        qemu_file_set_error(f, -EINVAL);
-        return -EINVAL;
-    }
+    memory_global_sync_dirty_bitmap(get_system_memory());
     if (stage == 1) {
         RAMBlock *block;


@@ -569,15 +569,6 @@ int cpu_physical_memory_set_dirty_tracking(int enable);
 int cpu_physical_memory_get_dirty_tracking(void);
-int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr);
-int cpu_physical_log_start(target_phys_addr_t start_addr,
-                           ram_addr_t size);
-int cpu_physical_log_stop(target_phys_addr_t start_addr,
-                          ram_addr_t size);
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */


@@ -38,7 +38,6 @@ typedef unsigned long ram_addr_t;
 typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
 typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
-ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
 /* This should only be used for ram local to a device.  */
 void *qemu_get_ram_ptr(ram_addr_t addr);
@@ -71,29 +70,6 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
 void cpu_unregister_map_client(void *cookie);
-struct CPUPhysMemoryClient;
-typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
-struct CPUPhysMemoryClient {
-    void (*set_memory)(struct CPUPhysMemoryClient *client,
-                       target_phys_addr_t start_addr,
-                       ram_addr_t size,
-                       ram_addr_t phys_offset,
-                       bool log_dirty);
-    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
-                             target_phys_addr_t start_addr,
-                             target_phys_addr_t end_addr);
-    int (*migration_log)(struct CPUPhysMemoryClient *client,
-                         int enable);
-    int (*log_start)(struct CPUPhysMemoryClient *client,
-                     target_phys_addr_t phys_addr, ram_addr_t size);
-    int (*log_stop)(struct CPUPhysMemoryClient *client,
-                    target_phys_addr_t phys_addr, ram_addr_t size);
-    QLIST_ENTRY(CPUPhysMemoryClient) list;
-};
-void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
-void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
 /* Coalesced MMIO regions are areas where write operations can be reordered.
  * This usually implies that write operations are side-effect free.  This allows
  * batching which can make a major impact on performance when using

exec.c

@@ -1732,124 +1732,6 @@ const CPULogItem cpu_log_items[] = {
     { 0, NULL, NULL },
 };
-#ifndef CONFIG_USER_ONLY
-static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
-    = QLIST_HEAD_INITIALIZER(memory_client_list);
-static void cpu_notify_set_memory(target_phys_addr_t start_addr,
-                                  ram_addr_t size,
-                                  ram_addr_t phys_offset,
-                                  bool log_dirty)
-{
-    CPUPhysMemoryClient *client;
-    QLIST_FOREACH(client, &memory_client_list, list) {
-        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
-    }
-}
-static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
-                                        target_phys_addr_t end)
-{
-    CPUPhysMemoryClient *client;
-    QLIST_FOREACH(client, &memory_client_list, list) {
-        int r = client->sync_dirty_bitmap(client, start, end);
-        if (r < 0)
-            return r;
-    }
-    return 0;
-}
-static int cpu_notify_migration_log(int enable)
-{
-    CPUPhysMemoryClient *client;
-    QLIST_FOREACH(client, &memory_client_list, list) {
-        int r = client->migration_log(client, enable);
-        if (r < 0)
-            return r;
-    }
-    return 0;
-}
-struct last_map {
-    target_phys_addr_t start_addr;
-    ram_addr_t size;
-    ram_addr_t phys_offset;
-};
-/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
- * address.  Each intermediate table provides the next L2_BITs of guest
- * physical address space.  The number of levels vary based on host and
- * guest configuration, making it efficient to build the final guest
- * physical address by seeding the L1 offset and shifting and adding in
- * each L2 offset as we recurse through them. */
-static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
-                                 void **lp, target_phys_addr_t addr,
-                                 struct last_map *map)
-{
-    int i;
-    if (*lp == NULL) {
-        return;
-    }
-    if (level == 0) {
-        PhysPageDesc *pd = *lp;
-        addr <<= L2_BITS + TARGET_PAGE_BITS;
-        for (i = 0; i < L2_SIZE; ++i) {
-            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
-                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
-                if (map->size &&
-                    start_addr == map->start_addr + map->size &&
-                    pd[i].phys_offset == map->phys_offset + map->size) {
-                    map->size += TARGET_PAGE_SIZE;
-                    continue;
-                } else if (map->size) {
-                    client->set_memory(client, map->start_addr,
-                                       map->size, map->phys_offset, false);
-                }
-                map->start_addr = start_addr;
-                map->size = TARGET_PAGE_SIZE;
-                map->phys_offset = pd[i].phys_offset;
-            }
-        }
-    } else {
-        void **pp = *lp;
-        for (i = 0; i < L2_SIZE; ++i) {
-            phys_page_for_each_1(client, level - 1, pp + i,
-                                 (addr << L2_BITS) | i, map);
-        }
-    }
-}
-static void phys_page_for_each(CPUPhysMemoryClient *client)
-{
-    int i;
-    struct last_map map = { };
-    for (i = 0; i < P_L1_SIZE; ++i) {
-        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
-                             l1_phys_map + i, i, &map);
-    }
-    if (map.size) {
-        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
-                           false);
-    }
-}
-void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
-{
-    QLIST_INSERT_HEAD(&memory_client_list, client, list);
-    phys_page_for_each(client);
-}
-void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
-{
-    QLIST_REMOVE(client, list);
-}
-#endif
 static int cmp1(const char *s1, int n, const char *s2)
 {
     if (strlen(s2) != n)
@@ -2126,7 +2008,11 @@ int cpu_physical_memory_set_dirty_tracking(int enable)
 {
     int ret = 0;
     in_migration = enable;
-    ret = cpu_notify_migration_log(!!enable);
+    if (enable) {
+        memory_global_dirty_log_start();
+    } else {
+        memory_global_dirty_log_stop();
+    }
     return ret;
 }
@@ -2135,45 +2021,6 @@ int cpu_physical_memory_get_dirty_tracking(void)
     return in_migration;
 }
-int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr)
-{
-    int ret;
-    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
-    return ret;
-}
-int cpu_physical_log_start(target_phys_addr_t start_addr,
-                           ram_addr_t size)
-{
-    CPUPhysMemoryClient *client;
-    QLIST_FOREACH(client, &memory_client_list, list) {
-        if (client->log_start) {
-            int r = client->log_start(client, start_addr, size);
-            if (r < 0) {
-                return r;
-            }
-        }
-    }
-    return 0;
-}
-int cpu_physical_log_stop(target_phys_addr_t start_addr,
-                          ram_addr_t size)
-{
-    CPUPhysMemoryClient *client;
-    QLIST_FOREACH(client, &memory_client_list, list) {
-        if (client->log_stop) {
-            int r = client->log_stop(client, start_addr, size);
-            if (r < 0) {
-                return r;
-            }
-        }
-    }
-    return 0;
-}
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 {
     ram_addr_t ram_addr;
@@ -2676,7 +2523,6 @@ void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
     subpage_t *subpage;
     assert(size);
-    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
     if (phys_offset == IO_MEM_UNASSIGNED) {
         region_offset = start_addr;
@@ -2749,17 +2595,6 @@ void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
     }
 }
-/* XXX: temporary until new memory mapping API */
-ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
-{
-    PhysPageDesc *p;
-    p = phys_page_find(addr >> TARGET_PAGE_BITS);
-    if (!p)
-        return IO_MEM_UNASSIGNED;
-    return p->phys_offset;
-}
 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
 {
     if (kvm_enabled())


@@ -22,6 +22,7 @@
 void framebuffer_update_display(
     DisplayState *ds,
+    MemoryRegion *address_space,
     target_phys_addr_t base,
     int cols, /* Width in pixels.  */
     int rows, /* Leight in pixels.  */
@@ -42,28 +43,22 @@ void framebuffer_update_display(
     int dirty;
     int i;
     ram_addr_t addr;
-    ram_addr_t pd;
-    ram_addr_t pd2;
+    MemoryRegionSection mem_section;
+    MemoryRegion *mem;
     i = *first_row;
     *first_row = -1;
     src_len = src_width * rows;
-    cpu_physical_sync_dirty_bitmap(base, base + src_len);
-    pd = cpu_get_physical_page_desc(base);
-    pd2 = cpu_get_physical_page_desc(base + src_len - 1);
-    /* We should reall check that this is a continuous ram region.
-       Instead we just check that the first and last pages are
-       both ram, and the right distance apart.  */
-    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM
-        || (pd2 & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
-        return;
-    }
-    pd = (pd & TARGET_PAGE_MASK) + (base & ~TARGET_PAGE_MASK);
-    if (((pd + src_len - 1) & TARGET_PAGE_MASK) != (pd2 & TARGET_PAGE_MASK)) {
+    mem_section = memory_region_find(address_space, base, src_len);
+    if (mem_section.size != src_len || !memory_region_is_ram(mem_section.mr)) {
         return;
     }
+    mem = mem_section.mr;
+    assert(mem);
+    assert(mem_section.offset_within_address_space == base);
+    memory_region_sync_dirty_bitmap(mem);
     src_base = cpu_physical_memory_map(base, &src_len, 0);
     /* If we can't map the framebuffer then bail.  We could try harder,
        but it's not really worth it as dirty flag tracking will probably
@@ -82,7 +77,7 @@ void framebuffer_update_display(
         dest -= dest_row_pitch * (rows - 1);
     }
     first = -1;
-    addr = pd;
+    addr = mem_section.offset_within_region;
     addr += i * src_width;
     src += i * src_width;
@@ -93,8 +88,8 @@ void framebuffer_update_display(
         dirty = 0;
         dirty_offset = 0;
         while (addr + dirty_offset < TARGET_PAGE_ALIGN(addr + src_width)) {
-            dirty |= cpu_physical_memory_get_dirty(addr + dirty_offset,
-                                                   VGA_DIRTY_FLAG);
+            dirty |= memory_region_get_dirty(mem, addr + dirty_offset,
+                                             DIRTY_MEMORY_VGA);
             dirty_offset += TARGET_PAGE_SIZE;
         }
@@ -112,7 +107,8 @@ void framebuffer_update_display(
     if (first < 0) {
         return;
     }
-    cpu_physical_memory_reset_dirty(pd, pd + src_len, VGA_DIRTY_FLAG);
+    memory_region_reset_dirty(mem, mem_section.offset_within_region, src_len,
+                              DIRTY_MEMORY_VGA);
     *first_row = first;
     *last_row = last;
     return;
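
The conversion above illustrates the lookup pattern this series encourages: memory_region_find() resolves a guest-physical range to a MemoryRegionSection, and the caller verifies that the whole range is backed by one RAM region before touching it. A condensed sketch of that check (variable names are illustrative):

    MemoryRegionSection sec = memory_region_find(address_space, base, src_len);

    if (sec.size != src_len || !memory_region_is_ram(sec.mr)) {
        return;  /* not fully RAM-backed: bail out, as the old two-page check did */
    }
    /* sec.offset_within_region is now a valid offset into sec.mr for
       memory_region_get_dirty() / memory_region_reset_dirty(). */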


@@ -1,12 +1,15 @@
 #ifndef QEMU_FRAMEBUFFER_H
 #define QEMU_FRAMEBUFFER_H
+#include "memory.h"
 /* Framebuffer device helper routines.  */
 typedef void (*drawfn)(void *, uint8_t *, const uint8_t *, int, int);
 void framebuffer_update_display(
     DisplayState *ds,
+    MemoryRegion *address_space,
     target_phys_addr_t base,
     int cols,
     int rows,


@@ -49,6 +49,8 @@
 #include "uboot_image.h"
 #include "loader.h"
 #include "fw_cfg.h"
+#include "memory.h"
+#include "exec-memory.h"
 #include <zlib.h>
@@ -674,7 +676,7 @@ static void rom_reset(void *unused)
 int rom_load_all(void)
 {
     target_phys_addr_t addr = 0;
-    int memtype;
+    MemoryRegionSection section;
     Rom *rom;
     QTAILQ_FOREACH(rom, &roms, next) {
@@ -690,9 +692,8 @@ int rom_load_all(void)
         }
         addr = rom->addr;
         addr += rom->romsize;
-        memtype = cpu_get_physical_page_desc(rom->addr) & (3 << IO_MEM_SHIFT);
-        if (memtype == IO_MEM_ROM)
-            rom->isrom = 1;
+        section = memory_region_find(get_system_memory(), rom->addr, 1);
+        rom->isrom = section.size && memory_region_is_rom(section.mr);
     }
     qemu_register_reset(rom_reset, NULL);
     roms_loaded = 1;
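
The loader no longer inspects phys_offset flags; it probes a single byte and asks the region itself. A minimal sketch of the same idiom (the addr variable is illustrative):

    /* Is the byte at `addr` backed by a read-only RAM (ROM) region? */
    MemoryRegionSection s = memory_region_find(get_system_memory(), addr, 1);
    bool isrom = s.size && memory_region_is_rom(s.mr);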


@@ -120,7 +120,7 @@ static void vgafb_update_display(void *opaque)
         break;
     }
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, sysbus_address_space(&s->busdev),
                                s->regs[R_BASEADDRESS] + s->fb_offset,
                                s->regs[R_HRES],
                                s->regs[R_VRES],


@@ -22,6 +22,7 @@
 #include "framebuffer.h"
 struct omap_lcd_panel_s {
+    MemoryRegion *sysmem;
     MemoryRegion iomem;
     qemu_irq irq;
     DisplayState *state;
@@ -211,7 +212,7 @@ static void omap_update_display(void *opaque)
     step = width * bpp >> 3;
     linesize = ds_get_linesize(omap_lcd->state);
-    framebuffer_update_display(omap_lcd->state,
+    framebuffer_update_display(omap_lcd->state, omap_lcd->sysmem,
                                frame_base, width, height,
                                step, linesize, 0,
                                omap_lcd->invalidate,
@@ -440,6 +441,7 @@ struct omap_lcd_panel_s *omap_lcdc_init(MemoryRegion *sysmem,
     s->irq = irq;
     s->dma = dma;
+    s->sysmem = sysmem;
     omap_lcdc_reset(s);
     memory_region_init_io(&s->iomem, &omap_lcdc_ops, s, "omap.lcdc", 0x100);


@@ -229,7 +229,7 @@ static void pl110_update_display(void *opaque)
     }
     dest_width *= s->cols;
     first = 0;
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, sysbus_address_space(&s->busdev),
                                s->upbase, s->cols, s->rows,
                                src_width, dest_width, 0,
                                s->invalidate,


@@ -30,6 +30,7 @@ struct DMAChannel {
 };
 struct PXA2xxLCDState {
+    MemoryRegion *sysmem;
     MemoryRegion iomem;
     qemu_irq irq;
     int irqlevel;
@@ -681,7 +682,7 @@ static void pxa2xx_lcdc_dma0_redraw_rot0(PXA2xxLCDState *s,
     dest_width = s->xres * s->dest_width;
     *miny = 0;
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, s->sysmem,
                                addr, s->xres, s->yres,
                                src_width, dest_width, s->dest_width,
                                s->invalidated,
@@ -708,7 +709,7 @@ static void pxa2xx_lcdc_dma0_redraw_rot90(PXA2xxLCDState *s,
     dest_width = s->yres * s->dest_width;
     *miny = 0;
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, s->sysmem,
                                addr, s->xres, s->yres,
                                src_width, s->dest_width, -dest_width,
                                s->invalidated,
@@ -739,7 +740,7 @@ static void pxa2xx_lcdc_dma0_redraw_rot180(PXA2xxLCDState *s,
     dest_width = s->xres * s->dest_width;
     *miny = 0;
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, s->sysmem,
                                addr, s->xres, s->yres,
                                src_width, -dest_width, -s->dest_width,
                                s->invalidated,
@@ -769,7 +770,7 @@ static void pxa2xx_lcdc_dma0_redraw_rot270(PXA2xxLCDState *s,
     dest_width = s->yres * s->dest_width;
     *miny = 0;
-    framebuffer_update_display(s->ds,
+    framebuffer_update_display(s->ds, s->sysmem,
                                addr, s->xres, s->yres,
                                src_width, -s->dest_width, dest_width,
                                s->invalidated,
@@ -985,6 +986,7 @@ PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem,
     s = (PXA2xxLCDState *) g_malloc0(sizeof(PXA2xxLCDState));
     s->invalidated = 1;
     s->irq = irq;
+    s->sysmem = sysmem;
     pxa2xx_lcdc_orientation(s, graphic_rotate);


@@ -253,3 +253,8 @@ void sysbus_del_io(SysBusDevice *dev, MemoryRegion *mem)
 {
     memory_region_del_subregion(get_system_io(), mem);
 }
+
+MemoryRegion *sysbus_address_space(SysBusDevice *dev)
+{
+    return get_system_memory();
+}


@@ -57,6 +57,7 @@ void sysbus_del_memory(SysBusDevice *dev, MemoryRegion *mem);
 void sysbus_add_io(SysBusDevice *dev, target_phys_addr_t addr,
                    MemoryRegion *mem);
 void sysbus_del_io(SysBusDevice *dev, MemoryRegion *mem);
+MemoryRegion *sysbus_address_space(SysBusDevice *dev);
 /* Legacy helper function for creating devices.  */
 DeviceState *sysbus_create_varargs(const char *name,


@@ -28,6 +28,7 @@
 #include "vga_int.h"
 #include "pixel_ops.h"
 #include "qemu-timer.h"
+#include "xen.h"
 //#define DEBUG_VGA
 //#define DEBUG_VGA_MEM
@@ -2222,6 +2223,7 @@ void vga_common_init(VGACommonState *s, int vga_ram_size)
     s->is_vbe_vmstate = 0;
 #endif
     memory_region_init_ram(&s->vram, NULL, "vga.vram", vga_ram_size);
+    xen_register_framebuffer(&s->vram);
     s->vram_ptr = memory_region_get_ram_ptr(&s->vram);
     s->vram_size = vga_ram_size;
     s->get_bpp = vga_get_bpp;


@@ -17,6 +17,7 @@
 #include <linux/vhost.h>
 static void vhost_dev_sync_region(struct vhost_dev *dev,
+                                  MemoryRegionSection *section,
                                   uint64_t mfirst, uint64_t mlast,
                                   uint64_t rfirst, uint64_t rlast)
 {
@@ -49,38 +50,50 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
                               ffsll(log) : ffs(log))) {
             ram_addr_t ram_addr;
             bit -= 1;
-            ram_addr = cpu_get_physical_page_desc(addr + bit * VHOST_LOG_PAGE);
-            cpu_physical_memory_set_dirty(ram_addr);
+            ram_addr = section->offset_within_region + bit * VHOST_LOG_PAGE;
+            memory_region_set_dirty(section->mr, ram_addr);
             log &= ~(0x1ull << bit);
         }
         addr += VHOST_LOG_CHUNK;
     }
 }
-static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
-                                          target_phys_addr_t start_addr,
-                                          target_phys_addr_t end_addr)
+static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
+                                   MemoryRegionSection *section,
+                                   target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr)
 {
-    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
     int i;
     if (!dev->log_enabled || !dev->started) {
         return 0;
     }
     for (i = 0; i < dev->mem->nregions; ++i) {
         struct vhost_memory_region *reg = dev->mem->regions + i;
-        vhost_dev_sync_region(dev, start_addr, end_addr,
+        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                               reg->guest_phys_addr,
                               range_get_last(reg->guest_phys_addr,
                                              reg->memory_size));
     }
     for (i = 0; i < dev->nvqs; ++i) {
         struct vhost_virtqueue *vq = dev->vqs + i;
-        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
+        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                               range_get_last(vq->used_phys, vq->used_size));
     }
     return 0;
 }
+static void vhost_log_sync(MemoryListener *listener,
+                           MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    target_phys_addr_t start_addr = section->offset_within_address_space;
+    target_phys_addr_t end_addr = start_addr + section->size;
+    vhost_sync_dirty_bitmap(dev, section, start_addr, end_addr);
+}
 /* Assign/unassign. Keep an unsorted array of non-overlapping
  * memory regions in dev->mem. */
 static void vhost_dev_unassign_memory(struct vhost_dev *dev,
@@ -250,7 +263,7 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
 {
     vhost_log_chunk_t *log;
     uint64_t log_base;
-    int r;
+    int r, i;
     if (size) {
         log = g_malloc0(size * sizeof *log);
     } else {
@@ -259,8 +272,10 @@ static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
     log_base = (uint64_t)(unsigned long)log;
     r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
     assert(r >= 0);
-    vhost_client_sync_dirty_bitmap(&dev->client, 0,
-                                   (target_phys_addr_t)~0x0ull);
+    for (i = 0; i < dev->n_mem_sections; ++i) {
+        vhost_sync_dirty_bitmap(dev, &dev->mem_sections[i],
+                                0, (target_phys_addr_t)~0x0ull);
+    }
     if (dev->log) {
         g_free(dev->log);
     }
@@ -335,31 +350,37 @@ static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
     return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
 }
-static void vhost_client_set_memory(CPUPhysMemoryClient *client,
-                                    target_phys_addr_t start_addr,
-                                    ram_addr_t size,
-                                    ram_addr_t phys_offset,
-                                    bool log_dirty)
+static void vhost_set_memory(MemoryListener *listener,
+                             MemoryRegionSection *section,
+                             bool add)
 {
-    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
-    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    target_phys_addr_t start_addr = section->offset_within_address_space;
+    ram_addr_t size = section->size;
+    bool log_dirty = memory_region_is_logging(section->mr);
     int s = offsetof(struct vhost_memory, regions) +
         (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
     uint64_t log_size;
     int r;
+    void *ram;
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
     dev->mem = g_realloc(dev->mem, s);
     if (log_dirty) {
-        flags = IO_MEM_UNASSIGNED;
+        add = false;
     }
     assert(size);
     /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
-    if (flags == IO_MEM_RAM) {
-        if (!vhost_dev_cmp_memory(dev, start_addr, size,
-                                  (uintptr_t)qemu_get_ram_ptr(phys_offset))) {
+    ram = memory_region_get_ram_ptr(section->mr);
+    if (add) {
+        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
             /* Region exists with same address. Nothing to do. */
             return;
         }
@@ -371,10 +392,9 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client,
     }
     vhost_dev_unassign_memory(dev, start_addr, size);
-    if (flags == IO_MEM_RAM) {
+    if (add) {
         /* Add given mapping, merging adjacent regions if any */
-        vhost_dev_assign_memory(dev, start_addr, size,
-                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
+        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
     } else {
         /* Remove old mapping for this memory, if any. */
         vhost_dev_unassign_memory(dev, start_addr, size);
@@ -410,6 +430,38 @@ static void vhost_client_set_memory(CPUPhysMemoryClient *client,
     }
 }
+static void vhost_region_add(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    ++dev->n_mem_sections;
+    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
+                                dev->n_mem_sections);
+    dev->mem_sections[dev->n_mem_sections - 1] = *section;
+    vhost_set_memory(listener, section, true);
+}
+static void vhost_region_del(MemoryListener *listener,
+                             MemoryRegionSection *section)
+{
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
+    int i;
+    vhost_set_memory(listener, section, false);
+    for (i = 0; i < dev->n_mem_sections; ++i) {
+        if (dev->mem_sections[i].offset_within_address_space
+            == section->offset_within_address_space) {
+            --dev->n_mem_sections;
+            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
+                    dev->n_mem_sections - i);
+            break;
+        }
+    }
+}
 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                     struct vhost_virtqueue *vq,
                                     unsigned idx, bool enable_log)
@@ -467,10 +519,10 @@ err_features:
     return r;
 }
-static int vhost_client_migration_log(CPUPhysMemoryClient *client,
-                                      int enable)
+static int vhost_migration_log(MemoryListener *listener, int enable)
 {
-    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
+    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
+                                         memory_listener);
     int r;
     if (!!enable == dev->log_enabled) {
         return 0;
@@ -500,6 +552,38 @@ static int vhost_client_migration_log(CPUPhysMemoryClient *client,
     return 0;
 }
+static void vhost_log_global_start(MemoryListener *listener)
+{
+    int r;
+    r = vhost_migration_log(listener, true);
+    if (r < 0) {
+        abort();
+    }
+}
+static void vhost_log_global_stop(MemoryListener *listener)
+{
+    int r;
+    r = vhost_migration_log(listener, false);
+    if (r < 0) {
+        abort();
+    }
+}
+static void vhost_log_start(MemoryListener *listener,
+                            MemoryRegionSection *section)
+{
+    /* FIXME: implement */
+}
+static void vhost_log_stop(MemoryListener *listener,
+                           MemoryRegionSection *section)
+{
+    /* FIXME: implement */
+}
 static int vhost_virtqueue_init(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
@@ -645,17 +729,23 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, bool force)
     }
     hdev->features = features;
-    hdev->client.set_memory = vhost_client_set_memory;
-    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
-    hdev->client.migration_log = vhost_client_migration_log;
-    hdev->client.log_start = NULL;
-    hdev->client.log_stop = NULL;
+    hdev->memory_listener = (MemoryListener) {
+        .region_add = vhost_region_add,
+        .region_del = vhost_region_del,
+        .log_start = vhost_log_start,
+        .log_stop = vhost_log_stop,
+        .log_sync = vhost_log_sync,
+        .log_global_start = vhost_log_global_start,
+        .log_global_stop = vhost_log_global_stop,
+    };
     hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
+    hdev->n_mem_sections = 0;
+    hdev->mem_sections = NULL;
     hdev->log = NULL;
     hdev->log_size = 0;
     hdev->log_enabled = false;
     hdev->started = false;
-    cpu_register_phys_memory_client(&hdev->client);
+    memory_listener_register(&hdev->memory_listener);
     hdev->force = force;
     return 0;
 fail:
@@ -666,8 +756,9 @@ fail:
 void vhost_dev_cleanup(struct vhost_dev *hdev)
 {
-    cpu_unregister_phys_memory_client(&hdev->client);
+    memory_listener_unregister(&hdev->memory_listener);
     g_free(hdev->mem);
+    g_free(hdev->mem_sections);
     close(hdev->control);
 }
@@ -808,8 +899,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
                              hdev->vqs + i,
                              i);
     }
-    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
-                                   (target_phys_addr_t)~0x0ull);
+    for (i = 0; i < hdev->n_mem_sections; ++i) {
+        vhost_sync_dirty_bitmap(hdev, &hdev->mem_sections[i],
+                                0, (target_phys_addr_t)~0x0ull);
+    }
     r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);


@@ -3,6 +3,7 @@
 #include "hw/hw.h"
 #include "hw/virtio.h"
+#include "memory.h"
 /* Generic structures common for any vhost based device. */
 struct vhost_virtqueue {
@@ -26,9 +27,11 @@ typedef unsigned long vhost_log_chunk_t;
 struct vhost_memory;
 struct vhost_dev {
-    CPUPhysMemoryClient client;
+    MemoryListener memory_listener;
     int control;
     struct vhost_memory *mem;
+    int n_mem_sections;
+    MemoryRegionSection *mem_sections;
     struct vhost_virtqueue *vqs;
     int nvqs;
     unsigned long long features;


@@ -21,6 +21,7 @@
 #include "balloon.h"
 #include "virtio-balloon.h"
 #include "kvm.h"
+#include "exec-memory.h"
 #if defined(__linux__)
 #include <sys/mman.h>
@@ -70,6 +71,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOBalloon *s = to_virtio_balloon(vdev);
     VirtQueueElement elem;
+    MemoryRegionSection section;
     while (virtqueue_pop(vq, &elem)) {
         size_t offset = 0;
@@ -82,13 +84,16 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
             offset += 4;
-            addr = cpu_get_physical_page_desc(pa);
-            if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
+            /* FIXME: remove get_system_memory(), but how? */
+            section = memory_region_find(get_system_memory(), pa, 1);
+            if (!section.size || !memory_region_is_ram(section.mr))
                 continue;
-            /* Using qemu_get_ram_ptr is bending the rules a bit, but
+            /* Using memory_region_get_ram_ptr is bending the rules a bit, but
                should be OK because we only want a single page.  */
-            balloon_page(qemu_get_ram_ptr(addr), !!(vq == s->dvq));
+            addr = section.offset_within_region;
+            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
+                         !!(vq == s->dvq));
         }
         virtqueue_push(vq, &elem, offset);


@@ -49,6 +49,9 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size,
                    struct MemoryRegion *mr);
 #endif
+struct MemoryRegion;
+void xen_register_framebuffer(struct MemoryRegion *mr);
 #if defined(CONFIG_XEN) && CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
 #  define HVM_MAX_VCPUS 32
 #endif

kvm-all.c

@@ -27,6 +27,7 @@
 #include "gdbstub.h"
 #include "kvm.h"
 #include "bswap.h"
+#include "memory.h"
 /* This check must be after config-host.h is included */
 #ifdef CONFIG_EVENTFD
@@ -50,7 +51,7 @@ typedef struct KVMSlot
 {
     target_phys_addr_t start_addr;
     ram_addr_t memory_size;
-    ram_addr_t phys_offset;
+    void *ram;
     int slot;
     int flags;
 } KVMSlot;
@@ -146,17 +147,16 @@ static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
     return found;
 }
-int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
-                                      target_phys_addr_t *phys_addr)
+int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
+                                       target_phys_addr_t *phys_addr)
 {
     int i;
     for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
         KVMSlot *mem = &s->slots[i];
-        if (ram_addr >= mem->phys_offset &&
-            ram_addr < mem->phys_offset + mem->memory_size) {
-            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
+        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
+            *phys_addr = mem->start_addr + (ram - mem->ram);
             return 1;
         }
     }
@@ -171,7 +171,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
     mem.slot = slot->slot;
     mem.guest_phys_addr = slot->start_addr;
     mem.memory_size = slot->memory_size;
-    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
+    mem.userspace_addr = (unsigned long)slot->ram;
     mem.flags = slot->flags;
     if (s->migration_log) {
         mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
@@ -290,16 +290,28 @@ static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
     return kvm_slot_dirty_pages_log_change(mem, log_dirty);
 }
-static int kvm_log_start(CPUPhysMemoryClient *client,
-                         target_phys_addr_t phys_addr, ram_addr_t size)
+static void kvm_log_start(MemoryListener *listener,
+                          MemoryRegionSection *section)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size, true);
+    int r;
+    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
+                                   section->size, true);
+    if (r < 0) {
+        abort();
+    }
 }
-static int kvm_log_stop(CPUPhysMemoryClient *client,
-                        target_phys_addr_t phys_addr, ram_addr_t size)
+static void kvm_log_stop(MemoryListener *listener,
+                         MemoryRegionSection *section)
 {
-    return kvm_dirty_pages_log_change(phys_addr, size, false);
+    int r;
+    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
+                                   section->size, false);
+    if (r < 0) {
+        abort();
+    }
 }
 static int kvm_set_migration_log(int enable)
@@ -328,16 +340,12 @@ static int kvm_set_migration_log(int enable)
 }
 /* get kvm's dirty pages bitmap and update qemu's */
-static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
-                                         unsigned long *bitmap,
-                                         unsigned long offset,
-                                         unsigned long mem_size)
+static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
+                                         unsigned long *bitmap)
 {
     unsigned int i, j;
     unsigned long page_number, addr, addr1, c;
-    ram_addr_t ram_addr;
-    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
-        HOST_LONG_BITS;
+    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
     /*
      * bitmap-traveling is faster than memory-traveling (for addr...)
@@ -351,9 +359,8 @@ static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
             c &= ~(1ul << j);
             page_number = i * HOST_LONG_BITS + j;
             addr1 = page_number * TARGET_PAGE_SIZE;
-            addr = offset + addr1;
-            ram_addr = cpu_get_physical_page_desc(addr);
-            cpu_physical_memory_set_dirty(ram_addr);
+            addr = section->offset_within_region + addr1;
+            memory_region_set_dirty(section->mr, addr);
         } while (c != 0);
     }
 }
@@ -370,14 +377,15 @@ static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
  * @start_add: start of logged region.
 * @end_addr: end of logged region.
 */
-static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                          target_phys_addr_t end_addr)
+static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
     KVMDirtyLog d;
     KVMSlot *mem;
     int ret = 0;
+    target_phys_addr_t start_addr = section->offset_within_address_space;
+    target_phys_addr_t end_addr = start_addr + section->size;
     d.dirty_bitmap = NULL;
     while (start_addr < end_addr) {
@@ -416,8 +424,7 @@ static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
             break;
         }
-        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
-                                      mem->start_addr, mem->memory_size);
+        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
         start_addr = mem->start_addr + mem->memory_size;
     }
     g_free(d.dirty_bitmap);
@@ -520,21 +527,27 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
-static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
-                             ram_addr_t phys_offset, bool log_dirty)
+static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
 {
     KVMState *s = kvm_state;
-    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
     KVMSlot *mem, old;
     int err;
+    MemoryRegion *mr = section->mr;
+    bool log_dirty = memory_region_is_logging(mr);
+    target_phys_addr_t start_addr = section->offset_within_address_space;
+    ram_addr_t size = section->size;
+    void *ram = NULL;
     /* kvm works in page size chunks, but the function may be called
        with sub-page size and unaligned start address. */
     size = TARGET_PAGE_ALIGN(size);
     start_addr = TARGET_PAGE_ALIGN(start_addr);
-    /* KVM does not support read-only slots */
-    phys_offset &= ~IO_MEM_ROM;
+    if (!memory_region_is_ram(mr)) {
+        return;
+    }
+    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region;
     while (1) {
         mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
@@ -542,9 +555,9 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
             break;
         }
-        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
+        if (add && start_addr >= mem->start_addr &&
             (start_addr + size <= mem->start_addr + mem->memory_size) &&
-            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
+            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
@@ -571,12 +584,11 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
-            old.start_addr == start_addr && old.memory_size < size &&
-            flags < IO_MEM_UNASSIGNED) {
+            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
-            mem->phys_offset = old.phys_offset;
+            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);
            err = kvm_set_user_memory_region(s, mem);
@@ -587,7 +599,7 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
            }
            start_addr += old.memory_size;
-            phys_offset += old.memory_size;
+            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }
@@ -597,7 +609,7 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
-            mem->phys_offset = old.phys_offset;
+            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);
            err = kvm_set_user_memory_region(s, mem);
@@ -621,7 +633,7 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
-            mem->phys_offset = old.phys_offset + size_delta;
+            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);
            err = kvm_set_user_memory_region(s, mem);
@@ -637,14 +649,13 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
     if (!size) {
         return;
     }
-    /* KVM does not need to know about this memory */
-    if (flags >= IO_MEM_UNASSIGNED) {
+    if (!add) {
         return;
     }
     mem = kvm_alloc_slot(s);
     mem->memory_size = size;
     mem->start_addr = start_addr;
-    mem->phys_offset = phys_offset;
+    mem->ram = ram;
     mem->flags = kvm_mem_flags(s, log_dirty);
     err = kvm_set_user_memory_region(s, mem);
@@ -655,33 +666,53 @@ static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
     }
 }
-static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
-                                  target_phys_addr_t start_addr,
-                                  ram_addr_t size, ram_addr_t phys_offset,
-                                  bool log_dirty)
+static void kvm_region_add(MemoryListener *listener,
+                           MemoryRegionSection *section)
 {
-    kvm_set_phys_mem(start_addr, size, phys_offset, log_dirty);
+    kvm_set_phys_mem(section, true);
 }
-static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
-                                        target_phys_addr_t start_addr,
-                                        target_phys_addr_t end_addr)
+static void kvm_region_del(MemoryListener *listener,
+                           MemoryRegionSection *section)
 {
-    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    kvm_set_phys_mem(section, false);
 }
-static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
-                                    int enable)
+static void kvm_log_sync(MemoryListener *listener,
+                         MemoryRegionSection *section)
 {
-    return kvm_set_migration_log(enable);
+    int r;
+    r = kvm_physical_sync_dirty_bitmap(section);
+    if (r < 0) {
+        abort();
+    }
 }
-static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
-    .set_memory = kvm_client_set_memory,
-    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
-    .migration_log = kvm_client_migration_log,
+static void kvm_log_global_start(struct MemoryListener *listener)
+{
+    int r;
+    r = kvm_set_migration_log(1);
+    assert(r >= 0);
+}
+static void kvm_log_global_stop(struct MemoryListener *listener)
+{
+    int r;
+    r = kvm_set_migration_log(0);
+    assert(r >= 0);
+}
+static MemoryListener kvm_memory_listener = {
+    .region_add = kvm_region_add,
+    .region_del = kvm_region_del,
     .log_start = kvm_log_start,
     .log_stop = kvm_log_stop,
+    .log_sync = kvm_log_sync,
+    .log_global_start = kvm_log_global_start,
+    .log_global_stop = kvm_log_global_stop,
 };
 static void kvm_handle_interrupt(CPUState *env, int mask)
@@ -789,7 +820,7 @@ int kvm_init(void)
     }
     kvm_state = s;
-    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);
+    memory_listener_register(&kvm_memory_listener);
     s->many_ioeventfds = kvm_check_many_ioeventfds();
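
With KVM slots now keyed by host virtual address, translating a host pointer back to a guest physical address is a containment check plus offset arithmetic, which is all kvm_physical_memory_addr_from_host() above does. A stand-alone sketch of the same arithmetic (the HostSlot type is hypothetical; the real code iterates over KVMState slots):

    typedef struct HostSlot {
        target_phys_addr_t start_addr;  /* guest physical base of the slot */
        ram_addr_t memory_size;
        void *ram;                      /* host virtual base (userspace_addr) */
    } HostSlot;

    /* Returns true and fills *gpa if hva lies inside the slot. */
    static bool slot_host_to_gpa(const HostSlot *slot, void *hva,
                                 target_phys_addr_t *gpa)
    {
        char *base = slot->ram;
        char *p = hva;
        if (p >= base && p < base + slot->memory_size) {
            *gpa = slot->start_addr + (p - base);
            return true;
        }
        return false;
    }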

kvm.h

@@ -188,8 +188,8 @@ static inline void cpu_synchronize_post_init(CPUState *env)
 #if !defined(CONFIG_USER_ONLY)
-int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
+int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       target_phys_addr_t *phys_addr);
 #endif
 #endif

memory.c

@@ -23,6 +23,10 @@
 unsigned memory_region_transaction_depth = 0;
 static bool memory_region_update_pending = false;
+static bool global_dirty_log = false;
+static QLIST_HEAD(, MemoryListener) memory_listeners
+    = QLIST_HEAD_INITIALIZER(memory_listeners);
 typedef struct AddrRange AddrRange;
@@ -334,11 +338,6 @@ static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
 static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
 {
-    if (fr->dirty_log_mask) {
-        Int128 end = addrrange_end(fr->addr);
-        cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
-                                       int128_get64(end));
-    }
     cpu_register_physical_memory(int128_get64(fr->addr.start),
                                  int128_get64(fr->addr.size),
                                  IO_MEM_UNASSIGNED);
@@ -346,14 +345,10 @@ static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
 static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
 {
-    cpu_physical_log_start(int128_get64(fr->addr.start),
-                           int128_get64(fr->addr.size));
 }
 static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
 {
-    cpu_physical_log_stop(int128_get64(fr->addr.start),
-                          int128_get64(fr->addr.size));
 }
 static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
@@ -515,6 +510,20 @@ static AddressSpace address_space_io = {
     .ops = &address_space_ops_io,
 };
+static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
+{
+    while (mr->parent) {
+        mr = mr->parent;
+    }
+    if (mr == address_space_memory.root) {
+        return &address_space_memory;
+    }
+    if (mr == address_space_io.root) {
+        return &address_space_io;
+    }
+    abort();
+}
 /* Render a memory region into the global view.  Ranges in @view obscure
  * ranges in @mr.
  */
@@ -683,6 +692,32 @@ static void address_space_update_ioeventfds(AddressSpace *as)
     as->ioeventfd_nb = ioeventfd_nb;
 }
+typedef void ListenerCallback(MemoryListener *listener,
+                              MemoryRegionSection *mrs);
+/* Want "void (&MemoryListener::*callback)(const MemoryRegionSection& s)" */
+static void memory_listener_update_region(FlatRange *fr, AddressSpace *as,
+                                          size_t callback_offset)
+{
+    MemoryRegionSection section = {
+        .mr = fr->mr,
+        .address_space = as->root,
+        .offset_within_region = fr->offset_in_region,
+        .size = int128_get64(fr->addr.size),
+        .offset_within_address_space = int128_get64(fr->addr.start),
+    };
+    MemoryListener *listener;
+    QLIST_FOREACH(listener, &memory_listeners, link) {
+        ListenerCallback *callback
+            = *(ListenerCallback **)((void *)listener + callback_offset);
+        callback(listener, &section);
+    }
+}
+#define MEMORY_LISTENER_UPDATE_REGION(fr, as, callback) \
+    memory_listener_update_region(fr, as, offsetof(MemoryListener, callback))
 static void address_space_update_topology_pass(AddressSpace *as,
                                                FlatView old_view,
                                                FlatView new_view,
@@ -715,6 +750,7 @@ static void address_space_update_topology_pass(AddressSpace *as,
             /* In old, but (not in new, or in new but attributes changed). */
             if (!adding) {
+                MEMORY_LISTENER_UPDATE_REGION(frold, as, region_del);
                 as->ops->range_del(as, frold);
             }
@@ -724,9 +760,11 @@ static void address_space_update_topology_pass(AddressSpace *as,
             if (adding) {
                 if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
+                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_stop);
                     as->ops->log_stop(as, frnew);
                 } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                     as->ops->log_start(as, frnew);
+                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, log_start);
                 }
             }
@@ -737,6 +775,7 @@ static void address_space_update_topology_pass(AddressSpace *as,
             if (adding) {
                 as->ops->range_add(as, frnew);
+                MEMORY_LISTENER_UPDATE_REGION(frnew, as, region_add);
             }
             ++inew;
@@ -832,6 +871,7 @@ void memory_region_init(MemoryRegion *mr,
     mr->offset = 0;
     mr->enabled = true;
     mr->terminates = false;
+    mr->ram = false;
     mr->readable = true;
     mr->readonly = false;
     mr->destructor = memory_region_destructor_none;
@@ -998,6 +1038,7 @@ void memory_region_init_ram(MemoryRegion *mr,
                             uint64_t size)
 {
     memory_region_init(mr, name, size);
+    mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram;
     mr->ram_addr = qemu_ram_alloc(dev, name, size, mr);
@@ -1011,6 +1052,7 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
                                 void *ptr)
 {
     memory_region_init(mr, name, size);
+    mr->ram = true;
     mr->terminates = true;
     mr->destructor = memory_region_destructor_ram_from_ptr;
     mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr, mr);
@@ -1066,6 +1108,21 @@ uint64_t memory_region_size(MemoryRegion *mr)
     return int128_get64(mr->size);
 }
+bool memory_region_is_ram(MemoryRegion *mr)
+{
+    return mr->ram;
+}
+bool memory_region_is_logging(MemoryRegion *mr)
+{
+    return mr->dirty_log_mask;
+}
+bool memory_region_is_rom(MemoryRegion *mr)
+{
+    return mr->ram && mr->readonly;
+}
 void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
 {
     mr->offset = offset;
@@ -1098,8 +1155,7 @@ void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
     FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
         if (fr->mr == mr) {
-            cpu_physical_sync_dirty_bitmap(int128_get64(fr->addr.start),
-                                           int128_get64(addrrange_end(fr->addr)));
+            MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory, log_sync);
         }
     }
 }
@@ -1368,6 +1424,121 @@ void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
     memory_region_update_topology(mr);
 }
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
+{
+    assert(mr->backend_registered);
+    return mr->ram_addr;
+}
+static int cmp_flatrange_addr(const void *addr_, const void *fr_)
+{
+    const AddrRange *addr = addr_;
+    const FlatRange *fr = fr_;
+    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
+        return -1;
+    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
+        return 1;
+    }
+    return 0;
+}
+static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
+{
+    return bsearch(&addr, as->current_map.ranges, as->current_map.nr,
+                   sizeof(FlatRange), cmp_flatrange_addr);
+}
+MemoryRegionSection memory_region_find(MemoryRegion *address_space,
+                                       target_phys_addr_t addr, uint64_t size)
+{
+    AddressSpace *as = memory_region_to_address_space(address_space);
+    AddrRange range = addrrange_make(int128_make64(addr),
+                                     int128_make64(size));
+    FlatRange *fr = address_space_lookup(as, range);
+    MemoryRegionSection ret = { .mr = NULL, .size = 0 };
+    if (!fr) {
+        return ret;
+    }
+    while (fr > as->current_map.ranges
+           && addrrange_intersects(fr[-1].addr, range)) {
+        --fr;
+    }
+    ret.mr = fr->mr;
+    range = addrrange_intersection(range, fr->addr);
+    ret.offset_within_region = fr->offset_in_region;
+    ret.offset_within_region += int128_get64(int128_sub(range.start,
+                                                        fr->addr.start));
+    ret.size = int128_get64(range.size);
+    ret.offset_within_address_space = int128_get64(range.start);
+    return ret;
+}
+void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
+{
+    AddressSpace *as = memory_region_to_address_space(address_space);
+    FlatRange *fr;
+    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, log_sync);
+    }
+}
+void memory_global_dirty_log_start(void)
+{
+    MemoryListener *listener;
+    global_dirty_log = true;
+    QLIST_FOREACH(listener, &memory_listeners, link) {
+        listener->log_global_start(listener);
+    }
+}
+void memory_global_dirty_log_stop(void)
+{
+    MemoryListener *listener;
+    global_dirty_log = false;
+    QLIST_FOREACH(listener, &memory_listeners, link) {
+        listener->log_global_stop(listener);
+    }
+}
+static void listener_add_address_space(MemoryListener *listener,
+                                       AddressSpace *as)
+{
+    FlatRange *fr;
+    if (global_dirty_log) {
+        listener->log_global_start(listener);
+    }
+    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
+        MemoryRegionSection section = {
+            .mr = fr->mr,
+            .address_space = as->root,
+            .offset_within_region = fr->offset_in_region,
+            .size = int128_get64(fr->addr.size),
+            .offset_within_address_space = int128_get64(fr->addr.start),
+        };
+        listener->region_add(listener, &section);
+    }
+}
+void memory_listener_register(MemoryListener *listener)
+{
+    QLIST_INSERT_HEAD(&memory_listeners, listener, link);
+    listener_add_address_space(listener, &address_space_memory);
+    listener_add_address_space(listener, &address_space_io);
+}
+void memory_listener_unregister(MemoryListener *listener)
+{
+    QLIST_REMOVE(listener, link);
+}
 void set_system_memory_map(MemoryRegion *mr)
 {
     address_space_memory.root = mr;

memory.h

@ -122,6 +122,7 @@ struct MemoryRegion {
IORange iorange; IORange iorange;
bool terminates; bool terminates;
bool readable; bool readable;
bool ram;
bool readonly; /* For RAM regions */ bool readonly; /* For RAM regions */
bool enabled; bool enabled;
MemoryRegion *alias; MemoryRegion *alias;
@ -147,6 +148,45 @@ struct MemoryRegionPortio {
#define PORTIO_END_OF_LIST() { } #define PORTIO_END_OF_LIST() { }
typedef struct MemoryRegionSection MemoryRegionSection;
/**
* MemoryRegionSection: describes a fragment of a #MemoryRegion
*
* @mr: the region, or %NULL if empty
* @address_space: the address space the region is mapped in
* @offset_within_region: the beginning of the section, relative to @mr's start
* @size: the size of the section; will not exceed @mr's boundaries
* @offset_within_address_space: the address of the first byte of the section
* relative to the region's address space
*/
struct MemoryRegionSection {
MemoryRegion *mr;
MemoryRegion *address_space;
target_phys_addr_t offset_within_region;
uint64_t size;
target_phys_addr_t offset_within_address_space;
};
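A minimal sketch, not part of this patch, of how the fields of a MemoryRegionSection fit together; section_host_ptr() is a hypothetical helper, and it assumes memory_region_get_ram_ptr() behaves as documented later in this header.

/* Hypothetical helper, for illustration only: convert a guest physical
 * address that falls inside a RAM-backed section into the host pointer
 * backing it, by combining offset_within_address_space and
 * offset_within_region. */
static void *section_host_ptr(MemoryRegionSection *section,
                              target_phys_addr_t gpa)
{
    assert(memory_region_is_ram(section->mr));
    assert(gpa >= section->offset_within_address_space);
    assert(gpa - section->offset_within_address_space < section->size);

    /* memory_region_get_ram_ptr() yields the host address of the region's
     * first byte; add the section's offset within the region, then the
     * byte's offset within the section. */
    return (uint8_t *)memory_region_get_ram_ptr(section->mr)
           + section->offset_within_region
           + (gpa - section->offset_within_address_space);
}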
typedef struct MemoryListener MemoryListener;
/**
* MemoryListener: callbacks structure for updates to the physical memory map
*
* Allows a component to adjust to changes in the guest-visible memory map.
* Use with memory_listener_register() and memory_listener_unregister().
*/
struct MemoryListener {
void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_global_start)(MemoryListener *listener);
void (*log_global_stop)(MemoryListener *listener);
QLIST_ENTRY(MemoryListener) link;
};
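A do-nothing listener, as a sketch of how this structure is meant to be filled in; example_listener, nop_region and nop_global are illustrative names only. In this version of the API the core invokes the callbacks without NULL checks (see listener_add_address_space() and memory_global_dirty_log_start() in memory.c above), so every hook should be non-NULL even if it does nothing.

static void nop_region(MemoryListener *listener, MemoryRegionSection *section)
{
}

static void nop_global(MemoryListener *listener)
{
}

static MemoryListener example_listener = {
    .region_add       = nop_region,
    .region_del       = nop_region,
    .log_start        = nop_region,
    .log_stop         = nop_region,
    .log_sync         = nop_region,
    .log_global_start = nop_global,
    .log_global_stop  = nop_global,
};

/* memory_listener_register(&example_listener) replays region_add for every
 * range currently mapped in the memory and I/O address spaces;
 * memory_listener_unregister() simply drops the listener from the list. */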
/** /**
* memory_region_init: Initialize a memory region * memory_region_init: Initialize a memory region
* *
@ -266,6 +306,33 @@ void memory_region_destroy(MemoryRegion *mr);
*/ */
uint64_t memory_region_size(MemoryRegion *mr); uint64_t memory_region_size(MemoryRegion *mr);
/**
* memory_region_is_ram: check whether a memory region is random access
*
 * Returns %true if a memory region is random access.
*
* @mr: the memory region being queried
*/
bool memory_region_is_ram(MemoryRegion *mr);
/**
* memory_region_is_logging: return whether a memory region is logging writes
*
* Returns %true if the memory region is logging writes
*
* @mr: the memory region being queried
*/
bool memory_region_is_logging(MemoryRegion *mr);
/**
* memory_region_is_rom: check whether a memory region is ROM
*
* Returns %true if a memory region is read-only memory.
*
* @mr: the memory region being queried
*/
bool memory_region_is_rom(MemoryRegion *mr);
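A sketch of how these predicates are typically combined in a listener's region_add callback, following the same classification the Xen conversion further down in this commit performs in xen_set_memory(); handle_section() is a hypothetical name, not part of the patch.

static void handle_section(MemoryRegionSection *section)
{
    if (!memory_region_is_ram(section->mr)) {
        return;                    /* not RAM (e.g. MMIO): nothing to map */
    }
    if (memory_region_is_rom(section->mr)) {
        /* read-only RAM: map it, but refuse guest writes */
    } else if (memory_region_is_logging(section->mr)) {
        /* RAM with dirty logging enabled: arrange for write tracking */
    } else {
        /* ordinary RAM */
    }
}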
/** /**
* memory_region_get_ram_ptr: Get a pointer into a RAM memory region. * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
* *
@ -491,6 +558,16 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
target_phys_addr_t offset, target_phys_addr_t offset,
MemoryRegion *subregion, MemoryRegion *subregion,
unsigned priority); unsigned priority);
/**
* memory_region_get_ram_addr: Get the ram address associated with a memory
* region
*
* DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
* code is being reworked.
*/
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
/** /**
* memory_region_del_subregion: Remove a subregion. * memory_region_del_subregion: Remove a subregion.
* *
@ -540,6 +617,37 @@ void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr);
void memory_region_set_alias_offset(MemoryRegion *mr, void memory_region_set_alias_offset(MemoryRegion *mr,
target_phys_addr_t offset); target_phys_addr_t offset);
/**
* memory_region_find: locate a MemoryRegion in an address space
*
* Locates the first #MemoryRegion within an address space given by
* @address_space that overlaps the range given by @addr and @size.
*
* Returns a #MemoryRegionSection that describes a contiguous overlap.
* It will have the following characteristics:
* .@offset_within_address_space >= @addr
* .@offset_within_address_space + .@size <= @addr + @size
* .@size = 0 iff no overlap was found
* .@mr is non-%NULL iff an overlap was found
*
* @address_space: a top-level (i.e. parentless) region that contains
* the region to be found
* @addr: start of the area within @address_space to be searched
* @size: size of the area to be searched
*/
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
target_phys_addr_t addr, uint64_t size);
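A sketch of the typical query, modelled on the sparc cpu_get_phys_page_debug() change further down in this commit: probe a single byte and treat a zero-sized result as unassigned. phys_addr_is_mapped() is a hypothetical wrapper; get_system_memory() comes from exec-memory.h.

static bool phys_addr_is_mapped(target_phys_addr_t phys_addr)
{
    MemoryRegionSection section;

    /* Ask for a 1-byte range; .size == 0 (and .mr == NULL) means nothing
     * is mapped there. */
    section = memory_region_find(get_system_memory(), phys_addr, 1);
    return section.size != 0;
}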
/**
* memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for an entire address space.
* @address_space: a top-level (i.e. parentless) region that contains the
* memory being synchronized
*/
void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
/** /**
* memory_region_transaction_begin: Start a transaction. * memory_region_transaction_begin: Start a transaction.
* *
@ -554,6 +662,32 @@ void memory_region_transaction_begin(void);
*/ */
void memory_region_transaction_commit(void); void memory_region_transaction_commit(void);
/**
* memory_listener_register: register callbacks to be called when memory
*                           sections are mapped into or unmapped from an
*                           address space
*
* @listener: an object containing the callbacks to be called
*/
void memory_listener_register(MemoryListener *listener);
/**
* memory_listener_unregister: undo the effect of memory_listener_register()
*
* @listener: an object containing the callbacks to be removed
*/
void memory_listener_unregister(MemoryListener *listener);
/**
* memory_global_dirty_log_start: begin dirty logging for all regions
*/
void memory_global_dirty_log_start(void);
/**
* memory_global_dirty_log_stop: end dirty logging for all regions
*/
void memory_global_dirty_log_stop(void);
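A sketch of how the global dirty-log interfaces are meant to be used together; observe_guest_writes() is a hypothetical caller, and the sync call assumes get_system_memory() from exec-memory.h.

static void observe_guest_writes(void)
{
    memory_global_dirty_log_start();   /* every listener gets log_global_start */

    /* ... while logging is enabled, this pushes the accumulated dirty bits
     * to every listener's log_sync callback ... */
    memory_global_sync_dirty_bitmap(get_system_memory());

    memory_global_dirty_log_stop();    /* every listener gets log_global_stop */
}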
void mtree_info(fprintf_function mon_printf, void *f); void mtree_info(fprintf_function mon_printf, void *f);
#endif #endif


@ -253,8 +253,7 @@ int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
if ((env->mcg_cap & MCG_SER_P) && addr if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) { && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
if (qemu_ram_addr_from_host(addr, &ram_addr) || if (qemu_ram_addr_from_host(addr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, !kvm_physical_memory_addr_from_host(env->kvm_state, addr, &paddr)) {
&paddr)) {
fprintf(stderr, "Hardware memory error for memory used by " fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!\n"); "QEMU itself instead of guest system!\n");
/* Hope we are lucky for AO MCE */ /* Hope we are lucky for AO MCE */
@ -286,8 +285,8 @@ int kvm_arch_on_sigbus(int code, void *addr)
/* Hope we are lucky for AO MCE */ /* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) || if (qemu_ram_addr_from_host(addr, &ram_addr) ||
!kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, !kvm_physical_memory_addr_from_host(first_cpu->kvm_state, addr,
&paddr)) { &paddr)) {
fprintf(stderr, "Hardware memory error for memory used by " fprintf(stderr, "Hardware memory error for memory used by "
"QEMU itself instead of guest system!: %p\n", addr); "QEMU itself instead of guest system!: %p\n", addr);
return 0; return 0;


@ -19,6 +19,7 @@
#include "cpu.h" #include "cpu.h"
#include "trace.h" #include "trace.h"
#include "exec-memory.h"
/* Sparc MMU emulation */ /* Sparc MMU emulation */
@ -839,13 +840,15 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{ {
target_phys_addr_t phys_addr; target_phys_addr_t phys_addr;
int mmu_idx = cpu_mmu_index(env); int mmu_idx = cpu_mmu_index(env);
MemoryRegionSection section;
if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
return -1; return -1;
} }
} }
if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED) { section = memory_region_find(get_system_memory(), phys_addr, 1);
if (!section.size) {
return -1; return -1;
} }
return phys_addr; return phys_addr;


@ -462,7 +462,7 @@ mipsnet_irq(uint32_t isr, uint32_t intctl) "set irq to %d (%02x)"
# xen-all.c # xen-all.c
xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx" xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx"
xen_client_set_memory(uint64_t start_addr, unsigned long size, unsigned long phys_offset, bool log_dirty) "%#"PRIx64" size %#lx, offset %#lx, log_dirty %i" xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "%#"PRIx64" size %#lx, log_dirty %i"
# xen-mapcache.c # xen-mapcache.c
xen_map_cache(uint64_t phys_addr) "want %#"PRIx64 xen_map_cache(uint64_t phys_addr) "want %#"PRIx64

xen-all.c

@ -33,6 +33,7 @@
#endif #endif
static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi; static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
/* Compatibility with older version */ /* Compatibility with older version */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a #if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
@ -62,6 +63,7 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
typedef struct XenPhysmap { typedef struct XenPhysmap {
target_phys_addr_t start_addr; target_phys_addr_t start_addr;
ram_addr_t size; ram_addr_t size;
MemoryRegion *mr;
target_phys_addr_t phys_offset; target_phys_addr_t phys_offset;
QLIST_ENTRY(XenPhysmap) list; QLIST_ENTRY(XenPhysmap) list;
@ -79,8 +81,9 @@ typedef struct XenIOState {
int send_vcpu; int send_vcpu;
struct xs_handle *xenstore; struct xs_handle *xenstore;
CPUPhysMemoryClient client; MemoryListener memory_listener;
QLIST_HEAD(, XenPhysmap) physmap; QLIST_HEAD(, XenPhysmap) physmap;
target_phys_addr_t free_phys_offset;
const XenPhysmap *log_for_dirtybit; const XenPhysmap *log_for_dirtybit;
Notifier exit; Notifier exit;
@ -225,13 +228,14 @@ static XenPhysmap *get_physmapping(XenIOState *state,
static int xen_add_to_physmap(XenIOState *state, static int xen_add_to_physmap(XenIOState *state,
target_phys_addr_t start_addr, target_phys_addr_t start_addr,
ram_addr_t size, ram_addr_t size,
target_phys_addr_t phys_offset) MemoryRegion *mr,
target_phys_addr_t offset_within_region)
{ {
unsigned long i = 0; unsigned long i = 0;
int rc = 0; int rc = 0;
XenPhysmap *physmap = NULL; XenPhysmap *physmap = NULL;
target_phys_addr_t pfn, start_gpfn; target_phys_addr_t pfn, start_gpfn;
RAMBlock *block; target_phys_addr_t phys_offset = memory_region_get_ram_addr(mr);
if (get_physmapping(state, start_addr, size)) { if (get_physmapping(state, start_addr, size)) {
return 0; return 0;
@ -244,17 +248,13 @@ static int xen_add_to_physmap(XenIOState *state,
* the linear framebuffer to be that region. * the linear framebuffer to be that region.
* Avoid tracking any regions that is not videoram and avoid tracking * Avoid tracking any regions that is not videoram and avoid tracking
* the legacy vga region. */ * the legacy vga region. */
QLIST_FOREACH(block, &ram_list.blocks, next) { if (mr == framebuffer && start_addr > 0xbffff) {
if (!strcmp(block->idstr, "vga.vram") && block->offset == phys_offset goto go_physmap;
&& start_addr > 0xbffff) {
goto go_physmap;
}
} }
return -1; return -1;
go_physmap: go_physmap:
DPRINTF("mapping vram to %llx - %llx, from %llx\n", DPRINTF("mapping vram to %llx - %llx\n", start_addr, start_addr + size);
start_addr, start_addr + size, phys_offset);
pfn = phys_offset >> TARGET_PAGE_BITS; pfn = phys_offset >> TARGET_PAGE_BITS;
start_gpfn = start_addr >> TARGET_PAGE_BITS; start_gpfn = start_addr >> TARGET_PAGE_BITS;
@ -333,7 +333,8 @@ static int xen_remove_from_physmap(XenIOState *state,
static int xen_add_to_physmap(XenIOState *state, static int xen_add_to_physmap(XenIOState *state,
target_phys_addr_t start_addr, target_phys_addr_t start_addr,
ram_addr_t size, ram_addr_t size,
target_phys_addr_t phys_offset) MemoryRegion *mr,
target_phys_addr_t offset_within_region)
{ {
return -ENOSYS; return -ENOSYS;
} }
@ -346,49 +347,62 @@ static int xen_remove_from_physmap(XenIOState *state,
} }
#endif #endif
static void xen_client_set_memory(struct CPUPhysMemoryClient *client, static void xen_set_memory(struct MemoryListener *listener,
target_phys_addr_t start_addr, MemoryRegionSection *section,
ram_addr_t size, bool add)
ram_addr_t phys_offset,
bool log_dirty)
{ {
XenIOState *state = container_of(client, XenIOState, client); XenIOState *state = container_of(listener, XenIOState, memory_listener);
ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK; target_phys_addr_t start_addr = section->offset_within_address_space;
ram_addr_t size = section->size;
bool log_dirty = memory_region_is_logging(section->mr);
hvmmem_type_t mem_type; hvmmem_type_t mem_type;
if (!(start_addr != phys_offset if (!memory_region_is_ram(section->mr)) {
&& ( (log_dirty && flags < IO_MEM_UNASSIGNED)
|| (!log_dirty && flags == IO_MEM_UNASSIGNED)))) {
return; return;
} }
trace_xen_client_set_memory(start_addr, size, phys_offset, log_dirty); if (!(section->mr != &ram_memory
&& ( (log_dirty && add) || (!log_dirty && !add)))) {
return;
}
trace_xen_client_set_memory(start_addr, size, log_dirty);
start_addr &= TARGET_PAGE_MASK; start_addr &= TARGET_PAGE_MASK;
size = TARGET_PAGE_ALIGN(size); size = TARGET_PAGE_ALIGN(size);
phys_offset &= TARGET_PAGE_MASK;
switch (flags) { if (add) {
case IO_MEM_RAM: if (!memory_region_is_rom(section->mr)) {
xen_add_to_physmap(state, start_addr, size, phys_offset); xen_add_to_physmap(state, start_addr, size,
break; section->mr, section->offset_within_region);
case IO_MEM_ROM: } else {
mem_type = HVMMEM_ram_ro; mem_type = HVMMEM_ram_ro;
if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
start_addr >> TARGET_PAGE_BITS, start_addr >> TARGET_PAGE_BITS,
size >> TARGET_PAGE_BITS)) { size >> TARGET_PAGE_BITS)) {
DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n", DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
start_addr); start_addr);
}
} }
break; } else {
case IO_MEM_UNASSIGNED:
if (xen_remove_from_physmap(state, start_addr, size) < 0) { if (xen_remove_from_physmap(state, start_addr, size) < 0) {
DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
} }
break;
} }
} }
static void xen_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
xen_set_memory(listener, section, true);
}
static void xen_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
xen_set_memory(listener, section, false);
}
static int xen_sync_dirty_bitmap(XenIOState *state, static int xen_sync_dirty_bitmap(XenIOState *state,
target_phys_addr_t start_addr, target_phys_addr_t start_addr,
ram_addr_t size) ram_addr_t size)
@ -432,43 +446,54 @@ static int xen_sync_dirty_bitmap(XenIOState *state,
return 0; return 0;
} }
static int xen_log_start(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size) static void xen_log_start(MemoryListener *listener,
MemoryRegionSection *section)
{ {
XenIOState *state = container_of(client, XenIOState, client); XenIOState *state = container_of(listener, XenIOState, memory_listener);
int r;
return xen_sync_dirty_bitmap(state, phys_addr, size); r = xen_sync_dirty_bitmap(state, section->offset_within_address_space,
section->size);
assert(r >= 0);
} }
static int xen_log_stop(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size) static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{ {
XenIOState *state = container_of(client, XenIOState, client); XenIOState *state = container_of(listener, XenIOState, memory_listener);
int r;
state->log_for_dirtybit = NULL; state->log_for_dirtybit = NULL;
/* Disable dirty bit tracking */ /* Disable dirty bit tracking */
return xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL); r = xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
assert(r >= 0);
} }
static int xen_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client, static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
target_phys_addr_t start_addr,
target_phys_addr_t end_addr)
{ {
XenIOState *state = container_of(client, XenIOState, client); XenIOState *state = container_of(listener, XenIOState, memory_listener);
int r;
return xen_sync_dirty_bitmap(state, start_addr, end_addr - start_addr); r = xen_sync_dirty_bitmap(state, section->offset_within_address_space,
section->size);
assert(r >= 0);
} }
static int xen_client_migration_log(struct CPUPhysMemoryClient *client, static void xen_log_global_start(MemoryListener *listener)
int enable)
{ {
return 0;
} }
static CPUPhysMemoryClient xen_cpu_phys_memory_client = { static void xen_log_global_stop(MemoryListener *listener)
.set_memory = xen_client_set_memory, {
.sync_dirty_bitmap = xen_client_sync_dirty_bitmap, }
.migration_log = xen_client_migration_log,
static MemoryListener xen_memory_listener = {
.region_add = xen_region_add,
.region_del = xen_region_del,
.log_start = xen_log_start, .log_start = xen_log_start,
.log_stop = xen_log_stop, .log_stop = xen_log_stop,
.log_sync = xen_log_sync,
.log_global_start = xen_log_global_start,
.log_global_stop = xen_log_global_stop,
}; };
/* VCPU Operations, MMIO, IO ring ... */ /* VCPU Operations, MMIO, IO ring ... */
@ -946,9 +971,9 @@ int xen_hvm_init(void)
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
state->client = xen_cpu_phys_memory_client; state->memory_listener = xen_memory_listener;
QLIST_INIT(&state->physmap); QLIST_INIT(&state->physmap);
cpu_register_phys_memory_client(&state->client); memory_listener_register(&state->memory_listener);
state->log_for_dirtybit = NULL; state->log_for_dirtybit = NULL;
/* Initialize backend core & drivers */ /* Initialize backend core & drivers */
@ -982,3 +1007,8 @@ void destroy_hvm_domain(void)
xc_interface_close(xc_handle); xc_interface_close(xc_handle);
} }
} }
void xen_register_framebuffer(MemoryRegion *mr)
{
framebuffer = mr;
}


@ -44,3 +44,7 @@ int xen_init(void)
{ {
return -ENOSYS; return -ENOSYS;
} }
void xen_register_framebuffer(MemoryRegion *mr)
{
}