Add target memory mapping API (Avi Kivity)
Devices accessing large amounts of memory (as with DMA) will wish to obtain
a pointer to guest memory rather than access it indirectly via
cpu_physical_memory_rw().  Add a new API to convert target addresses to host
pointers.

In case the target address does not correspond to RAM, a bounce buffer is
allocated.  To prevent the guest from causing the host to allocate unbounded
amounts of bounce buffer, this memory is limited (currently to one page).

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6394 c046a42c-6fe2-441c-8c8c-71466251a162
commit 6d16c2f88f
parent d268de04f2
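As context for reviewers, here is a minimal sketch (not part of this commit) of how a device model might use the new pair for a DMA-style write into guest memory; the helper name dma_write_to_guest is hypothetical, and error handling is reduced to a bail-out:

/* Hypothetical helper, for illustration only: copy data into guest
 * memory through the new map/unmap API instead of going through
 * cpu_physical_memory_rw() byte-copies. */
static void dma_write_to_guest(target_phys_addr_t guest_addr,
                               const uint8_t *data,
                               target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;

        /* May map less than requested (e.g. at a RAM/MMIO boundary),
         * or fail if the single bounce buffer is already in use. */
        void *host = cpu_physical_memory_map(guest_addr, &plen, 1);
        if (!host || plen == 0) {
            break;  /* a real device would retry or fall back to _rw() */
        }
        memcpy(host, data, plen);

        /* access_len == plen: everything mapped was actually written,
         * so unmap marks exactly that range dirty (or flushes the
         * bounce buffer back to the target address). */
        cpu_physical_memory_unmap(host, plen, 1, plen);

        guest_addr += plen;
        data += plen;
        size -= plen;
    }
}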
@@ -923,6 +923,12 @@ static inline void cpu_physical_memory_write(target_phys_addr_t addr,
     cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
 }
 
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write);
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
 uint32_t ldl_phys(target_phys_addr_t addr);
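The read side of the contract, again as a hypothetical sketch rather than code from this commit: mapping with is_write == 0 lets a caller inspect guest memory in place, and unmapping with is_write == 0 touches no dirty bits:

/* Hypothetical example: checksum guest bytes via a read-only mapping.
 * For non-RAM addresses this transparently reads through the
 * pre-filled bounce buffer. */
static uint32_t sum_guest_bytes(target_phys_addr_t addr,
                                target_phys_addr_t size)
{
    uint32_t sum = 0;
    target_phys_addr_t i;

    while (size > 0) {
        target_phys_addr_t plen = size;
        uint8_t *p = cpu_physical_memory_map(addr, &plen, 0);

        if (!p || plen == 0) {
            break;
        }
        for (i = 0; i < plen; i++) {
            sum += p[i];
        }
        /* is_write == 0: unmap marks nothing dirty; access_len is the
         * amount actually read. */
        cpu_physical_memory_unmap(p, plen, 0, plen);
        addr += plen;
        size -= plen;
    }
    return sum;
}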
exec.c (102 additions)
@@ -3045,6 +3045,108 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
     }
 }
 
+typedef struct {
+    void *buffer;
+    target_phys_addr_t addr;
+    target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    target_phys_addr_t len = *plen;
+    target_phys_addr_t done = 0;
+    int l;
+    uint8_t *ret = NULL;
+    uint8_t *ptr;
+    target_phys_addr_t page;
+    unsigned long pd;
+    PhysPageDesc *p;
+    unsigned long addr1;
+
+    while (len > 0) {
+        page = addr & TARGET_PAGE_MASK;
+        l = (page + TARGET_PAGE_SIZE) - addr;
+        if (l > len)
+            l = len;
+        p = phys_page_find(page >> TARGET_PAGE_BITS);
+        if (!p) {
+            pd = IO_MEM_UNASSIGNED;
+        } else {
+            pd = p->phys_offset;
+        }
+
+        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+            if (done || bounce.buffer) {
+                break;
+            }
+            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+            bounce.addr = addr;
+            bounce.len = l;
+            if (!is_write) {
+                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+            }
+            ptr = bounce.buffer;
+        } else {
+            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+            ptr = phys_ram_base + addr1;
+        }
+        if (!done) {
+            ret = ptr;
+        } else if (ret + done != ptr) {
+            break;
+        }
+
+        len -= l;
+        addr += l;
+        done += l;
+    }
+    *plen = done;
+    return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+                               int is_write, target_phys_addr_t access_len)
+{
+    if (buffer != bounce.buffer) {
+        if (is_write) {
+            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
+            while (access_len) {
+                unsigned l;
+                l = TARGET_PAGE_SIZE;
+                if (l > access_len)
+                    l = access_len;
+                if (!cpu_physical_memory_is_dirty(addr1)) {
+                    /* invalidate code */
+                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+                    /* set dirty bit */
+                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+                        (0xff & ~CODE_DIRTY_FLAG);
+                }
+                addr1 += l;
+                access_len -= l;
+            }
+        }
+        return;
+    }
+    if (is_write) {
+        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+    }
+    qemu_free(bounce.buffer);
+    bounce.buffer = NULL;
+}
+
 /* warning: addr must be aligned */
 uint32_t ldl_phys(target_phys_addr_t addr)
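One consequence of the single static BounceBuffer above is worth spelling out: at most one non-RAM (e.g. MMIO) mapping can exist at a time, which is the "limited (currently to one page)" bound from the commit message. A hypothetical sketch of the resulting behavior (addresses and function name illustrative, not from this commit):

/* Hypothetical demonstration of the one-page bounce-buffer limit.
 * mmio_addr1/mmio_addr2 stand for guest addresses backed by device
 * MMIO rather than RAM. */
static void bounce_limit_demo(target_phys_addr_t mmio_addr1,
                              target_phys_addr_t mmio_addr2)
{
    target_phys_addr_t len1 = 4, len2 = 4;
    void *a, *b;

    a = cpu_physical_memory_map(mmio_addr1, &len1, 0); /* claims bounce.buffer */
    b = cpu_physical_memory_map(mmio_addr2, &len2, 0); /* fails: b == NULL, len2 == 0 */
    (void)b;

    if (a) {
        cpu_physical_memory_unmap(a, len1, 0, 0);      /* releases the bounce buffer */
    }
    /* retrying the second mapping can now succeed */
}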