exec: allocate PROT_NONE pages on top of RAM

This inserts a read- and write-protected page between RAM and QEMU
memory, for file-backed RAM.
This makes it harder to exploit QEMU bugs resulting from buffer
overflows in devices using variants of cpu_physical_memory_map,
dma_memory_map, etc.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Michael S. Tsirkin 2015-09-10 16:41:17 +03:00
parent 9fac18f03a
commit 8561c9244d

 exec.c | 42
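The guard-page trick at the heart of this patch can be sketched in
isolation. Below is a minimal, self-contained illustration of the same
reserve-then-overmap pattern, not the QEMU code itself: guarded_alloc,
the anonymous backing, and the sizes are invented for this example
(QEMU overmaps a hugetlbfs fd instead).

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Reserve size + one page with PROT_NONE, then overmap the first
 * `size` bytes read/write with MAP_FIXED.  The trailing page stays
 * PROT_NONE, so a linear overflow off the end of the buffer faults
 * instead of silently corrupting whatever is mapped next. */
static void *guarded_alloc(size_t size)
{
    size_t page = getpagesize();
    void *ptr = mmap(NULL, size + page, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        return NULL;
    }
    if (mmap(ptr, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
        munmap(ptr, size + page);
        return NULL;
    }
    return ptr;
}

int main(void)
{
    size_t size = 16 * getpagesize();
    char *buf = guarded_alloc(size);
    if (!buf) {
        perror("guarded_alloc");
        return 1;
    }
    memset(buf, 0, size);   /* in bounds: fine */
    /* buf[size] = 0;          one byte past the end: SIGSEGV on the guard */
    munmap(buf, size + getpagesize());
    return 0;
}

The diff below does the same thing with two extra complications: the
usable region must be huge-page aligned (hence the oversized PROT_NONE
reservation and the QEMU_ALIGN_UP dance), and the slack around the
aligned region is unmapped again so that only a single guard page
survives.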

@@ -84,6 +84,9 @@ static MemoryRegion io_mem_unassigned;
  */
 #define RAM_RESIZEABLE (1 << 2)
 
+/* An extra page is mapped on top of this RAM.
+ */
+#define RAM_EXTRA (1 << 3)
 #endif
 
 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
@@ -1185,10 +1188,13 @@ static void *file_ram_alloc(RAMBlock *block,
     char *filename;
     char *sanitized_name;
     char *c;
+    void *ptr;
     void *area = NULL;
     int fd;
     uint64_t hpagesize;
+    uint64_t total;
     Error *local_err = NULL;
+    size_t offset;
 
     hpagesize = gethugepagesize(path, &local_err);
     if (local_err) {
@@ -1232,6 +1238,7 @@ static void *file_ram_alloc(RAMBlock *block,
     g_free(filename);
 
     memory = ROUND_UP(memory, hpagesize);
+    total = memory + hpagesize;
 
     /*
      * ftruncate is not supported by hugetlbfs in older
@@ -1243,16 +1250,40 @@ static void *file_ram_alloc(RAMBlock *block,
         perror("ftruncate");
     }
 
-    area = mmap(0, memory, PROT_READ | PROT_WRITE,
-                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
+    ptr = mmap(0, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+               -1, 0);
+    if (ptr == MAP_FAILED) {
+        error_setg_errno(errp, errno,
+                         "unable to allocate memory range for hugepages");
+        close(fd);
+        goto error;
+    }
+
+    offset = QEMU_ALIGN_UP((uintptr_t)ptr, hpagesize) - (uintptr_t)ptr;
+
+    area = mmap(ptr + offset, memory, PROT_READ | PROT_WRITE,
+                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE) |
+                MAP_FIXED,
                 fd, 0);
     if (area == MAP_FAILED) {
         error_setg_errno(errp, errno,
                          "unable to map backing store for hugepages");
+        munmap(ptr, total);
         close(fd);
         goto error;
     }
+
+    if (offset > 0) {
+        munmap(ptr, offset);
+    }
+
+    ptr += offset;
+    total -= offset;
+    if (total > memory + getpagesize()) {
+        munmap(ptr + memory + getpagesize(),
+               total - memory - getpagesize());
+    }
 
     if (mem_prealloc) {
         os_mem_prealloc(fd, area, memory);
     }
@@ -1570,6 +1601,7 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
     new_block->used_length = size;
     new_block->max_length = size;
     new_block->flags = share ? RAM_SHARED : 0;
+    new_block->flags |= RAM_EXTRA;
     new_block->host = file_ram_alloc(new_block, size,
                                      mem_path, errp);
     if (!new_block->host) {
@@ -1671,7 +1703,11 @@ static void reclaim_ramblock(RAMBlock *block)
         xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
     } else if (block->fd >= 0) {
-        munmap(block->host, block->max_length);
+        if (block->flags & RAM_EXTRA) {
+            munmap(block->host, block->max_length + getpagesize());
+        } else {
+            munmap(block->host, block->max_length);
+        }
         close(block->fd);
 #endif
     } else {
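One asymmetry worth noting in the teardown above: block->host points at
the aligned start of the RAM, and the guard page sits immediately after
max_length, so for RAM_EXTRA blocks the unmap length must include one
extra host page. The rest of the original PROT_NONE reservation was
already returned to the kernel at allocation time.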