util/mmap-alloc: Factor out calculation of the pagesize for the guard page

Let's factor out calculating the size of the guard page and rename the
variable to make it clearer that this pagesize only applies to the
guard page.

Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Cc: Igor Kotrasinski <i.kotrasinsk@partner.samsung.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-2-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
David Hildenbrand 2021-05-10 13:43:14 +02:00 committed by Paolo Bonzini
parent dbb92eea38
commit adad0b3ae8

View File

@@ -82,6 +82,16 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
return qemu_real_host_page_size;
}
/*
 * Return the page size to use for the guard page placed after a RAM
 * block mapping.  Normally this is the host page size; on ppc64/Linux
 * it must match the page size of the backing fd instead.
 */
static inline size_t mmap_guard_pagesize(int fd)
{
    size_t guard_pagesize = qemu_real_host_page_size;

#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    guard_pagesize = qemu_fd_getpagesize(fd);
#endif
    return guard_pagesize;
}
void *qemu_ram_mmap(int fd,
size_t size,
size_t align,
@@ -90,12 +100,12 @@ void *qemu_ram_mmap(int fd,
bool is_pmem,
off_t map_offset)
{
const size_t guard_pagesize = mmap_guard_pagesize(fd);
int prot;
int flags;
int map_sync_flags = 0;
int guardfd;
size_t offset;
size_t pagesize;
size_t total;
void *guardptr;
void *ptr;
@@ -116,8 +126,7 @@ void *qemu_ram_mmap(int fd,
* anonymous memory is OK.
*/
flags = MAP_PRIVATE;
pagesize = qemu_fd_getpagesize(fd);
if (fd == -1 || pagesize == qemu_real_host_page_size) {
if (fd == -1 || guard_pagesize == qemu_real_host_page_size) {
guardfd = -1;
flags |= MAP_ANONYMOUS;
} else {
@@ -126,7 +135,6 @@ void *qemu_ram_mmap(int fd,
}
#else
guardfd = -1;
pagesize = qemu_real_host_page_size;
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif
@@ -138,7 +146,7 @@ void *qemu_ram_mmap(int fd,
assert(is_power_of_2(align));
/* Always align to host page size */
assert(align >= pagesize);
assert(align >= guard_pagesize);
flags = MAP_FIXED;
flags |= fd == -1 ? MAP_ANONYMOUS : 0;
@@ -193,8 +201,8 @@ void *qemu_ram_mmap(int fd,
* a guard page guarding against potential buffer overflows.
*/
total -= offset;
if (total > size + pagesize) {
munmap(ptr + size + pagesize, total - size - pagesize);
if (total > size + guard_pagesize) {
munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
}
return ptr;
@@ -202,15 +210,8 @@ void *qemu_ram_mmap(int fd,
/*
 * Unmap a RAM block previously mapped with qemu_ram_mmap(), including the
 * trailing guard page that qemu_ram_mmap() reserved after it.
 *
 * fd must be the same file descriptor the block was mapped from, so the
 * guard page size is computed the same way on both paths.  A NULL ptr is
 * a no-op.
 *
 * Note: the scraped diff left both the pre-patch and post-patch munmap
 * calls in the text, which would unmap the region twice; this is the
 * post-patch (single-munmap) version the commit intends.
 */
void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + mmap_guard_pagesize(fd));
    }
}