util/mmap-alloc: Factor out reserving of a memory region to mmap_reserve()

We want to reserve a memory region without actually populating memory.
Let's factor that out.

Reviewed-by: Igor Kotrasinski <i.kotrasinsk@partner.samsung.com>
Acked-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-3-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
David Hildenbrand 2021-05-10 13:43:15 +02:00 committed by Paolo Bonzini
parent adad0b3ae8
commit 01c26ad6af

View File

@ -82,6 +82,38 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
return qemu_real_host_page_size;
}
/*
 * Reserve a new memory region of the requested size to be used for mapping
 * from the given fd (if any).
 *
 * The reservation is PROT_NONE and (where possible) anonymous/unpopulated,
 * so no backing store is committed yet.
 */
static void *mmap_reserve(size_t size, int fd)
{
    int flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * On ppc64 mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we should make sure to use the same page size, to
     * this end we mmap the supplied fd. In this case, set MAP_NORESERVE to
     * avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    if (fd != -1 && qemu_fd_getpagesize(fd) != qemu_real_host_page_size) {
        /* Huge-page fd: reserve from the fd to lock in its page size. */
        flags |= MAP_NORESERVE;
    } else {
        fd = -1;
        flags |= MAP_ANONYMOUS;
    }
#else
    /* Everywhere else an anonymous PROT_NONE mapping is sufficient. */
    fd = -1;
    flags |= MAP_ANONYMOUS;
#endif

    return mmap(0, size, PROT_NONE, flags, fd, 0);
}
static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
@ -104,7 +136,6 @@ void *qemu_ram_mmap(int fd,
int prot;
int flags;
int map_sync_flags = 0;
int guardfd;
size_t offset;
size_t total;
void *guardptr;
@ -116,30 +147,7 @@ void *qemu_ram_mmap(int fd,
 */
total = size + align;
#if defined(__powerpc64__) && defined(__linux__) guardptr = mmap_reserve(total, fd);
/* On ppc64 mappings in the same segment (aka slice) must share the same
* page size. Since we will be re-allocating part of this segment
* from the supplied fd, we should make sure to use the same page size, to
* this end we mmap the supplied fd. In this case, set MAP_NORESERVE to
* avoid allocating backing store memory.
* We do this unless we are using the system page size, in which case
* anonymous memory is OK.
*/
flags = MAP_PRIVATE;
if (fd == -1 || guard_pagesize == qemu_real_host_page_size) {
guardfd = -1;
flags |= MAP_ANONYMOUS;
} else {
guardfd = fd;
flags |= MAP_NORESERVE;
}
#else
guardfd = -1;
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif
guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);
if (guardptr == MAP_FAILED) {
    return MAP_FAILED;
}