2015-09-24 14:41:17 +03:00
|
|
|
/*
|
|
|
|
* Support for RAM backed by mmaped host memory.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2015 Red Hat, Inc.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Michael S. Tsirkin <mst@redhat.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or
|
|
|
|
* later. See the COPYING file in the top-level directory.
|
|
|
|
*/
|
2016-06-22 20:11:19 +03:00
|
|
|
|
2019-04-22 03:48:48 +03:00
|
|
|
#ifdef CONFIG_LINUX
|
|
|
|
#include <linux/mman.h>
|
|
|
|
#else /* !CONFIG_LINUX */
|
|
|
|
#define MAP_SYNC 0x0
|
|
|
|
#define MAP_SHARED_VALIDATE 0x0
|
|
|
|
#endif /* CONFIG_LINUX */
|
|
|
|
|
2016-01-29 20:49:55 +03:00
|
|
|
#include "qemu/osdep.h"
|
2016-06-22 20:11:19 +03:00
|
|
|
#include "qemu/mmap-alloc.h"
|
2016-11-02 16:44:46 +03:00
|
|
|
#include "qemu/host-utils.h"
|
util/mmap-alloc: Support RAM_NORESERVE via MAP_NORESERVE under Linux
Let's support RAM_NORESERVE via MAP_NORESERVE on Linux. The flag has no
effect on most shared mappings - except for hugetlbfs and anonymous memory.
Linux man page:
"MAP_NORESERVE: Do not reserve swap space for this mapping. When swap
space is reserved, one has the guarantee that it is possible to modify
the mapping. When swap space is not reserved one might get SIGSEGV
upon a write if no physical memory is available. See also the discussion
of the file /proc/sys/vm/overcommit_memory in proc(5). In kernels before
2.6, this flag had effect only for private writable mappings."
Note that the "guarantee" part is wrong with memory overcommit in Linux.
Also, in Linux hugetlbfs is treated differently - we configure reservation
of huge pages from the pool, not reservation of swap space (huge pages
cannot be swapped).
The rough behavior is [1]:
a) !Hugetlbfs:
1) Without MAP_NORESERVE *or* with memory overcommit under Linux
disabled ("/proc/sys/vm/overcommit_memory == 2"), the following
accounting/reservation happens:
For a file backed map
SHARED or READ-only - 0 cost (the file is the map not swap)
PRIVATE WRITABLE - size of mapping per instance
For an anonymous or /dev/zero map
SHARED - size of mapping
PRIVATE READ-only - 0 cost (but of little use)
PRIVATE WRITABLE - size of mapping per instance
2) With MAP_NORESERVE, no accounting/reservation happens.
b) Hugetlbfs:
1) Without MAP_NORESERVE, huge pages are reserved.
2) With MAP_NORESERVE, no huge pages are reserved.
Note: With "/proc/sys/vm/overcommit_memory == 0", we were already able
to configure it for !hugetlbfs globally; this toggle now allows
configuring it more fine-grained, not for the whole system.
The target use case is virtio-mem, which dynamically exposes memory
inside a large, sparse memory area to the VM.
[1] https://www.kernel.org/doc/Documentation/vm/overcommit-accounting
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-10-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-05-10 14:43:22 +03:00
|
|
|
#include "qemu/cutils.h"
|
2021-05-10 14:43:21 +03:00
|
|
|
#include "qemu/error-report.h"
|
2015-09-24 14:41:17 +03:00
|
|
|
|
util/mmap-alloc: fix hugetlb support on ppc64
Since commit 8561c9244ddf1122d "exec: allocate PROT_NONE pages on top of
RAM", it is no longer possible to back guest RAM with hugepages on ppc64
hosts:
mmap(NULL, 285212672, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) =
0x3fff57000000
mmap(0x3fff57000000, 268435456, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED, 19, 0) = -1 EBUSY (Device or resource busy)
This is because on ppc64, Linux fixes a page size for a virtual address
at mmap time, so we can't switch a range of memory from anonymous
small pages to hugetlbs with MAP_FIXED.
See commit d0f13e3c20b6fb73ccb467bdca97fa7cf5a574cd
("[POWERPC] Introduce address space "slices"") in Linux
history for the details.
Detect this and create the PROT_NONE mapping using the same fd.
Naturally, this makes the guard page bigger with hugetlbfs.
Based on patch by Greg Kurz.
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Tested-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2015-12-02 22:14:12 +03:00
|
|
|
#define HUGETLBFS_MAGIC 0x958458f6
|
|
|
|
|
|
|
|
#ifdef CONFIG_LINUX
|
|
|
|
#include <sys/vfs.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Return the page size backing mappings of @fd (or the host page size when
 * @fd is -1 or not a hugetlbfs file).
 */
size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    if (fd != -1) {
        struct statfs fsbuf;
        int err;

        /* Retry fstatfs() if it is interrupted by a signal. */
        do {
            err = fstatfs(fd, &fsbuf);
        } while (err != 0 && errno == EINTR);

        if (err == 0 && fsbuf.f_type == HUGETLBFS_MAGIC) {
            /* On hugetlbfs, f_bsize is the huge page size. */
            return fsbuf.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size();
}
|
|
|
|
|
util/mmap-alloc: Support RAM_NORESERVE via MAP_NORESERVE under Linux
Let's support RAM_NORESERVE via MAP_NORESERVE on Linux. The flag has no
effect on most shared mappings - except for hugetlbfs and anonymous memory.
Linux man page:
"MAP_NORESERVE: Do not reserve swap space for this mapping. When swap
space is reserved, one has the guarantee that it is possible to modify
the mapping. When swap space is not reserved one might get SIGSEGV
upon a write if no physical memory is available. See also the discussion
of the file /proc/sys/vm/overcommit_memory in proc(5). In kernels before
2.6, this flag had effect only for private writable mappings."
Note that the "guarantee" part is wrong with memory overcommit in Linux.
Also, in Linux hugetlbfs is treated differently - we configure reservation
of huge pages from the pool, not reservation of swap space (huge pages
cannot be swapped).
The rough behavior is [1]:
a) !Hugetlbfs:
1) Without MAP_NORESERVE *or* with memory overcommit under Linux
disabled ("/proc/sys/vm/overcommit_memory == 2"), the following
accounting/reservation happens:
For a file backed map
SHARED or READ-only - 0 cost (the file is the map not swap)
PRIVATE WRITABLE - size of mapping per instance
For an anonymous or /dev/zero map
SHARED - size of mapping
PRIVATE READ-only - 0 cost (but of little use)
PRIVATE WRITABLE - size of mapping per instance
2) With MAP_NORESERVE, no accounting/reservation happens.
b) Hugetlbfs:
1) Without MAP_NORESERVE, huge pages are reserved.
2) With MAP_NORESERVE, no huge pages are reserved.
Note: With "/proc/sys/vm/overcommit_memory == 0", we were already able
to configure it for !hugetlbfs globally; this toggle now allows
configuring it more fine-grained, not for the whole system.
The target use case is virtio-mem, which dynamically exposes memory
inside a large, sparse memory area to the VM.
[1] https://www.kernel.org/doc/Documentation/vm/overcommit-accounting
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-10-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-05-10 14:43:22 +03:00
|
|
|
#define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory"
|
|
|
|
static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags)
|
|
|
|
{
|
|
|
|
#if defined(__linux__)
|
|
|
|
const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
|
|
|
|
const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
|
|
|
|
gchar *content = NULL;
|
|
|
|
const char *endptr;
|
|
|
|
unsigned int tmp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* hugeltb accounting is different than ordinary swap reservation:
|
|
|
|
* a) Hugetlb pages from the pool are reserved for both private and
|
|
|
|
* shared mappings. For shared mappings, all mappers have to specify
|
|
|
|
* MAP_NORESERVE.
|
|
|
|
* b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory.
|
|
|
|
*/
|
2022-03-23 18:57:22 +03:00
|
|
|
if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) {
|
util/mmap-alloc: Support RAM_NORESERVE via MAP_NORESERVE under Linux
Let's support RAM_NORESERVE via MAP_NORESERVE on Linux. The flag has no
effect on most shared mappings - except for hugetlbfs and anonymous memory.
Linux man page:
"MAP_NORESERVE: Do not reserve swap space for this mapping. When swap
space is reserved, one has the guarantee that it is possible to modify
the mapping. When swap space is not reserved one might get SIGSEGV
upon a write if no physical memory is available. See also the discussion
of the file /proc/sys/vm/overcommit_memory in proc(5). In kernels before
2.6, this flag had effect only for private writable mappings."
Note that the "guarantee" part is wrong with memory overcommit in Linux.
Also, in Linux hugetlbfs is treated differently - we configure reservation
of huge pages from the pool, not reservation of swap space (huge pages
cannot be swapped).
The rough behavior is [1]:
a) !Hugetlbfs:
1) Without MAP_NORESERVE *or* with memory overcommit under Linux
disabled ("/proc/sys/vm/overcommit_memory == 2"), the following
accounting/reservation happens:
For a file backed map
SHARED or READ-only - 0 cost (the file is the map not swap)
PRIVATE WRITABLE - size of mapping per instance
For an anonymous or /dev/zero map
SHARED - size of mapping
PRIVATE READ-only - 0 cost (but of little use)
PRIVATE WRITABLE - size of mapping per instance
2) With MAP_NORESERVE, no accounting/reservation happens.
b) Hugetlbfs:
1) Without MAP_NORESERVE, huge pages are reserved.
2) With MAP_NORESERVE, no huge pages are reserved.
Note: With "/proc/sys/vm/overcommit_memory == 0", we were already able
to configure it for !hugetlbfs globally; this toggle now allows
configuring it more fine-grained, not for the whole system.
The target use case is virtio-mem, which dynamically exposes memory
inside a large, sparse memory area to the VM.
[1] https://www.kernel.org/doc/Documentation/vm/overcommit-accounting
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-10-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-05-10 14:43:22 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Accountable mappings in the kernel that can be affected by MAP_NORESEVE
|
|
|
|
* are private writable mappings (see mm/mmap.c:accountable_mapping() in
|
|
|
|
* Linux). For all shared or readonly mappings, MAP_NORESERVE is always
|
|
|
|
* implicitly active -- no reservation; this includes shmem. The only
|
|
|
|
* exception is shared anonymous memory, it is accounted like private
|
|
|
|
* anonymous memory.
|
|
|
|
*/
|
|
|
|
if (readonly || (shared && fd >= 0)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* MAP_NORESERVE is globally ignored for applicable !hugetlb mappings when
|
|
|
|
* memory overcommit is set to "never". Sparse memory regions aren't really
|
|
|
|
* possible in this system configuration.
|
|
|
|
*
|
|
|
|
* Bail out now instead of silently committing way more memory than
|
|
|
|
* currently desired by the user.
|
|
|
|
*/
|
|
|
|
if (g_file_get_contents(OVERCOMMIT_MEMORY_PATH, &content, NULL, NULL) &&
|
|
|
|
!qemu_strtoui(content, &endptr, 0, &tmp) &&
|
|
|
|
(!endptr || *endptr == '\n')) {
|
|
|
|
if (tmp == 2) {
|
|
|
|
error_report("Skipping reservation of swap space is not supported:"
|
|
|
|
" \"" OVERCOMMIT_MEMORY_PATH "\" is \"2\"");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
/* this interface has been around since Linux 2.6 */
|
|
|
|
error_report("Skipping reservation of swap space is not supported:"
|
|
|
|
" Could not read: \"" OVERCOMMIT_MEMORY_PATH "\"");
|
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* E.g., FreeBSD used to define MAP_NORESERVE, never implemented it,
|
|
|
|
* and removed it a while ago.
|
|
|
|
*/
|
|
|
|
error_report("Skipping reservation of swap space is not supported");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-05-10 14:43:15 +03:00
|
|
|
/*
 * Reserve (but do not populate) a region of virtual address space big enough
 * to later map the given fd (if any) into it. The reservation is PROT_NONE,
 * so stray accesses fault until the range is activated.
 */
static void *mmap_reserve(size_t size, int fd)
{
    int map_flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * ppc64 Linux fixes one page size per segment ("slice"), so whenever the
     * fd uses a page size different from the host's, the reservation itself
     * must already be backed by that fd. MAP_NORESERVE keeps the kernel from
     * committing backing store for this placeholder mapping. With the plain
     * host page size (or no fd at all), anonymous memory works just as well.
     */
    if (fd != -1 && qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) {
        map_flags |= MAP_NORESERVE;
    } else {
        fd = -1;
        map_flags |= MAP_ANONYMOUS;
    }
#else
    /* Outside of ppc64 Linux an anonymous reservation is always fine. */
    fd = -1;
    map_flags |= MAP_ANONYMOUS;
#endif

    return mmap(NULL, size, PROT_NONE, map_flags, fd, 0);
}
|
|
|
|
|
2021-05-10 14:43:16 +03:00
|
|
|
/*
 * Activate memory in a reserved region from the given fd (if any), to make
 * it accessible.
 *
 * Maps @size bytes at exactly @ptr (MAP_FIXED, so @ptr must lie inside a
 * prior mmap_reserve() region) with protection/visibility derived from
 * @qemu_map_flags, reading from @fd at @map_offset when @fd != -1.
 * Returns the mapped address or MAP_FAILED.
 */
static void *mmap_activate(void *ptr, size_t size, int fd,
                           uint32_t qemu_map_flags, off_t map_offset)
{
    const bool noreserve = qemu_map_flags & QEMU_MAP_NORESERVE;
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
    const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
    int map_sync_flags = 0;
    int flags = MAP_FIXED;
    void *activated_ptr;

    /* Refuse to proceed if the kernel would silently ignore MAP_NORESERVE. */
    if (noreserve && !map_noreserve_effective(fd, qemu_map_flags)) {
        return MAP_FAILED;
    }

    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
    flags |= noreserve ? MAP_NORESERVE : 0;
    /* MAP_SYNC is only meaningful for shared mappings. */
    if (shared && sync) {
        map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
    }

    activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd,
                         map_offset);
    if (activated_ptr == MAP_FAILED && map_sync_flags) {
        if (errno == ENOTSUP) {
            /* Resolve the fd to a file name for the warning message. */
            char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
            char *file_name = g_malloc0(PATH_MAX);
            int len = readlink(proc_link, file_name, PATH_MAX - 1);

            if (len < 0) {
                /* readlink() failed; emit the warning with an empty name. */
                len = 0;
            }
            file_name[len] = '\0';
            fprintf(stderr, "Warning: requesting persistence across crashes "
                    "for backend file %s failed. Proceeding without "
                    "persistence, data might become corrupted in case of host "
                    "crash.\n", file_name);
            g_free(proc_link);
            g_free(file_name);
            warn_report("Using non DAX backing file with 'pmem=on' option"
                        " is deprecated");
        }
        /*
         * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
         * again without these flags to handle backwards compatibility.
         */
        activated_ptr = mmap(ptr, size, prot, flags, fd, map_offset);
    }
    return activated_ptr;
}
|
|
|
|
|
2021-05-10 14:43:14 +03:00
|
|
|
/*
 * Size of the guard page placed after a RAM block; this is also the page
 * size the reservation in mmap_reserve() is made with for the given fd.
 */
static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    return qemu_fd_getpagesize(fd);
#else
    return qemu_real_host_page_size();
#endif
}
|
|
|
|
|
2019-02-08 13:10:37 +03:00
|
|
|
/*
 * Map RAM of @size bytes from @fd (or anonymous memory when @fd == -1),
 * aligned to @align, honoring @qemu_map_flags and reading the file at
 * @map_offset. A PROT_NONE guard page follows the block. Returns the
 * aligned address, or MAP_FAILED on failure. Unmap with qemu_ram_munmap().
 */
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    uint32_t qemu_map_flags,
                    off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    size_t offset, total;
    void *ptr, *guardptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    total = size + align;

    /* Reserve an oversized PROT_NONE region we can carve the block out of. */
    guardptr = mmap_reserve(total, fd);
    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= guard_pagesize);

    /* Distance from the reservation start to the first @align boundary. */
    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                        map_offset);
    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    /* Release the unused alignment slack before the block. */
    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve as
     * a guard page guarding against potential buffer overflows.
     */
    total -= offset;
    if (total > size + guard_pagesize) {
        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
    }

    return ptr;
}
|
|
|
|
|
2019-01-31 02:36:05 +03:00
|
|
|
/*
 * Release a RAM block previously returned by qemu_ram_mmap(), including its
 * trailing guard page. @fd must be the fd the block was mapped from.
 */
void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (!ptr) {
        return;
    }
    /* Unmap both the RAM block and the guard page */
    munmap(ptr, size + mmap_guard_pagesize(fd));
}
|