libvhost-user: Use most of mmap_offset as fd_offset

In the past, QEMU would create memory regions that could partially cover
hugetlb pages, making mmap() fail if we used the mmap_offset as an
fd_offset. For that reason, we never used the mmap_offset as an offset into
the fd and instead always mapped the fd from the very start.

However, that can easily result in us mmap'ing a lot of unnecessary
parts of an fd, possibly repeatedly.

QEMU nowadays does not create memory regions that partially cover huge
pages -- that never really worked with postcopy anyway. Since 2018, QEMU
has merged regions that partially cover huge pages (due to holes in boot
memory); see c1ece84e7c ("vhost: Huge page align and merge").

Let's be a bit careful and not unconditionally convert the
mmap_offset into an fd_offset. Instead, let's simply detect the hugetlb
size and pass as much as we can as fd_offset, making sure that we call
mmap() with a properly aligned offset.
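
The idea is just two small steps: figure out whether the fd sits on
hugetlbfs (and, if so, the huge page size), then split mmap_offset into a
huge-page-aligned fd_offset plus a small remainder. A minimal standalone
sketch of that split follows; the helper names (fd_hugepage_size(),
split_mmap_offset()) are made up for illustration and are not part of the
libvhost-user API:

    /* Sketch only: helper names are illustrative. */
    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/vfs.h>        /* fstatfs(), struct statfs */
    #include <linux/magic.h>    /* HUGETLBFS_MAGIC */

    /* Return the huge page size if fd lives on hugetlbfs, 0 otherwise. */
    static size_t fd_hugepage_size(int fd)
    {
        struct statfs fs;
        int ret;

        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;  /* huge page size backing this file */
        }
        return 0;
    }

    /*
     * Split mmap_offset into an fd_offset that is aligned to the huge page
     * size (safe to pass to mmap() on hugetlbfs) and the remainder, which
     * stays an offset into the resulting mapping.
     */
    static void split_mmap_offset(uint64_t mmap_offset, size_t hugepagesize,
                                  uint64_t *fd_offset, uint64_t *rest)
    {
        if (hugepagesize) {
            /* Huge page sizes are powers of two, so masking == ALIGN_DOWN. */
            *fd_offset = mmap_offset & ~((uint64_t)hugepagesize - 1);
            *rest = mmap_offset - *fd_offset;
        } else {
            /* No hugetlb alignment constraint: pass the whole offset to mmap(). */
            *fd_offset = mmap_offset;
            *rest = 0;
        }
    }

For example, with 2 MiB huge pages and mmap_offset = 0x80010000, this
yields fd_offset = 0x80000000 and a residual mmap_offset of 0x10000, so
mmap() maps memory_size + 0x10000 bytes starting at that aligned file
offset.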

With QEMU and a virtio-mem device that is fully plugged (50 GiB spread
across 50 memslots), the qemu-storage-daemon process consumes 1281 GiB of
VA space before this change and 58 GiB after it: at roughly 1 GiB per
memslot, mapping every region from fd offset 0 means the 50 virtio-mem
mappings alone add up to about 1 + 2 + ... + 50 GiB, whereas afterwards
each region maps only its own ~1 GiB.

================ Vhost user message ================
Request: VHOST_USER_ADD_MEM_REG (37)
Flags:   0x9
Size:    40
Fds: 59
Adding region 4
    guest_phys_addr: 0x0000000200000000
    memory_size:     0x0000000040000000
    userspace_addr:  0x00007fb73bffe000
    old mmap_offset: 0x0000000080000000
    fd_offset:       0x0000000080000000
    new mmap_offset: 0x0000000000000000
    mmap_addr:       0x00007f02f1bdc000
Successfully added new region
================ Vhost user message ================
Request: VHOST_USER_ADD_MEM_REG (37)
Flags:   0x9
Size:    40
Fds: 59
Adding region 5
    guest_phys_addr: 0x0000000240000000
    memory_size:     0x0000000040000000
    userspace_addr:  0x00007fb77bffe000
    old mmap_offset: 0x00000000c0000000
    fd_offset:       0x00000000c0000000
    new mmap_offset: 0x0000000000000000
    mmap_addr:       0x00007f0284000000
Successfully added new region

Reviewed-by: Raphael Norwitz <raphael@enfabrica.net>
Acked-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20240214151701.29906-12-david@redhat.com>
Tested-by: Mario Casquero <mcasquer@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -43,6 +43,8 @@
 #include <fcntl.h>
 #include <sys/ioctl.h>
 #include <linux/vhost.h>
+#include <sys/vfs.h>
+#include <linux/magic.h>
 
 #ifdef __NR_userfaultfd
 #include <linux/userfaultfd.h>
@@ -281,12 +283,32 @@ vu_remove_all_mem_regs(VuDev *dev)
     dev->nregions = 0;
 }
 
+static size_t
+get_fd_hugepagesize(int fd)
+{
+#if defined(__linux__)
+    struct statfs fs;
+    int ret;
+
+    do {
+        ret = fstatfs(fd, &fs);
+    } while (ret != 0 && errno == EINTR);
+
+    if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) {
+        return fs.f_bsize;
+    }
+#endif
+    return 0;
+}
+
 static void
 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
 {
     const uint64_t start_gpa = msg_region->guest_phys_addr;
     const uint64_t end_gpa = start_gpa + msg_region->memory_size;
     int prot = PROT_READ | PROT_WRITE;
+    uint64_t mmap_offset, fd_offset;
+    size_t hugepagesize;
     VuDevRegion *r;
     void *mmap_addr;
     int low = 0;
@@ -300,7 +322,7 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
            msg_region->memory_size);
     DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
            msg_region->userspace_addr);
-    DPRINT("    mmap_offset:     0x%016"PRIx64"\n",
+    DPRINT("    old mmap_offset: 0x%016"PRIx64"\n",
            msg_region->mmap_offset);
 
     if (dev->postcopy_listening) {
@@ -335,11 +357,31 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
     idx = low;
 
     /*
-     * We don't use offset argument of mmap() since the mapped address has
-     * to be page aligned, and we use huge pages.
+     * Convert most of msg_region->mmap_offset to fd_offset. In almost all
+     * cases, this will leave us with mmap_offset == 0, mmap()'ing only
+     * what we really need. Only if a memory region would partially cover
+     * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen
+     * anymore (i.e., modern QEMU).
+     *
+     * Note that mmap() with hugetlb would fail if the offset into the file
+     * is not aligned to the huge page size.
      */
-    mmap_addr = mmap(0, msg_region->memory_size + msg_region->mmap_offset,
-                     prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
+    hugepagesize = get_fd_hugepagesize(fd);
+    if (hugepagesize) {
+        fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize);
+        mmap_offset = msg_region->mmap_offset - fd_offset;
+    } else {
+        fd_offset = msg_region->mmap_offset;
+        mmap_offset = 0;
+    }
+
+    DPRINT("    fd_offset:       0x%016"PRIx64"\n",
+           fd_offset);
+    DPRINT("    new mmap_offset: 0x%016"PRIx64"\n",
+           mmap_offset);
+
+    mmap_addr = mmap(0, msg_region->memory_size + mmap_offset,
+                     prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
     if (mmap_addr == MAP_FAILED) {
         vu_panic(dev, "region mmap error: %s", strerror(errno));
         return;
@@ -354,7 +396,7 @@ _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
     r->size = msg_region->memory_size;
     r->qva = msg_region->userspace_addr;
     r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
-    r->mmap_offset = msg_region->mmap_offset;
+    r->mmap_offset = mmap_offset;
     dev->nregions++;
 
     if (dev->postcopy_listening) {