/*
 * Internal definitions for a target's KVM support
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_INT_H
#define QEMU_KVM_INT_H

#include "exec/memory.h"
#include "qapi/qapi-types-common.h"
#include "qemu/accel.h"
#include "qemu/queue.h"
#include "sysemu/kvm.h"
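
/*
 * A single KVM memory slot: a contiguous range of guest physical address
 * space, backed by host memory at @ram, that is registered with the kernel
 * as one memslot.
 */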
typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
    int old_flags;
    /* Dirty bitmap cache for the slot */
    unsigned long *dirty_bmap;
    unsigned long dirty_bmap_size;
    /* Cache of the address space ID */
    int as_id;
    /* Cache of the offset in ram address space */
    ram_addr_t ram_start_offset;
} KVMSlot;
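
/* A memslot addition or removal, queued until the memory transaction commits */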
typedef struct KVMMemoryUpdate {
    QSIMPLEQ_ENTRY(KVMMemoryUpdate) next;
    MemoryRegionSection section;
} KVMMemoryUpdate;

typedef struct KVMMemoryListener {
    MemoryListener listener;
    KVMSlot *slots;
    unsigned int nr_used_slots;
    int as_id;
    /*
     * Pending memslot additions/removals, captured while a memory
     * transaction is in flight and applied in the listener's commit
     * callback.  Applying them there lets an update of an existing slot
     * (e.g. resize, split) appear atomic to KVM vCPU threads: if the
     * pending changes overlap an existing slot, KVM ioctls are inhibited
     * while the slot is removed and re-added under kvm_slots_lock.
     */
    QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_add;
    QSIMPLEQ_HEAD(, KVMMemoryUpdate) transaction_del;
} KVMMemoryListener;

#define KVM_MSI_HASHTAB_SIZE 256
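
/* State of the reaper thread that collects dirty pages from the dirty rings */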
enum KVMDirtyRingReaperState {
    KVM_DIRTY_RING_REAPER_NONE = 0,
    /* The reaper is sleeping */
    KVM_DIRTY_RING_REAPER_WAIT,
    /* The reaper is reaping dirty pages */
    KVM_DIRTY_RING_REAPER_REAPING,
};

/*
 * KVM reaper instance, responsible for collecting the KVM dirty bits
 * via the dirty ring.
 */
struct KVMDirtyRingReaper {
    /* The reaper thread */
    QemuThread reaper_thr;
    volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
    volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
};
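
/* Global state of the KVM accelerator */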
struct KVMState
{
    AccelState parent_obj;

    int nr_slots;
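    /* File descriptor of /dev/kvm */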
    int fd;
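    /* File descriptor of the VM, obtained via KVM_CREATE_VM */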
    int vmfd;
    int coalesced_mmio;
    int coalesced_pio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
    int max_nested_state_len;
    int many_ioeventfds;
    int intx_set_mask;
    int kvm_shadow_mem;
    bool kernel_irqchip_allowed;
    bool kernel_irqchip_required;
    OnOffAuto kernel_irqchip_split;
    bool sync_mmu;
    uint64_t manual_dirty_log_protect;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
    GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    unsigned long *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
    KVMMemoryListener memory_listener;
    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;

    /* For "info mtree -f" to tell if an MR is registered in KVM */
    int nr_as;
    struct KVMAs {
        KVMMemoryListener *ml;
        AddressSpace *as;
    } *as;
    uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
    uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
    bool kvm_dirty_ring_with_bitmap;
    uint64_t kvm_eager_split_size;  /* Eager Page Splitting chunk size */
    struct KVMDirtyRingReaper reaper;
    NotifyVmexitOption notify_vmexit;
    uint32_t notify_window;
    uint32_t xen_version;
    uint32_t xen_caps;
    uint16_t xen_gnttab_max_frames;
    uint16_t xen_evtchn_max_pirq;
};
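
/*
 * Register @kml as a MemoryListener for address space @as, so that changes
 * to @as are propagated to KVM memslots using address space id @as_id;
 * @name names the underlying MemoryListener.
 */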
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name);
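
/*
 * Cap the size of a single KVM memory slot; larger memory regions are
 * split across multiple slots.
 */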
void kvm_set_max_memslot_size(hwaddr max_slot_size);

/**
 * kvm_hwpoison_page_add:
 *
 * Parameters:
 *  @ram_addr: the address in RAM of the poisoned page
 *
 * Add a poisoned page to the list of poisoned pages.
 *
 * Return: None.
 */
void kvm_hwpoison_page_add(ram_addr_t ram_addr);

#endif