virtio, pc: fixes, cleanups

A bunch of fixes all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJdOciFAAoJECgfDbjSjVRp/FwH/jNag2BkBfyvTNibuZi0V4uH
1nXmZQI9gQye6eDmkg0T0fl+8zpMNO7OIGRjhOemMOpfJ1yAYrkFI4OrbTV40lOe
UvH/6s8m5DlWPXrVvkv52esLPrV9cV4kULpS0yDfngfJ7CuQTMc/er9ZBav38iSG
kDH92LTWqIoZq08JUkdOxqjl5me8vnvQHieo/SgFNhuJon+RkGZvcN7j4zj/l1cg
ozeXoyBve27TnirGfgjY7/z3kTDTAKW8wWFj/gV2TllmPx4ReH6a6IYwPWrZaOXA
m4ZhTK0D8C2UPtbseHrfriYHkuL4eBK95OJvXc3vC/w14ZmP4tuaipOL7If9oRk=
=oU7D
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio, pc: fixes, cleanups

A bunch of fixes all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 25 Jul 2019 16:19:33 BST
# gpg:                using RSA key 281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream:
  virtio-balloon: free pbp more aggressively
  virtio-balloon: don't track subpages for the PBP
  virtio-balloon: Use temporary PBP only
  virtio-balloon: Rework pbp tracking data
  virtio-balloon: Better names for offset variables in inflate/deflate code
  virtio-balloon: Simplify deflate with pbp
  virtio-balloon: Fix QEMU crashes on pagesize > BALLOON_PAGE_SIZE
  virtio-balloon: Fix wrong sign extension of PFNs
  i386/acpi: show PCI Express bus on pxb-pcie expanders
  ioapic: kvm: Skip route updates for masked pins
  i386/acpi: fix gint overflow in crs_range_compare
  docs: clarify multiqueue vs multiple virtqueues

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 61c1e6a97d
@@ -324,6 +324,15 @@ must support changing some configuration aspects on the fly.
 Multiple queue support
 ----------------------
 
+Many devices have a fixed number of virtqueues. In this case the master
+already knows the number of available virtqueues without communicating with the
+slave.
+
+Some devices do not have a fixed number of virtqueues. Instead the maximum
+number of virtqueues is chosen by the slave. The number can depend on host
+resource availability or slave implementation details. Such devices are called
+multiple queue devices.
+
 Multiple queue support allows the slave to advertise the maximum number of
 queues. This is treated as a protocol extension, hence the slave has to
 implement protocol features first. The multiple queues feature is supported
@@ -339,6 +348,14 @@ queue in the sent message to identify a specified queue.
 The master enables queues by sending message ``VHOST_USER_SET_VRING_ENABLE``.
 vhost-user-net has historically automatically enabled the first queue pair.
 
+Slaves should always implement the ``VHOST_USER_PROTOCOL_F_MQ`` protocol
+feature, even for devices with a fixed number of virtqueues, since it is simple
+to implement and offers a degree of introspection.
+
+Masters must not rely on the ``VHOST_USER_PROTOCOL_F_MQ`` protocol feature for
+devices with a fixed number of virtqueues. Only true multiqueue devices
+require this protocol feature.
+
 Migration
 ---------
 
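
The documentation above boils down to a simple decision on the master side: only true multiqueue devices should depend on ``VHOST_USER_PROTOCOL_F_MQ``. A minimal C sketch of that decision follows; the two slave_* helpers are hypothetical stand-ins for the real VHOST_USER_GET_PROTOCOL_FEATURES / VHOST_USER_GET_QUEUE_NUM message exchange over the vhost-user socket.

    #include <stdint.h>
    #include <stdio.h>

    /* VHOST_USER_PROTOCOL_F_MQ is protocol feature bit 0 in the vhost-user
     * spec.  The helpers below only model the slave's answers. */
    #define VHOST_USER_PROTOCOL_F_MQ 0

    static uint64_t slave_protocol_features(void) { return 1ull << VHOST_USER_PROTOCOL_F_MQ; }
    static uint32_t slave_queue_num(void) { return 8; }

    int main(void)
    {
        uint32_t num_queues;

        if (slave_protocol_features() & (1ull << VHOST_USER_PROTOCOL_F_MQ)) {
            /* True multiqueue device: the slave advertises its maximum. */
            num_queues = slave_queue_num();
        } else {
            /* Fixed-virtqueue device: the master already knows the count from
             * the device type and must not rely on F_MQ. */
            num_queues = 2; /* e.g. one rx/tx pair for vhost-user-net */
        }

        printf("using %u virtqueues\n", (unsigned)num_queues);
        /* Each queue would then be enabled with VHOST_USER_SET_VRING_ENABLE. */
        return 0;
    }
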
@@ -755,10 +755,16 @@ static void crs_range_set_free(CrsRangeSet *range_set)
 
 static gint crs_range_compare(gconstpointer a, gconstpointer b)
 {
-     CrsRangeEntry *entry_a = *(CrsRangeEntry **)a;
-     CrsRangeEntry *entry_b = *(CrsRangeEntry **)b;
-
-     return (int64_t)entry_a->base - (int64_t)entry_b->base;
+    CrsRangeEntry *entry_a = *(CrsRangeEntry **)a;
+    CrsRangeEntry *entry_b = *(CrsRangeEntry **)b;
+
+    if (entry_a->base < entry_b->base) {
+        return -1;
+    } else if (entry_a->base > entry_b->base) {
+        return 1;
+    } else {
+        return 0;
+    }
 }
 
 /*
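
For context, the overflow the new comparator avoids can be shown in a few lines: crs_range_compare() returns gint (a 32-bit int), so the 64-bit difference of two range bases is truncated, and the subtraction itself can wrap. A standalone sketch with arbitrary example values:

    #include <stdint.h>
    #include <stdio.h>

    /* Old, broken approach: the 64-bit difference is truncated to the
     * 32-bit return type (and the subtraction itself can wrap). */
    static int compare_by_subtraction(uint64_t a, uint64_t b)
    {
        return (int64_t)a - (int64_t)b;
    }

    /* New approach from the hunk above: a three-way comparison never
     * overflows. */
    static int compare_three_way(uint64_t a, uint64_t b)
    {
        if (a < b) {
            return -1;
        } else if (a > b) {
            return 1;
        } else {
            return 0;
        }
    }

    int main(void)
    {
        uint64_t base_a = 1ull << 32; /* e.g. a 64-bit MMIO window at 4 GiB */
        uint64_t base_b = 0;

        /* Truncation typically yields 0 here, so the two ranges compare as
         * "equal" and the sorted _CRS entries can end up misordered. */
        printf("subtraction: %d\n", compare_by_subtraction(base_a, base_b));
        printf("three-way:   %d\n", compare_three_way(base_a, base_b));
        return 0;
    }
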
@@ -1908,10 +1914,13 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
             scope = aml_scope("\\_SB");
             dev = aml_device("PC%.02X", bus_num);
             aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
-            aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
             aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
+            if (pci_bus_is_express(bus)) {
+                aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
+                aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
+                aml_append(dev, build_q35_osc_method());
+            } else {
+                aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
+            }
 
             if (numa_node != NUMA_NODE_UNASSIGNED) {
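
A brief note on the IDs used above: PNP0A08 identifies a PCI Express root bridge and PNP0A03 a conventional PCI host bridge; declaring PNP0A08 as _HID with PNP0A03 as _CID lets PCIe-aware guests treat a pxb-pcie expander as PCI Express while older guests can still bind a plain PCI driver. A tiny illustrative sketch (not QEMU code) of the selection logic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only; the real code builds these as AML via
     * aml_name_decl()/aml_eisaid() as shown in the hunk above. */
    static const char *hid_for_bus(bool is_express)
    {
        return is_express ? "PNP0A08" : "PNP0A03";
    }

    int main(void)
    {
        printf("pxb:      _HID %s\n", hid_for_bus(false));
        printf("pxb-pcie: _HID %s, _CID PNP0A03\n", hid_for_bus(true));
        return 0;
    }
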
@@ -197,9 +197,11 @@ static void ioapic_update_kvm_routes(IOAPICCommonState *s)
             MSIMessage msg;
             struct ioapic_entry_info info;
             ioapic_entry_parse(s->ioredtbl[i], &info);
-            msg.address = info.addr;
-            msg.data = info.data;
-            kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL);
+            if (!info.masked) {
+                msg.address = info.addr;
+                msg.data = info.data;
+                kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL);
+            }
         }
         kvm_irqchip_commit_routes(kvm_state);
     }
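
The masked flag consulted above comes from bit 16 of an I/O APIC redirection-table entry. A standalone sketch of that decoding; the helper and macro name below are local to the example, not QEMU's ioapic_entry_parse()/ioapic_entry_info:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bit 16 of an IOREDTBL entry is the interrupt mask bit. */
    #define IOREDTBL_MASK_BIT 16

    static bool ioredtbl_entry_masked(uint64_t entry)
    {
        return (entry >> IOREDTBL_MASK_BIT) & 1;
    }

    int main(void)
    {
        uint64_t entries[2] = {
            (1ull << IOREDTBL_MASK_BIT) | 0x30, /* masked, vector 0x30 */
            0x31,                               /* unmasked, vector 0x31 */
        };

        for (int i = 0; i < 2; i++) {
            /* With the fix above, masked (often not-yet-programmed) pins no
             * longer trigger KVM MSI route updates. */
            printf("pin %d: %s\n", i,
                   ioredtbl_entry_masked(entries[i]) ? "skip" : "update route");
        }
        return 0;
    }
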
@@ -34,31 +34,53 @@
 
 #define BALLOON_PAGE_SIZE (1 << VIRTIO_BALLOON_PFN_SHIFT)
 
-struct PartiallyBalloonedPage {
-    RAMBlock *rb;
-    ram_addr_t base;
-    unsigned long bitmap[];
-};
+typedef struct PartiallyBalloonedPage {
+    ram_addr_t base_gpa;
+    unsigned long *bitmap;
+} PartiallyBalloonedPage;
+
+static void virtio_balloon_pbp_free(PartiallyBalloonedPage *pbp)
+{
+    if (!pbp->bitmap) {
+        return;
+    }
+    g_free(pbp->bitmap);
+    pbp->bitmap = NULL;
+}
+
+static void virtio_balloon_pbp_alloc(PartiallyBalloonedPage *pbp,
+                                     ram_addr_t base_gpa,
+                                     long subpages)
+{
+    pbp->base_gpa = base_gpa;
+    pbp->bitmap = bitmap_new(subpages);
+}
+
+static bool virtio_balloon_pbp_matches(PartiallyBalloonedPage *pbp,
+                                       ram_addr_t base_gpa)
+{
+    return pbp->base_gpa == base_gpa;
+}
 
 static void balloon_inflate_page(VirtIOBalloon *balloon,
-                                 MemoryRegion *mr, hwaddr offset)
+                                 MemoryRegion *mr, hwaddr mr_offset,
+                                 PartiallyBalloonedPage *pbp)
 {
-    void *addr = memory_region_get_ram_ptr(mr) + offset;
+    void *addr = memory_region_get_ram_ptr(mr) + mr_offset;
+    ram_addr_t rb_offset, rb_aligned_offset, base_gpa;
     RAMBlock *rb;
     size_t rb_page_size;
     int subpages;
-    ram_addr_t ram_offset, host_page_base;
 
     /* XXX is there a better way to get to the RAMBlock than via a
      * host address? */
-    rb = qemu_ram_block_from_host(addr, false, &ram_offset);
+    rb = qemu_ram_block_from_host(addr, false, &rb_offset);
     rb_page_size = qemu_ram_pagesize(rb);
-    host_page_base = ram_offset & ~(rb_page_size - 1);
 
     if (rb_page_size == BALLOON_PAGE_SIZE) {
         /* Easy case */
 
-        ram_block_discard_range(rb, ram_offset, rb_page_size);
+        ram_block_discard_range(rb, rb_offset, rb_page_size);
         /* We ignore errors from ram_block_discard_range(), because it
          * has already reported them, and failing to discard a balloon
          * page is not fatal */
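
To see what the reworked PartiallyBalloonedPage tracks, here is a standalone model, assuming 64 KiB backing pages: one flag per 4 KiB balloon page inside a host page, with the host page discarded only once every subpage has been inflated. The helpers are simplified stand-ins for QEMU's bitmap_new()/set_bit()/bitmap_full().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BALLOON_PAGE_SIZE 4096

    /* Simplified model of PartiallyBalloonedPage: one byte per 4 KiB subpage
     * instead of a real bitmap. */
    typedef struct {
        uint64_t base_gpa;
        unsigned char *bitmap;
        int subpages;
    } PBPModel;

    static void pbp_alloc(PBPModel *pbp, uint64_t base_gpa, int subpages)
    {
        pbp->base_gpa = base_gpa;
        pbp->subpages = subpages;
        pbp->bitmap = calloc(subpages, 1);
    }

    static bool pbp_full(const PBPModel *pbp)
    {
        for (int i = 0; i < pbp->subpages; i++) {
            if (!pbp->bitmap[i]) {
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        int subpages = (64 * 1024) / BALLOON_PAGE_SIZE; /* 16 for 64 KiB pages */
        PBPModel pbp;

        pbp_alloc(&pbp, 0x40000000, subpages);

        /* The guest inflates the host page one 4 KiB balloon page at a time;
         * only when the last subpage arrives can the host page be discarded. */
        for (int i = 0; i < subpages; i++) {
            pbp.bitmap[i] = 1; /* set_bit() in the real code */
            if (pbp_full(&pbp)) {
                printf("host page at 0x%llx fully ballooned, discard it\n",
                       (unsigned long long)pbp.base_gpa);
            }
        }

        free(pbp.bitmap);
        return 0;
    }
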
@@ -74,81 +96,51 @@ static void balloon_inflate_page(VirtIOBalloon *balloon,
     warn_report_once(
 "Balloon used with backing page size > 4kiB, this may not be reliable");
 
+    rb_aligned_offset = QEMU_ALIGN_DOWN(rb_offset, rb_page_size);
     subpages = rb_page_size / BALLOON_PAGE_SIZE;
+    base_gpa = memory_region_get_ram_addr(mr) + mr_offset -
+               (rb_offset - rb_aligned_offset);
 
-    if (balloon->pbp
-        && (rb != balloon->pbp->rb
-            || host_page_base != balloon->pbp->base)) {
+    if (pbp->bitmap && !virtio_balloon_pbp_matches(pbp, base_gpa)) {
         /* We've partially ballooned part of a host page, but now
          * we're trying to balloon part of a different one. Too hard,
          * give up on the old partial page */
-        g_free(balloon->pbp);
-        balloon->pbp = NULL;
+        virtio_balloon_pbp_free(pbp);
     }
 
-    if (!balloon->pbp) {
-        /* Starting on a new host page */
-        size_t bitlen = BITS_TO_LONGS(subpages) * sizeof(unsigned long);
-        balloon->pbp = g_malloc0(sizeof(PartiallyBalloonedPage) + bitlen);
-        balloon->pbp->rb = rb;
-        balloon->pbp->base = host_page_base;
+    if (!pbp->bitmap) {
+        virtio_balloon_pbp_alloc(pbp, base_gpa, subpages);
     }
 
-    bitmap_set(balloon->pbp->bitmap,
-               (ram_offset - balloon->pbp->base) / BALLOON_PAGE_SIZE,
-               subpages);
+    set_bit((rb_offset - rb_aligned_offset) / BALLOON_PAGE_SIZE,
+            pbp->bitmap);
 
-    if (bitmap_full(balloon->pbp->bitmap, subpages)) {
+    if (bitmap_full(pbp->bitmap, subpages)) {
         /* We've accumulated a full host page, we can actually discard
          * it now */
 
-        ram_block_discard_range(rb, balloon->pbp->base, rb_page_size);
+        ram_block_discard_range(rb, rb_aligned_offset, rb_page_size);
         /* We ignore errors from ram_block_discard_range(), because it
          * has already reported them, and failing to discard a balloon
          * page is not fatal */
-
-        g_free(balloon->pbp);
-        balloon->pbp = NULL;
+        virtio_balloon_pbp_free(pbp);
     }
 }
 
 static void balloon_deflate_page(VirtIOBalloon *balloon,
-                                 MemoryRegion *mr, hwaddr offset)
+                                 MemoryRegion *mr, hwaddr mr_offset)
 {
-    void *addr = memory_region_get_ram_ptr(mr) + offset;
+    void *addr = memory_region_get_ram_ptr(mr) + mr_offset;
+    ram_addr_t rb_offset;
     RAMBlock *rb;
     size_t rb_page_size;
-    ram_addr_t ram_offset, host_page_base;
     void *host_addr;
     int ret;
 
     /* XXX is there a better way to get to the RAMBlock than via a
      * host address? */
-    rb = qemu_ram_block_from_host(addr, false, &ram_offset);
+    rb = qemu_ram_block_from_host(addr, false, &rb_offset);
     rb_page_size = qemu_ram_pagesize(rb);
-    host_page_base = ram_offset & ~(rb_page_size - 1);
-
-    if (balloon->pbp
-        && rb == balloon->pbp->rb
-        && host_page_base == balloon->pbp->base) {
-        int subpages = rb_page_size / BALLOON_PAGE_SIZE;
-
-        /*
-         * This means the guest has asked to discard some of the 4kiB
-         * subpages of a host page, but then changed its mind and
-         * asked to keep them after all. It's exceedingly unlikely
-         * for a guest to do this in practice, but handle it anyway,
-         * since getting it wrong could mean discarding memory the
-         * guest is still using. */
-        bitmap_clear(balloon->pbp->bitmap,
-                     (ram_offset - balloon->pbp->base) / BALLOON_PAGE_SIZE,
-                     subpages);
-
-        if (bitmap_empty(balloon->pbp->bitmap, subpages)) {
-            g_free(balloon->pbp);
-            balloon->pbp = NULL;
-        }
-    }
 
     host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1));
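
The offset arithmetic introduced above (rb_aligned_offset, base_gpa, the set_bit() index) can be sanity-checked with concrete numbers. A minimal sketch with made-up addresses; ALIGN_DOWN stands in for QEMU_ALIGN_DOWN:

    #include <stdint.h>
    #include <stdio.h>

    #define BALLOON_PAGE_SIZE 4096
    /* Stand-in for QEMU_ALIGN_DOWN, valid for power-of-two alignments. */
    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        /* Made-up example: 64 KiB backing pages; the guest inflates the 4 KiB
         * page that sits 0x3000 into its host page. */
        uint64_t rb_page_size = 64 * 1024;
        uint64_t rb_offset = 0x123000;      /* offset of the 4 KiB page in the RAMBlock */
        uint64_t mr_base = 0x100000000ull;  /* what memory_region_get_ram_addr() returns */
        uint64_t mr_offset = 0x123000;      /* same page, as an offset into the region */

        uint64_t rb_aligned_offset = ALIGN_DOWN(rb_offset, rb_page_size); /* 0x120000 */
        uint64_t base_gpa = mr_base + mr_offset - (rb_offset - rb_aligned_offset);
        uint64_t bit = (rb_offset - rb_aligned_offset) / BALLOON_PAGE_SIZE; /* 3 */

        printf("host page base 0x%llx, subpage bit %llu\n",
               (unsigned long long)base_gpa, (unsigned long long)bit);
        return 0;
    }
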
@@ -335,16 +327,18 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
     MemoryRegionSection section;
 
     for (;;) {
+        PartiallyBalloonedPage pbp = {};
         size_t offset = 0;
         uint32_t pfn;
+
         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
         if (!elem) {
-            return;
+            break;
         }
 
         while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
+            unsigned int p = virtio_ldl_p(vdev, &pfn);
             hwaddr pa;
-            int p = virtio_ldl_p(vdev, &pfn);
 
             pa = (hwaddr) p << VIRTIO_BALLOON_PFN_SHIFT;
             offset += 4;
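
The small type change in this hunk (int to unsigned int for the PFN read from the virtqueue) matters as soon as the PFN has bit 31 set: sign extension then corrupts the computed physical address. A minimal standalone illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VIRTIO_BALLOON_PFN_SHIFT 12

    int main(void)
    {
        uint32_t pfn = 0x80000000u;     /* a guest page at 8 TiB once shifted */

        int signed_p = (int)pfn;        /* old code: wraps to a negative value */
        unsigned int unsigned_p = pfn;  /* fixed code */

        /* The cast to a 64-bit physical address sign-extends the negative
         * value, corrupting addresses for PFNs >= 2^31 (guest RAM >= 8 TiB). */
        uint64_t pa_signed = (uint64_t)signed_p << VIRTIO_BALLOON_PFN_SHIFT;
        uint64_t pa_unsigned = (uint64_t)unsigned_p << VIRTIO_BALLOON_PFN_SHIFT;

        printf("signed:   0x%" PRIx64 "\n", pa_signed);   /* 0xfffff80000000000 */
        printf("unsigned: 0x%" PRIx64 "\n", pa_unsigned); /* 0x0000080000000000 */
        return 0;
    }
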
@@ -368,7 +362,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             if (!qemu_balloon_is_inhibited()) {
                 if (vq == s->ivq) {
                     balloon_inflate_page(s, section.mr,
-                                         section.offset_within_region);
+                                         section.offset_within_region, &pbp);
                 } else if (vq == s->dvq) {
                     balloon_deflate_page(s, section.mr, section.offset_within_region);
                 } else {
@@ -381,6 +375,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
         virtqueue_push(vq, elem, offset);
         virtio_notify(vdev, vq);
         g_free(elem);
+        virtio_balloon_pbp_free(&pbp);
     }
 }
 
@@ -33,8 +33,6 @@ typedef struct virtio_balloon_stat_modern {
     uint64_t val;
 } VirtIOBalloonStatModern;
 
-typedef struct PartiallyBalloonedPage PartiallyBalloonedPage;
-
 enum virtio_balloon_free_page_report_status {
     FREE_PAGE_REPORT_S_STOP = 0,
     FREE_PAGE_REPORT_S_REQUESTED = 1,
@@ -70,7 +68,6 @@ typedef struct VirtIOBalloon {
     int64_t stats_last_update;
    int64_t stats_poll_interval;
     uint32_t host_features;
-    PartiallyBalloonedPage *pbp;
 
     bool qemu_4_0_config_size;
 } VirtIOBalloon;