virtio,pci,pc: fixes
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pci,pc: fixes

revert virtio pci/SR-IOV emulation at author's request
a couple of fixes in virtio,vtd

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmarSFUPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRp7fwH/3wNCGhgHhF5dhKRKRn8hqhxYl2rXnv0LKYI
# Rgsoxh3kw6oKBXxLG/B4V2GkqDSU8q8NuHnvGmmAUQ/uHmwTWbBbrZ+HwMMmaRhT
# Ox8kIXiVYAtw24yLKDvyoKbMLjLKb9/QqTT4rbsQ9yl5PLxwoGGJEu/ifM1MbZZY
# f5CDtj3hRArIZEjMt0Q3h+G7///BRVZxQ/0de57whGXcr349qgMpiIThvlCOj7Yf
# rQ68AGS4yk1Jk0oxiYyWjo43o8JbB5bMnCrkzDy4ZdY5Sw9zGb48CmcrBUl4J9lv
# NVDYK63dsvRS0ew7PxaEwu32MIQLJcn5s521m81/ZAhbdyzLnlI=
# =/2+K
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 01 Aug 2024 06:33:25 PM AEST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: The key's User ID is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  intel_iommu: Fix for IQA reg read dropped DW field
  hw/i386/amd_iommu: Don't leak memory in amdvi_update_iotlb()
  Revert "hw/pci: Rename has_power to enabled"
  Revert "hw/ppc/spapr_pci: Do not create DT for disabled PCI device"
  Revert "hw/ppc/spapr_pci: Do not reject VFs created after a PF"
  Revert "pcie_sriov: Do not manually unrealize"
  Revert "pcie_sriov: Ensure VF function number does not overflow"
  Revert "pcie_sriov: Reuse SR-IOV VF device instances"
  Revert "pcie_sriov: Release VFs failed to realize"
  Revert "pcie_sriov: Remove num_vfs from PCIESriovPF"
  Revert "pcie_sriov: Register VFs after migration"
  Revert "hw/pci: Fix SR-IOV VF number calculation"
  Revert "pcie_sriov: Ensure PF and VF are mutually exclusive"
  Revert "pcie_sriov: Check PCI Express for SR-IOV PF"
  Revert "pcie_sriov: Allow user to create SR-IOV device"
  Revert "virtio-pci: Implement SR-IOV PF"
  Revert "virtio-net: Implement SR-IOV VF"
  Revert "docs: Document composable SR-IOV device"
  virtio-rng: block max-bytes=0

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 5e25c93ccb
@@ -2011,7 +2011,6 @@ F: hw/pci-bridge/*
F: qapi/pci.json
F: docs/pci*
F: docs/specs/*pci*
F: docs/system/sriov.rst

PCIE DOE
M: Huai-Cheng Kuo <hchkuo@avery-design.com.tw>
@@ -52,11 +52,9 @@ setting up a BAR for a VF.
   ...

   /* Add and initialize the SR/IOV capability */
   if (!pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
                           vf_devid, initial_vfs, total_vfs,
                           fun_offset, stride, errp)) {
       return;
   }
   pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
                      vf_devid, initial_vfs, total_vfs,
                      fun_offset, stride);

   /* Set up individual VF BARs (parameters as for normal BARs) */
   pcie_sriov_pf_init_vf_bar( ... )
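For orientation, a hedged sketch of a PF realize path using the restored
(void-returning) API described in the hunk above; the device name, IDs,
offsets and BAR size are made-up placeholders, not real QEMU identifiers,
and error handling of the surrounding realize steps is omitted:

    #include "qemu/osdep.h"
    #include "hw/pci/pci.h"
    #include "hw/pci/pcie.h"
    #include "hw/pci/pcie_sriov.h"

    static void your_pf_realize(PCIDevice *d, Error **errp)
    {
        uint16_t vf_devid = 0x1234;              /* placeholder VF device ID */
        uint16_t initial_vfs = 2, total_vfs = 2;
        uint16_t fun_offset = 0x80, stride = 2;

        /* The PF must be a PCI Express endpoint before SR/IOV is added. */
        pcie_endpoint_cap_init(d, 0);

        /* Add and initialize the SR/IOV capability at config offset 0x200. */
        pcie_sriov_pf_init(d, 0x200, "your_virtual_dev",
                           vf_devid, initial_vfs, total_vfs,
                           fun_offset, stride);

        /* Describe the BAR every VF will expose: 64-bit, prefetchable, 64 KiB. */
        pcie_sriov_pf_init_vf_bar(d, 0,
                                  PCI_BASE_ADDRESS_MEM_TYPE_64 |
                                  PCI_BASE_ADDRESS_MEM_PREFETCH,
                                  0x10000);
    }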
@@ -39,4 +39,3 @@ or Hypervisor.Framework.
   multi-process
   confidential-guest-support
   vm-templating
   sriov
@@ -1,36 +0,0 @@
.. SPDX-License-Identifier: GPL-2.0-or-later

Composable SR-IOV device
========================

SR-IOV (Single Root I/O Virtualization) is an optional extended capability of a
PCI Express device. It allows a single physical function (PF) to appear as
multiple virtual functions (VFs) for the main purpose of eliminating software
overhead in I/O from virtual machines.

There are devices with predefined SR-IOV configurations, but it is also possible
to compose an SR-IOV device yourself. Composing an SR-IOV device is currently
only supported by virtio-net-pci.

Users can configure an SR-IOV-capable virtio-net device by adding
virtio-net-pci functions to a bus. Below is a command line example:

.. code-block:: shell

   -netdev user,id=n -netdev user,id=o
   -netdev user,id=p -netdev user,id=q
   -device pcie-root-port,id=b
   -device virtio-net-pci,bus=b,addr=0x0.0x3,netdev=q,sriov-pf=f
   -device virtio-net-pci,bus=b,addr=0x0.0x2,netdev=p,sriov-pf=f
   -device virtio-net-pci,bus=b,addr=0x0.0x1,netdev=o,sriov-pf=f
   -device virtio-net-pci,bus=b,addr=0x0.0x0,netdev=n,id=f

The VFs specify the paired PF with the ``sriov-pf`` property. The PF must be
added after all VFs. It is the user's responsibility to ensure that VFs have
function numbers larger than that of the PF, and that the function numbers
have a consistent stride.

You may also need to perform additional steps to activate the SR-IOV feature on
your guest. For Linux, refer to [1]_.

.. [1] https://docs.kernel.org/PCI/pci-iov-howto.html
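The "larger function number, consistent stride" rule stated in the removed
document can be checked with a small standalone computation. This assumes the
four-device command line shown above (PF at 0x0.0x0, VFs at 0x0.0x1 through
0x0.0x3); it is an illustration, not code from this series:

    #include <stdio.h>

    /* Same packing QEMU uses for devfn: slot in the high bits, function in the low 3. */
    #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))

    int main(void)
    {
        int pf = PCI_DEVFN(0, 0);                                  /* id=f, addr=0x0.0x0 */
        int vf[] = { PCI_DEVFN(0, 1), PCI_DEVFN(0, 2), PCI_DEVFN(0, 3) };

        /* Offset to the first VF and the gap between consecutive VFs must be consistent. */
        printf("vf_offset=%d vf_stride=%d\n", vf[0] - pf, vf[1] - vf[0]);
        return 0;   /* prints: vf_offset=1 vf_stride=1 */
    }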
@@ -357,12 +357,12 @@ static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
        uint64_t *key = g_new(uint64_t, 1);
        uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                                 PCI_FUNC(devid), gpa, to_cache.translated_addr);
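A minimal standalone illustration of the leak this amdvi_update_iotlb() change
fixes (not QEMU code; a plain GHashTable stands in for the IOTLB cache):
allocating before the cacheability check loses both allocations whenever the
translation is not inserted, so the fix moves the g_new() calls inside the
branch that hands them to the table.

    #include <glib.h>

    typedef struct { guint64 gfn; } Entry;

    static void update_iotlb_fixed(GHashTable *iotlb, guint64 gfn, gboolean cacheable)
    {
        /* Allocate only on the path that transfers ownership to the hash table. */
        if (cacheable) {
            guint64 *key = g_new(guint64, 1);
            Entry *entry = g_new(Entry, 1);

            *key = gfn;
            entry->gfn = gfn;
            g_hash_table_replace(iotlb, key, entry);   /* table now owns key and entry */
        }
    }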
@@ -2947,7 +2947,9 @@ static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        val = s->iq |
              (vtd_get_quad(s, DMAR_IQA_REG) &
               (VTD_IQA_QS | VTD_IQA_DW_MASK));
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
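A hedged sketch of why the DMAR_IQA_REG read needs the DW bit as well. The
constants below mirror the VT-d register layout (QS in bits 2:0, DW at bit 11)
and are written out here as assumptions rather than copied QEMU definitions:
without the DW bit in the read-back mask, a guest that enabled 256-bit
invalidation descriptors would read the register back as if it were still
configured for 128-bit descriptors.

    #include <stdint.h>

    #define VTD_IQA_QS       0x7ULL          /* invalidation queue size, bits 2:0 */
    #define VTD_IQA_DW_MASK  (1ULL << 11)    /* descriptor width: 0 = 128-bit, 1 = 256-bit */

    /* Combine the stored queue base with the guest-written QS and DW fields. */
    static uint64_t vtd_iqa_read(uint64_t iq_base, uint64_t stored_reg)
    {
        return iq_base | (stored_reg & (VTD_IQA_QS | VTD_IQA_DW_MASK));
    }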
hw/net/igb.c (13 changed lines)
@@ -446,16 +446,9 @@ static void igb_pci_realize(PCIDevice *pci_dev, Error **errp)

    pcie_ari_init(pci_dev, 0x150);

    if (!pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET,
                            TYPE_IGBVF, IGB_82576_VF_DEV_ID,
                            IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
                            IGB_VF_OFFSET, IGB_VF_STRIDE,
                            errp)) {
        pcie_cap_exit(pci_dev);
        igb_cleanup_msix(s);
        msi_uninit(pci_dev);
        return;
    }
    pcie_sriov_pf_init(pci_dev, IGB_CAP_SRIOV_OFFSET, TYPE_IGBVF,
        IGB_82576_VF_DEV_ID, IGB_MAX_VF_FUNCTIONS, IGB_MAX_VF_FUNCTIONS,
        IGB_VF_OFFSET, IGB_VF_STRIDE);

    pcie_sriov_pf_init_vf_bar(pci_dev, IGBVF_MMIO_BAR_IDX,
        PCI_BASE_ADDRESS_MEM_TYPE_64 | PCI_BASE_ADDRESS_MEM_PREFETCH,
@@ -8271,8 +8271,7 @@ out:
    return pow2ceil(bar_size);
}

static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
                            Error **errp)
static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset)
{
    uint16_t vf_dev_id = n->params.use_intel_id ?
                         PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME;
@@ -8281,17 +8280,12 @@ static bool nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
                          le16_to_cpu(cap->vifrsm),
                          NULL, NULL);

    if (!pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
                            n->params.sriov_max_vfs, n->params.sriov_max_vfs,
                            NVME_VF_OFFSET, NVME_VF_STRIDE,
                            errp)) {
        return false;
    }
    pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
                       n->params.sriov_max_vfs, n->params.sriov_max_vfs,
                       NVME_VF_OFFSET, NVME_VF_STRIDE);

    pcie_sriov_pf_init_vf_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                              PCI_BASE_ADDRESS_MEM_TYPE_64, bar_size);

    return true;
}

static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
@@ -8416,12 +8410,6 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
        return false;
    }

    if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs &&
        !nvme_init_sriov(n, pci_dev, 0x120, errp)) {
        msix_uninit(pci_dev, &n->bar0, &n->bar0);
        return false;
    }

    nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);

    pcie_cap_deverr_init(pci_dev);
@@ -8451,6 +8439,10 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
        nvme_init_pmr(n, pci_dev);
    }

    if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
        nvme_init_sriov(n, pci_dev, 0x120);
    }

    return true;
}

hw/pci/pci.c (89 changed lines)
@@ -85,7 +85,6 @@ static Property pci_props[] = {
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_STRING("sriov-pf", PCIDevice, sriov_pf),
    DEFINE_PROP_END_OF_LIST()
};

@@ -734,17 +733,10 @@ static bool migrate_is_not_pcie(void *opaque, int version_id)
    return !pci_is_express((PCIDevice *)opaque);
}

static int pci_post_load(void *opaque, int version_id)
{
    pcie_sriov_pf_post_load(opaque);
    return 0;
}

const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .post_load = pci_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
@@ -960,8 +952,13 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /* SR/IOV is not handled here. */
    if (pci_is_vf(dev)) {
    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

@@ -994,8 +991,7 @@ static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        PCIDevice *device = bus->devices[PCI_DEVFN(slot, func)];
        if (device && !pci_is_vf(device)) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
@@ -1280,7 +1276,6 @@ static void pci_qdev_unrealize(DeviceState *dev)

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);
    pcie_sriov_unregister_device(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
@@ -1312,6 +1307,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));
@@ -1322,6 +1318,7 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
@@ -1329,35 +1326,22 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num,
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    if (pci_is_vf(pci_dev)) {
        PCIDevice *pf = pci_dev->exp.sriov_vf.pf;
        assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]);
    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

        r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size);
        if (r->addr != PCI_BAR_UNMAPPED) {
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        r->addr = PCI_BAR_UNMAPPED;

        wmask = ~(size - 1);
        if (region_num == PCI_ROM_SLOT) {
            /* ROM enable bit is writable */
            wmask |= PCI_ROM_ADDRESS_ENABLE;
        }

        addr = pci_bar(pci_dev, region_num);
        pci_set_long(pci_dev->config + addr, type);

        if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
            r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            pci_set_quad(pci_dev->wmask + addr, wmask);
            pci_set_quad(pci_dev->cmask + addr, ~0ULL);
        } else {
            pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
            pci_set_long(pci_dev->cmask + addr, 0xffffffff);
        }
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}

@@ -1446,11 +1430,7 @@ static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
        pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
    uint16_t vf_stride =
        pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
    uint32_t vf_num = d->devfn - (pf->devfn + vf_offset);

    if (vf_num) {
        vf_num /= vf_stride;
    }
    uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

    if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        new_addr = pci_get_quad(pf->config + bar);
@@ -1545,7 +1525,7 @@ static void pci_update_mappings(PCIDevice *d)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->enabled) {
        if (!d->has_power) {
            new_addr = PCI_BAR_UNMAPPED;
        }

@@ -1633,7 +1613,7 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->enabled);
                                   & PCI_COMMAND_MASTER) && d->has_power);
    }

    msi_write_config(d, addr, val_in, l);
@@ -2118,11 +2098,6 @@ static void pci_qdev_realize(DeviceState *qdev, Error **errp)
        }
    }

    if (!pcie_sriov_register_device(pci_dev, errp)) {
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    /*
     * A PCIe Downstream Port that do not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
@@ -2909,18 +2884,18 @@ MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
    return msg;
}

void pci_set_enabled(PCIDevice *d, bool state)
void pci_set_power(PCIDevice *d, bool state)
{
    if (d->enabled == state) {
    if (d->has_power == state) {
        return;
    }

    d->enabled = state;
    d->has_power = state;
    pci_update_mappings(d);
    memory_region_set_enabled(&d->bus_master_enable_region,
                              (pci_get_word(d->config + PCI_COMMAND)
                               & PCI_COMMAND_MASTER) && d->enabled);
    if (d->qdev.realized) {
                               & PCI_COMMAND_MASTER) && d->has_power);
    if (!d->has_power) {
        pci_device_reset(d);
    }
}
@@ -86,7 +86,7 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
     * allowing direct removal of unexposed functions.
     */
    if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
        !pci_dev->enabled || is_pci_dev_ejected(pci_dev)) {
        !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
        return;
    }

@@ -111,7 +111,7 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr,
     * allowing direct removal of unexposed functions.
     */
    if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
        !pci_dev->enabled || is_pci_dev_ejected(pci_dev)) {
        !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
        return ~0x0;
    }
@@ -20,90 +20,23 @@
#include "qapi/error.h"
#include "trace.h"

static GHashTable *pfs;
static PCIDevice *register_vf(PCIDevice *pf, int devfn,
                              const char *name, uint16_t vf_num);
static void unregister_vfs(PCIDevice *dev);

static void unparent_vfs(PCIDevice *dev, uint16_t total_vfs)
{
    for (uint16_t i = 0; i < total_vfs; i++) {
        PCIDevice *vf = dev->exp.sriov_pf.vf[i];
        object_unparent(OBJECT(vf));
        object_unref(OBJECT(vf));
    }
    g_free(dev->exp.sriov_pf.vf);
    dev->exp.sriov_pf.vf = NULL;
}

static void clear_ctrl_vfe(PCIDevice *dev)
{
    uint8_t *ctrl = dev->config + dev->exp.sriov_cap + PCI_SRIOV_CTRL;
    pci_set_word(ctrl, pci_get_word(ctrl) & ~PCI_SRIOV_CTRL_VFE);
}

static void register_vfs(PCIDevice *dev)
{
    uint16_t num_vfs;
    uint16_t i;
    uint16_t sriov_cap = dev->exp.sriov_cap;

    assert(sriov_cap > 0);
    num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
    if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) {
        clear_ctrl_vfe(dev);
        return;
    }

    trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn),
                             PCI_FUNC(dev->devfn), num_vfs);
    for (i = 0; i < num_vfs; i++) {
        pci_set_enabled(dev->exp.sriov_pf.vf[i], true);
    }
}

static void unregister_vfs(PCIDevice *dev)
{
    uint16_t i;
    uint8_t *cfg = dev->config + dev->exp.sriov_cap;

    trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn),
                               PCI_FUNC(dev->devfn));
    for (i = 0; i < pci_get_word(cfg + PCI_SRIOV_TOTAL_VF); i++) {
        pci_set_enabled(dev->exp.sriov_pf.vf[i], false);
    }
}

static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset,
                                      uint16_t vf_dev_id, uint16_t init_vfs,
                                      uint16_t total_vfs, uint16_t vf_offset,
                                      uint16_t vf_stride, Error **errp)
void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
                        const char *vfname, uint16_t vf_dev_id,
                        uint16_t init_vfs, uint16_t total_vfs,
                        uint16_t vf_offset, uint16_t vf_stride)
{
    uint8_t *cfg = dev->config + offset;
    uint8_t *wmask;

    if (!pci_is_express(dev)) {
        error_setg(errp, "PCI Express is required for SR-IOV PF");
        return false;
    }

    if (pci_is_vf(dev)) {
        error_setg(errp, "a device cannot be both an SR-IOV PF and a VF");
        return false;
    }

    if (total_vfs) {
        uint16_t ari_cap = pcie_find_capability(dev, PCI_EXT_CAP_ID_ARI);
        uint16_t first_vf_devfn = dev->devfn + vf_offset;
        uint16_t last_vf_devfn = first_vf_devfn + vf_stride * (total_vfs - 1);

        if ((!ari_cap && PCI_SLOT(dev->devfn) != PCI_SLOT(last_vf_devfn)) ||
            last_vf_devfn >= PCI_DEVFN_MAX) {
            error_setg(errp, "VF function number overflows");
            return false;
        }
    }

    pcie_add_capability(dev, PCI_EXT_CAP_ID_SRIOV, 1,
                        offset, PCI_EXT_CAP_SRIOV_SIZEOF);
    dev->exp.sriov_cap = offset;
    dev->exp.sriov_pf.num_vfs = 0;
    dev->exp.sriov_pf.vfname = g_strdup(vfname);
    dev->exp.sriov_pf.vf = NULL;

    pci_set_word(cfg + PCI_SRIOV_VF_OFFSET, vf_offset);
@@ -136,76 +69,13 @@ static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset,
    pci_set_word(wmask + PCI_SRIOV_SYS_PGSIZE, 0x553);

    qdev_prop_set_bit(&dev->qdev, "multifunction", true);

    return true;
}

bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
                        const char *vfname, uint16_t vf_dev_id,
                        uint16_t init_vfs, uint16_t total_vfs,
                        uint16_t vf_offset, uint16_t vf_stride,
                        Error **errp)
{
    BusState *bus = qdev_get_parent_bus(&dev->qdev);
    int32_t devfn = dev->devfn + vf_offset;

    if (pfs && g_hash_table_contains(pfs, dev->qdev.id)) {
        error_setg(errp, "attaching user-created SR-IOV VF unsupported");
        return false;
    }

    if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, init_vfs,
                                   total_vfs, vf_offset, vf_stride, errp)) {
        return false;
    }

    dev->exp.sriov_pf.vf = g_new(PCIDevice *, total_vfs);

    for (uint16_t i = 0; i < total_vfs; i++) {
        PCIDevice *vf = pci_new(devfn, vfname);
        vf->exp.sriov_vf.pf = dev;
        vf->exp.sriov_vf.vf_number = i;

        if (!qdev_realize(&vf->qdev, bus, errp)) {
            object_unparent(OBJECT(vf));
            object_unref(vf);
            unparent_vfs(dev, i);
            return false;
        }

        /* set vid/did according to sr/iov spec - they are not used */
        pci_config_set_vendor_id(vf->config, 0xffff);
        pci_config_set_device_id(vf->config, 0xffff);

        dev->exp.sriov_pf.vf[i] = vf;
        devfn += vf_stride;
    }

    return true;
}

void pcie_sriov_pf_exit(PCIDevice *dev)
{
    uint8_t *cfg = dev->config + dev->exp.sriov_cap;

    if (dev->exp.sriov_pf.vf_user_created) {
        uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
        uint16_t total_vfs = pci_get_word(dev->config + PCI_SRIOV_TOTAL_VF);
        uint16_t vf_dev_id = pci_get_word(dev->config + PCI_SRIOV_VF_DID);

        unregister_vfs(dev);

        for (uint16_t i = 0; i < total_vfs; i++) {
            PCIDevice *vf = dev->exp.sriov_pf.vf[i];

            vf->exp.sriov_vf.pf = NULL;

            pci_config_set_vendor_id(vf->config, ven_id);
            pci_config_set_device_id(vf->config, vf_dev_id);
        }
    } else {
        unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF));
    }
    unregister_vfs(dev);
    g_free((char *)dev->exp.sriov_pf.vfname);
    dev->exp.sriov_pf.vfname = NULL;
}

void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
@@ -238,173 +108,113 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
                                MemoryRegion *memory)
{
    PCIIORegion *r;
    PCIBus *bus = pci_get_bus(dev);
    uint8_t type;
    pcibus_t size = memory_region_size(memory);

    assert(dev->exp.sriov_vf.pf);
    assert(pci_is_vf(dev)); /* PFs must use pci_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    type = dev->exp.sriov_vf.pf->exp.sriov_pf.vf_bar_type[region_num];

    return pci_register_bar(dev, region_num, type, memory);
    if (!is_power_of_2(size)) {
        error_report("%s: PCI region size must be a power"
                     " of two - type=0x%x, size=0x%"FMT_PCIBUS,
                     __func__, type, size);
        exit(1);
    }

    r = &dev->io_regions[region_num];
    r->memory = memory;
    r->address_space =
        type & PCI_BASE_ADDRESS_SPACE_IO
        ? bus->address_space_io
        : bus->address_space_mem;
    r->size = size;
    r->type = type;

    r->addr = pci_bar_address(dev, region_num, r->type, r->size);
    if (r->addr != PCI_BAR_UNMAPPED) {
        memory_region_add_subregion_overlap(r->address_space,
                                            r->addr, r->memory, 1);
    }
}

static gint compare_vf_devfns(gconstpointer a, gconstpointer b)
static PCIDevice *register_vf(PCIDevice *pf, int devfn, const char *name,
                              uint16_t vf_num)
{
    return (*(PCIDevice **)a)->devfn - (*(PCIDevice **)b)->devfn;
    PCIDevice *dev = pci_new(devfn, name);
    dev->exp.sriov_vf.pf = pf;
    dev->exp.sriov_vf.vf_number = vf_num;
    PCIBus *bus = pci_get_bus(pf);
    Error *local_err = NULL;

    qdev_realize(&dev->qdev, &bus->qbus, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return NULL;
    }

    /* set vid/did according to sr/iov spec - they are not used */
    pci_config_set_vendor_id(dev->config, 0xffff);
    pci_config_set_device_id(dev->config, 0xffff);

    return dev;
}

int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev,
                                                 uint16_t offset,
                                                 Error **errp)
static void register_vfs(PCIDevice *dev)
{
    GPtrArray *pf;
    PCIDevice **vfs;
    BusState *bus = qdev_get_parent_bus(DEVICE(dev));
    uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
    uint16_t vf_dev_id;
    uint16_t vf_offset;
    uint16_t vf_stride;
    uint16_t num_vfs;
    uint16_t i;
    uint16_t sriov_cap = dev->exp.sriov_cap;
    uint16_t vf_offset =
        pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
    uint16_t vf_stride =
        pci_get_word(dev->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
    int32_t devfn = dev->devfn + vf_offset;

    assert(sriov_cap > 0);
    num_vfs = pci_get_word(dev->config + sriov_cap + PCI_SRIOV_NUM_VF);
    if (num_vfs > pci_get_word(dev->config + sriov_cap + PCI_SRIOV_TOTAL_VF)) {
        return;
    }

    dev->exp.sriov_pf.vf = g_new(PCIDevice *, num_vfs);

    trace_sriov_register_vfs(dev->name, PCI_SLOT(dev->devfn),
                             PCI_FUNC(dev->devfn), num_vfs);
    for (i = 0; i < num_vfs; i++) {
        dev->exp.sriov_pf.vf[i] = register_vf(dev, devfn,
                                              dev->exp.sriov_pf.vfname, i);
        if (!dev->exp.sriov_pf.vf[i]) {
            num_vfs = i;
            break;
        }
        devfn += vf_stride;
    }
    dev->exp.sriov_pf.num_vfs = num_vfs;
}

static void unregister_vfs(PCIDevice *dev)
{
    uint16_t num_vfs = dev->exp.sriov_pf.num_vfs;
    uint16_t i;

    if (!pfs || !dev->qdev.id) {
        return 0;
    }

    pf = g_hash_table_lookup(pfs, dev->qdev.id);
    if (!pf) {
        return 0;
    }

    if (pf->len > UINT16_MAX) {
        error_setg(errp, "too many VFs");
        return -1;
    }

    g_ptr_array_sort(pf, compare_vf_devfns);
    vfs = (void *)pf->pdata;

    if (vfs[0]->devfn <= dev->devfn) {
        error_setg(errp, "a VF function number is less than the PF function number");
        return -1;
    }

    vf_dev_id = pci_get_word(vfs[0]->config + PCI_DEVICE_ID);
    vf_offset = vfs[0]->devfn - dev->devfn;
    vf_stride = pf->len < 2 ? 0 : vfs[1]->devfn - vfs[0]->devfn;

    for (i = 0; i < pf->len; i++) {
        if (bus != qdev_get_parent_bus(&vfs[i]->qdev)) {
            error_setg(errp, "SR-IOV VF parent bus mismatches with PF");
            return -1;
        }

        if (ven_id != pci_get_word(vfs[i]->config + PCI_VENDOR_ID)) {
            error_setg(errp, "SR-IOV VF vendor ID mismatches with PF");
            return -1;
        }

        if (vf_dev_id != pci_get_word(vfs[i]->config + PCI_DEVICE_ID)) {
            error_setg(errp, "inconsistent SR-IOV VF device IDs");
            return -1;
        }

        for (size_t j = 0; j < PCI_NUM_REGIONS; j++) {
            if (vfs[i]->io_regions[j].size != vfs[0]->io_regions[j].size ||
                vfs[i]->io_regions[j].type != vfs[0]->io_regions[j].type) {
                error_setg(errp, "inconsistent SR-IOV BARs");
                return -1;
            }
        }

        if (vfs[i]->devfn - vfs[0]->devfn != vf_stride * i) {
            error_setg(errp, "inconsistent SR-IOV stride");
            return -1;
        }
    }

    if (!pcie_sriov_pf_init_common(dev, offset, vf_dev_id, pf->len,
                                   pf->len, vf_offset, vf_stride, errp)) {
        return -1;
    }

    for (i = 0; i < pf->len; i++) {
        vfs[i]->exp.sriov_vf.pf = dev;
        vfs[i]->exp.sriov_vf.vf_number = i;

        /* set vid/did according to sr/iov spec - they are not used */
        pci_config_set_vendor_id(vfs[i]->config, 0xffff);
        pci_config_set_device_id(vfs[i]->config, 0xffff);
    }

    dev->exp.sriov_pf.vf = vfs;
    dev->exp.sriov_pf.vf_user_created = true;

    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        PCIIORegion *region = &vfs[0]->io_regions[i];

        if (region->size) {
            pcie_sriov_pf_init_vf_bar(dev, i, region->type, region->size);
        }
    }

    return PCI_EXT_CAP_SRIOV_SIZEOF;
}

bool pcie_sriov_register_device(PCIDevice *dev, Error **errp)
{
    if (!dev->exp.sriov_pf.vf && dev->qdev.id &&
        pfs && g_hash_table_contains(pfs, dev->qdev.id)) {
        error_setg(errp, "attaching user-created SR-IOV VF unsupported");
        return false;
    }

    if (dev->sriov_pf) {
        PCIDevice *pci_pf;
        GPtrArray *pf;

        if (!PCI_DEVICE_GET_CLASS(dev)->sriov_vf_user_creatable) {
            error_setg(errp, "user cannot create SR-IOV VF with this device type");
            return false;
        }

        if (!pci_is_express(dev)) {
            error_setg(errp, "PCI Express is required for SR-IOV VF");
            return false;
        }

        if (!pci_qdev_find_device(dev->sriov_pf, &pci_pf)) {
            error_setg(errp, "PCI device specified as SR-IOV PF already exists");
            return false;
        }

        if (!pfs) {
            pfs = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
        }

        pf = g_hash_table_lookup(pfs, dev->sriov_pf);
        if (!pf) {
            pf = g_ptr_array_new();
            g_hash_table_insert(pfs, g_strdup(dev->sriov_pf), pf);
        }

        g_ptr_array_add(pf, dev);
    }

    return true;
}

void pcie_sriov_unregister_device(PCIDevice *dev)
{
    if (dev->sriov_pf && pfs) {
        GPtrArray *pf = g_hash_table_lookup(pfs, dev->sriov_pf);

        if (pf) {
            g_ptr_array_remove_fast(pf, dev);

            if (!pf->len) {
                g_hash_table_remove(pfs, dev->sriov_pf);
                g_ptr_array_free(pf, FALSE);
            }
        }
    trace_sriov_unregister_vfs(dev->name, PCI_SLOT(dev->devfn),
                               PCI_FUNC(dev->devfn), num_vfs);
    for (i = 0; i < num_vfs; i++) {
        Error *err = NULL;
        PCIDevice *vf = dev->exp.sriov_pf.vf[i];
        if (!object_property_set_bool(OBJECT(vf), "realized", false, &err)) {
            error_reportf_err(err, "Failed to unplug: ");
        }
        object_unparent(OBJECT(vf));
        object_unref(OBJECT(vf));
    }
    g_free(dev->exp.sriov_pf.vf);
    dev->exp.sriov_pf.vf = NULL;
    dev->exp.sriov_pf.num_vfs = 0;
}

void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
@@ -425,21 +235,15 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
                             PCI_FUNC(dev->devfn), off, val, len);

    if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
        if (val & PCI_SRIOV_CTRL_VFE) {
            register_vfs(dev);
        if (dev->exp.sriov_pf.num_vfs) {
            if (!(val & PCI_SRIOV_CTRL_VFE)) {
                unregister_vfs(dev);
            }
        } else {
            unregister_vfs(dev);
            if (val & PCI_SRIOV_CTRL_VFE) {
                register_vfs(dev);
            }
        }
    } else if (range_covers_byte(off, len, PCI_SRIOV_NUM_VF)) {
        clear_ctrl_vfe(dev);
        unregister_vfs(dev);
    }
}

void pcie_sriov_pf_post_load(PCIDevice *dev)
{
    if (dev->exp.sriov_cap) {
        register_vfs(dev);
    }
}

@@ -490,7 +294,7 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize)

uint16_t pcie_sriov_vf_number(PCIDevice *dev)
{
    assert(dev->exp.sriov_vf.pf);
    assert(pci_is_vf(dev));
    return dev->exp.sriov_vf.vf_number;
}

@@ -502,7 +306,7 @@ PCIDevice *pcie_sriov_get_pf(PCIDevice *dev)
PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n)
{
    assert(!pci_is_vf(dev));
    if (n < pcie_sriov_num_vfs(dev)) {
    if (n < dev->exp.sriov_pf.num_vfs) {
        return dev->exp.sriov_pf.vf[n];
    }
    return NULL;
@@ -510,10 +314,5 @@ PCIDevice *pcie_sriov_get_vf_at_index(PCIDevice *dev, int n)

uint16_t pcie_sriov_num_vfs(PCIDevice *dev)
{
    uint16_t sriov_cap = dev->exp.sriov_cap;
    uint8_t *cfg = dev->config + sriov_cap;

    return sriov_cap &&
        (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) ?
        pci_get_word(cfg + PCI_SRIOV_NUM_VF) : 0;
    return dev->exp.sriov_pf.num_vfs;
}
@@ -14,7 +14,7 @@ msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d mask

# hw/pci/pcie_sriov.c
sriov_register_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: creating %d vf devs"
sriov_unregister_vfs(const char *name, int slot, int function) "%s %02x:%x: Unregistering vf devs"
sriov_unregister_vfs(const char *name, int slot, int function, int num_vfs) "%s %02x:%x: Unregistering %d vf devs"
sriov_config_write(const char *name, int slot, int fun, uint32_t offset, uint32_t val, uint32_t len) "%s %02x:%x: sriov offset 0x%x val 0x%x len %d"

# pcie.c
@@ -1296,10 +1296,6 @@ static void spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev,
        return;
    }

    if (!pdev->enabled) {
        return;
    }

    err = spapr_dt_pci_device(p->sphb, pdev, p->fdt, p->offset);
    if (err < 0) {
        p->err = err;
@@ -1573,9 +1569,7 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler,
     * hotplug, we do not allow functions to be hotplugged to a
     * slot that already has function 0 present
     */
    if (plugged_dev->hotplugged &&
        !pci_is_vf(pdev) &&
        bus->devices[PCI_DEVFN(slotnr, 0)] &&
    if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] &&
        PCI_FUNC(pdev->devfn) != 0) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " additional functions can no longer be exposed to guest.",
@@ -75,7 +75,6 @@ static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    k->sriov_vf_user_creatable = true;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    device_class_set_props(dc, virtio_net_properties);
    vpciklass->realize = virtio_net_pci_realize;
@@ -1955,7 +1955,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(bus);
    int16_t res;

    /*
     * Virtio capabilities present without
@@ -2101,14 +2100,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }

    res = pcie_sriov_pf_init_from_user_created_vfs(&proxy->pci_dev,
                                                   proxy->last_pcie_cap_offset,
                                                   errp);
    if (res > 0) {
        proxy->last_pcie_cap_offset += res;
        virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
@@ -2196,7 +2187,7 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        proxy->last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);
@@ -2216,9 +2207,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, proxy->last_pcie_cap_offset,
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            proxy->last_pcie_cap_offset += PCI_ERR_SIZEOF;
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
@@ -2243,9 +2234,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, proxy->last_pcie_cap_offset,
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            proxy->last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
@@ -2272,7 +2263,6 @@ static void virtio_pci_exit(PCIDevice *pci_dev)
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    pcie_sriov_pf_exit(&proxy->pci_dev);
    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
@@ -184,8 +184,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)

    /* Workaround: Property parsing does not enforce unsigned integers,
     * So this is a hack to reject such numbers. */
    if (vrng->conf.max_bytes > INT64_MAX) {
        error_setg(errp, "'max-bytes' parameter must be non-negative, "
    if (vrng->conf.max_bytes == 0 ||
        vrng->conf.max_bytes > INT64_MAX) {
        error_setg(errp, "'max-bytes' parameter must be positive, "
                   "and less than 2^63");
        return;
    }
@@ -678,6 +678,6 @@ static inline void pci_irq_pulse(PCIDevice *pci_dev)
}

MSIMessage pci_get_msi_message(PCIDevice *dev, int vector);
void pci_set_enabled(PCIDevice *pci_dev, bool state);
void pci_set_power(PCIDevice *pci_dev, bool state);

#endif
@@ -38,8 +38,6 @@ struct PCIDeviceClass {
    uint16_t subsystem_id;              /* only for header type = 0 */

    const char *romfile;                /* rom bar */

    bool sriov_vf_user_creatable;
};

enum PCIReqIDType {
@@ -59,7 +57,7 @@ typedef struct PCIReqIDCache PCIReqIDCache;
struct PCIDevice {
    DeviceState qdev;
    bool partially_hotplugged;
    bool enabled;
    bool has_power;

    /* PCI config space */
    uint8_t *config;
@@ -169,8 +167,6 @@ struct PCIDevice {
    /* ID of standby device in net_failover pair */
    char *failover_pair_id;
    uint32_t acpi_index;

    char *sriov_pf;
};

static inline int pci_intx(PCIDevice *pci_dev)
@@ -203,7 +199,7 @@ static inline int pci_is_express_downstream_port(const PCIDevice *d)

static inline int pci_is_vf(const PCIDevice *d)
{
    return d->sriov_pf || d->exp.sriov_vf.pf != NULL;
    return d->exp.sriov_vf.pf != NULL;
}

static inline uint32_t pci_config_size(const PCIDevice *d)
@@ -216,21 +212,6 @@ static inline uint16_t pci_get_bdf(PCIDevice *dev)
    return PCI_BUILD_BDF(pci_bus_num(pci_get_bus(dev)), dev->devfn);
}

static inline void pci_set_power(PCIDevice *pci_dev, bool state)
{
    /*
     * Don't change the enabled state of VFs when powering on/off the device.
     *
     * When powering on, VFs must not be enabled immediately but they must
     * wait until the guest configures SR-IOV.
     * When powering off, their corresponding PFs will be reset and disable
     * VFs.
     */
    if (!pci_is_vf(pci_dev)) {
        pci_set_enabled(pci_dev, state);
    }
}

uint16_t pci_requester_id(PCIDevice *dev);

/* DMA access functions */
@@ -16,9 +16,10 @@
#include "hw/pci/pci.h"

typedef struct PCIESriovPF {
    uint16_t num_vfs;     /* Number of virtual functions created */
    uint8_t vf_bar_type[PCI_NUM_REGIONS];   /* Store type for each VF bar */
    const char *vfname;   /* Reference to the device type used for the VFs */
    PCIDevice **vf;       /* Pointer to an array of num_vfs VF devices */
    bool vf_user_created; /* If VFs are created by user */
} PCIESriovPF;

typedef struct PCIESriovVF {
@@ -26,11 +27,10 @@ typedef struct PCIESriovVF {
    uint16_t vf_number;   /* Logical VF number of this function */
} PCIESriovVF;

bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
void pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
                        const char *vfname, uint16_t vf_dev_id,
                        uint16_t init_vfs, uint16_t total_vfs,
                        uint16_t vf_offset, uint16_t vf_stride,
                        Error **errp);
                        uint16_t vf_offset, uint16_t vf_stride);
void pcie_sriov_pf_exit(PCIDevice *dev);

/* Set up a VF bar in the SR/IOV bar area */
@@ -41,23 +41,6 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num,
void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num,
                                MemoryRegion *memory);

/**
 * pcie_sriov_pf_init_from_user_created_vfs() - Initialize PF with user-created
 *                                              VFs.
 * @dev: A PCIe device being realized.
 * @offset: The offset of the SR-IOV capability.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: The size of added capability. 0 if the user did not create VFs.
 *         -1 if failed.
 */
int16_t pcie_sriov_pf_init_from_user_created_vfs(PCIDevice *dev,
                                                 uint16_t offset,
                                                 Error **errp);

bool pcie_sriov_register_device(PCIDevice *dev, Error **errp);
void pcie_sriov_unregister_device(PCIDevice *dev);

/*
 * Default (minimal) page size support values
 * as required by the SR/IOV standard:
@@ -75,8 +58,6 @@ void pcie_sriov_pf_add_sup_pgsize(PCIDevice *dev, uint16_t opt_sup_pgsize);
void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
                             uint32_t val, int len);

void pcie_sriov_pf_post_load(PCIDevice *dev);

/* Reset SR/IOV */
void pcie_sriov_pf_reset(PCIDevice *dev);
@@ -152,7 +152,6 @@ struct VirtIOPCIProxy {
    uint32_t modern_io_bar_idx;
    uint32_t modern_mem_bar_idx;
    int config_cap;
    uint16_t last_pcie_cap_offset;
    uint32_t flags;
    bool disable_modern;
    bool ignore_backend_features;