#ifndef QEMU_PCI_H
#define QEMU_PCI_H

#include "qemu-common.h"

#include "qdev.h"
#include "memory.h"
#include "dma.h"

/* PCI includes legacy ISA access. */
#include "isa.h"

#include "pcie.h"

/* PCI bus */

#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)
#define PCI_SLOT_MAX 32
#define PCI_FUNC_MAX 8

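/*
 * Example (values shown for illustration): devfn packs the 5-bit slot
 * (device) number and the 3-bit function number into one byte, matching
 * PCI configuration addressing:
 *
 *     PCI_DEVFN(3, 0) == 0x18    PCI_SLOT(0x18) == 3    PCI_FUNC(0x18) == 0
 *     PCI_DEVFN(3, 2) == 0x1a    PCI_SLOT(0x1a) == 3    PCI_FUNC(0x1a) == 2
 */
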
/* Class, Vendor and Device IDs from Linux's pci_ids.h */
#include "pci_ids.h"

/* QEMU-specific Vendor and Device ID definitions */

/* IBM (0x1014) */
#define PCI_DEVICE_ID_IBM_440GX 0x027f
#define PCI_DEVICE_ID_IBM_OPENPIC2 0xffff

/* Hitachi (0x1054) */
#define PCI_VENDOR_ID_HITACHI 0x1054
#define PCI_DEVICE_ID_HITACHI_SH7751R 0x350e

/* Apple (0x106b) */
#define PCI_DEVICE_ID_APPLE_343S1201 0x0010
#define PCI_DEVICE_ID_APPLE_UNI_N_I_PCI 0x001e
#define PCI_DEVICE_ID_APPLE_UNI_N_PCI 0x001f
#define PCI_DEVICE_ID_APPLE_UNI_N_KEYL 0x0022
#define PCI_DEVICE_ID_APPLE_IPID_USB 0x003f

/* Realtek (0x10ec) */
#define PCI_DEVICE_ID_REALTEK_8029 0x8029

/* Xilinx (0x10ee) */
#define PCI_DEVICE_ID_XILINX_XC2VP30 0x0300

/* Marvell (0x11ab) */
#define PCI_DEVICE_ID_MARVELL_GT6412X 0x4620

/* QEMU/Bochs VGA (0x1234) */
#define PCI_VENDOR_ID_QEMU 0x1234
#define PCI_DEVICE_ID_QEMU_VGA 0x1111

/* VMWare (0x15ad) */
#define PCI_VENDOR_ID_VMWARE 0x15ad
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
#define PCI_DEVICE_ID_VMWARE_SVGA 0x0710
#define PCI_DEVICE_ID_VMWARE_NET 0x0720
#define PCI_DEVICE_ID_VMWARE_SCSI 0x0730
#define PCI_DEVICE_ID_VMWARE_IDE 0x1729

/* Intel (0x8086) */
#define PCI_DEVICE_ID_INTEL_82551IT 0x1209
#define PCI_DEVICE_ID_INTEL_82557 0x1229
#define PCI_DEVICE_ID_INTEL_82801IR 0x2922

/* Red Hat / Qumranet (for QEMU) -- see pci-ids.txt */
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100

#define PCI_DEVICE_ID_VIRTIO_NET 0x1000
#define PCI_DEVICE_ID_VIRTIO_BLOCK 0x1001
#define PCI_DEVICE_ID_VIRTIO_BALLOON 0x1002
#define PCI_DEVICE_ID_VIRTIO_CONSOLE 0x1003
#define PCI_DEVICE_ID_VIRTIO_SCSI 0x1004

#define FMT_PCIBUS PRIx64

typedef void PCIConfigWriteFunc(PCIDevice *pci_dev,
                                uint32_t address, uint32_t data, int len);
typedef uint32_t PCIConfigReadFunc(PCIDevice *pci_dev,
                                   uint32_t address, int len);
typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
                                pcibus_t addr, pcibus_t size, int type);
typedef int PCIUnregisterFunc(PCIDevice *pci_dev);

typedef struct PCIIORegion {
    pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
#define PCI_BAR_UNMAPPED (~(pcibus_t)0)
    pcibus_t size;
    uint8_t type;
    MemoryRegion *memory;
    MemoryRegion *address_space;
} PCIIORegion;

#define PCI_ROM_SLOT 6
#define PCI_NUM_REGIONS 7

#include "pci_regs.h"

/* PCI HEADER_TYPE */
#define PCI_HEADER_TYPE_MULTI_FUNCTION 0x80

/* Size of the standard PCI config header */
#define PCI_CONFIG_HEADER_SIZE 0x40
/* Size of the standard PCI config space */
#define PCI_CONFIG_SPACE_SIZE 0x100
/* Size of the standard PCIe config space: 4KB */
#define PCIE_CONFIG_SPACE_SIZE 0x1000

#define PCI_NUM_PINS 4 /* A-D */

/* Bits in cap_present field. */
enum {
    QEMU_PCI_CAP_MSI = 0x1,
    QEMU_PCI_CAP_MSIX = 0x2,
    QEMU_PCI_CAP_EXPRESS = 0x4,

    /* multifunction capable device */
#define QEMU_PCI_CAP_MULTIFUNCTION_BITNR 3
    QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),

    /* command register SERR bit enabled */
#define QEMU_PCI_CAP_SERR_BITNR 4
    QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),

    /* Standard hot plug controller. */
#define QEMU_PCI_SHPC_BITNR 5
    QEMU_PCI_CAP_SHPC = (1 << QEMU_PCI_SHPC_BITNR),

#define QEMU_PCI_SLOTID_BITNR 6
    QEMU_PCI_CAP_SLOTID = (1 << QEMU_PCI_SLOTID_BITNR),
};

#define TYPE_PCI_DEVICE "pci-device"
#define PCI_DEVICE(obj) \
    OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
#define PCI_DEVICE_CLASS(klass) \
    OBJECT_CLASS_CHECK(PCIDeviceClass, (klass), TYPE_PCI_DEVICE)
#define PCI_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(PCIDeviceClass, (obj), TYPE_PCI_DEVICE)

typedef struct PCIDeviceClass {
    DeviceClass parent_class;

    int (*init)(PCIDevice *dev);
    PCIUnregisterFunc *exit;
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    uint16_t vendor_id;
    uint16_t device_id;
    uint8_t revision;
    uint16_t class_id;
    uint16_t subsystem_vendor_id;       /* only for header type = 0 */
    uint16_t subsystem_id;              /* only for header type = 0 */

    /*
     * Set for a PCI-to-PCI bridge, clear for a normal device.
     * This does not include PCI host bridges; it may be extended
     * when CardBus bridge support is added.
     */
    int is_bridge;

    /* pcie stuff */
    int is_express;   /* is this device pci express? */

    /* device isn't hot-pluggable */
    int no_hotplug;

    /* rom bar */
    const char *romfile;
} PCIDeviceClass;

typedef int (*MSIVectorUseNotifier)(PCIDevice *dev, unsigned int vector,
                                    MSIMessage msg);
typedef void (*MSIVectorReleaseNotifier)(PCIDevice *dev, unsigned int vector);

struct PCIDevice {
    DeviceState qdev;
    /* PCI config space */
    uint8_t *config;

    /* Used to enable config checks on load. Note that writable bits are
     * never checked even if set in cmask. */
    uint8_t *cmask;

    /* Used to implement R/W bytes */
    uint8_t *wmask;

    /* Used to implement RW1C (Write 1 to Clear) bytes */
    uint8_t *w1cmask;

    /* Used to allocate config space for capabilities. */
    uint8_t *used;

    /* the following fields are read only */
    PCIBus *bus;
    int32_t devfn;
    char name[64];
    PCIIORegion io_regions[PCI_NUM_REGIONS];

    /* do not access the following fields */
    PCIConfigReadFunc *config_read;
    PCIConfigWriteFunc *config_write;

    /* IRQ objects for the INTA-INTD pins. */
    qemu_irq *irq;

    /* Current IRQ levels. Used internally by the generic PCI code. */
    uint8_t irq_state;

    /* Capability bits */
    uint32_t cap_present;

    /* Offset of MSI-X capability in config space */
    uint8_t msix_cap;

    /* MSI-X entries */
    int msix_entries_nr;

    /* Space to store MSIX table */
    uint8_t *msix_table_page;
    /* MMIO index used to map MSIX table and pending bit entries. */
    MemoryRegion msix_mmio;
    /* Reference-count for entries actually in use by driver. */
    unsigned *msix_entry_used;
    /* Region including the MSI-X table */
    uint32_t msix_bar_size;
    /* MSIX function mask set or MSIX disabled */
    bool msix_function_masked;
    /* Version id needed for VMState */
    int32_t version_id;

    /* Offset of MSI capability in config space */
    uint8_t msi_cap;

    /* PCI Express */
    PCIExpressDevice exp;

    /* SHPC */
    SHPCDevice *shpc;

    /* Location of option rom */
    char *romfile;
    bool has_rom;
    MemoryRegion rom;
    uint32_t rom_bar;

    /* MSI-X notifiers */
    MSIVectorUseNotifier msix_vector_use_notifier;
    MSIVectorReleaseNotifier msix_vector_release_notifier;
};

void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t attr, MemoryRegion *memory);
pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num);

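/*
 * Example (illustrative sketch only -- "MyDeviceState", its "mmio" region,
 * "my_device_mmio_ops" and the init function are hypothetical, not part of
 * this header): a device model typically creates a MemoryRegion and
 * registers it as a BAR from its init callback:
 *
 *     static int my_device_initfn(PCIDevice *pci_dev)
 *     {
 *         MyDeviceState *s = DO_UPCAST(MyDeviceState, dev, pci_dev);
 *
 *         memory_region_init_io(&s->mmio, &my_device_mmio_ops, s,
 *                               "my-device-mmio", 0x1000);
 *         pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
 *                          &s->mmio);
 *         return 0;
 *     }
 */
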
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size);

void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);

uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id);

uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len);
void pci_default_write_config(PCIDevice *d,
                              uint32_t address, uint32_t val, int len);
void pci_device_save(PCIDevice *s, QEMUFile *f);
int pci_device_load(PCIDevice *s, QEMUFile *f);
MemoryRegion *pci_address_space(PCIDevice *dev);
MemoryRegion *pci_address_space_io(PCIDevice *dev);

typedef void (*pci_set_irq_fn)(void *opaque, int irq_num, int level);
typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);

typedef enum {
    PCI_HOTPLUG_DISABLED,
    PCI_HOTPLUG_ENABLED,
    PCI_COLDPLUG_ENABLED,
} PCIHotplugState;

typedef int (*pci_hotplug_fn)(DeviceState *qdev, PCIDevice *pci_dev,
                              PCIHotplugState state);
void pci_bus_new_inplace(PCIBus *bus, DeviceState *parent,
                         const char *name,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min);
PCIBus *pci_bus_new(DeviceState *parent, const char *name,
                    MemoryRegion *address_space_mem,
                    MemoryRegion *address_space_io,
                    uint8_t devfn_min);
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                  void *irq_opaque, int nirq);
int pci_bus_get_irq_level(PCIBus *bus, int irq_num);
void pci_bus_hotplug(PCIBus *bus, pci_hotplug_fn hotplug, DeviceState *dev);
PCIBus *pci_register_bus(DeviceState *parent, const char *name,
                         pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                         void *irq_opaque,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min, int nirq);
void pci_device_reset(PCIDevice *dev);
void pci_bus_reset(PCIBus *bus);

PCIDevice *pci_nic_init(NICInfo *nd, const char *default_model,
                        const char *default_devaddr);
PCIDevice *pci_nic_init_nofail(NICInfo *nd, const char *default_model,
                               const char *default_devaddr);
int pci_bus_num(PCIBus *s);
void pci_for_each_device(PCIBus *bus, int bus_num,
                         void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
                         void *opaque);
PCIBus *pci_find_root_bus(int domain);
int pci_find_domain(const PCIBus *bus);
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn);
int pci_qdev_find_device(const char *id, PCIDevice **pdev);
PCIBus *pci_get_bus_devfn(int *devfnp, const char *devaddr);

int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp,
                     unsigned *slotp);

void pci_device_deassert_intx(PCIDevice *dev);

static inline void
pci_set_byte(uint8_t *config, uint8_t val)
{
    *config = val;
}

static inline uint8_t
pci_get_byte(const uint8_t *config)
{
    return *config;
}

static inline void
pci_set_word(uint8_t *config, uint16_t val)
{
    cpu_to_le16wu((uint16_t *)config, val);
}

static inline uint16_t
pci_get_word(const uint8_t *config)
{
    return le16_to_cpupu((const uint16_t *)config);
}

static inline void
pci_set_long(uint8_t *config, uint32_t val)
{
    cpu_to_le32wu((uint32_t *)config, val);
}

static inline uint32_t
pci_get_long(const uint8_t *config)
{
    return le32_to_cpupu((const uint32_t *)config);
}

static inline void
pci_set_quad(uint8_t *config, uint64_t val)
{
    cpu_to_le64w((uint64_t *)config, val);
}

static inline uint64_t
pci_get_quad(const uint8_t *config)
{
    return le64_to_cpup((const uint64_t *)config);
}

static inline void
pci_config_set_vendor_id(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_VENDOR_ID], val);
}

static inline void
pci_config_set_device_id(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_DEVICE_ID], val);
}

static inline void
pci_config_set_revision(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_REVISION_ID], val);
}

static inline void
pci_config_set_class(uint8_t *pci_config, uint16_t val)
{
    pci_set_word(&pci_config[PCI_CLASS_DEVICE], val);
}

static inline void
pci_config_set_prog_interface(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_CLASS_PROG], val);
}

static inline void
pci_config_set_interrupt_pin(uint8_t *pci_config, uint8_t val)
{
    pci_set_byte(&pci_config[PCI_INTERRUPT_PIN], val);
}

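/*
 * Typical use, sketched for illustration only (the device ID value below is
 * hypothetical; many devices set vendor/device/class via PCIDeviceClass
 * fields instead of calling these helpers directly):
 *
 *     uint8_t *cfg = pci_dev->config;
 *
 *     pci_config_set_vendor_id(cfg, PCI_VENDOR_ID_REDHAT_QUMRANET);
 *     pci_config_set_device_id(cfg, 0x10f5);
 *     pci_config_set_class(cfg, PCI_CLASS_OTHERS);
 *     pci_config_set_interrupt_pin(cfg, 1);  // INTA
 */
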
/*
 * Helper functions for bit mask operations on configuration space.
 * To just set bits, use test-and-set and discard the returned value.
 * To just clear bits, use test-and-clear and discard the returned value.
 * NOTE: these are not atomic.
 */
static inline uint8_t
pci_byte_test_and_clear_mask(uint8_t *config, uint8_t mask)
{
    uint8_t val = pci_get_byte(config);
    pci_set_byte(config, val & ~mask);
    return val & mask;
}

static inline uint8_t
pci_byte_test_and_set_mask(uint8_t *config, uint8_t mask)
{
    uint8_t val = pci_get_byte(config);
    pci_set_byte(config, val | mask);
    return val & mask;
}

static inline uint16_t
pci_word_test_and_clear_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    pci_set_word(config, val & ~mask);
    return val & mask;
}

static inline uint16_t
pci_word_test_and_set_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    pci_set_word(config, val | mask);
    return val & mask;
}

static inline uint32_t
pci_long_test_and_clear_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    pci_set_long(config, val & ~mask);
    return val & mask;
}

static inline uint32_t
pci_long_test_and_set_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    pci_set_long(config, val | mask);
    return val & mask;
}

static inline uint64_t
pci_quad_test_and_clear_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    pci_set_quad(config, val & ~mask);
    return val & mask;
}

static inline uint64_t
pci_quad_test_and_set_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    pci_set_quad(config, val | mask);
    return val & mask;
}

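/*
 * Example (illustrative sketch): set the bus-master and memory-decode bits
 * in the command register, then test whether INTx assertion is disabled.
 * The register and bit names come from pci_regs.h:
 *
 *     pci_word_test_and_set_mask(dev->config + PCI_COMMAND,
 *                                PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
 *     if (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE) {
 *         ...
 *     }
 */
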
/*
 * Access a register field specified by a mask: the value is shifted
 * into/out of the field starting at the mask's least-significant set bit.
 */
static inline void
pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg)
{
    uint8_t val = pci_get_byte(config);
    uint8_t rval = reg << (ffs(mask) - 1);
    pci_set_byte(config, (~mask & val) | (mask & rval));
}

static inline uint8_t
pci_get_byte_by_mask(uint8_t *config, uint8_t mask)
{
    uint8_t val = pci_get_byte(config);
    return (val & mask) >> (ffs(mask) - 1);
}

static inline void
pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg)
{
    uint16_t val = pci_get_word(config);
    uint16_t rval = reg << (ffs(mask) - 1);
    pci_set_word(config, (~mask & val) | (mask & rval));
}

static inline uint16_t
pci_get_word_by_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);
    return (val & mask) >> (ffs(mask) - 1);
}

static inline void
pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg)
{
    uint32_t val = pci_get_long(config);
    uint32_t rval = reg << (ffs(mask) - 1);
    pci_set_long(config, (~mask & val) | (mask & rval));
}

static inline uint32_t
pci_get_long_by_mask(uint8_t *config, uint32_t mask)
{
    uint32_t val = pci_get_long(config);
    return (val & mask) >> (ffs(mask) - 1);
}

static inline void
pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
{
    uint64_t val = pci_get_quad(config);
    uint64_t rval = reg << (ffs(mask) - 1);
    pci_set_quad(config, (~mask & val) | (mask & rval));
}

static inline uint64_t
pci_get_quad_by_mask(uint8_t *config, uint64_t mask)
{
    uint64_t val = pci_get_quad(config);
    return (val & mask) >> (ffs(mask) - 1);
}

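/*
 * Example (illustrative sketch): reading a multi-bit field without
 * open-coding the shift.  "pos" is a hypothetical capability offset;
 * PCI_EXP_FLAGS and PCI_EXP_FLAGS_TYPE come from pci_regs.h:
 *
 *     uint8_t *exp_cap = dev->config + pos;
 *     uint16_t type = pci_get_word_by_mask(exp_cap + PCI_EXP_FLAGS,
 *                                          PCI_EXP_FLAGS_TYPE);
 */
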
PCIDevice *pci_create_multifunction(PCIBus *bus, int devfn, bool multifunction,
                                    const char *name);
PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
                                           bool multifunction,
                                           const char *name);
PCIDevice *pci_create(PCIBus *bus, int devfn, const char *name);
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name);

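/*
 * Example (illustrative): create and realize a device on a bus, either
 * letting the bus pick the slot (devfn = -1) or at a fixed slot/function:
 *
 *     pci_create_simple(bus, -1, "e1000");
 *     pci_create_simple(bus, PCI_DEVFN(3, 0), "e1000");
 */
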
static inline int pci_is_express(const PCIDevice *d)
{
    return d->cap_present & QEMU_PCI_CAP_EXPRESS;
}

static inline uint32_t pci_config_size(const PCIDevice *d)
{
    return pci_is_express(d) ? PCIE_CONFIG_SPACE_SIZE : PCI_CONFIG_SPACE_SIZE;
}

/* DMA access functions */
static inline DMAContext *pci_dma_context(PCIDevice *dev)
{
    /* Stub for when we have no PCI iommu support */
    return NULL;
}

static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
                             void *buf, dma_addr_t len, DMADirection dir)
{
    dma_memory_rw(pci_dma_context(dev), addr, buf, len, dir);
    return 0;
}

static inline int pci_dma_read(PCIDevice *dev, dma_addr_t addr,
                               void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
                                const void *buf, dma_addr_t len)
{
    return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE);
}

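/*
 * Example (illustrative sketch): a device model fetching a descriptor from
 * guest memory and writing back a completion status.  "desc_addr",
 * "status_addr" and the MyDescriptor type are hypothetical:
 *
 *     MyDescriptor desc;
 *     uint32_t status = 0;
 *
 *     pci_dma_read(pci_dev, desc_addr, &desc, sizeof(desc));
 *     ... process the descriptor ...
 *     pci_dma_write(pci_dev, status_addr, &status, sizeof(status));
 */
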
#define PCI_DMA_DEFINE_LDST(_l, _s, _bits)                              \
    static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev,     \
                                                   dma_addr_t addr)    \
    {                                                                   \
        return ld##_l##_dma(pci_dma_context(dev), addr);               \
    }                                                                   \
    static inline void st##_s##_pci_dma(PCIDevice *dev,                \
                                        dma_addr_t addr, uint##_bits##_t val) \
    {                                                                   \
        st##_s##_dma(pci_dma_context(dev), addr, val);                 \
    }

PCI_DMA_DEFINE_LDST(ub, b, 8);
PCI_DMA_DEFINE_LDST(uw_le, w_le, 16);
PCI_DMA_DEFINE_LDST(l_le, l_le, 32);
PCI_DMA_DEFINE_LDST(q_le, q_le, 64);
PCI_DMA_DEFINE_LDST(uw_be, w_be, 16);
PCI_DMA_DEFINE_LDST(l_be, l_be, 32);
PCI_DMA_DEFINE_LDST(q_be, q_be, 64);

#undef PCI_DMA_DEFINE_LDST

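/*
 * Example (illustrative): load/store a single little-endian 32-bit value
 * in guest memory through the PCI DMA wrappers generated above:
 *
 *     uint32_t v = ldl_le_pci_dma(pci_dev, addr);
 *     stl_le_pci_dma(pci_dev, addr, v + 1);
 */
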
static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
                                dma_addr_t *plen, DMADirection dir)
{
    void *buf;

    buf = dma_memory_map(pci_dma_context(dev), addr, plen, dir);
    return buf;
}

static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
                                 DMADirection dir, dma_addr_t access_len)
{
    dma_memory_unmap(pci_dma_context(dev), buffer, len, dir, access_len);
}

static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
                                       int alloc_hint)
{
    qemu_sglist_init(qsg, alloc_hint);
}

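/*
 * Example (illustrative sketch): build a scatter-gather list for a
 * two-segment transfer; "addr0/len0" and "addr1/len1" are hypothetical
 * guest addresses taken from a device descriptor:
 *
 *     QEMUSGList qsg;
 *
 *     pci_dma_sglist_init(&qsg, pci_dev, 2);
 *     qemu_sglist_add(&qsg, addr0, len0);
 *     qemu_sglist_add(&qsg, addr1, len1);
 *     ... hand &qsg to the DMA helpers ...
 *     qemu_sglist_destroy(&qsg);
 */
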
extern const VMStateDescription vmstate_pci_device;

#define VMSTATE_PCI_DEVICE(_field, _state) {                         \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT,                                        \
    .offset     = vmstate_offset_value(_state, _field, PCIDevice),   \
}

#define VMSTATE_PCI_DEVICE_POINTER(_field, _state) {                 \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(PCIDevice),                                 \
    .vmsd       = &vmstate_pci_device,                               \
    .flags      = VMS_STRUCT|VMS_POINTER,                            \
    .offset     = vmstate_offset_pointer(_state, _field, PCIDevice), \
}

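/*
 * Example (illustrative sketch): embedding the PCI device state in a
 * device's own VMStateDescription.  "MyDeviceState" and its "dev" and
 * "some_reg" fields are hypothetical:
 *
 *     static const VMStateDescription vmstate_my_device = {
 *         .name = "my-device",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(dev, MyDeviceState),
 *             VMSTATE_UINT32(some_reg, MyDeviceState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */
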
#endif