igb: Fix DMA requester specification for Tx packet
igb used to specify the PF as the DMA requester when reading Tx packets.
This caused Tx requests from VFs to be performed in the address space of
the PF, defeating the purpose of SR-IOV. Add logic to select the
requester according to the queue, which can be assigned to a VF.
Fixes: 3a977deebe ("Intrdocue igb device emulation")
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
commit f4fdaf009c
parent 212f7b1dac
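The core of the change: before each transmit batch, resolve which PCI function actually owns the Tx queue and use it for all DMA. A minimal sketch of that selection, assuming QEMU's pcie_sriov_get_vf_at_index() helper and igb's queue-to-VF mapping (the helper name igb_tx_requester is illustrative, not the verbatim source):

    /* Pick the DMA requester for a Tx ring: the owning VF if the queue
     * is assigned to one, otherwise the PF itself. */
    static PCIDevice *igb_tx_requester(IGBCore *core, const E1000E_RingInfo *txi)
    {
        /* igb exposes 16 Tx queues across up to 8 VFs; assumption here:
         * queue i belongs to VF (i % 8) when that VF exists. */
        PCIDevice *d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
        return d ? d : core->owner;
    }

The resulting device is then threaded through net_tx_pkt_reset() and igb_process_tx_desc() in the hunks below, so packet reads go through the VF's address space (and thus its IOMMU mappings) rather than the PF's.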
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -765,7 +765,7 @@ e1000e_process_tx_desc(E1000ECore *core,
         }
 
         tx->skip_cp = false;
-        net_tx_pkt_reset(tx->tx_pkt);
+        net_tx_pkt_reset(tx->tx_pkt, core->owner);
 
         tx->sum_needed = 0;
         tx->cptse = 0;
@@ -3447,7 +3447,7 @@ e1000e_core_pci_uninit(E1000ECore *core)
     qemu_del_vm_change_state_handler(core->vmstate);
 
     for (i = 0; i < E1000E_NUM_QUEUES; i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
         net_tx_pkt_uninit(core->tx[i].tx_pkt);
     }
 
@@ -3572,7 +3572,7 @@ static void e1000e_reset(E1000ECore *core, bool sw)
     e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
 
     for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
         memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
         core->tx[i].skip_cp = false;
     }
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -523,6 +523,7 @@ igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt)
 
 static void
 igb_process_tx_desc(IGBCore *core,
+                    PCIDevice *dev,
                     struct igb_tx *tx,
                     union e1000_adv_tx_desc *tx_desc,
                     int queue_index)
@@ -588,7 +589,7 @@ igb_process_tx_desc(IGBCore *core,
 
             tx->first = true;
             tx->skip_cp = false;
-            net_tx_pkt_reset(tx->tx_pkt);
+            net_tx_pkt_reset(tx->tx_pkt, dev);
         }
     }
 
@@ -803,6 +804,8 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         d = core->owner;
     }
 
+    net_tx_pkt_reset(txr->tx->tx_pkt, d);
+
     while (!igb_ring_empty(core, txi)) {
         base = igb_ring_head_descr(core, txi);
 
@@ -811,7 +814,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
         trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                               desc.read.cmd_type_len, desc.wb.status);
 
-        igb_process_tx_desc(core, txr->tx, &desc, txi->idx);
+        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
         igb_ring_advance(core, txi, 1);
         eic |= igb_txdesc_writeback(core, base, &desc, txi);
     }
@@ -3828,7 +3831,7 @@ igb_core_pci_realize(IGBCore *core,
     core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+        net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS);
     }
 
     net_rx_pkt_init(&core->rx_pkt);
@@ -3853,7 +3856,7 @@ igb_core_pci_uninit(IGBCore *core)
     qemu_del_vm_change_state_handler(core->vmstate);
 
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
-        net_tx_pkt_reset(core->tx[i].tx_pkt);
+        net_tx_pkt_reset(core->tx[i].tx_pkt, NULL);
         net_tx_pkt_uninit(core->tx[i].tx_pkt);
     }
 
@@ -4026,7 +4029,7 @@ static void igb_reset(IGBCore *core, bool sw)
 
     for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
         tx = &core->tx[i];
-        net_tx_pkt_reset(tx->tx_pkt);
+        net_tx_pkt_reset(tx->tx_pkt, NULL);
         memset(tx->ctx, 0, sizeof(tx->ctx));
         tx->first = true;
         tx->skip_cp = false;
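Note the asymmetry in the igb hunks: realize, uninit, and reset pass NULL, while igb_start_xmit() binds the real requester. No packet is in flight at init or teardown time, so there is no meaningful requester to name; the device is (re)bound at the start of every transmit batch. A condensed lifecycle sketch (illustrative, not verbatim source):

    net_tx_pkt_init(&tx->tx_pkt, NULL, E1000E_MAX_TX_FRAGS); /* realize: no requester yet */

    /* per transmit batch, in igb_start_xmit(): */
    net_tx_pkt_reset(tx->tx_pkt, d);             /* unmap leftovers, bind PF or VF */
    igb_process_tx_desc(core, d, tx, &desc, txi->idx);

    net_tx_pkt_reset(tx->tx_pkt, NULL);          /* uninit/reset: drop the binding */
    net_tx_pkt_uninit(tx->tx_pkt);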
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -443,7 +443,7 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt)
 #endif
 }
 
-void net_tx_pkt_reset(struct NetTxPkt *pkt)
+void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
 {
     int i;
 
@@ -467,6 +467,7 @@ void net_tx_pkt_reset(struct NetTxPkt *pkt)
                           pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);
         }
     }
+    pkt->pci_dev = pci_dev;
     pkt->raw_frags = 0;
 
     pkt->hdr_len = 0;
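Recording pci_dev in the packet matters because mapping and unmapping must go through the same device: a fragment mapped on behalf of a VF has to be unmapped through that VF as well. The pairing inside net_tx_pkt.c looks roughly like this (simplified sketch of the existing pci_dma_map()/pci_dma_unmap() call sites):

    /* map a guest buffer for reading, in the requester's address space */
    dma_addr_t mapped_len = len;
    void *base = pci_dma_map(pkt->pci_dev, pa, &mapped_len,
                             DMA_DIRECTION_TO_DEVICE);

    /* ... later, in net_tx_pkt_reset(), before pkt->pci_dev is rebound ... */
    pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base,
                  pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0);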
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -148,9 +148,10 @@ void net_tx_pkt_dump(struct NetTxPkt *pkt);
  * reset tx packet private context (needed to be called between packets)
  *
  * @pkt: packet
+ * @dev: PCI device processing the next packet
  *
  */
-void net_tx_pkt_reset(struct NetTxPkt *pkt);
+void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
 
 /**
  * Send packet to qemu. handles sw offloads if vhdr is not supported.
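Caller-side, the new parameter reads naturally: name the PCI function that will issue DMA for the next packet. An illustrative call from a queue that may belong to a VF (vf and pf are hypothetical variables):

    PCIDevice *requester = vf ? vf : pf;
    net_tx_pkt_reset(tx->tx_pkt, requester);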
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -678,7 +678,7 @@ static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx)
             vmxnet3_complete_packet(s, qidx, txd_idx);
             s->tx_sop = true;
             s->skip_current_tx_pkt = false;
-            net_tx_pkt_reset(s->tx_pkt);
+            net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
         }
     }
 }
@@ -1159,7 +1159,7 @@ static void vmxnet3_deactivate_device(VMXNET3State *s)
 {
     if (s->device_active) {
         VMW_CBPRN("Deactivating vmxnet3...");
-        net_tx_pkt_reset(s->tx_pkt);
+        net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
         net_tx_pkt_uninit(s->tx_pkt);
         net_rx_pkt_uninit(s->rx_pkt);
         s->device_active = false;
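e1000e and vmxnet3 do not implement SR-IOV, so for them the answer never varies: the device's own PCI function is always the requester, which is why those call sites simply pass core->owner and PCI_DEVICE(s).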