net/cadence_gem: Prefetch rx descriptors ASAP

The real hardware prefetches rx buffer descriptors ASAP and
potentially raises the relevant interrupts following the fetch,
even in the absence of a received packet.

Reported-by: Deepika Dhamija <deepika@xilinx.com>
Signed-off-by: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
Message-id: 41629e35edfdb1f02f1e401f2c3d0e2e4c9e44b3.1386136219.git.peter.crosthwaite@xilinx.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 06c2fe951d
parent 7cfd65e41c
Author:    Peter Crosthwaite, 2013-12-03 21:56:50 -08:00
Committer: Peter Maydell

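Before the diff itself, here is a minimal standalone C sketch of the behaviour the commit message describes. GemModel, get_rx_desc(), the register indices and the bit positions below are invented for illustration and are not QEMU's definitions; only the ownership check and the NOBUF/RXUSED side effects mirror the gem_get_rx_desc() helper added by the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DESC_OWNERSHIP  (1u << 0)   /* illustrative "owned by software" bit */
    #define RXSTATUS_NOBUF  (1u << 0)   /* illustrative status bit */
    #define INT_RXUSED      (1u << 2)   /* illustrative interrupt bit */

    enum { REG_RXSTATUS, REG_ISR, REG_IMR, NUM_REGS };

    typedef struct {
        uint32_t regs[NUM_REGS];
        uint32_t rx_desc_addr;   /* byte offset of the current rx descriptor */
        uint32_t rx_desc[2];     /* prefetched copy of that descriptor */
        uint8_t *dma_mem;        /* stand-in for guest physical memory */
        int irq_pending;
    } GemModel;

    static void update_int_status(GemModel *s)
    {
        /* An interrupt is pending whenever any ISR bit is set. */
        s->irq_pending = s->regs[REG_ISR] != 0;
    }

    /* Same shape as the new gem_get_rx_desc(): fetch the descriptor now and,
     * if software still owns it, latch "no buffer" status and the RXUSED
     * interrupt even though no packet has arrived. */
    static void get_rx_desc(GemModel *s)
    {
        memcpy(s->rx_desc, s->dma_mem + s->rx_desc_addr, sizeof(s->rx_desc));

        if (s->rx_desc[0] & DESC_OWNERSHIP) {
            s->regs[REG_RXSTATUS] |= RXSTATUS_NOBUF;
            s->regs[REG_ISR] |= INT_RXUSED & ~s->regs[REG_IMR];
            update_int_status(s);
        }
    }

    int main(void)
    {
        uint32_t guest_mem[16] = { 0 };
        GemModel s = { .dma_mem = (uint8_t *)guest_mem, .rx_desc_addr = 0 };

        /* The descriptor at offset 0 is still marked "owned by software". */
        guest_mem[0] = DESC_OWNERSHIP;

        /* Prefetching it (e.g. when RX is enabled) raises the interrupt
         * before any packet has been received. */
        get_rx_desc(&s);

        printf("RXSTATUS=0x%x ISR=0x%x irq=%d\n",
               s.regs[REG_RXSTATUS], s.regs[REG_ISR], s.irq_pending);
        return 0;
    }

Run as-is, the sketch prints RXSTATUS=0x1 ISR=0x4 irq=1: the "buffer not available" condition is signalled purely as a consequence of the descriptor fetch, which is the hardware behaviour the patch models.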

@@ -346,6 +346,8 @@ typedef struct GemState {
     uint32_t rx_desc_addr;
     uint32_t tx_desc_addr;
 
+    unsigned rx_desc[2];
+
 } GemState;
 
 /* The broadcast MAC address: 0xFFFFFFFFFFFF */
@@ -579,13 +581,30 @@ static int gem_mac_address_filter(GemState *s, const uint8_t *packet)
     return GEM_RX_REJECT;
 }
 
+static void gem_get_rx_desc(GemState *s)
+{
+    DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr);
+    /* read current descriptor */
+    cpu_physical_memory_read(s->rx_desc_addr,
+                             (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
+
+    /* Descriptor owned by software ? */
+    if (rx_desc_get_ownership(s->rx_desc) == 1) {
+        DB_PRINT("descriptor 0x%x owned by sw.\n",
+                 (unsigned)s->rx_desc_addr);
+        s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
+        s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
+        /* Handle interrupt consequences */
+        gem_update_int_status(s);
+    }
+}
+
 /*
  * gem_receive:
  * Fit a packet handed to us by QEMU into the receive descriptor ring.
  */
 static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 {
-    unsigned   desc[2];
     GemState *s;
     unsigned   rxbufsize, bytes_to_copy;
     unsigned   rxbuf_offset;
@@ -595,11 +614,6 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 
     s = qemu_get_nic_opaque(nc);
 
-    /* Do nothing if receive is not enabled. */
-    if (!gem_can_receive(nc)) {
-        return -1;
-    }
-
     /* Is this destination MAC address "for us" ? */
     if (gem_mac_address_filter(s, buf) == GEM_RX_REJECT) {
         return -1;
@@ -667,53 +681,44 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
     DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
 
     while (bytes_to_copy) {
-        DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr);
-        /* read current descriptor */
-        cpu_physical_memory_read(s->rx_desc_addr,
-                                 (uint8_t *)&desc[0], sizeof(desc));
-
-        /* Descriptor owned by software ? */
-        if (rx_desc_get_ownership(desc) == 1) {
-            DB_PRINT("descriptor 0x%x owned by sw.\n",
-                     (unsigned)s->rx_desc_addr);
-            s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
-            s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
-            /* Handle interrupt consequences */
-            gem_update_int_status(s);
+        /* Do nothing if receive is not enabled. */
+        if (!gem_can_receive(nc)) {
+            assert(!first_desc);
             return -1;
         }
 
         DB_PRINT("copy %d bytes to 0x%x\n", MIN(bytes_to_copy, rxbufsize),
-                rx_desc_get_buffer(desc));
+                rx_desc_get_buffer(s->rx_desc));
 
         /* Copy packet data to emulated DMA buffer */
-        cpu_physical_memory_write(rx_desc_get_buffer(desc) + rxbuf_offset,
+        cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc) + rxbuf_offset,
                                   rxbuf_ptr, MIN(bytes_to_copy, rxbufsize));
         bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);
         rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
 
         /* Update the descriptor.  */
         if (first_desc) {
-            rx_desc_set_sof(desc);
+            rx_desc_set_sof(s->rx_desc);
             first_desc = false;
         }
         if (bytes_to_copy == 0) {
-            rx_desc_set_eof(desc);
-            rx_desc_set_length(desc, size);
+            rx_desc_set_eof(s->rx_desc);
+            rx_desc_set_length(s->rx_desc, size);
        }
-        rx_desc_set_ownership(desc);
+        rx_desc_set_ownership(s->rx_desc);
 
         /* Descriptor write-back.  */
         cpu_physical_memory_write(s->rx_desc_addr,
-                                  (uint8_t *)&desc[0], sizeof(desc));
+                                  (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
 
         /* Next descriptor */
-        if (rx_desc_get_wrap(desc)) {
+        if (rx_desc_get_wrap(s->rx_desc)) {
             DB_PRINT("wrapping RX descriptor list\n");
             s->rx_desc_addr = s->regs[GEM_RXQBASE];
         } else {
             DB_PRINT("incrementing RX descriptor list\n");
             s->rx_desc_addr += 8;
         }
+
+        gem_get_rx_desc(s);
     }
 
     /* Count it */
@@ -1053,6 +1058,9 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
     /* Handle register write side effects */
     switch (offset) {
     case GEM_NWCTRL:
+        if (val & GEM_NWCTRL_RXENA) {
+            gem_get_rx_desc(s);
+        }
         if (val & GEM_NWCTRL_TXSTART) {
             gem_transmit(s);
         }
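A note on the last hunk: with this change, merely setting the receive-enable bit in GEM_NWCTRL triggers a descriptor fetch, so a guest whose ring is still software-owned can see the RXUSED interrupt before any traffic arrives. The fragment below extends the standalone sketch given earlier (it reuses GemModel and get_rx_desc from that sketch); the NWCTRL_RXENA value is illustrative, not QEMU's GEM_NWCTRL_RXENA definition.

    #define NWCTRL_RXENA  (1u << 2)   /* illustrative receive-enable bit */

    /* Models the new GEM_NWCTRL write side effect from the final hunk:
     * enabling RX prefetches the current descriptor immediately. */
    static void write_nwctrl(GemModel *s, uint32_t val)
    {
        if (val & NWCTRL_RXENA) {
            get_rx_desc(s);
        }
        /* other side effects (e.g. TXSTART, as in the hunk above) elided */
    }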