net: netmap: improve netmap_receive_iov()

Changes:
  - Save CPU cycles by computing the return value while scanning the
    input iovec, rather than calling iov_size() at the end.
  - Remove the check for s->tx != NULL, because s->tx can never be
    NULL at this point.
  - Cache ring->tail in a local variable and use it to check for
    space in the TX ring. The use of nm_ring_empty() was invalid,
    because nobody is updating ring->cur and ring->head at that point.
  - In case we run out of netmap slots in the middle of a packet,
    move the wake-up point by advancing ring->cur, but do not update
    ring->head, so that the incomplete packet is never exposed (see
    the sketch after this list).
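
As a minimal standalone sketch (not QEMU code), the pointer discipline
described above can be illustrated as follows: head publishes completed
packets to the kernel, cur is the wake-up point, and tail marks the start
of the kernel-owned part of the ring. The names fake_ring, ring_next and
try_transmit are hypothetical, chosen only to mirror the netmap_ring
fields that the patch manipulates.

#include <stdint.h>
#include <stdio.h>

struct fake_ring {
    uint32_t head;      /* first slot owned by us; advancing it publishes slots */
    uint32_t cur;       /* wake-up point: kernel notifies us once tail passes it */
    uint32_t tail;      /* first slot owned by the kernel (read-only for us) */
    uint32_t num_slots; /* ring size */
};

static uint32_t ring_next(const struct fake_ring *r, uint32_t i)
{
    return (i + 1 == r->num_slots) ? 0 : i + 1;
}

/* Try to queue a packet that needs nslots slots. On shortage, move cur up
 * to tail (so the kernel wakes us when slots are returned) but leave head
 * untouched, so the incomplete packet is never exposed. */
static int try_transmit(struct fake_ring *r, unsigned int nslots)
{
    uint32_t tail = r->tail;    /* cache tail once, as the patch does */
    uint32_t i = r->head;

    while (nslots > 0) {
        if (i == tail) {        /* ran out of slots mid-packet */
            r->cur = tail;      /* advance the wake-up point only */
            return 0;           /* packet deferred */
        }
        /* ... fill slot i here ... */
        i = ring_next(r, i);
        nslots--;
    }
    r->head = r->cur = i;       /* publish the complete packet */
    return 1;
}

int main(void)
{
    struct fake_ring r = { .head = 0, .cur = 0, .tail = 3, .num_slots = 8 };

    printf("need 4 slots: %s\n", try_transmit(&r, 4) ? "sent" : "deferred");
    printf("head=%u cur=%u tail=%u\n", r.head, r.cur, r.tail);
    /* prints: deferred, then head=0 cur=3 tail=3 -- head did not move */
    return 0;
}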

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Author:    Vincenzo Maffione, 2018-12-06 17:59:07 +01:00
Committer: Jason Wang
Parent:    c7cbb6b48f
Commit:    4875bf1406

@@ -159,21 +159,22 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
 {
     NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
     struct netmap_ring *ring = s->tx;
+    unsigned int tail = ring->tail;
+    ssize_t totlen = 0;
     uint32_t last;
     uint32_t idx;
     uint8_t *dst;
     int j;
     uint32_t i;
 
-    if (unlikely(!ring)) {
-        /* Drop the packet. */
-        return iov_size(iov, iovcnt);
-    }
-
-    last = i = ring->cur;
+    last = i = ring->head;
 
     if (nm_ring_space(ring) < iovcnt) {
-        /* Not enough netmap slots. */
+        /* Not enough netmap slots. Tell the kernel that we have seen the new
+         * available slots (so that it notifies us again when it has more
+         * ones), but without publishing any new slots to be processed
+         * (e.g., we don't advance ring->head). */
+        ring->cur = tail;
         netmap_write_poll(s, true);
         return 0;
     }
@@ -183,14 +184,17 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
         int offset = 0;
         int nm_frag_size;
 
+        totlen += iov_frag_size;
+
         /* Split each iovec fragment over more netmap slots, if
            necessary. */
         while (iov_frag_size) {
             nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);
 
-            if (unlikely(nm_ring_empty(ring))) {
-                /* We run out of netmap slots while splitting the
+            if (unlikely(i == tail)) {
+                /* We ran out of netmap slots while splitting the
                    iovec fragments. */
+                ring->cur = tail;
                 netmap_write_poll(s, true);
                 return 0;
             }
@@ -212,12 +216,13 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
     /* The last slot must not have NS_MOREFRAG set. */
     ring->slot[last].flags &= ~NS_MOREFRAG;
 
-    /* Now update ring->cur and ring->head. */
-    ring->cur = ring->head = i;
+    /* Now update ring->head and ring->cur to publish the new slots and
+     * the new wakeup point. */
+    ring->head = ring->cur = i;
 
     ioctl(s->nmd->fd, NIOCTXSYNC, NULL);
 
-    return iov_size(iov, iovcnt);
+    return totlen;
 }
 
 static ssize_t netmap_receive(NetClientState *nc,