drop the disabled M_EXT_ROMAP mbuf code, and convert xennet_checksum_fill() to use in_undefer_cksum() instead of custom code to compute the checksum

for incoming packets, make it possible to defer/skip the checksum computation via the appropriate Rx capability flag, similarly to what is already done for Tx
parent cd4b207ac9
commit d54f034772
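For orientation before the hunks, here is a hypothetical, condensed sketch of the receive-side logic the patch introduces in xennet_checksum_fill(). The helper name rx_csum_sketch and its argument list are made up for illustration; the csum_flags/csum_data/if_csum_flags_rx/in_undefer_cksum usage mirrors the xennet_checksum.c hunk below, and the usual kernel headers (sys/mbuf.h, net/if.h, netinet/in_offload.h, netinet/udp.h, netinet/tcp.h) are assumed:

/*
 * Hypothetical condensed sketch (not the committed code verbatim):
 * mark a received IPv4 packet for deferred TCP/UDP checksumming and
 * only fall back to software when the interface cannot defer it.
 */
static int
rx_csum_sketch(struct ifnet *ifp, struct mbuf *m, int ehlen, int iphlen, int nxt)
{
	int sw_csum;

	switch (nxt) {
	case IPPROTO_UDP:
		m->m_pkthdr.csum_flags = M_CSUM_UDPv4;
		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
		m->m_pkthdr.csum_data |= iphlen << 16;
		break;
	case IPPROTO_TCP:
		m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		m->m_pkthdr.csum_data |= iphlen << 16;
		break;
	default:
		return EINVAL;	/* the real code also rate-limits a diagnostic printf */
	}

	/* Compute in software only what cannot be deferred to the stack. */
	sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_csum_flags_rx;
	if (sw_csum & (M_CSUM_UDPv4 | M_CSUM_TCPv4))
		in_undefer_cksum(m, ehlen, sw_csum & (M_CSUM_UDPv4 | M_CSUM_TCPv4));

	return 0;
}

When the matching IFCAP_CSUM_UDPv4_Rx/IFCAP_CSUM_TCPv4_Rx capability is enabled on the interface, sw_csum masks the flag out and the per-packet software checksum of the old code is skipped; otherwise in_undefer_cksum() computes it before the packet is handed up the stack.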
if_xennet_xenbus.c:

@@ -1,4 +1,4 @@
-/* $NetBSD: if_xennet_xenbus.c,v 1.88 2020/01/29 05:41:48 thorpej Exp $ */
+/* $NetBSD: if_xennet_xenbus.c,v 1.89 2020/03/16 20:49:22 jdolecek Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -84,7 +84,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.88 2020/01/29 05:41:48 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.89 2020/03/16 20:49:22 jdolecek Exp $");
 
 #include "opt_xen.h"
 #include "opt_nfs_boot.h"
@@ -386,7 +386,9 @@ xennet_xenbus_attach(device_t parent, device_t self, void *aux)
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_timer = 0;
 	ifp->if_snd.ifq_maxlen = uimax(ifqmaxlen, NET_TX_RING_SIZE * 2);
-	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
+	ifp->if_capabilities =
+	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx
+	    | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx;
 	IFQ_SET_READY(&ifp->if_snd);
 	if_attach(ifp);
 	ether_ifattach(ifp, sc->sc_enaddr);
@@ -1119,12 +1121,7 @@ again:
 			m->m_flags |= M_EXT_RW; /* we own the buffer */
 		}
 		if ((rx->flags & NETRXF_csum_blank) != 0) {
-			xennet_checksum_fill(&m);
-			if (m == NULL) {
-				if_statinc(ifp, if_ierrors);
-				xennet_rx_free_req(req);
-				continue;
-			}
+			xennet_checksum_fill(ifp, m);
 		}
 		/* free req may overwrite *rx, better doing it late */
 		xennet_rx_free_req(req);
xennet_checksum.c:

@@ -1,4 +1,4 @@
-/* $NetBSD: xennet_checksum.c,v 1.4 2020/03/14 11:52:12 jdolecek Exp $ */
+/* $NetBSD: xennet_checksum.c,v 1.5 2020/03/16 20:49:22 jdolecek Exp $ */
 
 /*-
  * Copyright (c)2006 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennet_checksum.c,v 1.4 2020/03/14 11:52:12 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennet_checksum.c,v 1.5 2020/03/16 20:49:22 jdolecek Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -39,73 +39,51 @@ __KERNEL_RCSID(0, "$NetBSD: xennet_checksum.c,v 1.4 2020/03/14 11:52:12 jdolecek
 
 #include <netinet/in.h>
 #include <netinet/in_systm.h>
+#include <netinet/in_offload.h>
 #include <netinet/ip.h>
 #include <netinet/tcp.h>
 #include <netinet/udp.h>
 
 #include <xen/xennet_checksum.h>
 
-static const void *m_extract(struct mbuf *, int, int, void *);
-static void *m_extract_write(struct mbuf *, int, int, void *);
-
-static void *m_extract1(struct mbuf *, int, int, void *, int);
-#define MBUF_EXTRACT_WRITE 1
+/* ratecheck(9) for checksum validation failures */
+static const struct timeval xn_cksum_errintvl = { 600, 0 };	/* 10 min, each */
 
 static void *
-m_extract1(struct mbuf *m, int off, int len, void *buf, int flags)
+m_extract(struct mbuf *m, int off, int len)
 {
-	void *result;
-
-	KASSERT((m->m_flags & M_PKTHDR) != 0);
-
-	if (m->m_pkthdr.len < off + len) {
-		result = NULL;
-	} else if (m->m_len >= off + len &&
-	    ((flags & MBUF_EXTRACT_WRITE) != 0 || !M_READONLY(m))) {
-		result = mtod(m, char *) + off;
-	} else {
-		m_copydata(m, off, len, buf);
-		result = buf;
-	}
-
-	return result;
-}
-
-static const void *
-m_extract(struct mbuf *m, int off, int len, void *buf)
-{
-
-	return m_extract1(m, off, len, buf, 0);
-}
-
-static void *
-m_extract_write(struct mbuf *m, int off, int len, void *buf)
-{
-
-	return m_extract1(m, off, len, buf, MBUF_EXTRACT_WRITE);
+	KASSERT(m->m_pkthdr.len >= off + len);
+	KASSERT(m->m_len >= off + len);
+
+	if (m->m_pkthdr.len >= off + len)
+		return mtod(m, char *) + off;
+	else
+		return NULL;
 }
 
 /*
- * xennet_checksum_fill: fill TCP/UDP checksum
+ * xennet_checksum_fill: fill TCP/UDP checksum, or arrange
+ * for hw offload to do it
 */
 
 int
-xennet_checksum_fill(struct mbuf **mp)
+xennet_checksum_fill(struct ifnet *ifp, struct mbuf *m)
 {
-	struct mbuf *m = *mp;
-	struct ether_header eh_store;
 	const struct ether_header *eh;
-	struct ip iph_store;
-	const struct ip *iph;
+	struct ip *iph;
 	int ehlen;
 	int iphlen;
 	int iplen;
 	uint16_t etype;
 	uint8_t nxt;
 	int error = 0;
+	int sw_csum;
 
-	eh = m_extract(m, 0, sizeof(*eh), &eh_store);
+	KASSERT(!M_READONLY(m));
+	KASSERT((m->m_flags & M_PKTHDR) != 0);
+
+	eh = m_extract(m, 0, sizeof(*eh));
 	if (eh == NULL) {
 		/* Too short, packet will be dropped by upper layer */
 		return EINVAL;
 	}
 	etype = eh->ether_type;
@@ -114,69 +92,58 @@ xennet_checksum_fill(struct mbuf **mp)
 	} else if (etype == htobe16(ETHERTYPE_IP)) {
 		ehlen = ETHER_HDR_LEN;
 	} else {
+		static struct timeval lasttime;
+		if (ratecheck(&lasttime, &xn_cksum_errintvl))
+			printf("%s: unknown etype %#x passed no checksum\n",
+			    ifp->if_xname, ntohs(etype));
 		return EINVAL;
 	}
 
-	iph = m_extract(m, ehlen, sizeof(*iph), &iph_store);
+	iph = m_extract(m, ehlen, sizeof(*iph));
 	if (iph == NULL) {
 		/* Too short, packet will be dropped by upper layer */
 		return EINVAL;
 	}
 	nxt = iph->ip_p;
-	iphlen = iph->ip_hl * 4;
+	iphlen = iph->ip_hl << 2;
 	iplen = ntohs(iph->ip_len);
 	if (ehlen + iplen > m->m_pkthdr.len) {
 		/* Too short, packet will be dropped by upper layer */
 		return EINVAL;
 	}
-	if (nxt == IPPROTO_UDP) {
-		struct udphdr uh_store;
-		struct udphdr *uh;
-		int ulen;
-
-		uh = m_extract_write(m, ehlen + iphlen, sizeof(*uh), &uh_store);
-		ulen = ntohs(uh->uh_ulen);
-		if (ehlen + iphlen + ulen > m->m_pkthdr.len) {
-			return EINVAL;
-		}
-		m->m_len -= ehlen;
-		m->m_data += ehlen;
-		uh->uh_sum = 0;
-		uh->uh_sum = in4_cksum(m, nxt, iphlen, iplen - iphlen);
-		m->m_len += ehlen;
-		m->m_data -= ehlen;
-		if (uh != &uh_store) {
-			m = m_copyback_cow(m, ehlen + iphlen, sizeof(*uh), uh,
-			    M_DONTWAIT);
-			if (m == NULL) {
-				error = ENOMEM;
-			}
-		}
-	} else if (nxt == IPPROTO_TCP) {
-		struct tcphdr th_store;
-		struct tcphdr *th;
-		int thlen;
-
-		th = m_extract_write(m, ehlen + iphlen, sizeof(*th), &th_store);
-		thlen = th->th_off * 4;
-		if (ehlen + iphlen + thlen > m->m_pkthdr.len) {
-			return EINVAL;
-		}
-		m->m_len -= ehlen;
-		m->m_data += ehlen;
-		th->th_sum = 0;
-		th->th_sum = in4_cksum(m, nxt, iphlen, iplen - iphlen);
-		m->m_len += ehlen;
-		m->m_data -= ehlen;
-		if (th != &th_store) {
-			m = m_copyback_cow(m, ehlen + iphlen, sizeof(*th), th,
-			    M_DONTWAIT);
-			if (m == NULL) {
-				error = ENOMEM;
-			}
-		}
-	} else {
-	}
-	*mp = m;
+	switch (nxt) {
+	case IPPROTO_UDP:
+		m->m_pkthdr.csum_flags = M_CSUM_UDPv4;
+		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
+		m->m_pkthdr.csum_data |= iphlen << 16;
+		break;
+	case IPPROTO_TCP:
+		m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
+		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+		m->m_pkthdr.csum_data |= iphlen << 16;
+		break;
+	default:
+	    {
+		static struct timeval lasttime;
+		if (ratecheck(&lasttime, &xn_cksum_errintvl))
+			printf("%s: unknown proto %d passed no checksum\n",
+			    ifp->if_xname, nxt);
+		error = EINVAL;
+		goto out;
+	    }
+	}
+
+	/*
+	 * Only compute the checksum if impossible to defer.
+	 */
+	sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_csum_flags_rx;
+
+	if (sw_csum & (M_CSUM_UDPv4|M_CSUM_TCPv4)) {
+		in_undefer_cksum(m, ehlen,
+		    sw_csum & (M_CSUM_UDPv4|M_CSUM_TCPv4));
+	}
+
 out:
 	return error;
 }
xennetback_xenbus.c:

@@ -1,4 +1,4 @@
-/* $NetBSD: xennetback_xenbus.c,v 1.76 2020/01/29 05:41:48 thorpej Exp $ */
+/* $NetBSD: xennetback_xenbus.c,v 1.77 2020/03/16 20:49:22 jdolecek Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.76 2020/01/29 05:41:48 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.77 2020/03/16 20:49:22 jdolecek Exp $");
 
 #include "opt_xen.h"
 
@@ -90,7 +90,7 @@ struct xni_pkt {
 /* pools for xni_pkt */
 struct pool xni_pkt_pool;
 /* ratecheck(9) for pool allocation failures */
-struct timeval xni_pool_errintvl = { 30, 0 };  /* 30s, each */
+static const struct timeval xni_pool_errintvl = { 30, 0 };  /* 30s, each */
 
 /* state of a xnetback instance */
 typedef enum {
@@ -301,7 +301,9 @@ xennetback_xenbus_create(struct xenbus_device *xbusd)
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_snd.ifq_maxlen =
 	    uimax(ifqmaxlen, NET_TX_RING_SIZE * 2);
-	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
+	ifp->if_capabilities =
+	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx
+	    | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx;
 	ifp->if_ioctl = xennetback_ifioctl;
 	ifp->if_start = xennetback_ifstart;
 	ifp->if_watchdog = xennetback_ifwatchdog;
@@ -857,52 +859,28 @@ xennetback_evthandler(void *arg)
 			}
 		}
 
-#ifdef notyet
-		/*
-		 * A lot of work is needed in the tcp stack to handle read-only
-		 * ext storage so always copy for now.
-		 */
-		if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) ==
-		    (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1)))
-#else
-		if (1)
-#endif /* notyet */
-		{
-			/*
-			 * This is the last TX buffer. Copy the data and
-			 * ack it. Delaying it until the mbuf is
-			 * freed will stall transmit.
-			 */
-			m->m_len = uimin(MHLEN, txreq.size);
-			m->m_pkthdr.len = 0;
-			m_copyback(m, 0, txreq.size,
-			    (void *)(pkt_va + txreq.offset));
-			xni_pkt_unmap(pkt, pkt_va);
-			if (m->m_pkthdr.len < txreq.size) {
-				if_statinc(ifp, if_ierrors);
-				m_freem(m);
-				xennetback_tx_response(xneti, txreq.id,
-				    NETIF_RSP_DROPPED);
-				continue;
-			}
-			xennetback_tx_response(xneti, txreq.id,
-			    NETIF_RSP_OKAY);
-		} else {
-
-			pkt->pkt_id = txreq.id;
-			pkt->pkt_xneti = xneti;
-
-			MEXTADD(m, pkt_va + txreq.offset,
-			    txreq.size, M_DEVBUF, xennetback_tx_free, pkt);
-			m->m_pkthdr.len = m->m_len = txreq.size;
-			m->m_flags |= M_EXT_ROMAP;
-		}
+		/*
+		 * This is the last TX buffer. Copy the data and
+		 * ack it. Delaying it until the mbuf is
+		 * freed will stall transmit.
+		 */
+		m->m_len = uimin(MHLEN, txreq.size);
+		m->m_pkthdr.len = 0;
+		m_copyback(m, 0, txreq.size,
+		    (void *)(pkt_va + txreq.offset));
+		xni_pkt_unmap(pkt, pkt_va);
+		if (m->m_pkthdr.len < txreq.size) {
+			if_statinc(ifp, if_ierrors);
+			m_freem(m);
+			xennetback_tx_response(xneti, txreq.id,
+			    NETIF_RSP_DROPPED);
+			continue;
+		}
+		xennetback_tx_response(xneti, txreq.id,
+		    NETIF_RSP_OKAY);
+
 		if ((txreq.flags & NETTXF_csum_blank) != 0) {
-			xennet_checksum_fill(&m);
-			if (m == NULL) {
-				if_statinc(ifp, if_ierrors);
-				continue;
-			}
+			xennet_checksum_fill(ifp, m);
 		}
 		m_set_rcvif(m, ifp);