Fix several annoyances related to MSS handling in BSD TCP:

- Don't overload t_maxseg.  Previous behavior was to set it to the min
  of the peer's advertised MSS, our advertised MSS, and tcp_mssdflt
  (for non-local networks).  This breaks PMTU discovery running on
  either host.  Instead, remember the MSS we advertise, and use it
  as appropriate (in silly window avoidance).
- Per last bullet, split tcp_mss() into several functions for handling
  MSS (ours and peer's), and performing various tasks when a connection
  becomes ESTABLISHED.
- Introduce a new function, tcp_segsize(), which computes the max size
  for every segment transmitted in tcp_output().  This will eventually
  be used to hook in PMTU discovery.
This commit is contained in:
thorpej 1997-09-22 21:49:55 +00:00
parent de572198ad
commit 4ed600dbd0
4 changed files with 249 additions and 184 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: tcp_input.c,v 1.31 1997/07/28 22:07:38 thorpej Exp $ */
/* $NetBSD: tcp_input.c,v 1.32 1997/09/22 21:49:55 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
@ -651,12 +651,12 @@ after_listen:
tp->irs = ti->ti_seq;
tcp_rcvseqinit(tp);
tp->t_flags |= TF_ACKNOW;
tcp_mss(tp, opti.maxseg);
tcp_mss_from_peer(tp, opti.maxseg);
tcp_rmx_rtt(tp);
if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
tcpstat.tcps_connects++;
soisconnected(so);
tp->t_state = TCPS_ESTABLISHED;
tp->t_timer[TCPT_KEEP] = tcp_keepidle;
tcp_established(tp);
/* Do window scaling on this connection? */
if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
(TF_RCVD_SCALE|TF_REQ_SCALE)) {
@ -918,8 +918,7 @@ after_listen:
goto dropwithreset;
tcpstat.tcps_connects++;
soisconnected(so);
tp->t_state = TCPS_ESTABLISHED;
tp->t_timer[TCPT_KEEP] = tcp_keepidle;
tcp_established(tp);
/* Do window scaling? */
if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
(TF_RCVD_SCALE|TF_REQ_SCALE)) {
@ -1391,7 +1390,7 @@ tcp_dooptions(tp, cp, cnt, ti, oi)
case TCPOPT_TIMESTAMP:
if (optlen != TCPOLEN_TIMESTAMP)
continue;
oi -> ts_present = 1;
oi->ts_present = 1;
bcopy(cp + 2, &oi->ts_val, sizeof(oi->ts_val));
NTOHL(oi->ts_val);
bcopy(cp + 6, &oi->ts_ecr, sizeof(oi->ts_ecr));
@ -1519,155 +1518,6 @@ tcp_xmit_timer(tp, rtt)
tp->t_softerror = 0;
}
/*
* Determine a reasonable value for maxseg size.
* If the route is known, check route for mtu.
* If none, use an mss that can be handled on the outgoing
* interface without forcing IP to fragment; if bigger than
* an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
* to utilize large mbufs. If no route is found, route has no mtu,
* or the destination isn't local, use a default, hopefully conservative
* size (usually 512 or the default IP max size, but no more than the mtu
* of the interface), as we can't discover anything about intervening
* gateways or networks. We also initialize the congestion/slow start
* window to be a single segment if the destination isn't local.
* While looking at the routing entry, we also initialize other path-dependent
* parameters from pre-set or cached values in the routing entry.
*/
int
tcp_mss(tp, offer)
register struct tcpcb *tp;
u_int offer;
{
struct route *ro;
register struct rtentry *rt;
struct ifnet *ifp;
register int rtt, mss;
u_long bufsize;
struct inpcb *inp;
struct socket *so;
inp = tp->t_inpcb;
ro = &inp->inp_route;
if ((rt = ro->ro_rt) == (struct rtentry *)0) {
/* No route yet, so try to acquire one */
if (!in_nullhost(inp->inp_faddr)) {
ro->ro_dst.sa_family = AF_INET;
ro->ro_dst.sa_len = sizeof(ro->ro_dst);
satosin(&ro->ro_dst)->sin_addr = inp->inp_faddr;
rtalloc(ro);
}
/* Still no route: fall back to the conservative default MSS. */
if ((rt = ro->ro_rt) == (struct rtentry *)0)
return (tcp_mssdflt);
}
ifp = rt->rt_ifp;
so = inp->inp_socket;
#ifdef RTV_MTU /* if route characteristics exist ... */
/*
* While we're here, check if there's an initial rtt
* or rttvar. Convert from the route-table units
* to scaled multiples of the slow timeout timer.
*/
if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
/*
* XXX the lock bit for MTU indicates that the value
* is also a minimum value; this is subject to time.
*/
if (rt->rt_rmx.rmx_locks & RTV_RTT)
tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ);
tp->t_srtt = rtt /
((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
if (rt->rt_rmx.rmx_rttvar)
tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
else
/* default variation is +- 1 rtt */
tp->t_rttvar =
tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
/* Seed the retransmit timer from the cached srtt/rttvar. */
TCPT_RANGESET(tp->t_rxtcur,
((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
tp->t_rttmin, TCPTV_REXMTMAX);
}
/*
* if there's an mtu associated with the route, use it
*/
if (rt->rt_rmx.rmx_mtu)
mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
else
#endif /* RTV_MTU */
{
/* No route MTU: derive MSS from the outgoing interface MTU. */
mss = ifp->if_mtu - sizeof(struct tcpiphdr);
#if (MCLBYTES & (MCLBYTES - 1)) == 0
/* MCLBYTES is a power of two: round down with a mask. */
if (mss > MCLBYTES)
mss &= ~(MCLBYTES-1);
#else
if (mss > MCLBYTES)
mss = mss / MCLBYTES * MCLBYTES;
#endif
if (!in_localaddr(inp->inp_faddr))
mss = min(mss, tcp_mssdflt);
}
/*
* The current mss, t_maxseg, is initialized to the default value.
* If we compute a smaller value, reduce the current mss.
* If we compute a larger value, return it for use in sending
* a max seg size option, but don't store it for use
* unless we received an offer at least that large from peer.
* However, do not accept offers under 32 bytes.
*/
if (offer)
mss = min(mss, offer);
mss = max(mss, 32); /* sanity */
if (mss < tp->t_maxseg || offer != 0) {
/*
* If there's a pipesize, change the socket buffer
* to that size. Make the socket buffers an integral
* number of mss units; if the mss is larger than
* the socket buffer, decrease the mss.
*/
#ifdef RTV_SPIPE
if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0)
#endif
bufsize = so->so_snd.sb_hiwat;
if (bufsize < mss)
mss = bufsize;
else {
bufsize = roundup(bufsize, mss);
if (bufsize > sb_max)
bufsize = sb_max;
(void)sbreserve(&so->so_snd, bufsize);
}
tp->t_maxseg = mss;
/* Same treatment for the receive buffer, but never shrink mss here. */
#ifdef RTV_RPIPE
if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0)
#endif
bufsize = so->so_rcv.sb_hiwat;
if (bufsize > mss) {
bufsize = roundup(bufsize, mss);
if (bufsize > sb_max)
bufsize = sb_max;
(void)sbreserve(&so->so_rcv, bufsize);
}
}
/* Initial congestion window is a single segment. */
tp->snd_cwnd = mss;
#ifdef RTV_SSTHRESH
if (rt->rt_rmx.rmx_ssthresh) {
/*
* There's some sort of gateway or interface
* buffer limit on the path. Use this to set
* the slow start threshold, but set the
* threshold to no less than 2*mss.
*/
tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
}
#endif /* RTV_SSTHRESH */
return (mss);
}
/*
* TCP compressed state engine. Currently used to hold compressed
* state for SYN_RECEIVED.
@ -2021,7 +1871,10 @@ syn_cache_get(so, m)
tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
tcpstat.tcps_accepts++;
tcp_mss(tp, sc->sc_peermaxseg);
/* Initialize tp->t_ourmss before we deal with the peer's! */
tp->t_ourmss = sc->sc_ourmaxseg;
tcp_mss_from_peer(tp, sc->sc_peermaxseg);
tcp_rmx_rtt(tp);
tp->snd_wl1 = sc->sc_irs;
tp->rcv_up = sc->sc_irs + 1;
@ -2116,6 +1969,8 @@ syn_cache_unreach(ip, th)
* this to the syn cache, and send back a segment:
* <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
* to the source.
*
* XXX We don't properly handle SYN-with-data!
*/
int
@ -2127,12 +1982,13 @@ syn_cache_add(so, m, optp, optlen, oi)
struct tcp_opt_info *oi;
{
register struct tcpiphdr *ti;
struct tcpcb tb;
struct tcpcb tb, *tp;
long win;
struct syn_cache *sc, **sc_prev;
struct syn_cache_head *scp;
extern int tcp_do_rfc1323;
tp = sototcpcb(so);
ti = mtod(m, struct tcpiphdr *);
/*
@ -2185,6 +2041,7 @@ syn_cache_add(so, m, optp, optlen, oi)
sc->sc_iss = tcp_iss;
tcp_iss += TCP_ISSINCR/2;
sc->sc_peermaxseg = oi->maxseg;
sc->sc_ourmaxseg = tcp_mss_to_advertise(tp);
sc->sc_tstmp = (tcp_do_rfc1323 && (tb.t_flags & TF_RCVD_TSTMP)) ? 1 : 0;
if ((tb.t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
(TF_RCVD_SCALE|TF_REQ_SCALE)) {
@ -2219,12 +2076,6 @@ syn_cache_respond(sc, m, ti, win, ts)
{
u_int8_t *optp;
int optlen;
u_int16_t mss;
extern unsigned long in_maxmtu;
mss = in_maxmtu - sizeof(struct tcpiphdr);
if (!in_localaddr(ti->ti_dst))
mss = min(mss, tcp_mssdflt);
/*
* Tack on the TCP options. If there isn't enough trailing
@ -2252,8 +2103,8 @@ syn_cache_respond(sc, m, ti, win, ts)
optp = (u_int8_t *)(ti + 1);
optp[0] = TCPOPT_MAXSEG;
optp[1] = 4;
optp[2] = (mss >> 8) & 0xff;
optp[3] = mss & 0xff;
optp[2] = (sc->sc_ourmaxseg >> 8) & 0xff;
optp[3] = sc->sc_ourmaxseg & 0xff;
optlen = 4;
if (sc->sc_request_r_scale != 15) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: tcp_output.c,v 1.16 1997/06/03 16:17:09 kml Exp $ */
/* $NetBSD: tcp_output.c,v 1.17 1997/09/22 21:49:59 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1988, 1990, 1993
@ -44,6 +44,7 @@
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
@ -72,6 +73,34 @@ extern struct mbuf *m_copypack();
#define MAX_TCPOPTLEN 32 /* max # bytes that go in options */
static __inline int tcp_segsize __P((struct tcpcb *));
static __inline int
tcp_segsize(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
	struct rtentry *rt = in_pcbrtentry(inp);
	int segsz;

	/*
	 * Compute the maximum segment size to use for this transmission.
	 * Start from the conservative default; if we have a route, prefer
	 * its cached MTU, else use the outgoing interface MTU when the
	 * destination is local or reached via loopback.
	 */
	segsz = tcp_mssdflt;
	if (rt != NULL) {
		if (rt->rt_rmx.rmx_mtu != 0)
			segsz = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		else if (in_localaddr(inp->inp_faddr) ||
		    (rt->rt_ifp->if_flags & IFF_LOOPBACK))
			segsz = rt->rt_ifp->if_mtu - sizeof(struct tcpiphdr);
	}

	/* Never exceed the MSS the peer advertised to us. */
	return (min(tp->t_maxseg, segsz));
}
/*
* Tcp output routine: figure out what should be sent and send it.
*/
@ -86,7 +115,9 @@ tcp_output(tp)
register struct tcpiphdr *ti;
u_char opt[MAX_TCPOPTLEN];
unsigned optlen, hdrlen;
int idle, sendalot;
int idle, sendalot, segsize;
segsize = tcp_segsize(tp);
/*
* Determine length of data that should be transmitted,
@ -164,8 +195,8 @@ again:
tp->snd_nxt = tp->snd_una;
}
}
if (len > tp->t_maxseg) {
len = tp->t_maxseg;
if (len > segsize) {
len = segsize;
flags &= ~TH_FIN;
sendalot = 1;
}
@ -183,7 +214,7 @@ again:
* to send into a small window), then must resend.
*/
if (len) {
if (len == tp->t_maxseg)
if (len == segsize)
goto send;
if ((idle || tp->t_flags & TF_NODELAY) &&
len + off >= so->so_snd.sb_cc)
@ -212,7 +243,7 @@ again:
long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
(tp->rcv_adv - tp->rcv_nxt);
if (adv >= (long) (2 * tp->t_maxseg))
if (adv >= (long) (2 * tp->t_ourmss))
goto send;
if (2 * adv >= (long) so->so_rcv.sb_hiwat)
goto send;
@ -282,13 +313,12 @@ send:
hdrlen = sizeof (struct tcpiphdr);
if (flags & TH_SYN) {
tp->snd_nxt = tp->iss;
tp->t_ourmss = tcp_mss_to_advertise(tp);
if ((tp->t_flags & TF_NOOPT) == 0) {
u_int16_t mss;
opt[0] = TCPOPT_MAXSEG;
opt[1] = 4;
mss = htons((u_int16_t) tcp_mss(tp, 0));
bcopy((caddr_t)&mss, (caddr_t)(opt + 2), sizeof(mss));
opt[2] = (tp->t_ourmss >> 8) & 0xff;
opt[3] = tp->t_ourmss & 0xff;
optlen = 4;
if ((tp->t_flags & TF_REQ_SCALE) &&
@ -326,10 +356,10 @@ send:
/*
* Adjust data length if insertion of options will
* bump the packet length beyond the t_maxseg length.
* bump the packet length beyond the segsize length.
*/
if (len > tp->t_maxseg - optlen) {
len = tp->t_maxseg - optlen;
if (len > segsize - optlen) {
len = segsize - optlen;
flags &= ~TH_FIN;
sendalot = 1;
}
@ -450,7 +480,7 @@ send:
* Calculate receive window. Don't shrink window,
* but avoid silly window syndrome.
*/
if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_ourmss)
win = 0;
if (win > (long)TCP_MAXWIN << tp->rcv_scale)
win = (long)TCP_MAXWIN << tp->rcv_scale;

View File

@ -1,4 +1,4 @@
/* $NetBSD: tcp_subr.c,v 1.27 1997/07/23 21:26:51 thorpej Exp $ */
/* $NetBSD: tcp_subr.c,v 1.28 1997/09/22 21:50:02 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1988, 1990, 1993
@ -221,6 +221,7 @@ tcp_newtcpcb(inp)
bzero((caddr_t)tp, sizeof(struct tcpcb));
LIST_INIT(&tp->segq);
tp->t_maxseg = tcp_mssdflt;
tp->t_ourmss = tcp_mssdflt;
tp->t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
tp->t_inpcb = inp;
@ -457,3 +458,177 @@ tcp_quench(inp, errno)
if (tp)
tp->snd_cwnd = tp->t_maxseg;
}
/*
* Compute the MSS to advertise to the peer. Called only during
* the 3-way handshake. If we are the server (peer initiated
* connection), we are called with the TCPCB for the listen
* socket. If we are the client (we initiated connection), we
are called with the TCPCB for the actual connection.
*/
int
tcp_mss_to_advertise(tp)
	const struct tcpcb *tp;
{
	extern u_long in_maxmtu;
	int mss;

	/*
	 * In order to avoid defeating path MTU discovery on the peer,
	 * we advertise the max MTU of all attached networks as our MSS,
	 * per RFC 1191, section 3.1.
	 *
	 * Note: tp is currently unused (the advertised MSS does not yet
	 * depend on per-connection state), but is kept in the interface
	 * for future route/interface-based policy.
	 *
	 * XXX Should we allow room for the timestamp option if
	 * XXX rfc1323 is enabled?
	 */
	mss = in_maxmtu - sizeof(struct tcpiphdr);
	return (mss);
}
/*
* Set connection variables based on the peer's advertised MSS.
* We are passed the TCPCB for the actual connection. If we
* are the server, we are called by the compressed state engine
* when the 3-way handshake is complete. If we are the client,
we are called when we receive the SYN,ACK from the server.
*
* NOTE: Our advertised MSS value must be initialized in the TCPCB
* before this routine is called!
*/
void
tcp_mss_from_peer(tp, offer)
struct tcpcb *tp;
int offer;
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
#if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
struct rtentry *rt = in_pcbrtentry(inp);
#endif
u_long bufsize;
int mss;
/*
* Assume our MSS is the MSS of the peer, unless they sent us
* an offer. Do not accept offers less than 32 bytes.
*/
mss = tp->t_ourmss;
if (offer)
mss = offer;
mss = max(mss, 32); /* sanity */
/*
* If there's a pipesize, change the socket buffer to that size.
* Make the socket buffer an integral number of MSS units. If
* the MSS is larger than the socket buffer, artificially decrease
* the MSS.
*/
#ifdef RTV_SPIPE
if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
bufsize = rt->rt_rmx.rmx_sendpipe;
else
#endif
bufsize = so->so_snd.sb_hiwat;
if (bufsize < mss)
mss = bufsize;
else {
bufsize = roundup(bufsize, mss);
if (bufsize > sb_max)
bufsize = sb_max;
(void) sbreserve(&so->so_snd, bufsize);
}
/* t_maxseg now records the peer's segment size only (send direction). */
tp->t_maxseg = mss;
/* Initialize the initial congestion window. */
tp->snd_cwnd = mss;
#ifdef RTV_SSTHRESH
if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
/*
* There's some sort of gateway or interface buffer
* limit on the path. Use this to set the slow
* start threshold, but set the threshold to no less
* than 2 * MSS.
*/
tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
}
#endif
}
/*
* Processing necessary when a TCP connection is established.
*/
void
tcp_established(tp)
struct tcpcb *tp;
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
#ifdef RTV_RPIPE
struct rtentry *rt = in_pcbrtentry(inp);
#endif
u_long bufsize;
/* Mark the connection ESTABLISHED and start the keepalive timer. */
tp->t_state = TCPS_ESTABLISHED;
tp->t_timer[TCPT_KEEP] = tcp_keepidle;
/*
* Size the receive buffer from the route's cached recvpipe value
* (if any), otherwise from the current high-water mark, rounded
* up to an integral number of our advertised MSS units.
*/
#ifdef RTV_RPIPE
if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
bufsize = rt->rt_rmx.rmx_recvpipe;
else
#endif
bufsize = so->so_rcv.sb_hiwat;
if (bufsize > tp->t_ourmss) {
bufsize = roundup(bufsize, tp->t_ourmss);
if (bufsize > sb_max)
bufsize = sb_max;
(void) sbreserve(&so->so_rcv, bufsize);
}
}
/*
* Check if there's an initial rtt or rttvar. Convert from the
* route-table units to scaled multiples of the slow timeout timer.
* Called only during the 3-way handshake.
*/
void
tcp_rmx_rtt(tp)
struct tcpcb *tp;
{
#ifdef RTV_RTT
struct rtentry *rt;
int rtt;
/* No route, no cached metrics to seed the timers from. */
if ((rt = in_pcbrtentry(tp->t_inpcb)) == NULL)
return;
/* Only seed srtt/rttvar if we have no RTT measurement yet. */
if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
/*
* XXX The lock bit for MTU indicates that the value
* is also a minimum value; this is subject to time.
*/
if (rt->rt_rmx.rmx_locks & RTV_RTT)
tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ);
/* Convert route-table RTT units to scaled slow-timer ticks. */
tp->t_srtt = rtt /
((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
if (rt->rt_rmx.rmx_rttvar) {
tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
((RTM_RTTUNIT / PR_SLOWHZ) >>
(TCP_RTTVAR_SHIFT + 2));
} else {
/* Default variation is +- 1 rtt */
tp->t_rttvar =
tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
}
/* Seed the retransmit timer from the cached estimates. */
TCPT_RANGESET(tp->t_rxtcur,
((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
tp->t_rttmin, TCPTV_REXMTMAX);
}
#endif
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: tcp_var.h,v 1.22 1997/08/29 16:02:43 gwr Exp $ */
/* $NetBSD: tcp_var.h,v 1.23 1997/09/22 21:50:04 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1993, 1994
@ -49,7 +49,8 @@ struct tcpcb {
short t_rxtshift; /* log(2) of rexmt exp. backoff */
short t_rxtcur; /* current retransmit value */
short t_dupacks; /* consecutive dup acks recd */
u_short t_maxseg; /* maximum segment size */
u_short t_maxseg; /* peer's maximum segment size */
u_short t_ourmss; /* our maximum segment size */
char t_force; /* 1 if forcing out a byte */
u_short t_flags;
#define TF_ACKNOW 0x0001 /* ack peer immediately */
@ -143,6 +144,10 @@ struct tcp_opt_info {
* This structure should not exceed 32 bytes.
* XXX On the Alpha, it's already 36-bytes, which rounds to 40.
* XXX Need to eliminate the pointer.
*
* XXX We've blown 32 bytes on non-Alpha systems, too, since we're
* XXX storing the maxseg we advertised to the peer. Should we
* XXX create another malloc bucket? Should we care?
*/
struct syn_cache {
struct syn_cache *sc_next;
@ -155,6 +160,7 @@ struct syn_cache {
u_int16_t sc_sport;
u_int16_t sc_dport;
u_int16_t sc_peermaxseg;
u_int16_t sc_ourmaxseg;
u_int8_t sc_timer;
u_int8_t sc_request_r_scale : 4,
sc_requested_s_scale : 4;
@ -328,10 +334,12 @@ struct tcpcb *
void tcp_dooptions __P((struct tcpcb *,
u_char *, int, struct tcpiphdr *, struct tcp_opt_info *));
void tcp_drain __P((void));
void tcp_established __P((struct tcpcb *));
void tcp_fasttimo __P((void));
void tcp_init __P((void));
void tcp_input __P((struct mbuf *, ...));
int tcp_mss __P((struct tcpcb *, u_int));
int tcp_mss_to_advertise __P((const struct tcpcb *));
void tcp_mss_from_peer __P((struct tcpcb *, int));
struct tcpcb *
tcp_newtcpcb __P((struct inpcb *));
void tcp_notify __P((struct inpcb *, int));
@ -342,6 +350,7 @@ void tcp_quench __P((struct inpcb *, int));
int tcp_reass __P((struct tcpcb *, struct tcpiphdr *, struct mbuf *));
int tcp_respond __P((struct tcpcb *,
struct tcpiphdr *, struct mbuf *, tcp_seq, tcp_seq, int));
void tcp_rmx_rtt __P((struct tcpcb *));
void tcp_setpersist __P((struct tcpcb *));
void tcp_slowtimo __P((void));
struct tcpiphdr *