NetBSD/sys/netinet/ip_input.c

/* $NetBSD: ip_input.c,v 1.280 2009/04/15 20:44:25 elad Exp $ */
/*
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Public Access Networks Corporation ("Panix"). It was developed under
* contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1982, 1986, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ip_input.c 8.2 (Berkeley) 1/4/94
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_input.c,v 1.280 2009/04/15 20:44:25 elad Exp $");
#include "opt_inet.h"
#include "opt_compat_netbsd.h"
#include "opt_gateway.h"
#include "opt_pfil_hooks.h"
#include "opt_ipsec.h"
#include "opt_mrouting.h"
#include "opt_mbuftrace.h"
#include "opt_inet_csum.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_proto.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>
#include <netinet/ip_icmp.h>
/* just for gif_ttl */
#include <netinet/in_gif.h>
#include "gif.h"
#include <net/if_gre.h>
#include "gre.h"
#ifdef MROUTING
#include <netinet/ip_mroute.h>
#endif
#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec_private.h>
#include <netkey/key.h>
#endif
#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#endif /* FAST_IPSEC*/
#ifndef IPFORWARDING
#ifdef GATEWAY
#define IPFORWARDING 1 /* forward IP packets not for us */
#else /* GATEWAY */
#define IPFORWARDING 0 /* don't forward IP packets not for us */
#endif /* GATEWAY */
#endif /* IPFORWARDING */
#ifndef IPSENDREDIRECTS
#define IPSENDREDIRECTS 1
#endif
#ifndef IPFORWSRCRT
#define IPFORWSRCRT 1 /* forward source-routed packets */
#endif
#ifndef IPALLOWSRCRT
#define IPALLOWSRCRT 1 /* allow source-routed packets */
#endif
#ifndef IPMTUDISC
#define IPMTUDISC 1
#endif
#ifndef IPMTUDISCTIMEOUT
#define IPMTUDISCTIMEOUT (10 * 60) /* as per RFC 1191 */
#endif
#ifdef COMPAT_50
#include <compat/sys/time.h>
#include <compat/sys/socket.h>
#endif
/*
* Note: DIRECTED_BROADCAST is handled this way so that previous
* configuration using this option will Just Work.
*/
#ifndef IPDIRECTEDBCAST
#ifdef DIRECTED_BROADCAST
#define IPDIRECTEDBCAST 1
#else
#define IPDIRECTEDBCAST 0
#endif /* DIRECTED_BROADCAST */
#endif /* IPDIRECTEDBCAST */
int ipforwarding = IPFORWARDING;
int ipsendredirects = IPSENDREDIRECTS;
int ip_defttl = IPDEFTTL;
int ip_forwsrcrt = IPFORWSRCRT;
int ip_directedbcast = IPDIRECTEDBCAST;
int ip_allowsrcrt = IPALLOWSRCRT;
int ip_mtudisc = IPMTUDISC;
int ip_mtudisc_timeout = IPMTUDISCTIMEOUT;
#ifdef DIAGNOSTIC
int ipprintfs = 0;
#endif
int ip_do_randomid = 0;
/*
* XXX - Setting ip_checkinterface mostly implements the receive side of
* the Strong ES model described in RFC 1122, but since the routing table
* and transmit implementation do not implement the Strong ES model,
* setting this to 1 results in an odd hybrid.
*
* XXX - ip_checkinterface currently must be disabled if you use ipnat
* to translate the destination address to another local interface.
*
* XXX - ip_checkinterface must be disabled if you add IP aliases
* to the loopback interface instead of the interface where the
* packets for those addresses are received.
*/
int ip_checkinterface = 0;
struct rttimer_queue *ip_mtudisc_timeout_q = NULL;
int ipqmaxlen = IFQ_MAXLEN;
u_long in_ifaddrhash; /* size of hash table - 1 */
int in_ifaddrentries; /* total number of addrs */
struct in_ifaddrhead in_ifaddrhead;
struct in_ifaddrhashhead *in_ifaddrhashtbl;
u_long in_multihash; /* size of hash table - 1 */
int in_multientries; /* total number of addrs */
struct in_multihashhead *in_multihashtbl;
struct ifqueue ipintrq;
uint16_t ip_id;
percpu_t *ipstat_percpu;
#ifdef PFIL_HOOKS
struct pfil_head inet_pfil_hook;
#endif
/*
* Cached copy of nmbclusters. If nmbclusters is different,
* recalculate IP parameters derived from nmbclusters.
*/
static int ip_nmbclusters; /* copy of nmbclusters */
static void ip_nmbclusters_changed(void); /* recalc limits */
#define CHECK_NMBCLUSTER_PARAMS() \
do { \
if (__predict_false(ip_nmbclusters != nmbclusters)) \
ip_nmbclusters_changed(); \
} while (/*CONSTCOND*/0)
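/*
 * CHECK_NMBCLUSTER_PARAMS() is invoked from the periodic paths
 * (e.g. ip_slowtimo()) so that a run-time change of nmbclusters is
 * eventually reflected in ip_maxfrags via ip_nmbclusters_changed().
 */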
/* IP datagram reassembly queues (hashed) */
#define IPREASS_NHASH_LOG2 6
#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
#define IPREASS_HMASK (IPREASS_NHASH - 1)
#define IPREASS_HASH(x,y) \
(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
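/*
 * The hash folds eight bits of the source address (two 4-bit nibbles)
 * together, XORs the result with the IP id, and masks it into one of
 * the IPREASS_NHASH (64) reassembly buckets.
 */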
struct ipqhead ipq[IPREASS_NHASH];
int ipq_locked;
static int ip_nfragpackets; /* packets in reass queue */
static int ip_nfrags; /* total fragments in reass queues */
int ip_maxfragpackets = 200; /* limit on packets. XXX sysctl */
int ip_maxfrags; /* limit on fragments. XXX sysctl */
/*
* Additive-Increase/Multiplicative-Decrease (AIMD) strategy for
* IP reassembly queue buffer management.
*
* We keep a count of total IP fragments (NB: not fragmented packets!)
* awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
* If ip_nfrags exceeds the limit ip_maxfrags, we drop half the
* total fragments in the reassembly queues. This AIMD policy avoids
* repeatedly deleting single packets under heavy fragmentation load
* (e.g., from lossy NFS peers).
*/
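/*
 * For example, with the default ip_maxfrags of nmbclusters / 4 (set in
 * ip_nmbclusters_changed()), a fragment flood that pushes ip_nfrags past
 * the limit makes ip_slowtimo()/ip_drain() invoke the TTL-based drop
 * logic below, which discards roughly the older half of all queued
 * fragments at once rather than evicting one datagram at a time.
 */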
static u_int ip_reass_ttl_decr(u_int ticks);
static void ip_reass_drophalf(void);
static inline int ipq_lock_try(void);
static inline void ipq_unlock(void);
static inline int
ipq_lock_try(void)
{
int s;
/*
* Use splvm() -- we're blocking things that would cause
* mbuf allocation.
*/
s = splvm();
if (ipq_locked) {
splx(s);
return (0);
}
ipq_locked = 1;
splx(s);
return (1);
}
static inline void
ipq_unlock(void)
{
int s;
s = splvm();
ipq_locked = 0;
splx(s);
}
#ifdef DIAGNOSTIC
#define IPQ_LOCK() \
do { \
if (ipq_lock_try() == 0) { \
printf("%s:%d: ipq already locked\n", __FILE__, __LINE__); \
panic("ipq_lock"); \
} \
} while (/*CONSTCOND*/ 0)
#define IPQ_LOCK_CHECK() \
do { \
if (ipq_locked == 0) { \
printf("%s:%d: ipq lock not held\n", __FILE__, __LINE__); \
panic("ipq lock check"); \
} \
} while (/*CONSTCOND*/ 0)
#else
#define IPQ_LOCK() (void) ipq_lock_try()
#define IPQ_LOCK_CHECK() /* nothing */
#endif
#define IPQ_UNLOCK() ipq_unlock()
struct pool inmulti_pool;
struct pool ipqent_pool;
#ifdef INET_CSUM_COUNTERS
#include <sys/device.h>
struct evcnt ip_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "inet", "hwcsum bad");
struct evcnt ip_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "inet", "hwcsum ok");
struct evcnt ip_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "inet", "swcsum");
#define INET_CSUM_COUNTER_INCR(ev) (ev)->ev_count++
EVCNT_ATTACH_STATIC(ip_hwcsum_bad);
EVCNT_ATTACH_STATIC(ip_hwcsum_ok);
EVCNT_ATTACH_STATIC(ip_swcsum);
#else
#define INET_CSUM_COUNTER_INCR(ev) /* nothing */
#endif /* INET_CSUM_COUNTERS */
/*
* We need to save the IP options in case a protocol wants to respond
* to an incoming packet over the same route if the packet got here
* using IP source routing. This allows connection establishment and
* maintenance when the remote end is on a network that is not known
* to us.
*/
int ip_nhops = 0;
static struct ip_srcrt {
struct in_addr dst; /* final destination */
char nop; /* one NOP to align */
char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */
struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
} ip_srcrt;
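/*
 * Layout of the saved source route: `dst' is the final destination
 * taken from the incoming option, the single NOP keeps the option
 * data aligned, `srcopt' holds the option type, length and offset
 * bytes, and `route' holds the addresses copied from the incoming
 * option so that a reply can be sent back along the same path.
 */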
static void save_rte(u_char *, struct in_addr);
#ifdef MBUFTRACE
struct mowner ip_rx_mowner = MOWNER_INIT("internet", "rx");
struct mowner ip_tx_mowner = MOWNER_INIT("internet", "tx");
#endif
/*
* Compute IP limits derived from the value of nmbclusters.
*/
static void
ip_nmbclusters_changed(void)
{
ip_maxfrags = nmbclusters / 4;
ip_nmbclusters = nmbclusters;
}
/*
* IP initialization: fill in IP protocol switch table.
* All protocols not implemented in kernel go to raw IP protocol handler.
*/
void
ip_init(void)
{
const struct protosw *pr;
int i;
pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl",
NULL, IPL_SOFTNET);
pool_init(&ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqepl",
NULL, IPL_VM);
pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
if (pr == 0)
panic("ip_init");
for (i = 0; i < IPPROTO_MAX; i++)
ip_protox[i] = pr - inetsw;
for (pr = inetdomain.dom_protosw;
pr < inetdomain.dom_protoswNPROTOSW; pr++)
if (pr->pr_domain->dom_family == PF_INET &&
pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW)
ip_protox[pr->pr_protocol] = pr - inetsw;
for (i = 0; i < IPREASS_NHASH; i++)
LIST_INIT(&ipq[i]);
ip_initid();
ip_id = time_second & 0xfffff;
ipintrq.ifq_maxlen = ipqmaxlen;
ip_nmbclusters_changed();
TAILQ_INIT(&in_ifaddrhead);
in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
&in_ifaddrhash);
in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
&in_multihash);
ip_mtudisc_timeout_q = rt_timer_queue_create(ip_mtudisc_timeout);
#ifdef GATEWAY
ipflow_init(ip_hashsize);
#endif
#ifdef PFIL_HOOKS
/* Register our Packet Filter hook. */
inet_pfil_hook.ph_type = PFIL_TYPE_AF;
inet_pfil_hook.ph_af = AF_INET;
i = pfil_head_register(&inet_pfil_hook);
if (i != 0)
printf("ip_init: WARNING: unable to register pfil hook, "
"error %d\n", i);
#endif /* PFIL_HOOKS */
#ifdef MBUFTRACE
MOWNER_ATTACH(&ip_tx_mowner);
MOWNER_ATTACH(&ip_rx_mowner);
#endif /* MBUFTRACE */
ipstat_percpu = percpu_alloc(sizeof(uint64_t) * IP_NSTATS);
}
struct sockaddr_in ipaddr = {
.sin_len = sizeof(ipaddr),
.sin_family = AF_INET,
};
struct route ipforward_rt;
/*
* IP software interrupt routine
*/
void
ipintr(void)
{
int s;
struct mbuf *m;
mutex_enter(softnet_lock);
KERNEL_LOCK(1, NULL);
while (!IF_IS_EMPTY(&ipintrq)) {
s = splnet();
IF_DEQUEUE(&ipintrq, m);
splx(s);
if (m == NULL)
break;
ip_input(m);
}
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
}
/*
* Ip input routine. Checksum and byte swap header. If fragmented
* try to reassemble. Process options. Pass to next level.
*/
void
ip_input(struct mbuf *m)
{
struct ip *ip = NULL;
struct ipq *fp;
struct in_ifaddr *ia;
struct ifaddr *ifa;
struct ipqent *ipqe;
int hlen = 0, mff, len;
int downmatch;
int checkif;
int srcrt = 0;
int s;
u_int hash;
#ifdef FAST_IPSEC
struct m_tag *mtag;
struct tdb_ident *tdbi;
struct secpolicy *sp;
int error;
#endif /* FAST_IPSEC */
MCLAIM(m, &ip_rx_mowner);
#ifdef DIAGNOSTIC
if ((m->m_flags & M_PKTHDR) == 0)
panic("ipintr no HDR");
#endif
/*
* If no IP addresses have been set yet but the interfaces
* are receiving, can't do anything with incoming packets yet.
*/
if (TAILQ_FIRST(&in_ifaddrhead) == 0)
goto bad;
IP_STATINC(IP_STAT_TOTAL);
/*
* If the IP header is not aligned, slurp it up into a new
* mbuf with space for link headers, in the event we forward
* it. Otherwise, if it is aligned, make sure the entire
* base IP header is in the first mbuf of the chain.
*/
if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
if ((m = m_copyup(m, sizeof(struct ip),
(max_linkhdr + 3) & ~3)) == NULL) {
/* XXXJRT new stat, please */
IP_STATINC(IP_STAT_TOOSMALL);
return;
}
} else if (__predict_false(m->m_len < sizeof (struct ip))) {
if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
IP_STATINC(IP_STAT_TOOSMALL);
return;
}
}
ip = mtod(m, struct ip *);
if (ip->ip_v != IPVERSION) {
IP_STATINC(IP_STAT_BADVERS);
goto bad;
}
hlen = ip->ip_hl << 2;
if (hlen < sizeof(struct ip)) { /* minimum header length */
IP_STATINC(IP_STAT_BADHLEN);
goto bad;
}
if (hlen > m->m_len) {
if ((m = m_pullup(m, hlen)) == 0) {
IP_STATINC(IP_STAT_BADHLEN);
return;
}
ip = mtod(m, struct ip *);
}
/*
* RFC1122: packets with a multicast source address are
* not allowed.
*/
if (IN_MULTICAST(ip->ip_src.s_addr)) {
IP_STATINC(IP_STAT_BADADDR);
goto bad;
}
/* 127/8 must not appear on wire - RFC1122 */
if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
(ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
IP_STATINC(IP_STAT_BADADDR);
goto bad;
}
}
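/*
 * Dispatch on the checksum flags left by the driver.  M_CSUM_IPv4 is
 * honoured only while the receiving interface still advertises IPv4
 * receive checksumming (if_csum_flags_rx); a packet marked both
 * checksummed and bad is dropped via badcsum, one marked good is
 * accepted as-is, and everything else falls through to the software
 * checksum (which may be skipped on loopback interfaces).
 */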
switch (m->m_pkthdr.csum_flags &
((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
M_CSUM_IPv4_BAD)) {
case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad);
goto badcsum;
case M_CSUM_IPv4:
/* Checksum was okay. */
INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok);
break;
default:
/*
* Must compute it ourselves. Maybe skip checksum on
* loopback interfaces.
*/
if (__predict_true(!(m->m_pkthdr.rcvif->if_flags &
IFF_LOOPBACK) || ip_do_loopback_cksum)) {
INET_CSUM_COUNTER_INCR(&ip_swcsum);
if (in_cksum(m, hlen) != 0)
goto badcsum;
}
break;
}
/* Retrieve the packet length. */
len = ntohs(ip->ip_len);
/*
* Check for additional length bogosity
*/
if (len < hlen) {
IP_STATINC(IP_STAT_BADLEN);
goto bad;
}
/*
* Check that the amount of data in the buffers
* is at least as much as the IP header would have us expect.
* Trim mbufs if longer than we expect.
* Drop packet if shorter than we expect.
*/
if (m->m_pkthdr.len < len) {
IP_STATINC(IP_STAT_TOOSHORT);
goto bad;
}
if (m->m_pkthdr.len > len) {
if (m->m_len == m->m_pkthdr.len) {
m->m_len = len;
m->m_pkthdr.len = len;
} else
m_adj(m, len - m->m_pkthdr.len);
}
#if defined(IPSEC)
/* ipflow (IP fast forwarding) is not compatible with IPsec. */
m->m_flags &= ~M_CANFASTFWD;
#else
/*
* Assume that we can create a fast-forward IP flow entry
* based on this packet.
*/
m->m_flags |= M_CANFASTFWD;
#endif
#ifdef PFIL_HOOKS
/*
* Run through list of hooks for input packets. If there are any
* filters which require that additional packets in the flow are
* not fast-forwarded, they must clear the M_CANFASTFWD flag.
* Note that filters must _never_ set this flag, as another filter
* in the list may have previously cleared it.
*/
/*
* let ipfilter look at packet on the wire,
* not the decapsulated packet.
*/
#ifdef IPSEC
if (!ipsec_getnhist(m))
#elif defined(FAST_IPSEC)
if (!ipsec_indone(m))
#else
if (1)
#endif
{
struct in_addr odst;
odst = ip->ip_dst;
if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif,
PFIL_IN) != 0)
return;
if (m == NULL)
return;
ip = mtod(m, struct ip *);
hlen = ip->ip_hl << 2;
/*
* XXX The setting of "srcrt" here is to prevent ip_forward()
* from generating ICMP redirects for packets that have
* been redirected by a hook back out on to the same LAN that
* they came from and is not an indication that the packet
* is being influenced by source routing options. This
* allows things like
* "rdr tlp0 0/0 port 80 -> 1.1.1.200 3128 tcp"
* where tlp0 is both on the 1.1.1.0/24 network and is the
* default route for hosts on 1.1.1.0/24. Of course this
* also requires a "map tlp0 ..." to complete the story.
* One might argue whether or not this kind of network config.
* should be supported in this manner...
*/
srcrt = (odst.s_addr != ip->ip_dst.s_addr);
}
#endif /* PFIL_HOOKS */
#ifdef ALTQ
/* XXX Temporary until ALTQ is changed to use a pfil hook */
if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
/* packet dropped by traffic conditioner */
return;
}
#endif
/*
* Process options and, if not destined for us,
* ship it on. ip_dooptions returns 1 when an
* error was detected (causing an icmp message
* to be sent and the original packet to be freed).
*/
ip_nhops = 0; /* for source routed packets */
if (hlen > sizeof (struct ip) && ip_dooptions(m))
return;
/*
* Enable a consistency check between the destination address
* and the arrival interface for a unicast packet (the RFC 1122
* strong ES model) if IP forwarding is disabled and the packet
* is not locally generated.
*
* XXX - Checking also should be disabled if the destination
* address is ipnat'ed to a different interface.
*
* XXX - Checking is incompatible with IP aliases added
* to the loopback interface instead of the interface where
* the packets are received.
*
* XXX - We need to add a per ifaddr flag for this so that
* we get finer grain control.
*/
checkif = ip_checkinterface && (ipforwarding == 0) &&
(m->m_pkthdr.rcvif != NULL) &&
((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0);
/*
* Check our list of addresses, to see if the packet is for us.
*
* Traditional 4.4BSD did not consult IFF_UP at all.
* The behavior here is to treat addresses on a !IFF_UP interface
* as not mine.
*/
downmatch = 0;
LIST_FOREACH(ia, &IN_IFADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
if (in_hosteq(ia->ia_addr.sin_addr, ip->ip_dst)) {
if (checkif && ia->ia_ifp != m->m_pkthdr.rcvif)
continue;
if ((ia->ia_ifp->if_flags & IFF_UP) != 0)
break;
else
downmatch++;
}
}
if (ia != NULL)
goto ours;
if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
IFADDR_FOREACH(ifa, m->m_pkthdr.rcvif) {
if (ifa->ifa_addr->sa_family != AF_INET)
continue;
ia = ifatoia(ifa);
if (in_hosteq(ip->ip_dst, ia->ia_broadaddr.sin_addr) ||
in_hosteq(ip->ip_dst, ia->ia_netbroadcast) ||
/*
* Look for all-0's host part (old broadcast addr),
* either for subnet or net.
*/
ip->ip_dst.s_addr == ia->ia_subnet ||
ip->ip_dst.s_addr == ia->ia_net)
goto ours;
/*
* An interface with IP address zero accepts
* all packets that arrive on that interface.
*/
if (in_nullhost(ia->ia_addr.sin_addr))
goto ours;
}
}
if (IN_MULTICAST(ip->ip_dst.s_addr)) {
struct in_multi *inm;
#ifdef MROUTING
extern struct socket *ip_mrouter;
if (ip_mrouter) {
/*
* If we are acting as a multicast router, all
* incoming multicast packets are passed to the
* kernel-level multicast forwarding function.
* The packet is returned (relatively) intact; if
* ip_mforward() returns a non-zero value, the packet
* must be discarded, else it may be accepted below.
*
* (The IP ident field is put in the same byte order
* as expected when ip_mforward() is called from
* ip_output().)
*/
if (ip_mforward(m, m->m_pkthdr.rcvif) != 0) {
IP_STATINC(IP_STAT_CANTFORWARD);
m_freem(m);
return;
}
/*
* The process-level routing demon needs to receive
* all multicast IGMP packets, whether or not this
* host belongs to their destination groups.
*/
if (ip->ip_p == IPPROTO_IGMP)
goto ours;
IP_STATINC(IP_STAT_CANTFORWARD);
}
#endif
/*
* See if we belong to the destination multicast group on the
* arrival interface.
*/
IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
if (inm == NULL) {
IP_STATINC(IP_STAT_CANTFORWARD);
m_freem(m);
return;
}
goto ours;
}
if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
in_nullhost(ip->ip_dst))
goto ours;
/*
* Not for us; forward if possible and desirable.
*/
if (ipforwarding == 0) {
IP_STATINC(IP_STAT_CANTFORWARD);
m_freem(m);
} else {
/*
* If ip_dst matched any of our addresses on a !IFF_UP interface,
* and there's no IFF_UP interface that matches ip_dst,
* send an ICMP unreach. Forwarding it would result in an
* in-kernel forwarding loop until the TTL reaches 0.
*/
if (downmatch) {
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
IP_STATINC(IP_STAT_CANTFORWARD);
return;
}
#ifdef IPSEC
if (ipsec4_in_reject(m, NULL)) {
IPSEC_STATINC(IPSEC_STAT_IN_POLVIO);
goto bad;
}
#endif
#ifdef FAST_IPSEC
mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
s = splsoftnet();
if (mtag != NULL) {
tdbi = (struct tdb_ident *)(mtag + 1);
sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
} else {
sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
IP_FORWARDING, &error);
}
if (sp == NULL) { /* NB: can happen if error */
splx(s);
/*XXX error stat???*/
DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/
goto bad;
}
/*
* Check security policy against packet attributes.
*/
error = ipsec_in_reject(sp, m);
KEY_FREESP(&sp);
splx(s);
if (error) {
IP_STATINC(IP_STAT_CANTFORWARD);
goto bad;
}
/*
* Peek at the outbound SP for this packet to determine if
* it's a Fast Forward candidate.
*/
mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
if (mtag != NULL)
m->m_flags &= ~M_CANFASTFWD;
else {
s = splsoftnet();
sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND,
(IP_FORWARDING |
(ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
&error, NULL);
if (sp != NULL) {
m->m_flags &= ~M_CANFASTFWD;
KEY_FREESP(&sp);
}
splx(s);
}
#endif /* FAST_IPSEC */
ip_forward(m, srcrt);
}
return;
ours:
/*
* If offset or IP_MF are set, must reassemble.
* Otherwise, nothing need be done.
* (We could look in the reassembly queue to see
* if the packet was previously fragmented,
* but it's not worth the time; just let them time out.)
*/
if (ip->ip_off & ~htons(IP_DF|IP_RF)) {
uint16_t off;
/*
* Prevent TCP blind data attacks by not allowing non-initial
* fragments to start at less than 68 bytes (minimal fragment
* size) and making sure the first fragment is at least 68
* bytes.
*/
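/*
 * Concretely: `off' is the fragment's data offset in bytes; a
 * non-initial fragment must begin at or beyond IP_MINFRAGSIZE - 1
 * bytes (counting the header), and the initial fragment must carry
 * at least that much, or the fragment is counted as bad and dropped.
 */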
off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) {
IP_STATINC(IP_STAT_BADFRAGS);
goto bad;
}
/*
* Look for queue of fragments
* of this datagram.
*/
IPQ_LOCK();
hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
LIST_FOREACH(fp, &ipq[hash], ipq_q) {
if (ip->ip_id == fp->ipq_id &&
in_hosteq(ip->ip_src, fp->ipq_src) &&
in_hosteq(ip->ip_dst, fp->ipq_dst) &&
ip->ip_p == fp->ipq_p) {
/*
* Make sure the TOS matches previous
* fragments.
*/
if (ip->ip_tos != fp->ipq_tos) {
IP_STATINC(IP_STAT_BADFRAGS);
IPQ_UNLOCK();
goto bad;
}
goto found;
}
}
fp = 0;
found:
/*
* Adjust ip_len to not reflect header,
* set ipqe_mff if more fragments are expected,
* convert offset of this to bytes.
*/
ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
mff = (ip->ip_off & htons(IP_MF)) != 0;
if (mff) {
/*
* Make sure that fragments have a data length
* that's a non-zero multiple of 8 bytes.
*/
if (ntohs(ip->ip_len) == 0 ||
(ntohs(ip->ip_len) & 0x7) != 0) {
IP_STATINC(IP_STAT_BADFRAGS);
IPQ_UNLOCK();
goto bad;
}
}
ip->ip_off = htons((ntohs(ip->ip_off) & IP_OFFMASK) << 3);
/*
* If datagram marked as having more fragments
* or if this is not the first fragment,
* attempt reassembly; if it succeeds, proceed.
*/
if (mff || ip->ip_off != htons(0)) {
IP_STATINC(IP_STAT_FRAGMENTS);
s = splvm();
ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
splx(s);
if (ipqe == NULL) {
IP_STATINC(IP_STAT_RCVMEMDROP);
IPQ_UNLOCK();
goto bad;
}
ipqe->ipqe_mff = mff;
ipqe->ipqe_m = m;
ipqe->ipqe_ip = ip;
m = ip_reass(ipqe, fp, &ipq[hash]);
if (m == 0) {
IPQ_UNLOCK();
return;
}
IP_STATINC(IP_STAT_REASSEMBLED);
ip = mtod(m, struct ip *);
hlen = ip->ip_hl << 2;
ip->ip_len = htons(ntohs(ip->ip_len) + hlen);
} else
if (fp)
ip_freef(fp);
IPQ_UNLOCK();
}
#if defined(IPSEC)
/*
* enforce IPsec policy checking if we are seeing last header.
* note that we do not visit this with protocols with pcb layer
* code - like udp/tcp/raw ip.
*/
if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0 &&
ipsec4_in_reject(m, NULL)) {
IPSEC_STATINC(IPSEC_STAT_IN_POLVIO);
goto bad;
}
#endif
#ifdef FAST_IPSEC
/*
* enforce IPsec policy checking if we are seeing last header.
* note that we do not visit this with protocols with pcb layer
* code - like udp/tcp/raw ip.
*/
if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0) {
/*
* Check if the packet has already had IPsec processing
* done. If so, then just pass it along. This tag gets
* set during AH, ESP, etc. input handling, before the
* packet is returned to the ip input queue for delivery.
*/
mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
s = splsoftnet();
if (mtag != NULL) {
tdbi = (struct tdb_ident *)(mtag + 1);
sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
} else {
sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
IP_FORWARDING, &error);
}
if (sp != NULL) {
/*
* Check security policy against packet attributes.
*/
error = ipsec_in_reject(sp, m);
KEY_FREESP(&sp);
} else {
/* XXX error stat??? */
error = EINVAL;
DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
}
splx(s);
if (error)
goto bad;
}
#endif /* FAST_IPSEC */
/*
* Switch out to protocol's input routine.
*/
#if IFA_STATS
if (ia && ip)
ia->ia_ifa.ifa_data.ifad_inbytes += ntohs(ip->ip_len);
#endif
IP_STATINC(IP_STAT_DELIVERED);
{
int off = hlen, nh = ip->ip_p;
(*inetsw[ip_protox[nh]].pr_input)(m, off, nh);
return;
}
bad:
m_freem(m);
return;
badcsum:
IP_STATINC(IP_STAT_BADSUM);
m_freem(m);
}
/*
* Take incoming datagram fragment and try to
* reassemble it into whole datagram. If a chain for
* reassembly of this datagram already exists, then it
* is given as fp; otherwise have to make a chain.
*/
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp, struct ipqhead *ipqhead)
{
struct mbuf *m = ipqe->ipqe_m;
struct ipqent *nq, *p, *q;
struct ip *ip;
struct mbuf *t;
int hlen = ipqe->ipqe_ip->ip_hl << 2;
int i, next, s;
IPQ_LOCK_CHECK();
/*
* Presence of header sizes in mbufs
* would confuse code below.
*/
m->m_data += hlen;
m->m_len -= hlen;
#ifdef notyet
/* make sure fragment limit is up-to-date */
CHECK_NMBCLUSTER_PARAMS();
/* If we have too many fragments, drop the older half. */
if (ip_nfrags >= ip_maxfrags)
ip_reass_drophalf();
#endif
/*
* We are about to add a fragment; increment frag count.
*/
ip_nfrags++;
/*
* If first fragment to arrive, create a reassembly queue.
*/
if (fp == 0) {
/*
* Enforce upper bound on number of fragmented packets
* for which we attempt reassembly;
* If ip_maxfragpackets is 0, never accept fragments.
* If ip_maxfragpackets is -1, accept all fragments without limitation.
*/
if (ip_maxfragpackets < 0)
;
else if (ip_nfragpackets >= ip_maxfragpackets)
goto dropfrag;
ip_nfragpackets++;
fp = malloc(sizeof (struct ipq), M_FTABLE, M_NOWAIT);
if (fp == NULL)
goto dropfrag;
LIST_INSERT_HEAD(ipqhead, fp, ipq_q);
fp->ipq_nfrags = 1;
fp->ipq_ttl = IPFRAGTTL;
fp->ipq_p = ipqe->ipqe_ip->ip_p;
fp->ipq_id = ipqe->ipqe_ip->ip_id;
fp->ipq_tos = ipqe->ipqe_ip->ip_tos;
TAILQ_INIT(&fp->ipq_fragq);
fp->ipq_src = ipqe->ipqe_ip->ip_src;
fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
p = NULL;
goto insert;
} else {
fp->ipq_nfrags++;
}
/*
* Find a segment which begins after this one does.
*/
for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL;
p = q, q = TAILQ_NEXT(q, ipqe_q))
if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
break;
/*
* If there is a preceding segment, it may provide some of
* our data already. If so, drop the data from the incoming
* segment. If it provides all of our data, drop us.
*/
if (p != NULL) {
i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
ntohs(ipqe->ipqe_ip->ip_off);
if (i > 0) {
if (i >= ntohs(ipqe->ipqe_ip->ip_len))
goto dropfrag;
m_adj(ipqe->ipqe_m, i);
ipqe->ipqe_ip->ip_off =
htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
ipqe->ipqe_ip->ip_len =
htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
}
}
/*
* While we overlap succeeding segments trim them or,
* if they are completely covered, dequeue them.
*/
for (; q != NULL &&
ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
ntohs(q->ipqe_ip->ip_off); q = nq) {
i = (ntohs(ipqe->ipqe_ip->ip_off) +
ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
if (i < ntohs(q->ipqe_ip->ip_len)) {
q->ipqe_ip->ip_len =
htons(ntohs(q->ipqe_ip->ip_len) - i);
q->ipqe_ip->ip_off =
htons(ntohs(q->ipqe_ip->ip_off) + i);
m_adj(q->ipqe_m, i);
break;
}
nq = TAILQ_NEXT(q, ipqe_q);
m_freem(q->ipqe_m);
TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
s = splvm();
pool_put(&ipqent_pool, q);
splx(s);
fp->ipq_nfrags--;
ip_nfrags--;
}
insert:
/*
* Stick new segment in its place;
* check for complete reassembly.
*/
if (p == NULL) {
TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
} else {
TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
}
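/*
 * Walk the (offset-sorted) fragment list: the datagram is complete
 * only if every fragment starts exactly where the previous one ended
 * and the final fragment has ipqe_mff clear (no more-fragments bit).
 */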
next = 0;
for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL;
p = q, q = TAILQ_NEXT(q, ipqe_q)) {
if (ntohs(q->ipqe_ip->ip_off) != next)
return (0);
next += ntohs(q->ipqe_ip->ip_len);
}
if (p->ipqe_mff)
return (0);
/*
* Reassembly is complete. Check for a bogus message size and
* concatenate fragments.
*/
q = TAILQ_FIRST(&fp->ipq_fragq);
ip = q->ipqe_ip;
if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
IP_STATINC(IP_STAT_TOOLONG);
ip_freef(fp);
return (0);
}
m = q->ipqe_m;
t = m->m_next;
m->m_next = 0;
m_cat(m, t);
nq = TAILQ_NEXT(q, ipqe_q);
s = splvm();
pool_put(&ipqent_pool, q);
splx(s);
for (q = nq; q != NULL; q = nq) {
t = q->ipqe_m;
nq = TAILQ_NEXT(q, ipqe_q);
s = splvm();
pool_put(&ipqent_pool, q);
splx(s);
m_cat(m, t);
}
ip_nfrags -= fp->ipq_nfrags;
/*
* Create header for new ip packet by
* modifying header of first packet;
* dequeue and discard fragment reassembly header.
* Make header visible.
*/
ip->ip_len = htons(next);
ip->ip_src = fp->ipq_src;
ip->ip_dst = fp->ipq_dst;
LIST_REMOVE(fp, ipq_q);
free(fp, M_FTABLE);
ip_nfragpackets--;
m->m_len += (ip->ip_hl << 2);
m->m_data -= (ip->ip_hl << 2);
/* some debugging cruft by sklower, below, will go away soon */
if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
int plen = 0;
for (t = m; t; t = t->m_next)
plen += t->m_len;
m->m_pkthdr.len = plen;
m->m_pkthdr.csum_flags = 0;
}
return (m);
dropfrag:
if (fp != 0)
fp->ipq_nfrags--;
ip_nfrags--;
IP_STATINC(IP_STAT_FRAGDROPPED);
m_freem(m);
s = splvm();
pool_put(&ipqent_pool, ipqe);
splx(s);
return (0);
}
/*
* Free a fragment reassembly header and all
* associated datagrams.
*/
void
ip_freef(struct ipq *fp)
{
struct ipqent *q, *p;
u_int nfrags = 0;
int s;
IPQ_LOCK_CHECK();
for (q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; q = p) {
p = TAILQ_NEXT(q, ipqe_q);
m_freem(q->ipqe_m);
nfrags++;
TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
s = splvm();
pool_put(&ipqent_pool, q);
splx(s);
}
if (nfrags != fp->ipq_nfrags)
printf("ip_freef: nfrags %d != %d\n", fp->ipq_nfrags, nfrags);
ip_nfrags -= nfrags;
LIST_REMOVE(fp, ipq_q);
free(fp, M_FTABLE);
ip_nfragpackets--;
}
/*
* IP reassembly TTL machinery for multiplicative drop.
*/
static u_int fragttl_histo[(IPFRAGTTL+1)];
/*
* Decrement TTL of all reassembly queue entries by `ticks'.
* Count number of distinct fragments (as opposed to partial, fragmented
* datagrams) in the reassembly queue. While we traverse the entire
* reassembly queue, compute and return the median TTL over all fragments.
*/
static u_int
ip_reass_ttl_decr(u_int ticks)
{
u_int nfrags, median, dropfraction, keepfraction;
struct ipq *fp, *nfp;
int i;
nfrags = 0;
memset(fragttl_histo, 0, sizeof fragttl_histo);
for (i = 0; i < IPREASS_NHASH; i++) {
for (fp = LIST_FIRST(&ipq[i]); fp != NULL; fp = nfp) {
fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
0 : fp->ipq_ttl - ticks);
nfp = LIST_NEXT(fp, ipq_q);
if (fp->ipq_ttl == 0) {
IP_STATINC(IP_STAT_FRAGTIMEOUT);
ip_freef(fp);
} else {
nfrags += fp->ipq_nfrags;
fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
}
}
}
KASSERT(ip_nfrags == nfrags);
/* Find median (or other drop fraction) in histogram. */
dropfraction = (ip_nfrags / 2);
keepfraction = ip_nfrags - dropfraction;
for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
median += fragttl_histo[i];
if (median >= keepfraction)
break;
}
/* Return TTL of median (or other fraction). */
return (u_int)i;
}
void
ip_reass_drophalf(void)
{
u_int median_ticks;
/*
* Compute median TTL of all fragments, and count frags
* with that TTL or lower (roughly half of all fragments).
*/
median_ticks = ip_reass_ttl_decr(0);
/* Drop half. */
median_ticks = ip_reass_ttl_decr(median_ticks);
}
/*
* IP timer processing;
* if a timer expires on a reassembly
* queue, discard it.
*/
void
ip_slowtimo(void)
{
static u_int dropscanidx = 0;
u_int i;
u_int median_ttl;
mutex_enter(softnet_lock);
KERNEL_LOCK(1, NULL);
IPQ_LOCK();
/* Age TTL of all fragments by 1 tick. */
median_ttl = ip_reass_ttl_decr(1);
/* make sure fragment limit is up-to-date */
CHECK_NMBCLUSTER_PARAMS();
/* If we have too many fragments, drop the older half. */
if (ip_nfrags > ip_maxfrags)
ip_reass_ttl_decr(median_ttl);
/*
* If we are over the maximum number of fragmented packets
* (due to the limit being lowered), drain off
* enough to get down to the new limit. Start draining
* from the reassembly hashqueue most recently drained.
*/
if (ip_maxfragpackets < 0)
;
else {
int wrapped = 0;
i = dropscanidx;
while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
while (LIST_FIRST(&ipq[i]) != NULL)
ip_freef(LIST_FIRST(&ipq[i]));
if (++i >= IPREASS_NHASH) {
i = 0;
}
/*
* Don't scan forever even if fragment counters are
* wrong: stop after scanning entire reassembly queue.
*/
if (i == dropscanidx)
wrapped = 1;
}
dropscanidx = i;
}
IPQ_UNLOCK();
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
}
/*
* Drain off all datagram fragments. Don't acquire softnet_lock as
* can be called from hardware interrupt context.
*/
void
ip_drain(void)
{
KERNEL_LOCK(1, NULL);
/*
* We may be called from a device's interrupt context. If
* the ipq is already busy, just bail out now.
*/
if (ipq_lock_try() != 0) {
/*
* Drop half the total fragments now. If more mbufs are
* needed, we will be called again soon.
*/
ip_reass_drophalf();
IPQ_UNLOCK();
}
KERNEL_UNLOCK_ONE(NULL);
}
/*
* Do option processing on a datagram,
* possibly discarding it if bad options are encountered,
* or forwarding it if source-routed.
* Returns 1 if packet has been forwarded/freed,
* 0 if the packet should be processed further.
*/
int
ip_dooptions(struct mbuf *m)
{
struct ip *ip = mtod(m, struct ip *);
u_char *cp, *cp0;
struct ip_timestamp *ipt;
struct in_ifaddr *ia;
int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
struct in_addr dst;
n_time ntime;
dst = ip->ip_dst;
cp = (u_char *)(ip + 1);
cnt = (ip->ip_hl << 2) - sizeof (struct ip);
for (; cnt > 0; cnt -= optlen, cp += optlen) {
opt = cp[IPOPT_OPTVAL];
if (opt == IPOPT_EOL)
break;
if (opt == IPOPT_NOP)
optlen = 1;
else {
if (cnt < IPOPT_OLEN + sizeof(*cp)) {
code = &cp[IPOPT_OLEN] - (u_char *)ip;
goto bad;
}
optlen = cp[IPOPT_OLEN];
if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
code = &cp[IPOPT_OLEN] - (u_char *)ip;
goto bad;
}
}
switch (opt) {
default:
break;
/*
* Source routing with record.
* Find interface with current destination address.
* If none on this machine then drop if strictly routed,
* or do nothing if loosely routed.
* Record interface address and bring up next address
* component. If strictly routed make sure next
* address is on directly accessible net.
*/
case IPOPT_LSRR:
case IPOPT_SSRR:
if (ip_allowsrcrt == 0) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_NET_PROHIB;
goto bad;
}
if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
code = &cp[IPOPT_OLEN] - (u_char *)ip;
goto bad;
}
if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
code = &cp[IPOPT_OFFSET] - (u_char *)ip;
goto bad;
}
ipaddr.sin_addr = ip->ip_dst;
ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr)));
if (ia == 0) {
if (opt == IPOPT_SSRR) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_SRCFAIL;
goto bad;
}
/*
* Loose routing, and not at next destination
* yet; nothing to do except forward.
*/
break;
}
off--; /* 0 origin */
if ((off + sizeof(struct in_addr)) > optlen) {
/*
* End of source route. Should be for us.
*/
save_rte(cp, ip->ip_src);
break;
}
/*
* locate outgoing interface
*/
memcpy( (void *)&ipaddr.sin_addr, (void *)(cp + off),
sizeof(ipaddr.sin_addr));
if (opt == IPOPT_SSRR)
ia = ifatoia(ifa_ifwithladdr(sintosa(&ipaddr)));
else
ia = ip_rtaddr(ipaddr.sin_addr);
if (ia == 0) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_SRCFAIL;
goto bad;
}
ip->ip_dst = ipaddr.sin_addr;
bcopy((void *)&ia->ia_addr.sin_addr,
(void *)(cp + off), sizeof(struct in_addr));
cp[IPOPT_OFFSET] += sizeof(struct in_addr);
/*
* Let ip_intr's mcast routing check handle mcast pkts
*/
forward = !IN_MULTICAST(ip->ip_dst.s_addr);
break;
case IPOPT_RR:
if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
code = &cp[IPOPT_OLEN] - (u_char *)ip;
goto bad;
}
if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
code = &cp[IPOPT_OFFSET] - (u_char *)ip;
goto bad;
}
/*
* If no space remains, ignore.
*/
off--; /* 0 origin */
if ((off + sizeof(struct in_addr)) > optlen)
break;
memcpy( (void *)&ipaddr.sin_addr, (void *)(&ip->ip_dst),
sizeof(ipaddr.sin_addr));
/*
* locate outgoing interface; if we're the destination,
* use the incoming interface (should be same).
*/
if ((ia = ifatoia(ifa_ifwithaddr(sintosa(&ipaddr))))
== NULL &&
(ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_HOST;
goto bad;
}
bcopy((void *)&ia->ia_addr.sin_addr,
(void *)(cp + off), sizeof(struct in_addr));
cp[IPOPT_OFFSET] += sizeof(struct in_addr);
break;
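/*
 * Internet timestamp option: ipt_ptr is a 1-origin offset into the
 * option where the next timestamp (and, for the TSANDADDR/PRESPEC
 * variants, the address) is stored.  When the option is full, the
 * overflow counter ipt_oflw is incremented instead; if that counter
 * wraps to zero, a parameter problem is reported.
 */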
case IPOPT_TS:
code = cp - (u_char *)ip;
ipt = (struct ip_timestamp *)cp;
if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
code = (u_char *)&ipt->ipt_len - (u_char *)ip;
goto bad;
}
if (ipt->ipt_ptr < 5) {
code = (u_char *)&ipt->ipt_ptr - (u_char *)ip;
goto bad;
}
if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
if (++ipt->ipt_oflw == 0) {
code = (u_char *)&ipt->ipt_ptr -
(u_char *)ip;
goto bad;
}
break;
}
cp0 = (cp + ipt->ipt_ptr - 1);
switch (ipt->ipt_flg) {
case IPOPT_TS_TSONLY:
break;
case IPOPT_TS_TSANDADDR:
if (ipt->ipt_ptr - 1 + sizeof(n_time) +
sizeof(struct in_addr) > ipt->ipt_len) {
code = (u_char *)&ipt->ipt_ptr -
(u_char *)ip;
goto bad;
}
ipaddr.sin_addr = dst;
ia = ifatoia(ifaof_ifpforaddr(sintosa(&ipaddr),
m->m_pkthdr.rcvif));
if (ia == 0)
continue;
bcopy(&ia->ia_addr.sin_addr,
cp0, sizeof(struct in_addr));
ipt->ipt_ptr += sizeof(struct in_addr);
break;
case IPOPT_TS_PRESPEC:
if (ipt->ipt_ptr - 1 + sizeof(n_time) +
sizeof(struct in_addr) > ipt->ipt_len) {
code = (u_char *)&ipt->ipt_ptr -
(u_char *)ip;
goto bad;
}
memcpy( &ipaddr.sin_addr, cp0,
sizeof(struct in_addr));
if (ifatoia(ifa_ifwithaddr(sintosa(&ipaddr)))
== NULL)
continue;
ipt->ipt_ptr += sizeof(struct in_addr);
break;
default:
/* XXX can't take &ipt->ipt_flg */
code = (u_char *)&ipt->ipt_ptr -
(u_char *)ip + 1;
goto bad;
}
ntime = iptime();
cp0 = (u_char *) &ntime; /* XXX grumble, GCC... */
memmove((char *)cp + ipt->ipt_ptr - 1, cp0,
sizeof(n_time));
ipt->ipt_ptr += sizeof(n_time);
}
}
if (forward) {
if (ip_forwsrcrt == 0) {
type = ICMP_UNREACH;
code = ICMP_UNREACH_SRCFAIL;
goto bad;
}
ip_forward(m, 1);
return (1);
}
return (0);
bad:
icmp_error(m, type, code, 0, 0);
IP_STATINC(IP_STAT_BADOPTIONS);
return (1);
}
/*
* Given address of next destination (final or next hop),
* return internet address info of interface to be used to get there.
*/
struct in_ifaddr *
ip_rtaddr(struct in_addr dst)
{
struct rtentry *rt;
union {
struct sockaddr dst;
struct sockaddr_in dst4;
} u;
sockaddr_in_init(&u.dst4, &dst, 0);
if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL)
return NULL;
return ifatoia(rt->rt_ifa);
}
/*
* Save incoming source route for use in replies,
* to be picked up later by ip_srcroute if the receiver is interested.
*/
void
save_rte(u_char *option, struct in_addr dst)
{
unsigned olen;
olen = option[IPOPT_OLEN];
#ifdef DIAGNOSTIC
if (ipprintfs)
printf("save_rte: olen %d\n", olen);
#endif
if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst)))
return;
memcpy((void *)ip_srcrt.srcopt, (void *)option, olen);
ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
ip_srcrt.dst = dst;
}
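/*
 * Worked example (illustrative, not part of the original source), assuming
 * the static ip_srcrt buffer declared earlier in this file keeps the 3-byte
 * option header in srcopt[] with the recorded hop addresses landing in
 * route[] directly behind it: a received LSRR option with
 *
 *	olen = 11:  type, len = 11, offset, A1, A2   (3 header bytes + 2 hops)
 *
 * is copied verbatim above, leaving
 *
 *	ip_nhops     = (11 - IPOPT_OFFSET - 1) / sizeof(struct in_addr) = 2
 *	ip_srcrt.dst = the destination passed in by the caller
 *
 * which is exactly the state ip_srcroute() below consumes to build the
 * reversed route for replies.
 */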
/*
* Retrieve incoming source route for use in replies,
* in the same form used by setsockopt.
* The first hop is placed before the options and will be removed later.
*/
struct mbuf *
ip_srcroute(void)
{
struct in_addr *p, *q;
struct mbuf *m;
if (ip_nhops == 0)
return NULL;
m = m_get(M_DONTWAIT, MT_SOOPTS);
if (m == NULL)
return NULL;
MCLAIM(m, &inetdomain.dom_mowner);
#define OPTSIZ (sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt))
/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
m->m_len = ip_nhops * sizeof(struct in_addr) + sizeof(struct in_addr) +
OPTSIZ;
#ifdef DIAGNOSTIC
if (ipprintfs)
printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
#endif
/*
* First save first hop for return route
*/
p = &ip_srcrt.route[ip_nhops - 1];
*(mtod(m, struct in_addr *)) = *p--;
#ifdef DIAGNOSTIC
if (ipprintfs)
printf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr));
#endif
/*
* Copy option fields and padding (nop) to mbuf.
*/
ip_srcrt.nop = IPOPT_NOP;
ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
memmove(mtod(m, char *) + sizeof(struct in_addr), &ip_srcrt.nop,
OPTSIZ);
q = (struct in_addr *)(mtod(m, char *) +
sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
/*
* Record return path as an IP source route,
* reversing the path (pointers are now aligned).
*/
while (p >= ip_srcrt.route) {
#ifdef DIAGNOSTIC
if (ipprintfs)
printf(" %x", ntohl(q->s_addr));
#endif
*q++ = *p--;
}
/*
* Last hop goes to final destination.
*/
*q = ip_srcrt.dst;
#ifdef DIAGNOSTIC
if (ipprintfs)
printf(" %x\n", ntohl(q->s_addr));
#endif
return (m);
}
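/*
 * Illustrative layout of the mbuf built above (not part of the original
 * source): continuing the olen = 11 example from save_rte(), with recorded
 * hops A1, A2 and ip_srcrt.dst = D, the returned option data is
 *
 *	A2 | NOP | LSRR hdr (type, len, offset) | A1 | D
 *
 * i.e. the recorded route comes out reversed, the first hop of the return
 * route (A2) sits in front of the option proper so the caller can strip it,
 * and D becomes the last entry.  m_len is 2*4 + 4 + OPTSIZ = 16 here.
 */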
const int inetctlerrmap[PRC_NCMDS] = {
[PRC_MSGSIZE] = EMSGSIZE,
[PRC_HOSTDEAD] = EHOSTDOWN,
[PRC_HOSTUNREACH] = EHOSTUNREACH,
[PRC_UNREACH_NET] = EHOSTUNREACH,
[PRC_UNREACH_HOST] = EHOSTUNREACH,
[PRC_UNREACH_PROTOCOL] = ECONNREFUSED,
[PRC_UNREACH_PORT] = ECONNREFUSED,
[PRC_UNREACH_SRCFAIL] = EHOSTUNREACH,
[PRC_PARAMPROB] = ENOPROTOOPT,
};
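/*
 * Sketch of the conventional use of this table (illustrative, not part of
 * the original source): a protocol's pr_ctlinput routine translates the
 * PRC_* command delivered by icmp_input() into an errno value before
 * notifying matching PCBs, along the lines of
 *
 *	int error;
 *
 *	if ((unsigned)cmd >= PRC_NCMDS)
 *		return NULL;
 *	if ((error = inetctlerrmap[cmd]) == 0)
 *		return NULL;
 *	(notify sockets bound to the affected address with error)
 *
 * Commands without an explicit entry map to 0 and are ignored.
 */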
/*
* Forward a packet. If some error occurs return the sender
* an icmp packet. Note we can't always generate a meaningful
* icmp message because icmp doesn't have a large enough repertoire
* of codes and types.
*
* If not forwarding, just drop the packet. This could be confusing
* if ipforwarding was zero but some routing protocol was advancing
* us as a gateway to somewhere. However, we must let the routing
* protocol deal with that.
*
* The srcrt parameter indicates whether the packet is being forwarded
* via a source route.
*/
void
ip_forward(struct mbuf *m, int srcrt)
{
struct ip *ip = mtod(m, struct ip *);
struct rtentry *rt;
int error, type = 0, code = 0, destmtu = 0;
struct mbuf *mcopy;
n_long dest;
union {
struct sockaddr dst;
struct sockaddr_in dst4;
} u;
/*
* We are now in the output path.
*/
MCLAIM(m, &ip_tx_mowner);
/*
* Clear any in-bound checksum flags for this packet.
*/
m->m_pkthdr.csum_flags = 0;
dest = 0;
#ifdef DIAGNOSTIC
if (ipprintfs) {
printf("forward: src %s ", inet_ntoa(ip->ip_src));
printf("dst %s ttl %x\n", inet_ntoa(ip->ip_dst), ip->ip_ttl);
}
#endif
if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
IP_STATINC(IP_STAT_CANTFORWARD);
m_freem(m);
return;
}
if (ip->ip_ttl <= IPTTLDEC) {
icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
return;
}
sockaddr_in_init(&u.dst4, &ip->ip_dst, 0);
if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL) {
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NET, dest, 0);
return;
}
/*
* Save at most 68 bytes of the packet in case
* we need to generate an ICMP message to the src.
* Pullup to avoid sharing mbuf cluster between m and mcopy.
*/
mcopy = m_copym(m, 0, imin(ntohs(ip->ip_len), 68), M_DONTWAIT);
if (mcopy)
mcopy = m_pullup(mcopy, ip->ip_hl << 2);
ip->ip_ttl -= IPTTLDEC;
/*
* If forwarding packet using same interface that it came in on,
* perhaps should send a redirect to sender to shortcut a hop.
* Only send redirect if source is sending directly to us,
* and if packet was not source routed (or has any options).
* Also, don't send redirect if forwarding using a default route
* or a route modified by a redirect.
*/
if (rt->rt_ifp == m->m_pkthdr.rcvif &&
(rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
!in_nullhost(satocsin(rt_getkey(rt))->sin_addr) &&
ipsendredirects && !srcrt) {
if (rt->rt_ifa &&
(ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_subnetmask) ==
ifatoia(rt->rt_ifa)->ia_subnet) {
if (rt->rt_flags & RTF_GATEWAY)
dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
else
dest = ip->ip_dst.s_addr;
/*
* Router requirements says to only send host
* redirects.
*/
type = ICMP_REDIRECT;
code = ICMP_REDIRECT_HOST;
#ifdef DIAGNOSTIC
if (ipprintfs)
printf("redirect (%d) to %x\n", code,
(u_int32_t)dest);
#endif
}
}
error = ip_output(m, NULL, &ipforward_rt,
(IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)),
(struct ip_moptions *)NULL, (struct socket *)NULL);
if (error)
IP_STATINC(IP_STAT_CANTFORWARD);
else {
uint64_t *ips = IP_STAT_GETREF();
ips[IP_STAT_FORWARD]++;
if (type) {
ips[IP_STAT_REDIRECTSENT]++;
IP_STAT_PUTREF();
} else {
IP_STAT_PUTREF();
if (mcopy) {
#ifdef GATEWAY
if (mcopy->m_flags & M_CANFASTFWD)
ipflow_create(&ipforward_rt, mcopy);
#endif
m_freem(mcopy);
}
return;
}
}
if (mcopy == NULL)
return;
switch (error) {
case 0: /* forwarded, but need redirect */
/* type, code set above */
break;
case ENETUNREACH: /* shouldn't happen, checked above */
case EHOSTUNREACH:
case ENETDOWN:
case EHOSTDOWN:
default:
type = ICMP_UNREACH;
code = ICMP_UNREACH_HOST;
break;
case EMSGSIZE:
type = ICMP_UNREACH;
code = ICMP_UNREACH_NEEDFRAG;
if ((rt = rtcache_validate(&ipforward_rt)) != NULL)
destmtu = rt->rt_ifp->if_mtu;
#if defined(IPSEC) || defined(FAST_IPSEC)
{
/*
* If the packet is routed over IPsec tunnel, tell the
* originator the tunnel MTU.
* tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
* XXX quickhack!!!
*/
struct secpolicy *sp;
int ipsecerror;
size_t ipsechdr;
struct route *ro;
sp = ipsec4_getpolicybyaddr(mcopy,
IPSEC_DIR_OUTBOUND, IP_FORWARDING,
&ipsecerror);
if (sp != NULL) {
/* count IPsec header size */
ipsechdr = ipsec4_hdrsiz(mcopy,
IPSEC_DIR_OUTBOUND, NULL);
/*
* find the correct route for outer IPv4
* header, compute tunnel MTU.
*/
if (sp->req != NULL
&& sp->req->sav != NULL
&& sp->req->sav->sah != NULL) {
ro = &sp->req->sav->sah->sa_route;
rt = rtcache_validate(ro);
if (rt && rt->rt_ifp) {
destmtu =
rt->rt_rmx.rmx_mtu ?
rt->rt_rmx.rmx_mtu :
rt->rt_ifp->if_mtu;
destmtu -= ipsechdr;
}
}
#ifdef IPSEC
key_freesp(sp);
#else
KEY_FREESP(&sp);
#endif
}
}
#endif /*defined(IPSEC) || defined(FAST_IPSEC)*/
IP_STATINC(IP_STAT_CANTFRAG);
break;
case ENOBUFS:
#if 1
/*
* RFC 1812 (Requirements for IP Version 4 Routers) says a router
* should not generate ICMP_SOURCEQUENCH.  Source quench could be a
* big problem under DoS attacks, or if the underlying interface is
* rate-limited.
*/
if (mcopy)
m_freem(mcopy);
return;
#else
type = ICMP_SOURCEQUENCH;
code = 0;
break;
#endif
}
icmp_error(mcopy, type, code, dest, destmtu);
}
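/*
 * Illustrative note on the EMSGSIZE case above (not part of the original
 * source): the ICMP "fragmentation needed" error carries destmtu so the
 * sender can perform RFC 1191 Path MTU Discovery.  With assumed example
 * values, an outgoing interface MTU of 1500 and 56 bytes of ESP/AH overhead
 * reported by ipsec4_hdrsiz() would advertise destmtu = 1500 - 56 = 1444.
 */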
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
struct mbuf *m)
{
if (inp->inp_socket->so_options & SO_TIMESTAMP
#ifdef SO_OTIMESTAMP
|| inp->inp_socket->so_options & SO_OTIMESTAMP
#endif
) {
struct timeval tv;
microtime(&tv);
#ifdef SO_OTIMESTAMP
if (inp->inp_socket->so_options & SO_OTIMESTAMP) {
struct timeval50 tv50;
timeval_to_timeval50(&tv, &tv50);
*mp = sbcreatecontrol((void *) &tv50, sizeof(tv50),
SCM_OTIMESTAMP, SOL_SOCKET);
} else
#endif
*mp = sbcreatecontrol((void *) &tv, sizeof(tv),
SCM_TIMESTAMP, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
if (inp->inp_flags & INP_RECVDSTADDR) {
*mp = sbcreatecontrol((void *) &ip->ip_dst,
sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
if (*mp)
mp = &(*mp)->m_next;
}
#ifdef notyet
/*
* XXX
* Moving these out of udp_input() made them even more broken
* than they already were.
* - fenner@parc.xerox.com
*/
/* options were tossed already */
if (inp->inp_flags & INP_RECVOPTS) {
*mp = sbcreatecontrol((void *) opts_deleted_above,
sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
if (*mp)
mp = &(*mp)->m_next;
}
/* ip_srcroute doesn't do what we want here, need to fix */
if (inp->inp_flags & INP_RECVRETOPTS) {
*mp = sbcreatecontrol((void *) ip_srcroute(),
sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
if (*mp)
mp = &(*mp)->m_next;
}
#endif
if (inp->inp_flags & INP_RECVIF) {
struct sockaddr_dl sdl;
sockaddr_dl_init(&sdl, sizeof(sdl),
(m->m_pkthdr.rcvif != NULL)
? m->m_pkthdr.rcvif->if_index
: 0,
0, NULL, 0, NULL, 0);
*mp = sbcreatecontrol(&sdl, sdl.sdl_len, IP_RECVIF, IPPROTO_IP);
if (*mp)
mp = &(*mp)->m_next;
}
}
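/*
 * Illustrative userland sketch (not part of the kernel source) showing how
 * the control messages built above are requested and consumed; "s" is an
 * assumed, already-bound UDP socket and error handling is omitted:
 *
 *	int on = 1;
 *	char cbuf[1024], dbuf[2048];
 *	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
 *	struct msghdr msg;
 *	struct cmsghdr *cm;
 *
 *	setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 *	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
 *	setsockopt(s, IPPROTO_IP, IP_RECVIF, &on, sizeof(on));
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	recvmsg(s, &msg, 0);
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_TIMESTAMP)
 *			(CMSG_DATA(cm) holds a struct timeval)
 *		else if (cm->cmsg_level == IPPROTO_IP &&
 *		    cm->cmsg_type == IP_RECVDSTADDR)
 *			(CMSG_DATA(cm) holds a struct in_addr)
 *		else if (cm->cmsg_level == IPPROTO_IP &&
 *		    cm->cmsg_type == IP_RECVIF)
 *			(CMSG_DATA(cm) holds a struct sockaddr_dl)
 *	}
 */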
/*
* sysctl helper routine for net.inet.ip.forwsrcrt.
*/
static int
sysctl_net_inet_ip_forwsrcrt(SYSCTLFN_ARGS)
{
int error, tmp;
struct sysctlnode node;
node = *rnode;
tmp = ip_forwsrcrt;
node.sysctl_data = &tmp;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return (error);
error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FORWSRCRT,
0, NULL, NULL, NULL);
if (error)
return (error);
ip_forwsrcrt = tmp;
return (0);
}
/*
* sysctl helper routine for net.inet.ip.mtudisctimeout. checks the
* range of the new value and tweaks timers if it changes.
*/
static int
sysctl_net_inet_ip_pmtudto(SYSCTLFN_ARGS)
{
int error, tmp;
struct sysctlnode node;
node = *rnode;
tmp = ip_mtudisc_timeout;
node.sysctl_data = &tmp;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return (error);
if (tmp < 0)
return (EINVAL);
mutex_enter(softnet_lock);
ip_mtudisc_timeout = tmp;
rt_timer_queue_change(ip_mtudisc_timeout_q, ip_mtudisc_timeout);
mutex_exit(softnet_lock);
return (0);
}
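/*
 * Usage note (illustrative): from userland the knob handled above is
 * reached as net.inet.ip.mtudisctimeout, e.g.
 *
 *	sysctl -w net.inet.ip.mtudisctimeout=1200
 *
 * Negative values are rejected with EINVAL; on success the Path MTU
 * discovery route timer queue is re-armed with the new lifetime.
 */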
#ifdef GATEWAY
/*
* sysctl helper routine for net.inet.ip.maxflows.
*/
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
int error;
error = sysctl_lookup(SYSCTLFN_CALL(rnode));
if (error || newp == NULL)
return (error);
mutex_enter(softnet_lock);
KERNEL_LOCK(1, NULL);
ipflow_prune();
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
return (0);
}
static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
int error, tmp;
struct sysctlnode node;
node = *rnode;
tmp = ip_hashsize;
node.sysctl_data = &tmp;
error = sysctl_lookup(SYSCTLFN_CALL(&node));
if (error || newp == NULL)
return (error);
if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
/*
* Can only fail due to malloc()
*/
mutex_enter(softnet_lock);
KERNEL_LOCK(1, NULL);
error = ipflow_invalidate_all(tmp);
KERNEL_UNLOCK_ONE(NULL);
mutex_exit(softnet_lock);
} else {
/*
* EINVAL if not a power of 2
*/
error = EINVAL;
}
return error;
}
#endif /* GATEWAY */
static int
sysctl_net_inet_ip_stats(SYSCTLFN_ARGS)
{
return (NETSTAT_SYSCTL(ipstat_percpu, IP_NSTATS));
}
SYSCTL_SETUP(sysctl_net_inet_ip_setup, "sysctl net.inet.ip subtree setup")
{
extern int subnetsarelocal, hostzeroisbroadcast;
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_NODE, "net", NULL,
NULL, 0, NULL, 0,
CTL_NET, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_NODE, "inet",
SYSCTL_DESCR("PF_INET related settings"),
NULL, 0, NULL, 0,
CTL_NET, PF_INET, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_NODE, "ip",
SYSCTL_DESCR("IPv4 related settings"),
NULL, 0, NULL, 0,
CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "forwarding",
SYSCTL_DESCR("Enable forwarding of INET datagrams"),
NULL, 0, &ipforwarding, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_FORWARDING, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "redirect",
SYSCTL_DESCR("Enable sending of ICMP redirect messages"),
NULL, 0, &ipsendredirects, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_SENDREDIRECTS, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "ttl",
SYSCTL_DESCR("Default TTL for an INET datagram"),
NULL, 0, &ip_defttl, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_DEFTTL, CTL_EOL);
#ifdef IPCTL_DEFMTU
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT /* |CTLFLAG_READWRITE? */,
CTLTYPE_INT, "mtu",
SYSCTL_DESCR("Default MTA for an INET route"),
NULL, 0, &ip_mtu, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_DEFMTU, CTL_EOL);
#endif /* IPCTL_DEFMTU */
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "forwsrcrt",
SYSCTL_DESCR("Enable forwarding of source-routed "
"datagrams"),
sysctl_net_inet_ip_forwsrcrt, 0, &ip_forwsrcrt, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_FORWSRCRT, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "directed-broadcast",
SYSCTL_DESCR("Enable forwarding of broadcast datagrams"),
NULL, 0, &ip_directedbcast, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_DIRECTEDBCAST, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "allowsrcrt",
SYSCTL_DESCR("Accept source-routed datagrams"),
NULL, 0, &ip_allowsrcrt, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_ALLOWSRCRT, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "subnetsarelocal",
SYSCTL_DESCR("Whether logical subnets are considered "
"local"),
NULL, 0, &subnetsarelocal, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_SUBNETSARELOCAL, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "mtudisc",
SYSCTL_DESCR("Use RFC1191 Path MTU Discovery"),
NULL, 0, &ip_mtudisc, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_MTUDISC, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "anonportmin",
SYSCTL_DESCR("Lowest ephemeral port number to assign"),
sysctl_net_inet_ip_ports, 0, &anonportmin, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_ANONPORTMIN, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "anonportmax",
SYSCTL_DESCR("Highest ephemeral port number to assign"),
sysctl_net_inet_ip_ports, 0, &anonportmax, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_ANONPORTMAX, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "mtudisctimeout",
SYSCTL_DESCR("Lifetime of a Path MTU Discovered route"),
sysctl_net_inet_ip_pmtudto, 0, &ip_mtudisc_timeout, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_MTUDISCTIMEOUT, CTL_EOL);
#ifdef GATEWAY
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "maxflows",
SYSCTL_DESCR("Number of flows for fast forwarding"),
sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_MAXFLOWS, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "hashsize",
SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
CTL_NET, PF_INET, IPPROTO_IP,
CTL_CREATE, CTL_EOL);
#endif /* GATEWAY */
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "hostzerobroadcast",
SYSCTL_DESCR("All zeroes address is broadcast address"),
NULL, 0, &hostzeroisbroadcast, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_HOSTZEROBROADCAST, CTL_EOL);
#if NGIF > 0
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "gifttl",
SYSCTL_DESCR("Default TTL for a gif tunnel datagram"),
NULL, 0, &ip_gif_ttl, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_GIF_TTL, CTL_EOL);
#endif /* NGIF */
#ifndef IPNOPRIVPORTS
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "lowportmin",
SYSCTL_DESCR("Lowest privileged ephemeral port number "
"to assign"),
sysctl_net_inet_ip_ports, 0, &lowportmin, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_LOWPORTMIN, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "lowportmax",
SYSCTL_DESCR("Highest privileged ephemeral port number "
"to assign"),
sysctl_net_inet_ip_ports, 0, &lowportmax, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_LOWPORTMAX, CTL_EOL);
#endif /* IPNOPRIVPORTS */
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "maxfragpackets",
SYSCTL_DESCR("Maximum number of fragments to retain for "
"possible reassembly"),
NULL, 0, &ip_maxfragpackets, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_MAXFRAGPACKETS, CTL_EOL);
#if NGRE > 0
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "grettl",
SYSCTL_DESCR("Default TTL for a gre tunnel datagram"),
NULL, 0, &ip_gre_ttl, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_GRE_TTL, CTL_EOL);
#endif /* NGRE */
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "checkinterface",
SYSCTL_DESCR("Enable receive side of Strong ES model "
"from RFC1122"),
NULL, 0, &ip_checkinterface, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_CHECKINTERFACE, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "random_id",
SYSCTL_DESCR("Assign random ip_id values"),
NULL, 0, &ip_do_randomid, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_RANDOMID, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "do_loopback_cksum",
SYSCTL_DESCR("Perform IP checksum on loopback"),
NULL, 0, &ip_do_loopback_cksum, 0,
CTL_NET, PF_INET, IPPROTO_IP,
IPCTL_LOOPBACKCKSUM, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_STRUCT, "stats",
SYSCTL_DESCR("IP statistics"),
sysctl_net_inet_ip_stats, 0, NULL, 0,
CTL_NET, PF_INET, IPPROTO_IP, IPCTL_STATS,
CTL_EOL);
}
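/*
 * Illustrative note (not part of the original source): every node created
 * above lives under net.inet.ip, so the subtree can be inspected or tuned
 * with sysctl(8), e.g.
 *
 *	sysctl net.inet.ip
 *	sysctl -w net.inet.ip.forwarding=1
 *
 * Knobs wired to helper routines above (forwsrcrt, mtudisctimeout,
 * maxflows, hashsize) validate or act on the new value before it takes
 * effect.
 */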
void
ip_statinc(u_int stat)
{
KASSERT(stat < IP_NSTATS);
IP_STATINC(stat);
}