/*	$NetBSD: ip_icmp.c,v 1.178 2022/08/29 09:14:02 knakahara Exp $	*/

/*
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Public Access Networks Corporation ("Panix").  It was developed under
 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Zembu Labs, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_icmp.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_icmp.c,v 1.178 2022/08/29 09:14:02 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_ipsec.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>	/* For softnet_lock */
#include <sys/kmem.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_proto.h>
#include <netinet/icmp_var.h>
#include <netinet/icmp_private.h>
#include <netinet/wqinput.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#endif

/*
 * ICMP routines: error generation, receive packet processing, routines
 * to turn packets around back to the originator, and host table
 * maintenance routines.
 */

int	icmpmaskrepl = 0;
int	icmpbmcastecho = 0;
int	icmpreturndatabytes = 8;

percpu_t *icmpstat_percpu;

/*
 * List of callbacks to notify when Path MTU changes are made.
 */
struct icmp_mtudisc_callback {
	LIST_ENTRY(icmp_mtudisc_callback) mc_list;
	void (*mc_func)(struct in_addr);
};

LIST_HEAD(, icmp_mtudisc_callback) icmp_mtudisc_callbacks =
    LIST_HEAD_INITIALIZER(&icmp_mtudisc_callbacks);

/* unused... */
u_int	ip_next_mtu(u_int, int);

bool icmp_dynamic_rt_msg = false;
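
/*
 * State consumed by the rate limiting check in icmp_error(); with the
 * defaults, generated ICMP errors are bounded to roughly icmperrppslim
 * packets per second (see icmp_ratelimit()).
 */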
static int	icmperrppslim = 100;		/* 100pps */
static int	icmperrpps_count = 0;
static struct timeval icmperrppslim_last;
static int	icmp_rediraccept = 1;
static int	icmp_redirtimeout = 600;
static struct rttimer_queue *icmp_redirect_timeout_q = NULL;

/* Protects the mtudisc and redirect state */
static kmutex_t icmp_mtx __cacheline_aligned;

static void icmp_send(struct mbuf *, struct mbuf *);
static void icmp_mtudisc_timeout(struct rtentry *, struct rttimer *);
static void icmp_redirect_timeout(struct rtentry *, struct rttimer *);

static void sysctl_netinet_icmp_setup(struct sysctllog **);

/* workqueue-based pr_input */
static struct wqinput *icmp_wqinput;
static void _icmp_input(struct mbuf *, int, int);

void
icmp_init(void)
{

	sysctl_netinet_icmp_setup(NULL);

	mutex_init(&icmp_mtx, MUTEX_DEFAULT, IPL_NONE);
	/*
	 * This is only useful if the user initializes redirtimeout to
	 * something other than zero.
	 */
	mutex_enter(&icmp_mtx);
	icmp_redirect_timeout_q = rt_timer_queue_create(icmp_redirtimeout);
	mutex_exit(&icmp_mtx);

	icmpstat_percpu = percpu_alloc(sizeof(uint64_t) * ICMP_NSTATS);
	icmp_wqinput = wqinput_create("icmp", _icmp_input);
}

void
icmp_mtudisc_lock(void)
{

	mutex_enter(&icmp_mtx);
}

void
icmp_mtudisc_unlock(void)
{

	mutex_exit(&icmp_mtx);
}

/*
 * Register a Path MTU Discovery callback.
 */
void
icmp_mtudisc_callback_register(void (*func)(struct in_addr))
{
	struct icmp_mtudisc_callback *mc, *new;

	new = kmem_alloc(sizeof(*mc), KM_SLEEP);

	mutex_enter(&icmp_mtx);
	for (mc = LIST_FIRST(&icmp_mtudisc_callbacks); mc != NULL;
	     mc = LIST_NEXT(mc, mc_list)) {
		if (mc->mc_func == func) {
			mutex_exit(&icmp_mtx);
			kmem_free(new, sizeof(*mc));
			return;
		}
	}

	new->mc_func = func;
	LIST_INSERT_HEAD(&icmp_mtudisc_callbacks, new, mc_list);
	mutex_exit(&icmp_mtx);
}
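
/*
 * Usage sketch (illustrative, hypothetical caller): a transport protocol
 * would register its Path MTU handler once at initialization time, e.g.
 *
 *	icmp_mtudisc_callback_register(tcp_mtudisc_callback);
 *
 * Registering the same function twice is harmless; the duplicate is
 * detected above and the new entry is freed.
 */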

/*
 * Generate an error packet of type error in response to a bad IP packet. 'n'
 * contains this packet. We create 'm' and send it.
 *
 * As we are not required to return everything we have, we return whatever
 * we can return at ease.
 *
 * Note that ICMP datagrams longer than 576 octets are out of spec according
 * to RFC1812; the limit on icmpreturndatabytes will keep things below that
 * limit.
 */
void
icmp_error(struct mbuf *n, int type, int code, n_long dest, int destmtu)
{
	struct ip *oip = mtod(n, struct ip *), *nip;
	const unsigned oiphlen = oip->ip_hl << 2;
	struct icmp *icp;
	struct mbuf *m;
	struct m_tag *mtag;
	unsigned datalen, mblen;
	int totlen;

	if (type != ICMP_REDIRECT)
		ICMP_STATINC(ICMP_STAT_ERROR);

	/*
	 * Don't send error if:
	 * - The original packet was encrypted.
	 * - The packet is multicast or broadcast.
	 * - The packet is not the first fragment of the message.
	 * - The packet is an ICMP message with an unknown type.
	 */
	if (n->m_flags & M_DECRYPTED)
		goto freeit;
	if (n->m_flags & (M_BCAST|M_MCAST))
		goto freeit;
	if (oip->ip_off &~ htons(IP_MF|IP_DF))
		goto freeit;
	if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT &&
	    n->m_len >= oiphlen + ICMP_MINLEN) {
		struct icmp *oicp = (struct icmp *)((char *)oip + oiphlen);
		if (!ICMP_INFOTYPE(oicp->icmp_type)) {
			ICMP_STATINC(ICMP_STAT_OLDICMP);
			goto freeit;
		}
	}

	/*
	 * First, do a rate limitation check.
	 */
	if (icmp_ratelimit(&oip->ip_src, type, code)) {
		/* XXX stat */
		goto freeit;
	}

	/*
	 * Compute the number of bytes we will put in 'icmp_ip'. Truncate
	 * it to the size of the mbuf, if it's too big.
	 */
	datalen = oiphlen + uimin(icmpreturndatabytes,
	    ntohs(oip->ip_len) - oiphlen);
	mblen = 0;
	for (m = n; m && (mblen < datalen); m = m->m_next)
		mblen += m->m_len;
	datalen = uimin(mblen, datalen);
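
	/*
	 * Worked example: with the default icmpreturndatabytes of 8 and a
	 * plain 20-byte IP header, datalen is 20 + 8 = 28 bytes quoted from
	 * the offending datagram -- the classic "header plus first 64 bits"
	 * of RFC 792.
	 */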

	/*
	 * Compute the total length of the new packet. Truncate it if it's
	 * bigger than the size of a cluster.
	 */
	CTASSERT(ICMP_MINLEN + sizeof(struct ip) <= MCLBYTES);
	totlen = sizeof(struct ip) + ICMP_MINLEN + datalen;
	if (totlen > MCLBYTES) {
		datalen = MCLBYTES - ICMP_MINLEN - sizeof(struct ip);
		totlen = MCLBYTES;
	}

	/*
	 * Allocate the mbuf for the new packet.
	 */
	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m && (totlen > MHLEN)) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		}
	}
	if (m == NULL)
		goto freeit;
	MCLAIM(m, n->m_owner);
	m->m_len = totlen;
	m->m_pkthdr.len = m->m_len;
	m_copy_rcvif(m, n);

	if ((u_int)type > ICMP_MAXTYPE)
		panic("icmp_error");
	ICMP_STATINC(ICMP_STAT_OUTHIST + type);

	if ((m->m_flags & M_EXT) == 0)
		m_align(m, m->m_len);

	/*
	 * Get pointers on the IP header and the ICMP header.
	 */
	nip = mtod(m, struct ip *);
	icp = (struct icmp *)(nip + 1);

	/*
	 * Fill in the fields of the ICMP header: icmp_type, icmp_code
	 * and icmp_ip. icmp_cksum gets filled later.
	 */
	icp->icmp_type = type;
	if (type == ICMP_REDIRECT) {
		icp->icmp_gwaddr.s_addr = dest;
	} else {
		icp->icmp_void = 0;
		/*
		 * The following assignments assume an overlay with the
		 * zeroed icmp_void field.
		 */
		if (type == ICMP_PARAMPROB) {
			icp->icmp_pptr = code;
			code = 0;
		} else if (type == ICMP_UNREACH &&
		    code == ICMP_UNREACH_NEEDFRAG && destmtu)
			icp->icmp_nextmtu = htons(destmtu);
	}
	icp->icmp_code = code;
	m_copydata(n, 0, datalen, (void *)&icp->icmp_ip);

	/*
	 * Now, copy the old IP header (without options) in front of the
	 * ICMP message. The src/dst fields will be swapped in icmp_reflect.
	 */
	/* ip_v set in ip_output */
	nip->ip_hl = sizeof(struct ip) >> 2;
	nip->ip_tos = 0;
	nip->ip_len = htons(m->m_len);
	/* ip_id set in ip_output */
	nip->ip_off = htons(0);
	/* ip_ttl set in icmp_reflect */
	nip->ip_p = IPPROTO_ICMP;
	nip->ip_src = oip->ip_src;
	nip->ip_dst = oip->ip_dst;

	/* move PF m_tag to new packet, if it exists */
	mtag = m_tag_find(n, PACKET_TAG_PF);
	if (mtag != NULL) {
		m_tag_unlink(n, mtag);
		m_tag_prepend(m, mtag);
	}

	icmp_reflect(m);

freeit:
	m_freem(n);
}

struct sockaddr_in icmpsrc = {
	.sin_len = sizeof(struct sockaddr_in),
	.sin_family = AF_INET,
};

/*
 * Process a received ICMP message.
 */
static void
_icmp_input(struct mbuf *m, int hlen, int proto)
{
	struct icmp *icp;
	struct ip *ip = mtod(m, struct ip *);
	int icmplen;
	int i;
	struct in_ifaddr *ia;
	void *(*ctlfunc)(int, const struct sockaddr *, void *);
	int code;
	struct rtentry *rt;
	struct sockaddr_in icmpdst = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
	};
	struct sockaddr_in icmpgw = {
		.sin_len = sizeof(struct sockaddr_in),
		.sin_family = AF_INET,
	};

	/*
	 * Locate the icmp structure in the mbuf, and check that it is
	 * not corrupted and of at least minimum length.
	 */
	icmplen = ntohs(ip->ip_len) - hlen;
	if (icmplen < ICMP_MINLEN) {
		ICMP_STATINC(ICMP_STAT_TOOSHORT);
		goto freeit;
	}
	i = hlen + uimin(icmplen, ICMP_ADVLENMIN);
	if (M_UNWRITABLE(m, i) && (m = m_pullup(m, i)) == NULL) {
		ICMP_STATINC(ICMP_STAT_TOOSHORT);
		return;
	}
	ip = mtod(m, struct ip *);
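	/*
	 * The ICMP checksum covers the ICMP message only, so temporarily
	 * trim the IP header off the front of the mbuf for in_cksum(),
	 * and restore it afterwards.
	 */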
	m->m_len -= hlen;
	m->m_data += hlen;
	icp = mtod(m, struct icmp *);
	/* Don't need to assert alignment, here. */
	if (in_cksum(m, icmplen)) {
		ICMP_STATINC(ICMP_STAT_CHECKSUM);
		goto freeit;
	}
	m->m_len += hlen;
	m->m_data -= hlen;

	if (icp->icmp_type > ICMP_MAXTYPE)
		goto raw;
	ICMP_STATINC(ICMP_STAT_INHIST + icp->icmp_type);
	code = icp->icmp_code;
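
	/*
	 * For error messages, the switch below maps the ICMP type/code pair
	 * to a PRC_* protocol-control code; the "deliver" label then hands
	 * the quoted header to the embedded protocol's pr_ctlinput routine.
	 */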
	switch (icp->icmp_type) {

	case ICMP_UNREACH:
		switch (code) {
		case ICMP_UNREACH_PROTOCOL:
			code = PRC_UNREACH_PROTOCOL;
			break;

		case ICMP_UNREACH_PORT:
			code = PRC_UNREACH_PORT;
			break;

		case ICMP_UNREACH_SRCFAIL:
			code = PRC_UNREACH_SRCFAIL;
			break;

		case ICMP_UNREACH_NEEDFRAG:
			code = PRC_MSGSIZE;
			break;

		case ICMP_UNREACH_NET:
		case ICMP_UNREACH_NET_UNKNOWN:
		case ICMP_UNREACH_NET_PROHIB:
		case ICMP_UNREACH_TOSNET:
			code = PRC_UNREACH_NET;
			break;

		case ICMP_UNREACH_HOST:
		case ICMP_UNREACH_HOST_UNKNOWN:
		case ICMP_UNREACH_ISOLATED:
		case ICMP_UNREACH_HOST_PROHIB:
		case ICMP_UNREACH_TOSHOST:
		case ICMP_UNREACH_ADMIN_PROHIBIT:
		case ICMP_UNREACH_HOST_PREC:
		case ICMP_UNREACH_PREC_CUTOFF:
			code = PRC_UNREACH_HOST;
			break;

		default:
			goto badcode;
		}
		goto deliver;

	case ICMP_TIMXCEED:
		if (code > 1)
			goto badcode;
		code += PRC_TIMXCEED_INTRANS;
		goto deliver;

	case ICMP_PARAMPROB:
		if (code > 1)
			goto badcode;
		code = PRC_PARAMPROB;
		goto deliver;

	case ICMP_SOURCEQUENCH:
		if (code)
			goto badcode;
		code = PRC_QUENCH;
		goto deliver;

	deliver:
		/*
		 * Problem with datagram; advise higher level routines.
		 */
		if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
		    icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
			ICMP_STATINC(ICMP_STAT_BADLEN);
			goto freeit;
		}
		if (m->m_len < hlen + ICMP_ADVLEN(icp)) {
			m = m_pullup(m, hlen + ICMP_ADVLEN(icp));
			if (m == NULL)
				goto freeit;
		}
		ip = mtod(m, struct ip *);
		icp = (struct icmp *)(mtod(m, uint8_t *) + hlen);

		if (IN_MULTICAST(icp->icmp_ip.ip_dst.s_addr))
			goto badcode;

		icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
		ctlfunc = inetsw[ip_protox[icp->icmp_ip.ip_p]].pr_ctlinput;
		if (ctlfunc)
			(void) (*ctlfunc)(code, sintosa(&icmpsrc),
			    &icp->icmp_ip);
		break;

	badcode:
		ICMP_STATINC(ICMP_STAT_BADCODE);
		break;

	case ICMP_ECHO:
		if (!icmpbmcastecho &&
		    (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
			ICMP_STATINC(ICMP_STAT_BMCASTECHO);
			break;
		}
		icp->icmp_type = ICMP_ECHOREPLY;
		goto reflect;

	case ICMP_TSTAMP:
		if (icmplen < ICMP_TSLEN) {
			ICMP_STATINC(ICMP_STAT_BADLEN);
			break;
		}
		if (!icmpbmcastecho &&
		    (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
			ICMP_STATINC(ICMP_STAT_BMCASTTSTAMP);
			break;
		}
		icp->icmp_type = ICMP_TSTAMPREPLY;
		icp->icmp_rtime = iptime();
		icp->icmp_ttime = icp->icmp_rtime;	/* bogus, do later! */
		goto reflect;

	case ICMP_MASKREQ: {
		struct ifnet *rcvif;
		int s, ss;
		struct ifaddr *ifa = NULL;

		if (icmpmaskrepl == 0)
			break;
		/*
		 * We are not able to respond with all ones broadcast
		 * unless we receive it over a point-to-point interface.
		 */
		if (icmplen < ICMP_MASKLEN) {
			ICMP_STATINC(ICMP_STAT_BADLEN);
			break;
		}
		if (ip->ip_dst.s_addr == INADDR_BROADCAST ||
		    in_nullhost(ip->ip_dst))
			icmpdst.sin_addr = ip->ip_src;
		else
			icmpdst.sin_addr = ip->ip_dst;
		ss = pserialize_read_enter();
		rcvif = m_get_rcvif(m, &s);
		if (__predict_true(rcvif != NULL))
			ifa = ifaof_ifpforaddr(sintosa(&icmpdst), rcvif);
		m_put_rcvif(rcvif, &s);
		if (ifa == NULL) {
			pserialize_read_exit(ss);
			break;
		}
		ia = ifatoia(ifa);
		icp->icmp_type = ICMP_MASKREPLY;
		icp->icmp_mask = ia->ia_sockmask.sin_addr.s_addr;
		if (in_nullhost(ip->ip_src)) {
			if (ia->ia_ifp->if_flags & IFF_BROADCAST)
				ip->ip_src = ia->ia_broadaddr.sin_addr;
			else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT)
				ip->ip_src = ia->ia_dstaddr.sin_addr;
		}
		pserialize_read_exit(ss);
	reflect:
		{
			uint64_t *icps = percpu_getref(icmpstat_percpu);
			icps[ICMP_STAT_REFLECT]++;
			icps[ICMP_STAT_OUTHIST + icp->icmp_type]++;
			percpu_putref(icmpstat_percpu);
		}
		icmp_reflect(m);
		return;
	}

	case ICMP_REDIRECT:
		if (code > 3)
			goto badcode;
		if (icmp_rediraccept == 0)
			goto freeit;
		if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
		    icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
			ICMP_STATINC(ICMP_STAT_BADLEN);
			break;
		}
		/*
		 * Short circuit routing redirects to force
		 * immediate change in the kernel's routing
		 * tables. The message is also handed to anyone
		 * listening on a raw socket (e.g. the routing
		 * daemon for use in updating its tables).
		 */
		icmpgw.sin_addr = ip->ip_src;
		icmpdst.sin_addr = icp->icmp_gwaddr;
		icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
		rt = NULL;
		rtredirect(sintosa(&icmpsrc), sintosa(&icmpdst),
		    NULL, RTF_GATEWAY | RTF_HOST, sintosa(&icmpgw), &rt);
		mutex_enter(&icmp_mtx);
		if (rt != NULL && icmp_redirtimeout != 0) {
			i = rt_timer_add(rt, icmp_redirect_timeout,
			    icmp_redirect_timeout_q);
			if (i) {
				char buf[INET_ADDRSTRLEN];
				log(LOG_ERR, "ICMP: redirect failed to "
				    "register timeout for route to %s, "
				    "code %d\n",
				    IN_PRINT(buf, &icp->icmp_ip.ip_dst), i);
			}
		}
		mutex_exit(&icmp_mtx);
		if (rt != NULL)
			rt_unref(rt);

		pfctlinput(PRC_REDIRECT_HOST, sintosa(&icmpsrc));
#if defined(IPSEC)
		if (ipsec_used)
			key_sa_routechange((struct sockaddr *)&icmpsrc);
#endif
		break;

	/*
	 * No kernel processing for the following;
	 * just fall through to send to raw listener.
	 */
	case ICMP_ECHOREPLY:
	case ICMP_ROUTERADVERT:
	case ICMP_ROUTERSOLICIT:
	case ICMP_TSTAMPREPLY:
	case ICMP_IREQREPLY:
	case ICMP_MASKREPLY:
	default:
		break;
	}

raw:
	/*
	 * Currently, rip_input() below is always called holding softnet_lock
	 * by ipintr() (!NET_MPSAFE) or PR_INPUT_WRAP() (NET_MPSAFE).
	 */
	KASSERT(mutex_owned(softnet_lock));
	rip_input(m, hlen, proto);
	return;

freeit:
	m_freem(m);
	return;
}

void
icmp_input(struct mbuf *m, int off, int proto)
{
	wqinput_input(icmp_wqinput, m, off, proto);
}
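
/*
 * icmp_input() above only hands the packet to the wqinput framework;
 * the actual processing is done by _icmp_input() when wqinput runs it.
 */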

/*
 * Reflect the ip packet back to the source
 */
void
icmp_reflect(struct mbuf *m)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	struct in_addr t;
	struct mbuf *opts = NULL;
	int optlen = (ip->ip_hl << 2) - sizeof(struct ip);
	struct ifnet *rcvif;
	struct psref psref, psref_ia;
	int s;
	int bound;

	bound = curlwp_bind();

	if (!in_canforward(ip->ip_src) &&
	    ((ip->ip_src.s_addr & IN_CLASSA_NET) !=
	    htonl(IN_LOOPBACKNET << IN_CLASSA_NSHIFT))) {
		m_freem(m);	/* Bad return address */
		goto done;	/* ip_output() will check for broadcast */
	}
	t = ip->ip_dst;
	ip->ip_dst = ip->ip_src;

	/*
	 * If the incoming packet was addressed directly to us, use
	 * dst as the src for the reply. Otherwise (broadcast or
	 * anonymous), use an address which corresponds to the
	 * incoming interface, with a preference for the address which
	 * corresponds to the route to the destination of the ICMP.
	 */

	/* Look for packet addressed to us */
	ia = in_get_ia_psref(t, &psref_ia);
	if (ia && (ia->ia4_flags & IN_IFF_NOTREADY)) {
		ia4_release(ia, &psref_ia);
		ia = NULL;
	}

	rcvif = m_get_rcvif_psref(m, &psref);

	/* look for packet sent to broadcast address */
	if (ia == NULL && rcvif &&
	    (rcvif->if_flags & IFF_BROADCAST)) {
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa, rcvif) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			if (in_hosteq(t, ifatoia(ifa)->ia_broadaddr.sin_addr)) {
				ia = ifatoia(ifa);
				if ((ia->ia4_flags & IN_IFF_NOTREADY) == 0)
					break;
				ia = NULL;
			}
		}
		if (ia != NULL)
			ia4_acquire(ia, &psref_ia);
		pserialize_read_exit(s);
	}

	sin = ia ? &ia->ia_addr : NULL;

	/*
	 * if the packet is addressed somewhere else, compute the
	 * source address for packets routed back to the source, and
	 * use that, if it's an address on the interface which
	 * received the packet
	 */
	if (sin == NULL && rcvif) {
		struct sockaddr_in sin_dst;
		struct route icmproute;
		int errornum;

		sockaddr_in_init(&sin_dst, &ip->ip_dst, 0);
		memset(&icmproute, 0, sizeof(icmproute));
		errornum = 0;
		ia = in_selectsrc(&sin_dst, &icmproute, 0, NULL, &errornum,
		    &psref_ia);
		/* errornum is never used */
		rtcache_free(&icmproute);
		/* check to make sure sin is a source address on rcvif */
		if (ia != NULL) {
			sin = &ia->ia_addr;
			t = sin->sin_addr;
			sin = NULL;
			ia4_release(ia, &psref_ia);
			ia = in_get_ia_on_iface_psref(t, rcvif, &psref_ia);
			if (ia != NULL)
				sin = &ia->ia_addr;
		}
	}

	/*
	 * if it was not addressed to us, but the route doesn't go out
	 * the source interface, pick an address on the source
	 * interface. This can happen when routing is asymmetric, or
	 * when the incoming packet was encapsulated
	 */
	if (sin == NULL && rcvif) {
		KASSERT(ia == NULL);
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa, rcvif) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			sin = &(ifatoia(ifa)->ia_addr);
			ia = ifatoia(ifa);
			ia4_acquire(ia, &psref_ia);
			break;
		}
		pserialize_read_exit(s);
	}

	m_put_rcvif_psref(rcvif, &psref);
1994-05-13 10:02:48 +04:00
|
|
|
/*
|
|
|
|
* The following happens if the packet was not addressed to us,
|
1998-02-13 21:21:38 +03:00
|
|
|
* and was received on an interface with no IP address:
|
|
|
|
* We find the first AF_INET address on the first non-loopback
|
|
|
|
* interface.
|
1994-05-13 10:02:48 +04:00
|
|
|
*/
|
2016-08-01 06:15:30 +03:00
|
|
|
if (sin == NULL) {
|
|
|
|
KASSERT(ia == NULL);
|
|
|
|
s = pserialize_read_enter();
|
2016-07-06 11:42:34 +03:00
|
|
|
IN_ADDRLIST_READER_FOREACH(ia) {
|
1998-02-13 21:21:38 +03:00
|
|
|
if (ia->ia_ifp->if_flags & IFF_LOOPBACK)
|
|
|
|
continue;
|
2000-01-25 20:07:56 +03:00
|
|
|
sin = &ia->ia_addr;
|
2016-08-01 06:15:30 +03:00
|
|
|
ia4_acquire(ia, &psref_ia);
|
1998-02-13 21:21:38 +03:00
|
|
|
break;
|
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(s);
|
|
|
|
}
|
2000-01-25 20:07:56 +03:00
|
|
|
|
1999-03-30 23:02:56 +04:00
|
|
|
/*
|
|
|
|
* If we still didn't find an address, punt. We could have an
|
|
|
|
* interface up (and receiving packets) with no address.
|
|
|
|
*/
|
2007-11-10 02:42:56 +03:00
|
|
|
if (sin == NULL) {
|
2016-08-01 06:15:30 +03:00
|
|
|
KASSERT(ia == NULL);
|
1999-03-30 23:02:56 +04:00
|
|
|
m_freem(m);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2000-01-25 20:07:56 +03:00
|
|
|
ip->ip_src = sin->sin_addr;
|
1993-03-21 12:45:37 +03:00
|
|
|
ip->ip_ttl = MAXTTL;
|
|
|
|
|
2016-08-01 06:15:30 +03:00
|
|
|
if (ia != NULL)
|
|
|
|
ia4_release(ia, &psref_ia);
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (optlen > 0) {
|
2000-03-30 16:51:13 +04:00
|
|
|
u_char *cp;
|
1993-03-21 12:45:37 +03:00
|
|
|
int opt, cnt;
|
|
|
|
u_int len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Retrieve any source routing from the incoming packet;
|
|
|
|
* add on any record-route or timestamp options.
|
|
|
|
*/
|
2018-01-23 10:15:04 +03:00
|
|
|
cp = (u_char *)(ip + 1);
|
2017-03-31 09:49:44 +03:00
|
|
|
if ((opts = ip_srcroute(m)) == NULL &&
|
1993-03-21 12:45:37 +03:00
|
|
|
(opts = m_gethdr(M_DONTWAIT, MT_HEADER))) {
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(opts, m->m_owner);
|
1993-03-21 12:45:37 +03:00
|
|
|
opts->m_len = sizeof(struct in_addr);
|
1996-09-09 18:51:07 +04:00
|
|
|
*mtod(opts, struct in_addr *) = zeroin_addr;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2018-01-23 10:15:04 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (opts) {
|
2018-01-23 10:15:04 +03:00
|
|
|
for (cnt = optlen; cnt > 0; cnt -= len, cp += len) {
|
|
|
|
opt = cp[IPOPT_OPTVAL];
|
|
|
|
if (opt == IPOPT_EOL)
|
|
|
|
break;
|
|
|
|
if (opt == IPOPT_NOP)
|
|
|
|
len = 1;
|
|
|
|
else {
|
|
|
|
if (cnt < IPOPT_OLEN + sizeof(*cp))
|
|
|
|
break;
|
|
|
|
len = cp[IPOPT_OLEN];
|
|
|
|
if (len < IPOPT_OLEN + sizeof(*cp) ||
|
|
|
|
len > cnt)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Overflows can't happen */
|
|
|
|
KASSERT(opts->m_len + len <= MHLEN);
|
|
|
|
|
|
|
|
if (opt == IPOPT_RR || opt == IPOPT_TS ||
|
|
|
|
opt == IPOPT_SECURITY) {
|
|
|
|
memmove(mtod(opts, char *) +
|
|
|
|
opts->m_len, cp, len);
|
|
|
|
opts->m_len += len;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Terminate & pad, if necessary */
|
|
|
|
if ((cnt = opts->m_len % 4) != 0) {
|
|
|
|
for (; cnt < 4; cnt++) {
|
|
|
|
*(mtod(opts, char *) + opts->m_len) =
|
|
|
|
IPOPT_EOL;
|
|
|
|
opts->m_len++;
|
|
|
|
}
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2018-01-23 10:15:04 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Now strip out original options by copying rest of first
|
|
|
|
* mbuf's data back, and adjust the IP length.
|
|
|
|
*/
|
2002-08-14 04:23:27 +04:00
|
|
|
ip->ip_len = htons(ntohs(ip->ip_len) - optlen);
|
1993-03-21 12:45:37 +03:00
|
|
|
ip->ip_hl = sizeof(struct ip) >> 2;
|
|
|
|
m->m_len -= optlen;
|
|
|
|
if (m->m_flags & M_PKTHDR)
|
|
|
|
m->m_pkthdr.len -= optlen;
|
|
|
|
optlen += sizeof(struct ip);
|
2007-03-04 08:59:00 +03:00
|
|
|
memmove(ip + 1, (char *)ip + optlen,
|
|
|
|
(unsigned)(m->m_len - sizeof(struct ip)));
|
1993-03-21 12:45:37 +03:00
|
|
|
}
	m_tag_delete_chain(m);
	m->m_flags &= ~(M_BCAST|M_MCAST);

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.csum_flags = 0;

	icmp_send(m, opts);
done:
	curlwp_bindx(bound);
	if (opts)
		(void)m_free(opts);
}

/*
 * Send an icmp packet back to the ip level,
 * after supplying a checksum.
 */
static void
icmp_send(struct mbuf *m, struct mbuf *opts)
{
	struct ip *ip = mtod(m, struct ip *);
	int hlen;
	struct icmp *icp;

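	/*
	 * The ICMP checksum covers only the ICMP header and payload,
	 * so step past the IP header, checksum what remains, and then
	 * restore m_data/m_len before handing the packet to ip_output().
	 */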
	hlen = ip->ip_hl << 2;
	m->m_data += hlen;
	m->m_len -= hlen;
	icp = mtod(m, struct icmp *);
	icp->icmp_cksum = 0;
	icp->icmp_cksum = in_cksum(m, ntohs(ip->ip_len) - hlen);
	m->m_data -= hlen;
	m->m_len += hlen;

	(void)ip_output(m, opts, NULL, 0, NULL, NULL);
}
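
/*
 * Return the current time of day as milliseconds since midnight UT,
 * in network byte order -- the format the IP timestamp option expects
 * (RFC 791).  For example, 01:00:00.500 UT yields
 * 3600 * 1000 + 500 = 3600500.
 */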
n_time
iptime(void)
{
	struct timeval atv;
	u_long t;

	microtime(&atv);
	t = (atv.tv_sec % (24*60*60)) * 1000 + atv.tv_usec / 1000;
	return (htonl(t));
}

/*
 * sysctl helper routine for net.inet.icmp.returndatabytes. ensures
 * that the new value is in the correct range.
 */
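/*
 * From userland this is tuned as, e.g.,
 *	sysctl -w net.inet.icmp.returndatabytes=64
 * Values outside the range [8, 512] are rejected with EINVAL below.
 */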
static int
sysctl_net_inet_icmp_returndatabytes(SYSCTLFN_ARGS)
{
	int error, t;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = icmpreturndatabytes;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < 8 || t > 512)
		return EINVAL;

	icmpreturndatabytes = t;

	return 0;
}

/*
 * sysctl helper routine for net.inet.icmp.redirtimeout. ensures that
 * the given value is not less than zero and then resets the timeout
 * queue.
 */
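/*
 * From userland: e.g. "sysctl -w net.inet.icmp.redirtimeout=600".
 * Writing 0 tears down the timeout queue below, so redirect-generated
 * routes stop expiring.
 */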
static int
sysctl_net_inet_icmp_redirtimeout(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	mutex_enter(&icmp_mtx);

	node = *rnode;
	node.sysctl_data = &tmp;
	tmp = icmp_redirtimeout;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		goto out;
	if (tmp < 0) {
		error = EINVAL;
		goto out;
	}
	icmp_redirtimeout = tmp;

	/*
	 * was it a *defined* side-effect that anyone even *reading*
	 * this value causes these things to happen?
	 */
	if (icmp_redirect_timeout_q != NULL) {
		if (icmp_redirtimeout == 0) {
			rt_timer_queue_destroy(icmp_redirect_timeout_q);
			icmp_redirect_timeout_q = NULL;
		} else {
			rt_timer_queue_change(icmp_redirect_timeout_q,
			    icmp_redirtimeout);
		}
	} else if (icmp_redirtimeout > 0) {
		icmp_redirect_timeout_q =
		    rt_timer_queue_create(icmp_redirtimeout);
	}
	error = 0;
out:
	mutex_exit(&icmp_mtx);
	return error;
}

static int
sysctl_net_inet_icmp_stats(SYSCTLFN_ARGS)
{

	return (NETSTAT_SYSCTL(icmpstat_percpu, ICMP_NSTATS));
}
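
/*
 * Register the net.inet.icmp sysctl subtree.  The nodes created below
 * appear to userland as net.inet.icmp.maskrepl, .returndatabytes,
 * .errppslimit, .rediraccept, .redirtimeout, .stats, .bmcastecho and
 * .dynamic_rt_msg.
 */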
static void
sysctl_netinet_icmp_setup(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet", NULL,
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "icmp",
		       SYSCTL_DESCR("ICMPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maskrepl",
		       SYSCTL_DESCR("Respond to ICMP_MASKREQ messages"),
		       NULL, 0, &icmpmaskrepl, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP,
		       ICMPCTL_MASKREPL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "returndatabytes",
		       SYSCTL_DESCR("Number of bytes to return in an ICMP "
				    "error message"),
		       sysctl_net_inet_icmp_returndatabytes, 0,
		       &icmpreturndatabytes, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP,
		       ICMPCTL_RETURNDATABYTES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "errppslimit",
		       SYSCTL_DESCR("Maximum number of outgoing ICMP error "
				    "messages per second"),
		       NULL, 0, &icmperrppslim, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP,
		       ICMPCTL_ERRPPSLIMIT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "rediraccept",
		       SYSCTL_DESCR("Accept ICMP_REDIRECT messages"),
		       NULL, 0, &icmp_rediraccept, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP,
		       ICMPCTL_REDIRACCEPT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "redirtimeout",
		       SYSCTL_DESCR("Lifetime of ICMP_REDIRECT generated "
				    "routes"),
		       sysctl_net_inet_icmp_redirtimeout, 0,
		       &icmp_redirtimeout, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP,
		       ICMPCTL_REDIRTIMEOUT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "stats",
		       SYSCTL_DESCR("ICMP statistics"),
		       sysctl_net_inet_icmp_stats, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP, ICMPCTL_STATS,
		       CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bmcastecho",
		       SYSCTL_DESCR("Respond to ICMP_ECHO or ICMP_TIMESTAMP "
				    "message to the broadcast or multicast"),
		       NULL, 0, &icmpbmcastecho, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP, ICMPCTL_BMCASTECHO,
		       CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_BOOL, "dynamic_rt_msg",
		       SYSCTL_DESCR("Send routing message for RTF_DYNAMIC"),
		       NULL, 0, &icmp_dynamic_rt_msg, 0,
		       CTL_NET, PF_INET, IPPROTO_ICMP, ICMPCTL_DYNAMIC_RT_MSG,
		       CTL_EOL);
}

void
icmp_statinc(u_int stat)
{

	KASSERT(stat < ICMP_NSTATS);
	ICMP_STATINC(stat);
}

/* Table of common MTUs */
static const u_int mtu_table[] = {
	65535, 65280, 32000, 17914, 9180, 8166,
	4352, 2002, 1492, 1006, 508, 296, 68, 0
};
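
/*
 * These are essentially the MTU plateaus of RFC 1191, used when a
 * Datagram Too Big message carries no usable Next-Hop MTU: the search
 * loop in icmp_mtudisc() below rounds the estimate down to the next
 * lower plateau, e.g. an estimate of 1500 becomes 1492.
 */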

void
icmp_mtudisc(struct icmp *icp, struct in_addr faddr)
{
	struct icmp_mtudisc_callback *mc;
	struct sockaddr *dst = sintosa(&icmpsrc);
	struct rtentry *rt;
	u_long mtu = ntohs(icp->icmp_nextmtu);	/* Why a long? IPv6 */
	int error;

	rt = rtalloc1(dst, 1);
	if (rt == NULL)
		return;

	/* If we didn't get a host route, allocate one */
	if ((rt->rt_flags & RTF_HOST) == 0) {
		struct rtentry *nrt;

		error = rtrequest(RTM_ADD, dst, rt->rt_gateway, NULL,
		    RTF_GATEWAY | RTF_HOST | RTF_DYNAMIC, &nrt);
		if (error) {
			rt_unref(rt);
			return;
		}
		nrt->rt_rmx = rt->rt_rmx;
		rt_newmsg_dynamic(RTM_ADD, nrt);
		rt_unref(rt);
		rt = nrt;
	}

	mutex_enter(&icmp_mtx);
	error = rt_timer_add(rt, icmp_mtudisc_timeout, ip_mtudisc_timeout_q);
	mutex_exit(&icmp_mtx);
	if (error) {
		rt_unref(rt);
		return;
	}
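
	/*
	 * A Next-Hop MTU of zero means the router predates RFC 1191,
	 * so guess: start from the returned ip_len, fall back to the
	 * route's MTU and then the interface MTU, and finally round
	 * down to a plateau from mtu_table above.
	 */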
	if (mtu == 0) {
		int i = 0;

		mtu = ntohs(icp->icmp_ip.ip_len);
		/* Some 4.2BSD-based routers incorrectly adjust the ip_len */
		if (mtu > rt->rt_rmx.rmx_mtu && rt->rt_rmx.rmx_mtu != 0)
			mtu -= (icp->icmp_ip.ip_hl << 2);

		/* If we still can't guess a value, try the route */
		if (mtu == 0) {
			mtu = rt->rt_rmx.rmx_mtu;

			/* If no route mtu, default to the interface mtu */
			if (mtu == 0)
				mtu = rt->rt_ifp->if_mtu;
		}

		for (i = 0; i < sizeof(mtu_table) / sizeof(mtu_table[0]); i++) {
			if (mtu > mtu_table[i]) {
				mtu = mtu_table[i];
				break;
			}
		}
	}

	/*
	 * XXX: RTV_MTU is overloaded, since the admin can set it
	 * to turn off PMTU for a route, and the kernel can
	 * set it to indicate a serious problem with PMTU
	 * on a route.  We should be using a separate flag
	 * for the kernel to indicate this.
	 */
	if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) {
		if (mtu < 296 || mtu > rt->rt_ifp->if_mtu)
			rt->rt_rmx.rmx_locks |= RTV_MTU;
		else if (rt->rt_rmx.rmx_mtu > mtu ||
		    rt->rt_rmx.rmx_mtu == 0) {
			ICMP_STATINC(ICMP_STAT_PMTUCHG);
			rt->rt_rmx.rmx_mtu = mtu;
		}
	}
Make the routing table and rtcaches MP-safe
See the following descriptions for details.
Proposed on tech-kern and tech-net
Overview
--------
We protect the routing table with a rwock and protect
rtcaches with another rwlock. Each rtentry is protected
from being freed or updated via reference counting and psref.
Global rwlocks
--------------
There are two rwlocks; one for the routing table (rt_lock) and
the other for rtcaches (rtcache_lock). rtcache_lock covers
all existing rtcaches; there may have room for optimizations
(future work).
The locking order is rtcache_lock first and rt_lock is next.
rtentry references
------------------
References to an rtentry are managed with reference counting
and psref. Which of the two mechanisms is used depends on where
the rtentry is obtained: reference counting is used when we
obtain an rtentry directly from the routing table via rtalloc1
and rtrequest{,1}, while psref is used when we obtain an
rtentry from a rtcache via the rtcache_* APIs. In both cases a
caller may sleep/block while holding the obtained rtentry.
We use two different mechanisms because (i) using only
reference counting would hurt performance due to atomic
instructions (the rtcache case), and (ii) ease of
implementation: applying psref to APIs such as rtalloc1 and
rtrequest{,1} requires additional work (adding a local variable
and an argument).
We will eventually migrate to psref alone, but we can only do
that once we have a lockless routing table alternative.
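A rough sketch of the two patterns, with rtcache_lookup as one
representative rtcache_* API (dst and ro are assumed to be set
up by the caller; error handling elided):

	struct rtentry *rt;

	/* Pattern 1: direct lookup; reference counting. */
	rt = rtalloc1(dst, 1);			/* takes a reference */
	if (rt != NULL) {
		/* ... may sleep/block while holding rt ... */
		rt_unref(rt);			/* release the reference */
	}

	/* Pattern 2: via a rtcache; psref. */
	rt = rtcache_lookup(ro, dst);		/* takes a psref */
	if (rt != NULL) {
		/* ... may sleep/block while holding rt ... */
		rtcache_unref(rt, ro);		/* release the psref */
	}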
Reference counting for rtentry
------------------------------
rt_refcnt no longer counts permanent references such as those
from rt_timers and rtcaches; it is used only for temporary
references taken when obtaining an rtentry via rtalloc1 and
rtrequest{,1}. We can do this because destroying an rtentry
always involves removing the rt_timer and rtcache references to
it, so we do not need to track them. This also makes it easy to
wait for readers to release their references when deleting or
updating an rtentry: we can simply wait until the reference
counter drops to 0 or 1. (If there were permanent references,
the counter could be arbitrary.)
rt_ref increments the reference counter of an rtentry and
rt_unref decrements it. rt_ref is called inside the APIs
(rtalloc1 and rtrequest{,1}), so users do not need to care
about it, but users must call rt_unref on an obtained rtentry
after using it.
rtfree is removed; we use rt_unref and rt_free instead.
rt_unref just decrements the counter of a given rtentry, and
rt_free just tries to destroy a given rtentry.
See the next section for the destruction of rtentries by rt_free.
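The rt_unref/rt_free split shows up in callers roughly as
follows (cf. icmp_mtudisc_timeout later in this file; outline
only):

	struct rtentry *retrt;

	/* Remove the entry from the table ... */
	rtrequest(RTM_DELETE, rt_getkey(rt),
	    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &retrt);
	rt_unref(rt);		/* drop our own reference */
	rt_free(retrt);		/* ... then destroy it once drained */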
Destructions of rtentries
-------------------------
We destroy an rtentry only on rtrequest{,1}(RTM_DELETE); the
original implementation could destroy one in any rtfree call
that dropped the last reference. When using reference counting
or psref, the code is easier to understand if the place where
an rtentry is destroyed is fixed.
rt_free waits for all references to a given rtentry to be
released before actually destroying it. It uses a condition
variable (cv_wait), and psref_target_destroy for psref, to
wait. Unfortunately rtrequest{,1}(RTM_DELETE) can be called in
softint context, where we cannot use cv_wait; in that case we
have to defer the destruction to a workqueue.
rtentry#rt_cv, rtentry#rt_psref and some global variables
(see rt_free_global) are added to implement this procedure.
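A condensed, illustrative sketch of that procedure (rt_free_wq,
rt_free_lock, rt_work and rt_psref_class are stand-ins for the
real machinery around rt_free_global in net/route.c):

	static void
	rt_free(struct rtentry *rt)
	{
		if (cpu_softintr_p()) {
			/* cv_wait is not allowed in softint;
			 * defer the destruction to a workqueue. */
			workqueue_enqueue(rt_free_wq, &rt->rt_work, NULL);
			return;
		}
		/* Wait out psref holders ... */
		psref_target_destroy(&rt->rt_psref, rt_psref_class);
		/* ... and refcnt holders. */
		mutex_enter(&rt_free_lock);
		while (rt->rt_refcnt > 0)
			cv_wait(&rt->rt_cv, &rt_free_lock);
		mutex_exit(&rt_free_lock);
		/* ... actually destroy the rtentry here ... */
	}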
Updates of rtentries
--------------------
One difficulty of using refcnt/psref instead of an rwlock for
rtentries is updating them: we need an additional mechanism to
prevent readers from seeing an rtentry in an inconsistent,
half-updated state.
We introduce the RTF_UPDATING flag for rtentries that are being
updated. While the flag is set on an rtentry, users cannot
acquire it; this keeps users from seeing inconsistent
rtentries.
There are two behaviors when a user tries to acquire an rtentry
with RTF_UPDATING set: a user running in softint context fails
to acquire the rtentry (NULL is returned), while any other user
waits on a cv until the update completes.
The updater's procedure is similar to the destruction of an
rtentry: wait on the cv (and psref) until all readers have
left, then proceed with the update. Global variables (see
rt_update_global) are added to implement this procedure.
Currently we apply this mechanism only to RTM_CHANGE in
rtsock.c; it still needs to be applied to other code paths. See
the "Known issues" section.
psref for rtentry
-----------------
When we obtain an rtentry from a rtcache via the rtcache_*
APIs, psref is used to hold the reference to it. rtcache_ref
acquires a reference to an rtentry with psref, and
rtcache_unref releases the reference after use. rtcache_ref is
called inside the rtcache_* APIs, so users do not need to take
care of it, but users must call rtcache_unref to release the
reference.
The struct psref and the int bound that psref needs are
embedded in struct route, so we do not have to add local
variables and an extra argument to the APIs. However, this adds
a constraint beyond the one reference counting imposes: a
reference to an rtentry held via a rtcache is allowed to just
one caller at a time. So we must not acquire an rtentry via the
same rtcache twice, i.e., recursive use of a rtcache must be
avoided, and a rtcache must also be used by only one LWP/softint
at a time. For the IP forwarding case we have per-CPU rtcaches
used in softint, so the constraint holds. For the rtcache of a
PCB, the constraint is guaranteed by the solock of each PCB.
All other cases (pf, ipf, stf and ipsec) are currently covered
only by the existence of the global locks (softnet_lock and/or
KERNEL_LOCK); if we find cases where the constraint cannot be
guaranteed, we will need to introduce other rtcache APIs that
use simple reference counting.
The psref of a rtcache is created with IPL_SOFTNET, so a
rtcache must not be used at an IPL higher than IPL_SOFTNET.
Note that rtcache_free is used to invalidate a given rtcache;
those uses need no extra care with this change and can stay as
they are.
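Concretely, the single-holder constraint means the following is
fine, but looking up the same rtcache again before
rtcache_unref (recursive use) is not (a sketch; ro is a rtcache
set up elsewhere):

	struct rtentry *rt;

	rt = rtcache_validate(ro);	/* acquires the psref */
	if (rt != NULL) {
		/* ... use rt; must not look up ro again here ... */
		rtcache_unref(rt, ro);	/* releases the psref */
	}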
Performance impact
------------------
With NET_MPSAFE disabled the performance drop is 3%; with it
enabled the drop grows to 11%. The difference comes from the
fact that we take no global locks and do not use psref when
NET_MPSAFE is disabled.
We can improve the NET_MPSAFE-enabled case by reducing the
number of rtcache lookups that use psref; currently we perform
two lookups, but we should be able to eliminate one of them.
This is future work.
Known issues
------------
There are two known issues to be solved. One is that a caller
of rtrequest(RTM_ADD) may change the rtentry afterward (see
rtinit); we need to prevent new references during such an
update, or we may be able to remove that code entirely (this
needs more investigation).
The other is rtredirect, which updates an rtentry. We need to
apply our update mechanism to it, but that is not easy because
rtredirect is called in softint context, where the mechanism
cannot be applied as-is. One solution is to defer rtredirect
to a workqueue, but that requires some code restructuring.

	if (rt != NULL)
		rt_unref(rt);

	/*
	 * Notify protocols that the MTU for this destination
	 * has changed.
	 */
	mutex_enter(&icmp_mtx);
	for (mc = LIST_FIRST(&icmp_mtudisc_callbacks); mc != NULL;
	     mc = LIST_NEXT(mc, mc_list))
		(*mc->mc_func)(faddr);
	mutex_exit(&icmp_mtx);
}

/*
 * Return the next larger or smaller MTU plateau (table from RFC 1191)
 * given current value MTU.  If DIR is less than zero, a larger plateau
 * is returned; otherwise, a smaller value is returned.
 */
u_int
ip_next_mtu(u_int mtu, int dir)	/* XXX unused */
{
	int i;

	for (i = 0; i < (sizeof mtu_table) / (sizeof mtu_table[0]); i++) {
		if (mtu >= mtu_table[i])
			break;
	}

	if (dir < 0) {
		if (i == 0) {
			return 0;
		} else {
			return mtu_table[i - 1];
		}
	} else {
		if (mtu_table[i] == 0) {
			return 0;
		} else if (mtu > mtu_table[i]) {
			return mtu_table[i];
		} else {
			return mtu_table[i + 1];
		}
	}
}

static void
icmp_mtudisc_timeout(struct rtentry *rt, struct rttimer *r)
{
	struct rtentry *retrt;

	KASSERT(rt != NULL);
	rt_assert_referenced(rt);

	if ((rt->rt_flags & (RTF_DYNAMIC | RTF_HOST)) ==
	    (RTF_DYNAMIC | RTF_HOST)) {
		rtrequest(RTM_DELETE, rt_getkey(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &retrt);
		rt_newmsg_dynamic(RTM_DELETE, retrt);
		rt_unref(rt);
		rt_free(retrt);
	} else {
		if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) {
			rt->rt_rmx.rmx_mtu = 0;
		}
	}
}

static void
icmp_redirect_timeout(struct rtentry *rt, struct rttimer *r)
{
	struct rtentry *retrt;

	KASSERT(rt != NULL);
	rt_assert_referenced(rt);

	if ((rt->rt_flags & (RTF_DYNAMIC | RTF_HOST)) ==
	    (RTF_DYNAMIC | RTF_HOST)) {
		rtrequest(RTM_DELETE, rt_getkey(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &retrt);
		rt_newmsg_dynamic(RTM_DELETE, retrt);
		rt_unref(rt);
		rt_free(retrt);
	}
}

/*
 * Perform rate limit check.
 * Returns 0 if it is okay to send the icmp packet.
 * Returns 1 if the router SHOULD NOT send this icmp packet due to rate
 * limitation.
 *
 * XXX per-destination/type check necessary?
 */
int
icmp_ratelimit(const struct in_addr *dst, const int type,
    const int code)
{

	/* PPS limit */
	if (!ppsratecheck(&icmperrppslim_last, &icmperrpps_count,
	    icmperrppslim)) {
		/* The packet is subject to rate limit */
		return 1;
	}

	/* okay to send */
	return 0;
}
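
/*
 * Usage sketch (not code from this function): senders of ICMP
 * errors such as icmp_error() are expected to consult
 * icmp_ratelimit() first and drop the packet when it returns
 * nonzero, along the lines of:
 *
 *	if (icmp_ratelimit(&oip->ip_src, type, code))
 *		goto freeit;	// rate-limited: do not send
 */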