2018-05-01 10:21:39 +03:00
|
|
|
/* $NetBSD: nd6.c,v 1.248 2018/05/01 07:21:39 maxv Exp $ */
|
2002-06-09 01:22:29 +04:00
|
|
|
/* $KAME: nd6.c,v 1.279 2002/06/08 11:16:51 itojun Exp $ */
|
1999-07-04 01:24:45 +04:00
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
|
|
|
|
* All rights reserved.
|
2000-04-16 19:00:56 +04:00
|
|
|
*
|
1999-06-28 10:36:47 +04:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Neither the name of the project nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
2000-04-16 19:00:56 +04:00
|
|
|
*
|
1999-06-28 10:36:47 +04:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2001-11-13 03:56:55 +03:00
|
|
|
#include <sys/cdefs.h>
|
2018-05-01 10:21:39 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: nd6.c,v 1.248 2018/05/01 07:21:39 maxv Exp $");
|
2015-04-30 13:00:04 +03:00
|
|
|
|
2015-08-25 01:21:26 +03:00
|
|
|
#ifdef _KERNEL_OPT
|
2015-04-30 13:00:04 +03:00
|
|
|
#include "opt_net_mpsafe.h"
|
2015-08-25 01:21:26 +03:00
|
|
|
#endif
|
2003-08-23 02:11:44 +04:00
|
|
|
|
2014-10-14 19:29:43 +04:00
|
|
|
#include "bridge.h"
|
|
|
|
#include "carp.h"
|
2001-11-13 03:56:55 +03:00
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2000-03-23 10:01:25 +03:00
|
|
|
#include <sys/callout.h>
|
2017-02-22 06:41:54 +03:00
|
|
|
#include <sys/kmem.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/socket.h>
|
2008-04-24 15:38:36 +04:00
|
|
|
#include <sys/socketvar.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <sys/sockio.h>
|
|
|
|
#include <sys/time.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/errno.h>
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
#include <sys/syslog.h>
|
|
|
|
#include <sys/queue.h>
|
First step of random number subsystem rework described in
<20111022023242.BA26F14A158@mail.netbsd.org>. This change includes
the following:
An initial cleanup and minor reorganization of the entropy pool
code in sys/dev/rnd.c and sys/dev/rndpool.c. Several bugs are
fixed. Some effort is made to accumulate entropy more quickly at
boot time.
A generic interface, "rndsink", is added, for stream generators to
request that they be re-keyed with good quality entropy from the pool
as soon as it is available.
The arc4random()/arc4randbytes() implementation in libkern is
adjusted to use the rndsink interface for rekeying, which helps
address the problem of low-quality keys at boot time.
An implementation of the FIPS 140-2 statistical tests for random
number generator quality is provided (libkern/rngtest.c). This
is based on Greg Rose's implementation from Qualcomm.
A new random stream generator, nist_ctr_drbg, is provided. It is
based on an implementation of the NIST SP800-90 CTR_DRBG by
Henric Jungheim. This generator users AES in a modified counter
mode to generate a backtracking-resistant random stream.
An abstraction layer, "cprng", is provided for in-kernel consumers
of randomness. The arc4random/arc4randbytes API is deprecated for
in-kernel use. It is replaced by "cprng_strong". The current
cprng_fast implementation wraps the existing arc4random
implementation. The current cprng_strong implementation wraps the
new CTR_DRBG implementation. Both interfaces are rekeyed from
the entropy pool automatically at intervals justifiable from best
current cryptographic practice.
In some quick tests, cprng_fast() is about the same speed as
the old arc4randbytes(), and cprng_strong() is about 20% faster
than rnd_extract_data(). Performance is expected to improve.
The AES code in src/crypto/rijndael is no longer an optional
kernel component, as it is required by cprng_strong, which is
not an optional kernel component.
The entropy pool output is subjected to the rngtest tests at
startup time; if it fails, the system will reboot. There is
approximately a 3/10000 chance of a false positive from these
tests. Entropy pool _input_ from hardware random numbers is
subjected to the rngtest tests at attach time, as well as the
FIPS continuous-output test, to detect bad or stuck hardware
RNGs; if any are detected, they are detached, but the system
continues to run.
A problem with rndctl(8) is fixed -- datastructures with
pointers in arrays are no longer passed to userspace (this
was not a security problem, but rather a major issue for
compat32). A new kernel will require a new rndctl.
The sysctl kern.arandom() and kern.urandom() nodes are hooked
up to the new generators, but the /dev/*random pseudodevices
are not, yet.
Manual pages for the new kernel interfaces are forthcoming.
2011-11-20 02:51:18 +04:00
|
|
|
#include <sys/cprng.h>
|
2016-07-11 10:37:00 +03:00
|
|
|
#include <sys/workqueue.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
#include <net/if.h>
|
|
|
|
#include <net/if_dl.h>
|
2015-11-25 09:21:26 +03:00
|
|
|
#include <net/if_llatbl.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <net/if_types.h>
|
|
|
|
#include <net/route.h>
|
|
|
|
#include <net/if_ether.h>
|
|
|
|
#include <net/if_fddi.h>
|
2002-06-03 04:51:47 +04:00
|
|
|
#include <net/if_arc.h>
|
|
|
|
|
|
|
|
#include <netinet/in.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <netinet6/in6_var.h>
|
2000-02-06 15:49:37 +03:00
|
|
|
#include <netinet/ip6.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <netinet6/ip6_var.h>
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application do:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed on in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
#include <netinet6/scope6_var.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
#include <netinet6/nd6.h>
|
2014-06-05 20:06:49 +04:00
|
|
|
#include <netinet6/in6_ifattach.h>
|
2000-02-06 15:49:37 +03:00
|
|
|
#include <netinet/icmp6.h>
|
2008-04-15 07:57:04 +04:00
|
|
|
#include <netinet6/icmp6_private.h>
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
|
|
|
|
#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
|
|
|
|
|
|
|
|
/* timer values */
|
|
|
|
int nd6_prune = 1; /* walk list every 1 seconds */
|
|
|
|
int nd6_delay = 5; /* delay first probe time 5 second */
|
|
|
|
int nd6_umaxtries = 3; /* maximum unicast query */
|
|
|
|
int nd6_mmaxtries = 3; /* maximum multicast query */
|
|
|
|
int nd6_useloopback = 1; /* use loopback interface for local traffic */
|
2001-02-23 11:02:41 +03:00
|
|
|
int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */
|
1999-06-28 10:36:47 +04:00
|
|
|
|
1999-12-13 18:17:17 +03:00
|
|
|
/* preventing too many loops in ND option parsing */
|
|
|
|
int nd6_maxndopt = 10; /* max # of ND options allowed */
|
|
|
|
|
2000-07-06 16:36:18 +04:00
|
|
|
int nd6_maxnudhint = 0; /* max # of subsequent upper layer hints */
|
|
|
|
|
2006-03-06 02:47:08 +03:00
|
|
|
int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */
|
|
|
|
|
2001-02-07 11:59:47 +03:00
|
|
|
#ifdef ND6_DEBUG
|
|
|
|
int nd6_debug = 1;
|
|
|
|
#else
|
|
|
|
int nd6_debug = 0;
|
|
|
|
#endif
|
|
|
|
|
2016-12-19 10:51:34 +03:00
|
|
|
krwlock_t nd6_lock __cacheline_aligned;
|
|
|
|
|
1999-12-13 18:17:17 +03:00
|
|
|
struct nd_drhead nd_defrouter;
|
1999-06-28 10:36:47 +04:00
|
|
|
struct nd_prhead nd_prefix = { 0 };
|
|
|
|
|
|
|
|
int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL;
|
|
|
|
|
2007-11-01 23:33:56 +03:00
|
|
|
static void nd6_setmtu0(struct ifnet *, struct nd_ifinfo *);
|
|
|
|
static void nd6_slowtimo(void *);
|
2016-07-05 09:32:18 +03:00
|
|
|
static int regen_tmpaddr(const struct in6_ifaddr *);
|
2016-04-04 10:37:07 +03:00
|
|
|
static void nd6_free(struct llentry *, int);
|
2007-11-01 23:33:56 +03:00
|
|
|
static void nd6_llinfo_timer(void *);
|
2016-04-01 08:11:38 +03:00
|
|
|
static void nd6_timer(void *);
|
2016-07-11 10:37:00 +03:00
|
|
|
static void nd6_timer_work(struct work *, void *);
|
2015-11-25 09:21:26 +03:00
|
|
|
static void clear_llinfo_pqueue(struct llentry *);
|
2016-12-14 07:05:11 +03:00
|
|
|
static struct nd_opt_hdr *nd6_option(union nd_opts *);
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2016-04-01 08:11:38 +03:00
|
|
|
static callout_t nd6_slowtimo_ch;
|
|
|
|
static callout_t nd6_timer_ch;
|
2016-07-11 10:37:00 +03:00
|
|
|
static struct workqueue *nd6_timer_wq;
|
|
|
|
static struct work nd6_timer_wk;
|
2000-03-23 10:01:25 +03:00
|
|
|
|
2018-01-29 05:02:14 +03:00
|
|
|
static int fill_drlist(void *, size_t *);
|
|
|
|
static int fill_prlist(void *, size_t *);
|
2002-06-09 01:22:29 +04:00
|
|
|
|
2016-12-11 10:37:53 +03:00
|
|
|
static struct ifnet *nd6_defifp;
|
|
|
|
static int nd6_defifindex;
|
|
|
|
|
|
|
|
static int nd6_setdefaultiface(int);
|
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
MALLOC_DEFINE(M_IP6NDP, "NDP", "IPv6 Neighbour Discovery");
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
 * One-time initialization of the Neighbor Discovery module: set up the
 * global ND lock, the default router list, and the callouts/workqueue
 * that drive periodic ND processing.
 */
void
nd6_init(void)
{
	int error;

	rw_init(&nd6_lock);

	/* initialization of the default router list */
	ND_DEFROUTER_LIST_INIT();

	callout_init(&nd6_slowtimo_ch, CALLOUT_MPSAFE);
	callout_init(&nd6_timer_ch, CALLOUT_MPSAFE);

	/*
	 * The periodic expiry scan is deferred to a workqueue
	 * (nd6_timer_work); nd6_timer only triggers it.  Failure to
	 * create the queue is unrecoverable this early in boot.
	 */
	error = workqueue_create(&nd6_timer_wq, "nd6_timer",
	    nd6_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	/* start timer */
	callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
	    nd6_slowtimo, NULL);
	callout_reset(&nd6_timer_ch, hz, nd6_timer, NULL);
}
|
|
|
|
|
2002-05-29 11:53:39 +04:00
|
|
|
struct nd_ifinfo *
|
2007-03-16 02:35:25 +03:00
|
|
|
nd6_ifattach(struct ifnet *ifp)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
2002-05-29 11:53:39 +04:00
|
|
|
struct nd_ifinfo *nd;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2017-02-22 06:41:54 +03:00
|
|
|
nd = kmem_zalloc(sizeof(*nd), KM_SLEEP);
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2002-05-29 11:53:39 +04:00
|
|
|
nd->initialized = 1;
|
2000-04-12 14:36:38 +04:00
|
|
|
|
2002-05-29 11:53:39 +04:00
|
|
|
nd->chlim = IPV6_DEFHLIM;
|
|
|
|
nd->basereachable = REACHABLE_TIME;
|
|
|
|
nd->reachable = ND_COMPUTE_RTIME(nd->basereachable);
|
|
|
|
nd->retrans = RETRANS_TIMER;
|
2014-06-05 20:06:49 +04:00
|
|
|
|
2009-11-06 23:41:22 +03:00
|
|
|
nd->flags = ND6_IFF_PERFORMNUD | ND6_IFF_ACCEPT_RTADV;
|
2002-05-30 09:06:28 +04:00
|
|
|
|
2014-06-05 20:06:49 +04:00
|
|
|
/* A loopback interface always has ND6_IFF_AUTO_LINKLOCAL.
|
|
|
|
* A bridge interface should not have ND6_IFF_AUTO_LINKLOCAL
|
2014-10-18 12:33:23 +04:00
|
|
|
* because one of its members should. */
|
2014-06-05 20:06:49 +04:00
|
|
|
if ((ip6_auto_linklocal && ifp->if_type != IFT_BRIDGE) ||
|
|
|
|
(ifp->if_flags & IFF_LOOPBACK))
|
|
|
|
nd->flags |= ND6_IFF_AUTO_LINKLOCAL;
|
|
|
|
|
|
|
|
/* A loopback interface does not need to accept RTADV.
|
|
|
|
* A bridge interface should not accept RTADV
|
2014-10-18 12:33:23 +04:00
|
|
|
* because one of its members should. */
|
2014-06-05 20:06:49 +04:00
|
|
|
if (ip6_accept_rtadv &&
|
|
|
|
!(ifp->if_flags & IFF_LOOPBACK) &&
|
|
|
|
!(ifp->if_type != IFT_BRIDGE))
|
|
|
|
nd->flags |= ND6_IFF_ACCEPT_RTADV;
|
|
|
|
|
2002-05-30 09:06:28 +04:00
|
|
|
/* XXX: we cannot call nd6_setmtu since ifp is not fully initialized */
|
|
|
|
nd6_setmtu0(ifp, nd);
|
2000-04-12 14:36:38 +04:00
|
|
|
|
2002-05-29 11:53:39 +04:00
|
|
|
return nd;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down per-interface ND state on interface detach.  Addresses are
 * purged first so that nd6_purge sees no live IPv6 addresses; finally
 * the nd_ifinfo allocated by nd6_ifattach is released.
 */
void
nd6_ifdetach(struct ifnet *ifp, struct in6_ifextra *ext)
{

	/* Ensure all IPv6 addresses are purged before calling nd6_purge */
	if_purgeaddrs(ifp, AF_INET6, in6_purgeaddr);
	nd6_purge(ifp, ext);
	kmem_free(ext->nd_ifinfo, sizeof(struct nd_ifinfo));
}
|
|
|
|
|
|
|
|
/*
 * Public wrapper: recompute the IPv6-usable MTU for ifp using the
 * nd_ifinfo already attached to it.  See nd6_setmtu0().
 */
void
nd6_setmtu(struct ifnet *ifp)
{
	nd6_setmtu0(ifp, ND_IFINFO(ifp));
}
|
|
|
|
|
|
|
|
void
|
2007-03-16 02:35:25 +03:00
|
|
|
nd6_setmtu0(struct ifnet *ifp, struct nd_ifinfo *ndi)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
2002-05-30 09:06:28 +04:00
|
|
|
u_int32_t omaxmtu;
|
|
|
|
|
|
|
|
omaxmtu = ndi->maxmtu;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2001-02-21 20:23:09 +03:00
|
|
|
switch (ifp->if_type) {
|
2002-06-03 04:51:47 +04:00
|
|
|
case IFT_ARCNET:
|
|
|
|
ndi->maxmtu = MIN(ARC_PHDS_MAXMTU, ifp->if_mtu); /* RFC2497 */
|
2001-02-21 19:28:43 +03:00
|
|
|
break;
|
2002-05-29 11:53:39 +04:00
|
|
|
case IFT_FDDI:
|
|
|
|
ndi->maxmtu = MIN(FDDIIPMTU, ifp->if_mtu);
|
|
|
|
break;
|
2001-02-21 19:28:43 +03:00
|
|
|
default:
|
|
|
|
ndi->maxmtu = ifp->if_mtu;
|
|
|
|
break;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
2002-05-30 09:06:28 +04:00
|
|
|
/*
|
|
|
|
* Decreasing the interface MTU under IPV6 minimum MTU may cause
|
|
|
|
* undesirable situation. We thus notify the operator of the change
|
|
|
|
* explicitly. The check for omaxmtu is necessary to restrict the
|
2002-06-03 06:09:37 +04:00
|
|
|
* log to the case of changing the MTU, not initializing it.
|
2002-05-30 09:06:28 +04:00
|
|
|
*/
|
|
|
|
if (omaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
|
2003-05-04 17:43:09 +04:00
|
|
|
log(LOG_NOTICE, "nd6_setmtu0: new link MTU on %s (%lu) is too"
|
|
|
|
" small for IPv6 which needs %lu\n",
|
|
|
|
if_name(ifp), (unsigned long)ndi->maxmtu, (unsigned long)
|
|
|
|
IPV6_MMTU);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2002-05-29 11:53:39 +04:00
|
|
|
|
|
|
|
if (ndi->maxmtu > in6_maxmtu)
|
|
|
|
in6_setmaxmtu(); /* check all interfaces just in case */
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2007-03-16 02:35:25 +03:00
|
|
|
nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
2002-05-29 11:53:39 +04:00
|
|
|
|
2008-10-24 20:54:18 +04:00
|
|
|
memset(ndopts, 0, sizeof(*ndopts));
|
1999-06-28 10:36:47 +04:00
|
|
|
ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
|
|
|
|
ndopts->nd_opts_last
|
|
|
|
= (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);
|
|
|
|
|
|
|
|
if (icmp6len == 0) {
|
|
|
|
ndopts->nd_opts_done = 1;
|
|
|
|
ndopts->nd_opts_search = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Take one ND option.
 *
 * Iterator step over the block set up by nd6_option_init(): returns the
 * option under the cursor and advances past it.  nd_opt_len counts
 * 8-octet units (RFC 4861), so the byte length is nd_opt_len << 3.
 * On malformed input (truncated header, zero length, option overrunning
 * the buffer) the whole ndopts state is zeroed and NULL is returned;
 * callers can tell this apart from normal exhaustion because
 * nd_opts_last is then NULL too (see nd6_options()).
 */
static struct nd_opt_hdr *
nd6_option(union nd_opts *ndopts)
{
	struct nd_opt_hdr *nd_opt;
	int olen;

	KASSERT(ndopts != NULL);
	KASSERT(ndopts->nd_opts_last != NULL);

	/* no cursor, or previous call consumed the last option */
	if (ndopts->nd_opts_search == NULL)
		return NULL;
	if (ndopts->nd_opts_done)
		return NULL;

	nd_opt = ndopts->nd_opts_search;

	/* make sure nd_opt_len is inside the buffer */
	if ((void *)&nd_opt->nd_opt_len >= (void *)ndopts->nd_opts_last) {
		memset(ndopts, 0, sizeof(*ndopts));
		return NULL;
	}

	olen = nd_opt->nd_opt_len << 3;
	if (olen == 0) {
		/*
		 * Message validation requires that all included
		 * options have a length that is greater than zero.
		 */
		memset(ndopts, 0, sizeof(*ndopts));
		return NULL;
	}

	/* advance the cursor past this option */
	ndopts->nd_opts_search = (struct nd_opt_hdr *)((char *)nd_opt + olen);
	if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
		/* option overruns the end of buffer, invalid */
		memset(ndopts, 0, sizeof(*ndopts));
		return NULL;
	} else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
		/* reached the end of options chain */
		ndopts->nd_opts_done = 1;
		ndopts->nd_opts_search = NULL;
	}
	return nd_opt;
}
|
|
|
|
|
|
|
|
/*
 * Parse multiple ND options.
 * This function is much easier to use, for ND routines that do not need
 * multiple options of the same type.
 *
 * Fills ndopts->nd_opt_array[] with the first occurrence of each known
 * option type (prefix-information options additionally track the last
 * one seen in nd_opts_pi_end).  Returns 0 on success, -1 on a malformed
 * option (also bumps the ND_BADOPT statistic).  At most nd6_maxndopt
 * options are examined to bound the loop.
 */
int
nd6_options(union nd_opts *ndopts)
{
	struct nd_opt_hdr *nd_opt;
	int i = 0;

	KASSERT(ndopts != NULL);
	KASSERT(ndopts->nd_opts_last != NULL);

	if (ndopts->nd_opts_search == NULL)
		return 0;

	while (1) {
		nd_opt = nd6_option(ndopts);
		/*
		 * nd6_option() zeroes ndopts (including nd_opts_last) on
		 * malformed input, distinguishing errors from exhaustion.
		 */
		if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
			/*
			 * Message validation requires that all included
			 * options have a length that is greater than zero.
			 */
			ICMP6_STATINC(ICMP6_STAT_ND_BADOPT);
			memset(ndopts, 0, sizeof(*ndopts));
			return -1;
		}

		if (nd_opt == NULL)
			goto skip1;

		switch (nd_opt->nd_opt_type) {
		case ND_OPT_SOURCE_LINKADDR:
		case ND_OPT_TARGET_LINKADDR:
		case ND_OPT_MTU:
		case ND_OPT_REDIRECTED_HEADER:
		case ND_OPT_NONCE:
			/* only the first occurrence of these is kept */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
				nd6log(LOG_INFO,
				    "duplicated ND6 option found (type=%d)\n",
				    nd_opt->nd_opt_type);
				/* XXX bark? */
			} else {
				ndopts->nd_opt_array[nd_opt->nd_opt_type]
					= nd_opt;
			}
			break;
		case ND_OPT_PREFIX_INFORMATION:
			/* first occurrence in the array, last in pi_end */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
				ndopts->nd_opt_array[nd_opt->nd_opt_type]
					= nd_opt;
			}
			ndopts->nd_opts_pi_end =
				(struct nd_opt_prefix_info *)nd_opt;
			break;
		default:
			/*
			 * Unknown options must be silently ignored,
			 * to accommodate future extension to the protocol.
			 */
			nd6log(LOG_DEBUG,
			    "nd6_options: unsupported option %d - "
			    "option ignored\n", nd_opt->nd_opt_type);
		}

skip1:
		i++;
		if (i > nd6_maxndopt) {
			ICMP6_STATINC(ICMP6_STAT_ND_TOOMANYOPT);
			nd6log(LOG_INFO, "too many loop in nd opt\n");
			break;
		}

		if (ndopts->nd_opts_done)
			break;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * (Re)arm the per-neighbor expiry callout of ln to fire in xtick ticks.
 *
 * Must be called with ln write-locked.  The scheduled callout carries a
 * reference on ln (taken via LLE_ADDREF below); delays larger than
 * INT_MAX ticks are split, with the remainder parked in ln_ntick and
 * consumed by nd6_llinfo_timer re-arming the timer.
 */
void
nd6_llinfo_settimer(struct llentry *ln, time_t xtick)
{

	CTASSERT(sizeof(time_t) > sizeof(int));
	LLE_WLOCK_ASSERT(ln);

	KASSERT(xtick >= 0);

	/*
	 * We have to take care of a reference leak which occurs if
	 * callout_reset overwrites a pending callout schedule.  Unfortunately
	 * we don't have a mean to know the overwrite, so we need to know it
	 * using callout_stop.  We need to call callout_pending first to exclude
	 * the case that the callout has never been scheduled.
	 */
	if (callout_pending(&ln->la_timer)) {
		bool expired = callout_stop(&ln->la_timer);
		/* a still-pending schedule was cancelled: drop its ref */
		if (!expired)
			LLE_REMREF(ln);
	}

	ln->ln_expire = time_uptime + xtick / hz;
	LLE_ADDREF(ln);
	if (xtick > INT_MAX) {
		/* callout ticks are int-sized; defer the remainder */
		ln->ln_ntick = xtick - INT_MAX;
		callout_reset(&ln->ln_timer_ch, INT_MAX,
		    nd6_llinfo_timer, ln);
	} else {
		ln->ln_ntick = 0;
		callout_reset(&ln->ln_timer_ch, xtick,
		    nd6_llinfo_timer, ln);
	}
}
|
2003-06-27 12:41:08 +04:00
|
|
|
|
2015-11-18 08:16:22 +03:00
|
|
|
/*
|
|
|
|
* Gets source address of the first packet in hold queue
|
|
|
|
* and stores it in @src.
|
|
|
|
* Returns pointer to @src (if hold queue is not empty) or NULL.
|
|
|
|
*/
|
|
|
|
static struct in6_addr *
|
2015-11-25 09:21:26 +03:00
|
|
|
nd6_llinfo_get_holdsrc(struct llentry *ln, struct in6_addr *src)
|
2015-11-18 08:16:22 +03:00
|
|
|
{
|
|
|
|
struct ip6_hdr *hip6;
|
|
|
|
|
|
|
|
if (ln == NULL || ln->ln_hold == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* assuming every packet in ln_hold has the same IP header
|
|
|
|
*/
|
|
|
|
hip6 = mtod(ln->ln_hold, struct ip6_hdr *);
|
|
|
|
/* XXX pullup? */
|
|
|
|
if (sizeof(*hip6) < ln->ln_hold->m_len)
|
|
|
|
*src = hip6->ip6_src;
|
|
|
|
else
|
|
|
|
src = NULL;
|
|
|
|
|
|
|
|
return src;
|
|
|
|
}
|
|
|
|
|
2003-06-27 12:41:08 +04:00
|
|
|
/*
 * Per-neighbor callout: advance the Neighbor Unreachability Detection
 * state machine (RFC 4861) for llentry ln when its timer fires.
 * Depending on state this retransmits Neighbor Solicitations, demotes
 * REACHABLE entries to STALE, garbage-collects STALE/PURGE entries,
 * and frees entries whose solicitations went unanswered (reporting
 * ICMP6 unreachable for a queued packet).
 */
static void
nd6_llinfo_timer(void *arg)
{
	struct llentry *ln = arg;
	struct ifnet *ifp;
	struct nd_ifinfo *ndi = NULL;
	bool send_ns = false;
	const struct in6_addr *daddr6 = NULL;

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	LLE_WLOCK(ln);
	/* entry already unlinked from its table: nothing to do */
	if ((ln->la_flags & LLE_LINKED) == 0)
		goto out;
	/* remainder of a long delay split by nd6_llinfo_settimer */
	if (ln->ln_ntick > 0) {
		nd6_llinfo_settimer(ln, ln->ln_ntick);
		goto out;
	}

	ifp = ln->lle_tbl->llt_ifp;
	KASSERT(ifp != NULL);

	ndi = ND_IFINFO(ifp);

	switch (ln->ln_state) {
	case ND6_LLINFO_INCOMPLETE:
		if (ln->ln_asked < nd6_mmaxtries) {
			/* retry multicast solicitation */
			ln->ln_asked++;
			send_ns = true;
		} else {
			/* resolution failed: free entry, report unreachable */
			struct mbuf *m = ln->ln_hold;
			if (m) {
				struct mbuf *m0;

				/*
				 * assuming every packet in ln_hold has
				 * the same IP header
				 */
				m0 = m->m_nextpkt;
				m->m_nextpkt = NULL;
				ln->ln_hold = m0;
				clear_llinfo_pqueue(ln);
			}
			nd6_free(ln, 0);
			ln = NULL;
			if (m != NULL) {
				icmp6_error2(m, ICMP6_DST_UNREACH,
				    ICMP6_DST_UNREACH_ADDR, 0, ifp);
			}
		}
		break;
	case ND6_LLINFO_REACHABLE:
		if (!ND6_LLINFO_PERMANENT(ln)) {
			ln->ln_state = ND6_LLINFO_STALE;
			nd6_llinfo_settimer(ln, nd6_gctimer * hz);
		}
		break;

	case ND6_LLINFO_PURGE:
	case ND6_LLINFO_STALE:
		/* Garbage Collection(RFC 2461 5.3) */
		if (!ND6_LLINFO_PERMANENT(ln)) {
			nd6_free(ln, 1);
			ln = NULL;
		}
		break;

	case ND6_LLINFO_DELAY:
		if (ndi && (ndi->flags & ND6_IFF_PERFORMNUD) != 0) {
			/* We need NUD */
			ln->ln_asked = 1;
			ln->ln_state = ND6_LLINFO_PROBE;
			/* probes are unicast to the neighbor itself */
			daddr6 = &ln->r_l3addr.addr6;
			send_ns = true;
		} else {
			ln->ln_state = ND6_LLINFO_STALE; /* XXX */
			nd6_llinfo_settimer(ln, nd6_gctimer * hz);
		}
		break;
	case ND6_LLINFO_PROBE:
		if (ln->ln_asked < nd6_umaxtries) {
			ln->ln_asked++;
			daddr6 = &ln->r_l3addr.addr6;
			send_ns = true;
		} else {
			nd6_free(ln, 0);
			ln = NULL;
		}
		break;
	}

	if (send_ns) {
		struct in6_addr src, *psrc;
		const struct in6_addr *taddr6 = &ln->r_l3addr.addr6;

		nd6_llinfo_settimer(ln, ndi->retrans * hz / 1000);
		psrc = nd6_llinfo_get_holdsrc(ln, &src);
		/* drop lock and callout ref before sending the NS */
		LLE_FREE_LOCKED(ln);
		ln = NULL;
		nd6_ns_output(ifp, daddr6, taddr6, psrc, NULL);
	}

out:
	if (ln != NULL)
		LLE_FREE_LOCKED(ln);
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ND6 timer routine to expire default route list and prefix list
|
|
|
|
*/
|
2016-04-01 08:11:38 +03:00
|
|
|
/*
 * Workqueue body of the ND6 periodic timer: expires stale default
 * routers, invalid/deprecated interface addresses, and stale prefixes.
 * Runs in thread (workqueue) context so it may sleep; it re-arms the
 * callout for the next round before doing any work.
 */
static void
nd6_timer_work(struct work *wk, void *arg)
{
	struct nd_defrouter *next_dr, *dr;
	struct nd_prefix *next_pr, *pr;
	struct in6_ifaddr *ia6, *nia6;
	int s, bound;
	struct psref psref;

	/* Schedule the next tick (nd6_prune seconds) before working. */
	callout_reset(&nd6_timer_ch, nd6_prune * hz,
	    nd6_timer, NULL);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	/* expire default router list */
	ND6_WLOCK();
	ND_DEFROUTER_LIST_FOREACH_SAFE(dr, next_dr) {
		/* expire == 0 means a static (never-expiring) entry. */
		if (dr->expire && dr->expire < time_uptime) {
			nd6_defrtrlist_del(dr, NULL);
		}
	}
	ND6_UNLOCK();

	/*
	 * expire interface addresses.
	 * in the past the loop was inside prefix expiry processing.
	 * However, from a stricter spec-conformance standpoint, we should
	 * rather separate address lifetimes and prefix lifetimes.
	 */
	bound = curlwp_bind();
addrloop:
	s = pserialize_read_enter();
	for (ia6 = IN6_ADDRLIST_READER_FIRST(); ia6; ia6 = nia6) {
		nia6 = IN6_ADDRLIST_READER_NEXT(ia6);

		/*
		 * Pin ia6 with a psref so we can leave the pserialize
		 * read section (and thus sleep) while using it.
		 */
		ia6_acquire(ia6, &psref);
		pserialize_read_exit(s);

		/* check address lifetime */
		if (IFA6_IS_INVALID(ia6)) {
			int regen = 0;
			struct ifnet *ifp;

			/*
			 * If the expiring address is temporary, try
			 * regenerating a new one.  This would be useful when
			 * we suspended a laptop PC, then turned it on after a
			 * period that could invalidate all temporary
			 * addresses.  Although we may have to restart the
			 * loop (see below), it must be after purging the
			 * address.  Otherwise, we'd see an infinite loop of
			 * regeneration.
			 */
			if (ip6_use_tempaddr &&
			    (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
				IFNET_LOCK(ia6->ia_ifa.ifa_ifp);
				if (regen_tmpaddr(ia6) == 0)
					regen = 1;
				IFNET_UNLOCK(ia6->ia_ifa.ifa_ifp);
			}

			ifp = ia6->ia_ifa.ifa_ifp;
			IFNET_LOCK(ifp);
			/*
			 * Need to take the lock first to prevent if_detach
			 * from running in6_purgeaddr concurrently.
			 */
			if (!if_is_deactivated(ifp)) {
				/* Drop the psref before freeing the address. */
				ia6_release(ia6, &psref);
				in6_purgeaddr(&ia6->ia_ifa);
			} else {
				/*
				 * ifp is being destroyed, ia6 will be destroyed
				 * by if_detach.
				 */
				ia6_release(ia6, &psref);
			}
			/* ia6 may be freed now; forget the pointer. */
			ia6 = NULL;
			IFNET_UNLOCK(ifp);

			if (regen)
				goto addrloop; /* XXX: see below */
		} else if (IFA6_IS_DEPRECATED(ia6)) {
			int oldflags = ia6->ia6_flags;

			/* Announce the deprecation transition only once. */
			if ((oldflags & IN6_IFF_DEPRECATED) == 0) {
				ia6->ia6_flags |= IN6_IFF_DEPRECATED;
				rt_newaddrmsg(RTM_NEWADDR,
				    (struct ifaddr *)ia6, 0, NULL);
			}

			/*
			 * If a temporary address has just become deprecated,
			 * regenerate a new one if possible.
			 */
			if (ip6_use_tempaddr &&
			    (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
			    (oldflags & IN6_IFF_DEPRECATED) == 0) {

				if (regen_tmpaddr(ia6) == 0) {
					/*
					 * A new temporary address is
					 * generated.
					 * XXX: this means the address chain
					 * has changed while we are still in
					 * the loop.  Although the change
					 * would not cause disaster (because
					 * it's not a deletion, but an
					 * addition,) we'd rather restart the
					 * loop just for safety.  Or does this
					 * significantly reduce performance??
					 */
					ia6_release(ia6, &psref);
					goto addrloop;
				}
			}
		} else {
			/*
			 * A new RA might have made a deprecated address
			 * preferred.
			 */
			if (ia6->ia6_flags & IN6_IFF_DEPRECATED) {
				ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
				rt_newaddrmsg(RTM_NEWADDR,
				    (struct ifaddr *)ia6, 0, NULL);
			}
		}
		/* Re-enter the read section before releasing the psref. */
		s = pserialize_read_enter();
		ia6_release(ia6, &psref);
	}
	pserialize_read_exit(s);
	curlwp_bindx(bound);

	/* expire prefix list */
	ND6_WLOCK();
	ND_PREFIX_LIST_FOREACH_SAFE(pr, next_pr) {
		/*
		 * check prefix lifetime.
		 * since pltime is just for autoconf, pltime processing for
		 * prefix is not necessary.
		 */
		if (pr->ndpr_vltime != ND6_INFINITE_LIFETIME &&
		    time_uptime - pr->ndpr_lastupdate > pr->ndpr_vltime) {
			/*
			 * Just invalidate the prefix here.  Removing it
			 * will be done when purging an associated address.
			 */
			KASSERT(pr->ndpr_refcnt > 0);
			nd6_invalidate_prefix(pr);
		}
	}
	ND6_UNLOCK();

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}
|
|
|
|
|
2016-07-11 10:37:00 +03:00
|
|
|
/*
 * Callout entry point for the ND6 timer.  Runs in softint context, so
 * it only defers the real (possibly sleeping) work to nd6_timer_work
 * via the workqueue.
 */
static void
nd6_timer(void *ignored_arg)
{

	workqueue_enqueue(nd6_timer_wq, &nd6_timer_wk, NULL);
}
|
|
|
|
|
2007-03-16 02:35:25 +03:00
|
|
|
/*
 * Try to regenerate a temporary address for the prefix of a
 * deprecated/invalidated temporary address.
 *
 * ia6: deprecated/invalidated temporary address
 *
 * Returns 0 if a new temporary address was created (or is pending via
 * in6_tmpifadd), -1 otherwise (no suitable public address found, a
 * still-preferred temporary address already exists, or creation failed).
 */
static int
regen_tmpaddr(const struct in6_ifaddr *ia6)
{
	struct ifaddr *ifa;
	struct ifnet *ifp;
	struct in6_ifaddr *public_ifa6 = NULL;
	int s;

	ifp = ia6->ia_ifa.ifa_ifp;
	s = pserialize_read_enter();
	/* Scan the interface for a public autoconf address on the same prefix. */
	IFADDR_READER_FOREACH(ifa, ifp) {
		struct in6_ifaddr *it6;

		if (ifa->ifa_addr->sa_family != AF_INET6)
			continue;

		it6 = (struct in6_ifaddr *)ifa;

		/* ignore no autoconf addresses. */
		if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0)
			continue;

		/* ignore autoconf addresses with different prefixes. */
		if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr)
			continue;

		/*
		 * Now we are looking at an autoconf address with the same
		 * prefix as ours.  If the address is temporary and is still
		 * preferred, do not create another one.  It would be rare, but
		 * could happen, for example, when we resume a laptop PC after
		 * a long period.
		 */
		if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
		    !IFA6_IS_DEPRECATED(it6)) {
			public_ifa6 = NULL;
			break;
		}

		/*
		 * This is a public autoconf address that has the same prefix
		 * as ours.  If it is preferred, keep it.  We can't break the
		 * loop here, because there may be a still-preferred temporary
		 * address with the prefix.
		 */
		if (!IFA6_IS_DEPRECATED(it6))
			public_ifa6 = it6;
	}

	if (public_ifa6 != NULL) {
		int e;
		struct psref psref;

		/* Pin the address so we can sleep outside the read section. */
		ia6_acquire(public_ifa6, &psref);
		pserialize_read_exit(s);
		/*
		 * Random factor is introduced in the preferred lifetime, so
		 * we do not need additional delay (3rd arg to in6_tmpifadd).
		 */
		ND6_WLOCK();
		e = in6_tmpifadd(public_ifa6, 0, 0);
		ND6_UNLOCK();
		if (e != 0) {
			ia6_release(public_ifa6, &psref);
			log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
			    " tmp addr, errno=%d\n", e);
			return -1;
		}
		ia6_release(public_ifa6, &psref);
		return 0;
	}
	pserialize_read_exit(s);

	return -1;
}
|
|
|
|
|
2009-11-06 23:41:22 +03:00
|
|
|
bool
|
|
|
|
nd6_accepts_rtadv(const struct nd_ifinfo *ndi)
|
|
|
|
{
|
|
|
|
switch (ndi->flags & (ND6_IFF_ACCEPT_RTADV|ND6_IFF_OVERRIDE_RTADV)) {
|
|
|
|
case ND6_IFF_OVERRIDE_RTADV|ND6_IFF_ACCEPT_RTADV:
|
|
|
|
return true;
|
|
|
|
case ND6_IFF_ACCEPT_RTADV:
|
|
|
|
return ip6_accept_rtadv != 0;
|
|
|
|
case ND6_IFF_OVERRIDE_RTADV:
|
|
|
|
case 0:
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2000-02-04 17:34:22 +03:00
|
|
|
/*
 * Nuke neighbor cache/prefix/default router management table, right before
 * ifp goes away.
 */
void
nd6_purge(struct ifnet *ifp, struct in6_ifextra *ext)
{
	struct nd_defrouter *dr, *ndr;
	struct nd_prefix *pr, *npr;

	/*
	 * During detach, the ND info might be already removed, but
	 * then is explicitly passed as argument.
	 * Otherwise get it from ifp->if_afdata.
	 */
	if (ext == NULL)
		ext = ifp->if_afdata[AF_INET6];
	if (ext == NULL)
		return;

	ND6_WLOCK();
	/*
	 * Nuke default router list entries toward ifp.
	 * We defer removal of default router list entries that are installed
	 * in the routing table, in order to keep additional side effects as
	 * small as possible.  Hence the two passes below: first the
	 * non-installed entries, then the installed ones.
	 */
	ND_DEFROUTER_LIST_FOREACH_SAFE(dr, ndr) {
		if (dr->installed)
			continue;

		if (dr->ifp == ifp) {
			KASSERT(ext != NULL);
			nd6_defrtrlist_del(dr, ext);
		}
	}

	/* Second pass: entries installed in the routing table. */
	ND_DEFROUTER_LIST_FOREACH_SAFE(dr, ndr) {
		if (!dr->installed)
			continue;

		if (dr->ifp == ifp) {
			KASSERT(ext != NULL);
			nd6_defrtrlist_del(dr, ext);
		}
	}

	/* Nuke prefix list entries toward ifp */
	ND_PREFIX_LIST_FOREACH_SAFE(pr, npr) {
		if (pr->ndpr_ifp == ifp) {
			/*
			 * All addresses referencing pr should be already freed.
			 */
			KASSERT(pr->ndpr_refcnt == 0);
			nd6_prelist_remove(pr);
		}
	}

	/* cancel default outgoing interface setting */
	if (nd6_defifindex == ifp->if_index)
		nd6_setdefaultiface(0);

	/* XXX: too restrictive? */
	if (!ip6_forwarding && ifp->if_afdata[AF_INET6]) {
		struct nd_ifinfo *ndi = ND_IFINFO(ifp);
		if (ndi && nd6_accepts_rtadv(ndi)) {
			/* refresh default router list */
			nd6_defrouter_select();
		}
	}
	ND6_UNLOCK();

	/*
	 * We may not need to nuke the neighbor cache entries here
	 * because the neighbor cache is kept in if_afdata[AF_INET6].
	 * nd6_purge() is invoked by in6_ifdetach() which is called
	 * from if_detach() where everything gets purged.  However
	 * in6_ifdetach is directly called from vlan(4), so we still
	 * need to purge entries here.
	 */
	if (ext->lltable != NULL)
		lltable_purge_entries(ext->lltable);
}
|
|
|
|
|
2016-12-19 06:32:54 +03:00
|
|
|
void
|
|
|
|
nd6_assert_purged(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct nd_defrouter *dr;
|
|
|
|
struct nd_prefix *pr;
|
2017-01-16 10:33:36 +03:00
|
|
|
char ip6buf[INET6_ADDRSTRLEN] __diagused;
|
2016-12-19 06:32:54 +03:00
|
|
|
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-12-19 06:32:54 +03:00
|
|
|
ND_DEFROUTER_LIST_FOREACH(dr) {
|
|
|
|
KASSERTMSG(dr->ifp != ifp,
|
|
|
|
"defrouter %s remains on %s",
|
2017-01-16 18:44:46 +03:00
|
|
|
IN6_PRINT(ip6buf, &dr->rtaddr), ifp->if_xname);
|
2016-12-19 06:32:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
ND_PREFIX_LIST_FOREACH(pr) {
|
|
|
|
KASSERTMSG(pr->ndpr_ifp != ifp,
|
|
|
|
"prefix %s/%d remains on %s",
|
2017-01-16 18:44:46 +03:00
|
|
|
IN6_PRINT(ip6buf, &pr->ndpr_prefix.sin6_addr),
|
2016-12-19 06:32:54 +03:00
|
|
|
pr->ndpr_plen, ifp->if_xname);
|
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2016-12-19 06:32:54 +03:00
|
|
|
}
|
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
struct llentry *
|
|
|
|
nd6_lookup(const struct in6_addr *addr6, const struct ifnet *ifp, bool wlock)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
|
|
|
struct sockaddr_in6 sin6;
|
2016-04-04 10:37:07 +03:00
|
|
|
struct llentry *ln;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2007-11-10 03:07:57 +03:00
|
|
|
sockaddr_in6_init(&sin6, addr6, 0, 0, 0);
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
IF_AFDATA_RLOCK(ifp);
|
|
|
|
ln = lla_lookup(LLTABLE6(ifp), wlock ? LLE_EXCLUSIVE : 0,
|
|
|
|
sin6tosa(&sin6));
|
|
|
|
IF_AFDATA_RUNLOCK(ifp);
|
|
|
|
|
|
|
|
return ln;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct llentry *
|
|
|
|
nd6_create(const struct in6_addr *addr6, const struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct sockaddr_in6 sin6;
|
|
|
|
struct llentry *ln;
|
2017-11-10 10:24:28 +03:00
|
|
|
struct rtentry *rt;
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
sockaddr_in6_init(&sin6, addr6, 0, 0, 0);
|
2017-11-10 10:24:28 +03:00
|
|
|
rt = rtalloc1(sin6tosa(&sin6), 0);
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
IF_AFDATA_WLOCK(ifp);
|
2017-11-10 10:24:28 +03:00
|
|
|
ln = lla_create(LLTABLE6(ifp), LLE_EXCLUSIVE, sin6tosa(&sin6), rt);
|
2016-04-04 10:37:07 +03:00
|
|
|
IF_AFDATA_WUNLOCK(ifp);
|
|
|
|
|
2017-11-10 10:24:28 +03:00
|
|
|
if (rt != NULL)
|
|
|
|
rt_unref(rt);
|
2016-04-04 10:37:07 +03:00
|
|
|
if (ln != NULL)
|
|
|
|
ln->ln_state = ND6_LLINFO_NOSTATE;
|
|
|
|
|
|
|
|
return ln;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Test whether a given IPv6 address is a neighbor or not, ignoring
|
|
|
|
* the actual neighbor cache. The neighbor cache is ignored in order
|
|
|
|
* to not reenter the routing code from within itself.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nd6_is_new_addr_neighbor(const struct sockaddr_in6 *addr, struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct nd_prefix *pr;
|
|
|
|
struct ifaddr *dstaddr;
|
2016-08-01 06:15:30 +03:00
|
|
|
int s;
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* A link-local address is always a neighbor.
|
|
|
|
* XXX: a link does not necessarily specify a single interface.
|
|
|
|
*/
|
|
|
|
if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
|
|
|
|
struct sockaddr_in6 sin6_copy;
|
|
|
|
u_int32_t zone;
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
2016-04-04 10:37:07 +03:00
|
|
|
* We need sin6_copy since sa6_recoverscope() may modify the
|
|
|
|
* content (XXX).
|
1999-06-28 10:36:47 +04:00
|
|
|
*/
|
2016-04-04 10:37:07 +03:00
|
|
|
sin6_copy = *addr;
|
|
|
|
if (sa6_recoverscope(&sin6_copy))
|
|
|
|
return 0; /* XXX: should be impossible */
|
|
|
|
if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
|
|
|
|
return 0;
|
|
|
|
if (sin6_copy.sin6_scope_id == zone)
|
|
|
|
return 1;
|
|
|
|
else
|
|
|
|
return 0;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2000-04-16 19:00:56 +04:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
/*
|
|
|
|
* If the address matches one of our addresses,
|
|
|
|
* it should be a neighbor.
|
|
|
|
* If the address matches one of our on-link prefixes, it should be a
|
|
|
|
* neighbor.
|
|
|
|
*/
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-12-12 06:14:01 +03:00
|
|
|
ND_PREFIX_LIST_FOREACH(pr) {
|
2016-04-04 10:37:07 +03:00
|
|
|
if (pr->ndpr_ifp != ifp)
|
|
|
|
continue;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
|
|
|
|
struct rtentry *rt;
|
|
|
|
|
2016-07-15 10:40:09 +03:00
|
|
|
rt = rtalloc1(sin6tosa(&pr->ndpr_prefix), 0);
|
2016-04-04 10:37:07 +03:00
|
|
|
if (rt == NULL)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* This is the case where multiple interfaces
|
|
|
|
* have the same prefix, but only one is installed
|
|
|
|
* into the routing table and that prefix entry
|
|
|
|
* is not the one being examined here. In the case
|
|
|
|
* where RADIX_MPATH is enabled, multiple route
|
|
|
|
* entries (of the same rt_key value) will be
|
|
|
|
* installed because the interface addresses all
|
|
|
|
* differ.
|
|
|
|
*/
|
|
|
|
if (!IN6_ARE_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
|
|
|
|
&satocsin6(rt_getkey(rt))->sin6_addr)) {
|
Make the routing table and rtcaches MP-safe
See the following descriptions for details.
Proposed on tech-kern and tech-net
Overview
--------
We protect the routing table with a rwock and protect
rtcaches with another rwlock. Each rtentry is protected
from being freed or updated via reference counting and psref.
Global rwlocks
--------------
There are two rwlocks; one for the routing table (rt_lock) and
the other for rtcaches (rtcache_lock). rtcache_lock covers
all existing rtcaches; there may have room for optimizations
(future work).
The locking order is rtcache_lock first and rt_lock is next.
rtentry references
------------------
References to an rtentry is managed with reference counting
and psref. Either of the two mechanisms is used depending on
where a rtentry is obtained. Reference counting is used when
we obtain a rtentry from the routing table directly via
rtalloc1 and rtrequest{,1} while psref is used when we obtain
a rtentry from a rtcache via rtcache_* APIs. In both cases,
a caller can sleep/block with holding an obtained rtentry.
The reasons why we use two different mechanisms are (i) only
using reference counting hurts the performance due to atomic
instructions (rtcache case) (ii) ease of implementation;
applying psref to APIs such rtaloc1 and rtrequest{,1} requires
additional works (adding a local variable and an argument).
We will finally migrate to use only psref but we can do it
when we have a lockless routing table alternative.
Reference counting for rtentry
------------------------------
rt_refcnt now doesn't count permanent references such as for
rt_timers and rtcaches, instead it is used only for temporal
references when obtaining a rtentry via rtalloc1 and rtrequest{,1}.
We can do so because destroying a rtentry always involves
removing references of rt_timers and rtcaches to the rtentry
and we don't need to track such references. This also makes
it easy to wait for readers to release references on deleting
or updating a rtentry, i.e., we can simply wait until the
reference counter is 0 or 1. (If there are permanent references
the counter can be arbitrary.)
rt_ref increments a reference counter of a rtentry and rt_unref
decrements it. rt_ref is called inside APIs (rtalloc1 and
rtrequest{,1} so users don't need to care about it while
users must call rt_unref to an obtained rtentry after using it.
rtfree is removed and we use rt_unref and rt_free instead.
rt_unref now just decrements the counter of a given rtentry
and rt_free just tries to destroy a given rtentry.
See the next section for destructions of rtentries by rt_free.
Destructions of rtentries
-------------------------
We destroy a rtentry only when we call rtrequst{,1}(RTM_DELETE);
the original implementation can destroy in any rtfree where it's
the last reference. If we use reference counting or psref, it's
easy to understand if the place that a rtentry is destroyed is
fixed.
rt_free waits for references to a given rtentry to be released
before actually destroying the rtentry. rt_free uses a condition
variable (cv_wait) (and psref_target_destroy for psref) to wait.
Unfortunately rtrequst{,1}(RTM_DELETE) can be called in softint
that we cannot use cv_wait. In that case, we have to defer the
destruction to a workqueue.
rtentry#rt_cv, rtentry#rt_psref and global variables
(see rt_free_global) are added to conduct the procedure.
Updates of rtentries
--------------------
One difficulty to use refcnt/psref instead of rwlock for rtentry
is updates of rtentries. We need an additional mechanism to
prevent readers from seeing inconsistency of a rtentry being
updated.
We introduce RTF_UPDATING flag to rtentries that are updating.
While the flag is set to a rtentry, users cannot acquire the
rtentry. By doing so, we avoid users to see inconsistent
rtentries.
There are two options when a user tries to acquire a rtentry
with the RTF_UPDATING flag; if a user runs in softint context
the user fails to acquire a rtentry (NULL is returned).
Otherwise a user waits until the update completes by waiting
on cv.
The procedure of a updater is simpler to destruction of
a rtentry. Wait on cv (and psref) and after all readers left,
proceed with the update.
Global variables (see rt_update_global) are added to conduct
the procedure.
Currently we apply the mechanism to only RTM_CHANGE in
rtsock.c. We would have to apply other codes. See
"Known issues" section.
psref for rtentry
-----------------
When we obtain a rtentry from a rtcache via rtcache_* APIs,
psref is used to reference to the rtentry.
rtcache_ref acquires a reference to a rtentry with psref
and rtcache_unref releases the reference after using it.
rtcache_ref is called inside rtcache_* APIs and users don't
need to take care of it while users must call rtcache_unref
to release the reference.
struct psref and int bound that is needed for psref is
embedded into struct route. By doing so we don't need to
add local variables and additional argument to APIs.
However this adds another constraint to psref other than
reference counting one's; holding a reference of an rtentry
via a rtcache is allowed by just one caller at the same time.
So we must not acquire a rtentry via a rtcache twice and
avoid a recursive use of a rtcache. And also a rtcache must
be arranged to be used by a LWP/softint at the same time
somehow. For IP forwarding case, we have per-CPU rtcaches
used in softint so the constraint is guaranteed. For a h
rtcache of a PCB case, the constraint is guaranteed by the
solock of each PCB. Any other cases (pf, ipf, stf and ipsec)
are currently guaranteed by only the existence of the global
locks (softnet_lock and/or KERNEL_LOCK). If we've found the
cases that we cannot guarantee the constraint, we would need
to introduce other rtcache APIs that use simple reference
counting.
psref of rtcache is created with IPL_SOFTNET and so rtcache
shouldn't used at an IPL higher than IPL_SOFTNET.
Note that rtcache_free is used to invalidate a given rtcache.
We don't need another care by my change; just keep them as
they are.
Performance impact
------------------
When NET_MPSAFE is disabled the performance drop is 3% while
when it's enabled the drop is increased to 11%. The difference
comes from that currently we don't take any global locks and
don't use psref if NET_MPSAFE is disabled.
We can optimize the performance of the case of NET_MPSAFE
on by reducing lookups of rtcache that uses psref;
currently we do two lookups but we should be able to trim
one of two. This is a future work.
Known issues
------------
There are two known issues to be solved; one is that
a caller of rtrequest(RTM_ADD) may change rtentry (see rtinit).
We need to prevent new references during the update. Or
we may be able to remove the code (perhaps, need more
investigations).
The other is rtredirect that updates a rtentry. We need
to apply our update mechanism, however it's not easy because
rtredirect is called in softint and we cannot apply our
mechanism simply. One solution is to defer rtredirect to
a workqueue but it requires some code restructuring.
2016-12-12 06:55:57 +03:00
|
|
|
rt_unref(rt);
|
2016-04-04 10:37:07 +03:00
|
|
|
continue;
|
|
|
|
}
|
Make the routing table and rtcaches MP-safe
See the following descriptions for details.
Proposed on tech-kern and tech-net
Overview
--------
We protect the routing table with a rwock and protect
rtcaches with another rwlock. Each rtentry is protected
from being freed or updated via reference counting and psref.
Global rwlocks
--------------
There are two rwlocks; one for the routing table (rt_lock) and
the other for rtcaches (rtcache_lock). rtcache_lock covers
all existing rtcaches; there may have room for optimizations
(future work).
The locking order is rtcache_lock first and rt_lock is next.
rtentry references
------------------
References to an rtentry is managed with reference counting
and psref. Either of the two mechanisms is used depending on
where a rtentry is obtained. Reference counting is used when
we obtain a rtentry from the routing table directly via
rtalloc1 and rtrequest{,1} while psref is used when we obtain
a rtentry from a rtcache via rtcache_* APIs. In both cases,
a caller can sleep/block with holding an obtained rtentry.
The reasons why we use two different mechanisms are (i) only
using reference counting hurts the performance due to atomic
instructions (rtcache case) (ii) ease of implementation;
applying psref to APIs such rtaloc1 and rtrequest{,1} requires
additional works (adding a local variable and an argument).
We will finally migrate to use only psref but we can do it
when we have a lockless routing table alternative.
Reference counting for rtentry
------------------------------
rt_refcnt now doesn't count permanent references such as for
rt_timers and rtcaches, instead it is used only for temporal
references when obtaining a rtentry via rtalloc1 and rtrequest{,1}.
We can do so because destroying a rtentry always involves
removing references of rt_timers and rtcaches to the rtentry
and we don't need to track such references. This also makes
it easy to wait for readers to release references on deleting
or updating a rtentry, i.e., we can simply wait until the
reference counter is 0 or 1. (If there are permanent references
the counter can be arbitrary.)
rt_ref increments a reference counter of a rtentry and rt_unref
decrements it. rt_ref is called inside APIs (rtalloc1 and
rtrequest{,1} so users don't need to care about it while
users must call rt_unref to an obtained rtentry after using it.
rtfree is removed and we use rt_unref and rt_free instead.
rt_unref now just decrements the counter of a given rtentry
and rt_free just tries to destroy a given rtentry.
See the next section for destructions of rtentries by rt_free.
Destructions of rtentries
-------------------------
We destroy a rtentry only when we call rtrequst{,1}(RTM_DELETE);
the original implementation can destroy in any rtfree where it's
the last reference. If we use reference counting or psref, it's
easy to understand if the place that a rtentry is destroyed is
fixed.
rt_free waits for references to a given rtentry to be released
before actually destroying the rtentry. rt_free uses a condition
variable (cv_wait) (and psref_target_destroy for psref) to wait.
Unfortunately rtrequst{,1}(RTM_DELETE) can be called in softint
that we cannot use cv_wait. In that case, we have to defer the
destruction to a workqueue.
rtentry#rt_cv, rtentry#rt_psref and global variables
(see rt_free_global) are added to conduct the procedure.
Updates of rtentries
--------------------
One difficulty to use refcnt/psref instead of rwlock for rtentry
is updates of rtentries. We need an additional mechanism to
prevent readers from seeing inconsistency of a rtentry being
updated.
We introduce RTF_UPDATING flag to rtentries that are updating.
While the flag is set to a rtentry, users cannot acquire the
rtentry. By doing so, we avoid users to see inconsistent
rtentries.
There are two options when a user tries to acquire a rtentry
with the RTF_UPDATING flag; if a user runs in softint context
the user fails to acquire a rtentry (NULL is returned).
Otherwise a user waits until the update completes by waiting
on cv.
The procedure of a updater is simpler to destruction of
a rtentry. Wait on cv (and psref) and after all readers left,
proceed with the update.
Global variables (see rt_update_global) are added to conduct
the procedure.
Currently we apply the mechanism to only RTM_CHANGE in
rtsock.c. We would have to apply other codes. See
"Known issues" section.
psref for rtentry
-----------------
When we obtain a rtentry from a rtcache via rtcache_* APIs,
psref is used to reference to the rtentry.
rtcache_ref acquires a reference to a rtentry with psref
and rtcache_unref releases the reference after using it.
rtcache_ref is called inside rtcache_* APIs and users don't
need to take care of it while users must call rtcache_unref
to release the reference.
struct psref and int bound that is needed for psref is
embedded into struct route. By doing so we don't need to
add local variables and additional argument to APIs.
However this adds another constraint to psref other than
reference counting one's; holding a reference of an rtentry
via a rtcache is allowed by just one caller at the same time.
So we must not acquire a rtentry via a rtcache twice and
avoid a recursive use of a rtcache. And also a rtcache must
be arranged to be used by a LWP/softint at the same time
somehow. For IP forwarding case, we have per-CPU rtcaches
used in softint so the constraint is guaranteed. For a
rtcache of a PCB case, the constraint is guaranteed by the
solock of each PCB. Any other cases (pf, ipf, stf and ipsec)
are currently guaranteed by only the existence of the global
locks (softnet_lock and/or KERNEL_LOCK). If we've found the
cases that we cannot guarantee the constraint, we would need
to introduce other rtcache APIs that use simple reference
counting.
psref of rtcache is created with IPL_SOFTNET and so rtcache
shouldn't be used at an IPL higher than IPL_SOFTNET.
Note that rtcache_free is used to invalidate a given rtcache.
We don't need another care by my change; just keep them as
they are.
Performance impact
------------------
When NET_MPSAFE is disabled the performance drop is 3% while
when it's enabled the drop is increased to 11%. The difference
comes from that currently we don't take any global locks and
don't use psref if NET_MPSAFE is disabled.
We can optimize the performance of the case of NET_MPSAFE
on by reducing lookups of rtcache that uses psref;
currently we do two lookups but we should be able to trim
one of two. This is a future work.
Known issues
------------
There are two known issues to be solved; one is that
a caller of rtrequest(RTM_ADD) may change rtentry (see rtinit).
We need to prevent new references during the update. Or
we may be able to remove the code (perhaps, need more
investigations).
The other is rtredirect that updates a rtentry. We need
to apply our update mechanism, however it's not easy because
rtredirect is called in softint and we cannot apply our
mechanism simply. One solution is to defer rtredirect to
a workqueue but it requires some code restructuring.
2016-12-12 06:55:57 +03:00
|
|
|
rt_unref(rt);
|
2007-03-16 02:35:25 +03:00
|
|
|
}
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
|
2016-12-19 10:51:34 +03:00
|
|
|
&addr->sin6_addr, &pr->ndpr_mask)) {
|
|
|
|
ND6_UNLOCK();
|
2016-04-04 10:37:07 +03:00
|
|
|
return 1;
|
2016-12-19 10:51:34 +03:00
|
|
|
}
|
2016-04-04 10:37:07 +03:00
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2014-01-15 14:25:04 +04:00
|
|
|
|
|
|
|
/*
|
2016-04-04 10:37:07 +03:00
|
|
|
* If the address is assigned on the node of the other side of
|
|
|
|
* a p2p interface, the address should be a neighbor.
|
2014-01-15 14:25:04 +04:00
|
|
|
*/
|
2016-08-01 06:15:30 +03:00
|
|
|
s = pserialize_read_enter();
|
2016-07-15 10:40:09 +03:00
|
|
|
dstaddr = ifa_ifwithdstaddr(sin6tocsa(addr));
|
2016-04-04 10:37:07 +03:00
|
|
|
if (dstaddr != NULL) {
|
|
|
|
if (dstaddr->ifa_ifp == ifp) {
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(s);
|
2016-04-04 10:37:07 +03:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(s);
|
2014-01-15 14:25:04 +04:00
|
|
|
|
1999-12-13 18:17:17 +03:00
|
|
|
/*
|
2016-04-04 10:37:07 +03:00
|
|
|
* If the default router list is empty, all addresses are regarded
|
|
|
|
* as on-link, and thus, as a neighbor.
|
1999-12-13 18:17:17 +03:00
|
|
|
*/
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-04-04 10:37:07 +03:00
|
|
|
if (ND_IFINFO(ifp)->flags & ND6_IFF_ACCEPT_RTADV &&
|
2016-12-19 10:51:34 +03:00
|
|
|
ND_DEFROUTER_LIST_EMPTY() && nd6_defifindex == ifp->if_index) {
|
|
|
|
ND6_UNLOCK();
|
2016-04-04 10:37:07 +03:00
|
|
|
return 1;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2014-01-15 14:25:04 +04:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
return 0;
|
2014-01-15 14:25:04 +04:00
|
|
|
}
|
|
|
|
|
1999-07-06 16:23:19 +04:00
|
|
|
/*
|
|
|
|
* Detect if a given IPv6 address identifies a neighbor on a given link.
|
2000-04-16 19:00:56 +04:00
|
|
|
* XXX: should take care of the destination of a p2p link?
|
1999-07-06 16:23:19 +04:00
|
|
|
*/
|
|
|
|
int
|
KNF: de-__P, bzero -> memset, bcmp -> memcmp. Remove extraneous
parentheses in return statements.
Cosmetic: don't open-code TAILQ_FOREACH().
Cosmetic: change types of variables to avoid oodles of casts: in
in6_src.c, avoid casts by changing several route_in6 pointers
to struct route pointers. Remove unnecessary casts to caddr_t
elsewhere.
Pave the way for eliminating address family-specific route caches:
soon, struct route will not embed a sockaddr, but it will hold
a reference to an external sockaddr, instead. We will set the
destination sockaddr using rtcache_setdst(). (I created a stub
for it, but it isn't used anywhere, yet.) rtcache_free() will
free the sockaddr. I have extracted from rtcache_free() a helper
subroutine, rtcache_clear(). rtcache_clear() will "forget" a
cached route, but it will not forget the destination by releasing
the sockaddr. I use rtcache_clear() instead of rtcache_free()
in rtcache_update(), because rtcache_update() is not supposed
to forget the destination.
Constify:
1 Introduce const accessor for route->ro_dst, rtcache_getdst().
2 Constify the 'dst' argument to ifnet->if_output(). This
led me to constify a lot of code called by output routines.
3 Constify the sockaddr argument to protosw->pr_ctlinput. This
led me to constify a lot of code called by ctlinput routines.
4 Introduce const macros for converting from a generic sockaddr
to family-specific sockaddrs, e.g., sockaddr_in: satocsin6,
satocsin, et cetera.
2007-02-18 01:34:07 +03:00
|
|
|
nd6_is_addr_neighbor(const struct sockaddr_in6 *addr, struct ifnet *ifp)
|
1999-07-06 16:23:19 +04:00
|
|
|
{
|
2002-06-09 01:22:29 +04:00
|
|
|
struct nd_prefix *pr;
|
2016-04-04 10:37:07 +03:00
|
|
|
struct llentry *ln;
|
2015-07-17 05:21:08 +03:00
|
|
|
struct rtentry *rt;
|
1999-07-06 16:23:19 +04:00
|
|
|
|
2000-05-09 15:51:12 +04:00
|
|
|
/*
|
|
|
|
* A link-local address is always a neighbor.
|
2001-10-16 10:24:44 +04:00
|
|
|
* XXX: a link does not necessarily specify a single interface.
|
2000-05-09 15:51:12 +04:00
|
|
|
*/
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application does:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
|
|
|
|
struct sockaddr_in6 sin6_copy;
|
|
|
|
u_int32_t zone;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We need sin6_copy since sa6_recoverscope() may modify the
|
|
|
|
* content (XXX).
|
|
|
|
*/
|
|
|
|
sin6_copy = *addr;
|
|
|
|
if (sa6_recoverscope(&sin6_copy))
|
2007-03-16 02:35:25 +03:00
|
|
|
return 0; /* XXX: should be impossible */
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application does:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
|
2007-03-16 02:35:25 +03:00
|
|
|
return 0;
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application does:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
if (sin6_copy.sin6_scope_id == zone)
|
2007-03-16 02:35:25 +03:00
|
|
|
return 1;
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application does:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
else
|
2007-03-16 02:35:25 +03:00
|
|
|
return 0;
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application does:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
}
|
1999-07-06 16:23:19 +04:00
|
|
|
|
|
|
|
/*
|
2002-06-09 01:22:29 +04:00
|
|
|
* If the address matches one of our on-link prefixes, it should be a
|
|
|
|
* neighbor.
|
1999-07-06 16:23:19 +04:00
|
|
|
*/
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-12-12 06:14:01 +03:00
|
|
|
ND_PREFIX_LIST_FOREACH(pr) {
|
2002-06-09 01:22:29 +04:00
|
|
|
if (pr->ndpr_ifp != ifp)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!(pr->ndpr_stateflags & NDPRF_ONLINK))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
|
2016-12-19 10:51:34 +03:00
|
|
|
&addr->sin6_addr, &pr->ndpr_mask)) {
|
|
|
|
ND6_UNLOCK();
|
2007-03-16 02:35:25 +03:00
|
|
|
return 1;
|
2016-12-19 10:51:34 +03:00
|
|
|
}
|
2002-06-09 01:22:29 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the default router list is empty, all addresses are regarded
|
|
|
|
* as on-link, and thus, as a neighbor.
|
|
|
|
* XXX: we restrict the condition to hosts, because routers usually do
|
|
|
|
* not have the "default router list".
|
|
|
|
*/
|
2016-12-12 06:13:14 +03:00
|
|
|
if (!ip6_forwarding && ND_DEFROUTER_LIST_EMPTY() &&
|
2002-06-09 01:22:29 +04:00
|
|
|
nd6_defifindex == ifp->if_index) {
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2007-03-16 02:35:25 +03:00
|
|
|
return 1;
|
1999-07-06 16:23:19 +04:00
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-07-06 16:23:19 +04:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
if (nd6_is_new_addr_neighbor(addr, ifp))
|
|
|
|
return 1;
|
|
|
|
|
1999-07-06 16:23:19 +04:00
|
|
|
/*
|
2016-04-04 10:37:07 +03:00
|
|
|
* Even if the address matches none of our addresses, it might be
|
|
|
|
* in the neighbor cache or a connected route.
|
1999-07-06 16:23:19 +04:00
|
|
|
*/
|
2016-04-04 10:37:07 +03:00
|
|
|
ln = nd6_lookup(&addr->sin6_addr, ifp, false);
|
|
|
|
if (ln != NULL) {
|
|
|
|
LLE_RUNLOCK(ln);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
rt = rtalloc1(sin6tocsa(addr), 0);
|
|
|
|
if (rt == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((rt->rt_flags & RTF_CONNECTED) && (rt->rt_ifp == ifp
|
|
|
|
#if NBRIDGE > 0
|
|
|
|
|| rt->rt_ifp->if_bridge == ifp->if_bridge
|
|
|
|
#endif
|
|
|
|
#if NCARP > 0
|
|
|
|
|| (ifp->if_type == IFT_CARP && rt->rt_ifp == ifp->if_carpdev) ||
|
|
|
|
(rt->rt_ifp->if_type == IFT_CARP && rt->rt_ifp->if_carpdev == ifp)||
|
|
|
|
(ifp->if_type == IFT_CARP && rt->rt_ifp->if_type == IFT_CARP &&
|
|
|
|
rt->rt_ifp->if_carpdev == ifp->if_carpdev)
|
|
|
|
#endif
|
|
|
|
)) {
|
Make the routing table and rtcaches MP-safe
See the following descriptions for details.
Proposed on tech-kern and tech-net
Overview
--------
We protect the routing table with a rwlock and protect
rtcaches with another rwlock. Each rtentry is protected
from being freed or updated via reference counting and psref.
Global rwlocks
--------------
There are two rwlocks; one for the routing table (rt_lock) and
the other for rtcaches (rtcache_lock). rtcache_lock covers
all existing rtcaches; there may have room for optimizations
(future work).
The locking order is rtcache_lock first and rt_lock is next.
rtentry references
------------------
References to an rtentry is managed with reference counting
and psref. Either of the two mechanisms is used depending on
where a rtentry is obtained. Reference counting is used when
we obtain a rtentry from the routing table directly via
rtalloc1 and rtrequest{,1} while psref is used when we obtain
a rtentry from a rtcache via rtcache_* APIs. In both cases,
a caller can sleep/block with holding an obtained rtentry.
The reasons why we use two different mechanisms are (i) only
using reference counting hurts the performance due to atomic
instructions (rtcache case) (ii) ease of implementation;
applying psref to APIs such as rtalloc1 and rtrequest{,1} requires
additional work (adding a local variable and an argument).
We will finally migrate to use only psref but we can do it
when we have a lockless routing table alternative.
Reference counting for rtentry
------------------------------
rt_refcnt now doesn't count permanent references such as for
rt_timers and rtcaches, instead it is used only for temporal
references when obtaining a rtentry via rtalloc1 and rtrequest{,1}.
We can do so because destroying a rtentry always involves
removing references of rt_timers and rtcaches to the rtentry
and we don't need to track such references. This also makes
it easy to wait for readers to release references on deleting
or updating a rtentry, i.e., we can simply wait until the
reference counter is 0 or 1. (If there are permanent references
the counter can be arbitrary.)
rt_ref increments a reference counter of a rtentry and rt_unref
decrements it. rt_ref is called inside APIs (rtalloc1 and
rtrequest{,1} so users don't need to care about it while
users must call rt_unref to an obtained rtentry after using it.
rtfree is removed and we use rt_unref and rt_free instead.
rt_unref now just decrements the counter of a given rtentry
and rt_free just tries to destroy a given rtentry.
See the next section for destructions of rtentries by rt_free.
Destructions of rtentries
-------------------------
We destroy a rtentry only when we call rtrequest{,1}(RTM_DELETE);
the original implementation can destroy in any rtfree where it's
the last reference. If we use reference counting or psref, it's
easy to understand if the place that a rtentry is destroyed is
fixed.
rt_free waits for references to a given rtentry to be released
before actually destroying the rtentry. rt_free uses a condition
variable (cv_wait) (and psref_target_destroy for psref) to wait.
Unfortunately rtrequest{,1}(RTM_DELETE) can be called in softint
context where we cannot use cv_wait. In that case, we have to defer the
destruction to a workqueue.
rtentry#rt_cv, rtentry#rt_psref and global variables
(see rt_free_global) are added to conduct the procedure.
Updates of rtentries
--------------------
One difficulty to use refcnt/psref instead of rwlock for rtentry
is updates of rtentries. We need an additional mechanism to
prevent readers from seeing inconsistency of a rtentry being
updated.
We introduce RTF_UPDATING flag to rtentries that are updating.
While the flag is set to a rtentry, users cannot acquire the
rtentry. By doing so, we prevent users from seeing inconsistent
rtentries.
There are two options when a user tries to acquire a rtentry
with the RTF_UPDATING flag; if a user runs in softint context
the user fails to acquire a rtentry (NULL is returned).
Otherwise a user waits until the update completes by waiting
on cv.
The procedure of an updater is similar to the destruction of
a rtentry. Wait on cv (and psref) and after all readers left,
proceed with the update.
Global variables (see rt_update_global) are added to conduct
the procedure.
Currently we apply the mechanism to only RTM_CHANGE in
rtsock.c. We would have to apply other codes. See
"Known issues" section.
psref for rtentry
-----------------
When we obtain a rtentry from a rtcache via rtcache_* APIs,
psref is used to reference to the rtentry.
rtcache_ref acquires a reference to a rtentry with psref
and rtcache_unref releases the reference after using it.
rtcache_ref is called inside rtcache_* APIs and users don't
need to take care of it while users must call rtcache_unref
to release the reference.
struct psref and int bound that is needed for psref is
embedded into struct route. By doing so we don't need to
add local variables and additional argument to APIs.
However this adds another constraint to psref other than
reference counting one's; holding a reference of an rtentry
via a rtcache is allowed by just one caller at the same time.
So we must not acquire a rtentry via a rtcache twice and
avoid a recursive use of a rtcache. And also a rtcache must
be arranged to be used by a LWP/softint at the same time
somehow. For IP forwarding case, we have per-CPU rtcaches
used in softint so the constraint is guaranteed. For a
rtcache of a PCB case, the constraint is guaranteed by the
solock of each PCB. Any other cases (pf, ipf, stf and ipsec)
are currently guaranteed by only the existence of the global
locks (softnet_lock and/or KERNEL_LOCK). If we've found the
cases that we cannot guarantee the constraint, we would need
to introduce other rtcache APIs that use simple reference
counting.
psref of rtcache is created with IPL_SOFTNET and so rtcache
shouldn't be used at an IPL higher than IPL_SOFTNET.
Note that rtcache_free is used to invalidate a given rtcache.
We don't need another care by my change; just keep them as
they are.
Performance impact
------------------
When NET_MPSAFE is disabled the performance drop is 3% while
when it's enabled the drop is increased to 11%. The difference
comes from that currently we don't take any global locks and
don't use psref if NET_MPSAFE is disabled.
We can optimize the performance of the case of NET_MPSAFE
on by reducing lookups of rtcache that uses psref;
currently we do two lookups but we should be able to trim
one of two. This is a future work.
Known issues
------------
There are two known issues to be solved; one is that
a caller of rtrequest(RTM_ADD) may change rtentry (see rtinit).
We need to prevent new references during the update. Or
we may be able to remove the code (perhaps, need more
investigations).
The other is rtredirect that updates a rtentry. We need
to apply our update mechanism, however it's not easy because
rtredirect is called in softint and we cannot apply our
mechanism simply. One solution is to defer rtredirect to
a workqueue but it requires some code restructuring.
2016-12-12 06:55:57 +03:00
|
|
|
rt_unref(rt);
|
2007-03-16 02:35:25 +03:00
|
|
|
return 1;
|
2015-07-17 05:21:08 +03:00
|
|
|
}
|
Make the routing table and rtcaches MP-safe
See the following descriptions for details.
Proposed on tech-kern and tech-net
Overview
--------
We protect the routing table with a rwlock and protect
rtcaches with another rwlock. Each rtentry is protected
from being freed or updated via reference counting and psref.
Global rwlocks
--------------
There are two rwlocks; one for the routing table (rt_lock) and
the other for rtcaches (rtcache_lock). rtcache_lock covers
all existing rtcaches; there may have room for optimizations
(future work).
The locking order is rtcache_lock first and rt_lock is next.
rtentry references
------------------
References to an rtentry is managed with reference counting
and psref. Either of the two mechanisms is used depending on
where a rtentry is obtained. Reference counting is used when
we obtain a rtentry from the routing table directly via
rtalloc1 and rtrequest{,1} while psref is used when we obtain
a rtentry from a rtcache via rtcache_* APIs. In both cases,
a caller can sleep/block with holding an obtained rtentry.
The reasons why we use two different mechanisms are (i) only
using reference counting hurts the performance due to atomic
instructions (rtcache case) (ii) ease of implementation;
applying psref to APIs such as rtalloc1 and rtrequest{,1} requires
additional work (adding a local variable and an argument).
We will finally migrate to use only psref but we can do it
when we have a lockless routing table alternative.
Reference counting for rtentry
------------------------------
rt_refcnt now doesn't count permanent references such as for
rt_timers and rtcaches, instead it is used only for temporal
references when obtaining a rtentry via rtalloc1 and rtrequest{,1}.
We can do so because destroying a rtentry always involves
removing references of rt_timers and rtcaches to the rtentry
and we don't need to track such references. This also makes
it easy to wait for readers to release references on deleting
or updating a rtentry, i.e., we can simply wait until the
reference counter is 0 or 1. (If there are permanent references
the counter can be arbitrary.)
rt_ref increments a reference counter of a rtentry and rt_unref
decrements it. rt_ref is called inside APIs (rtalloc1 and
rtrequest{,1} so users don't need to care about it while
users must call rt_unref to an obtained rtentry after using it.
rtfree is removed and we use rt_unref and rt_free instead.
rt_unref now just decrements the counter of a given rtentry
and rt_free just tries to destroy a given rtentry.
See the next section for destructions of rtentries by rt_free.
Destructions of rtentries
-------------------------
We destroy a rtentry only when we call rtrequest{,1}(RTM_DELETE);
the original implementation can destroy in any rtfree where it's
the last reference. If we use reference counting or psref, it's
easy to understand if the place that a rtentry is destroyed is
fixed.
rt_free waits for references to a given rtentry to be released
before actually destroying the rtentry. rt_free uses a condition
variable (cv_wait) (and psref_target_destroy for psref) to wait.
Unfortunately rtrequest{,1}(RTM_DELETE) can be called in softint
context where we cannot use cv_wait. In that case, we have to defer the
destruction to a workqueue.
rtentry#rt_cv, rtentry#rt_psref and global variables
(see rt_free_global) are added to conduct the procedure.
Updates of rtentries
--------------------
One difficulty to use refcnt/psref instead of rwlock for rtentry
is updates of rtentries. We need an additional mechanism to
prevent readers from seeing inconsistency of a rtentry being
updated.
We introduce RTF_UPDATING flag to rtentries that are updating.
While the flag is set to a rtentry, users cannot acquire the
rtentry. By doing so, we prevent users from seeing inconsistent
rtentries.
There are two options when a user tries to acquire a rtentry
with the RTF_UPDATING flag; if a user runs in softint context
the user fails to acquire a rtentry (NULL is returned).
Otherwise a user waits until the update completes by waiting
on cv.
The procedure of an updater is similar to the destruction of
a rtentry. Wait on cv (and psref) and after all readers left,
proceed with the update.
Global variables (see rt_update_global) are added to conduct
the procedure.
Currently we apply the mechanism to only RTM_CHANGE in
rtsock.c. We would have to apply other codes. See
"Known issues" section.
psref for rtentry
-----------------
When we obtain a rtentry from a rtcache via rtcache_* APIs,
psref is used to reference to the rtentry.
rtcache_ref acquires a reference to a rtentry with psref
and rtcache_unref releases the reference after using it.
rtcache_ref is called inside rtcache_* APIs and users don't
need to take care of it while users must call rtcache_unref
to release the reference.
struct psref and int bound that is needed for psref is
embedded into struct route. By doing so we don't need to
add local variables and additional argument to APIs.
However this adds another constraint to psref other than
reference counting one's; holding a reference of an rtentry
via a rtcache is allowed by just one caller at the same time.
So we must not acquire a rtentry via a rtcache twice and
avoid a recursive use of a rtcache. And also a rtcache must
be arranged to be used by a LWP/softint at the same time
somehow. For IP forwarding case, we have per-CPU rtcaches
used in softint so the constraint is guaranteed. For a
rtcache of a PCB case, the constraint is guaranteed by the
solock of each PCB. Any other cases (pf, ipf, stf and ipsec)
are currently guaranteed by only the existence of the global
locks (softnet_lock and/or KERNEL_LOCK). If we've found the
cases that we cannot guarantee the constraint, we would need
to introduce other rtcache APIs that use simple reference
counting.
psref of rtcache is created with IPL_SOFTNET and so rtcache
shouldn't be used at an IPL higher than IPL_SOFTNET.
Note that rtcache_free is used to invalidate a given rtcache.
We don't need another care by my change; just keep them as
they are.
Performance impact
------------------
When NET_MPSAFE is disabled the performance drop is 3% while
when it's enabled the drop is increased to 11%. The difference
comes from that currently we don't take any global locks and
don't use psref if NET_MPSAFE is disabled.
We can optimize the performance of the case of NET_MPSAFE
on by reducing lookups of rtcache that uses psref;
currently we do two lookups but we should be able to trim
one of two. This is a future work.
Known issues
------------
There are two known issues to be solved; one is that
a caller of rtrequest(RTM_ADD) may change rtentry (see rtinit).
We need to prevent new references during the update. Or
we may be able to remove the code (perhaps, need more
investigations).
The other is rtredirect that updates a rtentry. We need
to apply our update mechanism, however it's not easy because
rtredirect is called in softint and we cannot apply our
mechanism simply. One solution is to defer rtredirect to
a workqueue but it requires some code restructuring.
2016-12-12 06:55:57 +03:00
|
|
|
rt_unref(rt);
|
1999-07-06 16:23:19 +04:00
|
|
|
|
2007-03-16 02:35:25 +03:00
|
|
|
return 0;
|
1999-07-06 16:23:19 +04:00
|
|
|
}
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
|
|
|
* Free an nd6 llinfo entry.
|
2001-10-17 14:55:09 +04:00
|
|
|
* Since the function would cause significant changes in the kernel, DO NOT
|
|
|
|
* make it global, unless you have a strong reason for the change, and are sure
|
|
|
|
* that the change is safe.
|
1999-06-28 10:36:47 +04:00
|
|
|
*/
|
2015-11-25 09:21:26 +03:00
|
|
|
static void
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_free(struct llentry *ln, int gc)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
1999-12-13 18:17:17 +03:00
|
|
|
struct nd_defrouter *dr;
|
2016-04-04 10:37:07 +03:00
|
|
|
struct ifnet *ifp;
|
|
|
|
struct in6_addr *in6;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2015-11-25 09:21:26 +03:00
|
|
|
KASSERT(ln != NULL);
|
|
|
|
LLE_WLOCK_ASSERT(ln);
|
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
ifp = ln->lle_tbl->llt_ifp;
|
|
|
|
in6 = &ln->r_l3addr.addr6;
|
2000-02-26 11:39:18 +03:00
|
|
|
/*
|
2001-10-17 14:55:09 +04:00
|
|
|
* we used to have pfctlinput(PRC_HOSTDEAD) here.
|
|
|
|
* even though it is not harmful, it was not really necessary.
|
2000-02-26 11:39:18 +03:00
|
|
|
*/
|
|
|
|
|
2003-06-24 11:54:47 +04:00
|
|
|
if (!ip6_forwarding) {
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_WLOCK();
|
2016-12-11 10:38:50 +03:00
|
|
|
dr = nd6_defrouter_lookup(in6, ifp);
|
2001-10-17 14:55:09 +04:00
|
|
|
|
|
|
|
if (dr != NULL && dr->expire &&
|
|
|
|
ln->ln_state == ND6_LLINFO_STALE && gc) {
|
|
|
|
/*
|
|
|
|
* If the reason for the deletion is just garbage
|
|
|
|
* collection, and the neighbor is an active default
|
|
|
|
* router, do not delete it. Instead, reset the GC
|
|
|
|
* timer using the router's lifetime.
|
|
|
|
* Simply deleting the entry would affect default
|
|
|
|
* router selection, which is not necessarily a good
|
|
|
|
* thing, especially when we're using router preference
|
|
|
|
* values.
|
|
|
|
* XXX: the check for ln_state would be redundant,
|
|
|
|
* but we intentionally keep it just in case.
|
|
|
|
*/
|
2015-08-07 11:11:33 +03:00
|
|
|
if (dr->expire > time_uptime)
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_llinfo_settimer(ln,
|
2015-08-07 11:11:33 +03:00
|
|
|
(dr->expire - time_uptime) * hz);
|
2003-06-27 12:41:08 +04:00
|
|
|
else
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_llinfo_settimer(ln, nd6_gctimer * hz);
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2015-12-18 12:04:33 +03:00
|
|
|
LLE_WUNLOCK(ln);
|
2015-11-25 09:21:26 +03:00
|
|
|
return;
|
2001-10-17 14:55:09 +04:00
|
|
|
}
|
|
|
|
|
1999-12-13 18:17:17 +03:00
|
|
|
if (ln->ln_router || dr) {
|
2016-04-10 11:15:52 +03:00
|
|
|
/*
|
2016-12-11 10:38:50 +03:00
|
|
|
* We need to unlock to avoid a LOR with nd6_rt_flush()
|
2016-04-10 11:15:52 +03:00
|
|
|
* with the rnh and for the calls to
|
2016-12-11 10:38:50 +03:00
|
|
|
* nd6_pfxlist_onlink_check() and nd6_defrouter_select() in the
|
2016-04-10 11:15:52 +03:00
|
|
|
* block further down for calls into nd6_lookup().
|
|
|
|
* We still hold a ref.
|
|
|
|
*/
|
|
|
|
LLE_WUNLOCK(ln);
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
2016-12-11 10:38:50 +03:00
|
|
|
* nd6_rt_flush must be called whether or not the neighbor
|
1999-12-13 18:17:17 +03:00
|
|
|
* is in the Default Router List.
|
|
|
|
* See a corresponding comment in nd6_na_input().
|
1999-06-28 10:36:47 +04:00
|
|
|
*/
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_rt_flush(in6, ifp);
|
1999-12-13 18:17:17 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (dr) {
|
|
|
|
/*
|
|
|
|
* Unreachablity of a router might affect the default
|
|
|
|
* router selection and on-link detection of advertised
|
|
|
|
* prefixes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Temporarily fake the state to choose a new default
|
|
|
|
* router and to perform on-link determination of
|
2001-10-17 14:55:09 +04:00
|
|
|
* prefixes correctly.
|
1999-12-13 18:17:17 +03:00
|
|
|
* Below the state will be set correctly,
|
|
|
|
* or the entry itself will be deleted.
|
|
|
|
*/
|
|
|
|
ln->ln_state = ND6_LLINFO_INCOMPLETE;
|
|
|
|
|
2001-10-17 14:55:09 +04:00
|
|
|
/*
|
2016-12-11 10:38:50 +03:00
|
|
|
* Since nd6_defrouter_select() does not affect the
|
2001-10-17 14:55:09 +04:00
|
|
|
* on-link determination and MIP6 needs the check
|
|
|
|
* before the default router selection, we perform
|
|
|
|
* the check now.
|
|
|
|
*/
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_pfxlist_onlink_check();
|
2001-10-17 14:55:09 +04:00
|
|
|
|
2002-06-09 01:22:29 +04:00
|
|
|
/*
|
|
|
|
* refresh default router list
|
|
|
|
*/
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrouter_select();
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2016-04-10 11:15:52 +03:00
|
|
|
|
|
|
|
#ifdef __FreeBSD__
|
|
|
|
/*
|
|
|
|
* If this entry was added by an on-link redirect, remove the
|
|
|
|
* corresponding host route.
|
|
|
|
*/
|
|
|
|
if (ln->la_flags & LLE_REDIRECT)
|
|
|
|
nd6_free_redirect(ln);
|
|
|
|
#endif
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
2016-04-10 11:15:52 +03:00
|
|
|
|
|
|
|
if (ln->ln_router || dr)
|
|
|
|
LLE_WLOCK(ln);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
1999-12-13 18:17:17 +03:00
|
|
|
|
2001-02-08 15:57:54 +03:00
|
|
|
/*
|
2016-04-04 10:37:07 +03:00
|
|
|
* Save to unlock. We still hold an extra reference and will not
|
|
|
|
* free(9) in llentry_free() if someone else holds one as well.
|
2001-02-08 15:57:54 +03:00
|
|
|
*/
|
2016-04-04 10:37:07 +03:00
|
|
|
LLE_WUNLOCK(ln);
|
|
|
|
IF_AFDATA_LOCK(ifp);
|
|
|
|
LLE_WLOCK(ln);
|
|
|
|
|
2016-12-21 11:47:02 +03:00
|
|
|
lltable_free_entry(LLTABLE6(ifp), ln);
|
2016-04-04 10:37:07 +03:00
|
|
|
|
|
|
|
IF_AFDATA_UNLOCK(ifp);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Upper-layer reachability hint for Neighbor Unreachability Detection.
 *
 * Called when an upper layer (e.g. a transport protocol) has evidence
 * that the next hop reached through "rt" is alive; refreshes the
 * neighbor cache entry to REACHABLE so ND does not probe needlessly.
 *
 * XXX cost-effective methods?
 */
void
nd6_nud_hint(struct rtentry *rt)
{
	struct llentry *ln;
	struct ifnet *ifp;

	if (rt == NULL)
		return;

	ifp = rt->rt_ifp;
	/*
	 * Third argument true: the entry must come back locked, since it
	 * is unlocked at "done" below without being locked here.
	 */
	ln = nd6_lookup(&(satocsin6(rt_getkey(rt)))->sin6_addr, ifp, true);
	if (ln == NULL)
		return;

	/* Only refresh entries that have already confirmed reachability. */
	if (ln->ln_state < ND6_LLINFO_REACHABLE)
		goto done;

	/*
	 * if we get upper-layer reachability confirmation many times,
	 * it is possible we have false information.
	 */
	ln->ln_byhint++;
	if (ln->ln_byhint > nd6_maxnudhint)
		goto done;

	ln->ln_state = ND6_LLINFO_REACHABLE;
	/* Re-arm the expiry timer unless the entry never expires. */
	if (!ND6_LLINFO_PERMANENT(ln))
		nd6_llinfo_settimer(ln, ND_IFINFO(rt->rt_ifp)->reachable * hz);

 done:
	LLE_WUNLOCK(ln);

	return;
}
|
|
|
|
|
2016-09-02 10:15:14 +03:00
|
|
|
/*
 * Argument bundle handed to nd6_purge_entry() through
 * lltable_foreach_lle() when garbage-collecting neighbor cache entries.
 */
struct gc_args {
	int gc_entries;		/* remaining purge budget; stop at <= 0 */
	const struct in6_addr *skip_in6; /* address whose entry is exempt */
};
|
|
|
|
|
2015-11-25 09:21:26 +03:00
|
|
|
static int
|
|
|
|
nd6_purge_entry(struct lltable *llt, struct llentry *ln, void *farg)
|
|
|
|
{
|
2016-09-02 10:15:14 +03:00
|
|
|
struct gc_args *args = farg;
|
|
|
|
int *n = &args->gc_entries;
|
|
|
|
const struct in6_addr *skip_in6 = args->skip_in6;
|
2015-11-25 09:21:26 +03:00
|
|
|
|
|
|
|
if (*n <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (ND6_LLINFO_PERMANENT(ln))
|
|
|
|
return 0;
|
|
|
|
|
2016-09-02 10:15:14 +03:00
|
|
|
if (IN6_ARE_ADDR_EQUAL(&ln->r_l3addr.addr6, skip_in6))
|
|
|
|
return 0;
|
|
|
|
|
2015-11-25 09:21:26 +03:00
|
|
|
LLE_WLOCK(ln);
|
|
|
|
if (ln->ln_state > ND6_LLINFO_INCOMPLETE)
|
|
|
|
ln->ln_state = ND6_LLINFO_STALE;
|
|
|
|
else
|
|
|
|
ln->ln_state = ND6_LLINFO_PURGE;
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_llinfo_settimer(ln, 0);
|
2015-11-25 09:21:26 +03:00
|
|
|
LLE_WUNLOCK(ln);
|
|
|
|
|
|
|
|
(*n)--;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-09-02 10:15:14 +03:00
|
|
|
nd6_gc_neighbors(struct lltable *llt, const struct in6_addr *in6)
|
2015-11-25 09:21:26 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
if (ip6_neighborgcthresh >= 0 &&
|
|
|
|
lltable_get_entry_count(llt) >= ip6_neighborgcthresh) {
|
2016-09-02 10:15:14 +03:00
|
|
|
struct gc_args gc_args = {10, in6};
|
2015-11-25 09:21:26 +03:00
|
|
|
/*
|
|
|
|
* XXX entries that are "less recently used" should be
|
|
|
|
* freed first.
|
|
|
|
*/
|
2016-09-02 10:15:14 +03:00
|
|
|
lltable_foreach_lle(llt, nd6_purge_entry, &gc_args);
|
2015-11-25 09:21:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
 * Routing-table hook for IPv6 Neighbor Discovery.
 *
 * Called by the routing code with one of:
 *   RTM_LLINFO_UPD - re-announce the link-layer binding of the address
 *	of info->rti_ifa by sending an unsolicited Neighbor
 *	Advertisement to the link-local all-nodes group;
 *   RTM_ADD        - prepare a newly added route for ND use (install an
 *	AF_LINK gateway, divert local addresses to lo0, join the
 *	solicited-node multicast group for proxied RTF_ANNOUNCE
 *	entries);
 *   RTM_DELETE     - leave the solicited-node group that a proxied
 *	(RTF_ANNOUNCE) entry joined at add time.
 *
 * Indirect (RTF_GATEWAY) routes and non-host routes on interfaces that
 * need no neighbor cache are ignored.
 */
void
nd6_rtrequest(int req, struct rtentry *rt, const struct rt_addrinfo *info)
{
	struct sockaddr *gate = rt->rt_gateway;
	struct ifnet *ifp = rt->rt_ifp;
	uint8_t namelen = strlen(ifp->if_xname), addrlen = ifp->if_addrlen;
	struct ifaddr *ifa;

	RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));

	if (req == RTM_LLINFO_UPD) {
		int rc;
		struct in6_addr *in6;
		struct in6_addr in6_all;
		int anycast;

		if ((ifa = info->rti_ifa) == NULL)
			return;

		in6 = &ifatoia6(ifa)->ia_addr.sin6_addr;
		anycast = ifatoia6(ifa)->ia6_flags & IN6_IFF_ANYCAST;

		/* Destination: link-local all-nodes, scoped to this link. */
		in6_all = in6addr_linklocal_allnodes;
		if ((rc = in6_setscope(&in6_all, ifa->ifa_ifp, NULL)) != 0) {
			log(LOG_ERR, "%s: failed to set scope %s "
			    "(errno=%d)\n", __func__, if_name(ifp), rc);
			return;
		}

		/* XXX don't set Override for proxy addresses */
		nd6_na_output(ifa->ifa_ifp, &in6_all, in6,
		    (anycast ? 0 : ND_NA_FLAG_OVERRIDE)
#if 0
		    | (ip6_forwarding ? ND_NA_FLAG_ROUTER : 0)
#endif
		    , 1, NULL);
		return;
	}

	/* Indirect routes are resolved via their gateway, not via ND. */
	if ((rt->rt_flags & RTF_GATEWAY) != 0)
		return;

	if (nd6_need_cache(ifp) == 0 && (rt->rt_flags & RTF_HOST) == 0) {
		RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
		/*
		 * This is probably an interface direct route for a link
		 * which does not need neighbor caches (e.g. fe80::%lo0/64).
		 * We do not need special treatment below for such a route.
		 * Moreover, the RTF_LLINFO flag which would be set below
		 * would annoy the ndp(8) command.
		 */
		return;
	}

	switch (req) {
	case RTM_ADD: {
		struct psref psref;

		RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
		/*
		 * There is no backward compatibility :)
		 *
		 * if ((rt->rt_flags & RTF_HOST) == 0 &&
		 *     SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
		 *	   rt->rt_flags |= RTF_CLONING;
		 */
		/* XXX should move to route.c? */
		if (rt->rt_flags & (RTF_CONNECTED | RTF_LOCAL)) {
			union {
				struct sockaddr sa;
				struct sockaddr_dl sdl;
				struct sockaddr_storage ss;
			} u;
			/*
			 * Case 1: This route should come from a route to
			 * interface (RTF_CLONING case) or the route should be
			 * treated as on-link but is currently not
			 * (RTF_LLINFO && ln == NULL case).
			 *
			 * Install an empty AF_LINK gateway so the entry can
			 * later hold a resolved link-layer address.
			 */
			if (sockaddr_dl_init(&u.sdl, sizeof(u.ss),
			    ifp->if_index, ifp->if_type,
			    NULL, namelen, NULL, addrlen) == NULL) {
				printf("%s.%d: sockaddr_dl_init(, %zu, ) "
				    "failed on %s\n", __func__, __LINE__,
				    sizeof(u.ss), if_name(ifp));
			}
			rt_setgate(rt, &u.sa);
			/* rt_setgate replaced rt_gateway; refresh the local. */
			gate = rt->rt_gateway;
			RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
			if (gate == NULL) {
				log(LOG_ERR,
				    "%s: rt_setgate failed on %s\n", __func__,
				    if_name(ifp));
				break;
			}

			RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
			if ((rt->rt_flags & RTF_CONNECTED) != 0)
				break;
		}
		RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
		/*
		 * In IPv4 code, we try to announce new RTF_ANNOUNCE entry here.
		 * We don't do that here since llinfo is not ready yet.
		 *
		 * There are also couple of other things to be discussed:
		 * - unsolicited NA code needs improvement beforehand
		 * - RFC2461 says we MAY send multicast unsolicited NA
		 *   (7.2.6 paragraph 4), however, it also says that we
		 *   SHOULD provide a mechanism to prevent multicast NA storm.
		 *   we don't have anything like it right now.
		 *   note that the mechanism needs a mutual agreement
		 *   between proxies, which means that we need to implement
		 *   a new protocol, or a new kludge.
		 * - from RFC2461 6.2.4, host MUST NOT send an unsolicited NA.
		 *   we need to check ip6forwarding before sending it.
		 *   (or should we allow proxy ND configuration only for
		 *   routers? there's no mention about proxy ND from hosts)
		 */
#if 0
		/* XXX it does not work */
		if (rt->rt_flags & RTF_ANNOUNCE)
			nd6_na_output(ifp,
			      &satocsin6(rt_getkey(rt))->sin6_addr,
			      &satocsin6(rt_getkey(rt))->sin6_addr,
			      ip6_forwarding ? ND_NA_FLAG_ROUTER : 0,
			      1, NULL);
#endif

		if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) == 0) {
			RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
			/*
			 * Address resolution isn't necessary for a point to
			 * point link, so we can skip this test for a p2p link.
			 */
			if (gate->sa_family != AF_LINK ||
			    gate->sa_len <
			    sockaddr_dl_measure(namelen, addrlen)) {
				log(LOG_DEBUG,
				    "nd6_rtrequest: bad gateway value: %s\n",
				    if_name(ifp));
				break;
			}
			satosdl(gate)->sdl_type = ifp->if_type;
			satosdl(gate)->sdl_index = ifp->if_index;
			RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));
		}
		RT_DPRINTF("rt_getkey(rt) = %p\n", rt_getkey(rt));

		/*
		 * When called from rt_ifa_addlocal, we cannot depend on that
		 * the address (rt_getkey(rt)) exists in the address list of the
		 * interface. So check RTF_LOCAL instead.
		 */
		if (rt->rt_flags & RTF_LOCAL) {
			if (nd6_useloopback)
				rt->rt_ifp = lo0ifp;	/* XXX */
			break;
		}

		/*
		 * check if rt_getkey(rt) is an address assigned
		 * to the interface.
		 */
		ifa = (struct ifaddr *)in6ifa_ifpwithaddr_psref(ifp,
		    &satocsin6(rt_getkey(rt))->sin6_addr, &psref);
		if (ifa != NULL) {
			if (nd6_useloopback) {
				rt->rt_ifp = lo0ifp;	/* XXX */
				/*
				 * Make sure rt_ifa be equal to the ifaddr
				 * corresponding to the address.
				 * We need this because when we refer
				 * rt_ifa->ia6_flags in ip6_input, we assume
				 * that the rt_ifa points to the address instead
				 * of the loopback address.
				 */
				if (ifa != rt->rt_ifa)
					rt_replace_ifa(rt, ifa);
			}
		} else if (rt->rt_flags & RTF_ANNOUNCE) {
			/* join solicited node multicast for proxy ND */
			if (ifp->if_flags & IFF_MULTICAST) {
				struct in6_addr llsol;
				int error;

				/*
				 * Build the solicited-node multicast address
				 * ff02::1:ffXX:XXXX from the route key.
				 */
				llsol = satocsin6(rt_getkey(rt))->sin6_addr;
				llsol.s6_addr32[0] = htonl(0xff020000);
				llsol.s6_addr32[1] = 0;
				llsol.s6_addr32[2] = htonl(1);
				llsol.s6_addr8[12] = 0xff;
				if (in6_setscope(&llsol, ifp, NULL))
					goto out;
				if (!in6_addmulti(&llsol, ifp, &error, 0)) {
					char ip6buf[INET6_ADDRSTRLEN];
					nd6log(LOG_ERR, "%s: failed to join "
					    "%s (errno=%d)\n", if_name(ifp),
					    IN6_PRINT(ip6buf, &llsol), error);
				}
			}
		}
	out:
		/*
		 * NOTE(review): the goto above reaches here with ifa == NULL;
		 * ifa_release() is presumably a no-op for NULL -- confirm.
		 */
		ifa_release(ifa, &psref);
		/*
		 * If we have too many cache entries, initiate immediate
		 * purging for some entries.
		 *
		 * NOTE(review): a NULL skip address is passed here;
		 * verify the purge callback tolerates it.
		 */
		if (rt->rt_ifp != NULL)
			nd6_gc_neighbors(LLTABLE6(rt->rt_ifp), NULL);
		break;
	}

	case RTM_DELETE:
		/* leave from solicited node multicast for proxy ND */
		if ((rt->rt_flags & RTF_ANNOUNCE) != 0 &&
		    (ifp->if_flags & IFF_MULTICAST) != 0) {
			struct in6_addr llsol;
			struct in6_multi *in6m;

			/* Same solicited-node address as joined at RTM_ADD. */
			llsol = satocsin6(rt_getkey(rt))->sin6_addr;
			llsol.s6_addr32[0] = htonl(0xff020000);
			llsol.s6_addr32[1] = 0;
			llsol.s6_addr32[2] = htonl(1);
			llsol.s6_addr8[12] = 0xff;
			if (in6_setscope(&llsol, ifp, NULL) == 0) {
				in6m = in6_lookup_multi(&llsol, ifp);
				if (in6m)
					in6_delmulti(in6m);
			}
		}
		break;
	}
}
|
|
|
|
|
|
|
|
int
|
2007-03-16 02:35:25 +03:00
|
|
|
nd6_ioctl(u_long cmd, void *data, struct ifnet *ifp)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
|
|
|
struct in6_drlist *drl = (struct in6_drlist *)data;
|
2002-06-09 01:22:29 +04:00
|
|
|
struct in6_oprlist *oprl = (struct in6_oprlist *)data;
|
1999-06-28 10:36:47 +04:00
|
|
|
struct in6_ndireq *ndi = (struct in6_ndireq *)data;
|
|
|
|
struct in6_nbrinfo *nbi = (struct in6_nbrinfo *)data;
|
1999-12-13 18:17:17 +03:00
|
|
|
struct in6_ndifreq *ndif = (struct in6_ndifreq *)data;
|
2002-06-09 01:22:29 +04:00
|
|
|
struct nd_defrouter *dr;
|
1999-06-28 10:36:47 +04:00
|
|
|
struct nd_prefix *pr;
|
|
|
|
int i = 0, error = 0;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGDRLST_IN6:
|
2002-06-09 01:22:29 +04:00
|
|
|
/*
|
|
|
|
* obsolete API, use sysctl under net.inet6.icmp6
|
|
|
|
*/
|
2008-10-24 20:54:18 +04:00
|
|
|
memset(drl, 0, sizeof(*drl));
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-12-12 06:13:14 +03:00
|
|
|
ND_DEFROUTER_LIST_FOREACH(dr) {
|
2006-11-20 07:34:16 +03:00
|
|
|
if (i >= DRLSTSIZ)
|
|
|
|
break;
|
1999-06-28 10:36:47 +04:00
|
|
|
drl->defrouter[i].rtaddr = dr->rtaddr;
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application do:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed on in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
in6_clearscope(&drl->defrouter[i].rtaddr);
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
drl->defrouter[i].flags = dr->flags;
|
|
|
|
drl->defrouter[i].rtlifetime = dr->rtlifetime;
|
2015-08-07 11:11:33 +03:00
|
|
|
drl->defrouter[i].expire = dr->expire ?
|
|
|
|
time_mono_to_wall(dr->expire) : 0;
|
1999-06-28 10:36:47 +04:00
|
|
|
drl->defrouter[i].if_index = dr->ifp->if_index;
|
|
|
|
i++;
|
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
|
|
|
case SIOCGPRLST_IN6:
|
2002-06-09 01:22:29 +04:00
|
|
|
/*
|
|
|
|
* obsolete API, use sysctl under net.inet6.icmp6
|
|
|
|
*
|
|
|
|
* XXX the structure in6_prlist was changed in backward-
|
|
|
|
* incompatible manner. in6_oprlist is used for SIOCGPRLST_IN6,
|
|
|
|
* in6_prlist is used for nd6_sysctl() - fill_prlist().
|
|
|
|
*/
|
2000-02-26 11:39:18 +03:00
|
|
|
/*
|
|
|
|
* XXX meaning of fields, especialy "raflags", is very
|
|
|
|
* differnet between RA prefix list and RR/static prefix list.
|
|
|
|
* how about separating ioctls into two?
|
|
|
|
*/
|
2008-10-24 20:54:18 +04:00
|
|
|
memset(oprl, 0, sizeof(*oprl));
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_RLOCK();
|
2016-12-12 06:14:01 +03:00
|
|
|
ND_PREFIX_LIST_FOREACH(pr) {
|
1999-06-28 10:36:47 +04:00
|
|
|
struct nd_pfxrouter *pfr;
|
|
|
|
int j;
|
|
|
|
|
2006-11-20 07:34:16 +03:00
|
|
|
if (i >= PRLSTSIZ)
|
|
|
|
break;
|
2002-06-09 01:22:29 +04:00
|
|
|
oprl->prefix[i].prefix = pr->ndpr_prefix.sin6_addr;
|
|
|
|
oprl->prefix[i].raflags = pr->ndpr_raf;
|
|
|
|
oprl->prefix[i].prefixlen = pr->ndpr_plen;
|
|
|
|
oprl->prefix[i].vltime = pr->ndpr_vltime;
|
|
|
|
oprl->prefix[i].pltime = pr->ndpr_pltime;
|
|
|
|
oprl->prefix[i].if_index = pr->ndpr_ifp->if_index;
|
2006-03-06 02:47:08 +03:00
|
|
|
if (pr->ndpr_vltime == ND6_INFINITE_LIFETIME)
|
|
|
|
oprl->prefix[i].expire = 0;
|
|
|
|
else {
|
|
|
|
time_t maxexpire;
|
|
|
|
|
|
|
|
/* XXX: we assume time_t is signed. */
|
|
|
|
maxexpire = (-1) &
|
|
|
|
~((time_t)1 <<
|
|
|
|
((sizeof(maxexpire) * 8) - 1));
|
|
|
|
if (pr->ndpr_vltime <
|
|
|
|
maxexpire - pr->ndpr_lastupdate) {
|
2015-08-07 11:11:33 +03:00
|
|
|
time_t expire;
|
|
|
|
expire = pr->ndpr_lastupdate +
|
|
|
|
pr->ndpr_vltime;
|
|
|
|
oprl->prefix[i].expire = expire ?
|
|
|
|
time_mono_to_wall(expire) : 0;
|
2006-03-06 02:47:08 +03:00
|
|
|
} else
|
|
|
|
oprl->prefix[i].expire = maxexpire;
|
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
j = 0;
|
2006-11-20 07:34:16 +03:00
|
|
|
LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
|
1999-06-28 10:36:47 +04:00
|
|
|
if (j < DRLSTSIZ) {
|
2002-06-09 01:22:29 +04:00
|
|
|
#define RTRADDR oprl->prefix[i].advrtr[j]
|
1999-06-28 10:36:47 +04:00
|
|
|
RTRADDR = pfr->router->rtaddr;
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application do:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed on in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
in6_clearscope(&RTRADDR);
|
1999-06-28 10:36:47 +04:00
|
|
|
#undef RTRADDR
|
|
|
|
}
|
|
|
|
j++;
|
|
|
|
}
|
2002-06-09 01:22:29 +04:00
|
|
|
oprl->prefix[i].advrtrs = j;
|
|
|
|
oprl->prefix[i].origin = PR_ORIG_RA;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
i++;
|
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-12-13 18:17:17 +03:00
|
|
|
|
2002-05-29 11:53:39 +04:00
|
|
|
break;
|
|
|
|
case OSIOCGIFINFO_IN6:
|
2006-03-06 02:47:08 +03:00
|
|
|
#define ND ndi->ndi
|
2002-05-29 11:53:39 +04:00
|
|
|
/* XXX: old ndp(8) assumes a positive value for linkmtu. */
|
2006-03-06 02:47:08 +03:00
|
|
|
memset(&ND, 0, sizeof(ND));
|
|
|
|
ND.linkmtu = IN6_LINKMTU(ifp);
|
|
|
|
ND.maxmtu = ND_IFINFO(ifp)->maxmtu;
|
|
|
|
ND.basereachable = ND_IFINFO(ifp)->basereachable;
|
|
|
|
ND.reachable = ND_IFINFO(ifp)->reachable;
|
|
|
|
ND.retrans = ND_IFINFO(ifp)->retrans;
|
|
|
|
ND.flags = ND_IFINFO(ifp)->flags;
|
|
|
|
ND.recalctm = ND_IFINFO(ifp)->recalctm;
|
|
|
|
ND.chlim = ND_IFINFO(ifp)->chlim;
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
|
|
|
case SIOCGIFINFO_IN6:
|
2006-03-06 02:47:08 +03:00
|
|
|
ND = *ND_IFINFO(ifp);
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
2006-03-06 02:47:08 +03:00
|
|
|
case SIOCSIFINFO_IN6:
|
|
|
|
/*
|
|
|
|
* used to change host variables from userland.
|
|
|
|
* intented for a use on router to reflect RA configurations.
|
|
|
|
*/
|
|
|
|
/* 0 means 'unspecified' */
|
|
|
|
if (ND.linkmtu != 0) {
|
|
|
|
if (ND.linkmtu < IPV6_MMTU ||
|
|
|
|
ND.linkmtu > IN6_LINKMTU(ifp)) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ND_IFINFO(ifp)->linkmtu = ND.linkmtu;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ND.basereachable != 0) {
|
|
|
|
int obasereachable = ND_IFINFO(ifp)->basereachable;
|
|
|
|
|
|
|
|
ND_IFINFO(ifp)->basereachable = ND.basereachable;
|
|
|
|
if (ND.basereachable != obasereachable)
|
|
|
|
ND_IFINFO(ifp)->reachable =
|
|
|
|
ND_COMPUTE_RTIME(ND.basereachable);
|
|
|
|
}
|
|
|
|
if (ND.retrans != 0)
|
|
|
|
ND_IFINFO(ifp)->retrans = ND.retrans;
|
|
|
|
if (ND.chlim != 0)
|
|
|
|
ND_IFINFO(ifp)->chlim = ND.chlim;
|
|
|
|
/* FALLTHROUGH */
|
2000-04-16 19:27:59 +04:00
|
|
|
case SIOCSIFINFO_FLAGS:
|
2014-03-20 17:34:35 +04:00
|
|
|
{
|
|
|
|
struct ifaddr *ifa;
|
|
|
|
struct in6_ifaddr *ia;
|
2016-12-19 10:51:34 +03:00
|
|
|
int s;
|
2014-03-20 17:34:35 +04:00
|
|
|
|
|
|
|
if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) &&
|
|
|
|
!(ND.flags & ND6_IFF_IFDISABLED))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the interface is marked as ND6_IFF_IFDISABLED and
|
|
|
|
* has a link-local address with IN6_IFF_DUPLICATED,
|
|
|
|
* do not clear ND6_IFF_IFDISABLED.
|
|
|
|
* See RFC 4862, section 5.4.5.
|
|
|
|
*/
|
|
|
|
int duplicated_linklocal = 0;
|
|
|
|
|
2016-08-01 06:15:30 +03:00
|
|
|
s = pserialize_read_enter();
|
2016-07-07 12:32:01 +03:00
|
|
|
IFADDR_READER_FOREACH(ifa, ifp) {
|
2014-03-20 17:34:35 +04:00
|
|
|
if (ifa->ifa_addr->sa_family != AF_INET6)
|
|
|
|
continue;
|
|
|
|
ia = (struct in6_ifaddr *)ifa;
|
|
|
|
if ((ia->ia6_flags & IN6_IFF_DUPLICATED) &&
|
|
|
|
IN6_IS_ADDR_LINKLOCAL(IA6_IN6(ia)))
|
|
|
|
{
|
|
|
|
duplicated_linklocal = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(s);
|
2014-03-20 17:34:35 +04:00
|
|
|
|
|
|
|
if (duplicated_linklocal) {
|
|
|
|
ND.flags |= ND6_IFF_IFDISABLED;
|
|
|
|
log(LOG_ERR, "Cannot enable an interface"
|
|
|
|
" with a link-local address marked"
|
|
|
|
" duplicate.\n");
|
|
|
|
} else {
|
|
|
|
ND_IFINFO(ifp)->flags &= ~ND6_IFF_IFDISABLED;
|
|
|
|
if (ifp->if_flags & IFF_UP)
|
|
|
|
in6_if_up(ifp);
|
|
|
|
}
|
|
|
|
} else if (!(ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED) &&
|
2016-08-01 06:15:30 +03:00
|
|
|
(ND.flags & ND6_IFF_IFDISABLED)) {
|
|
|
|
int bound = curlwp_bind();
|
2014-03-20 17:34:35 +04:00
|
|
|
/* Mark all IPv6 addresses as tentative. */
|
|
|
|
|
|
|
|
ND_IFINFO(ifp)->flags |= ND6_IFF_IFDISABLED;
|
2016-08-01 06:15:30 +03:00
|
|
|
s = pserialize_read_enter();
|
2016-07-07 12:32:01 +03:00
|
|
|
IFADDR_READER_FOREACH(ifa, ifp) {
|
2016-08-01 06:15:30 +03:00
|
|
|
struct psref psref;
|
2014-03-20 17:34:35 +04:00
|
|
|
if (ifa->ifa_addr->sa_family != AF_INET6)
|
|
|
|
continue;
|
2016-08-01 06:15:30 +03:00
|
|
|
ifa_acquire(ifa, &psref);
|
|
|
|
pserialize_read_exit(s);
|
|
|
|
|
2014-03-20 17:34:35 +04:00
|
|
|
nd6_dad_stop(ifa);
|
2016-08-01 06:15:30 +03:00
|
|
|
|
2014-03-20 17:34:35 +04:00
|
|
|
ia = (struct in6_ifaddr *)ifa;
|
|
|
|
ia->ia6_flags |= IN6_IFF_TENTATIVE;
|
2016-08-01 06:15:30 +03:00
|
|
|
|
|
|
|
s = pserialize_read_enter();
|
|
|
|
ifa_release(ifa, &psref);
|
2014-03-20 17:34:35 +04:00
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(s);
|
|
|
|
curlwp_bindx(bound);
|
2014-03-20 17:34:35 +04:00
|
|
|
}
|
|
|
|
|
2014-06-05 20:06:49 +04:00
|
|
|
if (ND.flags & ND6_IFF_AUTO_LINKLOCAL) {
|
|
|
|
if (!(ND_IFINFO(ifp)->flags & ND6_IFF_AUTO_LINKLOCAL)) {
|
|
|
|
/* auto_linklocal 0->1 transition */
|
|
|
|
|
|
|
|
ND_IFINFO(ifp)->flags |= ND6_IFF_AUTO_LINKLOCAL;
|
|
|
|
in6_ifattach(ifp, NULL);
|
|
|
|
} else if (!(ND.flags & ND6_IFF_IFDISABLED) &&
|
|
|
|
ifp->if_flags & IFF_UP)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* When the IF already has
|
|
|
|
* ND6_IFF_AUTO_LINKLOCAL, no link-local
|
|
|
|
* address is assigned, and IFF_UP, try to
|
|
|
|
* assign one.
|
|
|
|
*/
|
2016-08-01 06:15:30 +03:00
|
|
|
int haslinklocal = 0;
|
2014-06-05 20:06:49 +04:00
|
|
|
|
2016-08-01 06:15:30 +03:00
|
|
|
s = pserialize_read_enter();
|
|
|
|
IFADDR_READER_FOREACH(ifa, ifp) {
|
2014-06-05 20:06:49 +04:00
|
|
|
if (ifa->ifa_addr->sa_family !=AF_INET6)
|
|
|
|
continue;
|
|
|
|
ia = (struct in6_ifaddr *)ifa;
|
|
|
|
if (IN6_IS_ADDR_LINKLOCAL(IA6_IN6(ia))){
|
|
|
|
haslinklocal = 1;
|
|
|
|
break;
|
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
}
|
|
|
|
pserialize_read_exit(s);
|
|
|
|
if (!haslinklocal)
|
2014-06-05 20:06:49 +04:00
|
|
|
in6_ifattach(ifp, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2006-03-06 02:47:08 +03:00
|
|
|
ND_IFINFO(ifp)->flags = ND.flags;
|
2000-04-16 19:27:59 +04:00
|
|
|
break;
|
2006-03-06 02:47:08 +03:00
|
|
|
#undef ND
|
1999-12-13 18:17:17 +03:00
|
|
|
case SIOCSNDFLUSH_IN6: /* XXX: the ioctl name is confusing... */
|
2002-06-09 01:22:29 +04:00
|
|
|
/* sync kernel routing table with the default router list */
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_WLOCK();
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrouter_reset();
|
|
|
|
nd6_defrouter_select();
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
|
|
|
case SIOCSPFXFLUSH_IN6:
|
2002-06-09 01:22:29 +04:00
|
|
|
{
|
1999-06-28 10:36:47 +04:00
|
|
|
/* flush all the prefix advertised by routers */
|
2005-05-30 01:43:51 +04:00
|
|
|
struct nd_prefix *pfx, *next;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2016-12-19 10:51:34 +03:00
|
|
|
restart:
|
|
|
|
ND6_WLOCK();
|
2016-12-12 06:14:01 +03:00
|
|
|
ND_PREFIX_LIST_FOREACH_SAFE(pfx, next) {
|
2002-06-09 01:22:29 +04:00
|
|
|
struct in6_ifaddr *ia, *ia_next;
|
2016-08-01 06:15:30 +03:00
|
|
|
int _s;
|
2002-06-09 01:22:29 +04:00
|
|
|
|
2005-05-30 01:43:51 +04:00
|
|
|
if (IN6_IS_ADDR_LINKLOCAL(&pfx->ndpr_prefix.sin6_addr))
|
2002-06-09 01:22:29 +04:00
|
|
|
continue; /* XXX */
|
|
|
|
|
|
|
|
/* do we really have to remove addresses as well? */
|
2016-08-01 06:15:30 +03:00
|
|
|
_s = pserialize_read_enter();
|
|
|
|
for (ia = IN6_ADDRLIST_READER_FIRST(); ia;
|
2016-07-04 09:48:14 +03:00
|
|
|
ia = ia_next) {
|
2017-12-15 07:03:46 +03:00
|
|
|
struct ifnet *ifa_ifp;
|
|
|
|
int bound;
|
|
|
|
struct psref psref;
|
|
|
|
|
2002-06-09 01:22:29 +04:00
|
|
|
/* ia might be removed. keep the next ptr. */
|
2016-08-01 06:15:30 +03:00
|
|
|
ia_next = IN6_ADDRLIST_READER_NEXT(ia);
|
2002-06-09 01:22:29 +04:00
|
|
|
|
|
|
|
if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0)
|
|
|
|
continue;
|
|
|
|
|
2017-12-15 07:03:46 +03:00
|
|
|
if (ia->ia6_ndpr != pfx)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
bound = curlwp_bind();
|
|
|
|
ia6_acquire(ia, &psref);
|
|
|
|
pserialize_read_exit(_s);
|
|
|
|
ND6_UNLOCK();
|
|
|
|
|
|
|
|
ifa_ifp = ia->ia_ifa.ifa_ifp;
|
|
|
|
if (ifa_ifp == ifp) {
|
|
|
|
/* Already have IFNET_LOCK(ifp) */
|
|
|
|
KASSERT(!if_is_deactivated(ifp));
|
|
|
|
ia6_release(ia, &psref);
|
2002-06-09 01:22:29 +04:00
|
|
|
in6_purgeaddr(&ia->ia_ifa);
|
2017-12-15 07:03:46 +03:00
|
|
|
curlwp_bindx(bound);
|
2016-08-01 06:15:30 +03:00
|
|
|
goto restart;
|
|
|
|
}
|
2017-12-15 07:03:46 +03:00
|
|
|
IFNET_LOCK(ifa_ifp);
|
|
|
|
/*
|
|
|
|
* Need to take the lock first to prevent
|
|
|
|
* if_detach from running in6_purgeaddr
|
|
|
|
* concurrently.
|
|
|
|
*/
|
|
|
|
if (!if_is_deactivated(ifa_ifp)) {
|
|
|
|
ia6_release(ia, &psref);
|
|
|
|
in6_purgeaddr(&ia->ia_ifa);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* ifp is being destroyed, ia will be
|
|
|
|
* destroyed by if_detach.
|
|
|
|
*/
|
|
|
|
ia6_release(ia, &psref);
|
|
|
|
/* XXX may cause busy loop */
|
|
|
|
}
|
|
|
|
IFNET_UNLOCK(ifa_ifp);
|
|
|
|
curlwp_bindx(bound);
|
|
|
|
goto restart;
|
2002-06-09 01:22:29 +04:00
|
|
|
}
|
2016-08-01 06:15:30 +03:00
|
|
|
pserialize_read_exit(_s);
|
2016-12-19 10:51:34 +03:00
|
|
|
|
|
|
|
KASSERT(pfx->ndpr_refcnt == 0);
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_prelist_remove(pfx);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
2002-06-09 01:22:29 +04:00
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
case SIOCSRTRFLUSH_IN6:
|
2002-06-09 01:22:29 +04:00
|
|
|
{
|
1999-06-28 10:36:47 +04:00
|
|
|
/* flush all the default routers */
|
2005-05-30 01:43:51 +04:00
|
|
|
struct nd_defrouter *drtr, *next;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_WLOCK();
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrouter_reset();
|
2016-12-12 06:13:14 +03:00
|
|
|
ND_DEFROUTER_LIST_FOREACH_SAFE(drtr, next) {
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrtrlist_del(drtr, NULL);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrouter_select();
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
1999-06-28 10:36:47 +04:00
|
|
|
break;
|
2002-06-09 01:22:29 +04:00
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
case SIOCGNBRINFO_IN6:
|
2006-03-06 02:47:08 +03:00
|
|
|
{
|
2015-11-25 09:21:26 +03:00
|
|
|
struct llentry *ln;
|
1999-12-13 18:17:17 +03:00
|
|
|
struct in6_addr nb_addr = nbi->addr; /* make local for safety */
|
|
|
|
|
Better support of IPv6 scoped addresses.
- most of the kernel code will not care about the actual encoding of
scope zone IDs and won't touch "s6_addr16[1]" directly.
- similarly, most of the kernel code will not care about link-local
scoped addresses as a special case.
- scope boundary check will be stricter. For example, the current
*BSD code allows a packet with src=::1 and dst=(some global IPv6
address) to be sent outside of the node, if the application do:
s = socket(AF_INET6);
bind(s, "::1");
sendto(s, some_global_IPv6_addr);
This is clearly wrong, since ::1 is only meaningful within a single
node, but the current implementation of the *BSD kernel cannot
reject this attempt.
- and, while there, don't try to remove the ff02::/32 interface route
entry in in6_ifdetach() as it's already gone.
This also includes some level of support for the standard source
address selection algorithm defined in RFC3484, which will be
completed on in the future.
From the KAME project via JINMEI Tatuya.
Approved by core@.
2006-01-21 03:15:35 +03:00
|
|
|
if ((error = in6_setscope(&nb_addr, ifp, NULL)) != 0)
|
2007-03-16 02:35:25 +03:00
|
|
|
return error;
|
1999-12-13 18:17:17 +03:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
ln = nd6_lookup(&nb_addr, ifp, false);
|
2015-07-17 05:21:08 +03:00
|
|
|
if (ln == NULL) {
|
1999-12-13 18:17:17 +03:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
nbi->state = ln->ln_state;
|
|
|
|
nbi->asked = ln->ln_asked;
|
|
|
|
nbi->isrouter = ln->ln_router;
|
2015-08-07 11:11:33 +03:00
|
|
|
nbi->expire = ln->ln_expire ?
|
|
|
|
time_mono_to_wall(ln->ln_expire) : 0;
|
2016-04-04 10:37:07 +03:00
|
|
|
LLE_RUNLOCK(ln);
|
2002-06-03 06:09:37 +04:00
|
|
|
|
1999-12-13 18:17:17 +03:00
|
|
|
break;
|
2002-06-09 01:22:29 +04:00
|
|
|
}
|
1999-12-13 18:17:17 +03:00
|
|
|
case SIOCGDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */
|
|
|
|
ndif->ifindex = nd6_defifindex;
|
|
|
|
break;
|
|
|
|
case SIOCSDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */
|
2007-03-16 02:35:25 +03:00
|
|
|
return nd6_setdefaultiface(ndif->ifindex);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
2007-03-16 02:35:25 +03:00
|
|
|
return error;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
2007-05-17 04:53:26 +04:00
|
|
|
void
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_llinfo_release_pkts(struct llentry *ln, struct ifnet *ifp)
|
2007-05-17 04:53:26 +04:00
|
|
|
{
|
|
|
|
struct mbuf *m_hold, *m_hold_next;
|
2016-04-04 10:37:07 +03:00
|
|
|
struct sockaddr_in6 sin6;
|
|
|
|
|
|
|
|
LLE_WLOCK_ASSERT(ln);
|
2007-05-17 04:53:26 +04:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
sockaddr_in6_init(&sin6, &ln->r_l3addr.addr6, 0, 0, 0);
|
|
|
|
|
|
|
|
m_hold = ln->la_hold, ln->la_hold = NULL, ln->la_numheld = 0;
|
|
|
|
|
|
|
|
LLE_WUNLOCK(ln);
|
|
|
|
for (; m_hold != NULL; m_hold = m_hold_next) {
|
2007-05-17 04:53:26 +04:00
|
|
|
m_hold_next = m_hold->m_nextpkt;
|
|
|
|
m_hold->m_nextpkt = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we assume ifp is not a p2p here, so
|
|
|
|
* just set the 2nd argument as the
|
|
|
|
* 1st one.
|
|
|
|
*/
|
2017-02-14 06:05:06 +03:00
|
|
|
ip6_if_output(ifp, ifp, m_hold, &sin6, NULL);
|
2007-05-17 04:53:26 +04:00
|
|
|
}
|
2016-04-04 10:37:07 +03:00
|
|
|
LLE_WLOCK(ln);
|
2007-05-17 04:53:26 +04:00
|
|
|
}
|
|
|
|
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
|
|
|
* Create neighbor cache entry and cache link-layer address,
|
2001-10-16 10:24:44 +04:00
|
|
|
* on reception of inbound ND6 packets. (RS/RA/NS/redirect)
|
1999-06-28 10:36:47 +04:00
|
|
|
*/
|
2015-07-17 05:21:08 +03:00
|
|
|
void
|
2006-10-12 05:30:41 +04:00
|
|
|
nd6_cache_lladdr(
|
|
|
|
struct ifnet *ifp,
|
|
|
|
struct in6_addr *from,
|
|
|
|
char *lladdr,
|
2006-11-16 04:32:37 +03:00
|
|
|
int lladdrlen,
|
2006-10-12 05:30:41 +04:00
|
|
|
int type, /* ICMP6 type */
|
|
|
|
int code /* type dependent information */
|
|
|
|
)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
2009-07-26 03:12:09 +04:00
|
|
|
struct nd_ifinfo *ndi = ND_IFINFO(ifp);
|
2015-11-25 09:21:26 +03:00
|
|
|
struct llentry *ln = NULL;
|
1999-06-28 10:36:47 +04:00
|
|
|
int is_newentry;
|
|
|
|
int do_update;
|
|
|
|
int olladdr;
|
|
|
|
int llchange;
|
|
|
|
int newstate = 0;
|
2016-04-04 10:37:07 +03:00
|
|
|
uint16_t router = 0;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2015-06-30 11:31:42 +03:00
|
|
|
KASSERT(ifp != NULL);
|
|
|
|
KASSERT(from != NULL);
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
/* nothing must be updated for unspecified address */
|
|
|
|
if (IN6_IS_ADDR_UNSPECIFIED(from))
|
2015-07-17 05:21:08 +03:00
|
|
|
return;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Validation about ifp->if_addrlen and lladdrlen must be done in
|
|
|
|
* the caller.
|
|
|
|
*
|
|
|
|
* XXX If the link does not have link-layer adderss, what should
|
|
|
|
* we do? (ifp->if_addrlen == 0)
|
|
|
|
* Spec says nothing in sections for RA, RS and NA. There's small
|
|
|
|
* description on it in NS section (RFC 2461 7.2.3).
|
|
|
|
*/
|
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
ln = nd6_lookup(from, ifp, true);
|
|
|
|
if (ln == NULL) {
|
1999-06-28 10:36:47 +04:00
|
|
|
#if 0
|
|
|
|
/* nothing must be done if there's no lladdr */
|
|
|
|
if (!lladdr || !lladdrlen)
|
|
|
|
return NULL;
|
|
|
|
#endif
|
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
ln = nd6_create(from, ifp);
|
1999-06-28 10:36:47 +04:00
|
|
|
is_newentry = 1;
|
2001-03-08 13:49:32 +03:00
|
|
|
} else {
|
|
|
|
/* do nothing if static ndp is set */
|
2016-04-04 10:37:07 +03:00
|
|
|
if (ln->la_flags & LLE_STATIC) {
|
|
|
|
LLE_WUNLOCK(ln);
|
2015-07-17 05:21:08 +03:00
|
|
|
return;
|
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
is_newentry = 0;
|
2001-03-08 13:49:32 +03:00
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2006-03-06 02:47:08 +03:00
|
|
|
if (ln == NULL)
|
2016-04-04 10:37:07 +03:00
|
|
|
return;
|
|
|
|
|
|
|
|
olladdr = (ln->la_flags & LLE_VALID) ? 1 : 0;
|
1999-06-28 10:36:47 +04:00
|
|
|
if (olladdr && lladdr) {
|
2016-04-04 10:37:07 +03:00
|
|
|
llchange = memcmp(lladdr, &ln->ll_addr, ifp->if_addrlen);
|
1999-06-28 10:36:47 +04:00
|
|
|
} else
|
|
|
|
llchange = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* newentry olladdr lladdr llchange (*=record)
|
|
|
|
* 0 n n -- (1)
|
|
|
|
* 0 y n -- (2)
|
|
|
|
* 0 n y -- (3) * STALE
|
|
|
|
* 0 y y n (4) *
|
|
|
|
* 0 y y y (5) * STALE
|
|
|
|
* 1 -- n -- (6) NOSTATE(= PASSIVE)
|
|
|
|
* 1 -- y -- (7) * STALE
|
|
|
|
*/
|
|
|
|
|
2001-10-16 10:24:44 +04:00
|
|
|
if (lladdr) { /* (3-5) and (7) */
|
1999-06-28 10:36:47 +04:00
|
|
|
/*
|
|
|
|
* Record source link-layer address
|
|
|
|
* XXX is it dependent to ifp->if_type?
|
|
|
|
*/
|
2016-04-04 10:37:07 +03:00
|
|
|
memcpy(&ln->ll_addr, lladdr, ifp->if_addrlen);
|
|
|
|
ln->la_flags |= LLE_VALID;
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!is_newentry) {
|
2002-06-09 01:22:29 +04:00
|
|
|
if ((!olladdr && lladdr) || /* (3) */
|
|
|
|
(olladdr && lladdr && llchange)) { /* (5) */
|
1999-06-28 10:36:47 +04:00
|
|
|
do_update = 1;
|
|
|
|
newstate = ND6_LLINFO_STALE;
|
2001-10-16 10:24:44 +04:00
|
|
|
} else /* (1-2,4) */
|
1999-06-28 10:36:47 +04:00
|
|
|
do_update = 0;
|
|
|
|
} else {
|
|
|
|
do_update = 1;
|
2006-03-06 02:47:08 +03:00
|
|
|
if (lladdr == NULL) /* (6) */
|
1999-06-28 10:36:47 +04:00
|
|
|
newstate = ND6_LLINFO_NOSTATE;
|
2001-10-16 10:24:44 +04:00
|
|
|
else /* (7) */
|
1999-06-28 10:36:47 +04:00
|
|
|
newstate = ND6_LLINFO_STALE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (do_update) {
|
|
|
|
/*
|
|
|
|
* Update the state of the neighbor cache.
|
|
|
|
*/
|
|
|
|
ln->ln_state = newstate;
|
|
|
|
|
|
|
|
if (ln->ln_state == ND6_LLINFO_STALE) {
|
2001-03-22 00:56:29 +03:00
|
|
|
/*
|
|
|
|
* XXX: since nd6_output() below will cause
|
|
|
|
* state tansition to DELAY and reset the timer,
|
|
|
|
* we must set the timer now, although it is actually
|
|
|
|
* meaningless.
|
|
|
|
*/
|
2015-12-07 09:19:13 +03:00
|
|
|
nd6_llinfo_settimer(ln, nd6_gctimer * hz);
|
2001-03-22 00:56:29 +03:00
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
nd6_llinfo_release_pkts(ln, ifp);
|
1999-06-28 10:36:47 +04:00
|
|
|
} else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
|
|
|
|
/* probe right away */
|
2003-06-27 12:41:08 +04:00
|
|
|
nd6_llinfo_settimer((void *)ln, 0);
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ICMP6 type dependent behavior.
|
|
|
|
*
|
|
|
|
* NS: clear IsRouter if new entry
|
|
|
|
* RS: clear IsRouter
|
|
|
|
* RA: set IsRouter if there's lladdr
|
|
|
|
* redir: clear IsRouter if new entry
|
|
|
|
*
|
|
|
|
* RA case, (1):
|
|
|
|
* The spec says that we must set IsRouter in the following cases:
|
|
|
|
* - If lladdr exist, set IsRouter. This means (1-5).
|
|
|
|
* - If it is old entry (!newentry), set IsRouter. This means (7).
|
|
|
|
* So, based on the spec, in (1-5) and (7) cases we must set IsRouter.
|
|
|
|
* A quetion arises for (1) case. (1) case has no lladdr in the
|
|
|
|
* neighbor cache, this is similar to (6).
|
|
|
|
* This case is rare but we figured that we MUST NOT set IsRouter.
|
|
|
|
*
|
|
|
|
* newentry olladdr lladdr llchange NS RS RA redir
|
1999-07-31 22:41:15 +04:00
|
|
|
* D R
|
|
|
|
* 0 n n -- (1) c ? s
|
|
|
|
* 0 y n -- (2) c s s
|
|
|
|
* 0 n y -- (3) c s s
|
|
|
|
* 0 y y n (4) c s s
|
|
|
|
* 0 y y y (5) c s s
|
|
|
|
* 1 -- n -- (6) c c c s
|
|
|
|
* 1 -- y -- (7) c c s c s
|
1999-06-28 10:36:47 +04:00
|
|
|
*
|
|
|
|
* (c=clear s=set)
|
|
|
|
*/
|
|
|
|
switch (type & 0xff) {
|
|
|
|
case ND_NEIGHBOR_SOLICIT:
|
|
|
|
/*
|
|
|
|
* New entry must have is_router flag cleared.
|
|
|
|
*/
|
2001-10-16 10:24:44 +04:00
|
|
|
if (is_newentry) /* (6-7) */
|
1999-06-28 10:36:47 +04:00
|
|
|
ln->ln_router = 0;
|
|
|
|
break;
|
1999-07-31 22:41:15 +04:00
|
|
|
case ND_REDIRECT:
|
|
|
|
/*
|
|
|
|
* If the icmp is a redirect to a better router, always set the
|
2001-10-16 10:24:44 +04:00
|
|
|
* is_router flag. Otherwise, if the entry is newly created,
|
|
|
|
* clear the flag. [RFC 2461, sec 8.3]
|
1999-07-31 22:41:15 +04:00
|
|
|
*/
|
|
|
|
if (code == ND_REDIRECT_ROUTER)
|
|
|
|
ln->ln_router = 1;
|
2001-10-16 10:24:44 +04:00
|
|
|
else if (is_newentry) /* (6-7) */
|
1999-07-31 22:41:15 +04:00
|
|
|
ln->ln_router = 0;
|
|
|
|
break;
|
1999-06-28 10:36:47 +04:00
|
|
|
case ND_ROUTER_SOLICIT:
|
|
|
|
/*
|
|
|
|
* is_router flag must always be cleared.
|
|
|
|
*/
|
|
|
|
ln->ln_router = 0;
|
|
|
|
break;
|
|
|
|
case ND_ROUTER_ADVERT:
|
|
|
|
/*
|
|
|
|
* Mark an entry with lladdr as a router.
|
|
|
|
*/
|
2002-06-09 01:22:29 +04:00
|
|
|
if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
|
|
|
|
(is_newentry && lladdr)) { /* (7) */
|
1999-06-28 10:36:47 +04:00
|
|
|
ln->ln_router = 1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-04-04 10:37:07 +03:00
|
|
|
#if 0
|
|
|
|
/* XXX should we send rtmsg as it used to be? */
|
2014-12-16 14:42:27 +03:00
|
|
|
if (do_update)
|
2015-02-25 15:45:34 +03:00
|
|
|
rt_newmsg(RTM_CHANGE, rt); /* tell user process */
|
2016-04-04 10:37:07 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
if (ln != NULL) {
|
|
|
|
router = ln->ln_router;
|
|
|
|
LLE_WUNLOCK(ln);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have too many cache entries, initiate immediate
|
|
|
|
* purging for some entries.
|
|
|
|
*/
|
|
|
|
if (is_newentry)
|
2016-09-02 10:15:14 +03:00
|
|
|
nd6_gc_neighbors(LLTABLE6(ifp), &ln->r_l3addr.addr6);
|
2014-12-16 14:42:27 +03:00
|
|
|
|
2001-06-22 17:36:12 +04:00
|
|
|
/*
|
|
|
|
* When the link-layer address of a router changes, select the
|
|
|
|
* best router again. In particular, when the neighbor entry is newly
|
|
|
|
* created, it might affect the selection policy.
|
|
|
|
* Question: can we restrict the first condition to the "is_newentry"
|
|
|
|
* case?
|
|
|
|
* XXX: when we hear an RA from a new router with the link-layer
|
2016-12-11 10:38:50 +03:00
|
|
|
* address option, nd6_defrouter_select() is called twice, since
|
2001-06-22 17:36:12 +04:00
|
|
|
* defrtrlist_update called the function as well. However, I believe
|
|
|
|
* we can compromise the overhead, since it only happens the first
|
|
|
|
* time.
|
2016-12-11 10:38:50 +03:00
|
|
|
* XXX: although nd6_defrouter_select() should not have a bad effect
|
2001-10-16 10:24:44 +04:00
|
|
|
* for those are not autoconfigured hosts, we explicitly avoid such
|
|
|
|
* cases for safety.
|
2001-06-22 17:36:12 +04:00
|
|
|
*/
|
2016-04-04 10:37:07 +03:00
|
|
|
if (do_update && router && !ip6_forwarding &&
|
2016-12-19 10:51:34 +03:00
|
|
|
nd6_accepts_rtadv(ndi)) {
|
|
|
|
ND6_WLOCK();
|
2016-12-11 10:38:50 +03:00
|
|
|
nd6_defrouter_select();
|
2016-12-19 10:51:34 +03:00
|
|
|
ND6_UNLOCK();
|
|
|
|
}
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2006-11-16 04:32:37 +03:00
|
|
|
nd6_slowtimo(void *ignored_arg)
|
1999-06-28 10:36:47 +04:00
|
|
|
{
|
2001-02-10 07:14:26 +03:00
|
|
|
struct nd_ifinfo *nd6if;
|
2002-05-29 11:53:39 +04:00
|
|
|
struct ifnet *ifp;
|
2016-05-12 05:24:16 +03:00
|
|
|
int s;
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2017-11-17 10:37:12 +03:00
|
|
|
SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
|
2015-08-31 06:26:53 +03:00
|
|
|
callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
|
2000-03-23 10:01:25 +03:00
|
|
|
nd6_slowtimo, NULL);
|
2016-05-12 05:24:16 +03:00
|
|
|
|
|
|
|
s = pserialize_read_enter();
|
|
|
|
IFNET_READER_FOREACH(ifp) {
|
2002-05-29 11:53:39 +04:00
|
|
|
nd6if = ND_IFINFO(ifp);
|
1999-06-28 10:36:47 +04:00
|
|
|
if (nd6if->basereachable && /* already initialized */
|
|
|
|
(nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
|
|
|
|
/*
|
|
|
|
* Since reachable time rarely changes by router
|
|
|
|
* advertisements, we SHOULD insure that a new random
|
|
|
|
* value gets recomputed at least once every few hours.
|
|
|
|
* (RFC 2461, 6.3.4)
|
|
|
|
*/
|
|
|
|
nd6if->recalctm = nd6_recalc_reachtm_interval;
|
|
|
|
nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable);
|
|
|
|
}
|
|
|
|
}
|
2016-05-12 05:24:16 +03:00
|
|
|
pserialize_read_exit(s);
|
|
|
|
|
2017-11-17 10:37:12 +03:00
|
|
|
SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
|
1999-06-28 10:36:47 +04:00
|
|
|
}
|
|
|
|
|
2017-02-14 06:05:06 +03:00
|
|
|
/*
 * Resolve the link-layer address for the next hop _dst on interface ifp.
 *
 * Return 0 if a valid neighbor cache entry is found (the link-layer
 * address is copied into lldst). Return EWOULDBLOCK if a cache entry is
 * not yet valid and neighbor resolution is in progress; in this case the
 * mbuf m is queued on the entry's hold list and will be sent (or dropped)
 * when resolution completes. Otherwise return errno after freeing the
 * mbuf.
 */
int
nd6_resolve(struct ifnet *ifp, const struct rtentry *rt, struct mbuf *m,
    const struct sockaddr *_dst, uint8_t *lldst, size_t dstsize)
{
	struct llentry *ln = NULL;
	bool created = false;
	const struct sockaddr_in6 *dst = satocsin6(_dst);

	/* discard the packet if IPv6 operation is disabled on the interface */
	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED)) {
		m_freem(m);
		return ENETDOWN; /* better error? */
	}

	/*
	 * Address resolution or Neighbor Unreachability Detection
	 * for the next hop.
	 * At this point, the destination of the packet must be a unicast
	 * or an anycast address(i.e. not a multicast).
	 */

	/*
	 * Look up the neighbor cache for the nexthop; the third argument
	 * false requests a read-locked entry (released with LLE_RUNLOCK).
	 */
	ln = nd6_lookup(&dst->sin6_addr, ifp, false);

	if (ln != NULL && (ln->la_flags & LLE_VALID) != 0) {
		KASSERT(ln->ln_state > ND6_LLINFO_INCOMPLETE);
		/* Fast path: entry already has a usable link-layer address. */
		memcpy(lldst, &ln->ll_addr, MIN(dstsize, ifp->if_addrlen));
		LLE_RUNLOCK(ln);
		return 0;
	}
	if (ln != NULL)
		LLE_RUNLOCK(ln);

	/* Slow path: retake the entry write-locked so we may modify it. */
	ln = nd6_lookup(&dst->sin6_addr, ifp, true);
	if (ln == NULL && nd6_is_addr_neighbor(dst, ifp)) {
		struct sockaddr_in6 sin6;
		/*
		 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
		 * the condition below is not very efficient. But we believe
		 * it is tolerable, because this should be a rare case.
		 */
		ln = nd6_create(&dst->sin6_addr, ifp);
		if (ln == NULL) {
			char ip6buf[INET6_ADDRSTRLEN];
			log(LOG_DEBUG,
			    "%s: can't allocate llinfo for %s "
			    "(ln=%p, rt=%p)\n", __func__,
			    IN6_PRINT(ip6buf, &dst->sin6_addr), ln, rt);
			m_freem(m);
			return ENOBUFS;
		}

		/* Announce the freshly cloned route to routing listeners. */
		sockaddr_in6_init(&sin6, &ln->r_l3addr.addr6, 0, 0, 0);
		rt_clonedmsg(sin6tosa(&sin6), ifp, rt);

		created = true;
	}

	if (ln == NULL) {
		m_freem(m);
		return ENETDOWN; /* better error? */
	}

	/* Both nd6_lookup(.., true) and nd6_create return a wlocked entry. */
	LLE_WLOCK_ASSERT(ln);

	/* We don't have to do link-layer address resolution on a p2p link. */
	if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
	    ln->ln_state < ND6_LLINFO_REACHABLE) {
		ln->ln_state = ND6_LLINFO_STALE;
		nd6_llinfo_settimer(ln, nd6_gctimer * hz);
	}

	/*
	 * The first time we send a packet to a neighbor whose entry is
	 * STALE, we have to change the state to DELAY and set a timer to
	 * expire in DELAY_FIRST_PROBE_TIME seconds, to ensure that
	 * neighbor unreachability detection runs on expiration.
	 * (RFC 2461 7.3.3)
	 */
	if (ln->ln_state == ND6_LLINFO_STALE) {
		ln->ln_asked = 0;
		ln->ln_state = ND6_LLINFO_DELAY;
		nd6_llinfo_settimer(ln, nd6_delay * hz);
	}

	/*
	 * There is a neighbor cache entry, but no link-layer address
	 * response yet. Append this latest packet to the end of the
	 * entry's hold queue (chained via m_nextpkt). If the queue
	 * length exceeds nd6_maxqueuelen, the oldest packets are
	 * dropped from the head until the limit is met.
	 */
	if (ln->ln_state == ND6_LLINFO_NOSTATE)
		ln->ln_state = ND6_LLINFO_INCOMPLETE;
	if (ln->ln_hold) {
		struct mbuf *m_hold;
		int i;

		/* Walk to the tail, counting entries, and append m. */
		i = 0;
		for (m_hold = ln->ln_hold; m_hold; m_hold = m_hold->m_nextpkt) {
			i++;
			if (m_hold->m_nextpkt == NULL) {
				m_hold->m_nextpkt = m;
				break;
			}
		}
		/* Enforce the queue limit by discarding from the head. */
		while (i >= nd6_maxqueuelen) {
			m_hold = ln->ln_hold;
			ln->ln_hold = ln->ln_hold->m_nextpkt;
			m_freem(m_hold);
			i--;
		}
	} else {
		ln->ln_hold = m;
	}

	/*
	 * If there has been no NS for the neighbor after entering the
	 * INCOMPLETE state, send the first solicitation.
	 */
	if (!ND6_LLINFO_PERMANENT(ln) && ln->ln_asked == 0) {
		struct in6_addr src, *psrc;

		ln->ln_asked++;
		nd6_llinfo_settimer(ln, ND_IFINFO(ifp)->retrans * hz / 1000);
		psrc = nd6_llinfo_get_holdsrc(ln, &src);
		/* Drop the lock before transmitting the solicitation. */
		LLE_WUNLOCK(ln);
		ln = NULL;
		nd6_ns_output(ifp, NULL, &dst->sin6_addr, psrc, NULL);
	} else {
		/* We did the lookup so we need to do the unlock here. */
		LLE_WUNLOCK(ln);
	}

	if (created)
		nd6_gc_neighbors(LLTABLE6(ifp), &dst->sin6_addr);

	return EWOULDBLOCK;
}
|
1999-06-28 10:36:47 +04:00
|
|
|
|
2001-10-17 14:55:09 +04:00
|
|
|
int
|
2007-03-16 02:35:25 +03:00
|
|
|
nd6_need_cache(struct ifnet *ifp)
|
2001-10-17 14:55:09 +04:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* XXX: we currently do not make neighbor cache on any interface
|
|
|
|
* other than ARCnet, Ethernet, FDDI and GIF.
|
|
|
|
*
|
|
|
|
* RFC2893 says:
|
|
|
|
* - unidirectional tunnels needs no ND
|
|
|
|
*/
|
|
|
|
switch (ifp->if_type) {
|
|
|
|
case IFT_ARCNET:
|
|
|
|
case IFT_ETHER:
|
|
|
|
case IFT_FDDI:
|
|
|
|
case IFT_IEEE1394:
|
2006-05-18 13:05:49 +04:00
|
|
|
case IFT_CARP:
|
2001-10-17 14:55:09 +04:00
|
|
|
case IFT_GIF: /* XXX need more cases? */
|
2006-03-06 02:47:08 +03:00
|
|
|
case IFT_PPP:
|
|
|
|
case IFT_TUNNEL:
|
2007-03-16 02:35:25 +03:00
|
|
|
return 1;
|
2001-10-17 14:55:09 +04:00
|
|
|
default:
|
2007-03-16 02:35:25 +03:00
|
|
|
return 0;
|
2001-10-17 14:55:09 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-24 22:24:38 +03:00
|
|
|
static void
|
2015-11-25 09:21:26 +03:00
|
|
|
clear_llinfo_pqueue(struct llentry *ln)
|
2006-03-24 22:24:38 +03:00
|
|
|
{
|
|
|
|
struct mbuf *m_hold, *m_hold_next;
|
|
|
|
|
|
|
|
for (m_hold = ln->ln_hold; m_hold; m_hold = m_hold_next) {
|
|
|
|
m_hold_next = m_hold->m_nextpkt;
|
|
|
|
m_hold->m_nextpkt = NULL;
|
|
|
|
m_freem(m_hold);
|
|
|
|
}
|
|
|
|
|
|
|
|
ln->ln_hold = NULL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2002-06-09 01:22:29 +04:00
|
|
|
/*
 * Handle the ICMPV6CTL_ND6_{DRLIST,PRLIST,MAXQLEN} sysctl nodes.
 *
 * Read-only: any attempt to set a value (newp != NULL) fails with EPERM.
 * Implements the usual two-phase sysctl protocol: a call with oldp ==
 * NULL only reports the required buffer size through *oldlenp; a call
 * with a buffer copies out at most *oldlenp bytes and updates *oldlenp
 * to the amount written.
 */
int
nd6_sysctl(
    int name,
    void *oldp,	/* syscall arg, need copyout */
    size_t *oldlenp,
    void *newp,	/* syscall arg, need copyin */
    size_t newlen
)
{
	int (*fill_func)(void *, size_t *);

	if (newp)
		return EPERM;

	/* Select the list serializer for the requested node. */
	switch (name) {
	case ICMPV6CTL_ND6_DRLIST:
		fill_func = fill_drlist;
		break;

	case ICMPV6CTL_ND6_PRLIST:
		fill_func = fill_prlist;
		break;

	case ICMPV6CTL_ND6_MAXQLEN:
		return 0;

	default:
		return ENOPROTOOPT;
	}

	if (oldlenp == NULL)
		return EINVAL;

	size_t ol;
	int error = (*fill_func)(NULL, &ol);	/* calc len needed */
	if (error)
		return error;

	if (oldp == NULL) {
		/* Size probe only: report the needed length. */
		*oldlenp = ol;
		return 0;
	}

	/* Copy out no more than the caller's buffer can hold. */
	ol = *oldlenp = min(ol, *oldlenp);
	if (ol == 0)
		return 0;

	/*
	 * Fill a kernel staging buffer first, then copy out; fill_func
	 * may not write directly to user memory.
	 */
	void *p = kmem_alloc(ol, KM_SLEEP);
	error = (*fill_func)(p, oldlenp);
	if (!error)
		error = copyout(p, oldp, *oldlenp);
	kmem_free(p, ol);

	return error;
}
|
|
|
|
|
|
|
|
/*
 * Serialize the default router list into an array of struct
 * in6_defrouter records at oldp.
 *
 * When oldp is NULL this only computes the space required and stores it
 * in *oldlenp. When a buffer is given, records are written while they
 * fit (d stays below the end pointer de); *oldlenp is always set to the
 * full size of the list, so a caller can detect truncation.
 */
static int
fill_drlist(void *oldp, size_t *oldlenp)
{
	int error = 0;
	struct in6_defrouter *d = NULL, *de = NULL;
	struct nd_defrouter *dr;
	size_t l;

	if (oldp) {
		d = (struct in6_defrouter *)oldp;
		de = (struct in6_defrouter *)((char *)oldp + *oldlenp);
	}
	l = 0;

	/* Hold the ND read lock while walking the router list. */
	ND6_RLOCK();
	ND_DEFROUTER_LIST_FOREACH(dr) {

		/* Only write the record if it fits entirely in the buffer. */
		if (oldp && d + 1 <= de) {
			memset(d, 0, sizeof(*d));
			sockaddr_in6_init(&d->rtaddr, &dr->rtaddr, 0, 0, 0);
			if (sa6_recoverscope(&d->rtaddr)) {
				char ip6buf[INET6_ADDRSTRLEN];
				log(LOG_ERR,
				    "scope error in router list (%s)\n",
				    IN6_PRINT(ip6buf, &d->rtaddr.sin6_addr));
				/* XXX: press on... */
			}
			d->flags = dr->flags;
			d->rtlifetime = dr->rtlifetime;
			/* Convert the monotonic expiry to wall-clock time. */
			d->expire = dr->expire ?
			    time_mono_to_wall(dr->expire) : 0;
			d->if_index = dr->ifp->if_index;
		}

		/* Account for the record even when it did not fit. */
		l += sizeof(*d);
		if (d)
			d++;
	}
	ND6_UNLOCK();

	*oldlenp = l;	/* (void *)d - (void *)oldp */

	return error;
}
|
|
|
|
|
|
|
|
/*
 * Serialize the prefix list into a sequence of variable-length records
 * at oldp: each struct in6_prefix is followed immediately by one
 * sockaddr_in6 per advertising router.
 *
 * When oldp is NULL this only computes the space required. When a
 * buffer is given, each prefix record is staged in pfx, the advertising
 * routers are appended behind it, and pfx (now carrying the final
 * advrtrs count) is back-patched into place at ps. *oldlenp is always
 * set to the full list size so callers can detect truncation.
 */
static int
fill_prlist(void *oldp, size_t *oldlenp)
{
	int error = 0;
	struct nd_prefix *pr;
	uint8_t *p = NULL, *ps = NULL;
	uint8_t *pe = NULL;
	size_t l;
	char ip6buf[INET6_ADDRSTRLEN];

	if (oldp) {
		ps = p = (uint8_t*)oldp;
		pe = (uint8_t*)oldp + *oldlenp;
	}
	l = 0;

	/* Hold the ND read lock while walking the prefix list. */
	ND6_RLOCK();
	ND_PREFIX_LIST_FOREACH(pr) {
		u_short advrtrs;
		struct sockaddr_in6 sin6;
		struct nd_pfxrouter *pfr;
		struct in6_prefix pfx;

		/* Write path: the prefix header fits in the buffer. */
		if (oldp && p + sizeof(struct in6_prefix) <= pe)
		{
			memset(&pfx, 0, sizeof(pfx));
			ps = p;		/* remember where to back-patch pfx */
			pfx.prefix = pr->ndpr_prefix;

			if (sa6_recoverscope(&pfx.prefix)) {
				log(LOG_ERR,
				    "scope error in prefix list (%s)\n",
				    IN6_PRINT(ip6buf, &pfx.prefix.sin6_addr));
				/* XXX: press on... */
			}
			pfx.raflags = pr->ndpr_raf;
			pfx.prefixlen = pr->ndpr_plen;
			pfx.vltime = pr->ndpr_vltime;
			pfx.pltime = pr->ndpr_pltime;
			pfx.if_index = pr->ndpr_ifp->if_index;
			if (pr->ndpr_vltime == ND6_INFINITE_LIFETIME)
				pfx.expire = 0;
			else {
				time_t maxexpire;

				/* XXX: we assume time_t is signed. */
				/* Clamp to the largest positive time_t. */
				maxexpire = (-1) &
				    ~((time_t)1 <<
				    ((sizeof(maxexpire) * 8) - 1));
				if (pr->ndpr_vltime <
				    maxexpire - pr->ndpr_lastupdate) {
					pfx.expire = pr->ndpr_lastupdate +
					    pr->ndpr_vltime;
				} else
					pfx.expire = maxexpire;
			}
			pfx.refcnt = pr->ndpr_refcnt;
			pfx.flags = pr->ndpr_stateflags;
			pfx.origin = PR_ORIG_RA;

			p += sizeof(pfx); l += sizeof(pfx);

			/* Append one sockaddr_in6 per advertising router. */
			advrtrs = 0;
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
				/* Count but skip routers that don't fit. */
				if (p + sizeof(sin6) > pe) {
					advrtrs++;
					continue;
				}

				sockaddr_in6_init(&sin6, &pfr->router->rtaddr,
				    0, 0, 0);
				if (sa6_recoverscope(&sin6)) {
					log(LOG_ERR,
					    "scope error in "
					    "prefix list (%s)\n",
					    IN6_PRINT(ip6buf,
					    &pfr->router->rtaddr));
				}
				advrtrs++;
				memcpy(p, &sin6, sizeof(sin6));
				p += sizeof(sin6);
				l += sizeof(sin6);
			}
			/* Back-patch the header with the router count. */
			pfx.advrtrs = advrtrs;
			memcpy(ps, &pfx, sizeof(pfx));
		}
		/* Size-only path: just total up the record lengths. */
		else {
			l += sizeof(pfx);
			advrtrs = 0;
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
				advrtrs++;
				l += sizeof(sin6);
			}
		}
	}
	ND6_UNLOCK();

	*oldlenp = l;

	return error;
}
|
2016-12-11 10:37:53 +03:00
|
|
|
|
|
|
|
static int
|
|
|
|
nd6_setdefaultiface(int ifindex)
|
|
|
|
{
|
|
|
|
ifnet_t *ifp;
|
|
|
|
int error = 0;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
s = pserialize_read_enter();
|
|
|
|
ifp = if_byindex(ifindex);
|
|
|
|
if (ifp == NULL) {
|
|
|
|
pserialize_read_exit(s);
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
if (nd6_defifindex != ifindex) {
|
|
|
|
nd6_defifindex = ifindex;
|
|
|
|
nd6_defifp = nd6_defifindex > 0 ? ifp : NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Our current implementation assumes one-to-one maping between
|
|
|
|
* interfaces and links, so it would be natural to use the
|
|
|
|
* default interface as the default link.
|
|
|
|
*/
|
|
|
|
scope6_setdefault(nd6_defifp);
|
|
|
|
}
|
|
|
|
pserialize_read_exit(s);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|