2007-02-17 10:50:49 +03:00
|
|
|
/* $NetBSD: route.c,v 1.85 2007/02/17 07:50:49 dyoung Exp $ */
|
1998-04-29 07:41:49 +04:00
|
|
|
|
|
|
|
/*-
|
|
|
|
* Copyright (c) 1998 The NetBSD Foundation, Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
|
|
|
|
* NASA Ames Research Center.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the NetBSD
|
|
|
|
* Foundation, Inc. and its contributors.
|
|
|
|
* 4. Neither the name of The NetBSD Foundation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1999-07-01 12:12:45 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
|
|
|
|
* All rights reserved.
|
2005-02-27 01:45:09 +03:00
|
|
|
*
|
1999-07-01 12:12:45 +04:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Neither the name of the project nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
2005-02-27 01:45:09 +03:00
|
|
|
*
|
1999-07-01 12:12:45 +04:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-05-13 10:02:48 +04:00
|
|
|
* Copyright (c) 1980, 1986, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1993-03-21 12:45:37 +03:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1997-04-03 01:17:28 +04:00
|
|
|
* @(#)route.c 8.3 (Berkeley) 1/9/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
1993-03-21 21:04:42 +03:00
|
|
|
|
2001-11-13 02:49:33 +03:00
|
|
|
#include <sys/cdefs.h>
|
2007-02-17 10:50:49 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.85 2007/02/17 07:50:49 dyoung Exp $");
|
1998-07-05 10:49:00 +04:00
|
|
|
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2000-03-23 10:01:25 +03:00
|
|
|
#include <sys/callout.h>
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
|
|
|
#include <sys/domain.h>
|
|
|
|
#include <sys/protosw.h>
|
1998-04-29 07:41:49 +04:00
|
|
|
#include <sys/kernel.h>
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <sys/ioctl.h>
|
1998-12-22 05:27:06 +03:00
|
|
|
#include <sys/pool.h>
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <net/if.h>
|
|
|
|
#include <net/route.h>
|
|
|
|
#include <net/raw_cb.h>
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1993-12-18 03:40:47 +03:00
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/in_var.h>
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1993-12-18 03:40:47 +03:00
|
|
|
|
2002-05-13 00:40:11 +04:00
|
|
|
struct route_cb route_cb;
|
|
|
|
struct rtstat rtstat;
|
|
|
|
struct radix_node_head *rt_tables[AF_MAX+1];
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
int rttrash; /* routes not in table but not freed */
|
|
|
|
struct sockaddr wildcard; /* zero valued cookie for wildcard searches */
|
|
|
|
|
2004-04-25 20:42:40 +04:00
|
|
|
POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL);
|
|
|
|
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL);
|
1998-12-22 05:27:06 +03:00
|
|
|
|
2000-03-23 10:01:25 +03:00
|
|
|
struct callout rt_timer_ch; /* callout for rt_timer_timer() */
|
|
|
|
|
2004-04-22 01:03:43 +04:00
|
|
|
static int rtdeletemsg(struct rtentry *);
|
|
|
|
static int rtflushclone1(struct radix_node *, void *);
|
|
|
|
static void rtflushclone(struct radix_node_head *, struct rtentry *);
|
2001-01-27 07:49:31 +03:00
|
|
|
|
2006-12-07 22:37:08 +03:00
|
|
|
struct ifaddr *
|
|
|
|
rt_get_ifa(struct rtentry *rt)
|
|
|
|
{
|
|
|
|
struct ifaddr *ifa;
|
|
|
|
|
|
|
|
if ((ifa = rt->rt_ifa) == NULL)
|
|
|
|
return ifa;
|
|
|
|
else if (ifa->ifa_getifa == NULL)
|
|
|
|
return ifa;
|
|
|
|
#if 0
|
|
|
|
else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
|
|
|
|
return ifa;
|
|
|
|
#endif
|
|
|
|
else {
|
|
|
|
ifa = (*ifa->ifa_getifa)(ifa, rt_key(rt));
|
|
|
|
rt_replace_ifa(rt, ifa);
|
|
|
|
return ifa;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-12-07 22:20:14 +03:00
|
|
|
static void
|
|
|
|
rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
|
|
|
|
{
|
|
|
|
rt->rt_ifa = ifa;
|
|
|
|
if (ifa->ifa_seqno != NULL)
|
|
|
|
rt->rt_ifa_seqno = *ifa->ifa_seqno;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
|
|
|
|
{
|
|
|
|
IFAREF(ifa);
|
|
|
|
IFAFREE(rt->rt_ifa);
|
|
|
|
rt_set_ifa1(rt, ifa);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
|
|
|
|
{
|
|
|
|
IFAREF(ifa);
|
|
|
|
rt_set_ifa1(rt, ifa);
|
|
|
|
}
|
|
|
|
|
1994-05-13 10:02:48 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rtable_init(void **table)
|
1994-05-13 10:02:48 +04:00
|
|
|
{
|
|
|
|
struct domain *dom;
|
2005-01-23 21:41:56 +03:00
|
|
|
DOMAIN_FOREACH(dom)
|
1994-05-13 10:02:48 +04:00
|
|
|
if (dom->dom_rtattach)
|
|
|
|
dom->dom_rtattach(&table[dom->dom_family],
|
|
|
|
dom->dom_rtoffset);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1994-05-11 13:26:46 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
route_init(void)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
1998-12-22 05:27:06 +03:00
|
|
|
|
1994-05-13 10:02:48 +04:00
|
|
|
rn_init(); /* initialize all zeroes, all ones, mask table */
|
|
|
|
rtable_init((void **)rt_tables);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
Here are various changes designed to protect against bad IPv4
routing caused by stale route caches (struct route). Route caches
are sprinkled throughout PCBs, the IP fast-forwarding table, and
IP tunnel interfaces (gre, gif, stf).
Stale IPv6 and ISO route caches will be treated by separate patches.
Thank you to Christoph Badura for suggesting the general approach
to invalidating route caches that I take here.
Here are the details:
Add hooks to struct domain for tracking and for invalidating each
domain's route caches: dom_rtcache, dom_rtflush, and dom_rtflushall.
Introduce helper subroutines, rtflush(ro) for invalidating a route
cache, rtflushall(family) for invalidating all route caches in a
routing domain, and rtcache(ro) for notifying the domain of a new
cached route.
Chain together all IPv4 route caches where ro_rt != NULL. Provide
in_rtcache() for adding a route to the chain. Provide in_rtflush()
and in_rtflushall() for invalidating IPv4 route caches. In
in_rtflush(), set ro_rt to NULL, and remove the route from the
chain. In in_rtflushall(), walk the chain and remove every route
cache.
In rtrequest1(), call rtflushall() to invalidate route caches when
a route is added.
In gif(4), discard the workaround for stale caches that involves
expiring them every so often.
Replace the pattern 'RTFREE(ro->ro_rt); ro->ro_rt = NULL;' with a
call to rtflush(ro).
Update ipflow_fastforward() and all other users of route caches so
that they expect a cached route, ro->ro_rt, to turn to NULL.
Take care when moving a 'struct route' to rtflush() the source and
to rtcache() the destination.
In domain initializers, use .dom_xxx tags.
KNF here and there.
2006-12-09 08:33:04 +03:00
|
|
|
void
|
|
|
|
rtflushall(int family)
|
|
|
|
{
|
|
|
|
struct domain *dom;
|
|
|
|
|
|
|
|
if ((dom = pffinddomain(family)) != NULL && dom->dom_rtflushall != NULL)
|
|
|
|
(*dom->dom_rtflushall)();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtflush(struct route *ro)
|
|
|
|
{
|
|
|
|
struct domain *dom;
|
|
|
|
|
|
|
|
KASSERT(ro->ro_rt != NULL);
|
|
|
|
|
|
|
|
RTFREE(ro->ro_rt);
|
|
|
|
ro->ro_rt = NULL;
|
|
|
|
|
|
|
|
if ((dom = pffinddomain(ro->ro_dst.sa_family)) != NULL &&
|
|
|
|
dom->dom_rtflush != NULL)
|
|
|
|
(*dom->dom_rtflush)(ro);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtcache(struct route *ro)
|
|
|
|
{
|
|
|
|
struct domain *dom;
|
|
|
|
|
|
|
|
KASSERT(ro->ro_rt != NULL);
|
|
|
|
|
|
|
|
if ((dom = pffinddomain(ro->ro_dst.sa_family)) != NULL &&
|
|
|
|
dom->dom_rtcache != NULL)
|
|
|
|
(*dom->dom_rtcache)(ro);
|
|
|
|
}
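/*
 * Illustrative sketch, not part of the original source: how a caller that
 * keeps a cached route in a 'struct route' is expected to use the helpers
 * above, replacing the old 'RTFREE(ro->ro_rt); ro->ro_rt = NULL;' pattern
 * with rtflush() and registering fresh lookups with rtcache(), as the
 * commit message above describes.  The function name example_refresh_route
 * is hypothetical.
 */
#if 0
static void
example_refresh_route(struct route *ro)
{
	/* Drop the stale cached route, if any, and notify the domain. */
	if (ro->ro_rt != NULL)
		rtflush(ro);

	/* Look the route up again and let the domain track the new cache. */
	if ((ro->ro_rt = rtalloc1(&ro->ro_dst, 1)) != NULL)
		rtcache(ro);
}
#endif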
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Packet routing routines.
|
|
|
|
*/
|
1994-05-11 13:26:46 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rtalloc(struct route *ro)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2006-11-13 20:51:02 +03:00
|
|
|
if (ro->ro_rt != NULL) {
|
|
|
|
if (ro->ro_rt->rt_ifp != NULL &&
|
|
|
|
(ro->ro_rt->rt_flags & RTF_UP) != 0)
|
|
|
|
return;
|
2006-12-09 08:33:04 +03:00
|
|
|
rtflush(ro);
|
2006-11-13 20:51:02 +03:00
|
|
|
}
|
2006-12-09 08:33:04 +03:00
|
|
|
if ((ro->ro_rt = rtalloc1(&ro->ro_dst, 1)) == NULL)
|
|
|
|
return;
|
|
|
|
rtcache(ro);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
struct rtentry *
|
2004-04-22 01:03:43 +04:00
|
|
|
rtalloc1(const struct sockaddr *dst, int report)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:45:33 +04:00
|
|
|
struct radix_node_head *rnh = rt_tables[dst->sa_family];
|
|
|
|
struct rtentry *rt;
|
|
|
|
struct radix_node *rn;
|
2006-04-10 23:06:37 +04:00
|
|
|
struct rtentry *newrt = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
struct rt_addrinfo info;
|
1995-08-13 03:59:09 +04:00
|
|
|
int s = splsoftnet(), err = 0, msgtype = RTM_MISS;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2005-05-30 01:22:52 +04:00
|
|
|
if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
|
1993-03-21 12:45:37 +03:00
|
|
|
((rn->rn_flags & RNF_ROOT) == 0)) {
|
|
|
|
newrt = rt = (struct rtentry *)rn;
|
|
|
|
if (report && (rt->rt_flags & RTF_CLONING)) {
|
2006-04-10 23:06:37 +04:00
|
|
|
err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
|
|
|
|
&newrt);
|
1994-03-23 08:05:03 +03:00
|
|
|
if (err) {
|
|
|
|
newrt = rt;
|
|
|
|
rt->rt_refcnt++;
|
|
|
|
goto miss;
|
|
|
|
}
|
2006-04-15 06:19:00 +04:00
|
|
|
KASSERT(newrt != NULL);
|
1994-03-23 08:05:03 +03:00
|
|
|
if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
|
|
|
|
msgtype = RTM_RESOLVE;
|
|
|
|
goto miss;
|
|
|
|
}
|
2001-01-17 07:05:41 +03:00
|
|
|
/* Inform listeners of the new route */
|
2001-07-18 20:43:09 +04:00
|
|
|
memset(&info, 0, sizeof(info));
|
2001-01-17 07:05:41 +03:00
|
|
|
info.rti_info[RTAX_DST] = rt_key(rt);
|
|
|
|
info.rti_info[RTAX_NETMASK] = rt_mask(rt);
|
|
|
|
info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
|
|
|
|
if (rt->rt_ifp != NULL) {
|
2005-02-27 01:45:09 +03:00
|
|
|
info.rti_info[RTAX_IFP] =
|
2001-11-05 21:02:15 +03:00
|
|
|
TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
|
2001-01-17 07:05:41 +03:00
|
|
|
info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
|
|
|
|
}
|
|
|
|
rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
|
1993-03-21 12:45:37 +03:00
|
|
|
} else
|
|
|
|
rt->rt_refcnt++;
|
|
|
|
} else {
|
|
|
|
rtstat.rts_unreach++;
|
1994-05-13 10:02:48 +04:00
|
|
|
miss: if (report) {
|
2001-07-18 20:43:09 +04:00
|
|
|
memset((caddr_t)&info, 0, sizeof(info));
|
1994-05-13 10:02:48 +04:00
|
|
|
info.rti_info[RTAX_DST] = dst;
|
|
|
|
rt_missmsg(msgtype, &info, 0, err);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
return (newrt);
|
|
|
|
}
|
|
|
|
|
1994-05-11 13:26:46 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rtfree(struct rtentry *rt)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:45:33 +04:00
|
|
|
struct ifaddr *ifa;
|
1994-05-13 10:02:48 +04:00
|
|
|
|
2006-04-10 23:06:37 +04:00
|
|
|
if (rt == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
panic("rtfree");
|
|
|
|
rt->rt_refcnt--;
|
|
|
|
if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
|
|
|
|
if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
|
|
|
|
panic ("rtfree 2");
|
1994-05-13 10:02:48 +04:00
|
|
|
rttrash--;
|
|
|
|
if (rt->rt_refcnt < 0) {
|
1996-10-13 06:10:01 +04:00
|
|
|
printf("rtfree: %p not freed (neg refs)\n", rt);
|
1994-05-13 10:02:48 +04:00
|
|
|
return;
|
|
|
|
}
|
2002-11-12 04:37:30 +03:00
|
|
|
rt_timer_remove_all(rt, 0);
|
1994-05-13 10:02:48 +04:00
|
|
|
ifa = rt->rt_ifa;
|
2006-12-04 03:52:47 +03:00
|
|
|
rt->rt_ifa = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
IFAFREE(ifa);
|
2006-12-04 03:52:47 +03:00
|
|
|
rt->rt_ifp = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
Free(rt_key(rt));
|
1998-12-22 05:27:06 +03:00
|
|
|
pool_put(&rtentry_pool, rt);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1994-05-13 10:02:48 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
ifafree(struct ifaddr *ifa)
|
1994-05-13 10:02:48 +04:00
|
|
|
{
|
2000-02-02 01:52:04 +03:00
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
1994-05-13 10:02:48 +04:00
|
|
|
if (ifa == NULL)
|
2000-02-02 01:52:04 +03:00
|
|
|
panic("ifafree: null ifa");
|
|
|
|
if (ifa->ifa_refcnt != 0)
|
|
|
|
panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
|
|
|
|
#endif
|
2000-02-02 21:02:08 +03:00
|
|
|
#ifdef IFAREF_DEBUG
|
|
|
|
printf("ifafree: freeing ifaddr %p\n", ifa);
|
|
|
|
#endif
|
2000-02-02 01:52:04 +03:00
|
|
|
free(ifa, M_IFADDR);
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Force a routing table entry to the specified
|
|
|
|
* destination to go through the given gateway.
|
|
|
|
* Normally called as a result of a routing redirect
|
|
|
|
* message from the network layer.
|
|
|
|
*
|
1995-08-13 03:59:09 +04:00
|
|
|
* N.B.: must be called at splsoftnet
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
1996-02-14 00:59:53 +03:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
|
|
|
|
const struct sockaddr *netmask, int flags, const struct sockaddr *src,
|
|
|
|
struct rtentry **rtp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:45:33 +04:00
|
|
|
struct rtentry *rt;
|
1993-03-21 12:45:37 +03:00
|
|
|
int error = 0;
|
2006-04-10 23:06:37 +04:00
|
|
|
u_quad_t *stat = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
struct rt_addrinfo info;
|
|
|
|
struct ifaddr *ifa;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
/* verify the gateway is directly reachable */
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
|
1993-03-21 12:45:37 +03:00
|
|
|
error = ENETUNREACH;
|
1994-03-23 08:05:03 +03:00
|
|
|
goto out;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
rt = rtalloc1(dst, 0);
|
|
|
|
/*
|
|
|
|
* If the redirect isn't from our current router for this dst,
|
|
|
|
* it's either old or wrong. If it redirects us to ourselves,
|
|
|
|
* we have a routing loop, perhaps as a result of an interface
|
|
|
|
* going down recently.
|
|
|
|
*/
|
2001-07-20 22:52:18 +04:00
|
|
|
#define equal(a1, a2) \
|
|
|
|
((a1)->sa_len == (a2)->sa_len && \
|
2005-05-30 01:22:52 +04:00
|
|
|
memcmp((a1), (a2), (a1)->sa_len) == 0)
|
1994-05-13 10:02:48 +04:00
|
|
|
if (!(flags & RTF_DONE) && rt &&
|
|
|
|
(!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
|
1993-03-21 12:45:37 +03:00
|
|
|
error = EINVAL;
|
|
|
|
else if (ifa_ifwithaddr(gateway))
|
|
|
|
error = EHOSTUNREACH;
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
/*
|
|
|
|
* Create a new entry if we just got back a wildcard entry
|
2000-03-14 02:52:25 +03:00
|
|
|
* or the lookup failed. This is necessary for hosts
|
1993-03-21 12:45:37 +03:00
|
|
|
* which use routing redirects generated by smart gateways
|
|
|
|
* to dynamically build the routing tables.
|
|
|
|
*/
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
|
1993-03-21 12:45:37 +03:00
|
|
|
goto create;
|
|
|
|
/*
|
|
|
|
* Don't listen to the redirect if it's
|
2005-02-27 01:45:09 +03:00
|
|
|
* for a route to an interface.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
if (rt->rt_flags & RTF_GATEWAY) {
|
|
|
|
if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
|
|
|
|
/*
|
|
|
|
* Changing from route to net => route to host.
|
|
|
|
* Create new route, rather than smashing route to net.
|
|
|
|
*/
|
|
|
|
create:
|
2001-01-17 07:05:41 +03:00
|
|
|
if (rt)
|
|
|
|
rtfree(rt);
|
1993-03-21 12:45:37 +03:00
|
|
|
flags |= RTF_GATEWAY | RTF_DYNAMIC;
|
2001-01-17 07:05:41 +03:00
|
|
|
info.rti_info[RTAX_DST] = dst;
|
|
|
|
info.rti_info[RTAX_GATEWAY] = gateway;
|
|
|
|
info.rti_info[RTAX_NETMASK] = netmask;
|
|
|
|
info.rti_ifa = ifa;
|
|
|
|
info.rti_flags = flags;
|
|
|
|
rt = NULL;
|
|
|
|
error = rtrequest1(RTM_ADD, &info, &rt);
|
|
|
|
if (rt != NULL)
|
|
|
|
flags = rt->rt_flags;
|
1993-03-21 12:45:37 +03:00
|
|
|
stat = &rtstat.rts_dynamic;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Smash the current notion of the gateway to
|
|
|
|
* this destination. Should check about netmask!!!
|
|
|
|
*/
|
1994-05-13 10:02:48 +04:00
|
|
|
rt->rt_flags |= RTF_MODIFIED;
|
|
|
|
flags |= RTF_MODIFIED;
|
|
|
|
stat = &rtstat.rts_newgateway;
|
|
|
|
rt_setgate(rt, rt_key(rt), gateway);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
} else
|
|
|
|
error = EHOSTUNREACH;
|
|
|
|
done:
|
|
|
|
if (rt) {
|
|
|
|
if (rtp && !error)
|
|
|
|
*rtp = rt;
|
|
|
|
else
|
|
|
|
rtfree(rt);
|
|
|
|
}
|
1994-03-23 08:05:03 +03:00
|
|
|
out:
|
1993-03-21 12:45:37 +03:00
|
|
|
if (error)
|
|
|
|
rtstat.rts_badredirect++;
|
1994-03-23 08:05:03 +03:00
|
|
|
else if (stat != NULL)
|
|
|
|
(*stat)++;
|
2001-07-18 20:43:09 +04:00
|
|
|
memset((caddr_t)&info, 0, sizeof(info));
|
1994-05-13 10:02:48 +04:00
|
|
|
info.rti_info[RTAX_DST] = dst;
|
|
|
|
info.rti_info[RTAX_GATEWAY] = gateway;
|
|
|
|
info.rti_info[RTAX_NETMASK] = netmask;
|
|
|
|
info.rti_info[RTAX_AUTHOR] = src;
|
|
|
|
rt_missmsg(RTM_REDIRECT, &info, flags, error);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
2001-01-27 07:49:31 +03:00
|
|
|
/*
|
|
|
|
* Delete a route and generate a message
|
|
|
|
*/
|
|
|
|
static int
|
2004-04-22 01:03:43 +04:00
|
|
|
rtdeletemsg(struct rtentry *rt)
|
2001-01-27 07:49:31 +03:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct rt_addrinfo info;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Request the new route so that the entry is not actually
|
|
|
|
* deleted. That will allow the information being reported to
|
|
|
|
* be accurate (and consistent with route_output()).
|
|
|
|
*/
|
2001-07-18 20:43:09 +04:00
|
|
|
memset((caddr_t)&info, 0, sizeof(info));
|
2001-01-27 07:49:31 +03:00
|
|
|
info.rti_info[RTAX_DST] = rt_key(rt);
|
|
|
|
info.rti_info[RTAX_NETMASK] = rt_mask(rt);
|
|
|
|
info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
|
|
|
|
info.rti_flags = rt->rt_flags;
|
|
|
|
error = rtrequest1(RTM_DELETE, &info, &rt);
|
|
|
|
|
|
|
|
rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);
|
|
|
|
|
|
|
|
/* Adjust the refcount */
|
|
|
|
if (error == 0 && rt->rt_refcnt <= 0) {
|
|
|
|
rt->rt_refcnt++;
|
|
|
|
rtfree(rt);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2001-01-27 13:39:33 +03:00
|
|
|
static int
|
2004-04-22 01:03:43 +04:00
|
|
|
rtflushclone1(struct radix_node *rn, void *arg)
|
2001-01-27 13:39:33 +03:00
|
|
|
{
|
|
|
|
struct rtentry *rt, *parent;
|
|
|
|
|
|
|
|
rt = (struct rtentry *)rn;
|
|
|
|
parent = (struct rtentry *)arg;
|
|
|
|
if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
|
|
|
|
rtdeletemsg(rt);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2004-04-22 01:03:43 +04:00
|
|
|
rtflushclone(struct radix_node_head *rnh, struct rtentry *parent)
|
2001-01-27 13:39:33 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
|
|
|
|
panic("rtflushclone: called with a non-cloning route");
|
|
|
|
if (!rnh->rnh_walktree)
|
|
|
|
panic("rtflushclone: no rnh_walktree");
|
|
|
|
#endif
|
2001-01-27 14:07:59 +03:00
|
|
|
rnh->rnh_walktree(rnh, rtflushclone1, (void *)parent);
|
2001-01-27 13:39:33 +03:00
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1998-12-22 05:27:06 +03:00
|
|
|
* Routing table ioctl interface.
|
|
|
|
*/
|
1994-05-11 13:26:46 +04:00
|
|
|
int
|
2006-11-16 04:32:37 +03:00
|
|
|
rtioctl(u_long req, caddr_t data, struct lwp *l)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ifaddr *
|
2004-04-22 01:03:43 +04:00
|
|
|
ifa_ifwithroute(int flags, const struct sockaddr *dst,
|
|
|
|
const struct sockaddr *gateway)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:45:33 +04:00
|
|
|
struct ifaddr *ifa;
|
1993-03-21 12:45:37 +03:00
|
|
|
if ((flags & RTF_GATEWAY) == 0) {
|
|
|
|
/*
|
|
|
|
* If we are adding a route to an interface,
|
|
|
|
* and the interface is a pt to pt link
|
|
|
|
* we should search for the destination
|
|
|
|
* as our clue to the interface. Otherwise
|
|
|
|
* we can use the local address.
|
|
|
|
*/
|
2006-04-10 23:06:37 +04:00
|
|
|
ifa = NULL;
|
2005-02-27 01:45:09 +03:00
|
|
|
if (flags & RTF_HOST)
|
1993-03-21 12:45:37 +03:00
|
|
|
ifa = ifa_ifwithdstaddr(dst);
|
2006-04-10 23:06:37 +04:00
|
|
|
if (ifa == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
ifa = ifa_ifwithaddr(gateway);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* If we are adding a route to a remote net
|
|
|
|
* or host, the gateway may still be on the
|
|
|
|
* other end of a pt to pt link.
|
|
|
|
*/
|
|
|
|
ifa = ifa_ifwithdstaddr(gateway);
|
|
|
|
}
|
2006-04-10 23:06:37 +04:00
|
|
|
if (ifa == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
ifa = ifa_ifwithnet(gateway);
|
2006-04-10 23:06:37 +04:00
|
|
|
if (ifa == NULL) {
|
1993-03-21 12:45:37 +03:00
|
|
|
struct rtentry *rt = rtalloc1(dst, 0);
|
2006-04-10 23:06:37 +04:00
|
|
|
if (rt == NULL)
|
|
|
|
return NULL;
|
1993-03-21 12:45:37 +03:00
|
|
|
rt->rt_refcnt--;
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((ifa = rt->rt_ifa) == NULL)
|
|
|
|
return NULL;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
if (ifa->ifa_addr->sa_family != dst->sa_family) {
|
1994-05-13 10:02:48 +04:00
|
|
|
struct ifaddr *oifa = ifa;
|
1993-03-21 12:45:37 +03:00
|
|
|
ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
|
|
|
|
if (ifa == 0)
|
|
|
|
ifa = oifa;
|
|
|
|
}
|
|
|
|
return (ifa);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
|
|
|
|
|
1994-05-11 13:26:46 +04:00
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
|
|
|
|
const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
|
2001-01-17 07:05:41 +03:00
|
|
|
{
|
|
|
|
struct rt_addrinfo info;
|
|
|
|
|
2001-07-18 20:43:09 +04:00
|
|
|
memset(&info, 0, sizeof(info));
|
2001-01-17 07:05:41 +03:00
|
|
|
info.rti_flags = flags;
|
|
|
|
info.rti_info[RTAX_DST] = dst;
|
|
|
|
info.rti_info[RTAX_GATEWAY] = gateway;
|
|
|
|
info.rti_info[RTAX_NETMASK] = netmask;
|
|
|
|
return rtrequest1(req, &info, ret_nrt);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_getifa(struct rt_addrinfo *info)
|
2001-01-17 07:05:41 +03:00
|
|
|
{
|
|
|
|
struct ifaddr *ifa;
|
2006-04-10 23:06:37 +04:00
|
|
|
const struct sockaddr *dst = info->rti_info[RTAX_DST];
|
|
|
|
const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
|
|
|
|
const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
|
|
|
|
const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
|
|
|
|
int flags = info->rti_flags;
|
2001-01-17 07:05:41 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ifp may be specified by sockaddr_dl when protocol address
|
|
|
|
* is ambiguous
|
|
|
|
*/
|
|
|
|
if (info->rti_ifp == NULL && ifpaddr != NULL
|
|
|
|
&& ifpaddr->sa_family == AF_LINK &&
|
2004-04-21 08:17:28 +04:00
|
|
|
(ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
|
2001-01-17 07:05:41 +03:00
|
|
|
info->rti_ifp = ifa->ifa_ifp;
|
|
|
|
if (info->rti_ifa == NULL && ifaaddr != NULL)
|
|
|
|
info->rti_ifa = ifa_ifwithaddr(ifaaddr);
|
|
|
|
if (info->rti_ifa == NULL) {
|
2004-04-21 08:17:28 +04:00
|
|
|
const struct sockaddr *sa;
|
2001-01-17 07:05:41 +03:00
|
|
|
|
|
|
|
sa = ifaaddr != NULL ? ifaaddr :
|
|
|
|
(gateway != NULL ? gateway : dst);
|
|
|
|
if (sa != NULL && info->rti_ifp != NULL)
|
|
|
|
info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
|
|
|
|
else if (dst != NULL && gateway != NULL)
|
|
|
|
info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
|
|
|
|
else if (sa != NULL)
|
|
|
|
info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
|
|
|
|
}
|
2006-11-13 08:13:38 +03:00
|
|
|
if ((ifa = info->rti_ifa) == NULL)
|
|
|
|
return ENETUNREACH;
|
|
|
|
if (ifa->ifa_getifa != NULL)
|
|
|
|
info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
|
|
|
|
if (info->rti_ifp == NULL)
|
|
|
|
info->rti_ifp = ifa->ifa_ifp;
|
|
|
|
return 0;
|
2001-01-17 07:05:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2004-04-22 01:03:43 +04:00
|
|
|
int s = splsoftnet();
|
|
|
|
int error = 0;
|
2001-01-27 07:49:31 +03:00
|
|
|
struct rtentry *rt, *crt;
|
2000-03-30 13:45:33 +04:00
|
|
|
struct radix_node *rn;
|
|
|
|
struct radix_node_head *rnh;
|
1994-05-13 10:02:48 +04:00
|
|
|
struct ifaddr *ifa;
|
2004-09-30 04:14:05 +04:00
|
|
|
struct sockaddr_storage deldst;
|
2006-04-10 23:06:37 +04:00
|
|
|
const struct sockaddr *dst = info->rti_info[RTAX_DST];
|
|
|
|
const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
|
|
|
|
const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
|
|
|
|
int flags = info->rti_flags;
|
1993-03-21 12:45:37 +03:00
|
|
|
#define senderr(x) { error = x ; goto bad; }
|
|
|
|
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((rnh = rt_tables[dst->sa_family]) == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
senderr(ESRCH);
|
|
|
|
if (flags & RTF_HOST)
|
2006-04-10 23:06:37 +04:00
|
|
|
netmask = NULL;
|
1993-03-21 12:45:37 +03:00
|
|
|
switch (req) {
|
|
|
|
case RTM_DELETE:
|
2004-09-30 04:14:05 +04:00
|
|
|
if (netmask) {
|
|
|
|
rt_maskedcopy(dst, (struct sockaddr *)&deldst, netmask);
|
|
|
|
dst = (struct sockaddr *)&deldst;
|
|
|
|
}
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
|
2001-01-27 13:39:33 +03:00
|
|
|
senderr(ESRCH);
|
|
|
|
rt = (struct rtentry *)rn;
|
|
|
|
if ((rt->rt_flags & RTF_CLONING) != 0) {
|
|
|
|
/* clean up any cloned children */
|
|
|
|
rtflushclone(rnh, rt);
|
|
|
|
}
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
senderr(ESRCH);
|
|
|
|
if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
|
|
|
|
panic ("rtrequest delete");
|
|
|
|
rt = (struct rtentry *)rn;
|
1994-05-13 10:02:48 +04:00
|
|
|
if (rt->rt_gwroute) {
|
2006-04-10 23:06:37 +04:00
|
|
|
RTFREE(rt->rt_gwroute);
|
|
|
|
rt->rt_gwroute = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
2001-10-16 06:42:36 +04:00
|
|
|
if (rt->rt_parent) {
|
|
|
|
rt->rt_parent->rt_refcnt--;
|
|
|
|
rt->rt_parent = NULL;
|
|
|
|
}
|
1999-10-09 12:13:00 +04:00
|
|
|
rt->rt_flags &= ~RTF_UP;
|
1993-03-21 12:45:37 +03:00
|
|
|
if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
|
2001-01-17 07:05:41 +03:00
|
|
|
ifa->ifa_rtrequest(RTM_DELETE, rt, info);
|
1993-03-21 12:45:37 +03:00
|
|
|
rttrash++;
|
1994-05-13 10:02:48 +04:00
|
|
|
if (ret_nrt)
|
|
|
|
*ret_nrt = rt;
|
|
|
|
else if (rt->rt_refcnt <= 0) {
|
|
|
|
rt->rt_refcnt++;
|
1993-03-21 12:45:37 +03:00
|
|
|
rtfree(rt);
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case RTM_RESOLVE:
|
2006-04-10 23:06:37 +04:00
|
|
|
if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
senderr(EINVAL);
|
2001-01-27 07:49:31 +03:00
|
|
|
if ((rt->rt_flags & RTF_CLONING) == 0)
|
|
|
|
senderr(EINVAL);
|
1993-03-21 12:45:37 +03:00
|
|
|
ifa = rt->rt_ifa;
|
2001-01-27 07:49:31 +03:00
|
|
|
flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
|
|
|
|
flags |= RTF_CLONED;
|
1993-03-21 12:45:37 +03:00
|
|
|
gateway = rt->rt_gateway;
|
2006-04-10 23:06:37 +04:00
|
|
|
if ((netmask = rt->rt_genmask) == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
flags |= RTF_HOST;
|
|
|
|
goto makeroute;
|
|
|
|
|
|
|
|
case RTM_ADD:
|
2006-04-10 23:06:37 +04:00
|
|
|
if (info->rti_ifa == NULL && (error = rt_getifa(info)))
|
2001-01-17 07:05:41 +03:00
|
|
|
senderr(error);
|
|
|
|
ifa = info->rti_ifa;
|
1993-03-21 12:45:37 +03:00
|
|
|
makeroute:
|
2006-10-05 21:35:19 +04:00
|
|
|
/* Already at splsoftnet() so pool_get/pool_put are safe */
|
1998-12-22 05:27:06 +03:00
|
|
|
rt = pool_get(&rtentry_pool, PR_NOWAIT);
|
2006-04-10 23:06:37 +04:00
|
|
|
if (rt == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
senderr(ENOBUFS);
|
1994-05-13 10:02:48 +04:00
|
|
|
Bzero(rt, sizeof(*rt));
|
|
|
|
rt->rt_flags = RTF_UP | flags;
|
1998-04-29 07:41:49 +04:00
|
|
|
LIST_INIT(&rt->rt_timer);
|
1994-05-13 10:02:48 +04:00
|
|
|
if (rt_setgate(rt, dst, gateway)) {
|
1998-12-22 05:27:06 +03:00
|
|
|
pool_put(&rtentry_pool, rt);
|
1994-05-13 10:02:48 +04:00
|
|
|
senderr(ENOBUFS);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
if (netmask) {
|
2006-12-04 03:56:44 +03:00
|
|
|
rt_maskedcopy(dst, rt_key(rt), netmask);
|
1993-03-21 12:45:37 +03:00
|
|
|
} else
|
2006-12-04 03:56:44 +03:00
|
|
|
Bcopy(dst, rt_key(rt), dst->sa_len);
|
2006-11-13 08:13:38 +03:00
|
|
|
rt_set_ifa(rt, ifa);
|
1993-03-21 12:45:37 +03:00
|
|
|
rt->rt_ifp = ifa->ifa_ifp;
|
1999-08-21 07:46:35 +04:00
|
|
|
if (req == RTM_RESOLVE) {
|
1993-03-21 12:45:37 +03:00
|
|
|
rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
|
2001-01-27 13:39:33 +03:00
|
|
|
rt->rt_parent = *ret_nrt;
|
|
|
|
rt->rt_parent->rt_refcnt++;
|
1999-08-21 07:46:35 +04:00
|
|
|
}
|
2006-12-04 03:56:44 +03:00
|
|
|
rn = rnh->rnh_addaddr(rt_key(rt), netmask, rnh, rt->rt_nodes);
|
|
|
|
if (rn == NULL && (crt = rtalloc1(rt_key(rt), 0)) != NULL) {
|
2001-01-27 07:49:31 +03:00
|
|
|
/* overwrite cloned route */
|
|
|
|
if ((crt->rt_flags & RTF_CLONED) != 0) {
|
|
|
|
rtdeletemsg(crt);
|
2006-12-04 03:56:44 +03:00
|
|
|
rn = rnh->rnh_addaddr(rt_key(rt),
|
2005-05-30 01:22:52 +04:00
|
|
|
netmask, rnh, rt->rt_nodes);
|
2001-01-27 07:49:31 +03:00
|
|
|
}
|
|
|
|
RTFREE(crt);
|
|
|
|
}
|
2006-04-10 23:06:37 +04:00
|
|
|
if (rn == NULL) {
|
2001-01-27 07:49:31 +03:00
|
|
|
IFAFREE(ifa);
|
2001-01-27 13:39:33 +03:00
|
|
|
if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
|
|
|
|
rtfree(rt->rt_parent);
|
2001-01-27 07:49:31 +03:00
|
|
|
if (rt->rt_gwroute)
|
|
|
|
rtfree(rt->rt_gwroute);
|
|
|
|
Free(rt_key(rt));
|
|
|
|
pool_put(&rtentry_pool, rt);
|
|
|
|
senderr(EEXIST);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
if (ifa->ifa_rtrequest)
|
2001-01-17 07:05:41 +03:00
|
|
|
ifa->ifa_rtrequest(req, rt, info);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (ret_nrt) {
|
|
|
|
*ret_nrt = rt;
|
|
|
|
rt->rt_refcnt++;
|
|
|
|
}
|
2001-01-27 13:39:33 +03:00
|
|
|
if ((rt->rt_flags & RTF_CLONING) != 0) {
|
|
|
|
/* clean up any cloned children */
|
|
|
|
rtflushclone(rnh, rt);
|
|
|
|
}
|
2006-12-09 08:33:04 +03:00
|
|
|
rtflushall(dst->sa_family);
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
bad:
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-13 10:02:48 +04:00
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_setgate(struct rtentry *rt0, const struct sockaddr *dst,
|
|
|
|
const struct sockaddr *gate)
|
1994-05-13 10:02:48 +04:00
|
|
|
{
|
2004-04-21 08:17:28 +04:00
|
|
|
char *new, *old;
|
2002-08-26 05:42:28 +04:00
|
|
|
u_int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
|
2000-03-30 13:45:33 +04:00
|
|
|
struct rtentry *rt = rt0;
|
1994-05-13 10:02:48 +04:00
|
|
|
|
2006-04-10 23:06:37 +04:00
|
|
|
if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
|
1994-05-13 10:02:48 +04:00
|
|
|
old = (caddr_t)rt_key(rt);
|
|
|
|
R_Malloc(new, caddr_t, dlen + glen);
|
2006-04-10 23:06:37 +04:00
|
|
|
if (new == NULL)
|
1994-05-13 10:02:48 +04:00
|
|
|
return 1;
|
2000-03-10 17:47:12 +03:00
|
|
|
Bzero(new, dlen + glen);
|
1994-05-13 10:02:48 +04:00
|
|
|
rt->rt_nodes->rn_key = new;
|
|
|
|
} else {
|
2005-05-30 01:22:52 +04:00
|
|
|
new = __UNCONST(rt->rt_nodes->rn_key); /*XXXUNCONST*/
|
2006-04-10 23:06:37 +04:00
|
|
|
old = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
|
|
|
Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
|
|
|
|
if (old) {
|
|
|
|
Bcopy(dst, new, dlen);
|
|
|
|
Free(old);
|
|
|
|
}
|
|
|
|
if (rt->rt_gwroute) {
|
2006-04-10 23:06:37 +04:00
|
|
|
RTFREE(rt->rt_gwroute);
|
|
|
|
rt->rt_gwroute = NULL;
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
|
|
|
if (rt->rt_flags & RTF_GATEWAY) {
|
|
|
|
rt->rt_gwroute = rtalloc1(gate, 1);
|
1999-08-21 07:46:35 +04:00
|
|
|
/*
|
|
|
|
* If we switched gateways, grab the MTU from the new
|
2001-07-26 09:47:37 +04:00
|
|
|
* gateway route if the current MTU is
|
|
|
|
* greater than the MTU of the gateway.
|
|
|
|
* Note that, if the MTU of the gateway is 0, we will reset the
|
|
|
|
* MTU of the route to run PMTUD again from scratch. XXX
|
1999-08-21 07:46:35 +04:00
|
|
|
*/
|
|
|
|
if (rt->rt_gwroute
|
|
|
|
&& !(rt->rt_rmx.rmx_locks & RTV_MTU)
|
2001-07-26 09:47:37 +04:00
|
|
|
&& rt->rt_rmx.rmx_mtu
|
|
|
|
&& rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
|
1999-08-21 07:46:35 +04:00
|
|
|
rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
|
|
|
|
}
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
1994-05-11 13:26:46 +04:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
|
|
|
|
const struct sockaddr *netmask)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2005-05-30 01:22:52 +04:00
|
|
|
const u_char *cp1 = (const u_char *)src;
|
2000-03-30 13:45:33 +04:00
|
|
|
u_char *cp2 = (u_char *)dst;
|
2005-05-30 01:22:52 +04:00
|
|
|
const u_char *cp3 = (const u_char *)netmask;
|
1993-03-21 12:45:37 +03:00
|
|
|
u_char *cplim = cp2 + *cp3;
|
|
|
|
u_char *cplim2 = cp2 + *cp1;
|
|
|
|
|
|
|
|
*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
|
|
|
|
cp3 += 2;
|
|
|
|
if (cplim > cplim2)
|
|
|
|
cplim = cplim2;
|
|
|
|
while (cp2 < cplim)
|
|
|
|
*cp2++ = *cp1++ & *cp3++;
|
|
|
|
if (cp2 < cplim2)
|
2005-05-30 01:22:52 +04:00
|
|
|
memset(cp2, 0, (unsigned)(cplim2 - cp2));
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
1994-05-13 10:02:48 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1999-10-09 22:55:30 +04:00
|
|
|
* Set up or tear down a routing table entry, normally
|
1993-03-21 12:45:37 +03:00
|
|
|
* for an interface.
|
|
|
|
*/
|
1994-05-11 13:26:46 +04:00
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rtinit(struct ifaddr *ifa, int cmd, int flags)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2000-03-30 13:45:33 +04:00
|
|
|
struct rtentry *rt;
|
|
|
|
struct sockaddr *dst, *odst;
|
1999-10-09 22:55:30 +04:00
|
|
|
struct sockaddr_storage deldst;
|
2006-04-10 23:06:37 +04:00
|
|
|
struct rtentry *nrt = NULL;
|
1993-03-21 12:45:37 +03:00
|
|
|
int error;
|
2001-01-17 07:05:41 +03:00
|
|
|
struct rt_addrinfo info;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
|
|
|
|
if (cmd == RTM_DELETE) {
|
|
|
|
if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
|
1999-10-09 22:55:30 +04:00
|
|
|
/* Delete subnet route for this interface */
|
|
|
|
odst = dst;
|
|
|
|
dst = (struct sockaddr *)&deldst;
|
|
|
|
rt_maskedcopy(odst, dst, ifa->ifa_netmask);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
1996-02-14 00:59:53 +03:00
|
|
|
if ((rt = rtalloc1(dst, 0)) != NULL) {
|
1993-03-21 12:45:37 +03:00
|
|
|
rt->rt_refcnt--;
|
1999-10-09 22:55:30 +04:00
|
|
|
if (rt->rt_ifa != ifa)
|
2007-02-17 10:50:49 +03:00
|
|
|
return (flags & RTF_HOST) ? EHOSTUNREACH
|
|
|
|
: ENETUNREACH;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
}
|
2001-07-18 20:43:09 +04:00
|
|
|
memset(&info, 0, sizeof(info));
|
2001-01-17 07:05:41 +03:00
|
|
|
info.rti_ifa = ifa;
|
|
|
|
info.rti_flags = flags | ifa->ifa_flags;
|
|
|
|
info.rti_info[RTAX_DST] = dst;
|
|
|
|
info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
|
|
|
|
/*
|
|
|
|
* XXX here, it seems that we are assuming that ifa_netmask is NULL
|
|
|
|
* for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
|
|
|
|
* variable) when RTF_HOST is 1. Still not sure if I can safely
|
|
|
|
* change it to meet bsdi4 behavior.
|
|
|
|
*/
|
|
|
|
info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
|
|
|
|
error = rtrequest1(cmd, &info, &nrt);
|
1994-05-13 10:02:48 +04:00
|
|
|
if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
|
|
|
|
rt_newaddrmsg(cmd, ifa, error, nrt);
|
|
|
|
if (rt->rt_refcnt <= 0) {
|
|
|
|
rt->rt_refcnt++;
|
|
|
|
rtfree(rt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
|
|
|
|
rt->rt_refcnt--;
|
|
|
|
if (rt->rt_ifa != ifa) {
|
1997-04-03 01:17:28 +04:00
|
|
|
printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
|
|
|
|
rt->rt_ifa);
|
1994-05-13 10:02:48 +04:00
|
|
|
if (rt->rt_ifa->ifa_rtrequest)
|
2001-01-17 07:05:41 +03:00
|
|
|
rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
|
2006-11-13 08:13:38 +03:00
|
|
|
rt_replace_ifa(rt, ifa);
|
1994-05-13 10:02:48 +04:00
|
|
|
rt->rt_ifp = ifa->ifa_ifp;
|
|
|
|
if (ifa->ifa_rtrequest)
|
2001-01-17 07:05:41 +03:00
|
|
|
ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
|
1994-05-13 10:02:48 +04:00
|
|
|
}
|
|
|
|
rt_newaddrmsg(cmd, ifa, error, nrt);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2007-02-17 10:50:49 +03:00
|
|
|
return error;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
1998-04-29 07:41:49 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Route timer routines. These routines allow functions to be called
|
|
|
|
* for various routes at any time. This is useful in supporting
|
|
|
|
* path MTU discovery and redirect route deletion.
|
|
|
|
*
|
|
|
|
* This is similar to some BSDI internal functions, but it provides
|
|
|
|
* for multiple queues for efficiency's sake...
|
|
|
|
*/
|
|
|
|
|
|
|
|
LIST_HEAD(, rttimer_queue) rttimer_queue_head;
|
|
|
|
static int rt_init_done = 0;
|
|
|
|
|
2004-04-22 01:03:43 +04:00
|
|
|
#define RTTIMER_CALLOUT(r) do { \
|
|
|
|
if (r->rtt_func != NULL) { \
|
|
|
|
(*r->rtt_func)(r->rtt_rt, r); \
|
|
|
|
} else { \
|
|
|
|
rtrequest((int) RTM_DELETE, \
|
|
|
|
(struct sockaddr *)rt_key(r->rtt_rt), \
|
|
|
|
0, 0, 0, 0); \
|
|
|
|
} \
|
|
|
|
} while (/*CONSTCOND*/0)
|
1998-04-29 07:41:49 +04:00
|
|
|
|
2005-02-27 01:45:09 +03:00
|
|
|
/*
|
1998-04-29 07:41:49 +04:00
|
|
|
* Some subtle order problems with domain initialization mean that
|
|
|
|
* we cannot count on this being run from rt_init before various
|
|
|
|
* protocol initializations are done. Therefore, we make sure
|
|
|
|
* that this is run when the first queue is added...
|
|
|
|
*/
|
|
|
|
|
2005-02-27 01:45:09 +03:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_init(void)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
|
|
|
assert(rt_init_done == 0);
|
|
|
|
|
|
|
|
LIST_INIT(&rttimer_queue_head);
|
2000-03-23 10:01:25 +03:00
|
|
|
callout_init(&rt_timer_ch);
|
|
|
|
callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
|
1998-04-29 07:41:49 +04:00
|
|
|
rt_init_done = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct rttimer_queue *
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_queue_create(u_int timeout)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
|
|
|
struct rttimer_queue *rtq;
|
|
|
|
|
|
|
|
if (rt_init_done == 0)
|
|
|
|
rt_timer_init();
|
|
|
|
|
|
|
|
R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
|
|
|
|
if (rtq == NULL)
|
2007-02-17 10:50:49 +03:00
|
|
|
return NULL;
|
2000-03-10 17:47:12 +03:00
|
|
|
Bzero(rtq, sizeof *rtq);
|
1998-04-29 07:41:49 +04:00
|
|
|
|
|
|
|
rtq->rtq_timeout = timeout;
|
2000-12-09 04:29:45 +03:00
|
|
|
rtq->rtq_count = 0;
|
1998-12-27 21:27:48 +03:00
|
|
|
TAILQ_INIT(&rtq->rtq_head);
|
1998-04-29 07:41:49 +04:00
|
|
|
LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
|
|
|
|
|
2007-02-17 10:50:49 +03:00
|
|
|
return rtq;
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
1998-12-27 21:27:48 +03:00
|
|
|
|
1998-04-29 07:41:49 +04:00
|
|
|
rtq->rtq_timeout = timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
1998-12-27 21:27:48 +03:00
|
|
|
struct rttimer *r;
|
1998-04-29 07:41:49 +04:00
|
|
|
|
1998-12-27 21:27:48 +03:00
|
|
|
while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
|
1998-04-29 07:41:49 +04:00
|
|
|
LIST_REMOVE(r, rtt_link);
|
1998-12-27 21:27:48 +03:00
|
|
|
TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
|
|
|
|
if (destroy)
|
1998-04-29 07:41:49 +04:00
|
|
|
RTTIMER_CALLOUT(r);
|
2006-10-05 21:35:19 +04:00
|
|
|
/* we are already at splsoftnet */
|
1998-12-22 05:27:06 +03:00
|
|
|
pool_put(&rttimer_pool, r);
|
2000-12-09 04:29:45 +03:00
|
|
|
if (rtq->rtq_count > 0)
|
|
|
|
rtq->rtq_count--;
|
|
|
|
else
|
2002-11-12 05:10:13 +03:00
|
|
|
printf("rt_timer_queue_remove_all: "
|
|
|
|
"rtq_count reached 0\n");
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
2002-11-12 05:10:13 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
|
2002-11-12 05:10:13 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
rt_timer_queue_remove_all(rtq, destroy);
|
1998-04-29 07:41:49 +04:00
|
|
|
|
|
|
|
LIST_REMOVE(rtq, rtq_link);
|
1998-12-22 05:27:06 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Caller is responsible for freeing the rttimer_queue structure.
|
|
|
|
*/
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
|
|
|
|
2000-12-09 04:29:45 +03:00
|
|
|
unsigned long
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_count(struct rttimer_queue *rtq)
|
2000-12-09 04:29:45 +03:00
|
|
|
{
|
|
|
|
return rtq->rtq_count;
|
|
|
|
}
|
|
|
|
|
2005-02-27 01:45:09 +03:00
|
|
|
void
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_remove_all(struct rtentry *rt, int destroy)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
1998-12-27 21:27:48 +03:00
|
|
|
struct rttimer *r;
|
1998-04-29 07:41:49 +04:00
|
|
|
|
1998-12-27 21:27:48 +03:00
|
|
|
while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
|
1998-04-29 07:41:49 +04:00
|
|
|
LIST_REMOVE(r, rtt_link);
|
1998-12-27 21:27:48 +03:00
|
|
|
TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
|
2002-11-12 04:37:30 +03:00
|
|
|
if (destroy)
|
|
|
|
RTTIMER_CALLOUT(r);
|
2000-12-09 04:29:45 +03:00
|
|
|
if (r->rtt_queue->rtq_count > 0)
|
|
|
|
r->rtt_queue->rtq_count--;
|
|
|
|
else
|
|
|
|
printf("rt_timer_remove_all: rtq_count reached 0\n");
|
2006-10-05 21:35:19 +04:00
|
|
|
/* we are already at splsoftnet */
|
2000-12-11 10:52:48 +03:00
|
|
|
pool_put(&rttimer_pool, r);
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-02-27 01:45:09 +03:00
|
|
|
int
|
2004-04-22 01:03:43 +04:00
|
|
|
rt_timer_add(struct rtentry *rt,
|
|
|
|
void (*func)(struct rtentry *, struct rttimer *),
|
|
|
|
struct rttimer_queue *queue)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
1998-12-27 21:27:48 +03:00
|
|
|
struct rttimer *r;
|
2006-10-05 21:35:19 +04:00
|
|
|
int s;
|
1998-04-29 07:41:49 +04:00
|
|
|
|
1998-12-27 21:27:48 +03:00
|
|
|
/*
|
|
|
|
* If there's already a timer with this action, destroy it before
|
|
|
|
* we add a new one.
|
|
|
|
*/
|
2007-02-17 10:50:49 +03:00
|
|
|
LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
|
|
|
|
if (r->rtt_func == func)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (r != NULL) {
|
|
|
|
LIST_REMOVE(r, rtt_link);
|
|
|
|
TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
|
|
|
|
if (r->rtt_queue->rtq_count > 0)
|
|
|
|
r->rtt_queue->rtq_count--;
|
|
|
|
else
|
|
|
|
printf("rt_timer_add: rtq_count reached 0\n");
|
|
|
|
} else {
|
|
|
|
s = splsoftnet();
|
|
|
|
r = pool_get(&rttimer_pool, PR_NOWAIT);
|
|
|
|
splx(s);
|
|
|
|
if (r == NULL)
|
|
|
|
return ENOBUFS;
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
|
|
|
|
2007-02-17 10:50:49 +03:00
|
|
|
memset(r, 0, sizeof(*r));
|
1998-12-27 21:27:48 +03:00
|
|
|
|
|
|
|
r->rtt_rt = rt;
|
2006-06-08 02:33:33 +04:00
|
|
|
r->rtt_time = time_uptime;
|
1998-12-27 21:27:48 +03:00
|
|
|
r->rtt_func = func;
|
|
|
|
r->rtt_queue = queue;
|
|
|
|
LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
|
|
|
|
TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
|
2000-12-09 04:29:45 +03:00
|
|
|
r->rtt_queue->rtq_count++;
|
2005-02-27 01:45:09 +03:00
|
|
|
|
1998-12-27 21:27:48 +03:00
|
|
|
return (0);
|
1998-04-29 07:41:49 +04:00
|
|
|
}
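/*
 * Illustrative sketch, not part of the original source: typical use of the
 * route timer API by a protocol such as ICMP path MTU discovery.  A queue
 * is created once with a per-entry timeout in seconds; each route of
 * interest then gets a timer whose callback runs when the timeout expires.
 * The names example_timer_q, example_expire and example_watch_route are
 * hypothetical.
 */
#if 0
static struct rttimer_queue *example_timer_q;

static void
example_expire(struct rtentry *rt, struct rttimer *rtt)
{
	/* Undo the temporary change, e.g. forget a discovered path MTU. */
	if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0)
		rt->rt_rmx.rmx_mtu = 0;
}

static void
example_watch_route(struct rtentry *rt)
{
	if (example_timer_q == NULL)
		example_timer_q = rt_timer_queue_create(10 * 60);
	if (example_timer_q != NULL)
		(void)rt_timer_add(rt, example_expire, example_timer_q);
}
#endif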
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
void
|
2006-11-16 04:32:37 +03:00
|
|
|
rt_timer_timer(void *arg)
|
1998-04-29 07:41:49 +04:00
|
|
|
{
|
1998-12-27 21:27:48 +03:00
|
|
|
struct rttimer_queue *rtq;
|
|
|
|
struct rttimer *r;
|
|
|
|
int s;
|
1998-10-28 08:01:11 +03:00
|
|
|
|
1998-12-27 21:27:48 +03:00
|
|
|
s = splsoftnet();
|
2007-02-17 10:50:49 +03:00
|
|
|
LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
|
1998-12-27 21:27:48 +03:00
|
|
|
while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
|
2006-06-08 02:33:33 +04:00
|
|
|
(r->rtt_time + rtq->rtq_timeout) < time_uptime) {
|
1998-12-27 21:27:48 +03:00
|
|
|
LIST_REMOVE(r, rtt_link);
|
|
|
|
TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
|
|
|
|
RTTIMER_CALLOUT(r);
|
|
|
|
pool_put(&rttimer_pool, r);
|
2000-12-09 04:29:45 +03:00
|
|
|
if (rtq->rtq_count > 0)
|
|
|
|
rtq->rtq_count--;
|
|
|
|
else
|
|
|
|
printf("rt_timer_timer: rtq_count reached 0\n");
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
|
|
|
}
|
1998-12-27 21:27:48 +03:00
|
|
|
splx(s);
|
1998-04-29 07:41:49 +04:00
|
|
|
|
2000-03-23 10:01:25 +03:00
|
|
|
callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
|
1998-04-29 07:41:49 +04:00
|
|
|
}
|
2006-12-16 00:18:52 +03:00
|
|
|
|
2007-01-05 19:40:08 +03:00
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
#ifndef RTCACHE_DEBUG_SIZE
|
|
|
|
#define RTCACHE_DEBUG_SIZE (1024 * 1024)
|
|
|
|
#endif
|
|
|
|
static const char *cache_caller[RTCACHE_DEBUG_SIZE];
|
|
|
|
static struct route *cache_entry[RTCACHE_DEBUG_SIZE];
|
|
|
|
size_t cache_cur;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
static void
|
|
|
|
_rtcache_init_debug(const char *caller, struct route *ro, int flag)
|
|
|
|
#else
|
|
|
|
static void
|
|
|
|
_rtcache_init(struct route *ro, int flag)
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < cache_cur; ++i) {
|
|
|
|
if (cache_entry[i] == ro)
|
|
|
|
panic("Reinit of route %p, initialised from %s", ro, cache_caller[i]);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
ro->ro_rt = rtalloc1(&ro->ro_dst, flag);
|
|
|
|
if (ro->ro_rt != NULL) {
|
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
if (cache_cur == RTCACHE_DEBUG_SIZE)
|
|
|
|
panic("Route cache debug overflow");
|
|
|
|
cache_caller[cache_cur] = caller;
|
|
|
|
cache_entry[cache_cur] = ro;
|
|
|
|
++cache_cur;
|
|
|
|
#endif
|
|
|
|
rtcache(ro);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
void
|
|
|
|
rtcache_init_debug(const char *caller, struct route *ro)
|
|
|
|
{
|
|
|
|
_rtcache_init_debug(caller, ro, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtcache_init_noclone_debug(const char *caller, struct route *ro)
|
|
|
|
{
|
|
|
|
_rtcache_init_debug(caller, ro, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
2006-12-16 00:18:52 +03:00
|
|
|
void
|
|
|
|
rtcache_init(struct route *ro)
|
|
|
|
{
|
2007-01-05 19:40:08 +03:00
|
|
|
_rtcache_init(ro, 1);
|
2006-12-16 00:18:52 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtcache_init_noclone(struct route *ro)
|
|
|
|
{
|
2007-01-05 19:40:08 +03:00
|
|
|
_rtcache_init(ro, 0);
|
2006-12-16 00:18:52 +03:00
|
|
|
}
|
2007-01-05 19:40:08 +03:00
|
|
|
#endif
|
2006-12-16 00:18:52 +03:00
|
|
|
|
2007-01-05 19:40:08 +03:00
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
void
|
|
|
|
rtcache_copy_debug(const char *caller, struct route *new, const struct route *old, size_t new_len)
|
|
|
|
#else
|
2006-12-16 00:18:52 +03:00
|
|
|
void
|
|
|
|
rtcache_copy(struct route *new, const struct route *old, size_t new_len)
|
2007-01-05 19:40:08 +03:00
|
|
|
#endif
|
2006-12-16 00:18:52 +03:00
|
|
|
{
|
2007-01-05 19:40:08 +03:00
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < cache_cur; ++i) {
|
|
|
|
if (cache_entry[i] == new)
|
|
|
|
panic("Copy to initalised route %p (before %s)", new, cache_caller[i]);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2006-12-16 00:18:52 +03:00
|
|
|
bzero(new, new_len);
|
|
|
|
if (old->ro_dst.sa_len + offsetof(struct route, ro_dst) > new_len)
|
|
|
|
panic("rtcache_copy: dst address will overflow new route");
|
|
|
|
bcopy(&old->ro_dst, &new->ro_dst, old->ro_dst.sa_len);
|
|
|
|
new->ro_rt = old->ro_rt;
|
|
|
|
if (new->ro_rt != NULL) {
|
2007-01-05 19:40:08 +03:00
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
if (cache_cur == RTCACHE_DEBUG_SIZE)
|
|
|
|
panic("Route cache debug overflow");
|
|
|
|
cache_caller[cache_cur] = caller;
|
|
|
|
cache_entry[cache_cur] = new;
|
|
|
|
++cache_cur;
|
|
|
|
#endif
|
2006-12-16 00:18:52 +03:00
|
|
|
rtcache(new);
|
|
|
|
++new->ro_rt->rt_refcnt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtcache_free(struct route *ro)
|
|
|
|
{
|
2007-01-05 19:40:08 +03:00
|
|
|
#ifdef RTCACHE_DEBUG
|
|
|
|
size_t j, i = cache_cur;
|
|
|
|
for (i = j = 0; i < cache_cur; ++i, ++j) {
|
|
|
|
if (cache_entry[i] == ro) {
|
|
|
|
if (ro->ro_rt == NULL)
|
|
|
|
panic("Route cache manipulated (allocated by %s)", cache_caller[i]);
|
|
|
|
--j;
|
|
|
|
} else {
|
|
|
|
cache_caller[j] = cache_caller[i];
|
|
|
|
cache_entry[j] = cache_entry[i];
|
|
|
|
}
|
|
|
|
}
|
2006-12-16 00:18:52 +03:00
|
|
|
if (ro->ro_rt != NULL) {
|
2007-01-05 19:40:08 +03:00
|
|
|
if (i != j + 1)
|
|
|
|
panic("Wrong entries after rtcache_free: %zu (expected %zu)", j, i - 1);
|
|
|
|
--cache_cur;
|
2006-12-16 00:18:52 +03:00
|
|
|
}
|
2007-01-05 19:40:08 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
if (ro->ro_rt != NULL)
|
|
|
|
rtflush(ro);
|
2006-12-16 00:18:52 +03:00
|
|
|
ro->ro_rt = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rtcache_update(struct route *ro)
|
|
|
|
{
|
|
|
|
rtcache_free(ro);
|
|
|
|
rtcache_init(ro);
|
|
|
|
}
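/*
 * Illustrative sketch, not part of the original source: the expected life
 * cycle of a cached route managed with the rtcache_* helpers above.  The
 * owner fills in ro_dst, initialises the cache, and must be prepared for
 * ro_rt to turn NULL at any time after a flush; rtcache_free() releases
 * the cache when its owner goes away.  The names example_use_cache and
 * example_destroy_owner are hypothetical.
 */
#if 0
static int
example_use_cache(struct route *ro)
{
	/* (Re)populate the cache if it is empty or has been flushed. */
	if (ro->ro_rt == NULL)
		rtcache_init(ro);
	if (ro->ro_rt == NULL)
		return EHOSTUNREACH;	/* no route to ro_dst */

	/* ... transmit using ro->ro_rt ... */
	return 0;
}

static void
example_destroy_owner(struct route *ro)
{
	/* Drop the cached route and unregister it from the domain. */
	rtcache_free(ro);
}
#endif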
|