Avoid having a rtcache directly in a percpu storage for tunnel protocols.
percpu(9) reserves a block of memory storage for each CPU and hands out pieces of it to users. When the storage runs short, percpu(9) enlarges it by allocating new, larger memory areas, switching over to them, and destroying the old ones. A percpu storage area referenced by a pointer obtained via percpu_getref can therefore be destroyed by this mechanism once the running thread sleeps, even if percpu_putref has not yet been called.

Using an rtcache, i.e., packet processing, typically involves sleepable operations such as taking an rwlock, so we must avoid dereferencing an rtcache that is stored directly in percpu storage during packet processing. Address this by storing just a pointer to an rtcache in the percpu storage instead.

Reviewed by ozaki-r@ and yamaguchi@
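In condensed form, the new scheme looks like the sketch below. It restates struct tunnel_ro and the if_tunnel_get_ro()/if_tunnel_put_ro() helpers added to sys/net/if.h in the diff that follows; the comments are explanatory additions and are not part of the committed code.

```c
/*
 * Sketch of the pattern introduced by this commit (mirrors the helpers
 * added in sys/net/if.h below).  The percpu area now holds only pointers,
 * so a caller copies them out and may then sleep without worrying about
 * percpu(9) reallocating and destroying the percpu area behind its back.
 */
struct tunnel_ro {
	struct route *tr_ro;	/* allocated separately with kmem_zalloc();
				   its address stays valid across sleeps */
	kmutex_t *tr_lock;	/* serializes users of tr_ro */
};

static inline void
if_tunnel_get_ro(percpu_t *ro_percpu, struct route **ro, kmutex_t **lock)
{
	struct tunnel_ro *tro;

	tro = percpu_getref(ro_percpu);	/* pointer into the percpu area */
	*ro = tro->tr_ro;		/* copy out the stable pointers before */
	*lock = tro->tr_lock;		/* doing anything that may sleep */
	mutex_enter(*lock);
}

static inline void
if_tunnel_put_ro(percpu_t *ro_percpu, kmutex_t *lock)
{

	mutex_exit(lock);
	percpu_putref(ro_percpu);
}
```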
parent 2bcf5b2995
commit 2da350beca
sys/net/if.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: if.c,v 1.460 2019/09/13 07:55:07 msaitoh Exp $	*/
+/*	$NetBSD: if.c,v 1.461 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc.
@@ -90,7 +90,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.460 2019/09/13 07:55:07 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.461 2019/09/19 06:07:24 knakahara Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_inet.h"
@@ -2906,6 +2906,63 @@ if_tunnel_check_nesting(struct ifnet *ifp, struct mbuf *m, int limit)
 	return 0;
 }
 
+static void
+if_tunnel_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
+{
+	struct tunnel_ro *tro = p;
+
+	tro->tr_ro = kmem_zalloc(sizeof(*tro->tr_ro), KM_SLEEP);
+	tro->tr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+}
+
+percpu_t *
+if_tunnel_alloc_ro_percpu(void)
+{
+	percpu_t *ro_percpu;
+
+	ro_percpu = percpu_alloc(sizeof(struct tunnel_ro));
+	percpu_foreach(ro_percpu, if_tunnel_ro_init_pc, NULL);
+
+	return ro_percpu;
+}
+
+static void
+if_tunnel_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
+{
+	struct tunnel_ro *tro = p;
+
+	rtcache_free(tro->tr_ro);
+	kmem_free(tro->tr_ro, sizeof(*tro->tr_ro));
+
+	mutex_obj_free(tro->tr_lock);
+}
+
+void
+if_tunnel_free_ro_percpu(percpu_t *ro_percpu)
+{
+
+	percpu_foreach(ro_percpu, if_tunnel_ro_fini_pc, NULL);
+	percpu_free(ro_percpu, sizeof(struct tunnel_ro));
+}
+
+
+static void
+if_tunnel_rtcache_free_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
+{
+	struct tunnel_ro *tro = p;
+
+	mutex_enter(tro->tr_lock);
+	rtcache_free(tro->tr_ro);
+	mutex_exit(tro->tr_lock);
+}
+
+void if_tunnel_ro_percpu_rtcache_free(percpu_t *ro_percpu)
+{
+
+	percpu_foreach(ro_percpu, if_tunnel_rtcache_free_pc, NULL);
+}
+
+
 /* common */
 int
 ifioctl_common(struct ifnet *ifp, u_long cmd, void *data)
sys/net/if.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: if.h,v 1.276 2019/09/13 07:55:07 msaitoh Exp $	*/
+/*	$NetBSD: if.h,v 1.277 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -1122,6 +1122,33 @@ void if_acquire(struct ifnet *, struct psref *);
 #define if_release if_put
 
 int if_tunnel_check_nesting(struct ifnet *, struct mbuf *, int);
+percpu_t *if_tunnel_alloc_ro_percpu(void);
+void if_tunnel_free_ro_percpu(percpu_t *);
+void if_tunnel_ro_percpu_rtcache_free(percpu_t *);
+
+struct tunnel_ro {
+	struct route *tr_ro;
+	kmutex_t *tr_lock;
+};
+
+static inline void
+if_tunnel_get_ro(percpu_t *ro_percpu, struct route **ro, kmutex_t **lock)
+{
+	struct tunnel_ro *tro;
+
+	tro = percpu_getref(ro_percpu);
+	*ro = tro->tr_ro;
+	*lock = tro->tr_lock;
+	mutex_enter(*lock);
+}
+
+static inline void
+if_tunnel_put_ro(percpu_t *ro_percpu, kmutex_t *lock)
+{
+
+	mutex_exit(lock);
+	percpu_putref(ro_percpu);
+}
 
 static __inline if_index_t
 if_get_index(const struct ifnet *ifp)
sys/net/if_gif.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_gif.c,v 1.148 2019/06/25 12:30:50 msaitoh Exp $	*/
+/*	$NetBSD: if_gif.c,v 1.149 2019/09/19 06:07:24 knakahara Exp $	*/
 /*	$KAME: if_gif.c,v 1.76 2001/08/20 02:01:02 kjc Exp $	*/
 
 /*
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_gif.c,v 1.148 2019/06/25 12:30:50 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_gif.c,v 1.149 2019/09/19 06:07:24 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -104,9 +104,6 @@ static struct {
 
 struct psref_class *gv_psref_class __read_mostly;
 
-static void gif_ro_init_pc(void *, void *, struct cpu_info *);
-static void gif_ro_fini_pc(void *, void *, struct cpu_info *);
-
 static int gifattach0(struct gif_softc *);
 static int gif_output(struct ifnet *, struct mbuf *,
 		    const struct sockaddr *, const struct rtentry *);
@@ -271,8 +268,7 @@ gif_clone_create(struct if_clone *ifc, int unit)
 	mutex_init(&sc->gif_lock, MUTEX_DEFAULT, IPL_NONE);
 	sc->gif_psz = pserialize_create();
 
-	sc->gif_ro_percpu = percpu_alloc(sizeof(struct gif_ro));
-	percpu_foreach(sc->gif_ro_percpu, gif_ro_init_pc, NULL);
+	sc->gif_ro_percpu = if_tunnel_alloc_ro_percpu();
 	mutex_enter(&gif_softcs.lock);
 	LIST_INSERT_HEAD(&gif_softcs.list, sc, gif_list);
 	mutex_exit(&gif_softcs.lock);
@@ -309,32 +305,6 @@ gifattach0(struct gif_softc *sc)
 	return 0;
 }
 
-static void
-gif_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct gif_ro *gro = p;
-
-	gro->gr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
-}
-
-static void
-gif_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct gif_ro *gro = p;
-
-	rtcache_free(&gro->gr_ro);
-
-	mutex_obj_free(gro->gr_lock);
-}
-
-void
-gif_rtcache_free_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct gif_ro *gro = p;
-
-	rtcache_free(&gro->gr_ro);
-}
-
 static int
 gif_clone_destroy(struct ifnet *ifp)
 {
@@ -347,8 +317,7 @@ gif_clone_destroy(struct ifnet *ifp)
 	bpf_detach(ifp);
 	if_detach(ifp);
 
-	percpu_foreach(sc->gif_ro_percpu, gif_ro_fini_pc, NULL);
-	percpu_free(sc->gif_ro_percpu, sizeof(struct gif_ro));
+	if_tunnel_free_ro_percpu(sc->gif_ro_percpu);
 
 	pserialize_destroy(sc->gif_psz);
 	mutex_destroy(&sc->gif_lock);
sys/net/if_gif.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_gif.h,v 1.32 2018/10/19 00:12:56 knakahara Exp $	*/
+/*	$NetBSD: if_gif.h,v 1.33 2019/09/19 06:07:24 knakahara Exp $	*/
 /*	$KAME: if_gif.h,v 1.23 2001/07/27 09:21:42 itojun Exp $	*/
 
 /*
@@ -55,11 +55,6 @@ extern struct psref_class *gv_psref_class;
 
 struct encaptab;
 
-struct gif_ro {
-	struct route gr_ro;
-	kmutex_t *gr_lock;
-};
-
 struct gif_variant {
 	struct gif_softc *gv_softc;
 	struct sockaddr *gv_psrc;	/* Physical src addr */
@@ -73,7 +68,7 @@ struct gif_variant {
 
 struct gif_softc {
 	struct ifnet gif_if;		/* common area - must be at the top */
-	percpu_t *gif_ro_percpu;	/* struct gif_ro */
+	percpu_t *gif_ro_percpu;	/* struct tunnel_ro */
 	struct gif_variant *gif_var;	/*
 					 * reader must use gif_getref_variant()
 					 * instead of direct dereference.
@@ -131,8 +126,6 @@ gif_heldref_variant(struct gif_variant *var)
 /* Prototypes */
 void gif_input(struct mbuf *, int, struct ifnet *);
 
-void gif_rtcache_free_pc(void *, void *, struct cpu_info *);
-
 #ifdef GIF_ENCAPCHECK
 int gif_encapcheck(struct mbuf *, int, int, void *);
 #endif
@@ -147,8 +140,8 @@ int gif_encapcheck(struct mbuf *, int, int, void *);
  * - gif_var->gv_psref for reader
 * gif_softc->gif_var is used for variant values while the gif tunnel
 * exists.
- * + Each CPU's gif_ro.gr_ro of gif_ro_percpu are protected by
- *   percpu'ed gif_ro.gr_lock.
+ * + Each CPU's tunnel_ro.tr_ro of gif_ro_percpu are protected by
+ *   percpu'ed tunnel_ro.tr_lock.
 *
 * Locking order:
 * - encap_lock => gif_softc->gif_lock => gif_softcs.lock
sys/net/if_ipsec.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_ipsec.c,v 1.23 2019/09/13 07:55:07 msaitoh Exp $	*/
+/*	$NetBSD: if_ipsec.c,v 1.24 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_ipsec.c,v 1.23 2019/09/13 07:55:07 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_ipsec.c,v 1.24 2019/09/19 06:07:24 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -80,9 +80,6 @@ __KERNEL_RCSID(0, "$NetBSD: if_ipsec.c,v 1.23 2019/09/13 07:55:07 msaitoh Exp $"
 #include <netipsec/ipsec.h>
 #include <netipsec/ipsecif.h>
 
-static void if_ipsec_ro_init_pc(void *, void *, struct cpu_info *);
-static void if_ipsec_ro_fini_pc(void *, void *, struct cpu_info *);
-
 static int if_ipsec_clone_create(struct if_clone *, int);
 static int if_ipsec_clone_destroy(struct ifnet *);
 
@@ -182,8 +179,7 @@ if_ipsec_clone_create(struct if_clone *ifc, int unit)
 	sc->ipsec_var = var;
 	mutex_init(&sc->ipsec_lock, MUTEX_DEFAULT, IPL_NONE);
 	sc->ipsec_psz = pserialize_create();
-	sc->ipsec_ro_percpu = percpu_alloc(sizeof(struct ipsec_ro));
-	percpu_foreach(sc->ipsec_ro_percpu, if_ipsec_ro_init_pc, NULL);
+	sc->ipsec_ro_percpu = if_tunnel_alloc_ro_percpu();
 
 	mutex_enter(&ipsec_softcs.lock);
 	LIST_INSERT_HEAD(&ipsec_softcs.list, sc, ipsec_list);
@@ -213,24 +209,6 @@ if_ipsec_attach0(struct ipsec_softc *sc)
 	if_register(&sc->ipsec_if);
 }
 
-static void
-if_ipsec_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct ipsec_ro *iro = p;
-
-	iro->ir_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
-}
-
-static void
-if_ipsec_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct ipsec_ro *iro = p;
-
-	rtcache_free(&iro->ir_ro);
-
-	mutex_obj_free(iro->ir_lock);
-}
-
 static int
 if_ipsec_clone_destroy(struct ifnet *ifp)
 {
@@ -249,8 +227,7 @@ if_ipsec_clone_destroy(struct ifnet *ifp)
 	bpf_detach(ifp);
 	if_detach(ifp);
 
-	percpu_foreach(sc->ipsec_ro_percpu, if_ipsec_ro_fini_pc, NULL);
-	percpu_free(sc->ipsec_ro_percpu, sizeof(struct ipsec_ro));
+	if_tunnel_free_ro_percpu(sc->ipsec_ro_percpu);
 
 	pserialize_destroy(sc->ipsec_psz);
 	mutex_destroy(&sc->ipsec_lock);
sys/net/if_ipsec.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_ipsec.h,v 1.4 2018/10/19 00:12:56 knakahara Exp $	*/
+/*	$NetBSD: if_ipsec.h,v 1.5 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -86,14 +86,9 @@ struct ipsec_variant {
 	struct psref_target iv_psref;
 };
 
-struct ipsec_ro {
-	struct route ir_ro;
-	kmutex_t *ir_lock;
-};
-
 struct ipsec_softc {
 	struct ifnet ipsec_if;		/* common area - must be at the top */
-	percpu_t *ipsec_ro_percpu;	/* struct ipsec_ro */
+	percpu_t *ipsec_ro_percpu;	/* struct tunnel_ro */
 	struct ipsec_variant *ipsec_var; /*
 					 * reader must use ipsec_getref_variant()
 					 * instead of direct dereference.
@@ -220,7 +215,7 @@ int if_ipsec_ioctl(struct ifnet *, u_long, void *);
 * - ipsec_var->iv_psref for reader
 * ipsec_softc->ipsec_var is used for variant values while the ipsec tunnel
 * exists.
- * + struct ipsec_ro->ir_ro is protected by struct ipsec_ro->ir_lock.
+ * + struct tunnel_ro->tr_ro is protected by struct tunnel_ro->tr_lock.
 * This lock is required to exclude softnet/0 lwp(such as output
 * processing softint) and processing lwp(such as DAD timer processing).
 * + if_ipsec_share_sp() and if_ipsec_unshare_sp() operations are serialized by
sys/net/if_l2tp.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_l2tp.c,v 1.37 2019/09/19 04:59:42 knakahara Exp $	*/
+/*	$NetBSD: if_l2tp.c,v 1.38 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.37 2019/09/19 04:59:42 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.38 2019/09/19 06:07:24 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -115,9 +115,6 @@ static struct {
 pserialize_t l2tp_psz __read_mostly;
 struct psref_class *lv_psref_class __read_mostly;
 
-static void l2tp_ro_init_pc(void *, void *, struct cpu_info *);
-static void l2tp_ro_fini_pc(void *, void *, struct cpu_info *);
-
 static void l2tp_ifq_init_pc(void *, void *, struct cpu_info *);
 
 static int l2tp_clone_create(struct if_clone *, int);
@@ -253,8 +250,7 @@ l2tp_clone_create(struct if_clone *ifc, int unit)
 	sc->l2tp_psz = pserialize_create();
 	PSLIST_ENTRY_INIT(sc, l2tp_hash);
 
-	sc->l2tp_ro_percpu = percpu_alloc(sizeof(struct l2tp_ro));
-	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_init_pc, NULL);
+	sc->l2tp_ro_percpu = if_tunnel_alloc_ro_percpu();
 
 	sc->l2tp_ifq_percpu = percpu_alloc(sizeof(struct ifqueue));
 	percpu_foreach(sc->l2tp_ifq_percpu, l2tp_ifq_init_pc, NULL);
@@ -320,24 +316,6 @@ l2tpattach0(struct l2tp_softc *sc)
 	return 0;
 }
 
-void
-l2tp_ro_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct l2tp_ro *lro = p;
-
-	lro->lr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
-}
-
-void
-l2tp_ro_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct l2tp_ro *lro = p;
-
-	rtcache_free(&lro->lr_ro);
-
-	mutex_obj_free(lro->lr_lock);
-}
-
 void
 l2tp_ifq_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
 {
@@ -376,8 +354,7 @@ l2tp_clone_destroy(struct ifnet *ifp)
 
 	if_detach(ifp);
 
-	percpu_foreach(sc->l2tp_ro_percpu, l2tp_ro_fini_pc, NULL);
-	percpu_free(sc->l2tp_ro_percpu, sizeof(struct l2tp_ro));
+	if_tunnel_free_ro_percpu(sc->l2tp_ro_percpu);
 
 	kmem_free(var, sizeof(struct l2tp_variant));
 	pserialize_destroy(sc->l2tp_psz);
sys/net/if_l2tp.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_l2tp.h,v 1.7 2019/09/19 04:59:42 knakahara Exp $	*/
+/*	$NetBSD: if_l2tp.h,v 1.8 2019/09/19 06:07:24 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -91,15 +91,10 @@ struct l2tp_variant {
 	struct psref_target lv_psref;
 };
 
-struct l2tp_ro {
-	struct route lr_ro;
-	kmutex_t *lr_lock;
-};
-
 struct l2tp_softc {
 	struct ethercom l2tp_ec;	/* common area - must be at the top */
 					/* to use ether_input(), we must have this */
-	percpu_t *l2tp_ro_percpu;	/* struct l2tp_ro */
+	percpu_t *l2tp_ro_percpu;	/* struct tunnel_ro */
 	struct l2tp_variant *l2tp_var;	/*
 					 * reader must use l2tp_getref_variant()
 					 * instead of direct dereference.
@@ -195,7 +190,7 @@ struct mbuf *l2tp_tcpmss_clamp(struct ifnet *, struct mbuf *);
 * - l2tp_var->lv_psref for reader
 * l2tp_softc->l2tp_var is used for variant values while the l2tp tunnel
 * exists.
- * + struct l2tp_ro->lr_ro is protected by struct l2tp_ro->lr_lock.
+ * + struct l2tp_ro->lr_ro is protected by struct tunnel_ro->tr_lock.
 * This lock is required to exclude softnet/0 lwp(such as output
 * processing softint) and processing lwp(such as DAD timer processing).
 *
sys/netinet/in_gif.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: in_gif.c,v 1.94 2018/05/01 07:21:39 maxv Exp $	*/
+/*	$NetBSD: in_gif.c,v 1.95 2019/09/19 06:07:25 knakahara Exp $	*/
 /*	$KAME: in_gif.c,v 1.66 2001/07/29 04:46:09 itojun Exp $	*/
 
 /*
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: in_gif.c,v 1.94 2018/05/01 07:21:39 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: in_gif.c,v 1.95 2019/09/19 06:07:25 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -81,12 +81,12 @@ static int
 in_gif_output(struct gif_variant *var, int family, struct mbuf *m)
 {
 	struct rtentry *rt;
-	struct route *ro;
-	struct gif_ro *gro;
 	struct gif_softc *sc;
 	struct sockaddr_in *sin_src;
 	struct sockaddr_in *sin_dst;
 	struct ifnet *ifp;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 	struct ip iphdr;	/* capsule IP header, host byte ordered */
 	int proto, error;
 	u_int8_t tos;
@@ -173,30 +173,25 @@ in_gif_output(struct gif_variant *var, int family, struct mbuf *m)
 	bcopy(&iphdr, mtod(m, struct ip *), sizeof(struct ip));
 
 	sc = var->gv_softc;
-	gro = percpu_getref(sc->gif_ro_percpu);
-	mutex_enter(gro->gr_lock);
-	ro = &gro->gr_ro;
-	if ((rt = rtcache_lookup(ro, var->gv_pdst)) == NULL) {
-		mutex_exit(gro->gr_lock);
-		percpu_putref(sc->gif_ro_percpu);
+	if_tunnel_get_ro(sc->gif_ro_percpu, &ro_pc, &lock_pc);
+	if ((rt = rtcache_lookup(ro_pc, var->gv_pdst)) == NULL) {
+		if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;
 	}
 
 	/* If the route constitutes infinite encapsulation, punt. */
 	if (rt->rt_ifp == ifp) {
-		rtcache_unref(rt, ro);
-		rtcache_free(ro);
-		mutex_exit(gro->gr_lock);
-		percpu_putref(sc->gif_ro_percpu);
+		rtcache_unref(rt, ro_pc);
+		rtcache_free(ro_pc);
+		if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;	/*XXX*/
 	}
-	rtcache_unref(rt, ro);
+	rtcache_unref(rt, ro_pc);
 
-	error = ip_output(m, NULL, ro, 0, NULL, NULL);
-	mutex_exit(gro->gr_lock);
-	percpu_putref(sc->gif_ro_percpu);
+	error = ip_output(m, NULL, ro_pc, 0, NULL, NULL);
+	if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 	return (error);
 }
 
@@ -398,7 +393,7 @@ in_gif_detach(struct gif_variant *var)
 	if (error == 0)
 		var->gv_encap_cookie4 = NULL;
 
-	percpu_foreach(sc->gif_ro_percpu, gif_rtcache_free_pc, NULL);
+	if_tunnel_ro_percpu_rtcache_free(sc->gif_ro_percpu);
 
 	return error;
 }
sys/netinet/in_l2tp.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: in_l2tp.c,v 1.16 2018/09/03 02:33:30 knakahara Exp $	*/
+/*	$NetBSD: in_l2tp.c,v 1.17 2019/09/19 06:07:25 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: in_l2tp.c,v 1.16 2018/09/03 02:33:30 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: in_l2tp.c,v 1.17 2019/09/19 06:07:25 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_l2tp.h"
@@ -91,7 +91,9 @@ in_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 	struct sockaddr_in *sin_dst = satosin(var->lv_pdst);
 	struct ip iphdr;	/* capsule IP header, host byte ordered */
 	struct rtentry *rt;
-	struct l2tp_ro *lro;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
+
 	int error;
 	uint32_t sess_id;
 
@@ -207,26 +209,23 @@ in_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 	}
 	memcpy(mtod(m, struct ip *), &iphdr, sizeof(struct ip));
 
-	lro = percpu_getref(sc->l2tp_ro_percpu);
-	mutex_enter(lro->lr_lock);
-	if ((rt = rtcache_lookup(&lro->lr_ro, var->lv_pdst)) == NULL) {
-		mutex_exit(lro->lr_lock);
-		percpu_putref(sc->l2tp_ro_percpu);
+	if_tunnel_get_ro(sc->l2tp_ro_percpu, &ro_pc, &lock_pc);
+	if ((rt = rtcache_lookup(ro_pc, var->lv_pdst)) == NULL) {
+		if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 		m_freem(m);
 		error = ENETUNREACH;
 		goto out;
 	}
 
 	if (rt->rt_ifp == ifp) {
-		rtcache_unref(rt, &lro->lr_ro);
-		rtcache_free(&lro->lr_ro);
-		mutex_exit(lro->lr_lock);
-		percpu_putref(sc->l2tp_ro_percpu);
+		rtcache_unref(rt, ro_pc);
+		rtcache_free(ro_pc);
+		if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 		m_freem(m);
 		error = ENETUNREACH;	/*XXX*/
 		goto out;
 	}
-	rtcache_unref(rt, &lro->lr_ro);
+	rtcache_unref(rt, ro_pc);
 
 	/*
 	 * To avoid inappropriate rewrite of checksum,
@@ -234,9 +233,8 @@ in_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 	 */
 	m->m_pkthdr.csum_flags = 0;
 
-	error = ip_output(m, NULL, &lro->lr_ro, 0, NULL, NULL);
-	mutex_exit(lro->lr_lock);
-	percpu_putref(sc->l2tp_ro_percpu);
+	error = ip_output(m, NULL, ro_pc, 0, NULL, NULL);
+	if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 	return error;
 
 looped:
sys/netinet6/in6_gif.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: in6_gif.c,v 1.93 2018/05/01 07:21:39 maxv Exp $	*/
+/*	$NetBSD: in6_gif.c,v 1.94 2019/09/19 06:07:25 knakahara Exp $	*/
 /*	$KAME: in6_gif.c,v 1.62 2001/07/29 04:27:25 itojun Exp $	*/
 
 /*
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: in6_gif.c,v 1.93 2018/05/01 07:21:39 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: in6_gif.c,v 1.94 2019/09/19 06:07:25 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -84,13 +84,13 @@ static int
 in6_gif_output(struct gif_variant *var, int family, struct mbuf *m)
 {
 	struct rtentry *rt;
-	struct route *ro;
-	struct gif_ro *gro;
 	struct gif_softc *sc;
 	struct sockaddr_in6 *sin6_src;
 	struct sockaddr_in6 *sin6_dst;
 	struct ifnet *ifp;
 	struct ip6_hdr *ip6;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 	int proto, error;
 	u_int8_t itos, otos;
 
@@ -179,27 +179,23 @@ in6_gif_output(struct gif_variant *var, int family, struct mbuf *m)
 	ip6->ip6_flow |= htonl((u_int32_t)otos << 20);
 
 	sc = ifp->if_softc;
-	gro = percpu_getref(sc->gif_ro_percpu);
-	mutex_enter(gro->gr_lock);
-	ro = &gro->gr_ro;
-	rt = rtcache_lookup(ro, var->gv_pdst);
+	if_tunnel_get_ro(sc->gif_ro_percpu, &ro_pc, &lock_pc);
+	rt = rtcache_lookup(ro_pc, var->gv_pdst);
 	if (rt == NULL) {
-		mutex_exit(gro->gr_lock);
-		percpu_putref(sc->gif_ro_percpu);
+		if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;
 	}
 
 	/* If the route constitutes infinite encapsulation, punt. */
 	if (rt->rt_ifp == ifp) {
-		rtcache_unref(rt, ro);
-		rtcache_free(ro);
-		mutex_exit(gro->gr_lock);
-		percpu_putref(sc->gif_ro_percpu);
+		rtcache_unref(rt, ro_pc);
+		rtcache_free(ro_pc);
+		if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;	/* XXX */
 	}
-	rtcache_unref(rt, ro);
+	rtcache_unref(rt, ro_pc);
 
 #ifdef IPV6_MINMTU
 	/*
@@ -207,12 +203,11 @@ in6_gif_output(struct gif_variant *var, int family, struct mbuf *m)
 	 * it is too painful to ask for resend of inner packet, to achieve
 	 * path MTU discovery for encapsulated packets.
 	 */
-	error = ip6_output(m, 0, ro, IPV6_MINMTU, NULL, NULL, NULL);
+	error = ip6_output(m, 0, ro_pc, IPV6_MINMTU, NULL, NULL, NULL);
 #else
-	error = ip6_output(m, 0, ro, 0, NULL, NULL, NULL);
+	error = ip6_output(m, 0, ro_pc, 0, NULL, NULL, NULL);
 #endif
-	mutex_exit(gro->gr_lock);
-	percpu_putref(sc->gif_ro_percpu);
+	if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 	return (error);
 }
 
@@ -419,7 +414,7 @@ in6_gif_detach(struct gif_variant *var)
 	if (error == 0)
 		var->gv_encap_cookie6 = NULL;
 
-	percpu_foreach(sc->gif_ro_percpu, gif_rtcache_free_pc, NULL);
+	if_tunnel_ro_percpu_rtcache_free(sc->gif_ro_percpu);
 
 	return error;
 }
@@ -432,7 +427,8 @@ in6_gif_ctlinput(int cmd, const struct sockaddr *sa, void *d, void *eparg)
 	struct ip6ctlparam *ip6cp = NULL;
 	struct ip6_hdr *ip6;
 	const struct sockaddr_in6 *dst6;
-	struct route *ro;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 	struct psref psref;
 
 	if (sa->sa_family != AF_INET6 ||
@@ -468,15 +464,15 @@ in6_gif_ctlinput(int cmd, const struct sockaddr *sa, void *d, void *eparg)
 	}
 	gif_putref_variant(var, &psref);
 
-	ro = percpu_getref(sc->gif_ro_percpu);
-	dst6 = satocsin6(rtcache_getdst(ro));
+	if_tunnel_get_ro(sc->gif_ro_percpu, &ro_pc, &lock_pc);
+	dst6 = satocsin6(rtcache_getdst(ro_pc));
 	/* XXX scope */
 	if (dst6 == NULL)
 		;
 	else if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &dst6->sin6_addr))
-		rtcache_free(ro);
+		rtcache_free(ro_pc);
 
-	percpu_putref(sc->gif_ro_percpu);
+	if_tunnel_put_ro(sc->gif_ro_percpu, lock_pc);
 	return NULL;
 }
 
sys/netinet6/in6_l2tp.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: in6_l2tp.c,v 1.17 2018/09/03 02:33:31 knakahara Exp $	*/
+/*	$NetBSD: in6_l2tp.c,v 1.18 2019/09/19 06:07:25 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: in6_l2tp.c,v 1.17 2018/09/03 02:33:31 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: in6_l2tp.c,v 1.18 2019/09/19 06:07:25 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_l2tp.h"
@@ -90,7 +90,8 @@ int
 in6_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 {
 	struct rtentry *rt;
-	struct l2tp_ro *lro;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 	struct l2tp_softc *sc;
 	struct ifnet *ifp;
 	struct sockaddr_in6 *sin6_src = satosin6(var->lv_psrc);
@@ -201,25 +202,22 @@ in6_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 		return ENOBUFS;
 	memcpy(mtod(m, struct ip6_hdr *), &ip6hdr, sizeof(struct ip6_hdr));
 
-	lro = percpu_getref(sc->l2tp_ro_percpu);
-	mutex_enter(lro->lr_lock);
-	if ((rt = rtcache_lookup(&lro->lr_ro, var->lv_pdst)) == NULL) {
-		mutex_exit(lro->lr_lock);
-		percpu_putref(sc->l2tp_ro_percpu);
+	if_tunnel_get_ro(sc->l2tp_ro_percpu, &ro_pc, &lock_pc);
+	if ((rt = rtcache_lookup(ro_pc, var->lv_pdst)) == NULL) {
+		if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;
 	}
 
 	/* If the route constitutes infinite encapsulation, punt. */
 	if (rt->rt_ifp == ifp) {
-		rtcache_unref(rt, &lro->lr_ro);
-		rtcache_free(&lro->lr_ro);
-		mutex_exit(lro->lr_lock);
-		percpu_putref(sc->l2tp_ro_percpu);
+		rtcache_unref(rt, ro_pc);
+		rtcache_free(ro_pc);
+		if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;	/* XXX */
 	}
-	rtcache_unref(rt, &lro->lr_ro);
+	rtcache_unref(rt, ro_pc);
 
 	/*
 	 * To avoid inappropriate rewrite of checksum,
@@ -227,9 +225,8 @@ in6_l2tp_output(struct l2tp_variant *var, struct mbuf *m)
 	 */
 	m->m_pkthdr.csum_flags = 0;
 
-	error = ip6_output(m, 0, &lro->lr_ro, 0, NULL, NULL, NULL);
-	mutex_exit(lro->lr_lock);
-	percpu_putref(sc->l2tp_ro_percpu);
+	error = ip6_output(m, 0, ro_pc, 0, NULL, NULL, NULL);
+	if_tunnel_put_ro(sc->l2tp_ro_percpu, lock_pc);
 	return(error);
 
 looped:
sys/netipsec/ipsecif.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: ipsecif.c,v 1.16 2019/05/17 05:27:24 knakahara Exp $	*/
+/*	$NetBSD: ipsecif.c,v 1.17 2019/09/19 06:07:25 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ipsecif.c,v 1.16 2019/05/17 05:27:24 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ipsecif.c,v 1.17 2019/09/19 06:07:25 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
@@ -500,7 +500,8 @@ ipsecif6_output(struct ipsec_variant *var, int family, struct mbuf *m)
 {
 	struct ifnet *ifp = &var->iv_softc->ipsec_if;
 	struct ipsec_softc *sc = ifp->if_softc;
-	struct ipsec_ro *iro;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 	struct rtentry *rt;
 	struct sockaddr_in6 *sin6_src;
 	struct sockaddr_in6 *sin6_dst;
@@ -601,24 +602,21 @@ ipsecif6_output(struct ipsec_variant *var, int family, struct mbuf *m)
 
 	sockaddr_in6_init(&u.dst6, &sin6_dst->sin6_addr, 0, 0, 0);
 
-	iro = percpu_getref(sc->ipsec_ro_percpu);
-	mutex_enter(iro->ir_lock);
-	if ((rt = rtcache_lookup(&iro->ir_ro, &u.dst)) == NULL) {
-		mutex_exit(iro->ir_lock);
-		percpu_putref(sc->ipsec_ro_percpu);
+	if_tunnel_get_ro(sc->ipsec_ro_percpu, &ro_pc, &lock_pc);
+	if ((rt = rtcache_lookup(ro_pc, &u.dst)) == NULL) {
+		if_tunnel_put_ro(sc->ipsec_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;
 	}
 
 	if (rt->rt_ifp == ifp) {
-		rtcache_unref(rt, &iro->ir_ro);
-		rtcache_free(&iro->ir_ro);
-		mutex_exit(iro->ir_lock);
-		percpu_putref(sc->ipsec_ro_percpu);
+		rtcache_unref(rt, ro_pc);
+		rtcache_free(ro_pc);
+		if_tunnel_put_ro(sc->ipsec_ro_percpu, lock_pc);
 		m_freem(m);
 		return ENETUNREACH;
 	}
-	rtcache_unref(rt, &iro->ir_ro);
+	rtcache_unref(rt, ro_pc);
 
 	/* set NAT-T ports */
 	error = ipsecif_set_natt_ports(var, m);
@@ -632,14 +630,13 @@ ipsecif6_output(struct ipsec_variant *var, int family, struct mbuf *m)
 	 * it is too painful to ask for resend of inner packet, to achieve
 	 * path MTU discovery for encapsulated packets.
 	 */
-	error = ip6_output(m, 0, &iro->ir_ro,
+	error = ip6_output(m, 0, ro_pc,
 	    ip6_ipsec_pmtu ? 0 : IPV6_MINMTU, 0, NULL, NULL);
 
 out:
 	if (error)
-		rtcache_free(&iro->ir_ro);
-	mutex_exit(iro->ir_lock);
-	percpu_putref(sc->ipsec_ro_percpu);
+		rtcache_free(ro_pc);
+	if_tunnel_put_ro(sc->ipsec_ro_percpu, lock_pc);
 
 	return error;
 }
@@ -935,16 +932,6 @@ ipsecif6_attach(struct ipsec_variant *var)
 	return 0;
 }
 
-static void
-ipsecif6_rtcache_free_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
-{
-	struct ipsec_ro *iro = p;
-
-	mutex_enter(iro->ir_lock);
-	rtcache_free(&iro->ir_ro);
-	mutex_exit(iro->ir_lock);
-}
-
 int
 ipsecif6_detach(struct ipsec_variant *var)
 {
@@ -953,7 +940,7 @@ ipsecif6_detach(struct ipsec_variant *var)
 
 	KASSERT(var->iv_encap_cookie6 != NULL);
 
-	percpu_foreach(sc->ipsec_ro_percpu, ipsecif6_rtcache_free_pc, NULL);
+	if_tunnel_ro_percpu_rtcache_free(sc->ipsec_ro_percpu);
 
 	var->iv_output = NULL;
 	error = encap_detach(var->iv_encap_cookie6);
@@ -969,7 +956,8 @@ ipsecif6_ctlinput(int cmd, const struct sockaddr *sa, void *d, void *eparg)
 	struct ip6ctlparam *ip6cp = NULL;
 	struct ip6_hdr *ip6;
 	const struct sockaddr_in6 *dst6;
-	struct ipsec_ro *iro;
+	struct route *ro_pc;
+	kmutex_t *lock_pc;
 
 	if (sa->sa_family != AF_INET6 ||
 	    sa->sa_len != sizeof(struct sockaddr_in6))
@@ -993,18 +981,16 @@ ipsecif6_ctlinput(int cmd, const struct sockaddr *sa, void *d, void *eparg)
 	if (!ip6)
 		return NULL;
 
-	iro = percpu_getref(sc->ipsec_ro_percpu);
-	mutex_enter(iro->ir_lock);
-	dst6 = satocsin6(rtcache_getdst(&iro->ir_ro));
+	if_tunnel_get_ro(sc->ipsec_ro_percpu, &ro_pc, &lock_pc);
+	dst6 = satocsin6(rtcache_getdst(ro_pc));
 	/* XXX scope */
 	if (dst6 == NULL)
 		;
 	else if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &dst6->sin6_addr))
 		/* flush route cache */
-		rtcache_free(&iro->ir_ro);
+		rtcache_free(ro_pc);
 
-	mutex_exit(iro->ir_lock);
-	percpu_putref(sc->ipsec_ro_percpu);
+	if_tunnel_put_ro(sc->ipsec_ro_percpu, lock_pc);
 	return NULL;
 }