Make bpf dynamically loadable.

parent 7393fe922a
commit b2bb0f38d5
sys/net/bpf.c

@@ -1,4 +1,4 @@
-/* $NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $ */
+/* $NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $ */
 
 /*
  * Copyright (c) 1990, 1991, 1993
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_bpf.h"
@@ -58,6 +58,8 @@ __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $");
 #include <sys/vnode.h>
 #include <sys/queue.h>
 #include <sys/stat.h>
+#include <sys/module.h>
+#include <sys/once.h>
 
 #include <sys/file.h>
 #include <sys/filedesc.h>
@@ -359,13 +361,8 @@ bpf_detachd(struct bpf_d *d)
 	d->bd_bif = 0;
 }
 
-
-/*
- * bpfilterattach() is called at boot time.
- */
-/* ARGSUSED */
-void
-bpfilterattach(int n)
+static int
+doinit(void)
 {
 
 	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
@@ -375,6 +372,20 @@ bpfilterattach(int n)
 	bpf_gstats.bs_recv = 0;
 	bpf_gstats.bs_drop = 0;
 	bpf_gstats.bs_capt = 0;
+
+	return 0;
+}
+
+/*
+ * bpfilterattach() is called at boot time.
+ */
+/* ARGSUSED */
+void
+bpfilterattach(int n)
+{
+	static ONCE_DECL(control);
+
+	RUN_ONCE(&control, doinit);
 }
 
 /*
@@ -1910,9 +1921,43 @@ struct bpf_ops bpf_ops_kernel = {
 	.bpf_mtap_sl_out = bpf_mtap_sl_out,
 };
 
-void
-bpf_setops()
-{
+MODULE(MODULE_CLASS_DRIVER, bpf, NULL);
 
-	bpf_ops = &bpf_ops_kernel;
+static int
+bpf_modcmd(modcmd_t cmd, void *arg)
+{
+	devmajor_t bmajor, cmajor;
+	int error;
+
+	bmajor = cmajor = NODEVMAJOR;
+
+	switch (cmd) {
+	case MODULE_CMD_INIT:
+		bpfilterattach(0);
+		error = devsw_attach("bpf", NULL, &bmajor,
+		    &bpf_cdevsw, &cmajor);
+		if (error == EEXIST)
+			error = 0; /* maybe built-in ... improve eventually */
+		if (error)
+			break;
+
+		bpf_ops_handover_enter(&bpf_ops_kernel);
+		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
+		bpf_ops_handover_exit();
+		break;
+
+	case MODULE_CMD_FINI:
+		/*
+		 * bpf_ops is not (yet) referenced in the callers before
+		 * attach. maybe other issues too. "safety first".
+		 */
+		error = EOPNOTSUPP;
+		break;
+
+	default:
+		error = ENOTTY;
+		break;
+	}
+
+	return error;
 }
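
A note on the module(9) hookup above: the MODULE() macro expects a companion function named <name>_modcmd, and its NULL third argument means the module declares no dependencies on other modules. A minimal skeleton of the same pattern, using the hypothetical module name "example" (a sketch for illustration, not part of this commit):

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/module.h>

MODULE(MODULE_CLASS_DRIVER, example, NULL);

static int
example_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* one-time setup: attach devsw, publish ops vectors, ... */
		return 0;
	case MODULE_CMD_FINI:
		/* undo MODULE_CMD_INIT; EOPNOTSUPP while unload is unsupported */
		return EOPNOTSUPP;
	default:
		return ENOTTY;
	}
}

A driver following this shape can then be loaded at runtime with modload(8) and inspected with modstat(8).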

sys/net/bpf.h

@@ -1,4 +1,4 @@
-/* $NetBSD: bpf.h,v 1.52 2010/01/19 22:08:00 pooka Exp $ */
+/* $NetBSD: bpf.h,v 1.53 2010/01/25 22:18:17 pooka Exp $ */
 
 /*
  * Copyright (c) 1990, 1991, 1993
@@ -276,6 +276,9 @@ struct bpf_ops {
 extern struct bpf_ops *bpf_ops;
 void bpf_setops(void);
 
+void bpf_ops_handover_enter(struct bpf_ops *);
+void bpf_ops_handover_exit(void);
+
 void bpfilterattach(int);
 
 int bpf_validate(struct bpf_insn *, int);
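
These declarations carry the whole scheme: consumers never call the bpf implementation directly, they indirect through the bpf_ops pointer. A sketch of a driver-side receive path going through the vector (assumed for illustration; example_input is not part of this commit):

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/bpf.h>

static void
example_input(struct ifnet *ifp, struct mbuf *m)
{

	/* tap the packet only if a bpf reader is attached to this ifp */
	if (ifp->if_bpf != NULL)
		bpf_ops->bpf_mtap(ifp->if_bpf, m);
	/* ... then continue into the protocol stack ... */
}

Because every call site dereferences bpf_ops at call time, swapping it from the stub vector to bpf_ops_kernel retargets all future calls at once.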

sys/net/bpf_stub.c

@@ -1,4 +1,4 @@
-/* $NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $ */
+/* $NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $ */
 
 /*
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -27,18 +27,119 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $");
 
 #include <sys/param.h>
+#include <sys/kmem.h>
 #include <sys/mbuf.h>
 
 #include <net/bpf.h>
 
-static void
-bpf_stub_attach(struct ifnet *ipf, u_int dlt, u_int hlen, struct bpf_if **drvp)
-{
-
-	*drvp = NULL;
-}
+struct laglist {
+	struct ifnet *lag_ifp;
+	u_int lag_dlt;
+	u_int lag_hlen;
+	struct bpf_if **lag_drvp;
+
+	TAILQ_ENTRY(laglist) lag_entries;
+};
+
+static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);
+
+static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
+static void bpf_stub_detach(struct ifnet *);
+
+static void bpf_stub_null(void);
+static void bpf_stub_warn(void);
+
+static kmutex_t handovermtx;
+static kcondvar_t handovercv;
+static bool handover;
+
+struct bpf_ops bpf_ops_stub = {
+	.bpf_attach = bpf_stub_attach,
+	.bpf_detach = bpf_stub_detach,
+	.bpf_change_type = (void *)bpf_stub_null,
+
+	.bpf_tap = (void *)bpf_stub_warn,
+	.bpf_mtap = (void *)bpf_stub_warn,
+	.bpf_mtap2 = (void *)bpf_stub_warn,
+	.bpf_mtap_af = (void *)bpf_stub_warn,
+	.bpf_mtap_et = (void *)bpf_stub_warn,
+	.bpf_mtap_sl_in = (void *)bpf_stub_warn,
+	.bpf_mtap_sl_out = (void *)bpf_stub_warn,
+};
+struct bpf_ops *bpf_ops;
+
+static void
+bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
+{
+	struct laglist *lag;
+	bool storeattach = true;
+
+	lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
+	lag->lag_ifp = ifp;
+	lag->lag_dlt = dlt;
+	lag->lag_hlen = hlen;
+	lag->lag_drvp = drvp;
+
+	mutex_enter(&handovermtx);
+	/*
+	 * If handover is in progress, wait for it to finish and complete
+	 * attach after that.  Otherwise record ourselves.
+	 */
+	while (handover) {
+		storeattach = false;
+		cv_wait(&handovercv, &handovermtx);
+	}
+
+	if (storeattach == false) {
+		mutex_exit(&handovermtx);
+		kmem_free(lag, sizeof(*lag));
+		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+		bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
+	} else {
+		*drvp = NULL;
+		TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
+		mutex_exit(&handovermtx);
+	}
+}
+
+static void
+bpf_stub_detach(struct ifnet *ifp)
+{
+	TAILQ_HEAD(, laglist) rmlist;
+	struct laglist *lag, *lag_next;
+	bool didhand;
+
+	TAILQ_INIT(&rmlist);
+
+	didhand = false;
+	mutex_enter(&handovermtx);
+	while (handover) {
+		didhand = true;
+		cv_wait(&handovercv, &handovermtx);
+	}
+
+	if (didhand == false) {
+		/* atomically remove all */
+		for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
+			lag_next = TAILQ_NEXT(lag, lag_entries);
+			if (lag->lag_ifp == ifp) {
+				TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
+				TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
+			}
+		}
+		mutex_exit(&handovermtx);
+		while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
+			TAILQ_REMOVE(&rmlist, lag, lag_entries);
+			kmem_free(lag, sizeof(*lag));
+		}
+	} else {
+		mutex_exit(&handovermtx);
+		KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+		bpf_ops->bpf_detach(ifp);
+	}
+}
 
 static void
@@ -59,27 +160,49 @@ bpf_stub_warn(void)
 #endif
 }
 
-struct bpf_ops bpf_ops_stub = {
-	.bpf_attach = bpf_stub_attach,
-	.bpf_detach = (void *)bpf_stub_null,
-	.bpf_change_type = (void *)bpf_stub_null,
-
-	.bpf_tap = (void *)bpf_stub_warn,
-	.bpf_mtap = (void *)bpf_stub_warn,
-	.bpf_mtap2 = (void *)bpf_stub_warn,
-	.bpf_mtap_af = (void *)bpf_stub_warn,
-	.bpf_mtap_et = (void *)bpf_stub_warn,
-	.bpf_mtap_sl_in = (void *)bpf_stub_warn,
-	.bpf_mtap_sl_out = (void *)bpf_stub_warn,
-};
-
-struct bpf_ops *bpf_ops;
-
-void bpf_setops_stub(void);
 void
-bpf_setops_stub()
+bpf_setops()
 {
 
+	mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
+	cv_init(&handovercv, "bpfops");
 	bpf_ops = &bpf_ops_stub;
 }
-__weak_alias(bpf_setops,bpf_setops_stub);
+
+/*
+ * Party's over, prepare for handover.
+ * It needs to happen *before* bpf_ops is set to make it atomic
+ * to callers (see also stub implementations, which wait if
+ * called during handover).  The likelyhood of seeing a full
+ * attach-detach *during* handover comes close to astronomical,
+ * but handle it anyway since it's relatively easy.
+ */
+void
+bpf_ops_handover_enter(struct bpf_ops *newops)
+{
+	struct laglist *lag;
+
+	mutex_enter(&handovermtx);
+	handover = true;
+
+	while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
+		TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
+		mutex_exit(&handovermtx);
+		newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
+		    lag->lag_hlen, lag->lag_drvp);
+		kmem_free(lag, sizeof(*lag));
+		mutex_enter(&handovermtx);
+	}
+	mutex_exit(&handovermtx);
+}
+
+/* hangover done */
+void
+bpf_ops_handover_exit()
+{
+
+	mutex_enter(&handovermtx);
+	handover = false;
+	cv_broadcast(&handovercv);
+	mutex_exit(&handovermtx);
+}
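
To recap how bpf.c and bpf_stub.c fit together: the loader's MODULE_CMD_INIT path (shown in full in the bpf.c hunk above) brackets the pointer swap with the handover calls, in this order:

	bpf_ops_handover_enter(&bpf_ops_kernel);	/* handover = true; replay queued attaches */
	atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);	/* publish the real vector */
	bpf_ops_handover_exit();			/* handover = false; cv_broadcast() */

A bpf_stub_attach() or bpf_stub_detach() racing with this window sleeps on handovercv; by the time it is woken, bpf_ops already points at bpf_ops_kernel (the KASSERTs above check exactly that), so the retried operation lands in the real implementation.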