Implement copfuncs and external memory in bpfjit.

alnsn 2014-06-24 10:53:30 +00:00
parent b61ced9fc0
commit 19fed70d36
8 changed files with 1047 additions and 623 deletions


@ -1,4 +1,4 @@
/* $NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $ */
/* $NetBSD: bpf.c,v 1.183 2014/06/24 10:53:30 alnsn Exp $ */
/*
* Copyright (c) 1990, 1991, 1993
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $");
__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.183 2014/06/24 10:53:30 alnsn Exp $");
#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
@ -197,6 +197,7 @@ const struct cdevsw bpf_cdevsw = {
bpfjit_func_t
bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
{
membar_consumer();
if (bpfjit_module_ops.bj_generate_code != NULL) {
return bpfjit_module_ops.bj_generate_code(bc, code, size);
@ -1114,10 +1115,8 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp)
return EINVAL;
}
membar_consumer();
if (bpf_jit) {
bpf_ctx_t *bc = bpf_default_ctx();
jcode = bpf_jit_generate(bc, fcode, flen);
}
if (bpf_jit)
jcode = bpf_jit_generate(NULL, fcode, flen);
} else {
fcode = NULL;
}
@ -1388,17 +1387,17 @@ static inline void
bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
void *pkt, u_int pktlen, u_int buflen, const bool rcv)
{
bpf_ctx_t *bc = bpf_default_ctx();
bpf_args_t args = {
.pkt = pkt,
.wirelen = pktlen,
.buflen = buflen,
.arg = NULL
};
struct bpf_d *d;
struct timespec ts;
bpf_args_t args;
struct bpf_d *d;
const bpf_ctx_t *bc = NULL;
bool gottime = false;
args.pkt = (const uint8_t *)pkt;
args.wirelen = pktlen;
args.buflen = buflen;
/*
* Note that the IPL does not have to be raised at this point.
* The only problem that could arise here is that if two different
@ -1414,7 +1413,7 @@ bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
bpf_gstats.bs_recv++;
if (d->bd_jitcode)
slen = d->bd_jitcode(pkt, pktlen, buflen);
slen = d->bd_jitcode(bc, &args);
else
slen = bpf_filter_ext(bc, d->bd_filter, &args);


@ -1,4 +1,4 @@
/* $NetBSD: bpf.h,v 1.63 2013/11/15 00:12:44 rmind Exp $ */
/* $NetBSD: bpf.h,v 1.64 2014/06/24 10:53:30 alnsn Exp $ */
/*
* Copyright (c) 1990, 1991, 1993
@ -45,6 +45,9 @@
/* BSD style release date */
#define BPF_RELEASE 199606
/* Date when COP instructions and external memory were released. */
#define BPF_COP_EXTMEM_RELEASE 20140624
__BEGIN_DECLS
typedef int bpf_int32;
@ -279,6 +282,32 @@ struct bpf_insn {
*/
#define BPF_MEMWORDS 16
/*
* Each bit in a bpf_memword_init_t value indicates whether the corresponding
* external memory word is initialised prior to calling a bpf program.
* Note that when used internally, the meaning is often flipped: bits
* indicate which memory words still need to be initialised prior to
* executing a bpf program.
*/
typedef uint32_t bpf_memword_init_t;
#define BPF_MEMWORD_INIT(k) (UINT32_C(1) << (k))
/* Two most significant bits are reserved by bpfjit. */
__CTASSERT(BPF_MEMWORDS + 2 <= sizeof(bpf_memword_init_t) * NBBY);
#ifdef _KERNEL
/*
* Max number of external memory words (for BPF_LD|BPF_MEM and BPF_ST).
*/
#define BPF_MAX_MEMWORDS 30
__CTASSERT(BPF_MAX_MEMWORDS >= BPF_MEMWORDS);
#ifdef __BPF_PRIVATE
/* Two most significant bits are reserved by bpfjit. */
__CTASSERT(BPF_MAX_MEMWORDS + 2 <= sizeof(bpf_memword_init_t) * NBBY);
#endif
#endif
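
To make the bit layout concrete, here is an illustrative sketch (not part of this commit; the variable name is invented): a caller that promises to fill external memory words 0 and 2 before every program run describes them with a mask built from BPF_MEMWORD_INIT(), suitable for the noinit argument of bpf_set_extmem() declared further down in this header.

#include <net/bpf.h>

/*
 * Hypothetical mask: external memory words 0 and 2 are filled in by
 * the caller before each run, so they count as pre-initialised.
 */
static const bpf_memword_init_t example_preinit =
    BPF_MEMWORD_INIT(0) | BPF_MEMWORD_INIT(2);
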
/*
* Structure to retrieve available DLTs for the interface.
*/
@ -293,20 +322,35 @@ typedef struct bpf_ctx bpf_ctx_t;
struct bpf_args;
typedef struct bpf_args bpf_args_t;
#if defined(_KERNEL) || defined(__BPF_PRIVATE)
typedef uint32_t (*bpf_copfunc_t)(bpf_ctx_t *, bpf_args_t *, uint32_t);
struct bpf_args {
const struct mbuf * pkt;
size_t wirelen;
size_t buflen;
uint32_t mem[BPF_MEMWORDS];
void * arg;
const uint8_t * pkt;
size_t wirelen;
size_t buflen;
/*
* The following arguments are used only by some kernel
* subsystems.  They aren't required for classical bpf filter
* programs; for such programs, bpfjit-generated code doesn't
* read these arguments at all.  Note, however, that the bpf
* interpreter always needs a pointer to the memstore.
*/
uint32_t * mem; /* pointer to external memory store */
void * arg; /* auxiliary argument for a copfunc */
};
#if defined(_KERNEL) || defined(__BPF_PRIVATE)
typedef uint32_t (*bpf_copfunc_t)(const bpf_ctx_t *, bpf_args_t *, uint32_t);
struct bpf_ctx {
const bpf_copfunc_t * copfuncs;
size_t nfuncs;
/*
* Number of external memwords, up to BPF_MAX_MEMWORDS, or 0.
* The latter forces a switch to the internal memstore with a
* fixed number (BPF_MEMWORDS) of memwords.
*/
size_t extwords;
bpf_memword_init_t noinit; /* pre-initialised external memwords */
};
#endif
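
A minimal usage sketch under stated assumptions (the function and variable names below are invented, not part of this commit; error checking omitted): a kernel subsystem defines a coprocessor function matching the bpf_copfunc_t signature above, then attaches it and an external memory store to a freshly created context with the setters declared below.

#include <sys/cdefs.h>
#include <net/bpf.h>

/* Hypothetical copfunc: returns the packet's wire length. */
static uint32_t
example_copfunc(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
{

	return (uint32_t)args->wirelen;
}

static const bpf_copfunc_t example_copfuncs[] = {
	example_copfunc
};

static bpf_ctx_t *
example_ctx_create(void)
{
	bpf_ctx_t *bc = bpf_create();

	/* One coprocessor function, callable as BPF_COP with k == 0. */
	bpf_set_cop(bc, example_copfuncs, __arraycount(example_copfuncs));

	/* Four external memwords; word 0 is pre-filled by the caller. */
	bpf_set_extmem(bc, 4, BPF_MEMWORD_INIT(0));

	return bc;
}

A program validated or JIT-compiled against this context may then use BPF_MISC+BPF_COP with k == 0 and load or store memwords 0..3.
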
@ -411,12 +455,12 @@ void bpf_ops_handover_exit(void);
void bpfilterattach(int);
bpf_ctx_t *bpf_create(void);
bpf_ctx_t *bpf_default_ctx(void);
void bpf_destroy(bpf_ctx_t *);
int bpf_set_cop(bpf_ctx_t *, const bpf_copfunc_t *, size_t);
u_int bpf_filter_ext(bpf_ctx_t *, const struct bpf_insn *, bpf_args_t *);
int bpf_validate_ext(bpf_ctx_t *, const struct bpf_insn *, int);
int bpf_set_cop(bpf_ctx_t *, const bpf_copfunc_t *, size_t);
int bpf_set_extmem(bpf_ctx_t *, size_t, bpf_memword_init_t);
u_int bpf_filter_ext(const bpf_ctx_t *, const struct bpf_insn *, bpf_args_t *);
int bpf_validate_ext(const bpf_ctx_t *, const struct bpf_insn *, int);
bpfjit_func_t bpf_jit_generate(bpf_ctx_t *, void *, size_t);
void bpf_jit_freecode(bpfjit_func_t);


@ -1,4 +1,4 @@
/* $NetBSD: bpf_filter.c,v 1.61 2013/11/15 00:12:44 rmind Exp $ */
/* $NetBSD: bpf_filter.c,v 1.62 2014/06/24 10:53:30 alnsn Exp $ */
/*-
* Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.61 2013/11/15 00:12:44 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.62 2014/06/24 10:53:30 alnsn Exp $");
#if 0
#if !(defined(lint) || defined(KERNEL))
@ -56,9 +56,6 @@ static const char rcsid[] =
#ifdef _KERNEL
/* Default BPF context (zeroed). */
static bpf_ctx_t bpf_def_ctx;
bpf_ctx_t *
bpf_create(void)
{
@ -79,10 +76,14 @@ bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
return 0;
}
bpf_ctx_t *
bpf_default_ctx(void)
int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
return &bpf_def_ctx;
/* XXX check arguments */
bc->extwords = nwords;
bc->noinit = preinited;
return 0;
}
#endif
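
For the caller's side of the contract, a hedged sketch (invented names, not from this commit) of running the interpreter against a context configured with bpf_set_extmem(bc, 4, BPF_MEMWORD_INIT(0)): the caller owns the memstore, passes it via bpf_args_t, and fills the pre-initialised word before every run.

#include <net/bpf.h>

static u_int
example_run(const bpf_ctx_t *bc, const struct bpf_insn *prog,
    const uint8_t *pkt, size_t pktlen, uint32_t cookie)
{
	uint32_t mem[4];	/* must match bc->extwords */
	bpf_args_t args = {
		.pkt = pkt,
		.wirelen = pktlen,
		.buflen = pktlen,
		.mem = mem,
		.arg = NULL
	};

	/* Word 0 was declared pre-initialised via the noinit mask. */
	mem[0] = cookie;

	return bpf_filter_ext(bc, prog, &args);
}
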
@ -104,9 +105,13 @@ bpf_default_ctx(void)
} \
}
uint32_t m_xword (const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf (const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte (const struct mbuf *, uint32_t, int *);
uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);
#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
@ -185,17 +190,20 @@ u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
u_int buflen)
{
uint32_t mem[BPF_MEMWORDS];
bpf_args_t args = {
.pkt = (const struct mbuf *)p,
.pkt = p,
.wirelen = wirelen,
.buflen = buflen,
.mem = mem,
.arg = NULL
};
return bpf_filter_ext(&bpf_def_ctx, pc, &args);
return bpf_filter_ext(NULL, pc, &args);
}
u_int
bpf_filter_ext(bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
@ -204,15 +212,17 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
{
uint32_t A, X, k;
#ifndef _KERNEL
uint32_t mem[BPF_MEMWORDS];
bpf_args_t args_store = {
.pkt = (const struct mbuf *)p,
.pkt = p,
.wirelen = wirelen,
.buflen = buflen,
.mem = mem,
.arg = NULL
};
bpf_args_t * const args = &args_store;
#else
const uint8_t * const p = (const uint8_t *)args->pkt;
const uint8_t * const p = args->pkt;
#endif
if (pc == 0) {
/*
@ -255,7 +265,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xword(args->pkt, k, &merr);
A = xword(args->pkt, k, &merr);
if (merr != 0)
return 0;
continue;
@ -275,7 +285,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xhalf(args->pkt, k, &merr);
A = xhalf(args->pkt, k, &merr);
if (merr != 0)
return 0;
continue;
@ -294,7 +304,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xbyte(args->pkt, k, &merr);
A = xbyte(args->pkt, k, &merr);
continue;
#else
return 0;
@ -321,7 +331,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xword(args->pkt, k, &merr);
A = xword(args->pkt, k, &merr);
if (merr != 0)
return 0;
continue;
@ -342,7 +352,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xhalf(args->pkt, k, &merr);
A = xhalf(args->pkt, k, &merr);
if (merr != 0)
return 0;
continue;
@ -362,7 +372,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
A = m_xbyte(args->pkt, k, &merr);
A = xbyte(args->pkt, k, &merr);
continue;
#else
return 0;
@ -379,7 +389,7 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
if (args->buflen != 0)
return 0;
X = (m_xbyte(args->pkt, k, &merr) & 0xf) << 2;
X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
continue;
#else
return 0;
@ -560,18 +570,17 @@ bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
* The kernel needs to be able to verify an application's filter code.
* Otherwise, a bogus program could easily crash the system.
*/
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);
#if defined(KERNEL) || defined(_KERNEL)
int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
return bpf_validate_ext(&bpf_def_ctx, f, signed_len);
return bpf_validate_ext(NULL, f, signed_len);
}
int
bpf_validate_ext(bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
@ -580,8 +589,13 @@ bpf_validate(const struct bpf_insn *f, int signed_len)
u_int i, from, len, ok = 0;
const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
uint16_t *mem, invalid;
bpf_memword_init_t *mem, invalid;
size_t size;
const size_t extwords = (bc != NULL) ? bc->extwords : 0;
const size_t memwords = (extwords != 0) ? extwords : BPF_MEMWORDS;
const bpf_memword_init_t noinit = (extwords != 0) ? bc->noinit : 0;
#else
const size_t memwords = BPF_MEMWORDS;
#endif
len = (u_int)signed_len;
@ -596,7 +610,7 @@ bpf_validate(const struct bpf_insn *f, int signed_len)
#if defined(KERNEL) || defined(_KERNEL)
mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
invalid = ~0; /* All is invalid on startup */
invalid = ~noinit; /* Only pre-initialised memory is valid on startup */
#endif
for (i = 0; i < len; ++i) {
@ -623,10 +637,10 @@ bpf_validate(const struct bpf_insn *f, int signed_len)
* More strict check with actual packet length
* is done runtime.
*/
if (p->k >= BPF_MEMWORDS)
if (p->k >= memwords)
goto out;
/* check for current memory invalid */
if (invalid & (1 << p->k))
if (invalid & BPF_MEMWORD_INIT(p->k))
goto out;
#endif
break;
@ -642,11 +656,11 @@ bpf_validate(const struct bpf_insn *f, int signed_len)
break;
case BPF_ST:
case BPF_STX:
if (p->k >= BPF_MEMWORDS)
if (p->k >= memwords)
goto out;
#if defined(KERNEL) || defined(_KERNEL)
/* validate the memory word */
invalid &= ~(1 << p->k);
invalid &= ~BPF_MEMWORD_INIT(1 << p->k);
#endif
break;
case BPF_ALU:
@ -737,19 +751,24 @@ bpf_validate(const struct bpf_insn *f, int signed_len)
case BPF_RET:
break;
case BPF_MISC:
#if defined(KERNEL) || defined(_KERNEL)
switch (BPF_MISCOP(p->code)) {
case BPF_COP:
case BPF_COPX:
/* In-kernel COP use only. */
if (bc->copfuncs) {
invalid = 0;
#if defined(KERNEL) || defined(_KERNEL)
if (bc == NULL || bc->copfuncs == NULL)
goto out;
if (BPF_MISCOP(p->code) == BPF_COP &&
p->k >= bc->nfuncs) {
goto out;
}
break;
#else
goto out;
#endif
default:
break;
}
#endif
break;
default:
goto out;


@ -1,4 +1,4 @@
/* $NetBSD: bpfjit.c,v 1.12 2014/06/17 16:52:33 alnsn Exp $ */
/* $NetBSD: bpfjit.c,v 1.13 2014/06/24 10:53:30 alnsn Exp $ */
/*-
* Copyright (c) 2011-2014 Alexander Nasonov.
@ -31,9 +31,9 @@
#include <sys/cdefs.h>
#ifdef _KERNEL
__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.12 2014/06/17 16:52:33 alnsn Exp $");
__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.13 2014/06/24 10:53:30 alnsn Exp $");
#else
__RCSID("$NetBSD: bpfjit.c,v 1.12 2014/06/17 16:52:33 alnsn Exp $");
__RCSID("$NetBSD: bpfjit.c,v 1.13 2014/06/24 10:53:30 alnsn Exp $");
#endif
#include <sys/types.h>
@ -75,11 +75,19 @@ __RCSID("$NetBSD: bpfjit.c,v 1.12 2014/06/17 16:52:33 alnsn Exp $");
#include <stdio.h> /* for stderr */
#endif
/*
* Arguments of generated bpfjit_func_t.
* The register of the first argument is repurposed on entry
* to hold the more frequently used buf value.
*/
#define BJ_CTX_ARG SLJIT_SAVED_REG1
#define BJ_ARGS SLJIT_SAVED_REG2
/*
* Permanent register assignments.
*/
#define BJ_BUF SLJIT_SAVED_REG1
#define BJ_WIRELEN SLJIT_SAVED_REG2
//#define BJ_ARGS SLJIT_SAVED_REG2
#define BJ_BUFLEN SLJIT_SAVED_REG3
#define BJ_AREG SLJIT_SCRATCH_REG1
#define BJ_TMP1REG SLJIT_SCRATCH_REG2
@ -87,12 +95,23 @@ __RCSID("$NetBSD: bpfjit.c,v 1.12 2014/06/17 16:52:33 alnsn Exp $");
#define BJ_XREG SLJIT_TEMPORARY_EREG1
#define BJ_TMP3REG SLJIT_TEMPORARY_EREG2
typedef unsigned int bpfjit_init_mask_t;
#define BJ_INIT_NOBITS 0u
#define BJ_INIT_MBIT(k) (1u << (k))
#define BJ_INIT_MMASK (BJ_INIT_MBIT(BPF_MEMWORDS) - 1u)
#define BJ_INIT_ABIT BJ_INIT_MBIT(BPF_MEMWORDS)
#define BJ_INIT_XBIT BJ_INIT_MBIT(BPF_MEMWORDS + 1)
/*
* EREG registers can't be used for indirect calls, so reuse the
* BJ_BUF and BJ_BUFLEN registers.  They can easily be restored
* from BJ_ARGS.
*/
#define BJ_COPF_PTR SLJIT_SAVED_REG1
#define BJ_COPF_IDX SLJIT_SAVED_REG3
#ifdef _KERNEL
#define MAX_MEMWORDS BPF_MAX_MEMWORDS
#else
#define MAX_MEMWORDS BPF_MEMWORDS
#endif
#define BJ_INIT_NOBITS ((bpf_memword_init_t)0)
#define BJ_INIT_MBIT(k) BPF_MEMWORD_INIT(k)
#define BJ_INIT_ABIT BJ_INIT_MBIT(MAX_MEMWORDS)
#define BJ_INIT_XBIT BJ_INIT_MBIT(MAX_MEMWORDS + 1)
/*
* Datatype for Array Bounds Check Elimination (ABC) pass.
@ -102,10 +121,12 @@ typedef uint64_t bpfjit_abc_length_t;
struct bpfjit_stack
{
uint32_t mem[BPF_MEMWORDS];
bpf_ctx_t *ctx;
uint32_t *extmem; /* pointer to external memory store */
#ifdef _KERNEL
void *tmp;
#endif
uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
};
/*
@ -172,7 +193,7 @@ struct bpfjit_insn_data {
struct bpfjit_read_pkt_data rdata;
} u;
bpfjit_init_mask_t invalid;
bpf_memword_init_t invalid;
bool unreachable;
};
@ -222,6 +243,32 @@ read_width(const struct bpf_insn *pc)
}
}
/*
* Copy the pkt and buflen members of bpf_args, via the BJ_ARGS
* pointer, into the BJ_BUF and BJ_BUFLEN registers.
*/
static int
load_buf_buflen(struct sljit_compiler *compiler)
{
int status;
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_BUF, 0,
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, pkt));
if (status != SLJIT_SUCCESS)
return status;
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_BUFLEN, 0,
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, buflen));
return status;
}
static bool
grow_jumps(struct sljit_jump ***jumps, size_t *size)
{
@ -527,6 +574,120 @@ emit_xcall(struct sljit_compiler* compiler, const struct bpf_insn *pc,
}
#endif
/*
* Emit code for BPF_COP and BPF_COPX instructions.
*/
static int
emit_cop(struct sljit_compiler* compiler, const bpf_ctx_t *bc,
const struct bpf_insn *pc, struct sljit_jump **ret0_jump)
{
#if BJ_XREG == SLJIT_RETURN_REG || \
BJ_XREG == SLJIT_SCRATCH_REG1 || \
BJ_XREG == SLJIT_SCRATCH_REG2 || \
BJ_XREG == SLJIT_SCRATCH_REG3 || \
BJ_COPF_PTR == BJ_ARGS || \
BJ_COPF_IDX == BJ_ARGS
#error "Not supported assignment of registers."
#endif
struct sljit_jump *jump;
int status;
jump = NULL;
BJ_ASSERT(bc != NULL && bc->copfuncs != NULL);
if (BPF_MISCOP(pc->code) == BPF_COPX) {
/* if (X >= bc->nfuncs) return 0; */
jump = sljit_emit_cmp(compiler,
SLJIT_C_GREATER_EQUAL,
BJ_XREG, 0,
SLJIT_IMM, bc->nfuncs);
if (jump == NULL)
return SLJIT_ERR_ALLOC_FAILED;
}
if (jump != NULL)
*ret0_jump = jump;
/*
* Copy bpf_copfunc_t arguments to registers.
*/
#if BJ_AREG != SLJIT_SCRATCH_REG3
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
SLJIT_SCRATCH_REG3, 0,
BJ_AREG, 0);
if (status != SLJIT_SUCCESS)
return status;
#endif
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_SCRATCH_REG1, 0,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, ctx));
if (status != SLJIT_SUCCESS)
return status;
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_SCRATCH_REG2, 0,
BJ_ARGS, 0);
if (status != SLJIT_SUCCESS)
return status;
if (BPF_MISCOP(pc->code) == BPF_COP) {
status = sljit_emit_ijump(compiler,
SLJIT_CALL3,
SLJIT_IMM, SLJIT_FUNC_OFFSET(bc->copfuncs[pc->k]));
if (status != SLJIT_SUCCESS)
return status;
} else if (BPF_MISCOP(pc->code) == BPF_COPX) {
/* load ctx->copfuncs */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_COPF_PTR, 0,
SLJIT_MEM1(SLJIT_SCRATCH_REG1),
offsetof(struct bpf_ctx, copfuncs));
if (status != SLJIT_SUCCESS)
return status;
/*
* Load X to a register that can be used for
* memory addressing.
*/
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_COPF_IDX, 0,
BJ_XREG, 0);
if (status != SLJIT_SUCCESS)
return status;
status = sljit_emit_ijump(compiler,
SLJIT_CALL3,
SLJIT_MEM2(BJ_COPF_PTR, BJ_COPF_IDX),
SLJIT_WORD_SHIFT);
if (status != SLJIT_SUCCESS)
return status;
status = load_buf_buflen(compiler);
if (status != SLJIT_SUCCESS)
return status;
}
#if BJ_AREG != SLJIT_RETURN_REG
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_RETURN_REG, 0);
if (status != SLJIT_SUCCESS)
return status;
#endif
return status;
}
/*
* Generate code for
* BPF_LD+BPF_W+BPF_ABS A <- P[k:4]
@ -670,6 +831,62 @@ emit_pkt_read(struct sljit_compiler* compiler,
return status;
}
static int
emit_memload(struct sljit_compiler* compiler,
sljit_si dst, uint32_t k, size_t extwords)
{
int status;
sljit_si src;
sljit_sw srcw;
srcw = k * sizeof(uint32_t);
if (extwords == 0) {
src = SLJIT_MEM1(SLJIT_LOCALS_REG);
srcw += offsetof(struct bpfjit_stack, mem);
} else {
/* copy extmem pointer to the tmp1 register */
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
BJ_TMP1REG, 0,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, extmem));
if (status != SLJIT_SUCCESS)
return status;
src = SLJIT_MEM1(BJ_TMP1REG);
}
return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, 0, src, srcw);
}
static int
emit_memstore(struct sljit_compiler* compiler,
sljit_si src, uint32_t k, size_t extwords)
{
int status;
sljit_si dst;
sljit_sw dstw;
dstw = k * sizeof(uint32_t);
if (extwords == 0) {
dst = SLJIT_MEM1(SLJIT_LOCALS_REG);
dstw += offsetof(struct bpfjit_stack, mem);
} else {
/* copy extmem pointer to the tmp1 register */
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
BJ_TMP1REG, 0,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, extmem));
if (status != SLJIT_SUCCESS)
return status;
dst = SLJIT_MEM1(BJ_TMP1REG);
}
return sljit_emit_op1(compiler, SLJIT_MOV_UI, dst, dstw, src, 0);
}
/*
* Generate code for BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf).
*/
@ -939,16 +1156,19 @@ optimize_init(struct bpfjit_insn_data *insn_dat, size_t insn_count)
*/
static bool
optimize_pass1(const struct bpf_insn *insns,
struct bpfjit_insn_data *insn_dat, size_t insn_count,
bpfjit_init_mask_t *initmask, int *nscratches)
struct bpfjit_insn_data *insn_dat, size_t insn_count, size_t extwords,
bpf_memword_init_t *initmask, int *nscratches, int *ncopfuncs)
{
struct bpfjit_jump *jtf;
size_t i;
uint32_t jt, jf;
bpfjit_abc_length_t length;
bpfjit_init_mask_t invalid; /* borrowed from bpf_filter() */
bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
bool unreachable;
const size_t memwords = (extwords != 0) ? extwords : BPF_MEMWORDS;
*ncopfuncs = 0;
*nscratches = 2;
*initmask = BJ_INIT_NOBITS;
@ -995,7 +1215,7 @@ optimize_pass1(const struct bpf_insn *insns,
*initmask |= invalid & BJ_INIT_XBIT;
if (BPF_MODE(insns[i].code) == BPF_MEM &&
(uint32_t)insns[i].k < BPF_MEMWORDS) {
(uint32_t)insns[i].k < memwords) {
*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
}
@ -1012,7 +1232,7 @@ optimize_pass1(const struct bpf_insn *insns,
*nscratches = 4;
if (BPF_MODE(insns[i].code) == BPF_MEM &&
(uint32_t)insns[i].k < BPF_MEMWORDS) {
(uint32_t)insns[i].k < memwords) {
*initmask |= invalid & BJ_INIT_MBIT(insns[i].k);
}
@ -1022,7 +1242,7 @@ optimize_pass1(const struct bpf_insn *insns,
case BPF_ST:
*initmask |= invalid & BJ_INIT_ABIT;
if ((uint32_t)insns[i].k < BPF_MEMWORDS)
if ((uint32_t)insns[i].k < memwords)
invalid &= ~BJ_INIT_MBIT(insns[i].k);
continue;
@ -1034,7 +1254,7 @@ optimize_pass1(const struct bpf_insn *insns,
*initmask |= invalid & BJ_INIT_XBIT;
if ((uint32_t)insns[i].k < BPF_MEMWORDS)
if ((uint32_t)insns[i].k < memwords)
invalid &= ~BJ_INIT_MBIT(insns[i].k);
continue;
@ -1073,6 +1293,22 @@ optimize_pass1(const struct bpf_insn *insns,
*initmask |= invalid & BJ_INIT_XBIT;
invalid &= ~BJ_INIT_ABIT;
continue;
case BPF_COPX:
/* uses BJ_XREG */
if (*nscratches < 4)
*nscratches = 4;
/* FALLTHROUGH */
case BPF_COP:
/* calls copfunc with three arguments */
if (*nscratches < 3)
*nscratches = 3;
(*ncopfuncs)++;
*initmask |= invalid & BJ_INIT_ABIT;
invalid &= ~BJ_INIT_ABIT;
continue;
}
continue;
@ -1129,7 +1365,7 @@ optimize_pass1(const struct bpf_insn *insns,
*/
static void
optimize_pass2(const struct bpf_insn *insns,
struct bpfjit_insn_data *insn_dat, size_t insn_count)
struct bpfjit_insn_data *insn_dat, size_t insn_count, size_t extwords)
{
struct bpfjit_jump *jmp;
const struct bpf_insn *pc;
@ -1154,12 +1390,12 @@ optimize_pass2(const struct bpf_insn *insns,
* ABC optimization completely because for
* every jump there is a branch with no read
* instruction.
* With no side effects, BPF_RET+BPF_K 0 is
* indistinguishable from out-of-bound load.
* With no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
* is indistinguishable from out-of-bound load.
* Therefore, abc_length can be set to
* MAX_ABC_LENGTH and enable ABC for many
* bpf programs.
* If this optimization pass encounters any
* If this optimization encounters any
* instruction with a side effect, it will
* reset abc_length.
*/
@ -1169,6 +1405,22 @@ optimize_pass2(const struct bpf_insn *insns,
abc_length = 0;
break;
case BPF_MISC:
if (BPF_MISCOP(pc->code) == BPF_COP ||
BPF_MISCOP(pc->code) == BPF_COPX) {
/* COP instructions can have side effects. */
abc_length = 0;
}
break;
case BPF_ST:
case BPF_STX:
if (extwords != 0) {
/* A write to external memory is visible outside the program. */
abc_length = 0;
}
break;
case BPF_JMP:
abc_length = pd->u.jdata.abc_length;
break;
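
A hedged illustration of why the new BPF_COP/BPF_COPX and external BPF_ST/BPF_STX cases reset abc_length (the program below is invented, not part of this commit): with external memory, the store to M[0] is observable by the caller or by a later copfunc, so the generated code must not take the out-of-bounds "return 0" shortcut before the store has executed.

#include <net/bpf.h>

/* Invented example program with a visible side effect before a load. */
static const struct bpf_insn abc_example[] = {
	BPF_STMT(BPF_LD+BPF_IMM, 0xbeef),
	BPF_STMT(BPF_ST, 0),			/* external M[0]: side effect */
	BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 1000),	/* may be out of bounds */
	BPF_STMT(BPF_RET+BPF_A, 0)
};
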
@ -1223,17 +1475,18 @@ optimize_pass3(const struct bpf_insn *insns,
static bool
optimize(const struct bpf_insn *insns,
struct bpfjit_insn_data *insn_dat, size_t insn_count,
bpfjit_init_mask_t *initmask, int *nscratches)
size_t extwords,
bpf_memword_init_t *initmask, int *nscratches, int *ncopfuncs)
{
optimize_init(insn_dat, insn_count);
if (!optimize_pass1(insns, insn_dat, insn_count,
initmask, nscratches)) {
extwords, initmask, nscratches, ncopfuncs)) {
return false;
}
optimize_pass2(insns, insn_dat, insn_count);
optimize_pass2(insns, insn_dat, insn_count, extwords);
optimize_pass3(insns, insn_dat, insn_count);
return true;
@ -1326,7 +1579,8 @@ kx_to_reg_arg(const struct bpf_insn *pc)
}
bpfjit_func_t
bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
bpfjit_generate_code(const bpf_ctx_t *bc,
const struct bpf_insn *insns, size_t insn_count)
{
void *rv;
struct sljit_compiler *compiler;
@ -1337,8 +1591,8 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
unsigned int rval, mode, src;
/* optimization related */
bpfjit_init_mask_t initmask;
int nscratches;
bpf_memword_init_t initmask;
int nscratches, ncopfuncs;
/* a list of jumps to out-of-bound return from a generated function */
struct sljit_jump **ret0;
@ -1357,10 +1611,17 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
uint32_t jt, jf;
const size_t extwords = (bc != NULL) ? bc->extwords : 0;
const size_t memwords = (extwords != 0) ? extwords : BPF_MEMWORDS;
const bpf_memword_init_t noinit = (extwords != 0) ? bc->noinit : 0;
rv = NULL;
ret0 = NULL;
compiler = NULL;
insn_dat = NULL;
ret0 = NULL;
if (memwords > MAX_MEMWORDS)
goto fail;
if (insn_count == 0 || insn_count > SIZE_MAX / sizeof(insn_dat[0]))
goto fail;
@ -1370,15 +1631,10 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
goto fail;
if (!optimize(insns, insn_dat, insn_count,
&initmask, &nscratches)) {
extwords, &initmask, &nscratches, &ncopfuncs)) {
goto fail;
}
#if defined(_KERNEL)
/* bpf_filter() checks initialization of memwords. */
BJ_ASSERT((initmask & BJ_INIT_MMASK) == 0);
#endif
ret0_size = 0;
ret0_maxsize = 64;
ret0 = BJ_ALLOC(ret0_maxsize * sizeof(ret0[0]));
@ -1398,8 +1654,53 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
if (status != SLJIT_SUCCESS)
goto fail;
for (i = 0; i < BPF_MEMWORDS; i++) {
if (ncopfuncs > 0) {
/* save ctx argument */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, ctx),
BJ_CTX_ARG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
if (extwords != 0) {
/* copy "mem" argument from bpf_args to bpfjit_stack */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_TMP1REG, 0,
BJ_ARGS, offsetof(struct bpf_args, mem));
if (status != SLJIT_SUCCESS)
goto fail;
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, extmem),
BJ_TMP1REG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
status = load_buf_buflen(compiler);
if (status != SLJIT_SUCCESS)
goto fail;
/*
* Exclude pre-initialised external memory words, but keep the
* initialization status of the A and X registers in case
* bc->noinit wrongly sets those two bits.
*/
initmask &= ~noinit | BJ_INIT_ABIT | BJ_INIT_XBIT;
#if defined(_KERNEL)
/* bpf_filter() checks initialization of memwords. */
BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0);
#endif
for (i = 0; i < memwords; i++) {
if (initmask & BJ_INIT_MBIT(i)) {
/* M[i] = 0; */
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
SLJIT_MEM1(SLJIT_LOCALS_REG),
@ -1502,14 +1803,10 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
/* BPF_LD+BPF_MEM A <- M[k] */
if (pc->code == (BPF_LD|BPF_MEM)) {
if ((uint32_t)pc->k >= BPF_MEMWORDS)
if ((uint32_t)pc->k >= memwords)
goto fail;
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
BJ_AREG, 0,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, mem) +
pc->k * sizeof(uint32_t));
status = emit_memload(compiler,
BJ_AREG, pc->k, extwords);
if (status != SLJIT_SUCCESS)
goto fail;
@ -1521,7 +1818,8 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
BJ_WIRELEN, 0);
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, wirelen));
if (status != SLJIT_SUCCESS)
goto fail;
@ -1566,7 +1864,8 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_XREG, 0,
BJ_WIRELEN, 0);
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, wirelen));
if (status != SLJIT_SUCCESS)
goto fail;
@ -1577,14 +1876,10 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
if (mode == BPF_MEM) {
if (BPF_SIZE(pc->code) != BPF_W)
goto fail;
if ((uint32_t)pc->k >= BPF_MEMWORDS)
if ((uint32_t)pc->k >= memwords)
goto fail;
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
BJ_XREG, 0,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, mem) +
pc->k * sizeof(uint32_t));
status = emit_memload(compiler,
BJ_XREG, pc->k, extwords);
if (status != SLJIT_SUCCESS)
goto fail;
@ -1607,16 +1902,12 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
case BPF_ST:
if (pc->code != BPF_ST ||
(uint32_t)pc->k >= BPF_MEMWORDS) {
(uint32_t)pc->k >= memwords) {
goto fail;
}
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, mem) +
pc->k * sizeof(uint32_t),
BJ_AREG, 0);
status = emit_memstore(compiler,
BJ_AREG, pc->k, extwords);
if (status != SLJIT_SUCCESS)
goto fail;
@ -1624,16 +1915,12 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
case BPF_STX:
if (pc->code != BPF_STX ||
(uint32_t)pc->k >= BPF_MEMWORDS) {
(uint32_t)pc->k >= memwords) {
goto fail;
}
status = sljit_emit_op1(compiler,
SLJIT_MOV_UI,
SLJIT_MEM1(SLJIT_LOCALS_REG),
offsetof(struct bpfjit_stack, mem) +
pc->k * sizeof(uint32_t),
BJ_XREG, 0);
status = emit_memstore(compiler,
BJ_XREG, pc->k, extwords);
if (status != SLJIT_SUCCESS)
goto fail;
@ -1804,6 +2091,26 @@ bpfjit_generate_code(bpf_ctx_t *bc, struct bpf_insn *insns, size_t insn_count)
goto fail;
continue;
case BPF_COP:
case BPF_COPX:
if (bc == NULL || bc->copfuncs == NULL)
goto fail;
if (BPF_MISCOP(pc->code) == BPF_COP &&
(uint32_t)pc->k >= bc->nfuncs) {
goto fail;
}
jump = NULL;
status = emit_cop(compiler, bc, pc, &jump);
if (status != SLJIT_SUCCESS)
goto fail;
if (jump != NULL && !append_jump(jump,
&ret0, &ret0_size, &ret0_maxsize))
goto fail;
continue;
}
goto fail;


@ -1,7 +1,7 @@
/* $NetBSD: bpfjit.h,v 1.2 2013/11/15 00:12:44 rmind Exp $ */
/* $NetBSD: bpfjit.h,v 1.3 2014/06/24 10:53:30 alnsn Exp $ */
/*-
* Copyright (c) 2011-2012 Alexander Nasonov.
* Copyright (c) 2011-2012,2014 Alexander Nasonov.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -46,22 +46,22 @@
#endif
/*
* RETURN value and arguments of a function generated by sljit have sljit_uw
* type which can have a greater width than arguments below. In such cases,
* we rely on the fact that calling conventions use same registers for
* smaller types.
* The return value of a function generated by sljit has sljit_uw type,
* which can have a greater width.  In such cases, we rely on the fact
* that calling conventions use the same registers for smaller types.
* SLJIT_MOV_UI is passed to sljit_emit_return() to make sure that the
* return value is truncated to unsigned int.
*/
typedef unsigned int (*bpfjit_func_t)(const uint8_t *,
unsigned int, unsigned int);
typedef unsigned int (*bpfjit_func_t)(const bpf_ctx_t *, bpf_args_t *);
bpfjit_func_t bpfjit_generate_code(bpf_ctx_t *, struct bpf_insn *, size_t);
bpfjit_func_t bpfjit_generate_code(const bpf_ctx_t *,
const struct bpf_insn *, size_t);
void bpfjit_free_code(bpfjit_func_t);
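
As a usage sketch of the new two-argument calling convention (illustrative only; the function name and header paths are assumptions): generate code for a program and invoke it with a context pointer and a bpf_args_t instead of the old (pkt, wirelen, buflen) triple.

#include <net/bpf.h>
#include <net/bpfjit.h>	/* header path may differ outside the kernel */

static u_int
example_jit_run(const bpf_ctx_t *bc, const struct bpf_insn *prog,
    size_t insn_count, const uint8_t *pkt, size_t pktlen)
{
	u_int res;
	bpf_args_t args = {
		.pkt = pkt,
		.wirelen = pktlen,
		.buflen = pktlen,
		.mem = NULL,	/* classical program: no external memstore */
		.arg = NULL
	};
	bpfjit_func_t fn = bpfjit_generate_code(bc, prog, insn_count);

	if (fn == NULL)
		return 0;
	res = fn(bc, &args);	/* new signature: (ctx, args) */
	bpfjit_free_code(fn);
	return res;
}
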
#ifdef _KERNEL
struct bpfjit_ops {
bpfjit_func_t (*bj_generate_code)(bpf_ctx_t *, struct bpf_insn *, size_t);
bpfjit_func_t (*bj_generate_code)(const bpf_ctx_t *,
const struct bpf_insn *, size_t);
void (*bj_free_code)(bpfjit_func_t);
};


@ -1,4 +1,4 @@
/* $NetBSD: npf_ruleset.c,v 1.31 2014/05/30 23:26:06 rmind Exp $ */
/* $NetBSD: npf_ruleset.c,v 1.32 2014/06/24 10:53:30 alnsn Exp $ */
/*-
* Copyright (c) 2009-2013 The NetBSD Foundation, Inc.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.31 2014/05/30 23:26:06 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.32 2014/06/24 10:53:30 alnsn Exp $");
#include <sys/param.h>
#include <sys/types.h>
@ -741,12 +741,14 @@ npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
const u_int nitems = rlset->rs_nitems;
const u_int ifid = nbuf->nb_ifid;
npf_rule_t *final_rl = NULL;
const struct mbuf *m;
bpf_args_t bc_args;
u_int n = 0;
memset(&bc_args, 0, sizeof(bpf_args_t));
bc_args.pkt = nbuf_head_mbuf(nbuf);
bc_args.wirelen = m_length(bc_args.pkt);
m = nbuf_head_mbuf(nbuf);
bc_args.pkt = (const uint8_t *)m;
bc_args.wirelen = m_length(m);
bc_args.arg = npc;
KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

File diff suppressed because it is too large


@ -1,4 +1,4 @@
/* $NetBSD: npf_bpf_test.c,v 1.4 2013/11/23 19:40:11 rmind Exp $ */
/* $NetBSD: npf_bpf_test.c,v 1.5 2014/06/24 10:53:30 alnsn Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@ test_bpf_code(void *code, size_t size)
npf_cache_all(&npc, &nbuf);
memset(&bc_args, 0, sizeof(bpf_args_t));
bc_args.pkt = m;
bc_args.pkt = (const uint8_t *)m;
bc_args.wirelen = m_length(m);
bc_args.arg = &npc;