- nbuf_ensure_contig: rework to use m_ensure_contig(9), which will not free
  the mbuf chain on failure.  Fixes some corner cases.  Improve regression
  test and sprinkle some asserts.
- npf_reassembly: clear nbuf on IPv6 reassembly failure path (partial fix).
  The problem was found and fix provided by Anthony Mallet.
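
The behavioral difference behind the first item is in the failure path of the two mbuf(9) routines: m_pullup(9) frees the whole chain when it cannot make the requested range contiguous, while m_ensure_contig(9) merely reports failure and leaves the (possibly rearranged) chain alive. A minimal sketch contrasting the two calling patterns follows; it illustrates the semantics and is not code from this commit.

/*
 * Illustration only: failure semantics of m_pullup(9) vs m_ensure_contig(9).
 */
#include <sys/param.h>
#include <sys/mbuf.h>

static void *
contig_with_pullup(struct mbuf **mp, int len)
{
	/* On failure the chain has been freed -- *mp is gone. */
	if ((*mp = m_pullup(*mp, len)) == NULL) {
		return NULL;
	}
	return mtod(*mp, void *);
}

static void *
contig_with_ensure(struct mbuf **mp, int len)
{
	/* On failure the chain is NOT freed; the caller still owns *mp. */
	if (!m_ensure_contig(mp, len)) {
		return NULL;
	}
	return mtod(*mp, void *);
}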
Author: rmind
Date:   2013-01-20 18:45:56 +00:00
Parent: 38f91e249c
Commit: 3107fd1eb5
7 changed files with 86 additions and 44 deletions

npf_handler.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_handler.c,v 1.24 2012/12/24 19:05:43 rmind Exp $ */
+/* $NetBSD: npf_handler.c,v 1.25 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_handler.c,v 1.24 2012/12/24 19:05:43 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_handler.c,v 1.25 2013/01/20 18:45:56 rmind Exp $");
 #include <sys/types.h>
 #include <sys/param.h>
@@ -91,6 +91,9 @@ npf_reassembly(npf_cache_t *npc, nbuf_t *nbuf, struct mbuf **mp)
		 */
		const u_int hlen = npf_cache_hlen(npc);
		error = ip6_reass_packet(mp, hlen);
+		if (error && *mp == NULL) {
+			memset(nbuf, 0, sizeof(nbuf_t));
+		}
 #endif
	}
	if (error) {
@@ -249,7 +252,7 @@ out:
	/* Reset mbuf pointer before returning to the caller. */
	if ((*mp = nbuf_head_mbuf(&nbuf)) == NULL) {
-		return ENOMEM;
+		return error ? error : ENOMEM;
	}
	/* Pass the packet if decided and there is no error. */

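For context on the npf_handler.c hunks above: when ip6_reass_packet() fails it may already have freed the mbuf, leaving *mp NULL, so the nbuf is zeroed to drop the stale mbuf reference; the out: path then reports the original error rather than a blanket ENOMEM. A condensed sketch of that combined flow, with the surrounding handler logic elided and the wrapper name invented for illustration:

/*
 * Condensed illustration of the error path above (hypothetical wrapper,
 * not part of the commit).
 */
static int
reassemble_ipv6(npf_cache_t *npc, nbuf_t *nbuf, struct mbuf **mp)
{
	int error;

	error = ip6_reass_packet(mp, npf_cache_hlen(npc));
	if (error && *mp == NULL) {
		/* Reassembly consumed the mbuf: forget it. */
		memset(nbuf, 0, sizeof(nbuf_t));
	}

	/* Later, before returning to the pfil(9) hook caller... */
	if ((*mp = nbuf_head_mbuf(nbuf)) == NULL) {
		return error ? error : ENOMEM;
	}
	return error;
}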
npf_mbuf.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_mbuf.c,v 1.9 2012/12/24 19:05:44 rmind Exp $ */
+/* $NetBSD: npf_mbuf.c,v 1.10 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.9 2012/12/24 19:05:44 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_mbuf.c,v 1.10 2013/01/20 18:45:56 rmind Exp $");
 #include <sys/param.h>
 #include <sys/mbuf.h>
@@ -160,47 +160,53 @@ nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
 void *
 nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
 {
-	struct mbuf *m = nbuf->nb_mbuf;
-	const u_int off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t);
-	int tlen = off + len;
+	const struct mbuf * const n = nbuf->nb_mbuf;
+	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);
-	KASSERT(off < m_length(nbuf->nb_mbuf0));
+	KASSERT(off < n->m_len);
-	if (__predict_false(m->m_len < tlen)) {
-		const bool head_buf = (nbuf->nb_mbuf0 == m);
-		const int target = NBUF_ENSURE_ROUNDUP(tlen);
-		const int pleft = m_length(m);
+	if (__predict_false(n->m_len < (off + len))) {
+		struct mbuf *m = nbuf->nb_mbuf0;
+		const size_t foff = nbuf_offset(nbuf);
+		const size_t plen = m_length(m);
+		const size_t mlen = m->m_len;
+		size_t target;
+		bool success;
		npf_stats_inc(NPF_STAT_NBUF_NONCONTIG);
		/* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
-		if (target <= pleft) {
-			tlen = target;
+		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
+			target = foff + len;
		}
		/* Rearrange the chain to be contiguous. */
-		if ((m = m_pullup(m, tlen)) == NULL) {
-			npf_stats_inc(NPF_STAT_NBUF_CONTIG_FAIL);
-			memset(nbuf, 0, sizeof(nbuf_t));
-			return NULL;
+		KASSERT((m->m_flags & M_PKTHDR) != 0);
+		success = m_ensure_contig(&m, target);
+		KASSERT(m != NULL);
+		/* If no change in the chain: return what we have. */
+		if (m == nbuf->nb_mbuf0 && m->m_len == mlen) {
+			return success ? nbuf->nb_nptr : NULL;
		}
		/*
-		 * If the buffer was re-allocated, indicate that references
-		 * to the data would need reset.  Also, it was the head
-		 * buffer - update our record.
+		 * The mbuf chain was re-arranged.  Update the pointers
+		 * accordingly and indicate that the references to the data
+		 * might need a reset.
		 */
-		if (nbuf->nb_mbuf != m) {
-			nbuf->nb_flags |= NBUF_DATAREF_RESET;
-		}
-		if (head_buf) {
-			KASSERT((m->m_flags & M_PKTHDR) != 0);
-			KASSERT(off < m_length(m));
-			nbuf->nb_mbuf0 = m;
-		}
+		KASSERT((m->m_flags & M_PKTHDR) != 0);
+		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;
-		nbuf->nb_nptr = mtod(m, uint8_t *) + off;
+		KASSERT(foff < m->m_len && foff < m_length(m));
+		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
+		nbuf->nb_flags |= NBUF_DATAREF_RESET;
+		if (!success) {
+			npf_stats_inc(NPF_STAT_NBUF_CONTIG_FAIL);
+			return NULL;
+		}
	}
	return nbuf->nb_nptr;
 }

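Note the caller contract that the rework above establishes: nbuf_ensure_contig() now updates nb_mbuf0/nb_mbuf/nb_nptr and raises NBUF_DATAREF_RESET whenever the chain was rearranged, even when it ultimately returns NULL, so callers must recache before acting on the result. A minimal caller sketch using the nbuf API seen in this diff (the fetch_u32 helper is illustrative, not part of the commit):

/*
 * Illustrative caller: honour NBUF_DATAREF_RESET before checking the
 * return value of nbuf_ensure_contig().
 */
static int
fetch_u32(npf_cache_t *npc, nbuf_t *nbuf, uint32_t *val)
{
	void *nptr;

	nptr = nbuf_ensure_contig(nbuf, sizeof(uint32_t));
	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		/* The chain changed: cached header pointers are stale. */
		npf_recache(npc, nbuf);
	}
	if (nptr == NULL) {
		return ENOMEM;
	}
	memcpy(val, nptr, sizeof(uint32_t));
	return 0;
}

The same pattern appears in the npf_processor.c and npf_rproc.c hunks that follow.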
npf_processor.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_processor.c,v 1.13 2012/12/24 19:05:44 rmind Exp $ */
+/* $NetBSD: npf_processor.c,v 1.14 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
@@ -50,7 +50,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_processor.c,v 1.13 2012/12/24 19:05:44 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_processor.c,v 1.14 2013/01/20 18:45:56 rmind Exp $");
 #include <sys/param.h>
 #include <sys/types.h>
@@ -182,12 +182,13 @@ process_next:
		KASSERT(i < NPF_NREGS);
		KASSERT(n >= sizeof(uint8_t) && n <= sizeof(uint32_t));
-		if ((n_ptr = nbuf_ensure_contig(nbuf, n)) == NULL) {
-			goto fail;
-		}
+		n_ptr = nbuf_ensure_contig(nbuf, n);
+		if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
+			npf_recache(npc, nbuf);
+		}
+		if (n_ptr == NULL) {
+			goto fail;
+		}
		memcpy(&regs[i], n_ptr, n);
		break;
	}

npf_rproc.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_rproc.c,v 1.4 2012/10/03 12:24:56 mlelstv Exp $ */
+/* $NetBSD: npf_rproc.c,v 1.5 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
@@ -263,6 +263,7 @@ npf_rproc_run(npf_cache_t *npc, nbuf_t *nbuf, npf_rproc_t *rp, int *decision)
 {
	const unsigned extcount = rp->rp_ext_count;
+	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	KASSERT(rp->rp_refcnt > 0);
	for (unsigned i = 0; i < extcount; i++) {
@@ -271,5 +272,9 @@ npf_rproc_run(npf_cache_t *npc, nbuf_t *nbuf, npf_rproc_t *rp, int *decision)
		KASSERT(ext->ext_refcnt > 0);
		extops->proc(npc, nbuf, rp->rp_ext_meta[i], decision);
+		if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
+			npf_recache(npc, nbuf);
+		}
	}
 }

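The recache inside the loop matters because each extension's proc hook may itself reshape the packet (a normalization extension, for instance) and thereby set NBUF_DATAREF_RESET. A hypothetical hook sketch, assuming only the extops calling convention visible in the hunk above; the function name and the TCP-header detail are invented for illustration:

/*
 * Hypothetical rproc extension hook.  Touching the transport header may
 * force nbuf_ensure_contig() to rearrange the chain; npf_rproc_run()
 * recaches after each call, so the hook only sets the decision.
 */
static void
npf_ext_example_proc(npf_cache_t *npc, nbuf_t *nbuf, void *meta, int *decision)
{
	struct tcphdr *th;

	th = nbuf_ensure_contig(nbuf, sizeof(struct tcphdr));
	if (th == NULL) {
		*decision = NPF_DECISION_BLOCK;	/* decision constant assumed */
		return;
	}
	/* ... inspect or rewrite the header here ... */
}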
npf_ruleset.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_ruleset.c,v 1.15 2012/12/24 19:05:44 rmind Exp $ */
+/* $NetBSD: npf_ruleset.c,v 1.16 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2009-2012 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.15 2012/12/24 19:05:44 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.16 2013/01/20 18:45:56 rmind Exp $");
 #include <sys/param.h>
 #include <sys/types.h>
@@ -367,6 +367,7 @@ npf_ruleset_inspect(npf_cache_t *npc, nbuf_t *nbuf,
	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));
 again:
	TAILQ_FOREACH(rl, &rlset->rs_queue, r_entry) {
+		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
		/* Match the interface. */
@@ -401,6 +402,8 @@ again:
		final_rl = NULL;
		goto again;
	}
+	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
 }

npf_session.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_session.c,v 1.19 2012/12/24 19:05:45 rmind Exp $ */
+/* $NetBSD: npf_session.c,v 1.20 2013/01/20 18:45:56 rmind Exp $ */
 /*-
  * Copyright (c) 2010-2012 The NetBSD Foundation, Inc.
@@ -80,7 +80,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: npf_session.c,v 1.19 2012/12/24 19:05:45 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: npf_session.c,v 1.20 2013/01/20 18:45:56 rmind Exp $");
 #include <sys/param.h>
 #include <sys/types.h>
@@ -570,6 +570,8 @@ npf_session_inspect(npf_cache_t *npc, nbuf_t *nbuf, const int di, int *error)
	npf_session_t *se;
	bool forw;
+	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	/*
	 * Check if session tracking is on.  Also, if layer 3 and 4 are not
	 * cached - protocol is not supported or packet is invalid.
@@ -590,6 +592,7 @@ npf_session_inspect(npf_cache_t *npc, nbuf_t *nbuf, const int di, int *error)
		*error = ENOMEM;
		return NULL;
	}
+	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	/* Main lookup of the session. */
	if ((se = npf_session_lookup(npc, nbuf, di, &forw)) == NULL) {
@@ -625,6 +628,8 @@ npf_session_establish(npf_cache_t *npc, nbuf_t *nbuf, const int di)
	u_int proto, alen;
	bool ok;
+	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	/*
	 * Check if session tracking is on.  Also, if layer 3 and 4 are not
	 * cached - protocol is not supported or packet is invalid.

npf_nbuf_test.c

@@ -1,4 +1,4 @@
-/* $NetBSD: npf_nbuf_test.c,v 1.3 2012/12/24 19:05:48 rmind Exp $ */
+/* $NetBSD: npf_nbuf_test.c,v 1.4 2013/01/20 18:45:57 rmind Exp $ */
 /*
  * NPF nbuf interface test.
@@ -16,23 +16,41 @@
 CTASSERT((MBUF_CHAIN_LEN % sizeof(uint32_t)) == 0);
+static void
+mbuf_consistency_check(nbuf_t *nbuf)
+{
+	struct mbuf *m = nbuf_head_mbuf(nbuf);
+	while (m) {
+		assert(m->m_type != MT_FREE);
+		m = m->m_next;
+	}
+}
 static char *
 parse_nbuf_chain(struct mbuf *m)
 {
	const void *dummy_ifp = (void *)0xdeadbeef;
	char *s = kmem_zalloc(MBUF_CHAIN_LEN + 1, KM_SLEEP);
	nbuf_t nbuf;
+	void *nptr;
	int n;
	nbuf_init(&nbuf, m, dummy_ifp);
+	nptr = nbuf_advance(&nbuf, (random() % 16) + 1, (random() % 16) + 1);
+	mbuf_consistency_check(&nbuf);
+	assert(nptr != NULL);
+	nbuf_reset(&nbuf);
	for (n = 0; ; ) {
-		void *nptr;
		char d[4 + 1];
		nptr = nbuf_ensure_contig(&nbuf, sizeof(uint32_t));
		if (nptr == NULL) {
			break;
		}
+		mbuf_consistency_check(&nbuf);
		memcpy(&d, nptr, sizeof(uint32_t));
		d[sizeof(d) - 1] = '\0';
@@ -48,6 +66,7 @@ parse_nbuf_chain(struct mbuf *m)
		}
		n += sizeof(uint32_t);
	}
+	mbuf_consistency_check(&nbuf);
	return s;
 }