/*	$NetBSD: uipc_mbuf.c,v 1.236 2019/12/06 07:27:07 maxv Exp $	*/

/*
 * Copyright (c) 1999, 2001, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.236 2019/12/06 07:27:07 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_mbuftrace.h"
#include "opt_nmbclusters.h"
#include "opt_ddb.h"
#include "ether.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>

pool_cache_t mb_cache;	/* mbuf cache */
static pool_cache_t mcl_cache;	/* mbuf cluster cache */

struct mbstat mbstat;
int max_linkhdr;
int max_protohdr;
int max_hdr;
int max_datalen;

static void mb_drain(void *, int);
static int mb_ctor(void *, void *, int);

static void sysctl_kern_mbuf_setup(void);

static struct sysctllog *mbuf_sysctllog;

static struct mbuf *m_copy_internal(struct mbuf *, int, int, int, bool);
static struct mbuf *m_split_internal(struct mbuf *, int, int, bool);
static int m_copyback_internal(struct mbuf **, int, int, const void *,
    int, int);

/* Flags for m_copyback_internal. */
#define	CB_COPYBACK	0x0001	/* copyback from cp */
#define	CB_PRESERVE	0x0002	/* preserve original data */
#define	CB_COW		0x0004	/* do copy-on-write */
#define	CB_EXTEND	0x0008	/* extend chain */

static const char mclpool_warnmsg[] =
    "WARNING: mclpool limit reached; increase kern.mbuf.nmbclusters";

MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

static percpu_t *mbstat_percpu;

#ifdef MBUFTRACE
struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners);
struct mowner unknown_mowners[] = {
	MOWNER_INIT("unknown", "free"),
	MOWNER_INIT("unknown", "data"),
	MOWNER_INIT("unknown", "header"),
	MOWNER_INIT("unknown", "soname"),
	MOWNER_INIT("unknown", "soopts"),
	MOWNER_INIT("unknown", "ftable"),
	MOWNER_INIT("unknown", "control"),
	MOWNER_INIT("unknown", "oobdata"),
};
struct mowner revoked_mowner = MOWNER_INIT("revoked", "");
#endif

#define	MEXT_ISEMBEDDED(m)	((m)->m_ext_ref == (m))

#define	MCLADDREFERENCE(o, n)						\
do {									\
	KASSERT(((o)->m_flags & M_EXT) != 0);				\
	KASSERT(((n)->m_flags & M_EXT) == 0);				\
	KASSERT((o)->m_ext.ext_refcnt >= 1);				\
	(n)->m_flags |= ((o)->m_flags & M_EXTCOPYFLAGS);		\
	atomic_inc_uint(&(o)->m_ext.ext_refcnt);			\
	(n)->m_ext_ref = (o)->m_ext_ref;				\
	mowner_ref((n), (n)->m_flags);					\
} while (/* CONSTCOND */ 0)
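
/*
 * Summary of the macro above: MCLADDREFERENCE() makes mbuf (n) share the
 * external storage already attached to (o) by copying the M_EXT state and
 * atomically bumping the shared ext_refcnt; the storage is only returned
 * to its backing pool once the last reference goes away.
 */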

static int
nmbclusters_limit(void)
{
#if defined(PMAP_MAP_POOLPAGE)
	/* direct mapping, doesn't use space in kmem_arena */
	vsize_t max_size = physmem / 4;
#else
	vsize_t max_size = MIN(physmem / 4, nkmempages / 4);
#endif

	max_size = max_size * PAGE_SIZE / MCLBYTES;
#ifdef NMBCLUSTERS_MAX
	max_size = MIN(max_size, NMBCLUSTERS_MAX);
#endif

#ifdef NMBCLUSTERS
	return MIN(max_size, NMBCLUSTERS);
#else
	return max_size;
#endif
}
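
/*
 * Illustrative arithmetic (hypothetical figures): with 4 KiB pages,
 * 2 KiB clusters and physmem = 262144 pages (1 GiB), max_size starts at
 * 65536 pages and becomes 65536 * 4096 / 2048 = 131072 clusters, i.e.
 * clusters are capped at roughly a quarter of physical memory.
 */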

/*
 * Initialize the mbuf allocator.
 */
void
mbinit(void)
{

	CTASSERT(sizeof(struct _m_ext) <= MHLEN);
	CTASSERT(sizeof(struct mbuf) == MSIZE);

	sysctl_kern_mbuf_setup();

	mb_cache = pool_cache_init(msize, 0, 0, 0, "mbpl",
	    NULL, IPL_VM, mb_ctor, NULL, NULL);
	KASSERT(mb_cache != NULL);

	mcl_cache = pool_cache_init(mclbytes, COHERENCY_UNIT, 0, 0, "mclpl",
	    NULL, IPL_VM, NULL, NULL, NULL);
	KASSERT(mcl_cache != NULL);

	pool_cache_set_drain_hook(mb_cache, mb_drain, NULL);
	pool_cache_set_drain_hook(mcl_cache, mb_drain, NULL);

	/*
	 * Set an arbitrary default limit on the number of mbuf clusters.
	 */
#ifdef NMBCLUSTERS
	nmbclusters = nmbclusters_limit();
#else
	nmbclusters = MAX(1024,
	    (vsize_t)physmem * PAGE_SIZE / MCLBYTES / 16);
	nmbclusters = MIN(nmbclusters, nmbclusters_limit());
#endif

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message max once a minute.
	 */
	pool_cache_sethardlimit(mcl_cache, nmbclusters, mclpool_warnmsg, 60);

	mbstat_percpu = percpu_alloc(sizeof(struct mbstat_cpu));

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_cache_setlowat(mb_cache, mblowat);
	pool_cache_setlowat(mcl_cache, mcllowat);

#ifdef MBUFTRACE
	{
		/*
		 * Attach the unknown mowners.
		 */
		int i;
		MOWNER_ATTACH(&revoked_mowner);
		for (i = sizeof(unknown_mowners)/sizeof(unknown_mowners[0]);
		     i-- > 0; )
			MOWNER_ATTACH(&unknown_mowners[i]);
	}
#endif
}

static void
mb_drain(void *arg, int flags)
{
	struct domain *dp;
	const struct protosw *pr;
	struct ifnet *ifp;
	int s;

	KERNEL_LOCK(1, NULL);
	s = splvm();
	DOMAIN_FOREACH(dp) {
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	}
	/* XXX we cannot use psref in H/W interrupt */
	if (!cpu_intr_p()) {
		int bound = curlwp_bind();
		IFNET_READER_FOREACH(ifp) {
			struct psref psref;

			if_acquire(ifp, &psref);

			if (ifp->if_drain)
				(*ifp->if_drain)(ifp);

			if_release(ifp, &psref);
		}
		curlwp_bindx(bound);
	}
	splx(s);
	mbstat.m_drain++;
	KERNEL_UNLOCK_ONE(NULL);
}
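
/*
 * mb_drain() is the drain hook registered for both caches in mbinit():
 * when the pool layer runs short, protocols get a chance to release
 * cached data through their pr_drain hooks (e.g. reassembly queues) and
 * drivers through if_drain, before mbuf or cluster allocations start
 * failing outright.
 */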

/*
 * sysctl helper routine for the kern.mbuf subtree.
 * nmbclusters, mblowat and mcllowat need range
 * checking and pool tweaking after being reset.
 */
static int
sysctl_kern_mbuf(SYSCTLFN_ARGS)
{
	int error, newval;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &newval;
	switch (rnode->sysctl_num) {
	case MBUF_NMBCLUSTERS:
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		newval = *(int*)rnode->sysctl_data;
		break;
	default:
		return EOPNOTSUPP;
	}

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if (newval < 0)
		return EINVAL;

	switch (node.sysctl_num) {
	case MBUF_NMBCLUSTERS:
		if (newval < nmbclusters)
			return EINVAL;
		if (newval > nmbclusters_limit())
			return EINVAL;
		nmbclusters = newval;
		pool_cache_sethardlimit(mcl_cache, nmbclusters,
		    mclpool_warnmsg, 60);
		break;
	case MBUF_MBLOWAT:
		mblowat = newval;
		pool_cache_setlowat(mb_cache, mblowat);
		break;
	case MBUF_MCLLOWAT:
		mcllowat = newval;
		pool_cache_setlowat(mcl_cache, mcllowat);
		break;
	}

	return 0;
}
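
/*
 * Illustrative userland usage (the value is an arbitrary example):
 *
 *	sysctl kern.mbuf.nmbclusters
 *	sysctl -w kern.mbuf.nmbclusters=65536
 *
 * Raising the cluster limit takes effect immediately through
 * pool_cache_sethardlimit(); lowering it below the current value, or
 * raising it above nmbclusters_limit(), is rejected with EINVAL by the
 * handler above.
 */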

#ifdef MBUFTRACE
static void
mowner_convert_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
{
	struct mowner_counter *mc = v1;
	struct mowner_user *mo_user = v2;
	int i;

	for (i = 0; i < MOWNER_COUNTER_NCOUNTERS; i++) {
		mo_user->mo_counter[i] += mc->mc_counter[i];
	}
}

static void
mowner_convert_to_user(struct mowner *mo, struct mowner_user *mo_user)
{

	memset(mo_user, 0, sizeof(*mo_user));
	CTASSERT(sizeof(mo_user->mo_name) == sizeof(mo->mo_name));
	CTASSERT(sizeof(mo_user->mo_descr) == sizeof(mo->mo_descr));
	memcpy(mo_user->mo_name, mo->mo_name, sizeof(mo->mo_name));
	memcpy(mo_user->mo_descr, mo->mo_descr, sizeof(mo->mo_descr));
	percpu_foreach(mo->mo_counters, mowner_convert_to_user_cb, mo_user);
}

static int
sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
{
	struct mowner *mo;
	size_t len = 0;
	int error = 0;

	if (namelen != 0)
		return EINVAL;
	if (newp != NULL)
		return EPERM;

	LIST_FOREACH(mo, &mowners, mo_link) {
		struct mowner_user mo_user;

		mowner_convert_to_user(mo, &mo_user);

		if (oldp != NULL) {
			if (*oldlenp - len < sizeof(mo_user)) {
				error = ENOMEM;
				break;
			}
			error = copyout(&mo_user, (char *)oldp + len,
			    sizeof(mo_user));
			if (error)
				break;
		}
		len += sizeof(mo_user);
	}

	if (error == 0)
		*oldlenp = len;

	return error;
}
#endif /* MBUFTRACE */
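
/*
 * As with most variable-length sysctl nodes, userland is expected to read
 * the mowners node twice: once with oldp == NULL so that only the required
 * length comes back in *oldlenp, then again with a buffer of at least that
 * size to receive the array of struct mowner_user records.
 */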

void
mbstat_type_add(int type, int diff)
{
	struct mbstat_cpu *mb;
	int s;

	s = splvm();
	mb = percpu_getref(mbstat_percpu);
	mb->m_mtypes[type] += diff;
	percpu_putref(mbstat_percpu);
	splx(s);
}

static void
mbstat_convert_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
{
	struct mbstat_cpu *mbsc = v1;
	struct mbstat *mbs = v2;
	int i;

	for (i = 0; i < __arraycount(mbs->m_mtypes); i++) {
		mbs->m_mtypes[i] += mbsc->m_mtypes[i];
	}
}

static void
mbstat_convert_to_user(struct mbstat *mbs)
{

	memset(mbs, 0, sizeof(*mbs));
	mbs->m_drain = mbstat.m_drain;
	percpu_foreach(mbstat_percpu, mbstat_convert_to_user_cb, mbs);
}

static int
sysctl_kern_mbuf_stats(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct mbstat mbs;

	mbstat_convert_to_user(&mbs);
	node = *rnode;
	node.sysctl_data = &mbs;
	node.sysctl_size = sizeof(mbs);
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
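
/*
 * Design note: the per-type counters live in per-CPU struct mbstat_cpu
 * buckets so that the allocation and free paths never contend on a shared
 * counter; they are only folded into a single struct mbstat when the
 * kern.mbuf.stats node is read.
 */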

static void
sysctl_kern_mbuf_setup(void)
{

	KASSERT(mbuf_sysctllog == NULL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "mbuf",
		       SYSCTL_DESCR("mbuf control variables"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, KERN_MBUF, CTL_EOL);

	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "msize",
		       SYSCTL_DESCR("mbuf base size"),
		       NULL, msize, NULL, 0,
		       CTL_KERN, KERN_MBUF, MBUF_MSIZE, CTL_EOL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "mclbytes",
		       SYSCTL_DESCR("mbuf cluster size"),
		       NULL, mclbytes, NULL, 0,
		       CTL_KERN, KERN_MBUF, MBUF_MCLBYTES, CTL_EOL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "nmbclusters",
		       SYSCTL_DESCR("Limit on the number of mbuf clusters"),
		       sysctl_kern_mbuf, 0, &nmbclusters, 0,
		       CTL_KERN, KERN_MBUF, MBUF_NMBCLUSTERS, CTL_EOL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "mblowat",
		       SYSCTL_DESCR("mbuf low water mark"),
		       sysctl_kern_mbuf, 0, &mblowat, 0,
		       CTL_KERN, KERN_MBUF, MBUF_MBLOWAT, CTL_EOL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "mcllowat",
		       SYSCTL_DESCR("mbuf cluster low water mark"),
		       sysctl_kern_mbuf, 0, &mcllowat, 0,
		       CTL_KERN, KERN_MBUF, MBUF_MCLLOWAT, CTL_EOL);
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "stats",
		       SYSCTL_DESCR("mbuf allocation statistics"),
		       sysctl_kern_mbuf_stats, 0, NULL, 0,
		       CTL_KERN, KERN_MBUF, MBUF_STATS, CTL_EOL);
#ifdef MBUFTRACE
	sysctl_createv(&mbuf_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "mowners",
		       SYSCTL_DESCR("Information about mbuf owners"),
		       sysctl_kern_mbuf_mowners, 0, NULL, 0,
		       CTL_KERN, KERN_MBUF, MBUF_MOWNERS, CTL_EOL);
#endif
}

static int
mb_ctor(void *arg, void *object, int flags)
{
	struct mbuf *m = object;

#ifdef POOL_VTOPHYS
	m->m_paddr = POOL_VTOPHYS(m);
#else
	m->m_paddr = M_PADDR_INVALID;
#endif
	return 0;
}

/*
 * Add mbuf to the end of a chain
 */
struct mbuf *
m_add(struct mbuf *c, struct mbuf *m)
{
	struct mbuf *n;

	if (c == NULL)
		return m;

	for (n = c; n->m_next != NULL; n = n->m_next)
		continue;
	n->m_next = m;
	return c;
}

struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;

	KASSERT(type != MT_FREE);

	m = pool_cache_get(mb_cache,
	    how == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : PR_NOWAIT);
	if (m == NULL)
		return NULL;

	mbstat_type_add(type, 1);

	mowner_init(m, type);
	m->m_ext_ref = m;	/* default */
	m->m_type = type;
	m->m_len = 0;
	m->m_next = NULL;
	m->m_nextpkt = NULL;	/* default */
	m->m_data = m->m_dat;
	m->m_flags = 0;		/* default */

	return m;
}
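
/*
 * Typical caller pattern (illustrative sketch only, not compiled here):
 *
 *	struct mbuf *m = m_get(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return ENOBUFS;
 *	... fill m->m_data, set m->m_len ...
 *	m_freem(m);		(frees the whole chain)
 *
 * Even M_WAIT callers must check for NULL: PR_LIMITFAIL above makes the
 * allocation fail rather than sleep forever once the pool hard limit has
 * been reached.
 */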

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m == NULL)
		return NULL;

	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR;

	m_reset_rcvif(m);
	m->m_pkthdr.len = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
	m->m_pkthdr.pkthdr_flags = 0;
	SLIST_INIT(&m->m_pkthdr.tags);

	m->m_pkthdr.pattr_class = NULL;
	m->m_pkthdr.pattr_af = AF_UNSPEC;
	m->m_pkthdr.pattr_hdr = NULL;

	return m;
}

void
m_clget(struct mbuf *m, int how)
{
	m->m_ext_storage.ext_buf = (char *)pool_cache_get_paddr(mcl_cache,
	    how == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : PR_NOWAIT,
	    &m->m_ext_storage.ext_paddr);

	if (m->m_ext_storage.ext_buf == NULL)
		return;

	MCLINITREFERENCE(m);
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (m->m_flags & ~M_EXTCOPYFLAGS) |
	    M_EXT|M_EXT_CLUSTER|M_EXT_RW;
	m->m_ext.ext_size = MCLBYTES;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_arg = NULL;
	/* ext_paddr initialized above */

	mowner_ref(m, M_EXT|M_EXT_CLUSTER);
}
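
/*
 * m_clget() fails silently when no cluster is available; callers must test
 * (m->m_flags & M_EXT) afterwards, as m_getcl() below does after calling
 * the MCLGET() wrapper macro.
 */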

struct mbuf *
m_getcl(int how, int type, int flags)
{
	struct mbuf *mp;

	if ((flags & M_PKTHDR) != 0)
		mp = m_gethdr(how, type);
	else
		mp = m_get(how, type);

	if (mp == NULL)
		return NULL;

	MCLGET(mp, how);
	if ((mp->m_flags & M_EXT) != 0)
		return mp;

	m_free(mp);
	return NULL;
}

/*
 * Utility function for M_PREPEND. Do *NOT* use it directly.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (__predict_false(len > MHLEN)) {
		panic("%s: len > MHLEN", __func__);
	}

	KASSERT(len != M_COPYALL);
	mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return NULL;
	}

	if (m->m_flags & M_PKTHDR) {
		m_move_pkthdr(mn, m);
	} else {
		MCLAIM(mn, m->m_owner);
	}
	mn->m_next = m;
	m = mn;

	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			m_align(m, len);
	} else {
		if (len < MLEN)
			m_align(m, len);
	}

	m->m_len = len;
	return m;
}
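
/*
 * Callers normally go through the M_PREPEND() macro instead: roughly, it
 * first tries to extend the existing first mbuf into its leading space and
 * only falls back to m_prepend() when a new mbuf is really required.  Note
 * that on allocation failure the original chain has already been freed
 * above, so the caller is left with a NULL mbuf pointer.
 */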

struct mbuf *
m_copym(struct mbuf *m, int off, int len, int wait)
{
	/* Shallow copy on M_EXT. */
	return m_copy_internal(m, off, len, wait, false);
}

struct mbuf *
m_dup(struct mbuf *m, int off, int len, int wait)
{
	/* Deep copy. */
	return m_copy_internal(m, off, len, wait, true);
}
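
/*
 * The practical difference: m_copym() shares any external cluster storage
 * with the original (reference counted, so the copy is cheap but must not
 * be written to while shared), whereas m_dup() copies the data bytes into
 * fresh storage and is therefore safe for the caller to modify.
 */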

static inline int
m_copylen(int len, int copylen)
{
	return (len == M_COPYALL) ? copylen : uimin(len, copylen);
}
|
|
|
|
|
1999-10-27 18:23:26 +04:00
|
|
|
static struct mbuf *
|
2018-04-26 11:13:30 +03:00
|
|
|
m_copy_internal(struct mbuf *m, int off0, int len, int wait, bool deep)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
1998-05-22 21:47:21 +04:00
|
|
|
struct mbuf *n, **np;
|
|
|
|
int off = off0;
|
1993-03-21 12:45:37 +03:00
|
|
|
struct mbuf *top;
|
|
|
|
int copyhdr = 0;
|
|
|
|
|
2013-11-14 04:50:36 +04:00
|
|
|
if (off < 0 || (len != M_COPYALL && len < 0))
|
2018-04-26 11:31:36 +03:00
|
|
|
panic("%s: off %d, len %d", __func__, off, len);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (off == 0 && m->m_flags & M_PKTHDR)
|
|
|
|
copyhdr = 1;
|
|
|
|
while (off > 0) {
|
2018-01-22 12:06:40 +03:00
|
|
|
if (m == NULL)
|
2018-04-27 09:27:36 +03:00
|
|
|
panic("%s: m == NULL, off %d", __func__, off);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (off < m->m_len)
|
|
|
|
break;
|
|
|
|
off -= m->m_len;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
2018-01-22 12:06:40 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
np = &top;
|
2018-01-22 12:06:40 +03:00
|
|
|
top = NULL;
|
2013-11-14 13:21:30 +04:00
|
|
|
while (len == M_COPYALL || len > 0) {
|
2018-01-22 12:06:40 +03:00
|
|
|
if (m == NULL) {
|
1993-03-21 12:45:37 +03:00
|
|
|
if (len != M_COPYALL)
|
2018-04-26 11:31:36 +03:00
|
|
|
panic("%s: m == NULL, len %d [!COPYALL]",
|
|
|
|
__func__, len);
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
|
|
|
}
|
2018-01-22 12:06:40 +03:00
|
|
|
|
2013-10-10 00:15:20 +04:00
|
|
|
n = m_get(wait, m->m_type);
|
1993-03-21 12:45:37 +03:00
|
|
|
*np = n;
|
2018-01-22 12:06:40 +03:00
|
|
|
if (n == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
goto nospace;
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(n, m->m_owner);
|
2018-01-22 12:06:40 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (copyhdr) {
|
2018-12-22 17:07:53 +03:00
|
|
|
m_copy_pkthdr(n, m);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (len == M_COPYALL)
|
|
|
|
n->m_pkthdr.len -= off0;
|
|
|
|
else
|
|
|
|
n->m_pkthdr.len = len;
|
|
|
|
copyhdr = 0;
|
|
|
|
}
|
2013-11-14 04:50:36 +04:00
|
|
|
n->m_len = m_copylen(len, m->m_len - off);
|
2018-01-22 12:06:40 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (m->m_flags & M_EXT) {
|
1999-10-27 18:23:26 +04:00
|
|
|
if (!deep) {
|
|
|
|
n->m_data = m->m_data + off;
|
|
|
|
MCLADDREFERENCE(m, n);
|
|
|
|
} else {
|
2000-08-18 18:12:47 +04:00
|
|
|
/*
|
2018-01-22 18:05:27 +03:00
|
|
|
* We don't care if MCLGET fails. n->m_len is
|
|
|
|
* recomputed and handles that.
|
2000-08-18 18:12:47 +04:00
|
|
|
*/
|
1999-10-27 18:23:26 +04:00
|
|
|
MCLGET(n, wait);
|
2015-02-08 17:46:30 +03:00
|
|
|
n->m_len = 0;
|
2000-08-18 20:19:22 +04:00
|
|
|
n->m_len = M_TRAILINGSPACE(n);
|
2013-11-14 04:50:36 +04:00
|
|
|
n->m_len = m_copylen(len, n->m_len);
|
2018-09-03 19:29:22 +03:00
|
|
|
n->m_len = uimin(n->m_len, m->m_len - off);
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(n, void *), mtod(m, char *) + off,
|
1999-10-27 18:23:26 +04:00
|
|
|
(unsigned)n->m_len);
|
|
|
|
}
|
2018-01-22 12:06:40 +03:00
|
|
|
} else {
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(n, void *), mtod(m, char *) + off,
|
1993-03-21 12:45:37 +03:00
|
|
|
(unsigned)n->m_len);
|
2018-01-22 12:06:40 +03:00
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
if (len != M_COPYALL)
|
|
|
|
len -= n->m_len;
|
2000-08-18 20:19:22 +04:00
|
|
|
off += n->m_len;
|
2018-04-27 09:27:36 +03:00
|
|
|
|
|
|
|
KASSERT(off <= m->m_len);
|
|
|
|
|
2000-08-18 20:19:22 +04:00
|
|
|
if (off == m->m_len) {
|
|
|
|
m = m->m_next;
|
|
|
|
off = 0;
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
np = &n->m_next;
|
|
|
|
}
|
2018-01-22 12:06:40 +03:00
|
|
|
|
|
|
|
return top;
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
nospace:
|
|
|
|
m_freem(top);
|
2018-01-22 12:06:40 +03:00
|
|
|
return NULL;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
1997-03-27 23:33:07 +03:00
|
|
|
/*
|
|
|
|
* Copy an entire packet, including header (which must be present).
|
2018-01-22 18:05:27 +03:00
|
|
|
* An optimization of the common case 'm_copym(m, 0, M_COPYALL, how)'.
|
1997-03-27 23:33:07 +03:00
|
|
|
*/
|
|
|
|
struct mbuf *
|
2003-01-31 07:55:52 +03:00
|
|
|
m_copypacket(struct mbuf *m, int how)
|
1997-03-27 23:33:07 +03:00
|
|
|
{
|
|
|
|
struct mbuf *top, *n, *o;
|
|
|
|
|
2018-04-27 09:15:49 +03:00
|
|
|
if (__predict_false((m->m_flags & M_PKTHDR) == 0)) {
|
|
|
|
panic("%s: no header (m = %p)", __func__, m);
|
|
|
|
}
|
|
|
|
|
2013-10-10 00:15:20 +04:00
|
|
|
n = m_get(how, m->m_type);
|
1997-03-27 23:33:07 +03:00
|
|
|
top = n;
|
|
|
|
if (!n)
|
|
|
|
goto nospace;
|
|
|
|
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(n, m->m_owner);
|
2018-12-22 17:07:53 +03:00
|
|
|
m_copy_pkthdr(n, m);
|
1997-03-27 23:33:07 +03:00
|
|
|
n->m_len = m->m_len;
|
|
|
|
if (m->m_flags & M_EXT) {
|
|
|
|
n->m_data = m->m_data;
|
|
|
|
MCLADDREFERENCE(m, n);
|
|
|
|
} else {
|
1998-08-04 08:03:10 +04:00
|
|
|
memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
|
1997-03-27 23:33:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
m = m->m_next;
|
|
|
|
while (m) {
|
2013-10-10 00:15:20 +04:00
|
|
|
o = m_get(how, m->m_type);
|
1997-03-27 23:33:07 +03:00
|
|
|
if (!o)
|
|
|
|
goto nospace;
|
|
|
|
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(o, m->m_owner);
|
1997-03-27 23:33:07 +03:00
|
|
|
n->m_next = o;
|
|
|
|
n = n->m_next;
|
|
|
|
|
|
|
|
n->m_len = m->m_len;
|
|
|
|
if (m->m_flags & M_EXT) {
|
|
|
|
n->m_data = m->m_data;
|
|
|
|
MCLADDREFERENCE(m, n);
|
|
|
|
} else {
|
1998-08-04 08:03:10 +04:00
|
|
|
memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
|
1997-03-27 23:33:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
return top;
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1997-03-27 23:33:07 +03:00
|
|
|
nospace:
|
|
|
|
m_freem(top);
|
2003-08-15 06:59:32 +04:00
|
|
|
return NULL;
|
1997-03-27 23:33:07 +03:00
|
|
|
}
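/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: m_copypacket() copies a whole packet (the chain must carry a
 * packet header) and is the optimized equivalent of
 * m_copym(m, 0, M_COPYALL, how).  Never compiled.
 */
#if 0
static struct mbuf *
example_copy_whole_packet(struct mbuf *m)
{
	KASSERT(m->m_flags & M_PKTHDR);
	return m_copypacket(m, M_DONTWAIT);	/* NULL on allocation failure */
}
#endif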
|
|
|
|
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2018-04-27 09:36:16 +03:00
|
|
|
m_copydata(struct mbuf *m, int off, int len, void *cp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2018-04-27 09:36:16 +03:00
|
|
|
unsigned int count;
|
2018-01-22 12:06:40 +03:00
|
|
|
struct mbuf *m0 = m;
|
|
|
|
int len0 = len;
|
|
|
|
int off0 = off;
|
2018-04-27 09:36:16 +03:00
|
|
|
void *cp0 = cp;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (off < 0 || len < 0)
|
2004-10-21 02:10:31 +04:00
|
|
|
panic("m_copydata: off %d, len %d", off, len);
|
1993-03-21 12:45:37 +03:00
|
|
|
while (off > 0) {
|
2005-06-02 14:34:59 +04:00
|
|
|
if (m == NULL)
|
2013-06-28 05:23:05 +04:00
|
|
|
panic("m_copydata(%p,%d,%d,%p): m=NULL, off=%d (%d)",
|
2018-04-27 09:36:16 +03:00
|
|
|
m0, len0, off0, cp0, off, off0 - off);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (off < m->m_len)
|
|
|
|
break;
|
|
|
|
off -= m->m_len;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
while (len > 0) {
|
2005-06-02 14:34:59 +04:00
|
|
|
if (m == NULL)
|
2013-06-28 05:23:05 +04:00
|
|
|
panic("m_copydata(%p,%d,%d,%p): "
|
|
|
|
"m=NULL, off=%d (%d), len=%d (%d)",
|
2018-04-27 09:36:16 +03:00
|
|
|
m0, len0, off0, cp0,
|
2013-06-28 05:23:05 +04:00
|
|
|
off, off0 - off, len, len0 - len);
|
2018-09-03 19:29:22 +03:00
|
|
|
count = uimin(m->m_len - off, len);
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(cp, mtod(m, char *) + off, count);
|
1993-03-21 12:45:37 +03:00
|
|
|
len -= count;
|
2007-03-04 08:59:00 +03:00
|
|
|
cp = (char *)cp + count;
|
1993-03-21 12:45:37 +03:00
|
|
|
off = 0;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Concatenate mbuf chain n to m.
|
2003-09-04 08:10:32 +04:00
|
|
|
* n might be copied into m (when n->m_len is small), therefore the data portion of
|
|
|
|
* n could be copied into an mbuf of a different type.
|
1993-03-21 12:45:37 +03:00
|
|
|
* The m_pkthdr, if any, is not updated.
|
|
|
|
*/
|
1996-02-04 05:17:43 +03:00
|
|
|
void
|
2003-01-31 07:55:52 +03:00
|
|
|
m_cat(struct mbuf *m, struct mbuf *n)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2003-08-15 06:59:32 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
while (m->m_next)
|
|
|
|
m = m->m_next;
|
|
|
|
while (n) {
|
2004-02-26 05:30:04 +03:00
|
|
|
if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
|
1993-03-21 12:45:37 +03:00
|
|
|
/* just join the two chains */
|
|
|
|
m->m_next = n;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* splat the data from one into the other */
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
|
1993-03-21 12:45:37 +03:00
|
|
|
(u_int)n->m_len);
|
|
|
|
m->m_len += n->m_len;
|
|
|
|
n = m_free(n);
|
|
|
|
}
|
|
|
|
}
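/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: m_cat() consumes chain "n" and does not touch any packet header,
 * so a caller that maintains a pkthdr fixes up the length itself.
 * example_cat() is a hypothetical name.  Never compiled.
 */
#if 0
static void
example_cat(struct mbuf *m, struct mbuf *n)
{
	int extra = m_length(n);

	m_cat(m, n);		/* "n" must not be touched after this */
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += extra;
}
#endif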
|
|
|
|
|
1994-09-19 12:07:17 +04:00
|
|
|
void
|
2003-01-31 07:55:52 +03:00
|
|
|
m_adj(struct mbuf *mp, int req_len)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
1998-05-22 21:47:21 +04:00
|
|
|
int len = req_len;
|
|
|
|
struct mbuf *m;
|
|
|
|
int count;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
if ((m = mp) == NULL)
|
|
|
|
return;
|
|
|
|
if (len >= 0) {
|
|
|
|
/*
|
|
|
|
* Trim from head.
|
|
|
|
*/
|
|
|
|
while (m != NULL && len > 0) {
|
|
|
|
if (m->m_len <= len) {
|
|
|
|
len -= m->m_len;
|
|
|
|
m->m_len = 0;
|
|
|
|
m = m->m_next;
|
|
|
|
} else {
|
|
|
|
m->m_len -= len;
|
|
|
|
m->m_data += len;
|
|
|
|
len = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (mp->m_flags & M_PKTHDR)
|
2018-01-22 18:05:27 +03:00
|
|
|
mp->m_pkthdr.len -= (req_len - len);
|
1993-03-21 12:45:37 +03:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Trim from tail. Scan the mbuf chain,
|
|
|
|
* calculating its length and finding the last mbuf.
|
|
|
|
* If the adjustment only affects this mbuf, then just
|
|
|
|
* adjust and return. Otherwise, rescan and truncate
|
|
|
|
* after the remaining size.
|
|
|
|
*/
|
|
|
|
len = -len;
|
|
|
|
count = 0;
|
|
|
|
for (;;) {
|
|
|
|
count += m->m_len;
|
2018-01-22 18:05:27 +03:00
|
|
|
if (m->m_next == NULL)
|
1993-03-21 12:45:37 +03:00
|
|
|
break;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
if (m->m_len >= len) {
|
|
|
|
m->m_len -= len;
|
1994-04-15 01:34:17 +04:00
|
|
|
if (mp->m_flags & M_PKTHDR)
|
|
|
|
mp->m_pkthdr.len -= len;
|
1993-03-21 12:45:37 +03:00
|
|
|
return;
|
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
count -= len;
|
|
|
|
if (count < 0)
|
|
|
|
count = 0;
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* Correct length for chain is "count".
|
|
|
|
* Find the mbuf with last data, adjust its length,
|
|
|
|
* and toss data from remaining mbufs on chain.
|
|
|
|
*/
|
|
|
|
m = mp;
|
|
|
|
if (m->m_flags & M_PKTHDR)
|
|
|
|
m->m_pkthdr.len = count;
|
|
|
|
for (; m; m = m->m_next) {
|
|
|
|
if (m->m_len >= count) {
|
|
|
|
m->m_len = count;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
count -= m->m_len;
|
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
if (m) {
|
2006-04-15 08:58:14 +04:00
|
|
|
while (m->m_next)
|
|
|
|
(m = m->m_next)->m_len = 0;
|
2018-01-22 18:05:27 +03:00
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
}
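/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: a positive length trims from the head of the chain, a negative
 * length trims from the tail.  Never compiled.
 */
#if 0
static void
example_adj(struct mbuf *m, int hdrlen, int trailerlen)
{
	m_adj(m, hdrlen);	/* drop hdrlen bytes from the front */
	m_adj(m, -trailerlen);	/* drop trailerlen bytes from the end */
}
#endif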
|
|
|
|
|
|
|
|
/*
|
2013-01-19 04:51:52 +04:00
|
|
|
* m_ensure_contig: rearrange an mbuf chain so that a given length of bytes
|
|
|
|
* is contiguous and in the data area of an mbuf (therefore, mtod()
|
|
|
|
* would work for a structure of the given length).
|
|
|
|
*
|
|
|
|
* => On success, returns true and the resulting mbuf chain; false otherwise.
|
|
|
|
* => The mbuf chain may change, but is always preserved valid.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
2013-01-19 04:51:52 +04:00
|
|
|
bool
|
|
|
|
m_ensure_contig(struct mbuf **m0, int len)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2013-01-19 04:51:52 +04:00
|
|
|
struct mbuf *n = *m0, *m;
|
|
|
|
size_t count, space;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
|
|
|
* If first mbuf has no cluster, and has room for len bytes
|
|
|
|
* without shifting current data, pullup into it,
|
|
|
|
* otherwise allocate a new mbuf to prepend to the chain.
|
|
|
|
*/
|
|
|
|
if ((n->m_flags & M_EXT) == 0 &&
|
|
|
|
n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
|
2013-01-19 04:51:52 +04:00
|
|
|
if (n->m_len >= len) {
|
|
|
|
return true;
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
m = n;
|
|
|
|
n = n->m_next;
|
|
|
|
len -= m->m_len;
|
|
|
|
} else {
|
2013-01-19 04:51:52 +04:00
|
|
|
if (len > MHLEN) {
|
|
|
|
return false;
|
|
|
|
}
|
2013-10-10 00:15:20 +04:00
|
|
|
m = m_get(M_DONTWAIT, n->m_type);
|
2013-01-19 04:51:52 +04:00
|
|
|
if (m == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(m, n->m_owner);
|
1993-03-21 12:45:37 +03:00
|
|
|
if (n->m_flags & M_PKTHDR) {
|
2018-12-22 16:11:37 +03:00
|
|
|
m_move_pkthdr(m, n);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
|
|
|
|
do {
|
2013-01-19 04:51:52 +04:00
|
|
|
count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len);
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
|
1993-03-21 12:45:37 +03:00
|
|
|
(unsigned)count);
|
|
|
|
len -= count;
|
|
|
|
m->m_len += count;
|
|
|
|
n->m_len -= count;
|
|
|
|
space -= count;
|
|
|
|
if (n->m_len)
|
|
|
|
n->m_data += count;
|
|
|
|
else
|
|
|
|
n = m_free(n);
|
|
|
|
} while (len > 0 && n);
|
2013-01-19 04:51:52 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
m->m_next = n;
|
2013-01-19 04:51:52 +04:00
|
|
|
*m0 = m;
|
|
|
|
|
|
|
|
return len <= 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* m_pullup: same as m_ensure_contig(), but destroys mbuf chain on error.
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
|
|
|
m_pullup(struct mbuf *n, int len)
|
|
|
|
{
|
|
|
|
struct mbuf *m = n;
|
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
2013-01-19 04:51:52 +04:00
|
|
|
if (!m_ensure_contig(&m, len)) {
|
|
|
|
KASSERT(m != NULL);
|
|
|
|
m_freem(m);
|
|
|
|
m = NULL;
|
|
|
|
}
|
|
|
|
return m;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
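/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: the usual m_pullup() pattern before casting the data pointer to
 * a header structure.  "struct example_hdr" is hypothetical.  Never
 * compiled.
 */
#if 0
static struct mbuf *
example_pullup(struct mbuf *m)
{
	struct example_hdr { uint32_t eh_word[5]; } *eh;

	if (m->m_len < (int)sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return NULL;	/* chain was freed by m_pullup() */
	}
	eh = mtod(m, struct example_hdr *);
	(void)eh;		/* parse the header here */
	return m;
}
#endif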
|
1994-05-13 10:01:27 +04:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
/*
|
|
|
|
* ensure that [off, off + len) is contiguous on the mbuf chain "m".
|
|
|
|
* the packet chain before "off" is kept untouched.
|
|
|
|
* if offp == NULL, the target will start at <retval, 0> on resulting chain.
|
|
|
|
* if offp != NULL, the target will start at <retval, *offp> on resulting chain.
|
|
|
|
*
|
|
|
|
* on error (NULL return value), the original "m" will be freed.
|
|
|
|
*
|
|
|
|
* XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster)
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
|
|
|
m_pulldown(struct mbuf *m, int off, int len, int *offp)
|
|
|
|
{
|
|
|
|
struct mbuf *n, *o;
|
|
|
|
int hlen, tlen, olen;
|
|
|
|
int sharedcluster;
|
|
|
|
|
|
|
|
/* Check invalid arguments. */
|
|
|
|
if (m == NULL)
|
|
|
|
panic("%s: m == NULL", __func__);
|
|
|
|
if (len > MCLBYTES) {
|
|
|
|
m_freem(m);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
n = m;
|
|
|
|
while (n != NULL && off > 0) {
|
|
|
|
if (n->m_len > off)
|
|
|
|
break;
|
|
|
|
off -= n->m_len;
|
|
|
|
n = n->m_next;
|
|
|
|
}
|
|
|
|
/* Be sure to point at a non-empty mbuf. */
|
|
|
|
while (n != NULL && n->m_len == 0)
|
|
|
|
n = n->m_next;
|
|
|
|
if (!n) {
|
|
|
|
m_freem(m);
|
|
|
|
return NULL; /* mbuf chain too short */
|
|
|
|
}
|
|
|
|
|
|
|
|
sharedcluster = M_READONLY(n);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The target data is on <n, off>. If we got enough data on the mbuf
|
|
|
|
* "n", we're done.
|
|
|
|
*/
|
|
|
|
#ifdef __NO_STRICT_ALIGNMENT
|
|
|
|
if ((off == 0 || offp) && len <= n->m_len - off && !sharedcluster)
|
|
|
|
#else
|
|
|
|
if ((off == 0 || offp) && len <= n->m_len - off && !sharedcluster &&
|
|
|
|
ALIGNED_POINTER((mtod(n, char *) + off), uint32_t))
|
|
|
|
#endif
|
|
|
|
goto ok;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When (len <= n->m_len - off) and (off != 0), it is a special case.
|
|
|
|
* Len bytes from <n, off> sit in a single mbuf, but the caller does
|
|
|
|
* not like the starting position (off).
|
|
|
|
*
|
|
|
|
* Chop the current mbuf into two pieces, set off to 0.
|
|
|
|
*/
|
|
|
|
if (len <= n->m_len - off) {
|
|
|
|
struct mbuf *mlast;
|
|
|
|
|
|
|
|
o = m_dup(n, off, n->m_len - off, M_DONTWAIT);
|
|
|
|
if (o == NULL) {
|
|
|
|
m_freem(m);
|
|
|
|
return NULL; /* ENOBUFS */
|
|
|
|
}
|
|
|
|
KASSERT(o->m_len >= len);
|
|
|
|
for (mlast = o; mlast->m_next != NULL; mlast = mlast->m_next)
|
|
|
|
;
|
|
|
|
n->m_len = off;
|
|
|
|
mlast->m_next = n->m_next;
|
|
|
|
n->m_next = o;
|
|
|
|
n = o;
|
|
|
|
off = 0;
|
|
|
|
goto ok;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to take hlen from <n, off> and tlen from <n->m_next, 0>,
|
|
|
|
* and construct contiguous mbuf with m_len == len.
|
|
|
|
*
|
|
|
|
* Note that hlen + tlen == len, and tlen > 0.
|
|
|
|
*/
|
|
|
|
hlen = n->m_len - off;
|
|
|
|
tlen = len - hlen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ensure that we have enough trailing data on the mbuf chain. If not,
|
|
|
|
* we can do nothing about the chain.
|
|
|
|
*/
|
|
|
|
olen = 0;
|
|
|
|
for (o = n->m_next; o != NULL; o = o->m_next)
|
|
|
|
olen += o->m_len;
|
|
|
|
if (hlen + olen < len) {
|
|
|
|
m_freem(m);
|
|
|
|
return NULL; /* mbuf chain too short */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Easy cases first. We need to use m_copydata() to get data from
|
|
|
|
* <n->m_next, 0>.
|
|
|
|
*/
|
|
|
|
if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen &&
|
|
|
|
!sharedcluster) {
|
|
|
|
m_copydata(n->m_next, 0, tlen, mtod(n, char *) + n->m_len);
|
|
|
|
n->m_len += tlen;
|
|
|
|
m_adj(n->m_next, tlen);
|
|
|
|
goto ok;
|
|
|
|
}
|
|
|
|
if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
|
|
|
|
#ifndef __NO_STRICT_ALIGNMENT
|
|
|
|
ALIGNED_POINTER((n->m_next->m_data - hlen), uint32_t) &&
|
|
|
|
#endif
|
|
|
|
!sharedcluster && n->m_next->m_len >= tlen) {
|
|
|
|
n->m_next->m_data -= hlen;
|
|
|
|
n->m_next->m_len += hlen;
|
|
|
|
memcpy(mtod(n->m_next, void *), mtod(n, char *) + off, hlen);
|
|
|
|
n->m_len -= hlen;
|
|
|
|
n = n->m_next;
|
|
|
|
off = 0;
|
|
|
|
goto ok;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we need to do it the hard way. We can't copy, as there is no room
|
|
|
|
* on either end.
|
|
|
|
*/
|
|
|
|
o = m_get(M_DONTWAIT, m->m_type);
|
|
|
|
if (o && len > MLEN) {
|
|
|
|
MCLGET(o, M_DONTWAIT);
|
|
|
|
if ((o->m_flags & M_EXT) == 0) {
|
|
|
|
m_free(o);
|
|
|
|
o = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!o) {
|
|
|
|
m_freem(m);
|
|
|
|
return NULL; /* ENOBUFS */
|
|
|
|
}
|
|
|
|
/* get hlen from <n, off> into <o, 0> */
|
|
|
|
o->m_len = hlen;
|
|
|
|
memcpy(mtod(o, void *), mtod(n, char *) + off, hlen);
|
|
|
|
n->m_len -= hlen;
|
|
|
|
/* get tlen from <n->m_next, 0> into <o, hlen> */
|
|
|
|
m_copydata(n->m_next, 0, tlen, mtod(o, char *) + o->m_len);
|
|
|
|
o->m_len += tlen;
|
|
|
|
m_adj(n->m_next, tlen);
|
|
|
|
o->m_next = n->m_next;
|
|
|
|
n->m_next = o;
|
|
|
|
n = o;
|
|
|
|
off = 0;
|
|
|
|
|
|
|
|
ok:
|
|
|
|
if (offp)
|
|
|
|
*offp = off;
|
|
|
|
return n;
|
|
|
|
}
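/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: typical m_pulldown() usage for a header at a non-zero offset.
 * The return value points at the mbuf inside the chain that now holds
 * the contiguous data; the head of the chain stays valid unless NULL is
 * returned, in which case the whole chain has been freed.
 * "struct example_hdr" is hypothetical.  Never compiled.
 */
#if 0
static int
example_pulldown(struct mbuf *m, int off)
{
	struct example_hdr { uint32_t eh_word[5]; } *eh;
	struct mbuf *n;
	int hoff;

	n = m_pulldown(m, off, sizeof(*eh), &hoff);
	if (n == NULL)
		return ENOBUFS;	/* "m" has already been freed */
	eh = (struct example_hdr *)(mtod(n, char *) + hoff);
	(void)eh;		/* parse the header here */
	return 0;
}
#endif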
|
|
|
|
|
2002-07-01 02:40:32 +04:00
|
|
|
/*
|
|
|
|
* Like m_pullup(), except a new mbuf is always allocated, and we allow
|
|
|
|
* the amount of empty space before the data in the new mbuf to be specified
|
|
|
|
* (in the event that the caller expects to prepend later).
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
|
|
|
m_copyup(struct mbuf *n, int len, int dstoff)
|
|
|
|
{
|
|
|
|
struct mbuf *m;
|
|
|
|
int count, space;
|
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
2018-04-20 09:01:59 +03:00
|
|
|
if (len > ((int)MHLEN - dstoff))
|
2002-07-01 02:40:32 +04:00
|
|
|
goto bad;
|
2013-10-10 00:15:20 +04:00
|
|
|
m = m_get(M_DONTWAIT, n->m_type);
|
2002-07-01 02:40:32 +04:00
|
|
|
if (m == NULL)
|
|
|
|
goto bad;
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(m, n->m_owner);
|
2002-07-01 02:40:32 +04:00
|
|
|
if (n->m_flags & M_PKTHDR) {
|
2018-12-22 16:11:37 +03:00
|
|
|
m_move_pkthdr(m, n);
|
2002-07-01 02:40:32 +04:00
|
|
|
}
|
|
|
|
m->m_data += dstoff;
|
|
|
|
space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
|
|
|
|
do {
|
2018-09-03 19:29:22 +03:00
|
|
|
count = uimin(uimin(uimax(len, max_protohdr), space), n->m_len);
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
|
2002-07-01 02:40:32 +04:00
|
|
|
(unsigned)count);
|
|
|
|
len -= count;
|
|
|
|
m->m_len += count;
|
|
|
|
n->m_len -= count;
|
|
|
|
space -= count;
|
|
|
|
if (n->m_len)
|
|
|
|
n->m_data += count;
|
|
|
|
else
|
|
|
|
n = m_free(n);
|
|
|
|
} while (len > 0 && n);
|
|
|
|
if (len > 0) {
|
|
|
|
(void) m_free(m);
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
m->m_next = n;
|
2018-04-27 09:27:36 +03:00
|
|
|
return m;
|
2002-07-01 02:40:32 +04:00
|
|
|
bad:
|
|
|
|
m_freem(n);
|
2018-04-27 09:27:36 +03:00
|
|
|
return NULL;
|
2002-07-01 02:40:32 +04:00
|
|
|
}
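/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: m_copyup() behaves like m_pullup() but always allocates a new
 * mbuf and can leave "dstoff" bytes of leading space for headers that
 * will be prepended later.  The 40/16 values are only examples.  Never
 * compiled.
 */
#if 0
static struct mbuf *
example_copyup(struct mbuf *m)
{
	/* Make the first 40 bytes contiguous, leaving 16 bytes in front. */
	m = m_copyup(m, 40, 16);
	if (m == NULL)
		return NULL;	/* chain was freed by m_copyup() */
	return m;
}
#endif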
|
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
struct mbuf *
|
2018-04-26 11:13:30 +03:00
|
|
|
m_split(struct mbuf *m0, int len, int wait)
|
2004-09-06 13:43:29 +04:00
|
|
|
{
|
2018-04-26 11:13:30 +03:00
|
|
|
return m_split_internal(m0, len, wait, true);
|
2004-09-06 13:43:29 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct mbuf *
|
2018-04-26 11:13:30 +03:00
|
|
|
m_split_internal(struct mbuf *m0, int len0, int wait, bool copyhdr)
|
1994-05-13 10:01:27 +04:00
|
|
|
{
|
1998-05-22 21:47:21 +04:00
|
|
|
struct mbuf *m, *n;
|
1997-11-20 07:28:18 +03:00
|
|
|
unsigned len = len0, remain, len_save;
|
1994-05-13 10:01:27 +04:00
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len0 != M_COPYALL);
|
1994-05-13 10:01:27 +04:00
|
|
|
for (m = m0; m && len > m->m_len; m = m->m_next)
|
|
|
|
len -= m->m_len;
|
2018-01-22 18:05:27 +03:00
|
|
|
if (m == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
remain = m->m_len - len;
|
2004-09-06 13:43:29 +04:00
|
|
|
if (copyhdr && (m0->m_flags & M_PKTHDR)) {
|
2013-10-10 00:15:20 +04:00
|
|
|
n = m_gethdr(wait, m0->m_type);
|
|
|
|
if (n == NULL)
|
|
|
|
return NULL;
|
2018-01-22 18:05:27 +03:00
|
|
|
|
2006-08-08 19:53:40 +04:00
|
|
|
MCLAIM(n, m0->m_owner);
|
2016-06-10 16:31:43 +03:00
|
|
|
m_copy_rcvif(n, m0);
|
1994-05-13 10:01:27 +04:00
|
|
|
n->m_pkthdr.len = m0->m_pkthdr.len - len0;
|
1997-11-20 07:28:18 +03:00
|
|
|
len_save = m0->m_pkthdr.len;
|
1994-05-13 10:01:27 +04:00
|
|
|
m0->m_pkthdr.len = len0;
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
if (m->m_flags & M_EXT)
|
|
|
|
goto extpacket;
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
if (remain > MHLEN) {
|
|
|
|
/* m can't be the lead packet */
|
2018-12-27 17:03:54 +03:00
|
|
|
m_align(n, 0);
|
2009-04-05 20:31:21 +04:00
|
|
|
n->m_len = 0;
|
1994-05-13 10:01:27 +04:00
|
|
|
n->m_next = m_split(m, len, wait);
|
2018-01-22 18:05:27 +03:00
|
|
|
if (n->m_next == NULL) {
|
|
|
|
(void)m_free(n);
|
1997-11-20 07:28:18 +03:00
|
|
|
m0->m_pkthdr.len = len_save;
|
2018-01-22 18:05:27 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
} else {
|
2018-12-27 17:03:54 +03:00
|
|
|
m_align(n, remain);
|
2018-01-22 18:05:27 +03:00
|
|
|
}
|
1994-05-13 10:01:27 +04:00
|
|
|
} else if (remain == 0) {
|
|
|
|
n = m->m_next;
|
2018-01-22 18:05:27 +03:00
|
|
|
m->m_next = NULL;
|
|
|
|
return n;
|
1994-05-13 10:01:27 +04:00
|
|
|
} else {
|
2013-10-10 00:15:20 +04:00
|
|
|
n = m_get(wait, m->m_type);
|
2018-01-22 18:05:27 +03:00
|
|
|
if (n == NULL)
|
|
|
|
return NULL;
|
2003-02-26 09:31:08 +03:00
|
|
|
MCLAIM(n, m->m_owner);
|
2018-12-22 17:28:56 +03:00
|
|
|
m_align(n, remain);
|
1994-05-13 10:01:27 +04:00
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
extpacket:
|
|
|
|
if (m->m_flags & M_EXT) {
|
|
|
|
n->m_data = m->m_data + len;
|
2008-03-24 15:24:37 +03:00
|
|
|
MCLADDREFERENCE(m, n);
|
1994-05-13 10:01:27 +04:00
|
|
|
} else {
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(n, void *), mtod(m, char *) + len, remain);
|
1994-05-13 10:01:27 +04:00
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
n->m_len = remain;
|
|
|
|
m->m_len = len;
|
|
|
|
n->m_next = m->m_next;
|
2018-01-22 18:05:27 +03:00
|
|
|
m->m_next = NULL;
|
|
|
|
return n;
|
1994-05-13 10:01:27 +04:00
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
/*
|
|
|
|
* Routine to copy from device local memory into mbufs.
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
2018-11-15 13:56:29 +03:00
|
|
|
m_devget(char *buf, int totlen, int off, struct ifnet *ifp)
|
1994-05-13 10:01:27 +04:00
|
|
|
{
|
1998-05-22 21:47:21 +04:00
|
|
|
struct mbuf *m;
|
2018-01-22 18:05:27 +03:00
|
|
|
struct mbuf *top = NULL, **mp = &top;
|
|
|
|
char *cp, *epkt;
|
2018-11-15 13:56:29 +03:00
|
|
|
int len;
|
1994-05-13 10:01:27 +04:00
|
|
|
|
|
|
|
cp = buf;
|
|
|
|
epkt = cp + totlen;
|
|
|
|
if (off) {
|
1994-10-31 00:43:03 +03:00
|
|
|
/*
|
|
|
|
* If 'off' is non-zero, packet is trailer-encapsulated,
|
|
|
|
* so we have to skip the type and length fields.
|
|
|
|
*/
|
2005-12-26 21:41:36 +03:00
|
|
|
cp += off + 2 * sizeof(uint16_t);
|
|
|
|
totlen -= 2 * sizeof(uint16_t);
|
1994-05-13 10:01:27 +04:00
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
2013-10-10 00:15:20 +04:00
|
|
|
m = m_gethdr(M_DONTWAIT, MT_DATA);
|
|
|
|
if (m == NULL)
|
|
|
|
return NULL;
|
2016-06-10 16:27:10 +03:00
|
|
|
m_set_rcvif(m, ifp);
|
1994-05-13 10:01:27 +04:00
|
|
|
m->m_pkthdr.len = totlen;
|
|
|
|
m->m_len = MHLEN;
|
|
|
|
|
|
|
|
while (totlen > 0) {
|
|
|
|
if (top) {
|
2013-10-10 00:15:20 +04:00
|
|
|
m = m_get(M_DONTWAIT, MT_DATA);
|
2018-01-22 18:05:27 +03:00
|
|
|
if (m == NULL) {
|
1994-05-13 10:01:27 +04:00
|
|
|
m_freem(top);
|
2018-01-22 18:05:27 +03:00
|
|
|
return NULL;
|
1994-05-13 10:01:27 +04:00
|
|
|
}
|
|
|
|
m->m_len = MLEN;
|
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
2018-09-03 19:29:22 +03:00
|
|
|
len = uimin(totlen, epkt - cp);
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
if (len >= MINCLSIZE) {
|
|
|
|
MCLGET(m, M_DONTWAIT);
|
1997-04-24 12:14:04 +04:00
|
|
|
if ((m->m_flags & M_EXT) == 0) {
|
1997-04-28 21:03:58 +04:00
|
|
|
m_free(m);
|
1997-04-24 12:14:04 +04:00
|
|
|
m_freem(top);
|
2018-01-22 18:05:27 +03:00
|
|
|
return NULL;
|
1997-04-24 12:14:04 +04:00
|
|
|
}
|
2018-09-03 19:29:22 +03:00
|
|
|
m->m_len = len = uimin(len, MCLBYTES);
|
1994-05-13 10:01:27 +04:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Place initial small packet/header at end of mbuf.
|
|
|
|
*/
|
|
|
|
if (len < m->m_len) {
|
|
|
|
if (top == 0 && len + max_linkhdr <= m->m_len)
|
|
|
|
m->m_data += max_linkhdr;
|
|
|
|
m->m_len = len;
|
|
|
|
} else
|
|
|
|
len = m->m_len;
|
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
2018-11-15 13:56:29 +03:00
|
|
|
memcpy(mtod(m, void *), cp, (size_t)len);
|
2018-01-22 18:05:27 +03:00
|
|
|
|
1994-05-13 10:01:27 +04:00
|
|
|
cp += len;
|
|
|
|
*mp = m;
|
|
|
|
mp = &m->m_next;
|
|
|
|
totlen -= len;
|
|
|
|
if (cp == epkt)
|
|
|
|
cp = buf;
|
|
|
|
}
|
2018-01-22 18:05:27 +03:00
|
|
|
|
|
|
|
return top;
|
1994-05-13 10:01:27 +04:00
|
|
|
}
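/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: a receive path may use m_devget() to copy a frame from a
 * device-local buffer into a fresh mbuf chain and then hand it to the
 * stack.  The input path shown (if_percpuq_enqueue) is just one
 * possibility; rxbuf/framelen are hypothetical driver state.  Never
 * compiled.
 */
#if 0
static void
example_devget_rx(struct ifnet *ifp, char *rxbuf, int framelen)
{
	struct mbuf *m;

	m = m_devget(rxbuf, framelen, 0, ifp);
	if (m == NULL)
		return;		/* out of mbufs, drop the frame */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
#endif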
|
1997-03-27 23:33:07 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy data from a buffer back into the indicated mbuf chain,
|
|
|
|
* starting "off" bytes from the beginning, extending the mbuf
|
|
|
|
* chain if necessary.
|
|
|
|
*/
|
|
|
|
void
|
2004-09-08 16:00:28 +04:00
|
|
|
m_copyback(struct mbuf *m0, int off, int len, const void *cp)
|
2004-09-06 13:43:29 +04:00
|
|
|
{
|
|
|
|
#if defined(DEBUG)
|
|
|
|
struct mbuf *origm = m0;
|
|
|
|
int error;
|
2018-01-22 18:05:27 +03:00
|
|
|
#endif
|
2004-09-06 13:43:29 +04:00
|
|
|
|
|
|
|
if (m0 == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
#if defined(DEBUG)
|
|
|
|
error =
|
2018-01-22 18:05:27 +03:00
|
|
|
#endif
|
2018-04-26 11:31:36 +03:00
|
|
|
m_copyback_internal(&m0, off, len, cp, CB_COPYBACK|CB_EXTEND,
|
|
|
|
M_DONTWAIT);
|
2004-09-06 13:43:29 +04:00
|
|
|
|
|
|
|
#if defined(DEBUG)
|
|
|
|
if (error != 0 || (m0 != NULL && origm != m0))
|
|
|
|
panic("m_copyback");
|
2018-01-22 18:05:27 +03:00
|
|
|
#endif
|
2004-09-06 13:43:29 +04:00
|
|
|
}
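/*
 * Illustrative sketch, added for exposition and not part of the original
 * file: m_copyback() stores bytes into an existing chain at a given
 * offset, extending the chain if necessary; here a 16-bit checksum is
 * written back at a hypothetical offset.  Never compiled.
 */
#if 0
static void
example_copyback(struct mbuf *m, int cksum_off, uint16_t cksum)
{
	m_copyback(m, cksum_off, sizeof(cksum), &cksum);
}
#endif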
|
|
|
|
|
|
|
|
struct mbuf *
|
2004-09-08 16:00:28 +04:00
|
|
|
m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
|
2004-09-06 13:43:29 +04:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
/* don't support chain expansion */
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
2004-09-06 13:43:29 +04:00
|
|
|
KDASSERT(off + len <= m_length(m0));
|
|
|
|
|
2018-04-26 11:31:36 +03:00
|
|
|
error = m_copyback_internal(&m0, off, len, cp, CB_COPYBACK|CB_COW,
|
|
|
|
how);
|
2004-09-06 13:43:29 +04:00
|
|
|
if (error) {
|
|
|
|
/*
|
|
|
|
* no way to recover from partial success.
|
|
|
|
* just free the chain.
|
|
|
|
*/
|
|
|
|
m_freem(m0);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return m0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
m_makewritable(struct mbuf **mp, int off, int len, int how)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
#if defined(DEBUG)
|
2013-11-14 22:54:40 +04:00
|
|
|
int origlen = m_length(*mp);
|
2018-01-22 18:05:27 +03:00
|
|
|
#endif
|
2004-09-06 13:43:29 +04:00
|
|
|
|
2018-04-26 11:31:36 +03:00
|
|
|
error = m_copyback_internal(mp, off, len, NULL, CB_PRESERVE|CB_COW,
|
|
|
|
how);
|
2017-01-09 17:25:52 +03:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2004-09-06 13:43:29 +04:00
|
|
|
#if defined(DEBUG)
|
2013-11-14 22:54:40 +04:00
|
|
|
int reslen = 0;
|
|
|
|
for (struct mbuf *n = *mp; n; n = n->m_next)
|
2004-09-06 13:43:29 +04:00
|
|
|
reslen += n->m_len;
|
|
|
|
if (origlen != reslen)
|
|
|
|
panic("m_makewritable: length changed");
|
|
|
|
if (((*mp)->m_flags & M_PKTHDR) != 0 && reslen != (*mp)->m_pkthdr.len)
|
|
|
|
panic("m_makewritable: inconsist");
|
2018-01-22 18:05:27 +03:00
|
|
|
#endif
|
2004-09-06 13:43:29 +04:00
|
|
|
|
2017-01-09 17:25:52 +03:00
|
|
|
return 0;
|
2004-09-06 13:43:29 +04:00
|
|
|
}
|
|
|
|
|
2018-04-26 11:31:36 +03:00
|
|
|
static int
|
|
|
|
m_copyback_internal(struct mbuf **mp0, int off, int len, const void *vp,
|
|
|
|
int flags, int how)
|
1997-03-27 23:33:07 +03:00
|
|
|
{
|
1998-05-22 21:47:21 +04:00
|
|
|
int mlen;
|
2004-09-06 13:43:29 +04:00
|
|
|
struct mbuf *m, *n;
|
|
|
|
struct mbuf **mp;
|
1997-03-27 23:33:07 +03:00
|
|
|
int totlen = 0;
|
2004-09-08 16:00:28 +04:00
|
|
|
const char *cp = vp;
|
1997-03-27 23:33:07 +03:00
|
|
|
|
2004-09-06 13:43:29 +04:00
|
|
|
KASSERT(mp0 != NULL);
|
|
|
|
KASSERT(*mp0 != NULL);
|
2018-04-26 11:31:36 +03:00
|
|
|
KASSERT((flags & CB_PRESERVE) == 0 || cp == NULL);
|
|
|
|
KASSERT((flags & CB_COPYBACK) == 0 || cp != NULL);
|
2004-09-06 13:43:29 +04:00
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
if (len == M_COPYALL)
|
|
|
|
len = m_length(*mp0) - off;
|
|
|
|
|
2006-03-15 13:40:30 +03:00
|
|
|
/*
|
2018-04-26 11:31:36 +03:00
|
|
|
* we don't bother to update "totlen" in the case of CB_COW,
|
|
|
|
* assuming that CB_EXTEND and CB_COW are exclusive.
|
2006-03-15 13:40:30 +03:00
|
|
|
*/
|
|
|
|
|
2018-04-26 11:31:36 +03:00
|
|
|
KASSERT((~flags & (CB_EXTEND|CB_COW)) != 0);
|
2006-03-15 13:40:30 +03:00
|
|
|
|
2004-09-06 13:43:29 +04:00
|
|
|
mp = mp0;
|
|
|
|
m = *mp;
|
1997-03-27 23:33:07 +03:00
|
|
|
while (off > (mlen = m->m_len)) {
|
|
|
|
off -= mlen;
|
|
|
|
totlen += mlen;
|
2006-03-19 13:07:19 +03:00
|
|
|
if (m->m_next == NULL) {
|
|
|
|
int tspace;
|
|
|
|
extend:
|
2018-04-26 11:31:36 +03:00
|
|
|
if ((flags & CB_EXTEND) == 0)
|
2004-09-06 13:43:29 +04:00
|
|
|
goto out;
|
2006-03-19 13:07:19 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* try to make some space at the end of "m".
|
|
|
|
*/
|
|
|
|
|
|
|
|
mlen = m->m_len;
|
|
|
|
if (off + len >= MINCLSIZE &&
|
|
|
|
(m->m_flags & M_EXT) == 0 && m->m_len == 0) {
|
|
|
|
MCLGET(m, how);
|
|
|
|
}
|
|
|
|
tspace = M_TRAILINGSPACE(m);
|
|
|
|
if (tspace > 0) {
|
2018-09-03 19:29:22 +03:00
|
|
|
tspace = uimin(tspace, off + len);
|
2006-03-19 13:07:19 +03:00
|
|
|
KASSERT(tspace > 0);
|
|
|
|
memset(mtod(m, char *) + m->m_len, 0,
|
2018-09-03 19:29:22 +03:00
|
|
|
uimin(off, tspace));
|
2006-03-19 13:07:19 +03:00
|
|
|
m->m_len += tspace;
|
|
|
|
off += mlen;
|
|
|
|
totlen -= mlen;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* need to allocate an mbuf.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (off + len >= MINCLSIZE) {
|
|
|
|
n = m_getcl(how, m->m_type, 0);
|
|
|
|
} else {
|
|
|
|
n = m_get(how, m->m_type);
|
|
|
|
}
|
|
|
|
if (n == NULL) {
|
1997-03-27 23:33:07 +03:00
|
|
|
goto out;
|
2006-03-19 13:07:19 +03:00
|
|
|
}
|
2018-09-03 19:29:22 +03:00
|
|
|
n->m_len = uimin(M_TRAILINGSPACE(n), off + len);
|
|
|
|
memset(mtod(n, char *), 0, uimin(n->m_len, off));
|
1997-03-27 23:33:07 +03:00
|
|
|
m->m_next = n;
|
|
|
|
}
|
2004-09-06 13:43:29 +04:00
|
|
|
mp = &m->m_next;
|
1997-03-27 23:33:07 +03:00
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
while (len > 0) {
|
2004-09-06 13:43:29 +04:00
|
|
|
mlen = m->m_len - off;
|
|
|
|
if (mlen != 0 && M_READONLY(m)) {
|
|
|
|
/*
|
2018-04-26 11:31:36 +03:00
|
|
|
* This mbuf is read-only. Allocate a new writable
|
|
|
|
* mbuf and try again.
|
2004-09-06 13:43:29 +04:00
|
|
|
*/
|
2018-04-26 11:31:36 +03:00
|
|
|
char *datap;
|
|
|
|
int eatlen;
|
2004-09-06 13:43:29 +04:00
|
|
|
|
2018-04-26 11:31:36 +03:00
|
|
|
KASSERT((flags & CB_COW) != 0);
|
2004-09-06 13:43:29 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if we're going to write into the middle of
|
|
|
|
* an mbuf, split it first.
|
|
|
|
*/
|
2010-10-28 18:21:50 +04:00
|
|
|
if (off > 0) {
|
2018-04-26 11:13:30 +03:00
|
|
|
n = m_split_internal(m, off, how, false);
|
2004-09-06 13:43:29 +04:00
|
|
|
if (n == NULL)
|
|
|
|
goto enobufs;
|
|
|
|
m->m_next = n;
|
|
|
|
mp = &m->m_next;
|
|
|
|
m = n;
|
|
|
|
off = 0;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX TODO coalesce into the trailingspace of
|
|
|
|
* the previous mbuf when possible.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* allocate a new mbuf. copy packet header if needed.
|
|
|
|
*/
|
2013-10-10 00:15:20 +04:00
|
|
|
n = m_get(how, m->m_type);
|
2004-09-06 13:43:29 +04:00
|
|
|
if (n == NULL)
|
|
|
|
goto enobufs;
|
|
|
|
MCLAIM(n, m->m_owner);
|
|
|
|
if (off == 0 && (m->m_flags & M_PKTHDR) != 0) {
|
2018-12-22 16:11:37 +03:00
|
|
|
m_move_pkthdr(n, m);
|
2004-09-06 13:43:29 +04:00
|
|
|
n->m_len = MHLEN;
|
|
|
|
} else {
|
|
|
|
if (len >= MINCLSIZE)
|
|
|
|
MCLGET(n, M_DONTWAIT);
|
|
|
|
n->m_len =
|
|
|
|
(n->m_flags & M_EXT) ? MCLBYTES : MLEN;
|
|
|
|
}
|
|
|
|
if (n->m_len > len)
|
|
|
|
n->m_len = len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* free the region which has been overwritten,
|
|
|
|
* copying data from old mbufs if requested.
|
|
|
|
*/
|
2018-04-26 11:31:36 +03:00
|
|
|
if (flags & CB_PRESERVE)
|
2004-09-06 13:43:29 +04:00
|
|
|
datap = mtod(n, char *);
|
|
|
|
else
|
|
|
|
datap = NULL;
|
|
|
|
eatlen = n->m_len;
|
|
|
|
while (m != NULL && M_READONLY(m) &&
|
|
|
|
n->m_type == m->m_type && eatlen > 0) {
|
2018-09-03 19:29:22 +03:00
|
|
|
mlen = uimin(eatlen, m->m_len);
|
2004-09-06 13:43:29 +04:00
|
|
|
if (datap) {
|
|
|
|
m_copydata(m, 0, mlen, datap);
|
|
|
|
datap += mlen;
|
|
|
|
}
|
|
|
|
m->m_data += mlen;
|
|
|
|
m->m_len -= mlen;
|
|
|
|
eatlen -= mlen;
|
|
|
|
if (m->m_len == 0)
|
|
|
|
*mp = m = m_free(m);
|
|
|
|
}
|
|
|
|
if (eatlen > 0)
|
|
|
|
n->m_len -= eatlen;
|
|
|
|
n->m_next = m;
|
|
|
|
*mp = m = n;
|
|
|
|
continue;
|
|
|
|
}
|
2018-09-03 19:29:22 +03:00
|
|
|
mlen = uimin(mlen, len);
|
2018-04-26 11:31:36 +03:00
|
|
|
if (flags & CB_COPYBACK) {
|
2007-03-04 08:59:00 +03:00
|
|
|
memcpy(mtod(m, char *) + off, cp, (unsigned)mlen);
|
2004-09-06 13:43:29 +04:00
|
|
|
cp += mlen;
|
|
|
|
}
|
1997-03-27 23:33:07 +03:00
|
|
|
len -= mlen;
|
|
|
|
mlen += off;
|
|
|
|
off = 0;
|
|
|
|
totlen += mlen;
|
|
|
|
if (len == 0)
|
|
|
|
break;
|
2006-03-19 13:07:19 +03:00
|
|
|
if (m->m_next == NULL) {
|
|
|
|
goto extend;
|
1997-03-27 23:33:07 +03:00
|
|
|
}
|
2004-09-06 13:43:29 +04:00
|
|
|
mp = &m->m_next;
|
1997-03-27 23:33:07 +03:00
|
|
|
m = m->m_next;
|
|
|
|
}
|
2018-04-27 09:27:36 +03:00
|
|
|
|
|
|
|
out:
|
|
|
|
if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
|
2018-04-26 11:31:36 +03:00
|
|
|
KASSERT((flags & CB_EXTEND) != 0);
|
1997-03-27 23:33:07 +03:00
|
|
|
m->m_pkthdr.len = totlen;
|
2006-03-15 13:40:30 +03:00
|
|
|
}
|
2004-09-06 13:43:29 +04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
enobufs:
|
|
|
|
return ENOBUFS;
|
1997-03-27 23:33:07 +03:00
|
|
|
}
|
2003-04-12 06:49:25 +04:00
|
|
|
|
2018-04-27 11:23:18 +03:00
|
|
|
/*
|
Modify m_defrag, so that it never frees the first mbuf of the chain. While
here use the given 'flags' argument, and not M_DONTWAIT.
We have a problem with several drivers: they poll an mbuf chain from their
queues and call m_defrag on them, but m_defrag could update the mbuf
pointer, so the mbuf in the queue is no longer valid. It is not easy to
fix each driver, because doing pop+push will reorder the queue, and we
don't really want that to happen.
This problem was independently spotted by me, Kengo, Masanobu, and other
people too it seems (perhaps PR/53218).
Now m_defrag leaves the first mbuf in place, and compresses the chain
only starting from the second mbuf in the chain.
It is important not to compress the first mbuf with hacks, because the
storage of this first mbuf may be shared with other mbufs.
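A minimal sketch of the driver pattern this change protects, assuming a
typical start routine where 'ifp' is the driver's struct ifnet; the chain
is only peeked from the send queue, so the head mbuf must stay valid
across m_defrag():
struct mbuf *m;
IFQ_POLL(&ifp->if_snd, m);	/* peek: 'm' stays on the queue */
if (m == NULL)
	return;
if (m_defrag(m, M_DONTWAIT) == NULL) {
	/* could not compress; the original chain is left untouched */
	return;
}
/* 'm' is still the head mbuf, so the queued pointer remains valid */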
2018-04-28 11:16:15 +03:00
|
|
|
* Compress the mbuf chain. Return the new mbuf chain on success, NULL on
|
|
|
|
* failure. The first mbuf is preserved, and on success the pointer returned
|
|
|
|
* is the same as the one passed.
|
2018-04-27 11:23:18 +03:00
|
|
|
*/
|
|
|
|
struct mbuf *
|
2018-04-28 11:34:45 +03:00
|
|
|
m_defrag(struct mbuf *m, int how)
|
2018-04-27 11:23:18 +03:00
|
|
|
{
|
|
|
|
struct mbuf *m0, *mn, *n;
|
2018-04-28 11:16:15 +03:00
|
|
|
int sz;
|
2018-04-27 11:23:18 +03:00
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
KASSERT((m->m_flags & M_PKTHDR) != 0);
|
2018-04-27 11:23:18 +03:00
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
if (m->m_next == NULL)
|
|
|
|
return m;
|
2018-04-28 11:16:15 +03:00
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
m0 = m_get(how, MT_DATA);
|
2018-04-27 11:23:18 +03:00
|
|
|
if (m0 == NULL)
|
|
|
|
return NULL;
|
|
|
|
mn = m0;
|
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
sz = m->m_pkthdr.len - m->m_len;
|
2018-04-28 11:16:15 +03:00
|
|
|
KASSERT(sz >= 0);
|
|
|
|
|
2018-04-27 11:23:18 +03:00
|
|
|
do {
|
2018-04-28 11:16:15 +03:00
|
|
|
if (sz > MLEN) {
|
2018-04-28 11:34:45 +03:00
|
|
|
MCLGET(mn, how);
|
2018-04-27 11:23:18 +03:00
|
|
|
if ((mn->m_flags & M_EXT) == 0) {
|
|
|
|
m_freem(m0);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mn->m_len = MIN(sz, MCLBYTES);
|
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
m_copydata(m, m->m_pkthdr.len - sz, mn->m_len,
|
2018-04-27 11:23:18 +03:00
|
|
|
mtod(mn, void *));
|
|
|
|
|
|
|
|
sz -= mn->m_len;
|
|
|
|
|
|
|
|
if (sz > 0) {
|
|
|
|
/* need more mbufs */
|
2018-04-28 11:34:45 +03:00
|
|
|
n = m_get(how, MT_DATA);
|
2018-04-27 11:23:18 +03:00
|
|
|
if (n == NULL) {
|
|
|
|
m_freem(m0);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
mn->m_next = n;
|
|
|
|
mn = n;
|
|
|
|
}
|
|
|
|
} while (sz > 0);
|
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
m_freem(m->m_next);
|
|
|
|
m->m_next = m0;
|
2018-04-27 11:23:18 +03:00
|
|
|
|
2018-04-28 11:34:45 +03:00
|
|
|
return m;
|
2018-04-27 11:23:18 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-05-03 10:25:49 +03:00
|
|
|
m_remove_pkthdr(struct mbuf *m)
|
2018-04-27 11:23:18 +03:00
|
|
|
{
|
|
|
|
KASSERT(m->m_flags & M_PKTHDR);
|
|
|
|
|
2018-11-15 13:06:06 +03:00
|
|
|
m_tag_delete_chain(m);
|
2018-04-27 11:23:18 +03:00
|
|
|
m->m_flags &= ~M_PKTHDR;
|
|
|
|
memset(&m->m_pkthdr, 0, sizeof(m->m_pkthdr));
|
|
|
|
}
|
|
|
|
|
2018-04-27 10:20:33 +03:00
|
|
|
void
|
|
|
|
m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
|
|
|
|
{
|
2018-05-07 12:57:37 +03:00
|
|
|
KASSERT((to->m_flags & M_EXT) == 0);
|
2018-11-15 13:06:06 +03:00
|
|
|
KASSERT((to->m_flags & M_PKTHDR) == 0 ||
|
|
|
|
SLIST_FIRST(&to->m_pkthdr.tags) == NULL);
|
2018-04-27 10:20:33 +03:00
|
|
|
KASSERT((from->m_flags & M_PKTHDR) != 0);
|
|
|
|
|
|
|
|
to->m_pkthdr = from->m_pkthdr;
|
|
|
|
to->m_flags = from->m_flags & M_COPYFLAGS;
|
2018-05-07 12:57:37 +03:00
|
|
|
to->m_data = to->m_pktdat;
|
|
|
|
|
2018-04-27 10:20:33 +03:00
|
|
|
SLIST_INIT(&to->m_pkthdr.tags);
|
|
|
|
m_tag_copy_chain(to, from);
|
|
|
|
}
|
|
|
|
|
2005-08-18 04:30:58 +04:00
|
|
|
void
|
|
|
|
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
|
|
|
|
{
|
|
|
|
KASSERT((to->m_flags & M_EXT) == 0);
|
2018-11-15 13:06:06 +03:00
|
|
|
KASSERT((to->m_flags & M_PKTHDR) == 0 ||
|
|
|
|
SLIST_FIRST(&to->m_pkthdr.tags) == NULL);
|
2005-08-18 04:30:58 +04:00
|
|
|
KASSERT((from->m_flags & M_PKTHDR) != 0);
|
|
|
|
|
|
|
|
to->m_pkthdr = from->m_pkthdr;
|
|
|
|
to->m_flags = from->m_flags & M_COPYFLAGS;
|
|
|
|
to->m_data = to->m_pktdat;
|
|
|
|
|
|
|
|
from->m_flags &= ~M_PKTHDR;
|
|
|
|
}
|
|
|
|
|
2018-12-22 16:55:56 +03:00
|
|
|
/*
|
|
|
|
* Set the m_data pointer of a newly-allocated mbuf to place an object of the
|
|
|
|
* specified size at the end of the mbuf, longword aligned.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
m_align(struct mbuf *m, int len)
|
|
|
|
{
|
|
|
|
int buflen, adjust;
|
|
|
|
|
|
|
|
KASSERT(len != M_COPYALL);
|
|
|
|
KASSERT(M_LEADINGSPACE(m) == 0);
|
|
|
|
|
2019-09-18 19:18:12 +03:00
|
|
|
buflen = M_BUFSIZE(m);
|
2018-12-22 16:55:56 +03:00
|
|
|
|
|
|
|
KASSERT(len <= buflen);
|
|
|
|
adjust = buflen - len;
|
|
|
|
m->m_data += adjust &~ (sizeof(long)-1);
|
|
|
|
}
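/*
 * Usage sketch ('payload' is a hypothetical small structure): place the
 * data at the end of a fresh header mbuf so that protocol headers can
 * later be prepended without another allocation.
 */
struct mbuf *m;
m = m_gethdr(M_DONTWAIT, MT_DATA);
if (m == NULL)
	return ENOBUFS;
m_align(m, sizeof(payload));		/* leave the free space in front */
m->m_len = m->m_pkthdr.len = sizeof(payload);
memcpy(mtod(m, void *), &payload, sizeof(payload));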
|
|
|
|
|
2003-04-12 06:49:25 +04:00
|
|
|
/*
|
|
|
|
* Apply function f to the data in an mbuf chain starting "off" bytes from the
|
|
|
|
* beginning, continuing for "len" bytes.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
m_apply(struct mbuf *m, int off, int len,
|
2007-03-04 08:59:00 +03:00
|
|
|
int (*f)(void *, void *, unsigned int), void *arg)
|
2003-04-12 06:49:25 +04:00
|
|
|
{
|
|
|
|
unsigned int count;
|
|
|
|
int rval;
|
|
|
|
|
2013-11-14 22:54:40 +04:00
|
|
|
KASSERT(len != M_COPYALL);
|
2003-04-12 06:49:25 +04:00
|
|
|
KASSERT(len >= 0);
|
|
|
|
KASSERT(off >= 0);
|
|
|
|
|
|
|
|
while (off > 0) {
|
|
|
|
KASSERT(m != NULL);
|
|
|
|
if (off < m->m_len)
|
|
|
|
break;
|
|
|
|
off -= m->m_len;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
while (len > 0) {
|
|
|
|
KASSERT(m != NULL);
|
2018-09-03 19:29:22 +03:00
|
|
|
count = uimin(m->m_len - off, len);
|
2003-04-12 06:49:25 +04:00
|
|
|
|
2007-03-04 08:59:00 +03:00
|
|
|
rval = (*f)(arg, mtod(m, char *) + off, count);
|
2003-04-12 06:49:25 +04:00
|
|
|
if (rval)
|
2018-01-22 18:05:27 +03:00
|
|
|
return rval;
|
2003-04-12 06:49:25 +04:00
|
|
|
|
|
|
|
len -= count;
|
|
|
|
off = 0;
|
|
|
|
m = m->m_next;
|
|
|
|
}
|
|
|
|
|
2018-01-22 18:05:27 +03:00
|
|
|
return 0;
|
2003-04-12 06:49:25 +04:00
|
|
|
}
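/*
 * Usage sketch ('sum_cb' is hypothetical): walk a byte range of the chain
 * without linearizing it, folding the data into a running sum.
 */
static int
sum_cb(void *arg, void *data, unsigned int len)
{
	uint32_t *sum = arg;
	const uint8_t *p = data;

	while (len-- > 0)
		*sum += *p++;
	return 0;			/* non-zero would abort m_apply() */
}
/* then, over a packet header mbuf 'm': */
uint32_t sum = 0;
int error = m_apply(m, 0, m->m_pkthdr.len, sum_cb, &sum);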
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return a pointer to mbuf/offset of location in mbuf chain.
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
|
|
|
m_getptr(struct mbuf *m, int loc, int *off)
|
|
|
|
{
|
|
|
|
|
|
|
|
while (loc >= 0) {
|
|
|
|
/* Normal end of search */
|
|
|
|
if (m->m_len > loc) {
|
2018-01-14 19:59:37 +03:00
|
|
|
*off = loc;
|
2018-01-22 18:05:27 +03:00
|
|
|
return m;
|
|
|
|
}
|
|
|
|
|
|
|
|
loc -= m->m_len;
|
|
|
|
|
|
|
|
if (m->m_next == NULL) {
|
|
|
|
if (loc == 0) {
|
|
|
|
/* Point at the end of valid data */
|
|
|
|
*off = m->m_len;
|
|
|
|
return m;
|
|
|
|
}
|
|
|
|
return NULL;
|
2003-04-12 06:49:25 +04:00
|
|
|
} else {
|
2018-01-22 18:05:27 +03:00
|
|
|
m = m->m_next;
|
2003-04-12 06:49:25 +04:00
|
|
|
}
|
2018-01-14 19:59:37 +03:00
|
|
|
}
|
2003-04-12 06:49:25 +04:00
|
|
|
|
2018-01-22 18:05:27 +03:00
|
|
|
return NULL;
|
2003-04-12 06:49:25 +04:00
|
|
|
}
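/*
 * Usage sketch ('loc' and 'value' are hypothetical): find which mbuf holds
 * byte 'loc' of the chain, e.g. to patch a single field in place, assuming
 * that mbuf is writable.
 */
struct mbuf *n;
int off;
n = m_getptr(m, loc, &off);
if (n == NULL)
	return EINVAL;			/* 'loc' is beyond the end of the chain */
*(mtod(n, uint8_t *) + off) = value;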
|
2006-01-24 16:02:57 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
/*
|
|
|
|
* Release a reference to the mbuf external storage.
|
|
|
|
*
|
|
|
|
* => free the mbuf m itself as well.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
m_ext_free(struct mbuf *m)
|
|
|
|
{
|
|
|
|
const bool embedded = MEXT_ISEMBEDDED(m);
|
|
|
|
bool dofree = true;
|
|
|
|
u_int refcnt;
|
|
|
|
|
|
|
|
KASSERT((m->m_flags & M_EXT) != 0);
|
|
|
|
KASSERT(MEXT_ISEMBEDDED(m->m_ext_ref));
|
|
|
|
KASSERT((m->m_ext_ref->m_flags & M_EXT) != 0);
|
|
|
|
KASSERT((m->m_flags & M_EXT_CLUSTER) ==
|
|
|
|
(m->m_ext_ref->m_flags & M_EXT_CLUSTER));
|
|
|
|
|
|
|
|
if (__predict_false(m->m_type == MT_FREE)) {
|
|
|
|
panic("mbuf %p already freed", m);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (__predict_true(m->m_ext.ext_refcnt == 1)) {
|
|
|
|
refcnt = m->m_ext.ext_refcnt = 0;
|
|
|
|
} else {
|
|
|
|
refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (refcnt > 0) {
|
|
|
|
if (embedded) {
|
|
|
|
/*
|
|
|
|
* other mbuf's m_ext_ref still points to us.
|
|
|
|
*/
|
|
|
|
dofree = false;
|
|
|
|
} else {
|
|
|
|
m->m_ext_ref = m;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* dropping the last reference
|
|
|
|
*/
|
|
|
|
if (!embedded) {
|
|
|
|
m->m_ext.ext_refcnt++; /* XXX */
|
|
|
|
m_ext_free(m->m_ext_ref);
|
|
|
|
m->m_ext_ref = m;
|
|
|
|
} else if ((m->m_flags & M_EXT_CLUSTER) != 0) {
|
|
|
|
pool_cache_put_paddr(mcl_cache,
|
|
|
|
m->m_ext.ext_buf, m->m_ext.ext_paddr);
|
|
|
|
} else if (m->m_ext.ext_free) {
|
|
|
|
(*m->m_ext.ext_free)(m,
|
|
|
|
m->m_ext.ext_buf, m->m_ext.ext_size,
|
|
|
|
m->m_ext.ext_arg);
|
|
|
|
/*
|
|
|
|
* 'm' is already freed by the ext_free callback.
|
|
|
|
*/
|
|
|
|
dofree = false;
|
|
|
|
} else {
|
|
|
|
free(m->m_ext.ext_buf, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dofree) {
|
|
|
|
m->m_type = MT_FREE;
|
|
|
|
m->m_data = NULL;
|
|
|
|
pool_cache_put(mb_cache, m);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free a single mbuf and associated external storage. Return the
|
|
|
|
* successor, if any.
|
|
|
|
*/
|
|
|
|
struct mbuf *
|
|
|
|
m_free(struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct mbuf *n;
|
|
|
|
|
|
|
|
mowner_revoke(m, 1, m->m_flags);
|
|
|
|
mbstat_type_add(m->m_type, -1);
|
|
|
|
|
|
|
|
if (m->m_flags & M_PKTHDR)
|
2018-11-15 13:06:06 +03:00
|
|
|
m_tag_delete_chain(m);
|
2018-11-15 12:38:57 +03:00
|
|
|
|
|
|
|
n = m->m_next;
|
|
|
|
|
|
|
|
if (m->m_flags & M_EXT) {
|
|
|
|
m_ext_free(m);
|
|
|
|
} else {
|
|
|
|
if (__predict_false(m->m_type == MT_FREE)) {
|
|
|
|
panic("mbuf %p already freed", m);
|
|
|
|
}
|
|
|
|
m->m_type = MT_FREE;
|
|
|
|
m->m_data = NULL;
|
|
|
|
pool_cache_put(mb_cache, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
m_freem(struct mbuf *m)
|
|
|
|
{
|
|
|
|
if (m == NULL)
|
|
|
|
return;
|
|
|
|
do {
|
|
|
|
m = m_free(m);
|
|
|
|
} while (m);
|
|
|
|
}
|
|
|
|
|
2006-01-24 16:02:57 +03:00
|
|
|
#if defined(DDB)
|
|
|
|
void
|
|
|
|
m_print(const struct mbuf *m, const char *modif, void (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
char ch;
|
2007-02-22 09:34:42 +03:00
|
|
|
bool opt_c = false;
|
2018-07-17 08:52:07 +03:00
|
|
|
bool opt_d = false;
|
2018-07-18 10:06:40 +03:00
|
|
|
#if NETHER > 0
|
2018-07-17 08:52:07 +03:00
|
|
|
bool opt_v = false;
|
2018-07-18 10:06:40 +03:00
|
|
|
const struct mbuf *m0 = NULL;
|
|
|
|
#endif
|
2018-07-17 08:52:07 +03:00
|
|
|
int no = 0;
|
2006-01-24 16:02:57 +03:00
|
|
|
char buf[512];
|
|
|
|
|
|
|
|
while ((ch = *(modif++)) != '\0') {
|
|
|
|
switch (ch) {
|
|
|
|
case 'c':
|
2007-02-22 09:34:42 +03:00
|
|
|
opt_c = true;
|
2006-01-24 16:02:57 +03:00
|
|
|
break;
|
2018-07-17 08:52:07 +03:00
|
|
|
case 'd':
|
|
|
|
opt_d = true;
|
|
|
|
break;
|
2018-07-18 10:06:40 +03:00
|
|
|
#if NETHER > 0
|
2018-07-17 08:52:07 +03:00
|
|
|
case 'v':
|
|
|
|
opt_v = true;
|
|
|
|
m0 = m;
|
|
|
|
break;
|
2018-07-18 10:06:40 +03:00
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
break;
|
2006-01-24 16:02:57 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nextchain:
|
2018-07-17 08:52:07 +03:00
|
|
|
(*pr)("MBUF(%d) %p\n", no, m);
|
2008-12-17 01:35:21 +03:00
|
|
|
snprintb(buf, sizeof(buf), M_FLAGS_BITS, (u_int)m->m_flags);
|
2010-11-24 17:49:18 +03:00
|
|
|
(*pr)(" data=%p, len=%d, type=%d, flags=%s\n",
|
2006-01-24 16:02:57 +03:00
|
|
|
m->m_data, m->m_len, m->m_type, buf);
|
2018-07-17 08:52:07 +03:00
|
|
|
if (opt_d) {
|
|
|
|
int i;
|
|
|
|
unsigned char *p = m->m_data;
|
|
|
|
|
|
|
|
(*pr)(" data:");
|
|
|
|
|
|
|
|
for (i = 0; i < m->m_len; i++) {
|
|
|
|
if (i % 16 == 0)
|
|
|
|
(*pr)("\n");
|
|
|
|
(*pr)(" %02x", p[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
(*pr)("\n");
|
|
|
|
}
|
2006-01-24 16:02:57 +03:00
|
|
|
(*pr)(" owner=%p, next=%p, nextpkt=%p\n", m->m_owner, m->m_next,
|
|
|
|
m->m_nextpkt);
|
|
|
|
(*pr)(" leadingspace=%u, trailingspace=%u, readonly=%u\n",
|
|
|
|
(int)M_LEADINGSPACE(m), (int)M_TRAILINGSPACE(m),
|
|
|
|
(int)M_READONLY(m));
|
|
|
|
if ((m->m_flags & M_PKTHDR) != 0) {
|
2008-12-17 01:35:21 +03:00
|
|
|
snprintb(buf, sizeof(buf), M_CSUM_BITS, m->m_pkthdr.csum_flags);
|
2017-03-31 08:44:05 +03:00
|
|
|
(*pr)(" pktlen=%d, rcvif=%p, csum_flags=%s, csum_data=0x%"
|
2006-01-24 16:02:57 +03:00
|
|
|
PRIx32 ", segsz=%u\n",
|
2016-06-10 16:31:43 +03:00
|
|
|
m->m_pkthdr.len, m_get_rcvif_NOMPSAFE(m),
|
2006-01-24 16:02:57 +03:00
|
|
|
buf, m->m_pkthdr.csum_data, m->m_pkthdr.segsz);
|
|
|
|
}
|
|
|
|
if ((m->m_flags & M_EXT)) {
|
2008-03-24 15:24:37 +03:00
|
|
|
(*pr)(" ext_refcnt=%u, ext_buf=%p, ext_size=%zd, "
|
2006-01-24 16:02:57 +03:00
|
|
|
"ext_free=%p, ext_arg=%p\n",
|
2008-03-24 15:24:37 +03:00
|
|
|
m->m_ext.ext_refcnt,
|
2006-01-24 16:02:57 +03:00
|
|
|
m->m_ext.ext_buf, m->m_ext.ext_size,
|
|
|
|
m->m_ext.ext_free, m->m_ext.ext_arg);
|
|
|
|
}
|
|
|
|
if ((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0) {
|
2006-03-18 21:17:19 +03:00
|
|
|
vaddr_t sva = (vaddr_t)m->m_ext.ext_buf;
|
|
|
|
vaddr_t eva = sva + m->m_ext.ext_size;
|
|
|
|
int n = (round_page(eva) - trunc_page(sva)) >> PAGE_SHIFT;
|
|
|
|
int i;
|
2006-01-24 16:02:57 +03:00
|
|
|
|
|
|
|
(*pr)(" pages:");
|
2006-03-18 21:17:19 +03:00
|
|
|
for (i = 0; i < n; i ++) {
|
|
|
|
(*pr)(" %p", m->m_ext.ext_pgs[i]);
|
2006-01-24 16:02:57 +03:00
|
|
|
}
|
|
|
|
(*pr)("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (opt_c) {
|
|
|
|
m = m->m_next;
|
|
|
|
if (m != NULL) {
|
2018-07-17 08:52:07 +03:00
|
|
|
no++;
|
2006-01-24 16:02:57 +03:00
|
|
|
goto nextchain;
|
|
|
|
}
|
|
|
|
}
|
2018-07-17 08:52:07 +03:00
|
|
|
|
2018-07-18 10:06:40 +03:00
|
|
|
#if NETHER > 0
|
|
|
|
if (opt_v && m0)
|
2018-07-17 08:52:07 +03:00
|
|
|
m_examine(m0, AF_ETHER, modif, pr);
|
2018-07-18 10:06:40 +03:00
|
|
|
#endif
|
2006-01-24 16:02:57 +03:00
|
|
|
}
|
|
|
|
#endif /* defined(DDB) */
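/*
 * Usage sketch, assuming the usual ddb wiring that exposes the routine
 * above as the "show mbuf" command:
 *
 *	show mbuf/c <addr>	walk the whole chain
 *	show mbuf/d <addr>	also hex-dump each mbuf's data
 */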
|
2008-01-17 17:49:28 +03:00
|
|
|
|
|
|
|
#if defined(MBUFTRACE)
|
|
|
|
void
|
|
|
|
mowner_attach(struct mowner *mo)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(mo->mo_counters == NULL);
|
|
|
|
mo->mo_counters = percpu_alloc(sizeof(struct mowner_counter));
|
|
|
|
|
|
|
|
/* XXX lock */
|
|
|
|
LIST_INSERT_HEAD(&mowners, mo, mo_link);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
mowner_detach(struct mowner *mo)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(mo->mo_counters != NULL);
|
|
|
|
|
|
|
|
/* XXX lock */
|
|
|
|
LIST_REMOVE(mo, mo_link);
|
|
|
|
|
|
|
|
percpu_free(mo->mo_counters, sizeof(struct mowner_counter));
|
|
|
|
mo->mo_counters = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
mowner_init(struct mbuf *m, int type)
|
|
|
|
{
|
|
|
|
struct mowner_counter *mc;
|
|
|
|
struct mowner *mo;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
m->m_owner = mo = &unknown_mowners[type];
|
|
|
|
s = splvm();
|
2008-04-09 09:11:20 +04:00
|
|
|
mc = percpu_getref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
|
2008-04-09 09:11:20 +04:00
|
|
|
percpu_putref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
mowner_ref(struct mbuf *m, int flags)
|
|
|
|
{
|
|
|
|
struct mowner *mo = m->m_owner;
|
|
|
|
struct mowner_counter *mc;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
s = splvm();
|
2008-04-09 09:11:20 +04:00
|
|
|
mc = percpu_getref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
if ((flags & M_EXT) != 0)
|
|
|
|
mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
|
2018-04-27 10:53:07 +03:00
|
|
|
if ((flags & M_EXT_CLUSTER) != 0)
|
2008-01-17 17:49:28 +03:00
|
|
|
mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
|
2008-04-09 09:11:20 +04:00
|
|
|
percpu_putref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
mowner_revoke(struct mbuf *m, bool all, int flags)
|
|
|
|
{
|
|
|
|
struct mowner *mo = m->m_owner;
|
|
|
|
struct mowner_counter *mc;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
s = splvm();
|
2008-04-09 09:11:20 +04:00
|
|
|
mc = percpu_getref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
if ((flags & M_EXT) != 0)
|
|
|
|
mc->mc_counter[MOWNER_COUNTER_EXT_RELEASES]++;
|
2018-04-27 10:53:07 +03:00
|
|
|
if ((flags & M_EXT_CLUSTER) != 0)
|
2008-01-17 17:49:28 +03:00
|
|
|
mc->mc_counter[MOWNER_COUNTER_CLUSTER_RELEASES]++;
|
|
|
|
if (all)
|
|
|
|
mc->mc_counter[MOWNER_COUNTER_RELEASES]++;
|
2008-04-09 09:11:20 +04:00
|
|
|
percpu_putref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
splx(s);
|
|
|
|
if (all)
|
|
|
|
m->m_owner = &revoked_mowner;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
mowner_claim(struct mbuf *m, struct mowner *mo)
|
|
|
|
{
|
|
|
|
struct mowner_counter *mc;
|
|
|
|
int flags = m->m_flags;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
s = splvm();
|
2008-04-09 09:11:20 +04:00
|
|
|
mc = percpu_getref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
|
|
|
|
if ((flags & M_EXT) != 0)
|
|
|
|
mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
|
2018-04-27 10:53:07 +03:00
|
|
|
if ((flags & M_EXT_CLUSTER) != 0)
|
2008-01-17 17:49:28 +03:00
|
|
|
mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
|
2008-04-09 09:11:20 +04:00
|
|
|
percpu_putref(mo->mo_counters);
|
2008-01-17 17:49:28 +03:00
|
|
|
splx(s);
|
|
|
|
m->m_owner = mo;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
m_claim(struct mbuf *m, struct mowner *mo)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (m->m_owner == mo || mo == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mowner_revoke(m, true, m->m_flags);
|
|
|
|
mowner_claim(m, mo);
|
|
|
|
}
|
2018-04-27 11:23:18 +03:00
|
|
|
|
|
|
|
void
|
|
|
|
m_claimm(struct mbuf *m, struct mowner *mo)
|
|
|
|
{
|
|
|
|
|
|
|
|
for (; m != NULL; m = m->m_next)
|
|
|
|
m_claim(m, mo);
|
|
|
|
}
|
2008-01-17 17:49:28 +03:00
|
|
|
#endif /* defined(MBUFTRACE) */
|
2016-10-04 17:13:21 +03:00
|
|
|
|
2018-04-15 10:35:49 +03:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
/*
|
|
|
|
* Verify that the mbuf chain is not malformed. Used only for diagnostics.
|
|
|
|
* Panics on error.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
m_verify_packet(struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct mbuf *n = m;
|
|
|
|
char *low, *high, *dat;
|
|
|
|
int totlen = 0, len;
|
|
|
|
|
|
|
|
if (__predict_false((m->m_flags & M_PKTHDR) == 0)) {
|
|
|
|
panic("%s: mbuf doesn't have M_PKTHDR", __func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
while (n != NULL) {
|
|
|
|
if (__predict_false(n->m_type == MT_FREE)) {
|
|
|
|
panic("%s: mbuf already freed (n = %p)", __func__, n);
|
|
|
|
}
|
2018-04-17 10:58:31 +03:00
|
|
|
#if 0
|
|
|
|
/*
|
|
|
|
* This ought to be a rule of the mbuf API. Unfortunately,
|
|
|
|
* many places don't respect that rule.
|
|
|
|
*/
|
2018-04-15 10:35:49 +03:00
|
|
|
if (__predict_false((n != m) && (n->m_flags & M_PKTHDR) != 0)) {
|
|
|
|
panic("%s: M_PKTHDR set on secondary mbuf", __func__);
|
|
|
|
}
|
2018-04-16 22:19:51 +03:00
|
|
|
#endif
|
2018-04-15 10:35:49 +03:00
|
|
|
if (__predict_false(n->m_nextpkt != NULL)) {
|
|
|
|
panic("%s: m_nextpkt not null (m_nextpkt = %p)",
|
|
|
|
__func__, n->m_nextpkt);
|
|
|
|
}
|
|
|
|
|
|
|
|
dat = n->m_data;
|
|
|
|
len = n->m_len;
|
2019-12-06 10:27:06 +03:00
|
|
|
if (__predict_false(len < 0)) {
|
2018-04-15 10:35:49 +03:00
|
|
|
panic("%s: incorrect length (len = %d)", __func__, len);
|
|
|
|
}
|
2019-09-18 19:18:12 +03:00
|
|
|
|
|
|
|
low = M_BUFADDR(n);
|
|
|
|
high = low + M_BUFSIZE(n);
|
2018-04-15 10:35:49 +03:00
|
|
|
if (__predict_false((dat < low) || (dat + len > high))) {
|
|
|
|
panic("%s: m_data not in packet"
|
|
|
|
"(dat = %p, len = %d, low = %p, high = %p)",
|
|
|
|
__func__, dat, len, low, high);
|
|
|
|
}
|
|
|
|
|
|
|
|
totlen += len;
|
|
|
|
n = n->m_next;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (__predict_false(totlen != m->m_pkthdr.len)) {
|
|
|
|
panic("%s: inconsistent mbuf length (%d != %d)", __func__,
|
|
|
|
totlen, m->m_pkthdr.len);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
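/*
 * Typical call site (sketch): sanity-check a chain in a DIAGNOSTIC kernel
 * before handing it to a driver or to the upper layers.
 */
#ifdef DIAGNOSTIC
m_verify_packet(m);
#endif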
|
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
struct m_tag *
|
|
|
|
m_tag_get(int type, int len, int wait)
|
2018-04-10 18:29:46 +03:00
|
|
|
{
|
2018-11-15 12:38:57 +03:00
|
|
|
struct m_tag *t;
|
2018-04-10 18:29:46 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
if (len < 0)
|
|
|
|
return NULL;
|
|
|
|
t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, wait);
|
|
|
|
if (t == NULL)
|
|
|
|
return NULL;
|
|
|
|
t->m_tag_id = type;
|
|
|
|
t->m_tag_len = len;
|
|
|
|
return t;
|
|
|
|
}
|
2018-04-10 18:29:46 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
void
|
|
|
|
m_tag_free(struct m_tag *t)
|
|
|
|
{
|
|
|
|
free(t, M_PACKET_TAGS);
|
|
|
|
}
|
2018-04-10 18:29:46 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
void
|
|
|
|
m_tag_prepend(struct mbuf *m, struct m_tag *t)
|
|
|
|
{
|
2018-11-15 13:37:26 +03:00
|
|
|
KASSERT((m->m_flags & M_PKTHDR) != 0);
|
2018-11-15 12:38:57 +03:00
|
|
|
SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
|
|
|
|
}
|
2018-04-10 18:29:46 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
void
|
|
|
|
m_tag_unlink(struct mbuf *m, struct m_tag *t)
|
|
|
|
{
|
2018-11-15 13:37:26 +03:00
|
|
|
KASSERT((m->m_flags & M_PKTHDR) != 0);
|
2018-11-15 12:38:57 +03:00
|
|
|
SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
|
|
|
|
}
|
2018-04-10 18:29:46 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
void
|
|
|
|
m_tag_delete(struct mbuf *m, struct m_tag *t)
|
|
|
|
{
|
|
|
|
m_tag_unlink(m, t);
|
|
|
|
m_tag_free(t);
|
2018-04-10 18:29:46 +03:00
|
|
|
}
|
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
void
|
2018-11-15 13:06:06 +03:00
|
|
|
m_tag_delete_chain(struct mbuf *m)
|
2016-10-04 17:13:21 +03:00
|
|
|
{
|
2018-11-15 12:38:57 +03:00
|
|
|
struct m_tag *p, *q;
|
2016-10-04 17:13:21 +03:00
|
|
|
|
2018-11-15 13:37:26 +03:00
|
|
|
KASSERT((m->m_flags & M_PKTHDR) != 0);
|
|
|
|
|
2018-11-15 13:06:06 +03:00
|
|
|
p = SLIST_FIRST(&m->m_pkthdr.tags);
|
2018-11-15 12:38:57 +03:00
|
|
|
if (p == NULL)
|
|
|
|
return;
|
|
|
|
while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
|
|
|
|
m_tag_delete(m, q);
|
|
|
|
m_tag_delete(m, p);
|
|
|
|
}
|
2016-10-04 17:13:21 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
struct m_tag *
|
2018-11-15 13:23:55 +03:00
|
|
|
m_tag_find(const struct mbuf *m, int type)
|
2018-11-15 12:38:57 +03:00
|
|
|
{
|
|
|
|
struct m_tag *p;
|
|
|
|
|
2018-11-15 13:37:26 +03:00
|
|
|
KASSERT((m->m_flags & M_PKTHDR) != 0);
|
|
|
|
|
2018-11-15 13:23:55 +03:00
|
|
|
p = SLIST_FIRST(&m->m_pkthdr.tags);
|
2018-11-15 12:38:57 +03:00
|
|
|
while (p != NULL) {
|
|
|
|
if (p->m_tag_id == type)
|
|
|
|
return p;
|
|
|
|
p = SLIST_NEXT(p, m_tag_link);
|
2018-01-01 15:09:56 +03:00
|
|
|
}
|
2018-11-15 12:38:57 +03:00
|
|
|
return NULL;
|
|
|
|
}
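/*
 * Usage sketch (PACKET_TAG_EXAMPLE and 'cookie' are hypothetical): attach a
 * small piece of metadata to a packet header mbuf and look it up later.
 */
struct m_tag *t;
t = m_tag_get(PACKET_TAG_EXAMPLE, sizeof(uint32_t), M_NOWAIT);
if (t == NULL)
	return ENOBUFS;
*(uint32_t *)(t + 1) = cookie;		/* payload lives right after the tag */
m_tag_prepend(m, t);
/* later, possibly in another subsystem: */
t = m_tag_find(m, PACKET_TAG_EXAMPLE);
if (t != NULL)
	cookie = *(uint32_t *)(t + 1);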
|
2018-01-01 15:09:56 +03:00
|
|
|
|
2018-11-15 12:38:57 +03:00
|
|
|
struct m_tag *
|
|
|
|
m_tag_copy(struct m_tag *t)
|
|
|
|
{
|
|
|
|
struct m_tag *p;
|
|
|
|
|
|
|
|
p = m_tag_get(t->m_tag_id, t->m_tag_len, M_NOWAIT);
|
|
|
|
if (p == NULL)
|
|
|
|
return NULL;
|
|
|
|
memcpy(p + 1, t + 1, t->m_tag_len);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the tag chain from one mbuf (from) to another (to). The destination
|
|
|
|
* loses any attached tags even if the operation fails. This should not be a problem, as
|
|
|
|
* m_tag_copy_chain() is typically called with a newly-allocated
|
|
|
|
* destination mbuf.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
m_tag_copy_chain(struct mbuf *to, struct mbuf *from)
|
|
|
|
{
|
|
|
|
struct m_tag *p, *t, *tprev = NULL;
|
|
|
|
|
2018-11-15 13:37:26 +03:00
|
|
|
KASSERT((from->m_flags & M_PKTHDR) != 0);
|
|
|
|
|
2018-11-15 13:06:06 +03:00
|
|
|
m_tag_delete_chain(to);
|
2018-11-15 12:38:57 +03:00
|
|
|
SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
|
|
|
|
t = m_tag_copy(p);
|
|
|
|
if (t == NULL) {
|
2018-11-15 13:06:06 +03:00
|
|
|
m_tag_delete_chain(to);
|
2018-11-15 12:38:57 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (tprev == NULL)
|
|
|
|
SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
|
|
|
|
else
|
|
|
|
SLIST_INSERT_AFTER(tprev, t, m_tag_link);
|
|
|
|
tprev = t;
|
|
|
|
}
|
|
|
|
return 1;
|
2016-10-04 17:13:21 +03:00
|
|
|
}
|