make some mbuf related statistics per-cpu.
parent cc868dad6c
commit 44dac61d31

sys/kern/uipc_mbuf.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uipc_mbuf.c,v 1.123 2007/11/14 14:11:57 yamt Exp $	*/
+/*	$NetBSD: uipc_mbuf.c,v 1.124 2008/01/17 14:49:29 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
@@ -69,13 +69,14 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.123 2007/11/14 14:11:57 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.124 2008/01/17 14:49:29 yamt Exp $");
 
 #include "opt_mbuftrace.h"
 #include "opt_ddb.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/cpu.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #define MBTYPES
@@ -84,6 +85,7 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.123 2007/11/14 14:11:57 yamt Exp $")
 #include <sys/syslog.h>
 #include <sys/domain.h>
 #include <sys/protosw.h>
+#include <sys/percpu.h>
 #include <sys/pool.h>
 #include <sys/socket.h>
 #include <sys/sysctl.h>
@@ -126,6 +128,8 @@ static const char mclpool_warnmsg[] =
 
 MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
 
+static percpu_t *mbstat_percpu;
+
 #ifdef MBUFTRACE
 struct mownerhead mowners = LIST_HEAD_INITIALIZER(mowners);
 struct mowner unknown_mowners[] = {
@@ -171,6 +175,8 @@ mbinit(void)
         */
        pool_cache_sethardlimit(mcl_cache, nmbclusters, mclpool_warnmsg, 60);
 
+       mbstat_percpu = percpu_alloc(sizeof(struct mbstat_cpu));
+
        /*
         * Set a low water mark for both mbufs and clusters.  This should
         * help ensure that they can be allocated in a memory starvation
@@ -250,6 +256,30 @@ sysctl_kern_mbuf(SYSCTLFN_ARGS)
 }
 
 #ifdef MBUFTRACE
+static void
+mowner_conver_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
+{
+       struct mowner_counter *mc = v1;
+       struct mowner_user *mo_user = v2;
+       int i;
+
+       for (i = 0; i < MOWNER_COUNTER_NCOUNTERS; i++) {
+               mo_user->mo_counter[i] += mc->mc_counter[i];
+       }
+}
+
+static void
+mowner_convert_to_user(struct mowner *mo, struct mowner_user *mo_user)
+{
+
+       memset(mo_user, 0, sizeof(*mo_user));
+       KASSERT(sizeof(mo_user->mo_name) == sizeof(mo->mo_name));
+       KASSERT(sizeof(mo_user->mo_descr) == sizeof(mo->mo_descr));
+       memcpy(mo_user->mo_name, mo->mo_name, sizeof(mo->mo_name));
+       memcpy(mo_user->mo_descr, mo->mo_descr, sizeof(mo->mo_descr));
+       percpu_foreach(mo->mo_counters, mowner_conver_to_user_cb, mo_user);
+}
+
 static int
 sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
 {
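The per-CPU mowner counters are only merged when they are exported: mowner_convert_to_user() zeroes a struct mowner_user, copies the name and description, and then lets percpu_foreach() run the callback once per CPU, each pass adding that CPU's mowner_counter block into the single user-visible record. The standalone sketch below models that aggregation pattern in plain C; the fixed array standing in for the per-CPU storage and the fake_percpu_foreach() helper are inventions for illustration only, not NetBSD interfaces (the real percpu_foreach() also passes a struct cpu_info pointer to the callback).

#include <stdio.h>

#define NCOUNTERS 6             /* stands in for MOWNER_COUNTER_NCOUNTERS */
#define FAKE_NCPU 4             /* stands in for the number of CPUs */

struct counter_block {          /* models struct mowner_counter */
        unsigned long c[NCOUNTERS];
};

struct user_view {              /* models struct mowner_user's mo_counter[] */
        unsigned long c[NCOUNTERS];
};

/* One private block per CPU; percpu_alloc() hands these out in the kernel. */
static struct counter_block blocks[FAKE_NCPU];

/* Models percpu_foreach(): invoke cb once for each CPU-private block. */
static void
fake_percpu_foreach(void (*cb)(void *, void *), void *arg)
{
        int cpu;

        for (cpu = 0; cpu < FAKE_NCPU; cpu++)
                cb(&blocks[cpu], arg);
}

/* Models mowner_conver_to_user_cb(): fold one CPU's counters into the sum. */
static void
sum_cb(void *v1, void *v2)
{
        struct counter_block *b = v1;
        struct user_view *uv = v2;
        int i;

        for (i = 0; i < NCOUNTERS; i++)
                uv->c[i] += b->c[i];
}

int
main(void)
{
        struct user_view uv = { .c = { 0 } };
        int cpu;

        /* Pretend each CPU charged a few claims to its own block. */
        for (cpu = 0; cpu < FAKE_NCPU; cpu++)
                blocks[cpu].c[0] = (unsigned long)(cpu + 1);

        fake_percpu_foreach(sum_cb, &uv);
        printf("total claims: %lu\n", uv.c[0]);   /* 1+2+3+4 = 10 */
        return 0;
}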
@@ -263,16 +293,21 @@ sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
                return (EPERM);
 
        LIST_FOREACH(mo, &mowners, mo_link) {
+               struct mowner_user mo_user;
+
+               mowner_convert_to_user(mo, &mo_user);
+
                if (oldp != NULL) {
-                       if (*oldlenp - len < sizeof(*mo)) {
+                       if (*oldlenp - len < sizeof(mo_user)) {
                                error = ENOMEM;
                                break;
                        }
-                       error = copyout(mo, (char *)oldp + len, sizeof(*mo));
+                       error = copyout(&mo_user, (char *)oldp + len,
+                           sizeof(mo_user));
                        if (error)
                                break;
                }
-               len += sizeof(*mo);
+               len += sizeof(mo_user);
        }
 
        if (error == 0)
@@ -282,6 +317,40 @@ sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)
 }
 #endif /* MBUFTRACE */
 
+static void
+mbstat_conver_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
+{
+       struct mbstat_cpu *mbsc = v1;
+       struct mbstat *mbs = v2;
+       int i;
+
+       for (i = 0; i < __arraycount(mbs->m_mtypes); i++) {
+               mbs->m_mtypes[i] += mbsc->m_mtypes[i];
+       }
+}
+
+static void
+mbstat_convert_to_user(struct mbstat *mbs)
+{
+
+       memset(mbs, 0, sizeof(*mbs));
+       mbs->m_drain = mbstat.m_drain;
+       percpu_foreach(mbstat_percpu, mbstat_conver_to_user_cb, mbs);
+}
+
+static int
+sysctl_kern_mbuf_stats(SYSCTLFN_ARGS)
+{
+       struct sysctlnode node;
+       struct mbstat mbs;
+
+       mbstat_convert_to_user(&mbs);
+       node = *rnode;
+       node.sysctl_data = &mbs;
+       node.sysctl_size = sizeof(mbs);
+       return sysctl_lookup(SYSCTLFN_CALL(&node));
+}
+
 SYSCTL_SETUP(sysctl_kern_mbuf_setup, "sysctl kern.mbuf subtree setup")
 {
 
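sysctl_kern_mbuf_stats() now builds the struct mbstat it hands to sysctl_lookup() on demand, summing the per-CPU mbstat_cpu blocks instead of exporting a shared global. Userland still sees a single struct mbstat under kern.mbuf.stats; a minimal reader, assuming a NetBSD system where <sys/mbuf.h> can be included from userland for struct mbstat (as netstat(1) does) and where MBUF_STATS is the MIB constant used in the sysctl_createv() call below, might look like this:

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_STATS };
        struct mbstat mbs;
        size_t len = sizeof(mbs);

        /* Fetch the snapshot aggregated by sysctl_kern_mbuf_stats(). */
        if (sysctl(mib, 3, &mbs, &len, NULL, 0) == -1)
                err(1, "sysctl kern.mbuf.stats");

        printf("MT_DATA mbufs in use: %u\n", (unsigned)mbs.m_mtypes[MT_DATA]);
        printf("drain calls:          %lu\n", (unsigned long)mbs.m_drain);
        return 0;
}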
@@ -331,7 +400,7 @@ SYSCTL_SETUP(sysctl_kern_mbuf_setup, "sysctl kern.mbuf subtree setup")
                       CTLFLAG_PERMANENT,
                       CTLTYPE_STRUCT, "stats",
                       SYSCTL_DESCR("mbuf allocation statistics"),
-                      NULL, 0, &mbstat, sizeof(mbstat),
+                      sysctl_kern_mbuf_stats, 0, NULL, 0,
                       CTL_KERN, KERN_MBUF, MBUF_STATS, CTL_EOL);
 #ifdef MBUFTRACE
        sysctl_createv(clog, 0, NULL, NULL,
@@ -407,8 +476,20 @@ m_get(int nowait, int type)
 {
        struct mbuf *m;
 
-       MGET(m, nowait, type);
-       return (m);
+       m = pool_cache_get(mb_cache,
+           nowait == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0);
+       if (m == NULL)
+               return NULL;
+
+       mbstat_type_add(type, 1);
+       mowner_init(m, type);
+       m->m_type = type;
+       m->m_next = NULL;
+       m->m_nextpkt = NULL;
+       m->m_data = m->m_dat;
+       m->m_flags = 0;
+
+       return m;
 }
 
 struct mbuf *
@@ -416,8 +497,18 @@ m_gethdr(int nowait, int type)
 {
        struct mbuf *m;
 
-       MGETHDR(m, nowait, type);
-       return (m);
+       m = m_get(nowait, type);
+       if (m == NULL)
+               return NULL;
+
+       m->m_data = m->m_pktdat;
+       m->m_flags = M_PKTHDR;
+       m->m_pkthdr.rcvif = NULL;
+       m->m_pkthdr.csum_flags = 0;
+       m->m_pkthdr.csum_data = 0;
+       SLIST_INIT(&m->m_pkthdr.tags);
+
+       return m;
 }
 
 struct mbuf *
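m_get() and m_gethdr() absorb the initialization that the MGET()/MGETHDR() macros used to perform, with the type count now maintained through mbstat_type_add() rather than an MBUFLOCK-protected mbstat.m_mtypes[] update. Callers do not change; a typical kernel-side allocation still follows the pattern in this hedged sketch (a kernel-context fragment, not a standalone program; example_alloc() is a hypothetical name):

/* Kernel-context sketch: allocate a packet-header mbuf plus a cluster. */
static int
example_alloc(struct mbuf **mp)
{
        struct mbuf *m;

        m = m_gethdr(M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return ENOBUFS;         /* pool_cache_get() hit its limit */

        MCLGET(m, M_DONTWAIT);          /* MBUFTRACE claim is charged per-CPU */
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);             /* MFREE decrements via mbstat_type_add() */
                return ENOBUFS;
        }
        *mp = m;
        return 0;
}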
@@ -1484,3 +1575,127 @@ nextchain:
        }
 }
 #endif /* defined(DDB) */
+
+void
+mbstat_type_add(int type, int diff)
+{
+       struct mbstat_cpu *mb;
+       int s;
+
+       s = splvm();
+       mb = percpu_getptr(mbstat_percpu);
+       mb->m_mtypes[type] += diff;
+       splx(s);
+}
+
+#if defined(MBUFTRACE)
+void
+mowner_attach(struct mowner *mo)
+{
+
+       KASSERT(mo->mo_counters == NULL);
+       mo->mo_counters = percpu_alloc(sizeof(struct mowner_counter));
+
+       /* XXX lock */
+       LIST_INSERT_HEAD(&mowners, mo, mo_link);
+}
+
+void
+mowner_detach(struct mowner *mo)
+{
+
+       KASSERT(mo->mo_counters != NULL);
+
+       /* XXX lock */
+       LIST_REMOVE(mo, mo_link);
+
+       percpu_free(mo->mo_counters, sizeof(struct mowner_counter));
+       mo->mo_counters = NULL;
+}
+
+static struct mowner_counter *
+mowner_counter(struct mowner *mo)
+{
+
+       return percpu_getptr(mo->mo_counters);
+}
+
+void
+mowner_init(struct mbuf *m, int type)
+{
+       struct mowner_counter *mc;
+       struct mowner *mo;
+       int s;
+
+       m->m_owner = mo = &unknown_mowners[type];
+       s = splvm();
+       mc = mowner_counter(mo);
+       mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
+       splx(s);
+}
+
+void
+mowner_ref(struct mbuf *m, int flags)
+{
+       struct mowner *mo = m->m_owner;
+       struct mowner_counter *mc;
+       int s;
+
+       s = splvm();
+       mc = mowner_counter(mo);
+       if ((flags & M_EXT) != 0)
+               mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
+       if ((flags & M_CLUSTER) != 0)
+               mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
+       splx(s);
+}
+
+void
+mowner_revoke(struct mbuf *m, bool all, int flags)
+{
+       struct mowner *mo = m->m_owner;
+       struct mowner_counter *mc;
+       int s;
+
+       s = splvm();
+       mc = mowner_counter(mo);
+       if ((flags & M_EXT) != 0)
+               mc->mc_counter[MOWNER_COUNTER_EXT_RELEASES]++;
+       if ((flags & M_CLUSTER) != 0)
+               mc->mc_counter[MOWNER_COUNTER_CLUSTER_RELEASES]++;
+       if (all)
+               mc->mc_counter[MOWNER_COUNTER_RELEASES]++;
+       splx(s);
+       if (all)
+               m->m_owner = &revoked_mowner;
+}
+
+static void
+mowner_claim(struct mbuf *m, struct mowner *mo)
+{
+       struct mowner_counter *mc;
+       int flags = m->m_flags;
+       int s;
+
+       s = splvm();
+       mc = mowner_counter(mo);
+       mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
+       if ((flags & M_EXT) != 0)
+               mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
+       if ((flags & M_CLUSTER) != 0)
+               mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
+       splx(s);
+       m->m_owner = mo;
+}
+
+void
+m_claim(struct mbuf *m, struct mowner *mo)
+{
+
+       if (m->m_owner == mo || mo == NULL)
+               return;
+
+       mowner_revoke(m, true, m->m_flags);
+       mowner_claim(m, mo);
+}
+#endif /* defined(MBUFTRACE) */
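mbstat_type_add() and the mowner_*() functions above all follow the same write path: raise to splvm(), fetch the calling CPU's private counter block with percpu_getptr(), bump the counter, and drop the spl. Because no other CPU ever touches that block, no global lock or atomic operation is needed on the allocation path; blocking interrupts on the local CPU is enough to keep the read-modify-write intact. For MBUFTRACE kernels, an owner must now be registered with mowner_attach() (which allocates its per-CPU mowner_counter block) before MCLAIM() can charge mbufs to it, and released with mowner_detach(). A driver-style usage sketch with hypothetical names (example_*), not compilable on its own, is shown below.

/* Kernel sketch (MBUFTRACE): register an mbuf owner and charge traffic to it. */
static struct mowner example_rx_mowner = MOWNER_INIT("example0", "rx");

static void
example_attach(void)
{
        /* Allocates the per-CPU mowner_counter block and links the owner. */
        mowner_attach(&example_rx_mowner);
}

static void
example_input(struct mbuf *m)
{
        /* Revokes the previous owner's claim and charges this one, per-CPU. */
        MCLAIM(m, &example_rx_mowner);
}

static void
example_detach(void)
{
        /* Unlinks the owner and frees its per-CPU counter block. */
        mowner_detach(&example_rx_mowner);
}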

sys/sys/mbuf.h (179 lines changed)
@@ -1,4 +1,4 @@
-/*	$NetBSD: mbuf.h,v 1.138 2007/11/10 13:06:23 yamt Exp $	*/
+/*	$NetBSD: mbuf.h,v 1.139 2008/01/17 14:49:28 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1999, 2001, 2007 The NetBSD Foundation, Inc.
@@ -80,6 +80,9 @@
 #endif
 #include <sys/pool.h>
 #include <sys/queue.h>
+#if defined(_KERNEL)
+#include <sys/percpu_types.h>
+#endif /* defined(_KERNEL) */
 
 /* For offsetof() */
 #if defined(_KERNEL) || defined(_STANDALONE)
@@ -110,16 +113,36 @@ struct mowner {
        char mo_name[16];               /* owner name (fxp0) */
        char mo_descr[16];              /* owner description (input) */
        LIST_ENTRY(mowner) mo_link;     /* */
-       u_long mo_claims;               /* # of small mbuf claimed */
-       u_long mo_releases;             /* # of small mbuf released */
-       u_long mo_cluster_claims;       /* # of M_CLUSTER mbuf claimed */
-       u_long mo_cluster_releases;     /* # of M_CLUSTER mbuf released */
-       u_long mo_ext_claims;           /* # of M_EXT mbuf claimed */
-       u_long mo_ext_releases;         /* # of M_EXT mbuf released */
+       struct percpu *mo_counters;
 };
 
 #define MOWNER_INIT(x, y)      { .mo_name = x, .mo_descr = y }
 
+enum mowner_counter_index {
+       MOWNER_COUNTER_CLAIMS,          /* # of small mbuf claimed */
+       MOWNER_COUNTER_RELEASES,        /* # of small mbuf released */
+       MOWNER_COUNTER_CLUSTER_CLAIMS,  /* # of M_CLUSTER mbuf claimed */
+       MOWNER_COUNTER_CLUSTER_RELEASES,/* # of M_CLUSTER mbuf released */
+       MOWNER_COUNTER_EXT_CLAIMS,      /* # of M_EXT mbuf claimed */
+       MOWNER_COUNTER_EXT_RELEASES,    /* # of M_EXT mbuf released */
+
+       MOWNER_COUNTER_NCOUNTERS,
+};
+
+#if defined(_KERNEL)
+struct mowner_counter {
+       u_long mc_counter[MOWNER_COUNTER_NCOUNTERS];
+};
+#endif /* defined(_KERNEL) */
+
+/* userland-exported version of struct mowner */
+struct mowner_user {
+       char mo_name[16];               /* owner name (fxp0) */
+       char mo_descr[16];              /* owner description (input) */
+       LIST_ENTRY(mowner) mo_link;     /* unused padding; for compatibility */
+       u_long mo_counter[MOWNER_COUNTER_NCOUNTERS]; /* counters */
+};
+
 /*
  * Macros for type conversion
  * mtod(m,t) -	convert mbuf pointer to data pointer of correct type
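struct mowner_user is the fixed-size record that sysctl_kern_mbuf_mowners() copies out, one per registered owner, with the per-CPU counts already folded into mo_counter[]. A userland walker over that list might look like the sketch below; it assumes an MBUFTRACE kernel and that kern.mbuf.mowners is reachable through an MBUF_MOWNERS MIB constant (that constant is not shown in this diff, so treat it as an assumption).

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        int mib[3] = { CTL_KERN, KERN_MBUF, MBUF_MOWNERS };
        struct mowner_user *mo;
        size_t len = 0, n, i;

        /* First call sizes the buffer; the owner list can grow between calls. */
        if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1)
                err(1, "sysctl kern.mbuf.mowners (size)");
        if ((mo = malloc(len)) == NULL)
                err(1, "malloc");
        if (sysctl(mib, 3, mo, &len, NULL, 0) == -1)
                err(1, "sysctl kern.mbuf.mowners");

        n = len / sizeof(*mo);
        for (i = 0; i < n; i++)
                printf("%-16.16s %-16.16s claims %lu releases %lu\n",
                    mo[i].mo_name, mo[i].mo_descr,
                    mo[i].mo_counter[MOWNER_COUNTER_CLAIMS],
                    mo[i].mo_counter[MOWNER_COUNTER_RELEASES]);
        free(mo);
        return 0;
}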
@@ -374,63 +397,28 @@ do { \
 
 #ifdef MBUFTRACE
 /*
- * mbuf allocation tracing macros
- *
+ * mbuf allocation tracing
  */
-#define _MOWNERINIT(m, type) \
-       ((m)->m_owner = &unknown_mowners[(type)], (m)->m_owner->mo_claims++)
-
-#define _MOWNERREF(m, flags)   do { \
-       if ((flags) & M_EXT) \
-               (m)->m_owner->mo_ext_claims++; \
-       if ((flags) & M_CLUSTER) \
-               (m)->m_owner->mo_cluster_claims++; \
-} while (/* CONSTCOND */ 0)
-
-#define MOWNERREF(m, flags)    MBUFLOCK( _MOWNERREF((m), (flags)); );
-
-#define _MOWNERREVOKE(m, all, flags)   do { \
-       if ((flags) & M_EXT) \
-               (m)->m_owner->mo_ext_releases++; \
-       if ((flags) & M_CLUSTER) \
-               (m)->m_owner->mo_cluster_releases++; \
-       if (all) { \
-               (m)->m_owner->mo_releases++; \
-               (m)->m_owner = &revoked_mowner; \
-       } \
-} while (/* CONSTCOND */ 0)
-
-#define _MOWNERCLAIM(m, mowner)        do { \
-       (m)->m_owner = (mowner); \
-       (mowner)->mo_claims++; \
-       if ((m)->m_flags & M_EXT) \
-               (mowner)->mo_ext_claims++; \
-       if ((m)->m_flags & M_CLUSTER) \
-               (mowner)->mo_cluster_claims++; \
-} while (/* CONSTCOND */ 0)
-
-#define MCLAIM(m, mowner) \
-       MBUFLOCK( \
-               if ((m)->m_owner != (mowner) && (mowner) != NULL) { \
-                       _MOWNERREVOKE((m), 1, (m)->m_flags); \
-                       _MOWNERCLAIM((m), (mowner)); \
-               } \
-       )
-
-#define MOWNER_ATTACH(mo)      LIST_INSERT_HEAD(&mowners, (mo), mo_link)
-#define MOWNER_DETACH(mo)      LIST_REMOVE((mo), mo_link)
+void mowner_init(struct mbuf *, int);
+void mowner_ref(struct mbuf *, int);
+void m_claim(struct mbuf *, struct mowner *);
+void mowner_revoke(struct mbuf *, bool, int);
+void mowner_attach(struct mowner *);
+void mowner_detach(struct mowner *);
+void m_claimm(struct mbuf *, struct mowner *);
 #else
-#define _MOWNERINIT(m, type)           do { } while (/* CONSTCOND */ 0)
-#define _MOWNERREF(m, flags)           do { } while (/* CONSTCOND */ 0)
-#define MOWNERREF(m, flags)            do { } while (/* CONSTCOND */ 0)
-#define _MOWNERREVOKE(m, all, flags)   do { } while (/* CONSTCOND */ 0)
-#define _MOWNERCLAIM(m, mowner)        do { } while (/* CONSTCOND */ 0)
-#define MCLAIM(m, mowner)              do { } while (/* CONSTCOND */ 0)
-#define MOWNER_ATTACH(mo)              do { } while (/* CONSTCOND */ 0)
-#define MOWNER_DETACH(mo)              do { } while (/* CONSTCOND */ 0)
+#define mowner_init(m, type)           do { } while (/* CONSTCOND */ 0)
+#define mowner_ref(m, flags)           do { } while (/* CONSTCOND */ 0)
+#define mowner_revoke(m, all, flags)   do { } while (/* CONSTCOND */ 0)
+#define m_claim(m, mowner)             do { } while (/* CONSTCOND */ 0)
+#define mowner_attach(mo)              do { } while (/* CONSTCOND */ 0)
+#define mowner_detach(mo)              do { } while (/* CONSTCOND */ 0)
+#define m_claimm(m, mo)                do { } while (/* CONSTCOND */ 0)
 #endif
 
+#define MCLAIM(m, mo)          m_claim((m), (mo))
+#define MOWNER_ATTACH(mo)      mowner_attach(mo)
+#define MOWNER_DETACH(mo)      mowner_detach(mo)
+
 /*
  * mbuf allocation/deallocation macros:
@@ -445,39 +433,8 @@ do { \
  * If 'how' is M_WAIT, these macros (and the corresponding functions)
  * are guaranteed to return successfully.
  */
-#define MGET(m, how, type) \
-MBUFLOCK( \
-       (m) = pool_cache_get(mb_cache, \
-           (how) == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0); \
-       if (m) { \
-               mbstat.m_mtypes[type]++; \
-               _MOWNERINIT((m), (type)); \
-               (m)->m_type = (type); \
-               (m)->m_next = (struct mbuf *)NULL; \
-               (m)->m_nextpkt = (struct mbuf *)NULL; \
-               (m)->m_data = (m)->m_dat; \
-               (m)->m_flags = 0; \
-       } \
-)
-
-#define MGETHDR(m, how, type) \
-MBUFLOCK( \
-       (m) = pool_cache_get(mb_cache, \
-           (how) == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : 0); \
-       if (m) { \
-               mbstat.m_mtypes[type]++; \
-               _MOWNERINIT((m), (type)); \
-               (m)->m_type = (type); \
-               (m)->m_next = (struct mbuf *)NULL; \
-               (m)->m_nextpkt = (struct mbuf *)NULL; \
-               (m)->m_data = (m)->m_pktdat; \
-               (m)->m_flags = M_PKTHDR; \
-               (m)->m_pkthdr.rcvif = NULL; \
-               (m)->m_pkthdr.csum_flags = 0; \
-               (m)->m_pkthdr.csum_data = 0; \
-               SLIST_INIT(&(m)->m_pkthdr.tags); \
-       } \
-)
+#define MGET(m, how, type)     m = m_get((how), (type))
+#define MGETHDR(m, how, type)  m = m_gethdr((how), (type))
 
 #if defined(_KERNEL)
 #define _M_
@@ -520,7 +477,7 @@ do { \
        (n)->m_ext.ext_prevref = (o); \
        (o)->m_ext.ext_nextref = (n); \
        (n)->m_ext.ext_nextref->m_ext.ext_prevref = (n); \
-       _MOWNERREF((n), (n)->m_flags); \
+       mowner_ref((n), (n)->m_flags); \
        MCLREFDEBUGN((n), __FILE__, __LINE__); \
 } while (/* CONSTCOND */ 0)
 
@@ -548,15 +505,12 @@ do { \
  */
 #define _MCLGET(m, pool_cache, size, how) \
 do { \
-       MBUFLOCK( \
-               (m)->m_ext.ext_buf = \
-                   pool_cache_get_paddr((pool_cache), \
-                       (how) == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : 0, \
-                       &(m)->m_ext.ext_paddr); \
-               if ((m)->m_ext.ext_buf != NULL) \
-                       _MOWNERREF((m), M_EXT|M_CLUSTER); \
-       ); \
-       if ((m)->m_ext.ext_buf != NULL) { \
+       (m)->m_ext.ext_buf = \
+           pool_cache_get_paddr((pool_cache), \
+               (how) == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : 0, \
+               &(m)->m_ext.ext_paddr); \
+       if ((m)->m_ext.ext_buf != NULL) { \
+               mowner_ref((m), M_EXT|M_CLUSTER); \
                (m)->m_data = (m)->m_ext.ext_buf; \
                (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | \
                    M_EXT|M_CLUSTER|M_EXT_RW; \
@@ -586,7 +540,7 @@ do { \
                (m)->m_ext.ext_arg = NULL; \
                (m)->m_ext.ext_type = mbtypes[(m)->m_type]; \
                MCLINITREFERENCE(m); \
-               MOWNERREF((m), M_EXT); \
+               mowner_ref((m), M_EXT); \
        } \
 } while (/* CONSTCOND */ 0)
 
@@ -599,13 +553,13 @@ do { \
        (m)->m_ext.ext_arg = (arg); \
        (m)->m_ext.ext_type = (type); \
        MCLINITREFERENCE(m); \
-       MOWNERREF((m), M_EXT); \
+       mowner_ref((m), M_EXT); \
 } while (/* CONSTCOND */ 0)
 
 #define MEXTREMOVE(m) \
 do { \
+       mowner_revoke((m), 0, (m)->m_flags); \
        int _ms_ = splvm(); /* MBUFLOCK */ \
-       _MOWNERREVOKE((m), 0, (m)->m_flags); \
        m_ext_free(m, FALSE); \
        splx(_ms_); \
        (m)->m_flags &= ~M_EXTCOPYFLAGS; \
@@ -631,12 +585,12 @@ do { \
  * Place the successor, if any, in n.
  */
 #define MFREE(m, n) \
+       mowner_revoke((m), 1, (m)->m_flags); \
+       mbstat_type_add((m)->m_type, -1); \
        MBUFLOCK( \
-               mbstat.m_mtypes[(m)->m_type]--; \
                if ((m)->m_flags & M_PKTHDR) \
                        m_tag_delete_chain((m), NULL); \
                (n) = (m)->m_next; \
-               _MOWNERREVOKE((m), 1, m->m_flags); \
                if ((m)->m_flags & M_EXT) { \
                        m_ext_free(m, TRUE); \
                } else { \
@@ -757,7 +711,8 @@ do { \
 /* change mbuf to new type */
 #define MCHTYPE(m, t) \
 do { \
-       MBUFLOCK(mbstat.m_mtypes[(m)->m_type]--; mbstat.m_mtypes[t]++;); \
+       mbstat_type_add((m)->m_type, -1); \
+       mbstat_type_add(t, 1); \
        (m)->m_type = t; \
 } while (/* CONSTCOND */ 0)
 
@@ -845,6 +800,10 @@ struct mbstat {
        u_short m_mtypes[256];  /* type specific mbuf allocations */
 };
 
+struct mbstat_cpu {
+       u_int m_mtypes[256];    /* type specific mbuf allocations */
+};
+
 /*
  * Mbuf sysctl variables.
  */
@@ -911,9 +870,6 @@ void m_adj(struct mbuf *, int);
 int m_apply(struct mbuf *, int, int,
                int (*)(void *, void *, unsigned int), void *);
 void m_cat(struct mbuf *,struct mbuf *);
-#ifdef MBUFTRACE
-void m_claimm(struct mbuf *, struct mowner *);
-#endif
 void m_clget(struct mbuf *, int);
 int m_mballoc(int, int);
 void m_copyback(struct mbuf *, int, int, const void *);
@@ -930,6 +886,9 @@ void m_move_pkthdr(struct mbuf *to, struct mbuf *from);
 static __inline u_int m_length(struct mbuf *) __unused;
 static __inline void m_ext_free(struct mbuf *, bool) __unused;
 
+/* Statistics */
+void mbstat_type_add(int, int);
+
 /* Packet tag routines */
 struct m_tag *m_tag_get(int, int, int);
 void m_tag_free(struct m_tag *);