2018-10-12 08:41:18 +03:00
|
|
|
/* $NetBSD: ip_reass.c,v 1.21 2018/10/12 05:41:18 maxv Exp $ */
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1982, 1986, 1988, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)ip_input.c 8.2 (Berkeley) 1/4/94
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* IP reassembly.
|
|
|
|
*
|
|
|
|
* Additive-Increase/Multiplicative-Decrease (AIMD) strategy for IP
|
|
|
|
* reassembly queue buffer managment.
|
|
|
|
*
|
|
|
|
* We keep a count of total IP fragments (NB: not fragmented packets),
|
|
|
|
* awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
|
|
|
|
* If ip_nfrags exceeds ip_maxfrags the limit, we drop half the total
|
|
|
|
* fragments in reassembly queues. This AIMD policy avoids repeatedly
|
|
|
|
* deleting single packets under heavy fragmentation load (e.g., from lossy
|
|
|
|
* NFS peers).
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/cdefs.h>
|
2018-10-12 08:41:18 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: ip_reass.c,v 1.21 2018/10/12 05:41:18 maxv Exp $");
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
#include <sys/param.h>
|
2010-07-19 18:09:44 +04:00
|
|
|
#include <sys/types.h>
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
2010-10-03 23:44:47 +04:00
|
|
|
#include <sys/mutex.h>
|
2010-07-14 02:16:10 +04:00
|
|
|
#include <sys/pool.h>
|
2010-07-19 18:09:44 +04:00
|
|
|
#include <sys/queue.h>
|
2010-07-14 02:16:10 +04:00
|
|
|
#include <sys/sysctl.h>
|
2010-07-19 18:09:44 +04:00
|
|
|
#include <sys/systm.h>
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
#include <net/if.h>
|
|
|
|
|
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/in_systm.h>
|
|
|
|
#include <netinet/ip.h>
|
|
|
|
#include <netinet/in_pcb.h>
|
2010-07-19 18:09:44 +04:00
|
|
|
#include <netinet/ip_var.h>
|
2010-07-14 02:16:10 +04:00
|
|
|
#include <netinet/ip_private.h>
|
|
|
|
#include <netinet/in_var.h>
|
|
|
|
|
|
|
|
/*
 * IP reassembly queue structures.  Each fragment being reassembled is
 * attached to one of these structures.  They are timed out after TTL
 * drops to 0, and may also be reclaimed if memory becomes tight.
 */

/* Per-fragment entry: one for each fragment awaiting reassembly. */
typedef struct ipfr_qent {
	TAILQ_ENTRY(ipfr_qent)	ipqe_q;		/* link on ipq_fragq list */
	struct ip *		ipqe_ip;	/* fragment's IP header */
	struct mbuf *		ipqe_m;		/* mbuf holding the fragment */
	bool			ipqe_mff;	/* "more fragments" (IP_MF) set */
	uint16_t		ipqe_off;	/* fragment offset, in bytes */
	uint16_t		ipqe_len;	/* fragment data length, in bytes */
} ipfr_qent_t;

TAILQ_HEAD(ipfr_qent_head, ipfr_qent);

/* Per-datagram reassembly queue, keyed by (src, dst, id, proto). */
typedef struct ipfr_queue {
	LIST_ENTRY(ipfr_queue)	ipq_q;		/* to other reass headers */
	struct ipfr_qent_head	ipq_fragq;	/* queue of fragment entries */
	uint8_t			ipq_ttl;	/* time for reass q to live */
	uint8_t			ipq_p;		/* protocol of this fragment */
	uint16_t		ipq_id;		/* sequence id for reassembly */
	struct in_addr		ipq_src;	/* source address */
	struct in_addr		ipq_dst;	/* destination address */
	uint16_t		ipq_nfrags;	/* frags in this queue entry */
	uint8_t			ipq_tos;	/* TOS of this fragment */
	int			ipq_ipsec;	/* IPsec flags of first fragment
						 * (M_DECRYPTED|M_AUTHIPHDR);
						 * later frags must match */
} ipfr_queue_t;
|
|
|
|
|
|
|
|
/*
 * Hash table of IP reassembly queues.
 */
#define	IPREASS_HASH_SHIFT	6
#define	IPREASS_HASH_SIZE	(1 << IPREASS_HASH_SHIFT)
#define	IPREASS_HASH_MASK	(IPREASS_HASH_SIZE - 1)
/* Hash on low bits of the source address (x) and the IP id (y). */
#define	IPREASS_HASH(x, y) \
    (((((x) & 0xf) | ((((x) >> 8) & 0xf) << 4)) ^ (y)) & IPREASS_HASH_MASK)

/* Hash buckets of reassembly queues; protected by ipfr_lock. */
static LIST_HEAD(, ipfr_queue)	ip_frags[IPREASS_HASH_SIZE];
/* Allocation cache for fragment entries (ipfr_qent_t). */
static pool_cache_t		ipfren_cache;
/* Lock protecting the reassembly structures and counters below. */
static kmutex_t			ipfr_lock;

/* Number of packets in reassembly queue and total number of fragments. */
static int	ip_nfragpackets;
static int	ip_nfrags;

/* Limits on packet and fragments. */
static int	ip_maxfragpackets;
static int	ip_maxfrags;

/*
 * Cached copy of nmbclusters.  If nmbclusters is different, recalculate
 * IP parameters derived from nmbclusters.
 */
static int	ip_nmbclusters;

/*
 * IP reassembly TTL machinery for multiplicative drop.
 * fragttl_histo[t] counts queued fragments whose queue has TTL t.
 */
static u_int	fragttl_histo[IPFRAGTTL + 1];

/* Sysctl log for the nodes created in sysctl_ip_reass_setup(). */
static struct sysctllog *ip_reass_sysctllog;

void		sysctl_ip_reass_setup(void);
static void	ip_nmbclusters_changed(void);

static struct mbuf *	ip_reass(ipfr_qent_t *, ipfr_queue_t *, u_int);
static u_int		ip_reass_ttl_decr(u_int ticks);
static void		ip_reass_drophalf(void);
static void		ip_freef(ipfr_queue_t *);
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ip_reass_init:
|
|
|
|
*
|
|
|
|
* Initialization of IP reassembly mechanism.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ip_reass_init(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2010-10-03 23:44:47 +04:00
|
|
|
ipfren_cache = pool_cache_init(sizeof(ipfr_qent_t), coherency_unit,
|
|
|
|
0, 0, "ipfrenpl", NULL, IPL_NET, NULL, NULL, NULL);
|
2010-10-07 07:15:49 +04:00
|
|
|
mutex_init(&ipfr_lock, MUTEX_DEFAULT, IPL_VM);
|
2010-07-14 02:16:10 +04:00
|
|
|
|
|
|
|
for (i = 0; i < IPREASS_HASH_SIZE; i++) {
|
2010-08-25 04:05:14 +04:00
|
|
|
LIST_INIT(&ip_frags[i]);
|
2010-07-14 02:16:10 +04:00
|
|
|
}
|
|
|
|
ip_maxfragpackets = 200;
|
|
|
|
ip_maxfrags = 0;
|
|
|
|
ip_nmbclusters_changed();
|
|
|
|
|
|
|
|
sysctl_ip_reass_setup();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * sysctl_ip_reass_setup:
 *
 *	Create the "net.inet" and "net.inet.ip" sysctl nodes and the
 *	"net.inet.ip.maxfragpackets" knob backing ip_maxfragpackets.
 */
void
sysctl_ip_reass_setup(void)
{

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet",
		SYSCTL_DESCR("PF_INET related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip",
		SYSCTL_DESCR("IPv4 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	/* Read-write knob: limit on reassembly queues (see ip_reass()). */
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxfragpackets",
		SYSCTL_DESCR("Maximum number of fragments to retain for "
		    "possible reassembly"),
		NULL, 0, &ip_maxfragpackets, 0,
		CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFRAGPACKETS, CTL_EOL);
}
|
|
|
|
|
|
|
|
/*
 * Re-derive the limits based on nmbclusters if its value changed since
 * they were last computed (see ip_nmbclusters_changed()).
 */
#define	CHECK_NMBCLUSTER_PARAMS()				\
do {								\
	if (__predict_false(ip_nmbclusters != nmbclusters))	\
		ip_nmbclusters_changed();			\
} while (/*CONSTCOND*/0)
|
|
|
|
|
|
|
|
/*
 * Compute IP limits derived from the value of nmbclusters.
 */
static void
ip_nmbclusters_changed(void)
{
	/* Allow at most a quarter of the mbuf clusters for fragments. */
	ip_maxfrags = nmbclusters / 4;
	/* Record the value used, so CHECK_NMBCLUSTER_PARAMS() can detect
	 * a later change of nmbclusters. */
	ip_nmbclusters = nmbclusters;
}
|
|
|
|
|
|
|
|
/*
 * ip_reass:
 *
 *	Take incoming datagram fragment and try to reassemble it into
 *	whole datagram.  If a chain for reassembly of this datagram already
 *	exists, then it is given as 'fp'; otherwise have to make a chain.
 *
 *	=> Called with ipfr_lock held; released on every return path.
 *	=> Returns the reassembled packet on completion, otherwise NULL
 *	   (both when more fragments are awaited and when the fragment
 *	   was dropped).
 */
static struct mbuf *
ip_reass(ipfr_qent_t *ipqe, ipfr_queue_t *fp, const u_int hash)
{
	struct ip *ip = ipqe->ipqe_ip;
	const int hlen = ip->ip_hl << 2;
	struct mbuf *m = ipqe->ipqe_m, *t;
	/* IPsec history of this fragment, recorded on a new queue below. */
	int ipsecflags = m->m_flags & (M_DECRYPTED|M_AUTHIPHDR);
	ipfr_qent_t *nq, *p, *q;
	int i, next;

	KASSERT(mutex_owned(&ipfr_lock));

	/*
	 * Presence of header sizes in mbufs would confuse code below.
	 * (The IP header is re-exposed before the packet is returned.)
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * We are about to add a fragment; increment frag count.
	 */
	ip_nfrags++;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly: a) if maxfrag is 0,
		 * never accept fragments b) if maxfrag is -1, accept
		 * all fragments without limitation.
		 */
		if (ip_maxfragpackets < 0) {
			/* no limit */
		} else if (ip_nfragpackets >= ip_maxfragpackets) {
			goto dropfrag;
		}
		fp = malloc(sizeof(ipfr_queue_t), M_FTABLE, M_NOWAIT);
		if (fp == NULL) {
			goto dropfrag;
		}
		ip_nfragpackets++;
		TAILQ_INIT(&fp->ipq_fragq);
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_tos = ip->ip_tos;
		fp->ipq_ipsec = ipsecflags;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		LIST_INSERT_HEAD(&ip_frags[hash], fp, ipq_q);
		/* First fragment on the queue: insert at the head. */
		p = NULL;
		goto insert;
	} else {
		fp->ipq_nfrags++;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		if (q->ipqe_off > ipqe->ipqe_off)
			break;
	}
	if (q != NULL) {
		p = TAILQ_PREV(q, ipfr_qent_head, ipqe_q);
	} else {
		p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	}

	/*
	 * Look at the preceding segment.
	 *
	 * If it provides some of our data already, in part or entirely, trim
	 * us or drop us.
	 *
	 * If a preceding segment exists, and was marked as the last segment,
	 * drop us.
	 */
	if (p != NULL) {
		/* i = number of bytes by which p overlaps us. */
		i = p->ipqe_off + p->ipqe_len - ipqe->ipqe_off;
		if (i > 0) {
			if (i >= ipqe->ipqe_len) {
				/* Fully covered by the previous segment. */
				goto dropfrag;
			}
			/* Trim the overlapping prefix off this fragment. */
			m_adj(ipqe->ipqe_m, i);
			ipqe->ipqe_off = ipqe->ipqe_off + i;
			ipqe->ipqe_len = ipqe->ipqe_len - i;
		}
	}
	if (p != NULL && !p->ipqe_mff) {
		goto dropfrag;
	}

	/*
	 * Look at the segments that follow.
	 *
	 * If we cover them, in part or entirely, trim them or dequeue them.
	 *
	 * If a following segment exists, and we are marked as the last
	 * segment, drop us.
	 */
	while (q != NULL) {
		/* i = number of bytes by which we overlap q. */
		i = ipqe->ipqe_off + ipqe->ipqe_len - q->ipqe_off;
		if (i <= 0) {
			break;
		}
		if (i < q->ipqe_len) {
			/* Partial overlap: trim q's leading bytes. */
			q->ipqe_off = q->ipqe_off + i;
			q->ipqe_len = q->ipqe_len - i;
			m_adj(q->ipqe_m, i);
			break;
		}
		/* q is fully covered: unlink and free it. */
		nq = TAILQ_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		fp->ipq_nfrags--;
		ip_nfrags--;
		q = nq;
	}
	if (q != NULL && !ipqe->ipqe_mff) {
		goto dropfrag;
	}

insert:
	/*
	 * Stick new segment in its place; check for complete reassembly.
	 */
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
	}
	/* Walk the list: any hole means reassembly is not complete yet. */
	next = 0;
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		if (q->ipqe_off != next) {
			mutex_exit(&ipfr_lock);
			return NULL;
		}
		next += q->ipqe_len;
	}
	/* The last segment must have IP_MF clear. */
	p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	if (p->ipqe_mff) {
		mutex_exit(&ipfr_lock);
		return NULL;
	}

	/*
	 * Reassembly is complete.  Check for a bogus message size.
	 */
	q = TAILQ_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		ip_freef(fp);
		mutex_exit(&ipfr_lock);
		return NULL;
	}
	/* Detach the queue and update global counters before unlocking. */
	LIST_REMOVE(fp, ipq_q);
	ip_nfrags -= fp->ipq_nfrags;
	ip_nfragpackets--;
	mutex_exit(&ipfr_lock);

	/* Concatenate all fragments. */
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = TAILQ_NEXT(q, ipqe_q);
	pool_cache_put(ipfren_cache, q);

	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = TAILQ_NEXT(q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		/* Only the first mbuf of the chain may keep M_PKTHDR. */
		m_remove_pkthdr(t);
		m_cat(m, t);
	}

	/*
	 * Create header for new packet by modifying header of first
	 * packet.  Dequeue and discard fragment reassembly header.  Make
	 * header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_off = htons(0);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	free(fp, M_FTABLE);

	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);

	/* Fix up mbuf.  XXX This should be done elsewhere. */
	{
		KASSERT(m->m_flags & M_PKTHDR);
		int plen = 0;
		for (t = m; t; t = t->m_next) {
			plen += t->m_len;
		}
		m->m_pkthdr.len = plen;
		m->m_pkthdr.csum_flags = 0;
	}
	return m;

dropfrag:
	if (fp != NULL) {
		fp->ipq_nfrags--;
	}
	ip_nfrags--;
	IP_STATINC(IP_STAT_FRAGDROPPED);
	mutex_exit(&ipfr_lock);

	pool_cache_put(ipfren_cache, ipqe);
	m_freem(m);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ip_freef:
|
|
|
|
*
|
|
|
|
* Free a fragment reassembly header and all associated datagrams.
|
|
|
|
*/
|
2010-07-19 18:09:44 +04:00
|
|
|
static void
|
2010-08-25 04:05:14 +04:00
|
|
|
ip_freef(ipfr_queue_t *fp)
|
2010-07-14 02:16:10 +04:00
|
|
|
{
|
2010-10-03 23:44:47 +04:00
|
|
|
ipfr_qent_t *q;
|
2010-07-14 02:16:10 +04:00
|
|
|
|
2010-10-03 23:44:47 +04:00
|
|
|
KASSERT(mutex_owned(&ipfr_lock));
|
2010-07-14 02:16:10 +04:00
|
|
|
|
2010-10-03 23:44:47 +04:00
|
|
|
LIST_REMOVE(fp, ipq_q);
|
|
|
|
ip_nfrags -= fp->ipq_nfrags;
|
|
|
|
ip_nfragpackets--;
|
2010-07-14 02:16:10 +04:00
|
|
|
|
2010-10-03 23:44:47 +04:00
|
|
|
while ((q = TAILQ_FIRST(&fp->ipq_fragq)) != NULL) {
|
|
|
|
TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
|
|
|
|
m_freem(q->ipqe_m);
|
|
|
|
pool_cache_put(ipfren_cache, q);
|
2010-07-14 02:16:10 +04:00
|
|
|
}
|
|
|
|
free(fp, M_FTABLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ip_reass_ttl_decr:
 *
 *	Decrement TTL of all reassembly queue entries by `ticks'.  Count
 *	number of distinct fragments (as opposed to partial, fragmented
 *	datagrams) in the reassembly queue.  While we traverse the entire
 *	reassembly queue, compute and return the median TTL over all
 *	fragments.
 */
static u_int
ip_reass_ttl_decr(u_int ticks)
{
	u_int nfrags, median, dropfraction, keepfraction;
	ipfr_queue_t *fp, *nfp;
	int i;

	nfrags = 0;
	memset(fragttl_histo, 0, sizeof(fragttl_histo));

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		for (fp = LIST_FIRST(&ip_frags[i]); fp != NULL; fp = nfp) {
			/* Age the queue, saturating at zero. */
			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
			    0 : fp->ipq_ttl - ticks);
			nfp = LIST_NEXT(fp, ipq_q);
			if (fp->ipq_ttl == 0) {
				/* Timed out: free the whole queue. */
				IP_STATINC(IP_STAT_FRAGTIMEOUT);
				ip_freef(fp);
			} else {
				/* Still alive: account its fragments. */
				nfrags += fp->ipq_nfrags;
				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
			}
		}
	}

	KASSERT(ip_nfrags == nfrags);

	/* Find median (or other drop fraction) in histogram. */
	dropfraction = (ip_nfrags / 2);
	keepfraction = ip_nfrags - dropfraction;
	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
		median += fragttl_histo[i];
		if (median >= keepfraction)
			break;
	}

	/* Return TTL of median (or other fraction). */
	return (u_int)i;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ip_reass_drophalf(void)
|
|
|
|
{
|
|
|
|
u_int median_ticks;
|
|
|
|
|
2010-10-03 23:44:47 +04:00
|
|
|
KASSERT(mutex_owned(&ipfr_lock));
|
|
|
|
|
2010-07-14 02:16:10 +04:00
|
|
|
/*
|
|
|
|
* Compute median TTL of all fragments, and count frags
|
|
|
|
* with that TTL or lower (roughly half of all fragments).
|
|
|
|
*/
|
|
|
|
median_ticks = ip_reass_ttl_decr(0);
|
|
|
|
|
|
|
|
/* Drop half. */
|
|
|
|
median_ticks = ip_reass_ttl_decr(median_ticks);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ip_reass_drain: drain off all datagram fragments. Do not acquire
|
|
|
|
* softnet_lock as can be called from hardware interrupt context.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ip_reass_drain(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We may be called from a device's interrupt context. If
|
|
|
|
* the ipq is already busy, just bail out now.
|
|
|
|
*/
|
2010-10-03 23:44:47 +04:00
|
|
|
if (mutex_tryenter(&ipfr_lock)) {
|
2010-07-14 02:16:10 +04:00
|
|
|
/*
|
|
|
|
* Drop half the total fragments now. If more mbufs are
|
|
|
|
* needed, we will be called again soon.
|
|
|
|
*/
|
|
|
|
ip_reass_drophalf();
|
2010-10-03 23:44:47 +04:00
|
|
|
mutex_exit(&ipfr_lock);
|
2010-07-14 02:16:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ip_reass_slowtimo:
 *
 *	If a timer expires on a reassembly queue, discard it.
 *	Also enforces ip_maxfrags and ip_maxfragpackets, dropping
 *	queued fragments as needed.
 */
void
ip_reass_slowtimo(void)
{
	/* Bucket where the last over-limit drain stopped; resume there. */
	static u_int dropscanidx = 0;
	u_int i, median_ttl;

	mutex_enter(&ipfr_lock);

	/* Age TTL of all fragments by 1 tick. */
	median_ttl = ip_reass_ttl_decr(1);

	/* Make sure fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags > ip_maxfrags) {
		ip_reass_ttl_decr(median_ttl);
	}

	/*
	 * If we are over the maximum number of fragmented packets (due to
	 * the limit being lowered), drain off enough to get down to the
	 * new limit.  Start draining from the reassembly hashqueue most
	 * recently drained.
	 */
	if (ip_maxfragpackets < 0)
		;	/* negative limit means "unlimited": nothing to do */
	else {
		int wrapped = 0;

		i = dropscanidx;
		while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
			/* Free every queue in this bucket. */
			while (LIST_FIRST(&ip_frags[i]) != NULL) {
				ip_freef(LIST_FIRST(&ip_frags[i]));
			}
			if (++i >= IPREASS_HASH_SIZE) {
				i = 0;
			}
			/*
			 * Do not scan forever even if fragment counters are
			 * wrong: stop after scanning entire reassembly queue.
			 */
			if (i == dropscanidx) {
				wrapped = 1;
			}
		}
		dropscanidx = i;
	}
	mutex_exit(&ipfr_lock);
}
|
2010-07-19 18:09:44 +04:00
|
|
|
|
|
|
|
/*
 * ip_reass_packet: generic routine to perform IP reassembly.
 *
 * => Passed fragment should have IP_MF flag and/or offset set.
 * => Fragment should not have other than IP_MF flags set.
 *
 * => Returns 0 on success or error otherwise.
 * => On complete, m0 represents a constructed final packet.
 */
int
ip_reass_packet(struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct ip *ip = mtod(m, struct ip *);
	const int hlen = ip->ip_hl << 2;
	const int len = ntohs(ip->ip_len);
	/* IPsec history of this fragment; must match the queue's. */
	int ipsecflags = m->m_flags & (M_DECRYPTED|M_AUTHIPHDR);
	ipfr_queue_t *fp;
	ipfr_qent_t *ipqe;
	u_int hash, off, flen;
	bool mff;

	/*
	 * Prevent TCP blind data attacks by not allowing non-initial
	 * fragments to start at less than 68 bytes (minimal fragment
	 * size) and making sure the first fragment is at least 68
	 * bytes.
	 */
	off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}

	/* Reject fragments that would exceed the maximum packet size. */
	if (off + len > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		return EINVAL;
	}

	/*
	 * Fragment length and MF flag.  Make sure that fragments have
	 * a data length which is non-zero and multiple of 8 bytes.
	 */
	flen = ntohs(ip->ip_len) - hlen;
	mff = (ip->ip_off & htons(IP_MF)) != 0;
	if (mff && (flen == 0 || (flen & 0x7) != 0)) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}

	/* Look for queue of fragments of this datagram. */
	mutex_enter(&ipfr_lock);
	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	LIST_FOREACH(fp, &ip_frags[hash], ipq_q) {
		if (ip->ip_id != fp->ipq_id)
			continue;
		if (!in_hosteq(ip->ip_src, fp->ipq_src))
			continue;
		if (!in_hosteq(ip->ip_dst, fp->ipq_dst))
			continue;
		if (ip->ip_p != fp->ipq_p)
			continue;
		break;
	}

	if (fp) {
		/* All fragments must have the same IPsec flags. */
		if (fp->ipq_ipsec != ipsecflags) {
			IP_STATINC(IP_STAT_BADFRAGS);
			mutex_exit(&ipfr_lock);
			return EINVAL;
		}

		/* Make sure that TOS matches previous fragments. */
		if (fp->ipq_tos != ip->ip_tos) {
			IP_STATINC(IP_STAT_BADFRAGS);
			mutex_exit(&ipfr_lock);
			return EINVAL;
		}
	}

	/*
	 * Create new entry and attempt to reassemble.
	 */
	IP_STATINC(IP_STAT_FRAGMENTS);
	ipqe = pool_cache_get(ipfren_cache, PR_NOWAIT);
	if (ipqe == NULL) {
		IP_STATINC(IP_STAT_RCVMEMDROP);
		mutex_exit(&ipfr_lock);
		return ENOMEM;
	}
	ipqe->ipqe_mff = mff;
	ipqe->ipqe_m = m;
	ipqe->ipqe_ip = ip;
	ipqe->ipqe_off = off;
	ipqe->ipqe_len = flen;

	/* ip_reass() releases ipfr_lock on every return path. */
	*m0 = ip_reass(ipqe, fp, hash);
	if (*m0) {
		/* Note that finally reassembled. */
		IP_STATINC(IP_STAT_REASSEMBLED);
	}
	return 0;
}
|