freebsd11_network: Additions and modifications for emulex_oce.

This change turned out to be bigger than expected...
This commit is contained in:
Augustin Cavalier 2018-10-04 19:34:50 -04:00
parent 6ab878c0e5
commit d20b9323d8
22 changed files with 782 additions and 32 deletions

View File

@@ -32,11 +32,12 @@ KernelStaticLibrary libfreebsd11_network.a :
fbsd_mii.c
fbsd_mii_bitbang.c
fbsd_mii_physubr.c
fbsd_subr_bufring.c
fbsd_subr_sbuf.c
fbsd_time.c
firmware.c
if.c
libkern.c
libkern.cpp
mbuf.c
mii.c
mutex.c
@@ -44,7 +45,7 @@ KernelStaticLibrary libfreebsd11_network.a :
smp.c
subr_autoconf.cpp
synch.c
systm.c
systm.cpp
taskqueue.c
unit.cpp
;

View File

@@ -46,9 +46,6 @@ extern "C" {
#endif
#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))
struct internal_intr {
device_t dev;
driver_filter_t filter;
@@ -85,7 +82,7 @@ map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
phys_addr_t physicalAddr = _phy - offset;
area_id area;
size = ROUNDUP(size + offset, B_PAGE_SIZE);
size = roundup(size + offset, B_PAGE_SIZE);
area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
protection, virtualAddr);
if (area < B_OK)
@@ -265,6 +262,13 @@ rman_get_rid(struct resource *res)
}
void*
rman_get_virtual(struct resource *res)
{
return NULL;
}
// #pragma mark - Interrupt handling

View File

@@ -15,6 +15,9 @@
#define atomic_subtract_int(ptr, value) \
atomic_add((int32 *)(ptr), -value)
#define atomic_load_int(ptr) \
atomic_get((int32 *)ptr)
#define atomic_set_acq_32(ptr, value) \
atomic_set_int(ptr, value)
@@ -27,6 +30,12 @@
#define atomic_cmpset_int(ptr, old, new) \
(atomic_test_and_set((int32 *)(ptr), new, old) == old)
#define atomic_add_32 atomic_add_int
#define atomic_subtract_32 atomic_subtract_int
#define atomic_load_acq_32 atomic_load_int
#define atomic_store_rel_int atomic_set_acq_32
#define atomic_cmpset_acq_int atomic_cmpset_int
#define mb() memory_full_barrier()
#define wmb() memory_write_barrier_inline()
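
As a rough illustration of the cmpset semantics these mappings provide, here is a hedged sketch of a lock-free increment; the counter is hypothetical and not part of this commit:

/* atomic_cmpset_int(ptr, old, new) stores new and returns nonzero
 * only if *ptr still equals old, so a retry loop gives a lock-free
 * increment on top of Haiku's atomic_test_and_set(). */
static int32 counter;

static void
counter_inc(void)
{
	int32 oldv, newv;
	do {
		oldv = atomic_load_int(&counter);
		newv = oldv + 1;
	} while (!atomic_cmpset_int(&counter, oldv, newv));
}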

View File

@@ -0,0 +1,15 @@
/*
* Copyright 2018, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT license.
*/
#ifndef _FBSD_COMPAT_MACHINE_CPU_H_
#define _FBSD_COMPAT_MACHINE_CPU_H_
#include <arch_cpu_defs.h>
#define cpu_spinwait() SPINLOCK_PAUSE()
#endif /* _FBSD_COMPAT_MACHINE_CPU_H_ */
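
A hedged sketch of the intended use: busy-wait loops, like the tail-synchronization spins in buf_ring.h, call the macro once per iteration. The "ready" flag here is hypothetical, not part of this commit:

static int32 ready;

static void
wait_for_ready(void)
{
	/* Hint the CPU that we are spinning; expands to SPINLOCK_PAUSE(). */
	while (atomic_get(&ready) == 0)
		cpu_spinwait();
}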

View File

@@ -382,6 +382,7 @@ extern int ether_output_frame(struct ifnet *, struct mbuf *);
extern char *ether_sprintf(const u_int8_t *);
void ether_vlan_mtap(struct bpf_if *, struct mbuf *,
void *, u_int);
struct mbuf *ether_vlanencap(struct mbuf *, uint16_t);
#else /* _KERNEL */

View File

@@ -72,7 +72,7 @@ struct if_data {
uint16_t ifi_datalen; /* length of this data struct */
uint32_t ifi_mtu; /* maximum transmission unit */
uint32_t ifi_metric; /* routing metric (external only) */
uint32_t ifi_baudrate; /* linespeed */
uint64_t ifi_baudrate; /* linespeed */
/* volatile statistics */
uint64_t ifi_ipackets; /* packets received on interface */
uint64_t ifi_ierrors; /* input errors on interface */

View File

@@ -84,6 +84,7 @@ struct route;
#include <sys/lock.h> /* XXX */
#include <sys/mutex.h> /* XXX */
#include <sys/event.h> /* XXX */
#include <sys/buf_ring.h>
#include <sys/_task.h>
#define IF_DUNIT_NONE -1
@@ -623,6 +624,167 @@ do { \
IFQ_PURGE(ifq); \
} while (0)
static __inline int
drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
{
int error = 0;
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
IFQ_ENQUEUE(&ifp->if_snd, m, error);
if (error)
if_inc_counter((ifp), IFCOUNTER_OQDROPS, 1);
return (error);
}
#endif
error = buf_ring_enqueue(br, m);
if (error)
m_freem(m);
return (error);
}
static __inline void
drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *new)
{
/*
* The top of the list needs to be swapped
* for this one.
*/
#ifdef ALTQ
if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
/*
* The peek in the ALTQ case dequeued it,
* so put it back.
*/
IFQ_DRV_PREPEND(&ifp->if_snd, new);
return;
}
#endif
buf_ring_putback_sc(br, new);
}
static __inline struct mbuf *
drbr_peek(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
struct mbuf *m;
if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
/*
* Pull it off like a dequeue
* since drbr_advance() does nothing
* for altq and drbr_putback() will
* use the old prepend function.
*/
IFQ_DEQUEUE(&ifp->if_snd, m);
return (m);
}
#endif
return(buf_ring_peek_clear_sc(br));
}
static __inline void
drbr_flush(struct ifnet *ifp, struct buf_ring *br)
{
struct mbuf *m;
#ifdef ALTQ
if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
IFQ_PURGE(&ifp->if_snd);
#endif
while ((m = buf_ring_dequeue_sc(br)) != NULL)
m_freem(m);
}
static __inline void
drbr_free(struct buf_ring *br, struct malloc_type *type)
{
drbr_flush(NULL, br);
buf_ring_free(br, type);
}
static __inline struct mbuf *
drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
struct mbuf *m;
if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
IFQ_DEQUEUE(&ifp->if_snd, m);
return (m);
}
#endif
return (buf_ring_dequeue_sc(br));
}
static __inline void
drbr_advance(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
/* Nothing to do here since peek dequeues in altq case */
if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
return;
#endif
return (buf_ring_advance_sc(br));
}
static __inline struct mbuf *
drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
int (*func) (struct mbuf *, void *), void *arg)
{
struct mbuf *m;
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
IFQ_LOCK(&ifp->if_snd);
IFQ_POLL_NOLOCK(&ifp->if_snd, m);
if (m != NULL && func(m, arg) == 0) {
IFQ_UNLOCK(&ifp->if_snd);
return (NULL);
}
IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
IFQ_UNLOCK(&ifp->if_snd);
return (m);
}
#endif
m = buf_ring_peek(br);
if (m == NULL || func(m, arg) == 0)
return (NULL);
return (buf_ring_dequeue_sc(br));
}
static __inline int
drbr_empty(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (IFQ_IS_EMPTY(&ifp->if_snd));
#endif
return (buf_ring_empty(br));
}
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
#endif
return (!buf_ring_empty(br));
}
static __inline int
drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (ifp->if_snd.ifq_len);
#endif
return (buf_ring_count(br));
}
/*
* 72 was chosen below because it is the size of a TCP/IP
* header (40) + the minimum mss (32).
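
For context, the drbr_* helpers added above are normally driven from a driver's if_transmit path. The following is a minimal sketch of that pattern under assumed names (hw_submit() and the drain function are hypothetical, mirroring common FreeBSD driver usage rather than code from this commit):

/* Hedged sketch of a drbr-based transmit drain loop. */
static int hw_submit(struct mbuf *m);	/* hypothetical hardware enqueue */

static void
tx_drain(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		if (hw_submit(m) != 0) {
			drbr_putback(ifp, br, m);	/* HW full: leave it queued */
			break;
		}
		drbr_advance(ifp, br);	/* submitted: consume the slot */
	}
}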

View File

@@ -32,6 +32,8 @@
#ifndef _FBSD_COMPAT_NET_IF_VLAN_VAR_H_
#define _FBSD_COMPAT_NET_IF_VLAN_VAR_H_ 1
#include <net/if.h>
struct ether_vlan_header {
u_char evl_dhost[ETHER_ADDR_LEN];
u_char evl_shost[ETHER_ADDR_LEN];

View File

@@ -0,0 +1,361 @@
/*-
* Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _SYS_BUF_RING_H_
#define _SYS_BUF_RING_H_
#include <machine/cpu.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif
#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif
struct buf_ring {
volatile uint32_t br_prod_head;
volatile uint32_t br_prod_tail;
int br_prod_size;
int br_prod_mask;
uint64_t br_drops;
volatile uint32_t br_cons_head;
volatile uint32_t br_cons_tail;
int br_cons_size;
int br_cons_mask;
#ifdef DEBUG_BUFRING
struct mtx *br_lock;
#endif
void *br_ring[0];
};
/*
* multi-producer safe lock-free ring buffer enqueue
*
*/
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
int i;
for (i = br->br_cons_head; i != br->br_prod_head;
i = ((i + 1) & br->br_cons_mask))
if(br->br_ring[i] == buf)
panic("buf=%p already enqueue at %d prod=%d cons=%d",
buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
critical_enter();
do {
prod_head = br->br_prod_head;
prod_next = (prod_head + 1) & br->br_prod_mask;
cons_tail = br->br_cons_tail;
if (prod_next == cons_tail) {
rmb();
if (prod_head == br->br_prod_head &&
cons_tail == br->br_cons_tail) {
br->br_drops++;
critical_exit();
return (ENOBUFS);
}
continue;
}
} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
if (br->br_ring[prod_head] != NULL)
panic("dangling value in enqueue");
#endif
br->br_ring[prod_head] = buf;
/*
* If there are other enqueues in progress
* that preceded us, we need to wait for them
* to complete
*/
while (br->br_prod_tail != prod_head)
cpu_spinwait();
atomic_store_rel_int(&br->br_prod_tail, prod_next);
critical_exit();
return (0);
}
/*
* multi-consumer safe dequeue
*
*/
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
uint32_t cons_head, cons_next;
void *buf;
critical_enter();
do {
cons_head = br->br_cons_head;
cons_next = (cons_head + 1) & br->br_cons_mask;
if (cons_head == br->br_prod_tail) {
critical_exit();
return (NULL);
}
} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));
buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
br->br_ring[cons_head] = NULL;
#endif
/*
* If there are other dequeues in progress
* that preceded us, we need to wait for them
* to complete
*/
while (br->br_cons_tail != cons_head)
cpu_spinwait();
atomic_store_rel_int(&br->br_cons_tail, cons_next);
critical_exit();
return (buf);
}
/*
* single-consumer dequeue
* use where dequeue is protected by a lock
* e.g. a network driver's tx queue lock
*/
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
uint32_t cons_next_next;
#endif
uint32_t prod_tail;
void *buf;
/*
* This is a workaround to allow using buf_ring on ARM and ARM64.
* ARM64TODO: Fix buf_ring in a generic way.
* REMARKS: It is suspected that br_cons_head does not require
* load_acq operation, but this change was extensively tested
* and confirmed it's working. To be reviewed once again in
* FreeBSD-12.
*
* Preventing the following situation:
*
* Core(0) - buf_ring_enqueue()                       Core(1) - buf_ring_dequeue_sc()
* -----------------------------------------          ----------------------------------------------
*
*                                                    cons_head = br->br_cons_head;
* atomic_cmpset_acq_32(&br->br_prod_head, ...));
*                                                    buf = br->br_ring[cons_head];     <see <1>>
* br->br_ring[prod_head] = buf;
* atomic_store_rel_32(&br->br_prod_tail, ...);
*                                                    prod_tail = br->br_prod_tail;
*                                                    if (cons_head == prod_tail)
*                                                            return (NULL);
*                                                    <condition is false and code uses invalid (old) buf>
*
* <1> The load (on core 1) from br->br_ring[cons_head] can be reordered (speculatively read) by the CPU.
*/
#if defined(__arm__) || defined(__aarch64__)
cons_head = atomic_load_acq_32(&br->br_cons_head);
#else
cons_head = br->br_cons_head;
#endif
prod_tail = atomic_load_acq_32(&br->br_prod_tail);
cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif
if (cons_head == prod_tail)
return (NULL);
#ifdef PREFETCH_DEFINED
if (cons_next != prod_tail) {
prefetch(br->br_ring[cons_next]);
if (cons_next_next != prod_tail)
prefetch(br->br_ring[cons_next_next]);
}
#endif
br->br_cons_head = cons_next;
buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
br->br_ring[cons_head] = NULL;
if (!mtx_owned(br->br_lock))
panic("lock not held on single consumer dequeue");
if (br->br_cons_tail != cons_head)
panic("inconsistent list cons_tail=%d cons_head=%d",
br->br_cons_tail, cons_head);
#endif
br->br_cons_tail = cons_next;
return (buf);
}
/*
* single-consumer advance after a peek
* use where it is protected by a lock
* e.g. a network driver's tx queue lock
*/
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
uint32_t cons_head, cons_next;
uint32_t prod_tail;
cons_head = br->br_cons_head;
prod_tail = br->br_prod_tail;
cons_next = (cons_head + 1) & br->br_cons_mask;
if (cons_head == prod_tail)
return;
br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
br->br_ring[cons_head] = NULL;
#endif
br->br_cons_tail = cons_next;
}
/*
* Used to return a buffer (most likely already there)
* to the top of the ring. The caller should *not*
* have used any dequeue to pull it out of the ring
* but instead should have used the peek() function.
* This is normally used where the transmit queue
* of a driver is full, and an mbuf must be returned.
* Most likely what is in the ring buffer is what
* is being put back (since it was not removed), but
* sometimes the lower transmit function may have
* done a pullup or other function that will have
* changed it. As an optimization we always put it
* back (since jhb says the store is probably cheaper);
* if we have to do a multi-queue version we will need
* the compare and an atomic.
*/
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
KASSERT(br->br_cons_head != br->br_prod_tail,
("Buf-Ring has none in putback")) ;
br->br_ring[br->br_cons_head] = new;
}
/*
* return a pointer to the first entry in the ring
* without modifying it, or NULL if the ring is empty;
* race-prone if not protected by a lock
*/
static __inline void *
buf_ring_peek(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
panic("lock not held on single consumer dequeue");
#endif
/*
* I believe it is safe not to have a memory barrier
* here because we control cons, and tail is at worst
* a lagging indicator, so in the worst case we might
* return NULL immediately after a buffer has been enqueued.
*/
if (br->br_cons_head == br->br_prod_tail)
return (NULL);
return (br->br_ring[br->br_cons_head]);
}
static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
void *ret;
if (!mtx_owned(br->br_lock))
panic("lock not held on single consumer dequeue");
#endif
/*
* I believe it is safe not to have a memory barrier
* here because we control cons, and tail is at worst
* a lagging indicator, so in the worst case we might
* return NULL immediately after a buffer has been enqueued.
*/
if (br->br_cons_head == br->br_prod_tail)
return (NULL);
#ifdef DEBUG_BUFRING
/*
* Single consumer, i.e. cons_head will not move while we are
* running, so atomic_swap_ptr() is not necessary here.
*/
ret = br->br_ring[br->br_cons_head];
br->br_ring[br->br_cons_head] = NULL;
return (ret);
#else
return (br->br_ring[br->br_cons_head]);
#endif
}
static __inline int
buf_ring_full(struct buf_ring *br)
{
return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}
static __inline int
buf_ring_empty(struct buf_ring *br)
{
return (br->br_cons_head == br->br_prod_tail);
}
static __inline int
buf_ring_count(struct buf_ring *br)
{
return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
& br->br_prod_mask);
}
struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
#endif
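
Putting the header's pieces together, here is a minimal usage sketch; all names are hypothetical, and the count must be a power of two (enforced by buf_ring_alloc() in subr_bufring.c):

/* Hedged sketch: a single-consumer ring guarded by a driver lock. */
static void
ring_demo(struct mtx *sc_lock, struct mbuf *m)
{
	struct buf_ring *br;

	br = buf_ring_alloc(256, M_DEVBUF, M_NOWAIT, sc_lock);
	if (br == NULL)
		return;

	if (buf_ring_enqueue(br, m) == ENOBUFS)
		m_freem(m);	/* ring full: caller still owns the mbuf */

	mtx_lock(sc_lock);	/* the _sc variants assume this lock is held */
	while ((m = buf_ring_dequeue_sc(br)) != NULL)
		m_freem(m);	/* drain before freeing the ring */
	mtx_unlock(sc_lock);

	buf_ring_free(br, M_DEVBUF);
}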

View File

@@ -12,6 +12,7 @@
extern int random(void);
uint32_t arc4random(void);
u_int read_random(void *, u_int);
static __inline int imax(int a, int b) { return (a > b ? a : b); }
static __inline int imin(int a, int b) { return (a < b ? a : b); }

View File

@@ -25,7 +25,11 @@
#define M_MAGIC 877983977 /* time when first defined :-) */
#define M_DEVBUF
#define M_DEVBUF 0
struct malloc_type {
};
void *_kernel_malloc(size_t size, int flags);

View File

@@ -79,6 +79,54 @@
#define M_ASSERTPKTHDR(m) KASSERT(m != NULL && m->m_flags & M_PKTHDR, \
("%s: no mbuf packet header!", __func__))
/*
* Network interface cards are able to hash protocol fields (such as IPv4
* addresses and TCP port numbers) to classify packets into flows. These flows
* can then be used to maintain ordering while delivering packets to the OS
* via parallel input queues, as well as to provide a stateless affinity
* model. NIC drivers can pass up the hash via m->m_pkthdr.flowid, and set
* m_flag fields to indicate how the hash should be interpreted by the
* network stack.
*
* Most NICs support RSS, which provides ordering and explicit affinity, and
* use the hash m_flag bits to indicate what header fields were covered by
* the hash. M_HASHTYPE_OPAQUE and M_HASHTYPE_OPAQUE_HASH can be set by non-
* RSS cards or configurations that provide an opaque flow identifier, allowing
* for ordering and distribution without explicit affinity. Additionally,
* M_HASHTYPE_OPAQUE_HASH indicates that the flow identifier has hash
* properties.
*/
#define M_HASHTYPE_HASHPROP 0x80 /* has hash properties */
#define M_HASHTYPE_HASH(t) (M_HASHTYPE_HASHPROP | (t))
/* Microsoft RSS standard hash types */
#define M_HASHTYPE_NONE 0
#define M_HASHTYPE_RSS_IPV4 M_HASHTYPE_HASH(1) /* IPv4 2-tuple */
#define M_HASHTYPE_RSS_TCP_IPV4 M_HASHTYPE_HASH(2) /* TCPv4 4-tuple */
#define M_HASHTYPE_RSS_IPV6 M_HASHTYPE_HASH(3) /* IPv6 2-tuple */
#define M_HASHTYPE_RSS_TCP_IPV6 M_HASHTYPE_HASH(4) /* TCPv6 4-tuple */
#define M_HASHTYPE_RSS_IPV6_EX M_HASHTYPE_HASH(5) /* IPv6 2-tuple +
* ext hdrs */
#define M_HASHTYPE_RSS_TCP_IPV6_EX M_HASHTYPE_HASH(6) /* TCPv6 4-tuple +
* ext hdrs */
/* Non-standard RSS hash types */
#define M_HASHTYPE_RSS_UDP_IPV4 M_HASHTYPE_HASH(7) /* IPv4 UDP 4-tuple*/
#define M_HASHTYPE_RSS_UDP_IPV4_EX M_HASHTYPE_HASH(8) /* IPv4 UDP 4-tuple +
* ext hdrs */
#define M_HASHTYPE_RSS_UDP_IPV6 M_HASHTYPE_HASH(9) /* IPv6 UDP 4-tuple*/
#define M_HASHTYPE_RSS_UDP_IPV6_EX M_HASHTYPE_HASH(10)/* IPv6 UDP 4-tuple +
* ext hdrs */
#define M_HASHTYPE_OPAQUE 63 /* ordering, not affinity */
#define M_HASHTYPE_OPAQUE_HASH M_HASHTYPE_HASH(M_HASHTYPE_OPAQUE)
/* ordering+hash, not affinity*/
#define M_HASHTYPE_CLEAR(m) ((m)->m_pkthdr.rsstype = 0)
#define M_HASHTYPE_GET(m) ((m)->m_pkthdr.rsstype)
#define M_HASHTYPE_SET(m, v) ((m)->m_pkthdr.rsstype = (v))
#define M_HASHTYPE_TEST(m, v) (M_HASHTYPE_GET(m) == (v))
#define M_HASHTYPE_ISHASH(m) (M_HASHTYPE_GET(m) & M_HASHTYPE_HASHPROP)
#define MBUF_CHECKSLEEP(how) do { } while (0)
#define MTAG_PERSISTENT 0x800
@@ -122,12 +170,16 @@ extern int max_datalen; // MHLEN - max_hdr
struct pkthdr {
struct ifnet* rcvif;
int len;
int csum_flags;
int csum_data;
uint16_t tso_segsz;
uint16_t ether_vtag;
SLIST_HEAD(packet_tags, m_tag) tags;
int len;
/* Layer crossing persistent information. */
uint32_t flowid; /* packet's 4-tuple system */
uint64_t csum_flags; /* checksum and offload features */
int csum_data;
uint8_t rsstype; /* hash type */
uint16_t tso_segsz;
uint16_t ether_vtag;
};
struct m_tag {
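
A hedged sketch of how a receive path would use the new pkthdr fields and the M_HASHTYPE macros above together; cqe_hash and nqueues are hypothetical hardware-side values, not from this commit:

/* Hedged sketch: an RX handler publishing its RSS hash. */
static u_int
classify(struct mbuf *m, uint32_t cqe_hash, u_int nqueues)
{
	m->m_pkthdr.flowid = cqe_hash;	/* hash computed by the NIC */
	M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);	/* TCP/IPv4 4-tuple */

	if (M_HASHTYPE_ISHASH(m))
		return (m->m_pkthdr.flowid % nqueues);	/* pick an input queue */
	return (0);
}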

View File

@@ -65,6 +65,7 @@
#define roundup2(x, y) (((x) + ((y) - 1)) & (~((y) - 1)))
#define rounddown(x, y) (((x) / (y)) * (y))
#define rounddown2(x, y) ((x)&(~((y)-1))) /* if y is power of two */
#define powerof2(x) ((((x)-1)&(x))==0)
#define PRIMASK 0x0ff
#define PCATCH 0x100

View File

@@ -60,6 +60,7 @@ struct resource {
bus_space_handle_t rman_get_bushandle(struct resource *);
bus_space_tag_t rman_get_bustag(struct resource *);
int rman_get_rid(struct resource *);
void* rman_get_virtual(struct resource *);
static inline u_long

View File

@@ -0,0 +1 @@
/* nothing here */

View File

@@ -21,6 +21,9 @@ struct sysctl_ctx_list {
struct sysctl_oid_list {
};
struct sysctl_oid {
};
#define SYSCTL_HANDLER_ARGS void *oidp, void *arg1, int arg2, \
struct sysctl_req *req

View File

@@ -98,6 +98,9 @@ int _pause(const char *, int);
msleep((channel), NULL, (priority), (waitMessage), (timeout))
#define mtx_sleep msleep
void critical_enter(void);
void critical_exit(void);
struct unrhdr;
struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex);
void delete_unrhdr(struct unrhdr *);

View File

@@ -29,9 +29,13 @@
* @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93
* $FreeBSD: src/sys/net/if_ethersubr.c,v 1.193.2.12 2006/08/28 02:54:14 thompsa Exp $
*/
#include <stdio.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#if 0
/*
@@ -116,3 +120,31 @@ ether_sprintf(const u_char *ap)
(unsigned)ap[3], (unsigned)ap[4], (unsigned)ap[5]);
return (etherbuf);
}
struct mbuf *
ether_vlanencap(struct mbuf *m, uint16_t tag)
{
struct ether_vlan_header *evl;
M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
if (m == NULL)
return (NULL);
/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
if (m->m_len < sizeof(*evl)) {
m = m_pullup(m, sizeof(*evl));
if (m == NULL)
return (NULL);
}
/*
* Transform the Ethernet header into an Ethernet header
* with 802.1Q encapsulation.
*/
evl = mtod(m, struct ether_vlan_header *);
bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
(char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
evl->evl_tag = htons(tag);
return (m);
}
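
On the transmit side, drivers that cannot tag in hardware typically call this just before handing the chain to the NIC. A hedged sketch, assuming the compat layer carries FreeBSD's M_VLANTAG mbuf flag:

/* Hedged sketch: software VLAN insertion when the NIC cannot tag. */
static struct mbuf *
vlan_fixup(struct mbuf *m)
{
	if ((m->m_flags & M_VLANTAG) != 0) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL)
			return (NULL);	/* chain was freed on failure */
		m->m_flags &= ~M_VLANTAG;
	}
	return (m);
}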

View File

@@ -0,0 +1,66 @@
/*-
* Copyright (c) 2007, 2008 Kip Macy <kmacy@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define FBSD_DRIVER
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/buf_ring.h>
struct buf_ring *
buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *lock)
{
struct buf_ring *br;
KASSERT(powerof2(count), ("buf ring must be size power of 2"));
br = malloc(sizeof(struct buf_ring) + count*sizeof(caddr_t),
type, flags|M_ZERO);
if (br == NULL)
return (NULL);
#ifdef DEBUG_BUFRING
br->br_lock = lock;
#endif
br->br_prod_size = br->br_cons_size = count;
br->br_prod_mask = br->br_cons_mask = count-1;
br->br_prod_head = br->br_cons_head = 0;
br->br_prod_tail = br->br_cons_tail = 0;
return (br);
}
void
buf_ring_free(struct buf_ring *br, struct malloc_type *type)
{
free(br, type);
}
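
The powerof2() assertion exists because the ring indexes wrap with a mask rather than a modulo; a hedged one-liner illustration (values hypothetical):

/* With count = 8 the mask is 7: (7 + 1) & 7 == 0, so the index
 * wraps with a single AND; a non-power-of-two count would skip
 * or repeat slots. */
static uint32_t
ring_next(uint32_t head, uint32_t mask)
{
	return ((head + 1) & mask);
}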

View File

@@ -1,15 +0,0 @@
/*
* Copyright 2009, Colin Günther, coling@gmx.de.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <compat/sys/libkern.h>
uint32_t
arc4random(void)
{
return random();
}

View File

@@ -0,0 +1,29 @@
/*
* Copyright 2009, Colin Günther, coling@gmx.de. All rights reserved.
* Copyright 2018, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*/
extern "C" {
#include <compat/sys/libkern.h>
}
#include <util/Random.h>
u_int
read_random(void* buf, u_int len)
{
uint8* bufs = (uint8*)buf;
for (int i = 0; i < len; i++)
bufs[i] = secure_get_random<uint8>();
return len;
}
uint32_t
arc4random(void)
{
return random();
}
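
A hedged usage sketch; the key buffer is hypothetical:

static void
make_key(uint8_t key[16])
{
	/* read_random() always fills the full requested length here. */
	read_random(key, 16);
}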

View File

@@ -3,9 +3,12 @@
* All rights reserved. Distributed under the terms of the MIT License.
*/
extern "C" {
#include <compat/sys/systm.h>
#include <compat/sys/kernel.h>
}
#include <thread.h>
int
@@ -16,6 +19,20 @@ _pause(const char* waitMessage, int timeout)
}
void
critical_enter(void)
{
thread_pin_to_current_cpu(thread_get_current_thread());
}
void
critical_exit(void)
{
thread_unpin_from_current_cpu(thread_get_current_thread());
}
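
These two are what buf_ring.h wraps around its compare-and-swap windows; under this port they pin the calling thread to its current CPU rather than disabling preemption. A hedged sketch of the intended pairing (the body comment stands in for real work):

static void
lockfree_window(void)
{
	critical_enter();	/* stay on this CPU for the CAS window */
	/* ... short lock-free update, e.g. the loops in buf_ring.h ... */
	critical_exit();
}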
void
freeenv(char *env)
{