freebsd_network: Completely overhaul bus_dma implementation.
The old implementation was based on an ancient copy of the FreeBSD busdma code for x86, and did not make a bunch of assumptions that we make basically everywhere else (for instance, that we can request arbitrarily-aligned contiguous physical memory from the VM.)

As a consequence, it had a significant amount of code devoted to bounce pages, which are just a waste of resources on x86, and for that matter, probably any other architecture Haiku will ever be ported to. (Even if we do need to run on some system where only a small portion of system memory can be accessed by devices, likely we would reserve that memory for just this occasion anyway.)

I was initially under the impression that the bounce-pages code never turned on, but apparently due to the "alignment" check (and also the "Maxmem" check, which was defined to 32KB...?!) it does indeed activate on a variety of systems, and maybe (in the case of drivers that do not call sync() properly) is even the cause of some of our ported driver breakage.

The new implementation is pretty much optimized for Haiku, and shares almost no code or structure with the old one (save for a few functions that really only have one proper implementation.)

Tested with ipro1000 and rtl81xx. Regressions are more than possible, so please don't hesitate to file bugs if your network driver now fails to come up (or you get KDLs.)
commit 26b95c15f2
parent 56cb682b80
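For reference (not part of this commit): the diff below reimplements the FreeBSD busdma interface that ported drivers call. As a reminder of what that interface looks like from the driver side, here is a minimal, hedged sketch in C of allocating and loading a descriptor ring through the calls implemented in bus_dma.cpp. The struct my_ring layout, RING_SIZE, and the choice of BUS_SPACE_MAXADDR_32BIT as the exclusion bound are illustrative assumptions, not code from the Haiku tree.

/* Sketch: how a ported NIC driver typically uses the busdma calls
 * implemented in this commit. Illustrative names are not from the tree. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

#define RING_SIZE	4096		/* illustrative descriptor-ring size */

struct my_ring {			/* illustrative driver state */
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	void*		vaddr;		/* kernel address of the ring */
	bus_addr_t	paddr;		/* bus address filled in by the callback */
};

static void
ring_load_cb(void* arg, bus_dma_segment_t* segs, int nseg, int error)
{
	/* nsegments was 1, so on success there is exactly one segment. */
	if (error == 0 && nseg == 1)
		*(bus_addr_t*)arg = segs[0].ds_addr;
}

static int
alloc_ring(bus_dma_tag_t parent, struct my_ring* ring)
{
	int error;

	/* lowaddr/highaddr form an exclusion range: BUS_SPACE_MAXADDR_32BIT
	 * excludes memory above 4GB, so bus_dmamem_alloc() tries [0, 4GB) first. */
	error = bus_dma_tag_create(parent,
		PAGE_SIZE, 0,				/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
		NULL, NULL,				/* filter: must be NULL (panics otherwise) */
		RING_SIZE, 1, RING_SIZE,		/* maxsize, nsegments, maxsegsz */
		0, NULL, NULL,				/* flags, lockfunc, lockfuncarg */
		&ring->tag);
	if (error != 0)
		return error;

	error = bus_dmamem_alloc(ring->tag, &ring->vaddr,
		BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->map);
	if (error != 0) {
		bus_dma_tag_destroy(ring->tag);
		return error;
	}

	/* map is always NULL in this implementation, but drivers pass it anyway. */
	return bus_dmamap_load(ring->tag, ring->map, ring->vaddr, RING_SIZE,
		ring_load_cb, &ring->paddr, BUS_DMA_NOWAIT);
}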
@@ -10,10 +10,11 @@ UseHeaders $(HAIKU_PRIVATE_KERNEL_HEADERS) : true ;
 Includes [ FGristFiles kernel_c++_structs.h ]
 	: <src!system!kernel>kernel_c++_struct_sizes.h ;
 
-SubDirCcFlags [ FDefines _KERNEL=1 _XOPEN_SOURCE ] ;
+SubDirCcFlags [ FDefines _KERNEL=1 ] ;
 
 KernelStaticLibrary libfreebsd_network.a :
 	bus.cpp
+	bus_dma.cpp
 	callout.cpp
 	clock.c
 	condvar.c
@@ -22,7 +23,6 @@ KernelStaticLibrary libfreebsd_network.a :
 	device_hooks.c
 	driver.c
 	eventhandler.c
-	fbsd_busdma_x86.c
 	fbsd_ether.c
 	fbsd_if_media.c
 	fbsd_kern_mbuf.c
src/libs/compat/freebsd_network/bus_dma.cpp (new file, 380 lines)
@@ -0,0 +1,380 @@
/*
 * Copyright 2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *	Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
}


// #pragma mark - structures


struct bus_dma_tag {
	bus_dma_tag_t parent;
	phys_size_t alignment;
	phys_addr_t boundary;
	phys_addr_t lowaddr;
	phys_addr_t highaddr;
	bus_dma_filter_t* filter;
	void* filterarg;
	phys_size_t maxsize;
	uint32 nsegments;
	phys_size_t maxsegsz;
	int32 ref_count;
	bus_dma_segment_t* segments;
};


// #pragma mark - functions


void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
		case BUS_DMA_LOCK:
			mtx_lock(dmtx);
			break;
		case BUS_DMA_UNLOCK:
			mtx_unlock(dmtx);
			break;
		default:
			panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}


int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	*dmat = NULL;

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->ref_count = 1;

	newtag->segments = (bus_dma_segment_t*)kernel_malloc(
		sizeof(bus_dma_segment_t) * newtag->nsegments, M_DEVBUF,
		M_NOWAIT);
	if (newtag->segments == NULL) {
		// free the tag itself, not the caller's out-pointer
		kernel_free(newtag, M_DEVBUF);
		return ENOMEM;
	}

	if (newtag->parent != NULL) {
		atomic_add(&parent->ref_count, 1);

		newtag->lowaddr = max_c(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = min_c(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = min_c(parent->boundary, newtag->boundary);
		}

		if (newtag->filter == NULL) {
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
		}
	}

	if (newtag->filter != NULL)
		panic("bus_dma_tag_create: error: filters not implemented!");

	*dmat = newtag;
	return 0;
}


int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;

	while (dmat != NULL) {
		bus_dma_tag_t parent;

		parent = dmat->parent;
		atomic_add(&dmat->ref_count, -1);
		if (dmat->ref_count == 0) {
			kernel_free(dmat->segments, M_DEVBUF);
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}


int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	// We never bounce, so we do not need maps.
	*mapp = NULL;
	return 0;
}


int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	// We never create maps, so we never need to destroy them.
	if (map)
		panic("map is not NULL!");
	return 0;
}


int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	// We never need to map/bounce.
	*mapp = NULL;

	// FreeBSD uses standard malloc() for the case where maxsize <= PAGE_SIZE,
	// however, our malloc() has no guarantees that the allocated memory will
	// not be swapped out, which obviously is a requirement here. So we must
	// always use kernel_contigmalloc().

	// The range specified by lowaddr, highaddr is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only,
	// and ones other than that are very rare, so typically this will
	// succeed.)
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)dmat->maxsize, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free(dmat, *vaddr, *mapp);
		return ENOMEM;
	}
	return 0;
}


void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	// We never bounce, so map should be NULL.
	if (map != NULL)
		panic("bus_dmamem_free: map is not NULL!");

	kernel_contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}


static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t /* map */, void* buf,
	bus_size_t buflen, int flags, bus_addr_t* lastaddrp, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	bus_addr_t last_phys_addr = *lastaddrp;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = B_PAGE_SIZE - (phys_addr & (B_PAGE_SIZE - 1));
		if (segment_size > buflen)
			segment_size = buflen;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary) & boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// Insert chunk into a segment.
		if (first) {
			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
			first = false;
		} else {
			// If possible, coalesce into the previous segment.
			if (phys_addr == last_phys_addr
					&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
					&& (dmat->boundary == 0
						|| (segs[seg].ds_addr & boundary_mask)
							== (phys_addr & boundary_mask))) {
				segs[seg].ds_len += segment_size;
			} else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = phys_addr;
				segs[seg].ds_len = segment_size;
			}
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	*lastaddrp = last_phys_addr;
	return (buflen != 0 ? EFBIG : 0);
}


int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
	bus_size_t buflen, bus_dmamap_callback_t *callback,
	void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags,
		&lastaddr, dmat->segments, nsegs, true);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	// ENOMEM is returned; all other errors are only sent to the callback.
	if (error == ENOMEM)
		return error;
	return 0;
}


int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	M_ASSERTPKTHDR(mb);

	int nsegs = 0, error = 0;
	if (mb->m_pkthdr.len <= dmat->maxsize) {
		bool first = true;
		bus_addr_t lastaddr = 0;
		for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len <= 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
				flags, &lastaddr, dmat->segments, nsegs, first);
			first = false;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments, nsegs + 1, mb->m_pkthdr.len,
			error);
	}
	return error;
}


int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	*nsegs = 0;
	int error = 0;
	if (mb->m_pkthdr.len <= dmat->maxsize) {
		bool first = true;
		bus_addr_t lastaddr = 0;

		for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len <= 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
				flags, &lastaddr, segs, *nsegs, first);
			first = false;
		}
	} else {
		error = EINVAL;
	}

	++*nsegs;
	return error;
}


void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	// We never allocate bounce pages; nothing to do.
}


void
_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t)
{
	// We never bounce; nothing to do.
}
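For reference (not part of this commit): bus_dmamap_load_mbuf_sg() above is the hot path on transmit for most ported NIC drivers. The C sketch below shows the usual calling pattern, including the EFBIG retry. MY_MAX_SEGS, queue_tx_packet() and the use of m_defrag() from the compat mbuf code are illustrative assumptions; older mbuf headers spell the wait flag M_DONTWAIT rather than M_NOWAIT.

/* Sketch: typical transmit-side use of bus_dmamap_load_mbuf_sg(). */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <machine/bus.h>

#define MY_MAX_SEGS	16	/* illustrative; must match the TX tag's nsegments */

static int
queue_tx_packet(bus_dma_tag_t tx_tag, bus_dmamap_t tx_map, struct mbuf** m_head)
{
	bus_dma_segment_t segs[MY_MAX_SEGS];
	int error, nsegs;

	error = bus_dmamap_load_mbuf_sg(tx_tag, tx_map, *m_head, segs, &nsegs,
		BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* The chain needed more than MY_MAX_SEGS segments:
		 * compact it and retry once (m_defrag() assumed available). */
		struct mbuf* m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL)
			return ENOBUFS;
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(tx_tag, tx_map, *m_head, segs,
			&nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return error;

	/* ... program segs[0 .. nsegs - 1] into the hardware TX descriptors ... */

	/* Still required even though _bus_dmamap_sync() is now a no-op here;
	 * skipping it is exactly the driver bug the commit message mentions. */
	bus_dmamap_sync(tx_tag, tx_map, BUS_DMASYNC_PREWRITE);
	return 0;
}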
@@ -184,13 +184,6 @@ typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
  */
 typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
 
-/*
- * XXX sparc64 uses the same interface, but a much different implementation.
- *     <machine/bus_dma.h> for the sparc64 arch contains the equivalent
- *     declarations.
- */
-#if !defined(__sparc64__)
-
 /*
  * Allocate a handle for mapping from kva/uva/physical
  * address space into bus device space.
@@ -266,6 +259,5 @@ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map);
 		_bus_dmamap_unload(dmat, dmamap);	\
 	} while (0)
 
-#endif /* __sparc64__ */
 
 #endif /* _BUS_DMA_H_ */
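For reference (not part of this commit): the commit message singles out drivers that "do not call sync() properly". With this change _bus_dmamap_sync() compiles to nothing (there are no bounce buffers left to copy), but the calls remain mandatory for any backend that does bounce or flush. A hedged C sketch of the receive-completion pattern; the function and parameter names are illustrative.

/* Sketch: receive-completion path showing where the sync/unload calls belong. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <machine/bus.h>

static struct mbuf*
rx_complete(bus_dma_tag_t rx_tag, bus_dmamap_t rx_map, struct mbuf* m, int len)
{
	/* Tell busdma the device has finished writing before the CPU reads
	 * the buffer; a no-op with this implementation, but still required. */
	bus_dmamap_sync(rx_tag, rx_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rx_tag, rx_map);

	m->m_len = m->m_pkthdr.len = len;
	return m;
}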
@@ -30,9 +30,6 @@
 #define ptoa(x)	((unsigned long)((x) << PAGE_SHIFT))
 #define atop(x)	((unsigned long)((x) >> PAGE_SHIFT))
 
-/* MAJOR FIXME */
-#define Maxmem (32768)
-
 #ifndef MSIZE
 #define MSIZE 256
 #endif
@@ -76,10 +76,6 @@ void uninit_callout(void);
 
 device_t find_root_device(int);
 
-/* busdma_machdep.c */
-void init_bounce_pages(void);
-void uninit_bounce_pages(void);
-
 void driver_printf(const char *format, ...)
 	__attribute__ ((format (__printf__, 1, 2)));
 int driver_vprintf(const char *format, va_list vl);
@@ -193,8 +193,6 @@ _fbsd_init_drivers(driver_t *drivers[])
 	if (status < B_OK)
 		goto err4;
 
-	init_bounce_pages();
-
 	if (HAIKU_DRIVER_REQUIRES(FBSD_TASKQUEUES)) {
 		status = init_taskqueues();
 		if (status < B_OK)
@@ -244,7 +242,6 @@ err6:
 	if (HAIKU_DRIVER_REQUIRES(FBSD_TASKQUEUES))
 		uninit_taskqueues();
 err5:
-	uninit_bounce_pages();
 	uninit_callout();
 err4:
 	uninit_mbufs();
@@ -281,7 +278,6 @@ _fbsd_uninit_drivers(driver_t *drivers[])
 	uninit_sysinit();
 	if (HAIKU_DRIVER_REQUIRES(FBSD_TASKQUEUES))
 		uninit_taskqueues();
-	uninit_bounce_pages();
 	uninit_callout();
 	uninit_mbufs();
 	uninit_mutexes();
File diff suppressed because it is too large