1) rename libdrv -> libddk

2) thread-safe malloc
3) Linux dma_pool_*

git-svn-id: svn://kolibrios.org@1616 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2010-09-13 20:07:22 +00:00
parent ff13f0e607
commit f45d6bef85
13 changed files with 2413 additions and 4099 deletions

View File

@@ -1,6 +1,4 @@
CC = gcc
AS = as
@@ -11,7 +9,7 @@ INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/as
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32
CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -fomit-frame-pointer -fno-builtin-printf
NAME:= libdrv
NAME:= libddk
CORE_SRC= core.S
@@ -25,6 +23,7 @@ NAME_SRCS:= \
linux/idr.c \
linux/firmware.c \
linux/list_sort.c \
linux/dmapool.c \
malloc/malloc.c \
stdio/icompute.c \
stdio/vsprintf.c \

View File

@@ -21,6 +21,7 @@
.global _GetEvent
.global _GetPgAddr
.global _GetService
.global _GetTimerTicks
.global _KernelAlloc
.global _KernelFree
@@ -68,6 +69,7 @@
.def _GetEvent; .scl 2; .type 32; .endef
.def _GetPgAddr; .scl 2; .type 32; .endef
.def _GetService; .scl 2; .type 32; .endef
.def _GetTimerTicks; .scl 2; .type 32; .endef
.def _KernelAlloc; .scl 2; .type 32; .endef
.def _KernelFree; .scl 2; .type 32; .endef
@@ -117,6 +119,7 @@ _GetDisplay:
_GetEvent:
_GetPgAddr:
_GetService:
_GetTimerTicks:
_KernelAlloc:
_KernelFree:
@@ -167,6 +170,7 @@ _WaitEvent:
.ascii " -export:GetEvent" #
.ascii " -export:GetPgAddr" # stdcall
.ascii " -export:GetService" # stdcall
.ascii " -export:GetTimerTicks" #
.ascii " -export:KernelAlloc" # stdcall
.ascii " -export:KernelFree" # stdcall

View File

@@ -1,5 +1,6 @@
#include <types.h>
#include <ddk.h>
#include <mutex.h>
#include <syscall.h>
#pragma pack(push, 1)

drivers/ddk/linux/dmapool.c (new file, 318 lines)
View File

@@ -0,0 +1,318 @@
/*
* DMA Pool allocator
*
* Copyright 2001 David Brownell
* Copyright 2007 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
*
* This software may be redistributed and/or modified under the terms of
* the GNU General Public License ("GPL") version 2 as published by the
* Free Software Foundation.
*
* This allocator returns small blocks of a given size which are DMA-able by
* the given device. It uses the dma_alloc_coherent page allocator to get
* new pages, then splits them up into blocks of the required size.
* Many older drivers still have their own code to do this.
*
* The current design of this allocator is fairly simple. The pool is
* represented by the 'struct dma_pool' which keeps a doubly-linked list of
* allocated pages. Each page in the page_list is split into blocks of at
* least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
* list of free blocks within the page. Used blocks aren't tracked, but we
* keep a count of how many are currently allocated from each page.
*/
#include <ddk.h>
#include <linux/mutex.h>
#include <syscall.h>
struct dma_pool { /* the pool */
struct list_head page_list;
struct mutex lock;
size_t size;
size_t allocation;
size_t boundary;
struct list_head pools;
};
struct dma_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
unsigned int in_use;
unsigned int offset;
};
static DEFINE_MUTEX(pools_lock);
/**
* dma_pool_create - Creates a pool of consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
* @dev: device that will be doing the DMA
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @boundary: returned blocks won't cross this power of two boundary
* Context: !in_interrupt()
*
* Returns a dma allocation pool with the requested characteristics, or
* null if one can't be created. Given one of these pools, dma_pool_alloc()
* may be used to allocate memory. Such memory will all have "consistent"
* DMA mappings, accessible by the device and its driver without using
* cache flushing primitives. The actual size of blocks allocated may be
* larger than requested because of alignment.
*
* If @boundary is nonzero, objects returned from dma_pool_alloc() won't
* cross that size boundary. This is useful for devices which have
* addressing restrictions on individual DMA transfers, such as not crossing
* boundaries of 4KBytes.
*/
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t boundary)
{
struct dma_pool *retval;
size_t allocation;
if (align == 0) {
align = 1;
} else if (align & (align - 1)) {
return NULL;
}
if (size == 0) {
return NULL;
} else if (size < 4) {
size = 4;
}
if ((size % align) != 0)
size = ALIGN(size, align);
allocation = max_t(size_t, size, PAGE_SIZE);
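/* Round the pool allocation up to a 32 KiB multiple (0x8000), presumably
   the granularity KernelAlloc works in; 0x7FFF is 0x8000 - 1. */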
allocation = (allocation+0x7FFF) & ~0x7FFF;
if (!boundary) {
boundary = allocation;
} else if ((boundary < size) || (boundary & (boundary - 1))) {
return NULL;
}
retval = kmalloc(sizeof(*retval), GFP_KERNEL);
if (!retval)
return retval;
INIT_LIST_HEAD(&retval->page_list);
// spin_lock_init(&retval->lock);
retval->size = size;
retval->boundary = boundary;
retval->allocation = allocation;
INIT_LIST_HEAD(&retval->pools);
return retval;
}
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
unsigned int offset = 0;
unsigned int next_boundary = pool->boundary;
do {
unsigned int next = offset + pool->size;
if (unlikely((next + pool->size) >= next_boundary)) {
next = next_boundary;
next_boundary += pool->boundary;
}
*(int *)(page->vaddr + offset) = next;
offset = next;
} while (offset < pool->allocation);
}
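/*
 * Illustrative walk-through (editor's note, not in the original source):
 * each free block's first word stores the offset of the next free block.
 * With size = 64 and boundary = 256 the chain runs 0 -> 64 -> 128 -> 256
 * -> 320 -> ...: the chain jumps from 128 straight to the boundary at 256
 * (conservatively skipping 192), guaranteeing that no returned block
 * straddles a 256-byte boundary.
 */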
static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
struct dma_page *page;
page = malloc(sizeof(*page));
if (!page)
return NULL;
page->vaddr = (void*)KernelAlloc(pool->allocation);
dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr);
if (page->vaddr)
{
page->dma = GetPgAddr(page->vaddr);
dbgprintf("dma 0x%0x\n", page->dma);
pool_initialise_page(pool, page);
list_add(&page->page_list, &pool->page_list);
page->in_use = 0;
page->offset = 0;
} else {
free(page);
page = NULL;
}
return page;
}
static inline int is_page_busy(struct dma_page *page)
{
return page->in_use != 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
KernelFree(page->vaddr);
list_del(&page->page_list);
free(page);
}
/**
* dma_pool_destroy - destroys a pool of dma memory blocks.
* @pool: dma pool that will be destroyed
* Context: !in_interrupt()
*
* Caller guarantees that no more memory from the pool is in use,
* and that nothing will try to use the pool after this call.
*/
void dma_pool_destroy(struct dma_pool *pool)
{
mutex_lock(&pools_lock);
list_del(&pool->pools);
mutex_unlock(&pools_lock);
while (!list_empty(&pool->page_list)) {
struct dma_page *page;
page = list_entry(pool->page_list.next,
struct dma_page, page_list);
if (is_page_busy(page))
{
printk(KERN_ERR "dma_pool_destroy %p busy\n",
page->vaddr);
/* leak the still-in-use consistent memory */
list_del(&page->page_list);
kfree(page);
} else
pool_free_page(pool, page);
}
kfree(pool);
}
/**
* dma_pool_alloc - get a block of consistent memory
* @pool: dma pool that will produce the block
* @mem_flags: GFP_* bitmask
* @handle: pointer to dma address of block
*
* This returns the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, %NULL is returned.
*/
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
u32_t efl;
struct dma_page *page;
size_t offset;
void *retval;
efl = safe_cli();
list_for_each_entry(page, &pool->page_list, page_list) {
if (page->offset < pool->allocation)
goto ready;
}
page = pool_alloc_page(pool);
if (!page)
{
retval = NULL;
goto done;
}
ready:
page->in_use++;
offset = page->offset;
page->offset = *(int *)(page->vaddr + offset);
retval = offset + page->vaddr;
*handle = offset + page->dma;
done:
safe_sti(efl);
return retval;
}
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
struct dma_page *page;
u32_t efl;
efl = safe_cli();
list_for_each_entry(page, &pool->page_list, page_list) {
if (dma < page->dma)
continue;
if (dma < (page->dma + pool->allocation))
goto done;
}
page = NULL;
done:
safe_sti(efl);
return page;
}
/**
* dma_pool_free - put block back into dma pool
* @pool: the dma pool holding the block
* @vaddr: virtual address of block
* @dma: dma address of block
*
* Caller promises neither device nor driver will again touch this block
* unless it is first re-allocated.
*/
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
struct dma_page *page;
unsigned int offset;
u32_t efl;
page = pool_find_page(pool, dma);
if (!page) {
printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
vaddr, (unsigned long)dma);
return;
}
offset = vaddr - page->vaddr;
efl = safe_cli();
{
page->in_use--;
*(int *)vaddr = page->offset;
page->offset = offset;
/*
* Resist a temptation to do
* if (!is_page_busy(page)) pool_free_page(pool, page);
* Better have a few empty pages hang around.
*/
}
safe_sti(efl);
}
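For orientation, a minimal usage sketch of the pool API added above (editor's example, not part of the commit; it uses only functions defined in this file and passes mem_flags = 0 exactly as the UHCI code below does):

    struct dma_pool *pool;
    dma_addr_t dma;
    void *block;

    pool = dma_pool_create("example", NULL, 32, 16, 0);  /* 32-byte blocks, 16-byte aligned */
    if (pool)
    {
        block = dma_pool_alloc(pool, 0, &dma);   /* dma receives the bus address */
        if (block)
            dma_pool_free(pool, block, dma);     /* return the block...          */
        dma_pool_destroy(pool);                  /* ...before destroying the pool */
    }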

File diff suppressed because it is too large.

drivers/include/ddk.h (new file, 56 lines)
View File

@@ -0,0 +1,56 @@
#ifndef __DDK_H__
#define __DDK_H__
#include <kernel.h>
#define OS_BASE 0x80000000
#define PG_SW 0x003
#define PG_NOCACHE 0x018
#define MANUAL_DESTROY 0x80000000
typedef struct
{
u32_t code;
u32_t data[5];
}kevent_t;
typedef union
{
struct
{
u32_t handle;
u32_t euid;
};
u64_t raw;
}evhandle_t;
typedef struct
{
u32_t handle;
u32_t io_code;
void *input;
int inp_size;
void *output;
int out_size;
}ioctl_t;
typedef int (__stdcall *srv_proc_t)(ioctl_t *);
#define ERR_OK 0
#define ERR_PARAM -1
struct ddk_params;
int ddk_init(struct ddk_params *params);
u32_t drvEntry(int, char *)__asm__("_drvEntry");
#endif /* __DDK_H__ */
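For context, a hypothetical handler with the srv_proc_t shape (the name my_srv and the version reply are illustrative; compare srv_usb in the USB driver below):

    static int __stdcall my_srv(ioctl_t *io)
    {
        if (io->out_size < 4)            /* caller must supply a 4-byte buffer */
            return ERR_PARAM;
        *(u32_t*)io->output = 0x0100;    /* e.g. report a driver API version */
        return ERR_OK;
    }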

View File

@@ -0,0 +1,26 @@
/*
* include/linux/dmapool.h
*
* Allocation pools for DMAable (coherent) memory.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
#endif

View File

@@ -0,0 +1,86 @@
/*
* Mutexes: blocking mutual exclusion locks
*
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* This file contains the main data structure and API definitions.
*/
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H
#include <kernel.h>
#include <linux/list.h>
#include <asm/atomic.h>
/*
* Simple, straightforward mutexes with strict semantics:
*
* - only one task can hold the mutex at a time
* - only the owner can unlock the mutex
* - multiple unlocks are not permitted
* - recursive locking is not permitted
* - a mutex object must be initialized via the API
* - a mutex object must not be initialized via memset or copying
* - task may not exit with mutex held
* - memory areas where held locks reside must not be freed
* - held mutexes must not be reinitialized
* - mutexes may not be used in hardware or software interrupt
* contexts such as tasklets and timers
*
* These semantics are fully enforced when DEBUG_MUTEXES is
* enabled. Furthermore, besides enforcing the above rules, the mutex
* debugging code also implements a number of additional features
* that make lock debugging easier and faster:
*
* - uses symbolic names of mutexes, whenever they are printed in debug output
* - point-of-acquire tracking, symbolic lookup of function names
* - list of all locks held in the system, printout of them
* - owner tracking
* - detects self-recursing locks and prints out all relevant info
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
struct mutex {
/* 1: unlocked, 0: locked, negative: locked, possible waiters */
atomic_t count;
struct list_head wait_list;
};
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
*/
struct mutex_waiter {
struct list_head list;
int *task;
};
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) }
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_init(struct mutex*)__asm__("MutexInit");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_lock(struct mutex*)__asm__("MutexLock");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
mutex_unlock(struct mutex*)__asm__("MutexUnlock");
/**
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
static inline int mutex_is_locked(struct mutex *lock)
{
return atomic_read(&lock->count) != 1;
}
#endif
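A minimal usage sketch of these wrappers (editor's example; the fastcall stubs above bind to the kernel's MutexInit/MutexLock/MutexUnlock exports):

    static DEFINE_MUTEX(dev_lock);      /* statically initialized: count = 1 */

    static void touch_shared_state(void)
    {
        mutex_lock(&dev_lock);          /* blocks; not usable from interrupt context */
        /* ... critical section ... */
        mutex_unlock(&dev_lock);        /* only the owner may unlock */
    }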

View File

@@ -2,33 +2,6 @@
#ifndef __SYSCALL_H__
#define __SYSCALL_H__
#define OS_BASE 0x80000000
typedef struct
{
u32_t code;
u32_t data[5];
}kevent_t;
typedef struct
{
u32_t handle;
u32_t io_code;
void *input;
int inp_size;
void *output;
int out_size;
}ioctl_t;
typedef int (__stdcall *srv_proc_t)(ioctl_t *);
#define ERR_OK 0
#define ERR_PARAM -1
u32_t drvEntry(int, char *)__asm__("_drvEntry");
///////////////////////////////////////////////////////////////////////////////
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport))
@@ -40,14 +13,11 @@ u32_t drvEntry(int, char *)__asm__("_drvEntry");
#define SysMsgBoardStr __SysMsgBoardStr
#define PciApi __PciApi
//#define RegService __RegService
#define CreateObject __CreateObject
#define DestroyObject __DestroyObject
///////////////////////////////////////////////////////////////////////////////
#define PG_SW 0x003
#define PG_NOCACHE 0x018
void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace");
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace");
@@ -59,6 +29,7 @@ int STDCALL UserFree(void *mem)__asm__("UserFree");
void* STDCALL GetDisplay(void)__asm__("GetDisplay");
u32_t IMPORT GetTimerTicks(void)__asm__("GetTimerTicks");
addr_t STDCALL AllocPage(void)__asm__("AllocPage");
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
@@ -78,8 +49,6 @@ void FASTCALL MutexUnlock(struct mutex*)__asm__("MutexUnlock");
void STDCALL SetMouseData(int btn, int x, int y,
int z, int h)__asm__("SetMouseData");
static u32_t PciApi(int cmd);
u8_t STDCALL PciRead8 (u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead8");
u16_t STDCALL PciRead16(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead16");
u32_t STDCALL PciRead32(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead32");
@@ -114,23 +83,52 @@ int dbgprintf(const char* format, ...);
///////////////////////////////////////////////////////////////////////////////
static inline u32_t CreateEvent(kevent_t *ev, u32_t flags, u32_t *uid)
static inline evhandle_t CreateEvent(kevent_t *ev, u32_t flags)
{
u32_t handle;
u32_t euid;
evhandle_t evh;
__asm__ __volatile__ (
"call *__imp__CreateEvent"
:"=a"(handle),"=d"(euid)
:"S" (ev), "c"(flags));
:"=A"(evh.raw)
:"S" (ev), "c"(flags)
:"memory");
__asm__ __volatile__ ("":::"ebx","ecx", "esi", "edi");
if(uid) *uid = euid;
return evh;
};
static inline void RaiseEvent(evhandle_t evh, u32_t flags, kevent_t *ev)
{
__asm__ __volatile__ (
"call *__imp__RaiseEvent"
::"a"(evh.handle),"b"(evh.euid),"d"(flags),"S" (ev)
:"memory");
__asm__ __volatile__ ("":::"ebx","ecx", "esi", "edi");
};
static inline void WaitEvent(u32_t handle, u32_t euid)
{
__asm__ __volatile__ (
"call *__imp__WaitEvent"
::"a"(handle),"b"(euid));
__asm__ __volatile__ ("":::"ecx","edx", "esi");
};
static inline u32_t GetEvent(kevent_t *ev)
{
u32_t handle;
__asm__ __volatile__ (
"call *__imp__GetEvent"
:"=a"(handle)
:"D"(ev)
:"memory");
__asm__ __volatile__ ("":::"ebx","ecx","edx", "esi","edi");
return handle;
};
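/*
 * Editor's sketch (not in the commit): a plausible round trip with the
 * new evhandle_t API, assuming the usual create/raise/wait flow:
 *
 *   kevent_t   ev;
 *   evhandle_t evh = CreateEvent(&ev, 0);   // flags, e.g. MANUAL_DESTROY
 *   RaiseEvent(evh, 0, &ev);                // signal it, e.g. from an ISR
 *   WaitEvent(evh.handle, evh.euid);        // block until raised
 */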
static inline int GetScreenSize(void)
{
int retval;
@@ -238,10 +236,11 @@ static inline u32_t __PciApi(int cmd)
u32_t retval;
__asm__ __volatile__ (
"call *__imp__PciApi"
"call *__imp__PciApi \n\t"
"movzxb %%al, %%eax"
:"=a" (retval)
:"a" (cmd)
:"memory");
:"ebx","ecx","edx");
return retval;
};
@@ -294,13 +293,10 @@ static inline u32_t safe_cli(void)
return ifl;
}
static inline void safe_sti(u32_t ifl)
static inline void safe_sti(u32_t efl)
{
__asm__ __volatile__ (
"pushl %0\n\t"
"popf\n"
: : "r" (ifl)
);
if (efl & (1<<9))
__asm__ __volatile__ ("sti");
}
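/*
 * Pairing pattern, as used in dmapool.c above: safe_cli() saves EFLAGS and
 * disables interrupts; safe_sti() re-enables them only if IF (bit 9) was
 * set before, so nested critical sections restore the correct state:
 *
 *   u32_t efl = safe_cli();
 *   ... short critical section ...
 *   safe_sti(efl);
 */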
static inline u32_t get_eflags(void)
@@ -317,7 +313,6 @@ static inline void __clear (void * dst, unsigned len)
{
u32_t tmp;
__asm__ __volatile__ (
// "xorl %%eax, %%eax \n\t"
"cld \n\t"
"rep stosb \n"
:"=c"(tmp),"=D"(tmp)
@@ -411,6 +406,9 @@ static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
addr_t *dma_handle)
{
size = (size + 0x7FFF) & ~0x7FFF;
*dma_handle = AllocPages(size >> 12);
return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE);
}
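/* Worked example of the rounding above (editor's note): a request for
   size = 0x2100 becomes (0x2100 + 0x7FFF) & ~0x7FFF = 0x8000, i.e. 32 KiB,
   and AllocPages(0x8000 >> 12) then asks for 8 pages. */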

View File

@@ -216,6 +216,13 @@ bool init_hc(hc_t *hc)
hc->frame_dma = GetPgAddr(hc->frame_base);
hc->frame_number = 0;
hc->td_pool = dma_pool_create("uhci_td", NULL,
sizeof(td_t), 16, 0);
if (!hc->td_pool)
{
dbgprintf("unable to create td dma_pool\n");
goto err_create_td_pool;
}
for (i = 0; i < UHCI_NUM_SKELQH; i++)
{
@@ -336,6 +343,12 @@ bool init_hc(hc_t *hc)
};
};
return true;
err_create_td_pool:
KernelFree(hc->frame_base);
return false;
};
u16_t __attribute__((aligned(16)))
@@ -396,9 +409,10 @@ request_t *create_request(udev_t *dev, endp_t *enp, u32_t dir,
{
td_t *td, *td_prev;
addr_t data_dma;
hc_t *hc = dev->host;
size_t packet_size = enp->size;
size_t size = req_size;
addr_t td_dma;
request_t *rq = (request_t*)kmalloc(sizeof(request_t),0);
@@ -420,7 +434,9 @@
packet_size = size;
};
td = alloc_td();
td = dma_pool_alloc(hc->td_pool, 0, &td_dma);
td->dma = td_dma;
td->link = 1;
if(rq->td_head == NULL)
@@ -465,6 +481,10 @@ bool ctrl_request(udev_t *dev, void *req, u32_t pid,
td_t *td0, *td, *td_prev;
qh_t *qh;
addr_t data_dma = 0;
hc_t *hc = dev->host;
addr_t td_dma = 0;
bool retval;
@@ -476,7 +496,10 @@ bool ctrl_request(udev_t *dev, void *req, u32_t pid,
rq->size = req_size;
rq->dev = dev;
td0 = alloc_td();
td0 = dma_pool_alloc(hc->td_pool, 0, &td_dma);
td0->dma = td_dma;
dbgprintf("alloc td0 %x dma %x\n", td0, td_dma);
td0->status = 0x00800000 | dev->speed;
td0->token = TOKEN( 8, DATA0, 0, dev->addr, 0x2D);
@@ -495,7 +518,11 @@ bool ctrl_request(udev_t *dev, void *req, u32_t pid,
packet_size = size;
};
td = alloc_td();
td = dma_pool_alloc(hc->td_pool, 0, &td_dma);
td->dma = td_dma;
dbgprintf("alloc td %x dma %x\n", td, td->dma);
td_prev->link = td->dma | 4;
td->status = TD_CTRL_ACTIVE | dev->speed;
td->token = TOKEN(packet_size, toggle, 0,dev->addr, pid);
@@ -509,7 +536,11 @@ bool ctrl_request(udev_t *dev, void *req, u32_t pid,
toggle ^= DATA1;
}
td = alloc_td();
td = dma_pool_alloc(hc->td_pool, 0, &td_dma);
td->dma = td_dma;
dbgprintf("alloc td %x dma %x\n", td, td->dma);
td_prev->link = td->dma | 4;
pid = (pid == DIN) ? DOUT : DIN;
@@ -573,7 +604,7 @@ bool ctrl_request(udev_t *dev, void *req, u32_t pid,
do
{
td_prev = td->bk;
free_td(td);
dma_pool_free(hc->td_pool, td, td->dma);
td = td_prev;
}while( td != NULL);
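The converted call sites above use dma_pool_alloc() without checking for NULL; a defensive variant (editor's sketch; the err label stands for hypothetical cleanup code):

    td = dma_pool_alloc(hc->td_pool, 0, &td_dma);
    if (!td)
        goto err;          /* unwind any TDs allocated so far */
    td->dma = td_dma;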

View File

@@ -26,7 +26,7 @@ USB_SRC:= usb.c
USB_OBJ:= usb.obj
LIBS:= -ldrv -lcore
LIBS:= -lddk -lcore
USB = usb.dll

View File

@@ -1,24 +1,17 @@
#include <kernel.h>
#include <ddk.h>
#include <mutex.h>
#include <pci.h>
//#include <stdio.h>
//#include <malloc.h>
//#include <memory.h>
#include <linux/dmapool.h>
#include <syscall.h>
#include "usb.h"
int __stdcall srv_usb(ioctl_t *io);
bool init_hc(hc_t *hc);
static slab_t qh_slab;
static slab_t td_slab;
LIST_HEAD( hc_list );
LIST_HEAD( newdev_list );
@@ -63,30 +56,16 @@ u32_t drvEntry(int action, char *cmdline)
p->r1 = 0;
};
td_slab.available = 128;
td_slab.start = KernelAlloc(4096);
td_slab.nextavail = (addr_t)td_slab.start;
td_slab.dma = GetPgAddr(td_slab.start);
td_t *td;
for (i = 0, td = (td_t*)td_slab.start, dma = td_slab.dma;
i < 128; i++, td++, dma+= sizeof(td_t))
{
td->link = (addr_t)(td+1);
td->status = 0;
td->token = 0;
td->buffer = 0;
td->dma = dma;
};
hc = (hc_t*)hc_list.next;
while( &hc->list != &hc_list)
{
init_hc(hc);
hc_t *tmp = hc;
hc = (hc_t*)hc->list.next;
}
if( !init_hc(tmp))
list_del(&tmp->list);
};
dbgprintf("\n");
@@ -184,26 +163,6 @@ static void free_qh(qh_t *qh)
qh_slab.available++;
};
static td_t* alloc_td()
{
if( td_slab.available )
{
td_t *td;
td_slab.available--;
td = (td_t*)td_slab.nextavail;
td_slab.nextavail = td->link;
return td;
}
return NULL;
};
static void free_td(td_t *td)
{
td->link = td_slab.nextavail;
td_slab.nextavail = (addr_t)td;
td_slab.available++;
};
#include "pci.inc"
#include "detect.inc"

View File

@@ -51,6 +51,8 @@ typedef struct
addr_t iobase;
struct dma_pool *td_pool;
u32_t *frame_base;
count_t frame_number;
addr_t frame_dma;