* Fixed a ton of wrong usages of B_CONTIGUOUS + B_FULL_LOCK (B_CONTIGUOUS already implies locked memory, so the extra B_FULL_LOCK is redundant).
* The use of B_{READ|WRITE}_AREA throughout the drivers is surely alarming. Setting these flags means that *every* user application can access these buffers read/write; the memory becomes visible in userspace like any other memory (just shared among all apps). I would like to ask each driver maintainer to check whether that is really intended here. If only one app needs to be able to access it, cloning the area would be more appropriate.
* I came across the use of B_ANY_KERNEL_BLOCK_ADDRESS a number of times. This is almost completely useless for most usages, as it tries to align the virtual address to a multiple of the area's size. It just makes the allocation more likely to fail. Please only use it where appropriate, and please review your code.
* Minor cleanup.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26858 a95241bf-73f2-0310-859d-f6bbb57e9c96
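For maintainers reviewing the B_{READ|WRITE}_AREA point, a minimal sketch of the cloning approach, assuming the standard create_area()/clone_area() API; the function and area names below are illustrative, not taken from this commit:

#include <OS.h>

/* Driver side: keep the buffer kernel-only by passing 0 as the protection
   instead of B_READ_AREA | B_WRITE_AREA. B_CONTIGUOUS already locks it. */
static area_id
create_dma_buffer(void **virtualAddress, size_t size)
{
	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
	return create_area("dma buffer", virtualAddress, B_ANY_KERNEL_ADDRESS,
		size, B_CONTIGUOUS, 0);
}

/* Application side: only the one app that actually needs the buffer clones
   it into its own address space, read/write. Delete the clone with
   delete_area() when done. */
static area_id
map_dma_buffer(area_id source, void **virtualAddress)
{
	return clone_area("dma buffer clone", virtualAddress, B_ANY_ADDRESS,
		B_READ_AREA | B_WRITE_AREA, source);
}

The driver hands the area_id to its one client (e.g. via an ioctl) instead of exporting the memory world-readable.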
parent 9372d6969b
commit b0884f0cb8
@@ -1,7 +1,7 @@
/* Realtek RTL8169 Family Driver
* Copyright (C) 2004 Marcus Overhagen <marcus@overhagen.de>. All rights reserved.
*
* Permission to use, copy, modify and distribute this software and its
* Permission to use, copy, modify and distribute this software and its
* documentation for any purpose and without fee is hereby granted, provided
* that the above copyright notice appear in all copies, and that both the
* copyright notice and this permission notice appear in supporting documentation.
@@ -25,31 +25,35 @@
#include "fwdebug.h"
#include "util.h"


static inline uint32
round_to_pagesize(uint32 size)
{
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}


area_id
alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *name)
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
const char *name)
{
physical_entry pe;
void * virtadr;
area_id areaid;
void *virtadr;
area_id area;
status_t rv;


TRACE("allocating %ld bytes for %s\n", size, name);

size = round_to_pagesize(size);
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, protection);
if (areaid < B_OK) {
area = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
protection);
if (area < B_OK) {
ERROR("couldn't allocate area %s\n", name);
return B_ERROR;
}
rv = get_memory_map(virtadr, size, &pe, 1);
if (rv < B_OK) {
delete_area(areaid);
delete_area(area);
ERROR("couldn't get mapping for %s\n", name);
return B_ERROR;
}
@@ -58,33 +62,40 @@ alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *n
*virt = virtadr;
if (phy)
*phy = pe.address;
TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", areaid, size, virtadr, pe.address);
return areaid;
TRACE("area = %ld, size = %ld, virt = %p, phy = %p\n", area, size, virtadr,
pe.address);
return area;
}


area_id
map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name)
map_mem(void **virt, void *phy, size_t size, uint32 protection,
const char *name)
{
uint32 offset;
void *phyadr;
void *mapadr;
area_id area;

TRACE("mapping physical address %p with %ld bytes for %s\n", phy, size, name);
TRACE("mapping physical address %p with %ld bytes for %s\n", phy, size,
name);

offset = (uint32)phy & (B_PAGE_SIZE - 1);
phyadr = (char *)phy - offset;
size = round_to_pagesize(size + offset);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS, protection, &mapadr);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
protection, &mapadr);
if (area < B_OK) {
ERROR("mapping '%s' failed, error 0x%lx (%s)\n", name, area, strerror(area));
ERROR("mapping '%s' failed, error 0x%lx (%s)\n", name, area,
strerror(area));
return area;
}


*virt = (char *)mapadr + offset;

TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = %p, size = %ld, area = 0x%08lx\n",
phy, *virt, offset, phyadr, mapadr, size, area);

TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = "
"%p, size = %ld, area = 0x%08lx\n", phy, *virt, offset, phyadr, mapadr,
size, area);

return area;
}

@@ -99,7 +99,7 @@ scsi_init_ccb_alloc(scsi_bus_info *bus)
// the bus is not ready yet so the CCB cannot be initialized
// correctly
bus->ccb_pool = locked_pool->create(sizeof(scsi_ccb), sizeof(uint32) - 1, 0,
CCB_CHUNK_SIZE, CCB_NUM_MAX, 0, "scsi_ccb_pool", B_FULL_LOCK | B_CONTIGUOUS,
CCB_CHUNK_SIZE, CCB_NUM_MAX, 0, "scsi_ccb_pool", B_CONTIGUOUS,
ccb_low_alloc_hook, ccb_low_free_hook, bus);

if (bus->ccb_pool == NULL)

@@ -1,5 +1,5 @@
/*
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
@@ -213,8 +213,7 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)

buffer->area = create_area("DMA buffer",
(void **)&dma_buffer_address_unaligned,
B_ANY_KERNEL_ADDRESS, size,
B_FULL_LOCK | B_CONTIGUOUS, 0 );
B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS, 0);
if (buffer->area < 0) {
SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes",
(int)size);

@@ -1,5 +1,5 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2004-2008, Haiku, Inc. All RightsReserved.
* Copyright 2002-2003, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
@@ -50,20 +50,20 @@ fill_temp_sg(scsi_ccb *ccb)

if (dma_boundary != ~0UL || ccb->data_length > max_sg_block_size) {
// S/G list may not be controller-compatible:
// we have to split offending entries
SHOW_FLOW(3, "Checking violation of dma boundary 0x%x and entry size 0x%x",
// we have to split offending entries
SHOW_FLOW(3, "Checking violation of dma boundary 0x%x and entry size 0x%x",
(int)dma_boundary, (int)max_sg_block_size);

for (cur_idx = 0; cur_idx < num_entries; ++cur_idx) {
addr_t max_len;

// calculate space upto next dma boundary crossing
max_len = (dma_boundary + 1) -
max_len = (dma_boundary + 1) -
((addr_t)temp_sg[cur_idx].address & dma_boundary);
// restrict size per sg item
max_len = min(max_len, max_sg_block_size);

SHOW_FLOW(4, "addr=%p, size=%x, max_len=%x, idx=%d, num=%d",
SHOW_FLOW(4, "addr=%p, size=%x, max_len=%x, idx=%d, num=%d",
temp_sg[cur_idx].address, (int)temp_sg[cur_idx].size,
(int)max_len, (int)cur_idx, (int)num_entries);

@@ -72,7 +72,7 @@ fill_temp_sg(scsi_ccb *ccb)
if (++num_entries > max_sg_blocks)
goto too_complex;

memmove(&temp_sg[cur_idx + 1], &temp_sg[cur_idx],
memmove(&temp_sg[cur_idx + 1], &temp_sg[cur_idx],
(num_entries - 1 - cur_idx) * sizeof(physical_entry));

temp_sg[cur_idx].size = max_len;
@@ -84,7 +84,7 @@ fill_temp_sg(scsi_ccb *ccb)

ccb->sg_count = num_entries;

return true;
return true;

too_complex:
SHOW_ERROR( 2, "S/G list to complex for IO request (max %d entries)",
@@ -147,7 +147,7 @@ cleanup_tmp_sg(scsi_ccb *ccb)
{
status_t res;

SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%d",
SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%d",
ccb, ccb->data, (int)ccb->data_length);

res = unlock_memory(ccb->data, ccb->data_length, B_DMA_IO
@@ -170,12 +170,11 @@ cleanup_tmp_sg(scsi_ccb *ccb)
int
init_temp_sg(void)
{
temp_sg_pool = locked_pool->create(
temp_sg_pool = locked_pool->create(
MAX_TEMP_SG_FRAGMENTS * sizeof(physical_entry),
sizeof(physical_entry) - 1, 0,
B_PAGE_SIZE, MAX_TEMP_SG_LISTS, 1,
"scsi_temp_sg_pool", B_FULL_LOCK | B_CONTIGUOUS,
NULL, NULL, NULL);
B_PAGE_SIZE, MAX_TEMP_SG_LISTS, 1,
"scsi_temp_sg_pool", B_CONTIGUOUS, NULL, NULL, NULL);

if (temp_sg_pool == NULL)
return B_NO_MEMORY;

@@ -1,5 +1,5 @@
/*
* Copyright 2006, Haiku Inc. All rights reserved.
* Copyright 2006-2008, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
@@ -69,7 +69,7 @@ PhysicalMemoryAllocator::PhysicalMemoryAllocator(const char *name,
roundedSize = (roundedSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

fArea = create_area(fName, &fLogicalBase, B_ANY_KERNEL_ADDRESS,
roundedSize, B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
roundedSize, B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (fArea < B_OK) {
TRACE_ERROR(("PMA: failed to create memory area\n"));
return;

@@ -1,5 +1,5 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Copyright 2003-2008, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
@@ -282,7 +282,7 @@ Stack::AllocateArea(void **logicalAddress, void **physicalAddress, size_t size,
void *logAddress;
size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
area_id area = create_area(name, &logAddress, B_ANY_KERNEL_ADDRESS, size,
B_FULL_LOCK | B_CONTIGUOUS, 0);
B_CONTIGUOUS, 0);

if (area < B_OK) {
TRACE_ERROR(("USB Stack: couldn't allocate area %s\n", name));

@ -211,7 +211,7 @@ controller_probe(device_node *parent)
|
||||
};
|
||||
device_attr attrs[] = {
|
||||
// properties of this controller for ide bus manager
|
||||
// there are always max. 2 devices
|
||||
// there are always max. 2 devices
|
||||
// (unless this is a Compact Flash Card with a built-in IDE controller,
|
||||
// which has exactly 1 device)
|
||||
{ IDE_CONTROLLER_MAX_DEVICES_ITEM, B_UINT8_TYPE, { ui8: kASICData[asicIndex].channel_count }},
|
||||
@ -221,9 +221,9 @@ controller_probe(device_node *parent)
|
||||
{ IDE_CONTROLLER_CAN_CQ_ITEM, B_UINT8_TYPE, { ui8: true }},
|
||||
// choose any name here
|
||||
{ IDE_CONTROLLER_CONTROLLER_NAME_ITEM, B_STRING_TYPE, { string: CONTROLLER_NAME }},
|
||||
|
||||
|
||||
// DMA properties
|
||||
// data must be word-aligned;
|
||||
// data must be word-aligned;
|
||||
// warning: some controllers are more picky!
|
||||
{ B_BLOCK_DEVICE_DMA_ALIGNMENT, B_UINT32_TYPE, { ui32: 1}},
|
||||
// one S/G block must not cross 64K boundary
|
||||
@ -232,7 +232,7 @@ controller_probe(device_node *parent)
|
||||
{ B_BLOCK_DEVICE_MAX_SG_BLOCK_SIZE, B_UINT32_TYPE, { ui32: 0x10000 }},
|
||||
// see definition of MAX_SG_COUNT
|
||||
{ B_BLOCK_DEVICE_MAX_SG_BLOCKS, B_UINT32_TYPE, { ui32: IDE_ADAPTER_MAX_SG_COUNT }},
|
||||
|
||||
|
||||
// private data to find controller
|
||||
{ "silicon_image_3112/asic_index", B_UINT32_TYPE, { ui32: asicIndex }},
|
||||
{ "silicon_image_3112/mmio_base", B_UINT32_TYPE, { ui32: mmioBase }},
|
||||
@ -248,7 +248,7 @@ controller_probe(device_node *parent)
|
||||
}
|
||||
|
||||
|
||||
static status_t
|
||||
static status_t
|
||||
controller_init(device_node *node, void **_controllerCookie)
|
||||
{
|
||||
controller_data *controller;
|
||||
@ -263,9 +263,9 @@ controller_init(device_node *node, void **_controllerCookie)
|
||||
status_t res;
|
||||
uint32 temp;
|
||||
int i;
|
||||
|
||||
|
||||
TRACE("controller_init\n");
|
||||
|
||||
|
||||
if (dm->get_attr_uint32(node, "silicon_image_3112/asic_index", &asicIndex, false) != B_OK)
|
||||
return B_ERROR;
|
||||
if (dm->get_attr_uint32(node, "silicon_image_3112/mmio_base", &mmioBase, false) != B_OK)
|
||||
@ -277,10 +277,10 @@ controller_init(device_node *node, void **_controllerCookie)
|
||||
if (!controller)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
FLOW("controller %p\n", controller);
|
||||
FLOW("controller %p\n", controller);
|
||||
|
||||
mmioArea = map_physical_memory("Silicon Image SATA regs",
|
||||
(void *)mmioBase, kASICData[asicIndex].mmio_bar_size,
|
||||
(void *)mmioBase, kASICData[asicIndex].mmio_bar_size,
|
||||
B_ANY_KERNEL_ADDRESS, 0, (void **)&mmioAddr);
|
||||
if (mmioArea < B_OK) {
|
||||
TRACE("controller_init: mapping memory failed\n");
|
||||
@ -320,7 +320,7 @@ controller_init(device_node *node, void **_controllerCookie)
|
||||
for (i = 0; i < kASICData[asicIndex].channel_count; i++)
|
||||
*(volatile uint32 *)(mmioAddr + kControllerChannelData[i].sien) = 0;
|
||||
*(volatile uint32 *)(mmioAddr + kControllerChannelData[0].sien); // flush
|
||||
|
||||
|
||||
// install interrupt handler
|
||||
res = install_io_interrupt_handler(interruptNumber, handle_interrupt,
|
||||
controller, 0);
|
||||
@ -336,7 +336,7 @@ controller_init(device_node *node, void **_controllerCookie)
|
||||
temp &= (asicIndex == ASIC_SI3114) ? (~SI_MASK_4PORT) : (~SI_MASK_2PORT);
|
||||
*(volatile uint32 *)(mmioAddr + SI_SYSCFG) = temp;
|
||||
*(volatile uint32 *)(mmioAddr + SI_SYSCFG); // flush
|
||||
|
||||
|
||||
*_controllerCookie = controller;
|
||||
|
||||
TRACE("controller_init success\n");
|
||||
@ -418,9 +418,9 @@ channel_init(device_node *node, void **_channelCookie)
|
||||
physical_entry entry;
|
||||
size_t prdtSize;
|
||||
uint32 channelIndex;
|
||||
|
||||
|
||||
TRACE("channel_init enter\n");
|
||||
|
||||
|
||||
channel = malloc(sizeof(channel_data));
|
||||
if (!channel)
|
||||
return B_NO_MEMORY;
|
||||
@ -445,18 +445,19 @@ channel_init(device_node *node, void **_channelCookie)
|
||||
TRACE("channel_index %ld\n", channelIndex);
|
||||
TRACE("channel name: %s\n", kControllerChannelData[channelIndex].name);
|
||||
|
||||
TRACE("channel %p\n", channel);
|
||||
TRACE("channel %p\n", channel);
|
||||
|
||||
parent = dm->get_parent_node(node);
|
||||
dm->get_driver(parent, NULL, (void **)&controller);
|
||||
dm->put_node(parent);
|
||||
|
||||
TRACE("controller %p\n", controller);
|
||||
TRACE("controller %p\n", controller);
|
||||
TRACE("mmio_addr %p\n", (void *)controller->mmio_addr);
|
||||
|
||||
// PRDT must be contiguous, dword-aligned and must not cross 64K boundary
|
||||
// PRDT must be contiguous, dword-aligned and must not cross 64K boundary
|
||||
prdtSize = (IDE_ADAPTER_MAX_SG_COUNT * sizeof(prd_entry) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1);
|
||||
channel->prd_area = create_area("prd", (void **)&channel->prdt, B_ANY_KERNEL_ADDRESS, prdtSize, B_FULL_LOCK | B_CONTIGUOUS, 0);
|
||||
channel->prd_area = create_area("prd", (void **)&channel->prdt,
|
||||
B_ANY_KERNEL_ADDRESS, prdtSize, B_CONTIGUOUS, 0);
|
||||
if (channel->prd_area < B_OK) {
|
||||
TRACE("creating prd_area failed\n");
|
||||
goto err;
|
||||
@ -543,7 +544,7 @@ task_file_write(void *channelCookie, ide_task_file *tf, ide_reg_mask mask)
|
||||
int i;
|
||||
|
||||
FLOW("task_file_write\n");
|
||||
|
||||
|
||||
if (channel->lost)
|
||||
return B_ERROR;
|
||||
|
||||
@ -552,14 +553,14 @@ task_file_write(void *channelCookie, ide_task_file *tf, ide_reg_mask mask)
|
||||
FLOW("%x->HI(%x)\n", tf->raw.r[i + 7], i );
|
||||
channel->task_file[i] = tf->raw.r[i + 7];
|
||||
}
|
||||
|
||||
|
||||
if (((1 << i) & mask) != 0) {
|
||||
FLOW("%x->LO(%x)\n", tf->raw.r[i], i );
|
||||
channel->task_file[i] = tf->raw.r[i];
|
||||
}
|
||||
}
|
||||
*channel->dev_ctrl; // read altstatus to flush
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -581,7 +582,7 @@ task_file_read(void *channelCookie, ide_task_file *tf, ide_reg_mask mask)
|
||||
FLOW("%x: %x\n", i, (int)tf->raw.r[i] );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -600,19 +601,19 @@ altstatus_read(void *channelCookie)
|
||||
}
|
||||
|
||||
|
||||
static status_t
|
||||
static status_t
|
||||
device_control_write(void *channelCookie, uint8 val)
|
||||
{
|
||||
channel_data *channel = channelCookie;
|
||||
|
||||
FLOW("device_control_write 0x%x\n", val);
|
||||
|
||||
|
||||
if (channel->lost)
|
||||
return B_ERROR;
|
||||
|
||||
*channel->dev_ctrl = val;
|
||||
*channel->dev_ctrl; // read altstatus to flush
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -635,12 +636,12 @@ pio_write(void *channelCookie, uint16 *data, int count, bool force_16bit)
|
||||
} else {
|
||||
volatile uint32 * base = (volatile uint32 *)channel->command_block;
|
||||
uint32 *cur_data = (uint32 *)data;
|
||||
|
||||
|
||||
for ( ; count > 0; count -= 2 )
|
||||
*base = *(cur_data++);
|
||||
}
|
||||
*channel->dev_ctrl; // read altstatus to flush
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -653,7 +654,7 @@ pio_read(void *channelCookie, uint16 *data, int count, bool force_16bit)
|
||||
return B_ERROR;
|
||||
|
||||
FLOW("pio_read force_16bit = %d, (count & 1) = %d\n", force_16bit, (count & 1));
|
||||
|
||||
|
||||
// The data port is only 8 bit wide in the command register block.
|
||||
// We are memory mapped and read using 16 or 32 bit access from this 8 bit location.
|
||||
|
||||
@ -664,11 +665,11 @@ pio_read(void *channelCookie, uint16 *data, int count, bool force_16bit)
|
||||
} else {
|
||||
volatile uint32 * base = (volatile uint32 *)channel->command_block;
|
||||
uint32 *cur_data = (uint32 *)data;
|
||||
|
||||
|
||||
for ( ; count > 0; count -= 2 )
|
||||
*(cur_data++) = *base;
|
||||
}
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -745,9 +746,9 @@ dma_start(void *channelCookie)
|
||||
*channel->bm_command_reg = command;
|
||||
|
||||
*channel->dev_ctrl; // read altstatus to flush
|
||||
|
||||
|
||||
FLOW("dma_start leave\n");
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -772,7 +773,7 @@ dma_finish(void *channelCookie)
|
||||
| IDE_BM_STATUS_ERROR;
|
||||
|
||||
*channel->dev_ctrl; // read altstatus to flush
|
||||
|
||||
|
||||
if ((status & IDE_BM_STATUS_ERROR) != 0) {
|
||||
FLOW("dma_finish: failed\n");
|
||||
return B_ERROR;
|
||||
@ -851,7 +852,7 @@ static ide_controller_interface sChannelInterface = {
|
||||
0,
|
||||
NULL
|
||||
},
|
||||
|
||||
|
||||
.supports_device = NULL,
|
||||
.register_device = NULL,
|
||||
.init_driver = &channel_init,
|
||||
|
@ -13,24 +13,28 @@
|
||||
#define TRACE(a...) dprintf("\33[34mahci:\33[0m " a)
|
||||
#define ERROR(a...) dprintf("\33[34mahci:\33[0m " a)
|
||||
|
||||
|
||||
static inline uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
|
||||
area_id
|
||||
alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *name)
|
||||
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
|
||||
const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * virtadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
TRACE("allocating %ld bytes for %s\n", size, name);
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, protection);
|
||||
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, protection);
|
||||
if (areaid < B_OK) {
|
||||
ERROR("couldn't allocate area %s\n", name);
|
||||
return B_ERROR;
|
||||
@ -49,8 +53,10 @@ alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *n
|
||||
return areaid;
|
||||
}
|
||||
|
||||
|
||||
area_id
|
||||
map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name)
|
||||
map_mem(void **virt, void *phy, size_t size, uint32 protection,
|
||||
const char *name)
|
||||
{
|
||||
uint32 offset;
|
||||
void *phyadr;
|
||||
@ -62,34 +68,37 @@ map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name
|
||||
offset = (uint32)phy & (B_PAGE_SIZE - 1);
|
||||
phyadr = (char *)phy - offset;
|
||||
size = round_to_pagesize(size + offset);
|
||||
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS, protection, &mapadr);
|
||||
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
|
||||
protection, &mapadr);
|
||||
if (area < B_OK) {
|
||||
ERROR("mapping '%s' failed, error 0x%lx (%s)\n", name, area, strerror(area));
|
||||
return area;
|
||||
}
|
||||
|
||||
|
||||
*virt = (char *)mapadr + offset;
|
||||
|
||||
TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = %p, size = %ld, area = 0x%08lx\n",
|
||||
phy, *virt, offset, phyadr, mapadr, size, area);
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
sg_memcpy(const physical_entry *sgTable, int sgCount, const void *data, size_t dataSize)
|
||||
sg_memcpy(const physical_entry *sgTable, int sgCount, const void *data,
|
||||
size_t dataSize)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < sgCount && dataSize > 0; i++) {
|
||||
size_t size = min_c(dataSize, sgTable[i].size);
|
||||
addr_t address;
|
||||
|
||||
if (vm_get_physical_page((addr_t)sgTable[i].address, &address, PHYSICAL_PAGE_CAN_WAIT) < B_OK)
|
||||
if (vm_get_physical_page((addr_t)sgTable[i].address, &address,
|
||||
PHYSICAL_PAGE_CAN_WAIT) < B_OK)
|
||||
return B_ERROR;
|
||||
|
||||
TRACE("sg_memcpy phyAddr %p, addr %p, size %lu\n", sgTable[i].address, (void *)address, size);
|
||||
|
||||
|
||||
memcpy((void *)address, data, size);
|
||||
vm_put_physical_page(address);
|
||||
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,60 +38,72 @@ spinlock slock = B_SPINLOCK_INITIALIZER;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
area_id area;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
area = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
|
||||
B_READ_AREA | B_WRITE_AREA);
|
||||
if (area < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n", name));
|
||||
return B_ERROR;
|
||||
}
|
||||
rv = get_memory_map(logadr,size,&pe,1);
|
||||
rv = get_memory_map(logadr, size, &pe, 1);
|
||||
if (rv < B_OK) {
|
||||
delete_area(areaid);
|
||||
delete_area(area);
|
||||
PRINT(("couldn't map %s\n",name));
|
||||
return B_ERROR;
|
||||
}
|
||||
memset(logadr,0,size);
|
||||
memset(logadr, 0, size);
|
||||
if (log)
|
||||
*log = logadr;
|
||||
if (phy)
|
||||
*phy = pe.address;
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n",areaid,size,logadr,pe.address));
|
||||
return areaid;
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n", area, size, logadr,
|
||||
pe.address));
|
||||
return area;
|
||||
}
|
||||
|
||||
|
||||
/* This is not the most advanced method to map physical memory for io access.
|
||||
* Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
|
||||
* makes the whole offset calculation and relocation obsolete. But the code
|
||||
* below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
|
||||
*/
|
||||
area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
area_id
|
||||
map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
{
|
||||
uint32 offset;
|
||||
void *phyadr;
|
||||
@ -108,6 +120,6 @@ area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
|
||||
LOG(("physical = %p, logical = %p, offset = %#x, phyadr = %p, mapadr = %p, size = %#x, area = %#x\n",
|
||||
phy, *log, offset, phyadr, mapadr, size, area));
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,35 +38,44 @@ spinlock slock = B_SPINLOCK_INITIALIZER;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,60 +38,72 @@ spinlock slock = 0;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
}
|
||||
rv = get_memory_map(logadr,size,&pe,1);
|
||||
rv = get_memory_map(logadr, size, &pe, 1);
|
||||
if (rv < B_OK) {
|
||||
delete_area(areaid);
|
||||
PRINT(("couldn't map %s\n",name));
|
||||
PRINT(("couldn't map %s\n", name));
|
||||
return B_ERROR;
|
||||
}
|
||||
memset(logadr,0,size);
|
||||
memset(logadr, 0, size);
|
||||
if (log)
|
||||
*log = logadr;
|
||||
if (phy)
|
||||
*phy = pe.address;
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n",areaid,size,logadr,pe.address));
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n", areaid, size,
|
||||
logadr, pe.address));
|
||||
return areaid;
|
||||
}
|
||||
|
||||
|
||||
/* This is not the most advanced method to map physical memory for io access.
|
||||
* Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
|
||||
* makes the whole offset calculation and relocation obsolete. But the code
|
||||
* below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
|
||||
*/
|
||||
area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
area_id
|
||||
map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
{
|
||||
uint32 offset;
|
||||
void *phyadr;
|
||||
@ -103,11 +115,12 @@ area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
offset = (uint32)phy & (B_PAGE_SIZE - 1);
|
||||
phyadr = phy - offset;
|
||||
size = round_to_pagesize(size + offset);
|
||||
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS, B_READ_AREA | B_WRITE_AREA, &mapadr);
|
||||
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
|
||||
B_READ_AREA | B_WRITE_AREA, &mapadr);
|
||||
*log = mapadr + offset;
|
||||
|
||||
LOG(("physical = %p, logical = %p, offset = %#x, phyadr = %p, mapadr = %p, size = %#x, area = %#x\n",
|
||||
phy, *log, offset, phyadr, mapadr, size, area));
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,35 +38,44 @@ spinlock slock = 0;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **log, void **phy, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **log, void **phy, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
@ -86,12 +95,14 @@ area_id alloc_mem(void **log, void **phy, size_t size, const char *name)
|
||||
return areaid;
|
||||
}
|
||||
|
||||
|
||||
/* This is not the most advanced method to map physical memory for io access.
|
||||
* Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
|
||||
* makes the whole offset calculation and relocation obsolete. But the code
|
||||
* below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
|
||||
*/
|
||||
area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
area_id
|
||||
map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
{
|
||||
uint32 offset;
|
||||
void *phyadr;
|
||||
@ -108,6 +119,6 @@ area_id map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
|
||||
LOG(("physical = %p, logical = %p, offset = %#x, phyadr = %p, mapadr = %p, size = %#x, area = %#x\n",
|
||||
phy, *log, offset, phyadr, mapadr, size, area));
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -35,23 +35,28 @@
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
|
||||
area_id
|
||||
alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *name)
|
||||
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
|
||||
const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * virtadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %ld bytes for %s\n", size, name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, protection);
|
||||
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, protection);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
@ -71,6 +76,7 @@ alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *n
|
||||
return areaid;
|
||||
}
|
||||
|
||||
|
||||
/* This is not the most advanced method to map physical memory for io access.
|
||||
* Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
|
||||
* makes the whole offset calculation and relocation obsolete. But the code
|
||||
@ -94,6 +100,6 @@ map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name
|
||||
|
||||
LOG(("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = %p, size = %ld, area = 0x%08lx\n",
|
||||
phy, *virt, offset, phyadr, mapadr, size, area));
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,51 +38,61 @@ spinlock slock = 0;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
area_id area;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
area = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
|
||||
B_READ_AREA | B_WRITE_AREA);
|
||||
if (area < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
}
|
||||
rv = get_memory_map(logadr,size,&pe,1);
|
||||
rv = get_memory_map(logadr, size, &pe, 1);
|
||||
if (rv < B_OK) {
|
||||
delete_area(areaid);
|
||||
PRINT(("couldn't map %s\n",name));
|
||||
delete_area(area);
|
||||
PRINT(("couldn't map %s\n", name));
|
||||
return B_ERROR;
|
||||
}
|
||||
memset(logadr,0,size);
|
||||
memset(logadr, 0, size);
|
||||
if (log)
|
||||
*log = logadr;
|
||||
if (phy)
|
||||
*phy = pe.address;
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n",areaid,size,logadr,pe.address));
|
||||
LOG(("area = %d, size = %d, log = %#08X, phy = %#08X\n", area, size, logadr,
|
||||
pe.address));
|
||||
return areaid;
|
||||
}
|
||||
|
||||
@ -92,7 +102,7 @@ area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
* makes the whole offset calculation and relocation obsolete. But the code
|
||||
* below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
|
||||
*/
|
||||
area_id
|
||||
area_id
|
||||
map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
{
|
||||
uint32 offset;
|
||||
@ -110,7 +120,7 @@ map_mem(void **log, void *phy, size_t size, const char *name)
|
||||
|
||||
LOG(("physical = %p, logical = %p, offset = %#x, phyadr = %p, mapadr = %p, size = %#x, area = %#x\n",
|
||||
phy, *log, offset, phyadr, mapadr, size, area));
|
||||
|
||||
|
||||
return area;
|
||||
}
|
||||
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -38,35 +38,44 @@ spinlock slock = B_SPINLOCK_INITIALIZER;
|
||||
|
||||
uint32 round_to_pagesize(uint32 size);
|
||||
|
||||
cpu_status lock(void)
|
||||
|
||||
cpu_status
|
||||
lock(void)
|
||||
{
|
||||
cpu_status status = disable_interrupts();
|
||||
acquire_spinlock(&slock);
|
||||
return status;
|
||||
}
|
||||
|
||||
void unlock(cpu_status status)
|
||||
|
||||
void
|
||||
unlock(cpu_status status)
|
||||
{
|
||||
release_spinlock(&slock);
|
||||
restore_interrupts(status);
|
||||
}
|
||||
|
||||
uint32 round_to_pagesize(uint32 size)
|
||||
|
||||
uint32
|
||||
round_to_pagesize(uint32 size)
|
||||
{
|
||||
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
|
||||
}
|
||||
|
||||
area_id alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
|
||||
area_id
|
||||
alloc_mem(void **phy, void **log, size_t size, const char *name)
|
||||
{
|
||||
physical_entry pe;
|
||||
void * logadr;
|
||||
area_id areaid;
|
||||
status_t rv;
|
||||
|
||||
|
||||
LOG(("allocating %d bytes for %s\n",size,name));
|
||||
|
||||
size = round_to_pagesize(size);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
|
||||
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
|
||||
if (areaid < B_OK) {
|
||||
PRINT(("couldn't allocate area %s\n",name));
|
||||
return B_ERROR;
|
||||
|
@ -89,10 +89,10 @@ stream_handle_interrupt(hda_controller* controller, hda_stream* stream)
dprintf("hda: stream status %x\n", status);
return;
}

position = stream->Read32(HDAC_STREAM_POSITION);
bufferSize = ALIGN(stream->sample_size * stream->num_channels * stream->buffer_length, 128);

// Buffer Completed Interrupt
acquire_spinlock(&stream->lock);

@ -101,7 +101,7 @@ stream_handle_interrupt(hda_controller* controller, hda_stream* stream)
stream->buffer_cycle = 1 - (position / bufferSize);

release_spinlock(&stream->lock);

release_sem_etc(stream->buffer_ready_sem, 1, B_DO_NOT_RESCHEDULE);
}

@ -121,7 +121,7 @@ hda_interrupt_handler(hda_controller* controller)
uint8 rirbStatus = controller->Read8(HDAC_RIRB_STATUS);
uint8 corbStatus = controller->Read8(HDAC_CORB_STATUS);

/* Check for incoming responses */
/* Check for incoming responses */
if (rirbStatus) {
controller->Write8(HDAC_RIRB_STATUS, rirbStatus);

@ -156,7 +156,7 @@ hda_interrupt_handler(hda_controller* controller)
codec->responses[codec->response_count++] = response;
release_sem_etc(codec->response_sem, 1, B_DO_NOT_RESCHEDULE);
handled = B_INVOKE_SCHEDULER;
}
}
}

if ((rirbStatus & RIRB_STATUS_OVERRUN) != 0)
@ -317,7 +317,7 @@ init_corb_rirb_pos(hda_controller* controller)
/* Allocate memory area */
controller->corb_rirb_pos_area = create_area("hda corb/rirb/pos",
(void**)&controller->corb, B_ANY_KERNEL_ADDRESS, memSize,
B_FULL_LOCK | B_CONTIGUOUS, 0);
B_CONTIGUOUS, 0);
if (controller->corb_rirb_pos_area < 0)
return controller->corb_rirb_pos_area;

@ -501,7 +501,7 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,
stream->buffer_descriptors_area = B_ERROR;
}

/* Calculate size of buffer (aligned to 128 bytes) */
/* Calculate size of buffer (aligned to 128 bytes) */
bufferSize = stream->sample_size * stream->num_channels
* stream->buffer_length;
bufferSize = ALIGN(bufferSize, 128);
@ -515,7 +515,7 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,

/* Allocate memory for buffers */
stream->buffer_area = create_area("hda buffers", (void**)&buffer,
B_ANY_KERNEL_ADDRESS, alloc, B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
B_ANY_KERNEL_ADDRESS, alloc, B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (stream->buffer_area < B_OK)
return stream->buffer_area;

@ -525,13 +525,13 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,
delete_area(stream->buffer_area);
return rc;
}

bufferPhysicalAddress = (uint32)pe.address;

dprintf("%s(%s): Allocated %lu bytes for %ld buffers\n", __func__, desc,
alloc, stream->num_buffers);

/* Store pointers (both virtual/physical) */
/* Store pointers (both virtual/physical) */
for (index = 0; index < stream->num_buffers; index++) {
stream->buffers[index] = buffer + (index * bufferSize);
stream->physical_buffers[index] = bufferPhysicalAddress
@ -544,7 +544,7 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,

stream->buffer_descriptors_area = create_area("hda buffer descriptors",
(void**)&bufferDescriptors, B_ANY_KERNEL_ADDRESS, alloc,
B_FULL_LOCK | B_CONTIGUOUS, 0);
B_CONTIGUOUS, 0);
if (stream->buffer_descriptors_area < B_OK) {
delete_area(stream->buffer_area);
return stream->buffer_descriptors_area;
@ -563,7 +563,7 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,
dprintf("%s(%s): Allocated %ld bytes for %ld BDLEs\n", __func__, desc,
alloc, stream->num_buffers);

/* Setup buffer descriptor list (BDL) entries */
/* Setup buffer descriptor list (BDL) entries */
for (index = 0; index < stream->num_buffers; index++, bufferDescriptors++) {
bufferDescriptors->lower = stream->physical_buffers[index];
bufferDescriptors->upper = 0;
@ -597,7 +597,7 @@ hda_stream_setup_buffers(hda_audio_group* audioGroup, hda_stream* stream,

dprintf("IRA: %s: setup stream %ld: SR=%ld, SF=%ld\n", __func__, stream->id,
stream->rate, stream->bps);

stream->Write16(HDAC_STREAM_FORMAT, format);
stream->Write32(HDAC_STREAM_BUFFERS_BASE_LOWER,
stream->physical_buffer_descriptors);
@ -676,7 +676,7 @@ hda_hw_init(hda_controller* controller)
uint16 capabilities, stateStatus, cmd;
status_t status;
uint8 tcsel;

/* Map MMIO registers */
controller->regs_area = map_physical_memory("hda_hw_regs",
(void*)controller->pci_info.u.h0.base_registers[0],
@ -687,15 +687,15 @@ hda_hw_init(hda_controller* controller)
goto error;
}

cmd = (gPci->read_pci_config)(controller->pci_info.bus,
cmd = (gPci->read_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function, PCI_command, 2);
if (!(cmd & PCI_command_master)) {
(gPci->write_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function,
(gPci->write_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function,
PCI_command, 2, cmd | PCI_command_master);
dprintf("hda: enabling PCI bus mastering\n");
}

/* Absolute minimum hw is online; we can now install interrupt handler */
controller->irq = controller->pci_info.u.h0.interrupt_line;
status = install_io_interrupt_handler(controller->irq,
@ -704,19 +704,19 @@ hda_hw_init(hda_controller* controller)
goto no_irq;

/* TCSEL is reset to TC0 (clear 0-2 bits) */
tcsel = (gPci->read_pci_config)(controller->pci_info.bus,
tcsel = (gPci->read_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function, PCI_HDA_TCSEL, 1);
(gPci->write_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function,
(gPci->write_pci_config)(controller->pci_info.bus,
controller->pci_info.device, controller->pci_info.function,
PCI_HDA_TCSEL, 1, tcsel & 0xf8);

capabilities = controller->Read16(HDAC_GLOBAL_CAP);
controller->num_input_streams = GLOBAL_CAP_INPUT_STREAMS(capabilities);
controller->num_output_streams = GLOBAL_CAP_OUTPUT_STREAMS(capabilities);
controller->num_bidir_streams = GLOBAL_CAP_BIDIR_STREAMS(capabilities);

/* show some hw features */
dprintf("hda: HDA v%d.%d, O:%ld/I:%ld/B:%ld, #SDO:%d, 64bit:%s\n",
dprintf("hda: HDA v%d.%d, O:%ld/I:%ld/B:%ld, #SDO:%d, 64bit:%s\n",
controller->Read8(HDAC_VERSION_MAJOR),
controller->Read8(HDAC_VERSION_MINOR),
controller->num_output_streams, controller->num_input_streams,
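The stream setup in the hunks above carves one contiguous allocation into equally sized, 128-byte-aligned buffers and records a virtual pointer, a physical pointer and a BDL entry for each. A compressed sketch of that bookkeeping follows; bdl_entry and the parameter names are simplified stand-ins for the driver's own structures, kept here only to make the index arithmetic explicit.

#include <OS.h>

#define ALIGN128(x)	(((x) + 127) & ~(size_t)127)

/* Simplified stand-in for a buffer descriptor list entry. */
typedef struct { uint32 lower; uint32 upper; uint32 length; } bdl_entry;

static void
slice_buffers(uint8 *virtualBase, uint32 physicalBase, size_t rawBufferSize,
	uint32 count, uint8 **buffers, uint32 *physicalBuffers, bdl_entry *bdl)
{
	size_t bufferSize = ALIGN128(rawBufferSize);
	uint32 index;

	for (index = 0; index < count; index++) {
		/* the same offset applies to the virtual and the physical view */
		buffers[index] = virtualBase + index * bufferSize;
		physicalBuffers[index] = physicalBase + index * bufferSize;

		/* each BDL entry points at the physical buffer; the upper
		 * 32 bits stay zero for a 32 bit physical address */
		bdl[index].lower = physicalBuffers[index];
		bdl[index].upper = 0;
		bdl[index].length = bufferSize;
	}
}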
@ -22,35 +22,44 @@ spinlock slock = 0;

uint32 round_to_pagesize(uint32 size);

cpu_status lock(void)

cpu_status
lock(void)
{
cpu_status status = disable_interrupts();
acquire_spinlock(&slock);
return status;
}

void unlock(cpu_status status)

void
unlock(cpu_status status)
{
release_spinlock(&slock);
restore_interrupts(status);
}

uint32 round_to_pagesize(uint32 size)

uint32
round_to_pagesize(uint32 size)
{
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}

area_id alloc_mem(void **phy, void **log, size_t size, const char *name)

area_id
alloc_mem(void **phy, void **log, size_t size, const char *name)
{
physical_entry pe;
void * logadr;
area_id areaid;
status_t rv;

TRACE_ICE(("allocating %#08 bytes for %s\n",size,name));

size = round_to_pagesize(size);
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (areaid < B_OK) {
TRACE_ICE(("couldn't allocate area %s\n",name));
return B_ERROR;
@ -3,22 +3,22 @@
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

@ -37,45 +37,50 @@

area_id
map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name)
map_mem(void **virt, void *phy, size_t size, uint32 protection,
const char *name)
{
uint32 offset;
void *phyadr;
void *mapadr;
area_id area;

TRACE("mapping physical address %p with %ld bytes for %s\n", phy, size, name);
TRACE("mapping physical address %p with %ld bytes for %s\n", phy, size,
name);

offset = (uint32)phy & (B_PAGE_SIZE - 1);
phyadr = (char *)phy - offset;
size = ROUNDUP(size + offset, B_PAGE_SIZE);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS, protection, &mapadr);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
protection, &mapadr);
if (area < B_OK) {
TRACE("mapping '%s' failed, error 0x%lx (%s)\n", name, area, strerror(area));
return area;
}

*virt = (char *)mapadr + offset;

TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = %p, size = %ld, area = 0x%08lx\n",
phy, *virt, offset, phyadr, mapadr, size, area);

return area;
}

area_id
alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *name)
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
const char *name)
{
physical_entry pe;
void * virtadr;
area_id areaid;
status_t rv;

TRACE("allocating %ld bytes for %s\n", size, name);

size = ROUNDUP(size, B_PAGE_SIZE);
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, protection);
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
B_CONTIGUOUS, protection);
if (areaid < B_OK) {
TRACE("couldn't allocate area %s\n", name);
return B_ERROR;
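map_mem() above has to cope with a physical base address that is usually not page aligned: it maps the surrounding whole pages and then re-applies the sub-page offset to the returned virtual address. Below is a hedged, stand-alone sketch of that calculation; the helper name map_registers is illustrative, the rounding is spelled out instead of the driver's ROUNDUP macro, and the address spec mirrors the call above.

#include <KernelExport.h>
#include <OS.h>

/* Sketch only: map a (possibly unaligned) physical range and return a
 * virtual pointer to its first byte. */
static area_id
map_registers(const char *name, void *physicalAddress, size_t size,
	uint32 protection, void **virtualAddress)
{
	uint32 offset = (uint32)physicalAddress & (B_PAGE_SIZE - 1);
	void *alignedPhysical = (char *)physicalAddress - offset;
	void *mappedAddress;
	area_id area;

	/* grow the mapping so the tail of the range is still covered after
	 * shifting the start down to a page boundary */
	size = (size + offset + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	area = map_physical_memory(name, alignedPhysical, size,
		B_ANY_KERNEL_BLOCK_ADDRESS, protection, &mappedAddress);
	if (area < B_OK)
		return area;

	*virtualAddress = (char *)mappedAddress + offset;
	return area;
}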
@ -184,7 +184,7 @@ init_hardware(void) {
|
||||
long pci_index = 0;
|
||||
pci_info pcii;
|
||||
bool found_one = FALSE;
|
||||
|
||||
|
||||
/* choke if we can't find the PCI bus */
|
||||
if (get_module(B_PCI_MODULE_NAME, (module_info **)&pci_bus) != B_OK)
|
||||
return B_ERROR;
|
||||
@ -192,7 +192,7 @@ init_hardware(void) {
|
||||
/* while there are more pci devices */
|
||||
while ((*pci_bus->get_nth_pci_info)(pci_index, &pcii) == B_NO_ERROR) {
|
||||
int vendor = 0;
|
||||
|
||||
|
||||
/* if we match a supported vendor */
|
||||
while (SupportedDevices[vendor].vendor) {
|
||||
if (SupportedDevices[vendor].vendor == pcii.vendor_id) {
|
||||
@ -201,7 +201,7 @@ init_hardware(void) {
|
||||
while (*devices) {
|
||||
/* if we match a supported device */
|
||||
if (*devices == pcii.device_id ) {
|
||||
|
||||
|
||||
found_one = TRUE;
|
||||
goto done;
|
||||
}
|
||||
@ -351,7 +351,7 @@ static status_t map_device(device_info *di)
|
||||
{
|
||||
si->use_clone_bugfix = 0;
|
||||
}
|
||||
|
||||
|
||||
/* work out a name for the register mapping */
|
||||
sprintf(buffer, DEVICE_FORMAT " regs",
|
||||
di->pcii.vendor_id, di->pcii.device_id,
|
||||
@ -475,7 +475,7 @@ static status_t map_device(device_info *di)
|
||||
// &si->dma_buffer,
|
||||
// B_ANY_ADDRESS,
|
||||
// G400_DMA_BUFFER_SIZE,
|
||||
// B_FULL_LOCK|B_CONTIGUOUS,
|
||||
// B_CONTIGUOUS,
|
||||
// B_READ_AREA|B_WRITE_AREA);
|
||||
|
||||
/* if there was an error, delete our other areas and pass on error*/
|
||||
@ -489,7 +489,7 @@ static status_t map_device(device_info *di)
|
||||
|
||||
/*find where it is in real memory*/
|
||||
// get_memory_map(si->dma_buffer,4,physical_memory,1);
|
||||
// si->dma_buffer_pci = physical_memory[0].address; /*addr from PCI space*/
|
||||
// si->dma_buffer_pci = physical_memory[0].address; /*addr from PCI space*/
|
||||
}
|
||||
|
||||
/* work out a name for the framebuffer mapping*/
|
||||
@ -516,7 +516,7 @@ static status_t map_device(device_info *di)
|
||||
B_READ_AREA | B_WRITE_AREA,
|
||||
&(si->framebuffer));
|
||||
}
|
||||
|
||||
|
||||
/* if there was an error, delete our other areas and pass on error*/
|
||||
if (si->fb_area < 0)
|
||||
{
|
||||
@ -537,7 +537,7 @@ static status_t map_device(device_info *di)
|
||||
si->framebuffer_pci = (void *) di->pcii.u.h0.base_registers_pci[frame_buffer];
|
||||
|
||||
// remember settings for use here and in accelerant
|
||||
si->settings = current_settings;
|
||||
si->settings = current_settings;
|
||||
|
||||
/* in any case, return the result */
|
||||
return si->fb_area;
|
||||
@ -739,7 +739,7 @@ gx00_interrupt(void *data)
|
||||
atomic_and(flags, ~SKD_HANDLER_INSTALLED);
|
||||
|
||||
exit0:
|
||||
return handled;
|
||||
return handled;
|
||||
}
|
||||
|
||||
static status_t open_hook (const char* name, uint32 flags, void** cookie) {
|
||||
@ -771,8 +771,8 @@ static status_t open_hook (const char* name, uint32 flags, void** cookie) {
|
||||
di->pcii.vendor_id, di->pcii.device_id,
|
||||
di->pcii.bus, di->pcii.device, di->pcii.function);
|
||||
/* create this area with NO user-space read or write permissions, to prevent accidental dammage */
|
||||
di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS,
|
||||
((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK,
|
||||
di->shared_area = create_area(shared_name, (void **)&(di->si), B_ANY_KERNEL_ADDRESS,
|
||||
((sizeof(shared_info) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1)), B_FULL_LOCK,
|
||||
B_USER_CLONEABLE_AREA);
|
||||
if (di->shared_area < 0) {
|
||||
/* return the error */
|
||||
@ -850,7 +850,7 @@ mark_as_open:
|
||||
|
||||
/* send the cookie to the opener */
|
||||
*cookie = di;
|
||||
|
||||
|
||||
goto done;
|
||||
|
||||
|
||||
@ -917,7 +917,7 @@ free_hook (void* dev) {
|
||||
|
||||
/* disable and clear any pending interrupts */
|
||||
disable_vbi(regs);
|
||||
|
||||
|
||||
if (si->ps.int_assigned)
|
||||
{
|
||||
/* remove interrupt handler */
|
||||
@ -960,7 +960,7 @@ control_hook (void* dev, uint32 msg, void *buf, size_t len) {
|
||||
strcpy(sig, current_settings.accelerant);
|
||||
result = B_OK;
|
||||
} break;
|
||||
|
||||
|
||||
/* PRIVATE ioctl from here on */
|
||||
case GX00_GET_PRIVATE_DATA: {
|
||||
gx00_get_private_data *gpd = (gx00_get_private_data *)buf;
|
||||
|
@ -194,9 +194,9 @@ static uint16 nvidia_device_list[] = {
|
||||
0x016e, /* Nvidia unknown FX */
|
||||
0x0170, /* Nvidia GeForce4 MX 460 */
|
||||
0x0171, /* Nvidia GeForce4 MX 440 */
|
||||
0x0172, /* Nvidia GeForce4 MX 420 */
|
||||
0x0173, /* Nvidia GeForce4 MX 440SE */
|
||||
0x0174, /* Nvidia GeForce4 440 Go */
|
||||
0x0172, /* Nvidia GeForce4 MX 420 */
|
||||
0x0173, /* Nvidia GeForce4 MX 440SE */
|
||||
0x0174, /* Nvidia GeForce4 440 Go */
|
||||
0x0175, /* Nvidia GeForce4 420 Go */
|
||||
0x0176, /* Nvidia GeForce4 420 Go 32M */
|
||||
0x0177, /* Nvidia GeForce4 460 Go */
|
||||
@ -364,7 +364,7 @@ static struct {
|
||||
{0x0000, NULL}
|
||||
};
|
||||
|
||||
static nv_settings sSettings = { // see comments in nvidia.settings
|
||||
static nv_settings sSettings = { // see comments in nvidia.settings
|
||||
/* for driver */
|
||||
DRIVER_PREFIX ".accelerant",
|
||||
"none", // primary
|
||||
@ -547,7 +547,7 @@ map_device(device_info *di)
|
||||
{
|
||||
si->use_clone_bugfix = 0;
|
||||
}
|
||||
|
||||
|
||||
/* work out a name for the register mapping */
|
||||
sprintf(buffer, DEVICE_FORMAT " regs",
|
||||
di->pcii.vendor_id, di->pcii.device_id,
|
||||
@ -563,7 +563,7 @@ map_device(device_info *di)
|
||||
B_USER_CLONEABLE_AREA | (si->use_clone_bugfix ? B_READ_AREA|B_WRITE_AREA : 0),
|
||||
(void **)&(di->regs));
|
||||
si->clone_bugfix_regs = (uint32 *) di->regs;
|
||||
|
||||
|
||||
/* if mapping registers to vmem failed then pass on error */
|
||||
if (si->regs_area < 0) return si->regs_area;
|
||||
|
||||
@ -684,7 +684,7 @@ map_device(device_info *di)
|
||||
si->framebuffer_pci = (void *) di->pcii.u.h0.base_registers_pci[frame_buffer];
|
||||
|
||||
// remember settings for use here and in accelerant
|
||||
si->settings = sSettings;
|
||||
si->settings = sSettings;
|
||||
|
||||
/* in any case, return the result */
|
||||
return si->fb_area;
|
||||
@ -833,7 +833,7 @@ nv_interrupt(void *data)
|
||||
atomic_and(flags, ~SKD_HANDLER_INSTALLED);
|
||||
|
||||
exit0:
|
||||
return handled;
|
||||
return handled;
|
||||
}
|
||||
|
||||
|
||||
@ -898,7 +898,7 @@ open_hook(const char* name, uint32 flags, void** cookie)
|
||||
(void **)&unaligned_dma_buffer,
|
||||
B_ANY_KERNEL_ADDRESS,
|
||||
2 * net_buf_size, /* take twice the net size so we can have MTRR-WC even on old systems */
|
||||
B_FULL_LOCK | B_CONTIGUOUS, /* both properties needed: GPU always needs access */
|
||||
B_CONTIGUOUS, /* GPU always needs access */
|
||||
B_USER_CLONEABLE_AREA | B_READ_AREA | B_WRITE_AREA);
|
||||
/* on error, abort */
|
||||
if (si->unaligned_dma_area < 0)
|
||||
@ -1030,7 +1030,7 @@ mark_as_open:
|
||||
|
||||
/* send the cookie to the opener */
|
||||
*cookie = di;
|
||||
|
||||
|
||||
goto done;
|
||||
|
||||
|
||||
@ -1350,7 +1350,7 @@ init_hardware(void)
|
||||
/* while there are more pci devices */
|
||||
while ((*pci_bus->get_nth_pci_info)(index, &pcii) == B_NO_ERROR) {
|
||||
int vendor = 0;
|
||||
|
||||
|
||||
/* if we match a supported vendor */
|
||||
while (SupportedDevices[vendor].vendor) {
|
||||
if (SupportedDevices[vendor].vendor == pcii.vendor_id) {
|
||||
@ -1359,7 +1359,7 @@ init_hardware(void)
|
||||
while (*devices) {
|
||||
/* if we match a supported device */
|
||||
if (*devices == pcii.device_id ) {
|
||||
|
||||
|
||||
found = true;
|
||||
goto done;
|
||||
}
|
||||
|
@ -116,7 +116,7 @@ static struct {
|
||||
{0x0000, NULL}
|
||||
};
|
||||
|
||||
static nv_settings sSettings = { // see comments in nvidia_gpgpu.settings
|
||||
static nv_settings sSettings = { // see comments in nvidia_gpgpu.settings
|
||||
/* for driver */
|
||||
DRIVER_PREFIX ".accelerant",
|
||||
"none", // primary
|
||||
@ -294,7 +294,7 @@ map_device(device_info *di)
|
||||
{
|
||||
si->use_clone_bugfix = 0;
|
||||
}
|
||||
|
||||
|
||||
/* work out a name for the register mapping */
|
||||
sprintf(buffer, DEVICE_FORMAT " regs",
|
||||
di->pcii.vendor_id, di->pcii.device_id,
|
||||
@ -310,7 +310,7 @@ map_device(device_info *di)
|
||||
B_USER_CLONEABLE_AREA | (si->use_clone_bugfix ? B_READ_AREA|B_WRITE_AREA : 0),
|
||||
(void **)&(di->regs));
|
||||
si->clone_bugfix_regs = (uint32 *) di->regs;
|
||||
|
||||
|
||||
/* if mapping registers to vmem failed then pass on error */
|
||||
if (si->regs_area < 0) return si->regs_area;
|
||||
|
||||
@ -431,7 +431,7 @@ map_device(device_info *di)
|
||||
si->framebuffer_pci = (void *) di->pcii.u.h0.base_registers_pci[frame_buffer];
|
||||
|
||||
// remember settings for use here and in accelerant
|
||||
si->settings = sSettings;
|
||||
si->settings = sSettings;
|
||||
|
||||
/* in any case, return the result */
|
||||
return si->fb_area;
|
||||
@ -580,7 +580,7 @@ nv_interrupt(void *data)
|
||||
atomic_and(flags, ~SKD_HANDLER_INSTALLED);
|
||||
|
||||
exit0:
|
||||
return handled;
|
||||
return handled;
|
||||
}
|
||||
|
||||
|
||||
@ -645,7 +645,7 @@ open_hook(const char* name, uint32 flags, void** cookie)
|
||||
(void **)&unaligned_dma_buffer,
|
||||
B_ANY_KERNEL_ADDRESS,
|
||||
2 * net_buf_size, /* take twice the net size so we can have MTRR-WC even on old systems */
|
||||
B_FULL_LOCK | B_CONTIGUOUS, /* both properties needed: GPU always needs access */
|
||||
B_CONTIGUOUS, /* GPU always needs access */
|
||||
B_USER_CLONEABLE_AREA | B_READ_AREA | B_WRITE_AREA);
|
||||
/* on error, abort */
|
||||
if (si->unaligned_dma_area < 0)
|
||||
@ -777,7 +777,7 @@ mark_as_open:
|
||||
|
||||
/* send the cookie to the opener */
|
||||
*cookie = di;
|
||||
|
||||
|
||||
goto done;
|
||||
|
||||
|
||||
@ -1064,7 +1064,7 @@ init_hardware(void)
|
||||
/* while there are more pci devices */
|
||||
while ((*pci_bus->get_nth_pci_info)(index, &pcii) == B_NO_ERROR) {
|
||||
int vendor = 0;
|
||||
|
||||
|
||||
/* if we match a supported vendor */
|
||||
while (SupportedDevices[vendor].vendor) {
|
||||
if (SupportedDevices[vendor].vendor == pcii.vendor_id) {
|
||||
@ -1073,7 +1073,7 @@ init_hardware(void)
|
||||
while (*devices) {
|
||||
/* if we match a supported device */
|
||||
if (*devices == pcii.device_id ) {
|
||||
|
||||
|
||||
found = true;
|
||||
goto done;
|
||||
}
|
||||
|
@ -272,7 +272,7 @@ int32 api_version = B_CUR_DRIVER_API_VERSION;
|
||||
|
||||
status_t
|
||||
init_hardware(void)
|
||||
{
|
||||
{
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -289,7 +289,7 @@ find_device(const char *name)
|
||||
{
|
||||
return &b57_hooks;
|
||||
}
|
||||
|
||||
|
||||
|
||||
status_t
|
||||
init_driver(void)
|
||||
@ -356,7 +356,7 @@ init_driver(void)
|
||||
void
|
||||
uninit_driver(void)
|
||||
{
|
||||
struct be_b57_dev *pUmDevice;
|
||||
struct be_b57_dev *pUmDevice;
|
||||
int i, j;
|
||||
|
||||
for (j = 0; j < cards_found; j++) {
|
||||
@ -525,7 +525,7 @@ b57_ioctl(void *cookie,uint32 op,void *data,size_t len)
|
||||
state.media |= (pUmDevice->lm_dev.DuplexMode
|
||||
== LM_DUPLEX_MODE_FULL ? IFM_FULL_DUPLEX : IFM_HALF_DUPLEX);
|
||||
state.quality = 1000;
|
||||
|
||||
|
||||
return user_memcpy(data, &state, sizeof(ether_link_state_t));
|
||||
}
|
||||
case ETHER_SET_LINK_STATE_SEM:
|
||||
@ -734,7 +734,7 @@ MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
|
||||
(uchar)Offset, sizeof(LM_UINT16));
|
||||
return LM_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
LM_STATUS
|
||||
MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
|
||||
@ -834,7 +834,7 @@ tx_cleanup_thread(void *us)
|
||||
struct B_UM_PACKET *pUmPacket;
|
||||
cpu_status cpu;
|
||||
|
||||
while (1) {
|
||||
while (1) {
|
||||
cpu = disable_interrupts();
|
||||
acquire_spinlock(&pUmDevice->lock);
|
||||
|
||||
@ -857,12 +857,12 @@ tx_cleanup_thread(void *us)
|
||||
}
|
||||
return LM_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
/*LM_STATUS MM_StartTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);
|
||||
LM_STATUS MM_CompleteTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);*/
|
||||
|
||||
LM_STATUS
|
||||
MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
|
||||
MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
|
||||
PLM_VOID *pMemoryBlockVirt)
|
||||
{
|
||||
struct be_b57_dev *dev = (struct be_b57_dev *)(pDevice);
|
||||
@ -873,7 +873,7 @@ MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
|
||||
*pMemoryBlockVirt = dev->mem_list[(dev->mem_list_num)++] = (void *)malloc(BlockSize);
|
||||
return LM_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
LM_STATUS
|
||||
MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
|
||||
@ -888,7 +888,7 @@ MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
|
||||
dev = (struct be_b57_dev *)(pDevice);
|
||||
area_desc = dev->lockmem_list[dev->lockmem_list_num++] = create_area("broadcom_shared_mem",
|
||||
&pvirt, B_ANY_KERNEL_ADDRESS, ROUND_UP_TO_PAGE(BlockSize),
|
||||
B_CONTIGUOUS | B_FULL_LOCK, 0);
|
||||
B_CONTIGUOUS, 0);
|
||||
|
||||
if (area_desc < B_OK)
|
||||
return LM_STATUS_FAILURE;
|
||||
@ -920,7 +920,7 @@ MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
|
||||
pDevice->RxCoalescingTicks = DEFAULT_RX_COALESCING_TICKS;
|
||||
pDevice->TxCoalescingTicks = DEFAULT_TX_COALESCING_TICKS;
|
||||
pDevice->StatsCoalescingTicks = DEFAULT_STATS_COALESCING_TICKS;
|
||||
pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
|
||||
pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
|
||||
|
||||
return LM_STATUS_SUCCESS;
|
||||
}
|
||||
@ -931,7 +931,7 @@ MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
|
||||
{
|
||||
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
|
||||
struct be_b57_dev *pUmDevice = (struct be_b57_dev *)pDevice;
|
||||
|
||||
|
||||
if (pUmDevice->linkChangeSem != -1)
|
||||
release_sem_etc(pUmDevice->linkChangeSem, 1,
|
||||
B_DO_NOT_RESCHEDULE);
|
||||
|
@ -24,7 +24,7 @@
|
||||
* Portions of code based on dp83815 driver by: Antonio Carpio (BolivianTONE@nc.rr.com)
|
||||
* Portions of code may be: Copyright (c) 1998, 1999 Be, Inc. All Rights Reserved under terms of Be Sample Code License.
|
||||
*/
|
||||
|
||||
|
||||
#include <KernelExport.h>
|
||||
#include <Drivers.h>
|
||||
#include <Errors.h>
|
||||
@ -98,9 +98,9 @@ typedef struct dp83815_properties
|
||||
uint32 reg_base; /* Base address for registers */
|
||||
area_id ioarea; /* PPC: Area where the mmaped registers are */
|
||||
area_id reg_area,
|
||||
mem_area;
|
||||
mem_area;
|
||||
uint8 device_id; /* Which device id this is... */
|
||||
|
||||
|
||||
ether_address_t address; /* holds the MAC address */
|
||||
sem_id lock; /* lock this structure: still interrupt */
|
||||
int32 blockFlag; /* for blocking or nonblocking reads */
|
||||
@ -127,7 +127,7 @@ static status_t close_hook( void * );
|
||||
#define write8( offset , value) (m_pcimodule->write_io_8 ((data->reg_base + (offset)), (value) ) )
|
||||
#define write16( offset , value) (m_pcimodule->write_io_16((data->reg_base + (offset)), (value) ) )
|
||||
#define write32( offset , value) (m_pcimodule->write_io_32((data->reg_base + (offset)), (value) ) )
|
||||
|
||||
|
||||
#define read8( offset ) (m_pcimodule->read_io_8 ((data->reg_base + offset)))
|
||||
#define read16( offset ) (m_pcimodule->read_io_16((data->reg_base + offset)))
|
||||
#define read32( offset ) (m_pcimodule->read_io_32((data->reg_base + offset)))
|
||||
@ -141,27 +141,27 @@ static status_t close_hook( void * );
|
||||
#define write8( offset , value) (*((volatile uint8 *)(data->reg_base + (offset))) = (value))
|
||||
#define write16( offset , value) (*((volatile uint8 *)(data->reg_base + (offset))) = B_HOST_TO_LENDIAN_INT16(value))
|
||||
#define write32( offset , value) (*((volatile uint8 *)(data->reg_base + (offset))) = B_HOST_TO_LENDIAN_INT32(value))
|
||||
|
||||
|
||||
#define read8( offset ) (*((volatile uint8*)(data->reg_base + (offset))))
|
||||
#define read16( offset ) B_LENDIAN_TO_HOST_INT16(*((volatile uint16*)(data->reg_base + (offset))))
|
||||
#define read32( offset ) B_LENDIAN_TO_HOST_INT32(*((volatile uint32*)(data->reg_base + (offset))))
|
||||
|
||||
|
||||
static void dp83815_init_registers( rtl8139_properties_t *data )
|
||||
{
|
||||
int32 base, size, offset;
|
||||
base = data->pcii->u.h0.base_registers[0];
|
||||
size = data->pcii->u.h0.base_register_sizes[0];
|
||||
|
||||
|
||||
/* Round down to nearest page boundary */
|
||||
base = base & ~(B_PAGE_SIZE-1);
|
||||
|
||||
|
||||
/* Adjust the size */
|
||||
offset = data->pcii->u.h0.base_registers[0] - base;
|
||||
size += offset;
|
||||
size = (size +(B_PAGE_SIZE-1)) & ~(B_PAGE_SIZE-1);
|
||||
|
||||
TRACE(( kDevName " _open_hook(): PCI base=%lx size=%lx offset=%lx\n", base, size, offset));
|
||||
|
||||
|
||||
data->ioarea = map_physical_memory(kDevName " Regs", (void *)base, size, B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, (void **)&data->reg_base);
|
||||
|
||||
data->reg_base = data->reg_base + offset;
|
||||
@ -191,32 +191,32 @@ init_driver (void)
|
||||
int32 i, found; //Counter
|
||||
|
||||
TRACE(( kDevName ": init_driver()\n" ));
|
||||
|
||||
|
||||
// Try if the PCI module is loaded (it would be weird if it wouldn't, but alas)
|
||||
if( ( status = get_module( B_PCI_MODULE_NAME, (module_info **)&m_pcimodule )) != B_OK)
|
||||
if( ( status = get_module( B_PCI_MODULE_NAME, (module_info **)&m_pcimodule )) != B_OK)
|
||||
{
|
||||
TRACE(( kDevName " init_driver(): Get PCI module failed! %lu \n", status));
|
||||
return status;
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
//
|
||||
i = 0;
|
||||
item = (pci_info *)malloc(sizeof(pci_info));
|
||||
for ( i = found = 0 ; m_pcimodule->get_nth_pci_info(i, item) == B_OK ; i++ )
|
||||
{
|
||||
supported_device_t *supported;
|
||||
|
||||
|
||||
for (supported = m_supported_devices; supported->name; supported++) {
|
||||
if ( (item->vendor_id == supported->vendor_id) &&
|
||||
if ( (item->vendor_id == supported->vendor_id) &&
|
||||
(item->device_id == supported->device_id) )
|
||||
{
|
||||
//Also done in etherpci sample code
|
||||
if ((item->u.h0.interrupt_line == 0) || (item->u.h0.interrupt_line == 0xFF))
|
||||
if ((item->u.h0.interrupt_line == 0) || (item->u.h0.interrupt_line == 0xFF))
|
||||
{
|
||||
TRACE(( kDevName " init_driver(): found %s with invalid IRQ - check IRQ assignement\n", supported->name));
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
TRACE(( kDevName " init_driver(): found %s at IRQ %u \n", supported->name, item->u.h0.interrupt_line));
|
||||
m_devices[found] = item;
|
||||
item = (pci_info *)malloc(sizeof(pci_info));
|
||||
@ -224,9 +224,9 @@ init_driver (void)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
free( item );
|
||||
|
||||
|
||||
//Check if we have found any devices:
|
||||
if ( found == 0 )
|
||||
{
|
||||
@ -234,18 +234,18 @@ init_driver (void)
|
||||
put_module(B_PCI_MODULE_NAME ); //dereference module
|
||||
return ENODEV;
|
||||
}
|
||||
|
||||
|
||||
//Create the devices list
|
||||
{
|
||||
char name[32];
|
||||
|
||||
for (i = 0; i < found; i++)
|
||||
|
||||
for (i = 0; i < found; i++)
|
||||
{
|
||||
sprintf(name, "%s%ld", kDevDir, i);
|
||||
dp83815_names[i] = strdup(name);
|
||||
}
|
||||
dp83815_names[i] = NULL;
|
||||
}
|
||||
}
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -260,8 +260,8 @@ uninit_driver (void)
|
||||
int index;
|
||||
void *item;
|
||||
TRACE(( kDevName ": uninit_driver()\n" ));
|
||||
|
||||
for (index = 0; (item = dp83815_names[index]) != NULL; index++)
|
||||
|
||||
for (index = 0; (item = dp83815_names[index]) != NULL; index++)
|
||||
{
|
||||
free(item);
|
||||
free(m_devices[index]);
|
||||
@ -281,7 +281,7 @@ open_hook(const char *name, uint32 flags, void** cookie)
|
||||
unsigned char cmd;
|
||||
|
||||
TRACE(( kDevName " open_hook()\n" ));
|
||||
|
||||
|
||||
// verify device access
|
||||
{
|
||||
char *thisName;
|
||||
@ -294,15 +294,15 @@ open_hook(const char *name, uint32 flags, void** cookie)
|
||||
}
|
||||
if (!thisName)
|
||||
return EINVAL;
|
||||
|
||||
|
||||
// check if device is already open
|
||||
mask = 1L << temp8;
|
||||
if (atomic_or(&m_openmask, mask) & mask)
|
||||
return B_BUSY;
|
||||
}
|
||||
|
||||
|
||||
//Create a structure that contains the internals
|
||||
if (!(*cookie = data = (dp83815_properties_t *)malloc(sizeof(dp83815_properties_t))))
|
||||
if (!(*cookie = data = (dp83815_properties_t *)malloc(sizeof(dp83815_properties_t))))
|
||||
{
|
||||
TRACE(( kDevName " open_hook(): Out of memory\n" ));
|
||||
return B_NO_MEMORY;
|
||||
@ -310,13 +310,13 @@ open_hook(const char *name, uint32 flags, void** cookie)
|
||||
|
||||
//Set status to open:
|
||||
m_openmask &= ~( 1L << temp8 );
|
||||
|
||||
|
||||
//Clear memory
|
||||
memset( data , 0 , sizeof( dp83815_properties_t ) );
|
||||
|
||||
|
||||
//Set the ID
|
||||
data->device_id = temp8;
|
||||
|
||||
|
||||
// Create lock
|
||||
data->lock = create_sem( 1 , kDevName " data protect" );
|
||||
set_sem_owner( data->lock , B_SYSTEM_TEAM );
|
||||
@ -324,18 +324,18 @@ open_hook(const char *name, uint32 flags, void** cookie)
|
||||
set_sem_owner( data->Rx.Sem , B_SYSTEM_TEAM );
|
||||
data->Tx.Sem = create_sem( 1 , kDevName " write wait" );
|
||||
set_sem_owner( data->Tx.Sem , B_SYSTEM_TEAM );
|
||||
|
||||
|
||||
//Set up the cookie
|
||||
data->pcii = m_devices[data->device_id];
|
||||
|
||||
|
||||
//Enable the registers
|
||||
dp83815_init_registers( data );
|
||||
|
||||
/* enable pci address access */
|
||||
|
||||
/* enable pci address access */
|
||||
cmd = m_pcimodule->read_pci_config(data->pcii->bus, data->pcii->device, data->pcii->function, PCI_command, 2);
|
||||
cmd = cmd | PCI_command_io | PCI_command_master | PCI_command_memory;
|
||||
m_pcimodule->write_pci_config(data->pcii->bus, data->pcii->device, data->pcii->function, PCI_command, 2, cmd );
|
||||
|
||||
|
||||
if (allocate_resources(data) != B_OK)
|
||||
goto err1;
|
||||
|
||||
@ -352,32 +352,32 @@ open_hook(const char *name, uint32 flags, void** cookie)
|
||||
}
|
||||
|
||||
write32(REG_CR, CR_RXR|CR_TXR); /* Reset Tx & Rx */
|
||||
|
||||
|
||||
if ( init_ring_buffers(data) != B_OK ) /* Init ring buffers */
|
||||
goto err1;
|
||||
|
||||
|
||||
write32(REG_RFCR, RFCR_RFEN|RFCR_AAB|RFCR_AAM|RFCR_AAU);
|
||||
|
||||
|
||||
write32(REG_RXCFG, RXCFG_ATP|RXCFG_DRTH(31)); /* Set the drth */
|
||||
|
||||
|
||||
write32(REG_TXCFG, TXCFG_CSI|
|
||||
TXCFG_HBI|
|
||||
TXCFG_ATP|
|
||||
TXCFG_MXDMA_256|
|
||||
TXCFG_FLTH(16)|
|
||||
TXCFG_DRTH(16) );
|
||||
|
||||
|
||||
write32(REG_IMR, ISR_RXIDLE | ISR_TXOK | ISR_RXOK );
|
||||
|
||||
|
||||
write32(REG_CR, CR_RXE); /* Enable Rx */
|
||||
write32(REG_IER, 1); /* Enable interrupts */
|
||||
|
||||
|
||||
return B_OK;
|
||||
|
||||
err1:
|
||||
free_resources(data);
|
||||
free(data);
|
||||
return B_ERROR;
|
||||
return B_ERROR;
|
||||
}
|
||||
|
||||
static status_t
|
||||
@ -387,12 +387,12 @@ read_hook (void* cookie, off_t position, void *buf, size_t* num_bytes)
|
||||
cpu_status former;
|
||||
descriptor_t* desc;
|
||||
size_t length = 0;
|
||||
|
||||
|
||||
TRACE(( kDevName ": read_hook()\n" ));
|
||||
|
||||
//if( !data->nonblocking )
|
||||
acquire_sem_etc( data->Rx.Sem, 1, B_CAN_INTERRUPT|data->blockFlag, NONBLOCK_WAIT );
|
||||
|
||||
|
||||
{
|
||||
former = disable_interrupts();
|
||||
acquire_spinlock(&data->Rx.Lock);
|
||||
@ -403,18 +403,18 @@ read_hook (void* cookie, off_t position, void *buf, size_t* num_bytes)
|
||||
}
|
||||
|
||||
length= DESC_LENGTH&desc->cmd;
|
||||
|
||||
|
||||
if( desc->cmd & (DESC_RXA|DESC_RXO|DESC_LONG|DESC_RUNT|DESC_ISE|DESC_CRCE|DESC_FAE|DESC_LBP|DESC_COL) )
|
||||
TRACE(( "desc cmd: %x\n", desc->cmd ));
|
||||
|
||||
|
||||
if( length < 64 ) {
|
||||
*num_bytes = 0;
|
||||
return B_ERROR;
|
||||
}
|
||||
|
||||
|
||||
if( *num_bytes < length )
|
||||
length = *num_bytes;
|
||||
|
||||
|
||||
memcpy(buf, desc->virt_buff,length);
|
||||
desc->cmd = DESC_LENGTH&MAX_PACKET_SIZE;
|
||||
*num_bytes = length;
|
||||
@ -435,10 +435,10 @@ write_hook (void* cookie, off_t position, const void* buffer, size_t* num_bytes)
|
||||
descriptor_t* desc;
|
||||
|
||||
TRACE(( kDevName " write_hook()\n" ));
|
||||
|
||||
|
||||
acquire_sem( data->lock );
|
||||
acquire_sem_etc( data->Tx.Sem, 1, B_CAN_INTERRUPT|data->blockFlag, NONBLOCK_WAIT );
|
||||
|
||||
|
||||
{
|
||||
former = disable_interrupts();
|
||||
acquire_spinlock(&data->Tx.Lock);
|
||||
@ -447,7 +447,7 @@ write_hook (void* cookie, off_t position, const void* buffer, size_t* num_bytes)
|
||||
release_spinlock(&data->Tx.Lock);
|
||||
restore_interrupts(former);
|
||||
}
|
||||
|
||||
|
||||
if ( *num_bytes > MAX_PACKET_SIZE ) { /* if needed */
|
||||
TRACE(( "Had to truncate the packet from %d to %d\n", *num_bytes, MAX_PACKET_SIZE));
|
||||
*num_bytes = MAX_PACKET_SIZE; /* truncate the packet */
|
||||
@ -474,29 +474,29 @@ static status_t
|
||||
control_hook (void* cookie, uint32 op, void* arg, size_t len)
|
||||
{
|
||||
dp83815_properties_t *data = (dp83815_properties_t *)cookie;
|
||||
TRACE(( kDevName " control_hook()\n" ));
|
||||
|
||||
TRACE(( kDevName " control_hook()\n" ));
|
||||
|
||||
switch ( op )
|
||||
{
|
||||
{
|
||||
case ETHER_INIT:
|
||||
TRACE(( kDevName " control_hook(): Wants us to init... ;-)\n" ));
|
||||
return B_NO_ERROR;
|
||||
|
||||
|
||||
case ETHER_GETADDR:
|
||||
if ( data == NULL )
|
||||
return B_ERROR;
|
||||
|
||||
|
||||
TRACE(( kDevName " control_hook(): Wants our address...\n" ));
|
||||
memcpy( arg , (void *) &(data->address) , sizeof( ether_address_t ) );
|
||||
return B_OK;
|
||||
|
||||
|
||||
case ETHER_ADDMULTI:
|
||||
return domulti(data, (unsigned char *)arg);
|
||||
|
||||
case ETHER_NONBLOCK:
|
||||
if ( data == NULL )
|
||||
return B_ERROR;
|
||||
|
||||
|
||||
TRACE(( kDevName " control_hook(): Wants to set block/nonblock\n" ));
|
||||
|
||||
if (*((int32 *)arg))
|
||||
@ -505,21 +505,21 @@ control_hook (void* cookie, uint32 op, void* arg, size_t len)
|
||||
data->blockFlag = 0;
|
||||
|
||||
return B_NO_ERROR;
|
||||
|
||||
|
||||
case ETHER_REMMULTI:
|
||||
TRACE(( kDevName " control_hook(): Wants REMMULTI\n" ));
|
||||
return B_OK;
|
||||
|
||||
|
||||
case ETHER_SETPROMISC:
|
||||
TRACE(( kDevName " control_hook(): Wants PROMISC\n" ));
|
||||
return B_OK;
|
||||
|
||||
|
||||
case ETHER_GETFRAMESIZE:
|
||||
TRACE(( kDevName " control_hook(): Wants GETFRAMESIZE\n" ));
|
||||
*( (unsigned int *)arg ) = 1514;
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
return B_BAD_VALUE;
|
||||
}
|
||||
|
||||
@ -533,14 +533,14 @@ dp83815_interrupt_hook(void *cookie)
|
||||
if ( isr == 0 ) return B_UNHANDLED_INTERRUPT;
|
||||
|
||||
if ( isr & ISR_RXOK ) {
|
||||
int num_packets = 0;
|
||||
descriptor_t *curr = data->Rx.CurrInt;
|
||||
int num_packets = 0;
|
||||
descriptor_t *curr = data->Rx.CurrInt;
|
||||
|
||||
while( curr->cmd & DESC_OWN ) {
|
||||
curr = curr->virt_next;
|
||||
num_packets++;
|
||||
while( curr->cmd & DESC_OWN ) {
|
||||
curr = curr->virt_next;
|
||||
num_packets++;
|
||||
}
|
||||
|
||||
|
||||
data->Rx.CurrInt = curr;
|
||||
data->stats.rx_ok += num_packets;
|
||||
if( num_packets > 1 )
|
||||
@ -548,18 +548,18 @@ dp83815_interrupt_hook(void *cookie)
|
||||
if( num_packets )
|
||||
release_sem_etc(data->Rx.Sem, num_packets, B_DO_NOT_RESCHEDULE);
|
||||
}
|
||||
|
||||
|
||||
if( isr & ISR_TXOK ) {
|
||||
data->stats.tx_ok++;
|
||||
release_sem_etc(data->Tx.Sem, 1, B_DO_NOT_RESCHEDULE);
|
||||
}
|
||||
|
||||
|
||||
if( isr & ISR_RXIDLE )
|
||||
TRACE(( "RX IS IDLE!\n"));
|
||||
|
||||
|
||||
if( isr & ~(ISR_TXIDLE|ISR_TXOK|ISR_RXOK|ISR_RXIDLE|ISR_RXEARLY) )
|
||||
TRACE(( "ISR: %x\n", isr));
|
||||
|
||||
|
||||
return B_INVOKE_SCHEDULER;
|
||||
}
|
||||
|
||||
@ -570,7 +570,7 @@ close_hook (void* cookie)
|
||||
|
||||
write32(REG_IER, 0); /* Disable interrupts */
|
||||
write32(REG_CR, CR_RXD|CR_TXD); /* Disable Rx & Tx */
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -580,24 +580,24 @@ free_hook (void* cookie)
|
||||
dp83815_properties_t *data = (dp83815_properties_t *) cookie;
|
||||
|
||||
TRACE(( kDevName " free_hook()\n" ));
|
||||
|
||||
|
||||
while ( data->Tx.Lock ); /* wait for any current writes to finish */
|
||||
while ( data->Rx.Lock ); /* wait for any current reads to finish */
|
||||
|
||||
//Remove interrupt handler
|
||||
remove_io_interrupt_handler( data->pcii->u.h0.interrupt_line ,
|
||||
remove_io_interrupt_handler( data->pcii->u.h0.interrupt_line ,
|
||||
dp83815_interrupt_hook , cookie );
|
||||
|
||||
|
||||
m_openmask &= ~(1L << data->device_id);
|
||||
|
||||
|
||||
free_resources(data); /* unblock waiting threads */
|
||||
|
||||
//Finally, free the cookie
|
||||
free( data );
|
||||
|
||||
|
||||
//Put the pci module
|
||||
put_module( B_PCI_MODULE_NAME );
|
||||
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -628,42 +628,37 @@ static status_t init_ring_buffers(dp83815_properties_t *data)
|
||||
area_info info;
|
||||
physical_entry map[2];
|
||||
uint32 pages;
|
||||
|
||||
|
||||
descriptor_t *RxDescRing = NULL;
|
||||
descriptor_t *TxDescRing = NULL;
|
||||
|
||||
descriptor_t *desc_base_virt_addr;
|
||||
uint32 desc_base_phys_addr;
|
||||
|
||||
|
||||
void *buff_base_virt_addr;
|
||||
uint32 buff_base_phys_addr;
|
||||
|
||||
|
||||
data->mem_area = 0;
|
||||
data->mem_area = 0;
|
||||
|
||||
#define NUM_BUFFS 2*MAX_DESC
|
||||
|
||||
pages = pages_needed(2*MAX_DESC*sizeof(descriptor_t) + NUM_BUFFS*BUFFER_SIZE);
|
||||
|
||||
data->mem_area = create_area(kDevName " desc buffer",
|
||||
(void**)&RxDescRing,
|
||||
B_ANY_KERNEL_ADDRESS,
|
||||
pages*B_PAGE_SIZE,
|
||||
B_FULL_LOCK|B_CONTIGUOUS,
|
||||
B_READ_AREA|B_WRITE_AREA);
|
||||
data->mem_area = create_area(kDevName " desc buffer", (void**)&RxDescRing,
|
||||
B_ANY_KERNEL_ADDRESS, pages * B_PAGE_SIZE, B_CONTIGUOUS,
|
||||
B_READ_AREA | B_WRITE_AREA);
|
||||
if( data->mem_area < 0 )
|
||||
return -1;
|
||||
|
||||
|
||||
get_area_info(data->mem_area, &info);
|
||||
get_memory_map(info.address, info.size, map, 4);
|
||||
|
||||
|
||||
desc_base_phys_addr = (int)map[0].address + NUM_BUFFS*BUFFER_SIZE;
|
||||
desc_base_virt_addr = (info.address + NUM_BUFFS*BUFFER_SIZE);
|
||||
|
||||
|
||||
buff_base_phys_addr = (int)map[0].address;
|
||||
buff_base_virt_addr = info.address;
|
||||
|
||||
|
||||
RxDescRing = desc_base_virt_addr;
|
||||
for( i = 0; i < MAX_DESC; i++ ) {
|
||||
@ -682,14 +677,14 @@ static status_t init_ring_buffers(dp83815_properties_t *data)
|
||||
TxDescRing[i].virt_next = &TxDescRing[(i+1)%MAX_DESC];
|
||||
TxDescRing[i].virt_buff = buff_base_virt_addr + ((i+MAX_DESC)*BUFFER_SIZE);
|
||||
}
|
||||
|
||||
|
||||
data->Rx.Curr = RxDescRing;
|
||||
data->Tx.Curr = TxDescRing;
|
||||
|
||||
data->Rx.CurrInt = RxDescRing;
|
||||
data->Tx.CurrInt = TxDescRing;
|
||||
|
||||
|
||||
|
||||
write32(REG_RXDP, desc_base_phys_addr); /* set the initial rx descriptor */
|
||||
|
||||
i = desc_base_phys_addr+MAX_DESC*sizeof(descriptor_t);
|
||||
@ -707,18 +702,18 @@ static status_t allocate_resources(dp83815_properties_t *data)
|
||||
}
|
||||
set_sem_owner(data->Rx.Sem, B_SYSTEM_TEAM);
|
||||
|
||||
|
||||
|
||||
/* intialize tx semaphore with the number of free tx buffers */
|
||||
if ((data->Tx.Sem = create_sem(MAX_DESC, kDevName " tx")) < 0) {
|
||||
delete_sem(data->Rx.Sem);
|
||||
TRACE(( kDevName " create read sem failed %x \n", data->Tx.Sem));
|
||||
return (data->Tx.Sem);
|
||||
}
|
||||
|
||||
|
||||
set_sem_owner(data->Tx.Sem, B_SYSTEM_TEAM);
|
||||
|
||||
data->blockFlag = 0;
|
||||
|
||||
|
||||
return (B_OK);
|
||||
}
|
||||
|
||||
|
@ -4,24 +4,24 @@
|
||||
* Copyright (c) 2002, Marcus Overhagen <marcus@overhagen.de>
|
||||
*
|
||||
* All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* - Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* - Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
@ -35,35 +35,44 @@ spinlock slock = 0;

uint32 round_to_pagesize(uint32 size);

cpu_status lock(void)

cpu_status
lock(void)
{
cpu_status status = disable_interrupts();
acquire_spinlock(&slock);
return status;
}

void unlock(cpu_status status)

void
unlock(cpu_status status)
{
release_spinlock(&slock);
restore_interrupts(status);
}

uint32 round_to_pagesize(uint32 size)

uint32
round_to_pagesize(uint32 size)
{
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}

area_id alloc_mem(void **log, void **phy, size_t size, const char *name)

area_id
alloc_mem(void **log, void **phy, size_t size, const char *name)
{
physical_entry pe;
void * logadr;
area_id areaid;
status_t rv;

dprintf("dp83815: allocating %ld bytes for %s\n",size,name);

size = round_to_pagesize(size);
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS,size,B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
areaid = create_area(name, &logadr, B_ANY_KERNEL_ADDRESS, size,
B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (areaid < B_OK) {
dprintf("couldn't allocate area %s\n",name);
return B_ERROR;
@ -83,12 +92,14 @@ area_id alloc_mem(void **log, void **phy, size_t size, const char *name)
return areaid;
}

/* This is not the most advanced method to map physical memory for io access.
 * Perhaps using B_ANY_KERNEL_ADDRESS instead of B_ANY_KERNEL_BLOCK_ADDRESS
 * makes the whole offset calculation and relocation obsolete. But the code
 * below does work, and I can't test if using B_ANY_KERNEL_ADDRESS also works.
 */
area_id map_mem(void **log, void *phy, size_t size, const char *name)

area_id
map_mem(void **log, void *phy, size_t size, const char *name)
{
uint32 offset;
void *phyadr;
@ -105,6 +116,6 @@ area_id map_mem(void **log, void *phy, size_t size, const char *name)

dprintf("physical = %p, logical = %p, offset = %#lx, phyadr = %p, mapadr = %p, size = %#lx, area = %#lx\n",
phy, *log, offset, phyadr, mapadr, size, area);

return area;
}

@ -1,7 +1,7 @@
/* Intel PRO/1000 Family Driver
 * Copyright (C) 2004 Marcus Overhagen <marcus@overhagen.de>. All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * Permission to use, copy, modify and distribute this software and its
 * documentation for any purpose and without fee is hereby granted, provided
 * that the above copyright notice appear in all copies, and that both the
 * copyright notice and this permission notice appear in supporting documentation.
@ -23,39 +23,46 @@
#undef malloc
#undef free


void *
driver_malloc(int size, int p2, int p3)
{
return malloc(size);
}


void
driver_free(void *p, int p2)
{
free(p);
}


void *
contigmalloc(int size, int p1, int p2, int p3, int p4, int p5, int p6)
{
void *adr;
if (create_area("contigmalloc", &adr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, 0) < 0)
return 0;
if (create_area("contigmalloc", &adr, B_ANY_KERNEL_ADDRESS, size,
B_CONTIGUOUS, 0) < B_OK)
return NULL;
return adr;
}

void

void
contigfree(void *p, int p1, int p2)
{
delete_area(area_for(p));
}


void
callout_handle_init(struct callout_handle *handle)
{
handle->timer = -1;
}


struct callout_handle
timeout(timer_function func, void *cookie, bigtime_t timeout)
{
@ -64,12 +71,14 @@ timeout(timer_function func, void *cookie, bigtime_t timeout)
return handle;
}


void
untimeout(timer_function func, void *cookie, struct callout_handle handle)
{
delete_timer(handle.timer);
}


struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, int d, int e, int f, int g)
{
@ -80,7 +89,7 @@ bus_alloc_resource(device_t dev, int type, int *rid, int d, int e, int f, int g)
INIT_DEBUGOUT2("bus_alloc_resource SYS_RES_IOPORT, reg 0x%x, adr %p\n", *rid, (void *)v);
return (struct resource *) v;
}

case SYS_RES_MEMORY:
{
uint32 v = pci_read_config(dev, *rid, 4) & PCI_address_memory_32_mask;
@ -91,7 +100,7 @@ bus_alloc_resource(device_t dev, int type, int *rid, int d, int e, int f, int g)
return 0;
return (struct resource *) virt;
}

case SYS_RES_IRQ:
{
uint8 v = pci_read_config(dev, PCI_interrupt_line, 1);
@ -101,13 +110,14 @@ bus_alloc_resource(device_t dev, int type, int *rid, int d, int e, int f, int g)
}
return (struct resource *)(int)v;
}

default:
INIT_DEBUGOUT("bus_alloc_resource default!\n");
return 0;
}
}


void
bus_release_resource(device_t dev, int type, int reg, struct resource *res)
{
@ -115,44 +125,47 @@ bus_release_resource(device_t dev, int type, int reg, struct resource *res)
case SYS_RES_IOPORT:
case SYS_RES_IRQ:
return;

case SYS_RES_MEMORY:
delete_area(area_for(res));
return;

default:
INIT_DEBUGOUT("bus_release_resource default!\n");
return;
}
}

uint32

uint32
rman_get_start(struct resource *res)
{
return (uint32)res;
}

struct int_tag
{

struct int_tag {
interrupt_handler int_func;
void *cookie;
int irq;
};


int
bus_setup_intr(device_t dev, struct resource *res, int p3, interrupt_handler int_func, void *cookie, void **tag)
{
int irq = (int)res;

struct int_tag *int_tag = (struct int_tag *) malloc(sizeof(struct int_tag));
int_tag->int_func = int_func;
int_tag->cookie = cookie;
int_tag->irq = irq;
*tag = int_tag;

return install_io_interrupt_handler(irq, int_func, cookie, 0);
}


void
bus_teardown_intr(device_t dev, struct resource *res, void *tag)
{
@ -921,7 +921,7 @@ IPW2100::AllocateContiguous(const char *name, void **logicalAddress,
size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
void *virtualAddress = NULL;
area_id area = create_area(name, &virtualAddress, B_ANY_KERNEL_ADDRESS,
size, B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
size, B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (area < B_OK) {
TRACE_ALWAYS(("IPW2100: allocating contiguous area failed\n"));
return area;
@ -1,7 +1,7 @@
/* Realtek RTL8169 Family Driver
 * Copyright (C) 2004 Marcus Overhagen <marcus@overhagen.de>. All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * Permission to use, copy, modify and distribute this software and its
 * documentation for any purpose and without fee is hereby granted, provided
 * that the above copyright notice appear in all copies, and that both the
 * copyright notice and this permission notice appear in supporting documentation.
@ -25,24 +25,28 @@
#include "debug.h"
#include "util.h"


static inline uint32
round_to_pagesize(uint32 size)
{
return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}


area_id
alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *name)
alloc_mem(void **virt, void **phy, size_t size, uint32 protection,
const char *name)
{
physical_entry pe;
void * virtadr;
area_id areaid;
status_t rv;

TRACE("allocating %ld bytes for %s\n", size, name);

size = round_to_pagesize(size);
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK | B_CONTIGUOUS, protection);
areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
B_CONTIGUOUS, protection);
if (areaid < B_OK) {
ERROR("couldn't allocate area %s\n", name);
return B_ERROR;
@ -62,8 +66,10 @@ alloc_mem(void **virt, void **phy, size_t size, uint32 protection, const char *n
return areaid;
}


area_id
map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name)
map_mem(void **virt, void *phy, size_t size, uint32 protection,
const char *name)
{
uint32 offset;
void *phyadr;
@ -75,16 +81,18 @@ map_mem(void **virt, void *phy, size_t size, uint32 protection, const char *name
offset = (uint32)phy & (B_PAGE_SIZE - 1);
phyadr = (char *)phy - offset;
size = round_to_pagesize(size + offset);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS, protection, &mapadr);
area = map_physical_memory(name, phyadr, size, B_ANY_KERNEL_BLOCK_ADDRESS,
protection, &mapadr);
if (area < B_OK) {
ERROR("mapping '%s' failed, error 0x%lx (%s)\n", name, area, strerror(area));
return area;
}

*virt = (char *)mapadr + offset;

TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = %p, size = %ld, area = 0x%08lx\n",
phy, *virt, offset, phyadr, mapadr, size, area);

TRACE("physical = %p, virtual = %p, offset = %ld, phyadr = %p, mapadr = "
"%p, size = %ld, area = 0x%08lx\n", phy, *virt, offset, phyadr, mapadr,
size, area);

return area;
}
@ -431,9 +431,8 @@ block_io_init_device(void *_data, void **cookie)
// (else, we may be on the paging path and have no S/G entries at hand)
device->phys_vecs_pool = locked_pool->create(
params.max_sg_blocks * sizeof(physical_entry),
sizeof( physical_entry ) - 1,
0, 16*1024, 32, 1, "block io sg lists", B_FULL_LOCK | B_CONTIGUOUS,
NULL, NULL, NULL);
sizeof( physical_entry ) - 1, 0, 16*1024, 32, 1, "block io sg lists",
B_CONTIGUOUS, NULL, NULL, NULL);

// free(tmp_name);

@ -492,7 +491,7 @@ block_io_init_buffer(void)

res = block_io_buffer_area = create_area("block_io_buffer",
(void **)&block_io_buffer, B_ANY_KERNEL_ADDRESS,
block_io_buffer_size, B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
block_io_buffer_size, B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (res < 0)
goto err2;

@ -392,7 +392,7 @@ ide_adapter_init_channel(device_node *node,
// PRDT must be contiguous, dword-aligned and must not cross 64K boundary
prdt_size = (IDE_ADAPTER_MAX_SG_COUNT * sizeof( prd_entry ) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1);
channel->prd_area = create_area("prd", (void **)&channel->prdt, B_ANY_KERNEL_ADDRESS,
prdt_size, B_FULL_LOCK | B_CONTIGUOUS, 0);
prdt_size, B_CONTIGUOUS, 0);
if (channel->prd_area < B_OK) {
res = channel->prd_area;
goto err2;
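The ide_adapter hunk above notes that the PRDT must be physically contiguous, dword-aligned and must not cross a 64K boundary, and it rounds the table size up to whole pages before creating the area. A small sketch of that arithmetic follows; MAX_SG_COUNT and the prd_entry layout are hypothetical stand-ins for the driver's IDE_ADAPTER_MAX_SG_COUNT and prd_entry types.

#include <KernelExport.h>
#include <OS.h>

/* Hypothetical sizes, for illustration only. */
#define MAX_SG_COUNT	512
typedef struct { uint32 address; uint16 count; uint16 flags; } prd_entry;

/* Round a byte count up to a whole number of pages, as the PRDT
 * allocation above does. */
static inline size_t
round_to_pages(size_t size)
{
	return (size + B_PAGE_SIZE - 1) & ~(size_t)(B_PAGE_SIZE - 1);
}

/* Check whether a physical range crosses a 64K boundary, the condition
 * the comment in the hunk above forbids for the PRDT. */
static inline bool
crosses_64k_boundary(uint32 physicalAddress, size_t size)
{
	return (physicalAddress & ~(uint32)0xffff)
		!= ((physicalAddress + size - 1) & ~(uint32)0xffff);
}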