Removed whitespace at the end of lines.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26817 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-08-05 16:56:25 +00:00
parent 5737b4aed8
commit 7bcc8f0f61


@@ -8,16 +8,16 @@
/*
DMA buffer handling.
If the peripheral driver hasn't made sure that the data of a request
is DMA safe, we check that and copy data to a buffer if needed.
The buffer is enlarged on demand and destroyed after a time-out
by a daemon. Obviously, it's a good idea to avoid all this, therefore
blkman takes care of that for read/write requests.
To be able to copy data back after the request was finished, we need a
S/G list to the original data as the copying is done in a different
thread/process context (namely the service thread).
Currently, there is only one buffer per device; in the future,
we may support multiple buffers, especially if we want to support
more than 4 GB memory, which leads to trouble with 32-bit PCI cards.
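
In outline, the bounce-buffer scheme described above works as in the following sketch; is_sg_list_dma_safe() and the SCSI_DIR_* flags are from this file, while submit_to_controller(), copy_to_bounce() and copy_back_from_bounce() are hypothetical placeholder names, not functions of this driver:

/* Sketch of the bounce-buffer flow; helper names are placeholders. */
static bool
execute_with_bounce(scsi_ccb *request)
{
	if (is_sg_list_dma_safe(request)) {
		/* the S/G list already satisfies the controller's restrictions */
		return submit_to_controller(request);
	}

	/* stage the data in a DMA-safe buffer before the transfer starts */
	if (!copy_to_bounce(request))
		return false;

	bool ok = submit_to_controller(request);

	/* for reads, copy the result back via the saved S/G list of the
	   original data (done in the service thread in this driver) */
	if (ok && (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN)
		copy_back_from_bounce(request);

	return ok;
}
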
@@ -34,7 +34,7 @@
/*! Check whether S/G list of request is supported DMA controller */
static bool
is_sg_list_dma_safe(scsi_ccb *request)
{
scsi_bus_info *bus = request->bus;
const physical_entry *sg_list = request->sg_list;
@@ -49,7 +49,7 @@ is_sg_list_dma_safe(scsi_ccb *request)
SHOW_FLOW0(1, "S/G-list too long");
return false;
}
// if there are no further restrictions - be happy
if (dma_boundary == ~0UL && alignment == 0 && max_sg_block_size == 0)
return true;
@@ -60,11 +60,11 @@ is_sg_list_dma_safe(scsi_ccb *request)
// calculate space up to the next DMA boundary crossing and
// verify that it isn't crossed
max_len = (dma_boundary + 1) -
((addr_t)sg_list->address & dma_boundary);
if (max_len < sg_list->size) {
SHOW_FLOW(0, "S/G-entry crosses DMA boundary @0x%x",
(int)sg_list->address + (int)max_len);
return false;
}
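
The boundary arithmetic above is easiest to verify with concrete numbers; here a hypothetical 64 KB boundary (dma_boundary = 0xffff), not a value taken from this driver:

/* Worked example of the boundary-crossing check above. */
addr_t dma_boundary = 0xffff;	/* entries must not cross 64 KB lines */
addr_t address = 0x1f800;	/* 2 KB below the boundary at 0x20000 */
size_t size = 0x1000;		/* 4 KB S/G entry */

/* bytes left before the next boundary: 0x20000 - 0x1f800 = 0x800 */
addr_t max_len = (dma_boundary + 1) - (address & dma_boundary);

/* 0x800 < 0x1000: this entry crosses the boundary, so it isn't DMA safe */
bool crosses = max_len < size;
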
@@ -73,6 +73,7 @@ is_sg_list_dma_safe(scsi_ccb *request)
if (((addr_t)sg_list->address & alignment) != 0) {
SHOW_FLOW(0, "S/G-entry has bad alignment @0x%x",
(int)sg_list->address);
panic("XXX");
return false;
}
@@ -115,7 +116,7 @@ scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
for (; size > 0 && num_vecs > 0; ++sg_list, --num_vecs) {
addr_t virtualAddress;
size_t bytes;
bytes = min( size, sg_list->size );
if (vm_get_physical_page((addr_t)sg_list->address, &virtualAddress,
@@ -131,7 +132,7 @@ scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
buffer_data += bytes;
}
return true;
}
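
The loop above maps each physical S/G entry into kernel address space just long enough to memcpy it in the requested direction. Its overall shape, with the lines elided between the hunks filled in schematically (the wait-mode flag for vm_get_physical_page is left as a placeholder, and error handling is reduced to the failure return):

/* Schematic shape of the copy loop; the elided details are guessed. */
for (; size > 0 && num_vecs > 0; ++sg_list, --num_vecs) {
	addr_t virtualAddress;
	size_t bytes = min(size, sg_list->size);

	if (vm_get_physical_page((addr_t)sg_list->address, &virtualAddress,
			flags /* wait-mode flag, elided in the hunk above */) != B_OK)
		return false;

	if (to_buffer)
		memcpy(buffer_data, (void *)virtualAddress, bytes);	/* data -> bounce */
	else
		memcpy((void *)virtualAddress, buffer_data, bytes);	/* bounce -> data */

	vm_put_physical_page(virtualAddress);

	size -= bytes;
	buffer_data += bytes;
}
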
@@ -176,15 +177,15 @@ static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
size_t sg_list_size, sg_list_entries;
// free old buffer first
scsi_free_dma_buffer( buffer );
// just in case alignment is ridiculously huge
size = (size + dma_params->alignment) & ~dma_params->alignment;
size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
// calculate worst case number of S/G entries, i.e. if they are non-contiguous;
// there is a controller limit and a limit by our own S/G manager to check
if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
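
The two mask round-ups above are standard power-of-two arithmetic. A worked example with made-up values (4 KB pages and a 512-byte alignment, so dma_params->alignment = 0x1ff):

/* Worked example of the round-up arithmetic above; values are made up. */
uint32 size = 5000;
uint32 alignment = 0x1ff;	/* alignment is kept in mask form */

size = (size + alignment) & ~alignment;			/* -> 5120 (10 * 512) */
size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);	/* -> 8192 (2 pages) */

/* worst case: physically non-contiguous, one S/G entry per page */
uint32 worst_case_entries = size / B_PAGE_SIZE;		/* -> 2 */
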
@@ -193,29 +194,29 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
uchar *dma_buffer_address_unaligned;
// alright - a contiguous buffer is required to keep S/G table short
SHOW_INFO(1, "need to set up contiguous DMA buffer of size %d",
(int)size);
// verify that we don't get problems with dma boundary
if (boundary != ~0UL) {
if (size > boundary + 1) {
SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%d/%d bytes)",
SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%d/%d bytes)",
(int)size, (int)boundary + 1);
return false;
}
// round up to next power of two and allocate a buffer double the
// needed size so we can cut out an area that doesn't cross
// dma boundary
size = (1 << log2( size )) * 2;
}
buffer->area = create_area("DMA buffer",
(void **)&dma_buffer_address_unaligned,
buffer->area = create_area("DMA buffer",
(void **)&dma_buffer_address_unaligned,
B_ANY_KERNEL_ADDRESS, size,
B_FULL_LOCK | B_CONTIGUOUS, 0 );
if (buffer->area < 0) {
SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes",
SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes",
(int)size);
return false;
}
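
The doubling mentioned in the comment above works because any contiguous block of 2*N bytes, wherever it starts, contains an N-byte window that does not cross an N-byte boundary. With hypothetical numbers:

/* Why double the power-of-two size suffices; numbers are hypothetical. */
size_t boundary_bytes = 0x10000;		/* 64 KB DMA boundary */
size_t alloc_size = boundary_bytes * 2;		/* allocate 128 KB */

addr_t start = 0x12345000;			/* wherever create_area put the buffer */
addr_t aligned = (start + boundary_bytes - 1) & ~(addr_t)(boundary_bytes - 1);
/* aligned == 0x12350000, and aligned + boundary_bytes == 0x12360000
   <= start + alloc_size == 0x12365000: a boundary-free 64 KB window
   always fits inside the allocation */
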
@@ -234,7 +235,7 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
// adjust next boundary if outside allocated area
if( next_boundary > dma_buffer_address_unaligned + size )
next_boundary = dma_buffer_address_unaligned + size;
buffer->size = next_boundary - buffer->address;
} else {
// non-boundary case: use buffer directly
@@ -243,25 +244,25 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
}
} else {
// we can live with a fragmented buffer - very nice
buffer->area = create_area( "DMA buffer",
(void **)&buffer->address,
buffer->area = create_area( "DMA buffer",
(void **)&buffer->address,
B_ANY_KERNEL_ADDRESS, size,
B_FULL_LOCK, 0 );
if (buffer->area < 0) {
SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes",
SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes",
(int)size);
return false;
}
buffer->size = size;
}
// create S/G list
// worst case is one entry per page, and size is page-aligned
sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry );
// create_area has page-granularity
sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
buffer->sg_list_area = create_area("DMA buffer S/G table",
(void **)&buffer->sg_list,
B_ANY_KERNEL_ADDRESS, sg_list_size,
@@ -274,7 +275,7 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
buffer->area = 0;
return false;
}
sg_list_entries = sg_list_size / sizeof( physical_entry );
{
@@ -284,12 +285,12 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
buffer->address,
buffer->size
};
res = get_iovec_memory_map(
&vec, 1, 0, buffer->size,
buffer->sg_list, sg_list_entries, &buffer->sg_count,
&mapped_len );
if( res != B_OK || mapped_len != buffer->size ) {
SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted %d, got %d bytes)",
strerror(res), (int)buffer->size, (int)mapped_len);
@@ -316,17 +317,17 @@ scsi_free_dma_buffer_sg_orig(dma_buffer *buffer)
static bool
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, int size)
{
// free old list first
scsi_free_dma_buffer_sg_orig(buffer);
size = (size * sizeof(physical_entry) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
buffer->sg_orig = create_area("S/G to original data",
(void **)&buffer->sg_list_orig,
buffer->sg_orig = create_area("S/G to original data",
(void **)&buffer->sg_list_orig,
B_ANY_KERNEL_ADDRESS, size,
B_NO_LOCK, 0);
if (buffer->sg_orig < 0) {
SHOW_ERROR(2, "Cannot S/G list buffer to original data of %d bytes",
SHOW_ERROR(2, "Cannot S/G list buffer to original data of %d bytes",
(int)size);
return false;
}
@@ -369,11 +370,11 @@ scsi_dma_buffer_compose_sg_orig(dma_buffer *buffer, scsi_ccb *request)
SHOW_FLOW0(1, "copy S/G list");
memcpy(buffer->sg_list_orig, request->sg_list,
memcpy(buffer->sg_list_orig, request->sg_list,
request->sg_count * sizeof(physical_entry));
buffer->sg_count_orig = request->sg_count;
return true;
}
@@ -386,18 +387,18 @@ scsi_get_dma_buffer(scsi_ccb *request)
{
scsi_device_info *device = request->device;
dma_buffer *buffer;
request->buffered = false;
// perhaps we're lucky and no buffering is needed
if( is_sg_list_dma_safe( request ))
return true;
SHOW_FLOW0(1, "Buffer is not DMA safe" );
dump_sg_table(request->sg_list, request->sg_count);
// only one buffer at a time
acquire_sem(device->dma_buffer_owner);
// make sure the clean-up daemon doesn't bother us
@@ -405,9 +406,9 @@ scsi_get_dma_buffer(scsi_ccb *request)
// there is only one buffer, so no further management
buffer = &device->dma_buffer;
buffer->inuse = true;
buffer->inuse = true;
RELEASE_BEN(&device->dma_buffer_lock);
// memorize buffer for cleanup
@@ -435,7 +436,7 @@ scsi_get_dma_buffer(scsi_ccb *request)
buffer->orig_data = request->data;
buffer->orig_sg_list = request->sg_list;
buffer->orig_sg_count = request->sg_count;
request->data = buffer->address;
request->sg_list = buffer->sg_list;
request->sg_count = buffer->sg_count;
@@ -457,7 +458,7 @@ err:
RELEASE_BEN(&device->dma_buffer_lock);
release_sem(device->dma_buffer_owner);
return false;
}
@@ -469,8 +470,8 @@ scsi_release_dma_buffer(scsi_ccb *request)
{
scsi_device_info *device = request->device;
dma_buffer *buffer = request->dma_buffer;
SHOW_FLOW(1, "Buffering finished, %x, %x",
request->subsys_status & SCSI_SUBSYS_STATUS_MASK,
(int)(request->flags & SCSI_DIR_MASK));
@@ -478,12 +479,12 @@ scsi_release_dma_buffer(scsi_ccb *request)
if ((request->subsys_status & SCSI_SUBSYS_STATUS_MASK) == SCSI_REQ_CMP
&& (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN)
scsi_copy_dma_buffer(request, request->data_length - request->data_resid, false);
// restore request
request->data = buffer->orig_data;
request->sg_list = buffer->orig_sg_list;
request->sg_count = buffer->orig_sg_count;
// free buffer
ACQUIRE_BEN(&device->dma_buffer_lock);