scsi: improved code style and use of B_PRI macros

Jérôme Duval 2013-06-09 17:24:28 +02:00
parent d8189e2aaf
commit 8239f0320d
5 changed files with 49 additions and 50 deletions


@@ -217,29 +217,30 @@ scsi_init_bus(device_node *node, void **cookie)
bus->dma_params.max_sg_block_size &= ~bus->dma_params.alignment;
if (bus->dma_params.alignment > B_PAGE_SIZE) {
SHOW_ERROR(0, "Alignment (0x%x) must be less then B_PAGE_SIZE",
(int)bus->dma_params.alignment);
SHOW_ERROR(0, "Alignment (0x%" B_PRIx32 ") must be less then "
"B_PAGE_SIZE", bus->dma_params.alignment);
res = B_ERROR;
goto err;
}
if (bus->dma_params.max_sg_block_size < 1) {
SHOW_ERROR(0, "Max s/g block size (0x%x) is too small",
(int)bus->dma_params.max_sg_block_size);
SHOW_ERROR(0, "Max s/g block size (0x%" B_PRIx32 ") is too small",
bus->dma_params.max_sg_block_size);
res = B_ERROR;
goto err;
}
if (bus->dma_params.dma_boundary < B_PAGE_SIZE - 1) {
SHOW_ERROR(0, "DMA boundary (0x%x) must be at least B_PAGE_SIZE",
(int)bus->dma_params.dma_boundary);
SHOW_ERROR(0, "DMA boundary (0x%" B_PRIx32 ") must be at least "
"B_PAGE_SIZE", bus->dma_params.dma_boundary);
res = B_ERROR;
goto err;
}
if (bus->dma_params.max_blocks < 1 || bus->dma_params.max_sg_blocks < 1) {
SHOW_ERROR(0, "Max blocks (%d) and max s/g blocks (%d) must be at least 1",
(int)bus->dma_params.max_blocks, (int)bus->dma_params.max_sg_blocks);
SHOW_ERROR(0, "Max blocks (%" B_PRIu32 ") and max s/g blocks (%"
B_PRIu32 ") must be at least 1", bus->dma_params.max_blocks,
bus->dma_params.max_sg_blocks);
res = B_ERROR;
goto err;
}
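
The conversions in this hunk rely on ordinary C string-literal concatenation: each B_PRI* macro from Haiku's SupportDefs.h expands to the conversion suffix matching the fixed-width type, so the format string stays correct without the (int) casts. A minimal standalone sketch of the same pattern, using the standard <inttypes.h> PRI* macros; the variable names and values are made up and only mirror the fields above:

// Standalone illustration, not Haiku code: PRIx32 expands to a string literal
// such as "x" or "lx", which the preprocessor concatenates into the format
// string, so the specifier always matches the 32-bit type exactly.
#include <inttypes.h>
#include <stdio.h>

int main()
{
	uint32_t alignment = 0x1ff;			// hypothetical dma_params.alignment
	uint32_t maxSgBlockSize = 0x200;	// hypothetical max_sg_block_size

	printf("Alignment (0x%" PRIx32 ") must be less than B_PAGE_SIZE\n",
		alignment);
	printf("Max s/g block size (0x%" PRIx32 ") is too small\n", maxSgBlockSize);
	return 0;
}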


@@ -83,8 +83,8 @@ is_sg_list_dma_safe(scsi_ccb *request)
// verify entry size
if (sg_list->size > max_sg_block_size) {
SHOW_FLOW(0, "S/G-entry is too long (%d/%d bytes)",
(int)sg_list->size, (int)max_sg_block_size);
SHOW_FLOW(0, "S/G-entry is too long (%" B_PRIuPHYSADDR "/%" B_PRIu32
" bytes)", sg_list->size, max_sg_block_size);
return false;
}
}
@@ -103,7 +103,7 @@ scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
uint32 num_vecs = buffer->sg_count_orig;
uchar *buffer_data = buffer->address;
SHOW_FLOW(1, "to_buffer=%d, %d bytes", to_buffer, (int)size);
SHOW_FLOW(1, "to_buffer=%d, %" B_PRIu32 " bytes", to_buffer, size);
// survive even if controller returned invalid data size
size = min_c(size, request->data_length);
@@ -154,8 +154,6 @@ scsi_free_dma_buffer(dma_buffer *buffer)
static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
size_t sg_list_size, sg_list_entries;
// free old buffer first
scsi_free_dma_buffer(buffer);
@@ -171,14 +169,14 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
uint32 boundary = dma_params->dma_boundary;
// alright - a contiguous buffer is required to keep S/G table short
SHOW_INFO(1, "need to setup contiguous DMA buffer of size %d",
(int)size);
SHOW_INFO(1, "need to setup contiguous DMA buffer of size %" B_PRIu32,
size);
// verify that we don't get problems with dma boundary
if (boundary != ~(uint32)0) {
if (size > boundary + 1) {
SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%d/%d bytes)",
(int)size, (int)boundary + 1);
SHOW_ERROR(2, "data is longer then maximum DMA transfer len (%"
B_PRId32 "/%" B_PRId32 " bytes)", size, boundary + 1);
return false;
}
}
@@ -199,8 +197,8 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
(void**)&buffer->address);
if (buffer->area < 0) {
SHOW_ERROR(2, "Cannot create contignous DMA buffer of %d bytes",
(int)size);
SHOW_ERROR(2, "Cannot create contignous DMA buffer of %" B_PRIu32
" bytes", size);
return false;
}
@@ -212,8 +210,8 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
B_32_BIT_FULL_LOCK, 0);
// TODO: Use B_FULL_LOCK, if possible!
if (buffer->area < 0) {
SHOW_ERROR(2, "Cannot create DMA buffer of %d bytes",
(int)size);
SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes",
size);
return false;
}
@@ -222,7 +220,7 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
// create S/G list
// worst case is one entry per page, and size is page-aligned
sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry );
size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof( physical_entry );
// create_area has page-granularity
sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
@@ -231,15 +229,15 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
B_32_BIT_FULL_LOCK, 0);
// TODO: Use B_FULL_LOCK, if possible!
if (buffer->sg_list_area < 0) {
SHOW_ERROR( 2, "Cannot craete DMA buffer S/G list of %d bytes",
(int)sg_list_size );
SHOW_ERROR( 2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE
" bytes", sg_list_size );
delete_area(buffer->area);
buffer->area = 0;
return false;
}
sg_list_entries = sg_list_size / sizeof( physical_entry );
size_t sg_list_entries = sg_list_size / sizeof(physical_entry);
{
size_t mapped_len;
@@ -255,8 +253,9 @@ scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
&mapped_len );
if( res != B_OK || mapped_len != buffer->size ) {
SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted %d, got %d bytes)",
strerror(res), (int)mapped_len, (int)buffer->size);
SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted "
"%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res),
mapped_len, buffer->size);
}
}
@@ -278,7 +277,7 @@ scsi_free_dma_buffer_sg_orig(dma_buffer *buffer)
/** allocate S/G list to original data */
static bool
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, int size)
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, size_t size)
{
// free old list first
scsi_free_dma_buffer_sg_orig(buffer);
@@ -290,15 +289,15 @@ scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, int size)
B_ANY_KERNEL_ADDRESS, size,
B_NO_LOCK, 0);
if (buffer->sg_orig < 0) {
SHOW_ERROR(2, "Cannot S/G list buffer to original data of %d bytes",
(int)size);
SHOW_ERROR(2, "Cannot S/G list buffer to original data of %" B_PRIuSIZE
" bytes", size);
return false;
}
buffer->sg_count_max_orig = size / sizeof(physical_entry);
SHOW_INFO(3, "Got up to %d S/G entries to original data",
(int)buffer->sg_count_max_orig);
SHOW_INFO(3, "Got up to %" B_PRId32 " S/G entries to original data",
buffer->sg_count_max_orig);
return true;
}
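
The casts removed in this file were not only noisy but lossy: size_t and the physical address/size types are 64 bits wide on 64-bit targets, so squeezing them through (int) can truncate the value being logged, while B_PRIuSIZE, B_PRIuPHYSADDR and friends always pick a matching specifier. A small standalone demonstration of the truncation, with standard types and %zu / PRIu64 standing in for the Haiku macros and made-up values:

// Standalone illustration of why the (int) casts had to go: on a 64-bit build
// the cast mangles large values, whereas %zu / PRIu64 (portable counterparts
// of B_PRIuSIZE / B_PRIuPHYSADDR) print them as-is.
#include <inttypes.h>
#include <stdio.h>

int main()
{
	size_t sgListSize = (size_t)5 * 1024 * 1024 * 1024;	// > 4 GiB on 64-bit hosts
	uint64_t sgEntrySize = 0x180000000ULL;	// stand-in for physical_entry::size

	printf("with (int) cast: %d bytes\n", (int)sgListSize);	// wrong above INT_MAX
	printf("with %%zu:        %zu bytes\n", sgListSize);
	printf("with PRIu64:     %" PRIu64 " bytes\n", sgEntrySize);
	return 0;
}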


@@ -167,7 +167,6 @@ scsi_start_mode_sense_6(scsi_ccb *request)
{
scsi_cmd_mode_sense_6 *cmd = (scsi_cmd_mode_sense_6 *)request->orig_cdb;
scsi_cmd_mode_sense_10 *cdb = (scsi_cmd_mode_sense_10 *)request->cdb;
size_t allocationLength;
SHOW_FLOW0(3, "patching MODE SENSE(6) to MODE SENSE(10)");
@@ -180,11 +179,11 @@ scsi_start_mode_sense_6(scsi_ccb *request)
cdb->page_code = cmd->page_code;
cdb->page_control = cmd->page_control;
allocationLength = cmd->allocation_length
size_t allocationLength = cmd->allocation_length
- sizeof(scsi_cmd_mode_sense_6) + sizeof(scsi_cmd_mode_sense_10);
cdb->allocation_length = B_HOST_TO_BENDIAN_INT16(allocationLength);
SHOW_FLOW(3, "allocation_length=%ld", allocationLength);
SHOW_FLOW(3, "allocation_length=%" B_PRIuSIZE, allocationLength);
cdb->control = cmd->control;
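
Beyond the format change, this hunk keeps the MODE SENSE(6) to MODE SENSE(10) translation: the allocation length is grown by the difference between the two CDB structure sizes and stored into the 10-byte CDB's 16-bit big-endian field via B_HOST_TO_BENDIAN_INT16. A standalone sketch of that final store, with a hand-rolled helper instead of the Haiku macro; the sizes and the input value are hypothetical:

// Standalone sketch: write a 16-bit value into a big-endian CDB field, which
// is what B_HOST_TO_BENDIAN_INT16 does above. Bytes 7-8 of a MODE SENSE(10)
// CDB hold the allocation length.
#include <stdint.h>
#include <stdio.h>

static void
store_be16(uint8_t *dest, uint16_t value)
{
	dest[0] = (uint8_t)(value >> 8);	// most significant byte first
	dest[1] = (uint8_t)(value & 0xff);
}

int main()
{
	const size_t kCdb6Size = 6, kCdb10Size = 10;	// assumed CDB struct sizes
	uint8_t allocationLength6 = 252;	// hypothetical 1-byte length from the 6-byte CDB

	// grow the length by the size difference, as the hunk above does
	uint16_t allocationLength
		= (uint16_t)(allocationLength6 - kCdb6Size + kCdb10Size);

	uint8_t cdb10[10] = { 0 };
	store_be16(&cdb10[7], allocationLength);
	printf("allocation_length=%u -> bytes 7-8: %02x %02x\n",
		(unsigned)allocationLength, cdb10[7], cdb10[8]);
	return 0;
}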


@@ -37,7 +37,6 @@ fill_temp_sg(scsi_ccb *ccb)
};
size_t num_entries;
size_t mapped_len;
uint32 cur_idx;
physical_entry *temp_sg = (physical_entry *)ccb->sg_list;
res = get_iovec_memory_map(&vec, 1, 0, ccb->data_length, temp_sg, max_sg_blocks,
@@ -54,10 +53,10 @@ fill_temp_sg(scsi_ccb *ccb)
if (dma_boundary != ~(uint32)0 || ccb->data_length > max_sg_block_size) {
// S/G list may not be controller-compatible:
// we have to split offending entries
SHOW_FLOW(3, "Checking violation of dma boundary 0x%x and entry size 0x%x",
(int)dma_boundary, (int)max_sg_block_size);
SHOW_FLOW(3, "Checking violation of dma boundary 0x%" B_PRIx32
" and entry size 0x%" B_PRIx32, dma_boundary, max_sg_block_size);
for (cur_idx = 0; cur_idx < num_entries; ++cur_idx) {
for (uint32 cur_idx = 0; cur_idx < num_entries; ++cur_idx) {
addr_t max_len;
// calculate space upto next dma boundary crossing
@@ -66,10 +65,10 @@ fill_temp_sg(scsi_ccb *ccb)
// restrict size per sg item
max_len = std::min(max_len, (addr_t)max_sg_block_size);
SHOW_FLOW(4, "addr=%#" B_PRIxPHYSADDR ", size=%x, max_len=%x, "
"idx=%d, num=%d", temp_sg[cur_idx].address,
(int)temp_sg[cur_idx].size, (int)max_len, (int)cur_idx,
(int)num_entries);
SHOW_FLOW(4, "addr=%#" B_PRIxPHYSADDR ", size=%" B_PRIxPHYSADDR
", max_len=%" B_PRIxADDR ", idx=%" B_PRId32 ", num=%"
B_PRIuSIZE, temp_sg[cur_idx].address, temp_sg[cur_idx].size,
max_len, cur_idx, num_entries);
if (max_len < temp_sg[cur_idx].size) {
// split sg block
@@ -153,8 +152,8 @@ cleanup_tmp_sg(scsi_ccb *ccb)
{
status_t res;
SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%d",
ccb, ccb->data, (int)ccb->data_length);
SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%" B_PRId32,
ccb, ccb->data, ccb->data_length);
res = unlock_memory(ccb->data, ccb->data_length, B_DMA_IO
| ((ccb->flags & SCSI_DIR_MASK) == SCSI_DIR_IN ? B_READ_DEVICE : 0));
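
For context, the loop touched in this file splits S/G entries so that no entry crosses the controller's DMA boundary or exceeds max_sg_block_size. The expression behind "calculate space upto next dma boundary crossing" is not part of the hunk, so the sketch below is only the usual way of computing it for a power-of-two-minus-one boundary mask, with made-up values:

// Assumed reconstruction of the check driving the loop above: with a boundary
// mask such as 0xffff (64 KiB), the bytes left before the next crossing are
// boundary + 1 minus the address offset within the current boundary window,
// clamped to the per-entry maximum.
#include <algorithm>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main()
{
	const uint32_t kDmaBoundary = 0xffff;		// hypothetical 64 KiB boundary mask
	const uint32_t kMaxSgBlockSize = 0x10000;	// hypothetical per-entry limit

	uint64_t address = 0x12345f00;	// hypothetical S/G entry start
	uint64_t size = 0xc000;			// hypothetical S/G entry length

	uint64_t maxLength = (uint64_t)kDmaBoundary + 1 - (address & kDmaBoundary);
	maxLength = std::min(maxLength, (uint64_t)kMaxSgBlockSize);

	if (maxLength < size) {
		printf("split entry after %#" PRIx64 " bytes (%#" PRIx64 " remain)\n",
			maxLength, size - maxLength);
	} else
		printf("entry fits without splitting\n");
	return 0;
}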


@@ -32,8 +32,9 @@ get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len
size_t cur_idx;
size_t left_len;
SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
vec_count, vec_offset, len, max_entries);
SHOW_FLOW(3, "vec_count=%" B_PRIuSIZE ", vec_offset=%" B_PRIuSIZE ", len=%"
B_PRIuSIZE ", max_entries=%" B_PRIuSIZE, vec_count, vec_offset, len,
max_entries);
// skip iovec blocks if needed
while (vec_count > 0 && vec_offset > vec->iov_len) {
@@ -117,8 +118,8 @@ get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len
*num_entries = cur_idx;
*mapped_len = len - left_len;
SHOW_FLOW( 3, "num_entries=%d, mapped_len=%x",
(int)*num_entries, (int)*mapped_len );
SHOW_FLOW( 3, "num_entries=%" B_PRIuSIZE ", mapped_len=%" B_PRIxSIZE,
*num_entries, *mapped_len);
return B_OK;
}
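
The only logic visible in this last hunk besides the traces is the loop that skips whole iovec blocks until the requested offset falls inside one. A standalone sketch of that skipping step follows; the loop body is not shown in the hunk, so this is an assumed implementation, and the physical mapping the real get_iovec_memory_map() performs afterwards is omitted:

// Assumed sketch of the "skip iovec blocks if needed" step shown above:
// advance vec and shrink vec_offset until the offset lands inside the current
// iovec. Buffers and the offset are made up.
#include <stdio.h>
#include <sys/uio.h>

int main()
{
	char a[100], b[200], c[300];
	struct iovec vecs[3];
	vecs[0].iov_base = a; vecs[0].iov_len = sizeof(a);
	vecs[1].iov_base = b; vecs[1].iov_len = sizeof(b);
	vecs[2].iov_base = c; vecs[2].iov_len = sizeof(c);

	size_t vec_count = 3;
	size_t vec_offset = 250;	// hypothetical offset into the scattered buffer

	struct iovec *vec = vecs;
	while (vec_count > 0 && vec_offset > vec->iov_len) {
		vec_offset -= vec->iov_len;
		vec++;
		vec_count--;
	}

	printf("start in iovec %td at offset %zu\n", vec - vecs, vec_offset);
	return 0;
}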