Added Thomas Kurschel's blkman module.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@7767 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2004-06-06 23:25:59 +00:00
parent b59852c818
commit f66f07a616
8 changed files with 1818 additions and 0 deletions

View File

@ -0,0 +1,12 @@
SubDir OBOS_TOP src add-ons kernel generic blkman ;
UsePrivateHeaders kernel ;
UsePrivateHeaders [ FDirName kernel arch $(OBOS_ARCH) ] ;
UsePrivateHeaders [ FDirName kernel boot platform $(OBOS_BOOT_PLATFORM) ] ;
KernelAddon blkman : kernel generic :
blkman.c
io.c
virtual_memory.c
;

View File

@ -0,0 +1,40 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Functions that are missing in the kernel.
*/
#ifndef _KERNEL_EXPORT_EXT_H
#define _KERNEL_EXPORT_EXT_H
#include <KernelExport.h>
#include <iovec.h>
// get memory map of iovec
status_t get_iovec_memory_map(
iovec *vec, // iovec to analyze
size_t vec_count, // number of entries in vec
size_t vec_offset, // number of bytes to skip at beginning of vec
size_t len, // number of bytes to analyze
physical_entry *map, // resulting memory map
size_t max_entries, // max number of entries in map
size_t *num_entries, // actual number of map entries used
size_t *mapped_len // actual number of bytes described by map
);
// map main memory into virtual address space
status_t map_mainmemory(
addr_t physical_addr, // physical address to map
void **virtual_addr // receives corresponding virtual address
);
// unmap main memory from virtual address space
status_t unmap_mainmemory(
void *virtual_addr // virtual address to release
);
#endif
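
For orientation, a minimal caller sketch (hypothetical, not part of this commit) of the mapping helper declared above; the function name, the fixed 16-entry table and the parameter values are invented:

/* hypothetical sketch - build a physical s/g list for some iovecs */
#include "KernelExport_ext.h"

static status_t
example_build_sg(iovec *vecs, size_t count, size_t len)
{
	physical_entry map[16];	/* invented fixed-size table */
	size_t num_entries, mapped_len;
	status_t res;

	res = get_iovec_memory_map(vecs, count, 0 /* no offset */, len,
		map, 16, &num_entries, &mapped_len);
	if (res < B_OK)
		return res;

	/* mapped_len may be smaller than len if 16 entries weren't enough */
	dprintf("described %lu of %lu bytes in %lu entries\n",
		mapped_len, len, num_entries);
	return B_OK;
}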

View File

@ -0,0 +1,26 @@
#ifndef __BIOS_INFO_H__
#define __BIOS_INFO_H__
// length: 0x84 (offsets in the trailing comments are hex)
typedef struct tagbios_drive {
char name[32]; // 0
uint8 bios_id; // 20
uint8 padding[3];
uint32 cylinder_count; // 24
uint32 head_count; // 28
uint32 sectors_per_track; // 2c
uint32 num_chksums; // 30
struct {
uint64 offset; // 34+
uint32 len; // 3c+
uint32 chksum; // 40+
} chksums[5]; // 34
} bios_drive;
extern bios_drive *bios_drive_info;
extern uint32 boot_calculate_hash( void *buffer, size_t len );
#endif
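
As a reading aid, a hypothetical lookup over the table declared above. The loops in blkman.c imply that bios_drive_info is terminated by an entry whose bios_id is 0; this sketch assumes the same convention:

/* hypothetical sketch - find the boot loader's entry for a BIOS ID */
static bios_drive *
example_find_bios_drive(uint8 id)
{
	bios_drive *drive;

	for (drive = bios_drive_info; drive->bios_id != 0; ++drive) {
		if (drive->bios_id == id)
			return drive;
	}
	return NULL;	/* not a drive the boot loader knows about */
}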

View File

@ -0,0 +1,689 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open block device manager
Actual I/O.
Main file.
*/
#include "blkman_int.h"
#include <stdio.h>
#define TRACE_BLOCK_IO
#ifdef TRACE_BLOCK_IO
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
uint blkman_buffer_size;
sem_id blkman_buffer_lock;
struct iovec blkman_buffer_vec[1];
void *blkman_buffer_phys;
char *blkman_buffer;
phys_vecs blkman_buffer_phys_vec;
area_id blkman_buffer_area;
locked_pool_interface *locked_pool;
device_manager_info *pnp;
static status_t
blkman_open(blkman_device_info *device, uint32 flags,
blkman_handle_info **res_handle)
{
blkman_handle_info *handle;
status_t res;
TRACE(("blkman_open()\n"));
handle = (blkman_handle_info *)malloc(sizeof(*handle));
if (handle == NULL)
return B_NO_MEMORY;
handle->device = device;
res = device->interface->open(device->cookie, &handle->cookie);
if (res < B_OK)
goto err;
*res_handle = handle;
TRACE((" opened.\n"));
return B_OK;
err:
free(handle);
return res;
}
static status_t
blkman_close(blkman_handle_info *handle)
{
blkman_device_info *device = handle->device;
TRACE(("blkman_close()\n"));
device->interface->close(handle->cookie);
return B_OK;
}
static status_t
blkman_freecookie(blkman_handle_info *handle)
{
blkman_device_info *device = handle->device;
TRACE(("blkman_freecookie()\n"));
device->interface->free(handle->cookie);
free(handle);
TRACE(("done.\n"));
return B_OK;
}
#if 0
/** Verify a checksum that is part of BIOS drive identification.
* returns B_OK on success
*/
static status_t
verify_checksum(blkman_handle_info *handle, uint64 offset, uint32 len, uint32 chksum)
{
void *buffer;
uint32 readlen;
status_t res;
// SHOW_FLOW( 0, "offset=%lld, len=%ld", offset, len );
buffer = malloc(len);
if (buffer == NULL)
return B_NO_MEMORY;
readlen = len;
res = blkman_read(handle, offset, buffer, &readlen);
if (res < B_OK || readlen < len)
goto err;
// SHOW_FLOW0( 0, "check hash sum" );
if (boot_calculate_hash(buffer, len) != chksum)
goto err;
// SHOW_FLOW0( 0, "success" );
free(buffer);
return B_OK;
err:
free(buffer);
return B_ERROR;
}
#endif
/** store BIOS drive id in node's attribute */
static status_t
store_bios_drive_in_node(blkman_device_info *device)
{
pnp_node_attr attribute = {
BLKDEV_BIOS_ID, B_UINT8_TYPE, { ui8:
device->bios_drive != NULL ? device->bios_drive->bios_id : 0 }
};
return pnp->write_attr(device->node, &attribute);
}
/** find the BIOS info of a drive, if that hasn't happened yet.
* this must be called whenever someone wants to access BIOS info about the drive;
* there are two reasons not to call this during probe():
* - perhaps nobody is interested in the BIOS info
* - we need a working blkman device to handle lower level driver
* restrictions, so this method can only be safely called once the
* node has been loaded
*/
static void
find_bios_drive_info(blkman_handle_info *handle)
{
blkman_device_info *device = handle->device;
bios_drive *drive = NULL; //, *colliding_drive;
char name[32];
uint8 bios_id;
// SHOW_FLOW( 0, "%p", device );
// return immediately if BIOS info has already been found
if (device->bios_drive != NULL)
return;
// check whether BIOS info was found during one of the previous
// loads
if (pnp->get_attr_uint8(device->node, BLKDEV_BIOS_ID, &bios_id, false) == B_OK) {
TRACE(("use previous BIOS ID 0x%x\n", bios_id));
// ToDo: this assumes private R5 kernel functions to be present
#if 0
// yes, so find the associated data structure
if (bios_id != 0) {
for (drive = bios_drive_info; drive->bios_id != 0; ++drive) {
if (drive->bios_id == bios_id)
break;
}
} else
#endif
drive = NULL;
device->bios_drive = drive;
return;
}
sprintf(name, "PnP %p", device->node);
// ToDo: this assumes private R5 kernel functions to be present
#if 0
// do it the hard way: find a BIOS drive with the same checksums
for (drive = bios_drive_info; drive->bios_id != 0; ++drive) {
uint32 i;
// ignore identified BIOS drives
if (drive->name[0] != 0)
continue;
TRACE(("verifying drive 0x%x", drive->bios_id));
for (i = 0; i < drive->num_chksums; ++i) {
if (verify_checksum( handle, drive->chksums[i].offset,
drive->chksums[i].len, drive->chksums[i].chksum) != B_OK)
break;
}
if (i == drive->num_chksums)
break;
}
#endif
// with the search above compiled out, drive is still NULL here
if (drive == NULL || drive->bios_id == 0) {
TRACE(("this is no BIOS drive\n"));
// no BIOS drive found
goto no_bios_drive;
}
TRACE(("this is BIOS drive 0x%x\n", drive->bios_id));
// ToDo: this assumes private R5 kernel functions to be present
#if 0
// the R5 boot loader assumes that two drives can be distinguished by
// - their checksums
// - their physical layout
// unfortunately, the "physical layout" is something virtual defined by the
// BIOS itself, so nobody can verify that;
// as a result, we may have two drives with same checksums and different
// geometry - having no opportunity to check the geometry, we cannot
// distinguish between them.
// The simple solution is to modify the boot loader to not take geometry
// into account, but without sources, the boot loader cannot be fixed.
for (colliding_drive = bios_drive_info; colliding_drive->bios_id != 0; ++colliding_drive) {
uint32 i;
if (drive == colliding_drive)
continue;
if (drive->num_chksums != colliding_drive->num_chksums)
continue;
for (i = 0; i < colliding_drive->num_chksums; ++i) {
if (colliding_drive->chksums[i].offset != drive->chksums[i].offset
|| colliding_drive->chksums[i].len != drive->chksums[i].len
|| colliding_drive->chksums[i].chksum != drive->chksums[i].chksum)
break;
}
if (i < colliding_drive->num_chksums)
continue;
dprintf("Cannot distinguish between BIOS drives %x and %x without geometry\n",
drive->bios_id, colliding_drive->bios_id);
// this is nasty - we cannot reliably assign a BIOS drive number.
// if the user is lucky, he "only" cannot install a boot manager;
// but if the boot drive is affected, he cannot even boot.
goto no_bios_drive;
}
#endif
TRACE(("store driver \"%s\" in system data\n", name));
// store name so noone else will test this BIOS drive
strcpy(drive->name, name);
device->bios_drive = drive;
// remember that to avoid testing next time
store_bios_drive_in_node(device);
return;
no_bios_drive:
device->bios_drive = NULL;
// remember that to avoid testing next time
store_bios_drive_in_node(device);
return;
}
static status_t
blkman_ioctl(blkman_handle_info *handle, uint32 op, void *buf, size_t len)
{
blkman_device_info *device = handle->device;
status_t res;
if (device->is_bios_drive) {
switch (op) {
case B_GET_BIOS_DRIVE_ID:
find_bios_drive_info(handle);
if (device->bios_drive == NULL)
return B_ERROR;
*(char *)buf = device->bios_drive->bios_id;
return B_OK;
case B_GET_BIOS_GEOMETRY:
if (!device->is_bios_drive)
break;
{
device_geometry *geometry = (device_geometry *)buf;
find_bios_drive_info(handle);
if (device->bios_drive == NULL)
return B_ERROR;
TRACE(("GET_BIOS_GEOMETRY\n"));
// get real geometry from low level driver
res = device->interface->ioctl(handle->cookie, B_GET_GEOMETRY,
geometry, sizeof(*geometry));
if (res != B_OK)
return res;
// replace entries with bios info retrieved by boot loader
geometry->cylinder_count = device->bios_drive->cylinder_count;
geometry->head_count = device->bios_drive->head_count;
geometry->sectors_per_track = device->bios_drive->sectors_per_track;
}
return B_OK;
}
}
res = device->interface->ioctl( handle->cookie, op, buf, len );
//SHOW_FLOW( 0, "%s", strerror( res ));
return res;
}
static status_t
blkman_probe(pnp_node_handle parent)
{
char *str;
TRACE(("blkman_probe()\n"));
// make sure we can handle this parent device
if (pnp->get_attr_string(parent, "type", &str, false) != B_OK)
return B_ERROR;
if (strcmp(str, BLKDEV_TYPE_NAME) != 0) {
free(str);
return B_ERROR;
}
free(str);
// ready to register at devfs
{
pnp_node_attr attrs[] =
{
{ PNP_DRIVER_DRIVER, B_STRING_TYPE, { string: BLKMAN_MODULE_NAME }},
{ PNP_DRIVER_TYPE, B_STRING_TYPE, { string: PNP_DEVFS_TYPE_NAME }},
// we always want devfs on top of us
{ PNP_DRIVER_FIXED_CONSUMER, B_STRING_TYPE, { string: PNP_DEVFS_MODULE_NAME }},
{ PNP_DRIVER_CONNECTION, B_STRING_TYPE, { string: "blkman" }},
{ NULL }
};
pnp_node_handle node;
return pnp->register_device(parent, attrs, NULL, &node);
}
}
static void
blkman_remove(pnp_node_handle node, void *cookie)
{
uint8 bios_id;
//bios_drive *drive;
// if this drive has a BIOS ID, remove it from BIOS drive list
if (pnp->get_attr_uint8(node, BLKDEV_BIOS_ID, &bios_id, false) != B_OK
|| bios_id == 0 )
return;
// ToDo: this assumes private R5 kernel functions to be present
#if 0
for (drive = bios_drive_info; drive->bios_id != 0; ++drive) {
if (drive->bios_id == bios_id)
break;
}
if (drive->bios_id != 0) {
TRACE(("Marking BIOS device 0x%x as being unknown\n", bios_id));
drive->name[0] = 0;
}
#endif
}
static status_t
blkman_init_device(pnp_node_handle node, void *user_cookie, void **cookie)
{
blkman_device_info *device;
blkdev_params params;
char *name, *tmp_name;
uint8 is_bios_drive;
status_t res;
// blkdev_interface *interface;
// blkdev_device_cookie dev_cookie;
TRACE(("blkman_init_device()\n"));
// extract controller/protocol restrictions from the node
if (pnp->get_attr_uint32(node, BLKDEV_DMA_ALIGNMENT, &params.alignment, true) != B_OK)
params.alignment = 0;
if (pnp->get_attr_uint32(node, BLKDEV_MAX_BLOCKS_ITEM, &params.max_blocks, true) != B_OK)
params.max_blocks = 0xffffffff;
if (pnp->get_attr_uint32(node, BLKDEV_DMA_BOUNDARY, &params.dma_boundary, true) != B_OK)
params.dma_boundary = ~0;
if (pnp->get_attr_uint32(node, BLKDEV_MAX_SG_BLOCK_SIZE, &params.max_sg_block_size, true) != B_OK)
params.max_sg_block_size = 0xffffffff;
if (pnp->get_attr_uint32(node, BLKDEV_MAX_SG_BLOCKS, &params.max_sg_blocks, true) != B_OK)
params.max_sg_blocks = ~0;
// do some sanity checks
// (see scsi/bus_mgr.c)
params.max_sg_block_size &= ~params.alignment;
if (params.alignment > B_PAGE_SIZE) {
dprintf("Alignment (0x%lx) must be less then B_PAGE_SIZE\n", params.alignment);
return B_ERROR;
}
if (params.max_sg_block_size < 512) {
dprintf("Max s/g block size (0x%lx) is too small\n", params.max_sg_block_size);
return B_ERROR;
}
if (params.dma_boundary < B_PAGE_SIZE - 1) {
dprintf("DMA boundary (0x%lx) must be at least B_PAGE_SIZE\n", params.dma_boundary);
return B_ERROR;
}
if (params.max_blocks < 1 || params.max_sg_blocks < 1) {
dprintf("Max blocks (%ld) and max s/g blocks (%ld) must be at least 1",
params.max_blocks, params.max_sg_blocks);
return B_ERROR;
}
// allow "only" up to 512 sg entries
// (they consume 4KB and can describe up to 2MB virtual cont. memory!)
params.max_sg_blocks = min(params.max_sg_blocks, 512);
if (pnp->get_attr_uint8(node, BLKDEV_IS_BIOS_DRIVE, &is_bios_drive, true) != B_OK)
is_bios_drive = false;
// we don't really care about the /dev name, but if it is
// missing, we have a problem
if (pnp->get_attr_string(node, PNP_DEVFS_FILENAME, &name, true) != B_OK) {
dprintf("devfs filename is missing.\n");
return B_ERROR;
}
device = (blkman_device_info *)malloc(sizeof(*device));
if (device == NULL) {
res = B_NO_MEMORY;
goto err1;
}
memset(device, 0, sizeof(*device));
device->node = node;
res = benaphore_init(&device->lock, "blkdev_mutex");
if (res < 0)
goto err2;
// construct an identifiable name for the S/G pool
tmp_name = malloc(strlen(name) + strlen(" sg_lists") + 1);
if (tmp_name == NULL) {
res = B_NO_MEMORY;
goto err3;
}
strcpy(tmp_name, name);
strcat(tmp_name, " sg_lists");
// create the S/G pool with an initial size of 1
// (otherwise, we may be on the paging path and have no S/G entries at hand)
device->phys_vecs_pool = locked_pool->create(
params.max_sg_blocks * sizeof(physical_entry),
sizeof( physical_entry ) - 1,
0, 16*1024, 32, 1, tmp_name, B_FULL_LOCK | B_CONTIGUOUS,
NULL, NULL, NULL);
free(tmp_name);
if (device->phys_vecs_pool == NULL) {
res = B_NO_MEMORY;
goto err3;
}
device->params = params;
device->is_bios_drive = is_bios_drive != 0;
res = pnp->load_driver(pnp->get_parent(node), device,
(pnp_driver_info **)&device->interface, (void **)&device->cookie);
if (res != B_OK)
goto err4;
free(name);
TRACE(("done\n"));
*cookie = device;
return B_OK;
err4:
locked_pool->destroy(device->phys_vecs_pool);
err3:
benaphore_destroy(&device->lock);
err2:
free(device);
err1:
// no lower level driver was successfully loaded on any path that
// reaches this label, so there is nothing to unload
free(name);
return res;
}
static status_t
blkman_uninit_device(blkman_device_info *device)
{
pnp->unload_driver(pnp->get_parent(device->node));
locked_pool->destroy(device->phys_vecs_pool);
benaphore_destroy(&device->lock);
free(device);
return B_OK;
}
static status_t
blkman_init_buffer(void)
{
physical_entry physicalTable[2];
status_t res;
TRACE(("blkman_init_buffer()\n"));
blkman_buffer_size = 32*1024;
blkman_buffer_lock = create_sem(1, "blkman_buffer_mutex");
if (blkman_buffer_lock < 0) {
res = blkman_buffer_lock;
goto err1;
}
res = blkman_buffer_area = create_area("blkman_buffer",
(void **)&blkman_buffer, B_ANY_KERNEL_ADDRESS,
blkman_buffer_size, B_FULL_LOCK | B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
if (res < 0)
goto err2;
res = get_memory_map(blkman_buffer, blkman_buffer_size, physicalTable, 2);
if (res < 0)
goto err3;
blkman_buffer_vec[0].iov_base = blkman_buffer;
blkman_buffer_vec[0].iov_len = blkman_buffer_size;
blkman_buffer_phys_vec.num = 1;
blkman_buffer_phys_vec.total_len = blkman_buffer_size;
blkman_buffer_phys_vec.vec[0] = physicalTable[0];
return B_OK;
err3:
delete_area(blkman_buffer_area);
err2:
delete_sem(blkman_buffer_lock);
err1:
return res;
}
static status_t
blkman_uninit_buffer(void)
{
delete_area(blkman_buffer_area);
delete_sem(blkman_buffer_lock);
return B_OK;
}
static status_t
std_ops(int32 op, ...)
{
switch (op) {
case B_MODULE_INIT:
return blkman_init_buffer();
case B_MODULE_UNINIT:
blkman_uninit_buffer();
return B_OK;
default:
return B_ERROR;
}
}
module_dependency module_dependencies[] = {
{ DEVICE_MANAGER_MODULE_NAME, (module_info **)&pnp },
{ LOCKED_POOL_MODULE_NAME, (module_info **)&locked_pool },
{}
};
pnp_devfs_driver_info blkman_module = {
{
{
BLKMAN_MODULE_NAME,
0,
std_ops
},
blkman_init_device,
(status_t (*)( void * )) blkman_uninit_device,
blkman_probe,
blkman_remove
},
(status_t (*)(void *, uint32, void **))blkman_open,
(status_t (*)(void *))blkman_close,
(status_t (*)(void *))blkman_freecookie,
(status_t (*)(void *, uint32, void *, size_t))blkman_ioctl,
(status_t (*)(void *, off_t, void *, size_t *))blkman_read,
(status_t (*)(void *, off_t, const void *, size_t *))blkman_write,
NULL,
NULL,
(status_t (*)(void *, off_t, const iovec *, size_t, size_t *))blkman_readv,
(status_t (*)(void *, off_t, const iovec *, size_t, size_t *))blkman_writev
};
static status_t
std_ops_for_driver(int32 op, ...)
{
// there is nothing to set up, as this is an interface for
// drivers which are loaded by _us_
switch (op) {
case B_MODULE_INIT:
case B_MODULE_UNINIT:
return B_OK;
default:
return B_ERROR;
}
}
blkman_for_driver_interface blkman_for_driver_module = {
{
BLKMAN_FOR_DRIVER_MODULE_NAME,
0,
std_ops_for_driver
},
blkman_set_media_params,
};
module_info *modules[] = {
&blkman_module.dinfo.minfo,
&blkman_for_driver_module.minfo,
NULL
};
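
To see how blkman_probe() and blkman_init_device() above get their input, here is a hypothetical sketch of the node a lower level block device driver might register. The attribute names and the { string: ... } initializer style are taken from this commit; the driver itself, the devfs path and the ui32 union member are assumptions:

/* hypothetical sketch - registered by some lower level disk driver */
static status_t
example_register_blkdev(device_manager_info *pnp, pnp_node_handle parent)
{
	pnp_node_attr attrs[] = {
		/* blkman_probe() insists on this type */
		{ "type", B_STRING_TYPE, { string: BLKDEV_TYPE_NAME }},
		/* blkman_init_device() fails without a devfs name */
		{ PNP_DEVFS_FILENAME, B_STRING_TYPE, { string: "disk/example/0/raw" }},
		/* optional restrictions - defaults are substituted if missing */
		{ BLKDEV_DMA_ALIGNMENT, B_UINT32_TYPE, { ui32: 1 }},	/* 2-byte mask */
		{ BLKDEV_MAX_BLOCKS_ITEM, B_UINT32_TYPE, { ui32: 256 }},
		{ NULL }
	};
	pnp_node_handle node;

	return pnp->register_device(parent, attrs, NULL, &node);
}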

View File

@ -0,0 +1,90 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open block device manager
Internal header.
*/
#include <blkman.h>
#include <string.h>
#include <module.h>
#include <locked_pool.h>
#include <malloc.h>
#include <pnp_devfs.h>
#include <device_manager.h>
#include <bios_drive.h>
#include "wrapper.h"
// controller restrictions (see blkman.h)
typedef struct blkdev_params {
uint32 alignment;
uint32 max_blocks;
uint32 dma_boundary;
uint32 max_sg_block_size;
uint32 max_sg_blocks;
} blkdev_params;
// device info
typedef struct blkman_device_info {
pnp_node_handle node;
blkdev_interface *interface;
blkdev_device_cookie cookie;
benaphore lock; // used for access to following variables
uint32 block_size;
uint32 ld_block_size;
uint64 capacity;
blkdev_params params;
bool is_bios_drive; // could be a BIOS drive
locked_pool_cookie phys_vecs_pool; // pool of temporary phys_vecs
bios_drive *bios_drive; // info about corresponding BIOS drive
} blkman_device_info;
// file handle info
typedef struct blkman_handle_info {
blkman_device_info *device;
blkdev_handle_cookie cookie;
} blkman_handle_info;
// attribute containing BIOS drive ID (or 0, if it's no BIOS drive) (uint8)
#define BLKDEV_BIOS_ID "blkdev/bios_id"
// transmission buffer data:
// size in bytes
extern uint blkman_buffer_size;
// to use the buffer, you must own this semaphore
extern sem_id blkman_buffer_lock;
// iovec
extern struct iovec blkman_buffer_vec[1];
// physical address
extern void *blkman_buffer_phys;
// virtual address
extern char *blkman_buffer;
// phys_vec of it (always linear)
extern phys_vecs blkman_buffer_phys_vec;
// area containing buffer
extern area_id blkman_buffer_area;
extern locked_pool_interface *locked_pool;
extern device_manager_info *pnp;
// io.c
status_t blkman_readv( blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len );
status_t blkman_read( blkman_handle_info *handle, off_t pos, void *buf, size_t *len );
status_t blkman_writev( blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len );
status_t blkman_write( blkman_handle_info *handle, off_t pos, void *buf, size_t *len );
void blkman_set_media_params( blkman_device_info *device,
uint32 block_size, uint32 ld_block_size, uint64 capacity );
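
A short hypothetical example of the last declaration, as a lower level driver would call it after a media change. The capacity is counted in blocks (io.c compares it against block positions), and 512 = 2^9, so passing ld_block_size = 9 lets io.c use shifts instead of divisions:

/* hypothetical sketch - report 512-byte blocks, 512 MB capacity */
static void
example_media_changed(blkman_device_info *device)
{
	blkman_set_media_params(device, 512 /* block_size */,
		9 /* ld_block_size */, 1024 * 1024 /* capacity in blocks */);
}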

View File

@ -0,0 +1,733 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open block device manager
Actual I/O.
This is hardcore code. Think twice before changing something
in here.
Things could become a lot easier with the following restrictions:
- stricter data alignment that is sufficient for all controllers
(e.g. page-aligned should certainly be)
- no partial block access
- locked data
- always sufficient iovecs for entire transfer
The last item is a design problem of the devfs. The first two
would make sure that we need no buffer anymore, which would make much
code unnecessary. Locked data would save much code too - having
an s/g list as input would make it even sweeter.
Obviously these restrictions cannot be enforced for user programs,
but at least for transfers from/to disk cache, we could define
extra functions with these properties.
*/
#include "blkman_int.h"
#include "KernelExport_ext.h"
/** get sg list of iovecs, taking dma boundaries and maximum size of
* a single s/g entry into account
* <vec_offset> must not point beyond the first iovec
*/
static int
blkman_map_iovecs(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
phys_vecs *map, size_t max_phys_entries, size_t dma_boundary,
size_t max_sg_block_size)
{
status_t res;
size_t total_len;
size_t cur_idx;
SHOW_FLOW0( 3, "" );
if ((res = get_iovec_memory_map( vec, vec_count, vec_offset, len,
map->vec, max_phys_entries, &map->num, &map->total_len)) < B_OK)
return res;
if (dma_boundary == ~0 && max_sg_block_size >= map->total_len)
return B_OK;
SHOW_FLOW(3, "Checking violation of dma boundary 0x%x and entry size 0x%x",
(int)dma_boundary, (int)max_sg_block_size);
total_len = 0;
for (cur_idx = 0; cur_idx < map->num; ++cur_idx) {
addr_t max_len;
// calculate space up to the next dma boundary crossing
max_len = (dma_boundary + 1) - ((addr_t)map->vec[cur_idx].address & dma_boundary);
// restrict size per sg item
max_len = min(max_len, max_sg_block_size);
SHOW_FLOW( 4, "addr=%p, size=%x, max_len=%x, idx=%d, num=%d",
map->vec[cur_idx].address, (int)map->vec[cur_idx].size,
(int)max_len, (int)cur_idx, (int)map->num );
//snooze(100000);
if (max_len < map->vec[cur_idx].size) {
// split sg block
map->num = min(map->num + 1, max_phys_entries);
memmove(&map->vec[cur_idx + 1], &map->vec[cur_idx],
(map->num - 1 - cur_idx) * sizeof(physical_entry));
map->vec[cur_idx].size = max_len;
// if the list is already full, the entry is truncated instead of split
if (cur_idx + 1 < map->num) {
map->vec[cur_idx + 1].address = (void *)((addr_t)map->vec[cur_idx + 1].address + max_len);
map->vec[cur_idx + 1].size -= max_len;
}
}
total_len += map->vec[cur_idx].size;
}
// we really have to update total_len - due to block splitting,
// some other blocks may have been discarded if the s/g list became too long
map->total_len = total_len;
return B_OK;
}
/** check whether dma alignment restrictions are met
* returns true on success
* remark: all supplied iovecs are checked; if the user passes more
* iovecs than the transfer needs and an unused one is misaligned, we
* still report an alignment problem
*/
static bool
blkman_check_alignment(struct iovec *vecs, uint num_vecs,
size_t vec_offset, uint alignment)
{
if (alignment == 0)
// no alignment - good boy
return true;
for (; num_vecs > 0; ++vecs, --num_vecs) {
// check both begin and end of iovec
if ((((addr_t)vecs->iov_base + vec_offset) & alignment) != 0) {
SHOW_FLOW(1, "s/g entry not aligned (%p)", vecs->iov_base);
return false;
}
if ((((addr_t)vecs->iov_base + vecs->iov_len) & alignment) != 0) {
SHOW_FLOW(1, "end of s/g entry not aligned (%p)",
(void *)((addr_t)vecs->iov_base + vecs->iov_len));
return false;
}
vec_offset = 0;
}
return true;
}
/** try to lock iovecs
* returns number of locked bytes
* (only entire iovecs are attempted to be locked)
* remark: if there are too few iovecs, you simply get fewer locked bytes
*/
static size_t
blkman_lock_iovecs(struct iovec *vecs, uint num_vecs,
size_t vec_offset, size_t len, int flags)
{
size_t orig_len = len;
SHOW_FLOW(3, "len = %lu", len);
for (; len > 0 && num_vecs > 0; ++vecs, --num_vecs) {
size_t lock_len;
status_t res;
lock_len = min(vecs->iov_len - vec_offset, len);
SHOW_FLOW( 3, "pos = %p, len = %lu", vecs->iov_base, vecs->iov_len);
res = lock_memory((void *)((addr_t)vecs->iov_base + vec_offset), lock_len, flags);
if (res != B_OK) {
//snooze( 1000000 );
SHOW_FLOW(3, "cannot lock: %s", strerror(res));
break;
}
len -= lock_len;
vec_offset = 0;
}
SHOW_FLOW( 3, "remaining len=%lu", len);
return orig_len - len;
}
/** unlock iovecs */
static void
blkman_unlock_iovecs(struct iovec *vecs, uint num_vecs,
size_t vec_offset, size_t len, int flags)
{
SHOW_FLOW(3, "len = %lu", len);
for (; len > 0; ++vecs, --num_vecs) {
size_t lock_len;
lock_len = min(vecs->iov_len - vec_offset, len);
if (unlock_memory((void *)((addr_t)vecs->iov_base + vec_offset), lock_len,
flags) != B_OK)
panic( "Cannot unlock previously locked memory!" );
len -= lock_len;
vec_offset = 0;
}
}
/** copy data from/to transfer buffer;
* remark: if iovecs are missing, copying is aborted
*/
static void
blkman_copy_buffer(char *buffer, struct iovec *vecs, uint num_vecs,
size_t vec_offset, size_t len, bool to_buffer)
{
for (; len > 0 && num_vecs > 0; ++vecs, --num_vecs) {
size_t bytes;
bytes = min(len, vecs->iov_len - vec_offset);
if (to_buffer)
memcpy(buffer, (void *)((addr_t)vecs->iov_base + vec_offset), bytes);
else
memcpy((void *)((addr_t)vecs->iov_base + vec_offset), buffer, bytes);
buffer += bytes;
vec_offset = 0;
}
}
/** determine number of bytes described by iovecs */
static size_t
blkman_iovec_len(struct iovec *vecs, uint num_vecs, size_t vec_offset)
{
size_t len = 0;
for (; num_vecs > 0; ++vecs, --num_vecs) {
len += vecs->iov_len - vec_offset;
vec_offset = 0;
}
return len;
}
/** main beast to execute i/o transfer
* as <need_locking> and <write> are usually constant, we really want to inline
* it - this makes each generated instance of this function much smaller
* <need_locking> - data must be locked before transferring it
* <vec> - should be locked, though it's probably not really necessary
*/
static inline status_t
blkman_readwrite(blkman_handle_info *handle, off_t pos, struct iovec *vec,
int vec_count, size_t *total_len, bool need_locking, bool write)
{
blkman_device_info *device = handle->device;
uint32 block_size, ld_block_size;
uint64 capacity;
bool need_buffer;
status_t res = B_OK;
size_t len = *total_len;
size_t orig_len = len;
size_t vec_offset;
phys_vecs *phys_vecs;
//snooze( 1000000 );
SHOW_FLOW(3, "pos = %Ld, len = %lu, need_locking = %d, write = %d, vec_count = %d",
pos, len, need_locking, write, vec_count);
// general properties may get modified, so make a copy first
/*device->interface->get_media_params( handle->handle_cookie,
&block_size, &ld_block_size, &capacity );*/
ACQUIRE_BEN(&device->lock);
block_size = device->block_size;
ld_block_size = device->ld_block_size;
capacity = device->capacity;
RELEASE_BEN(&device->lock);
if (capacity == 0) {
res = B_DEV_NO_MEDIA;
goto err;
}
if (block_size == 0) {
res = B_DEV_CONFIGURATION_ERROR;
goto err;
}
phys_vecs = locked_pool->alloc(handle->device->phys_vecs_pool);
SHOW_FLOW0(3, "got phys_vecs");
// offset in the active iovec (can even span beyond the first iovec)
vec_offset = 0;
while (len > 0) {
//off_t block_pos;
uint64 block_pos;
uint32 block_ofs;
size_t cur_len;
size_t cur_blocks;
struct iovec *cur_vecs;
size_t cur_vec_count;
size_t cur_vec_offset;
size_t bytes_transferred;
SHOW_FLOW(3, "current len = %lu", len);
// skip handled iovecs
while (vec_count > 0 && vec_offset >= vec->iov_len) {
vec_offset -= vec->iov_len;
++vec;
--vec_count;
}
// having too few iovecs is handled in the following way:
// 1. if no other problem occurs, lock_iovecs restrict transfer
// up to the last block fully described by iovecs
// 2. if only a partial block is described, we fall back to
// buffered transfer because lock_iovecs cannot give you
// a whole block
// 3. whenever buffered transfer is used, an explicit test for
// iovec shortage is done and transmission is restricted up to
// and including the last block that has an iovec; copying from/to
// buffer stops when no iovec is left (copy_buffer) and thus
// restricts transfer appropriately
// 4. whenever all iovecs are consumed, we arrive at this piece of
// code and abort
if (vec_count == 0) {
SHOW_FLOW0(3, "vec too short");
res = B_BAD_VALUE;
goto err2;
}
// get block index / start offset in block
if (ld_block_size) {
block_pos = pos >> ld_block_size;
block_ofs = pos - (block_pos << ld_block_size);
} else {
block_pos = pos / block_size;
block_ofs = pos - block_pos * block_size;
}
// read requests beyond the end of the volume must be ignored without notice
if (block_pos >= capacity) {
SHOW_FLOW0(1, "transfer starts beyond end of device");
goto err2;
}
SHOW_FLOW(3, "block_pos = %Ld, block_ofs = %lu", block_pos, block_ofs);
// check whether a buffered transfer is required:
// 1. partial block transfer:
// 1a. transfer starts within block
// 1b. transfer finishes within block
// 2. dma alignment problem
// cases 1a and 2 are handled immediately; case 1b is delayed: we transmit
// whole blocks until the last (partial) block is reached and handle
// it separately; therefore we only check for len < block_size, i.e.
// for a last partial block (we could do better, but you get what
// you deserve)
need_buffer = block_ofs != 0
|| len < block_size
|| !blkman_check_alignment(vec, vec_count, vec_offset, device->params.alignment);
retry:
if (need_buffer) {
int tmp_len;
// argh! - need buffered transfer
SHOW_FLOW(1, "buffer required: len=%ld, block_ofs=%ld",
len, block_ofs);
acquire_sem(blkman_buffer_lock);
// nobody helps us if there are too few iovecs, so test
// for that explicitly
// (the case tmp_len == 0 is already checked above; if it weren't,
// we would get into trouble when rounding up to the next
// block size, which would yield zero and cause trouble
// during locking)
tmp_len = blkman_iovec_len(vec, vec_count, vec_offset);
tmp_len = min(tmp_len, len);
SHOW_FLOW(3, "tmp_len: %d", tmp_len);
if (write && (block_ofs != 0 || tmp_len < (ssize_t)block_size)) {
// partial block write - need to read block first
// we always handle one block only to keep things simple
cur_blocks = 1;
SHOW_FLOW0(3, "partial write at beginning: reading content of first block");
res = handle->device->interface->read(handle->cookie,
&blkman_buffer_phys_vec, block_pos,
cur_blocks, block_size, &bytes_transferred);
// if reading the old block content failed, bail out - otherwise
// we would write back garbage around the new data
if (res != B_OK)
goto err3;
} else {
// alignment problem or partial block read - find out how many
// blocks are spanned by this transfer
cur_blocks = (tmp_len + block_ofs + block_size - 1) / block_size;
SHOW_FLOW(3, "cur_blocks: %ld", cur_blocks);
// restrict block count to buffer size
if (cur_blocks * block_size > blkman_buffer_size)
cur_blocks = blkman_buffer_size / block_size;
}
// copy data into buffer before write
// (calculate number of bytes to copy carefully!)
if (write) {
SHOW_FLOW(3, "copy data to buffer (%ld bytes)",
cur_blocks * block_size - block_ofs);
blkman_copy_buffer(blkman_buffer + block_ofs,
vec, vec_count, vec_offset, cur_blocks * block_size - block_ofs,
true);
}
cur_vecs = blkman_buffer_vec;
cur_vec_count = 1;
cur_vec_offset = 0;
} else {
// no buffer needed
if (ld_block_size)
cur_blocks = len >> ld_block_size;
else
cur_blocks = len / block_size;
cur_vecs = vec;
cur_vec_count = vec_count;
cur_vec_offset = vec_offset;
}
SHOW_FLOW(3, "cur_blocks = %lu, cur_vec_offset = %lu, cur_vec_count = %lu",
cur_blocks, cur_vec_offset, cur_vec_count);
// restrict transfer size to device limits and media capacity
cur_blocks = min(cur_blocks, device->params.max_blocks);
if (block_pos + cur_blocks > capacity)
cur_blocks = capacity - block_pos;
SHOW_FLOW(3, "after applying size restriction: cur_blocks = %lu", cur_blocks);
cur_len = cur_blocks * block_size;
if (need_locking) {
// lock data
// side-note: we also lock the transfer buffer to simplify code
// if n bytes are to be transferred, we try to lock
// n bytes, then n/2, n/4 etc. until we succeed
// this is needed because locking fails for entire iovecs only
for (; cur_len > 0; cur_blocks >>= 1, cur_len = cur_blocks * block_size) {
size_t locked_len;
SHOW_FLOW(3, "trying to lock %lu bytes", cur_len);
locked_len = blkman_lock_iovecs(cur_vecs, cur_vec_count,
cur_vec_offset, cur_len, B_DMA_IO | (write ? 0 : B_READ_DEVICE));
if (locked_len == cur_len)
break;
// couldn't lock all we want
SHOW_FLOW0(3, "couldn't lock all bytes");
if (locked_len >= block_size) {
// locked at least one block - we are happy
SHOW_FLOW0(3, "transmission length restricted to locked bytes");
cur_blocks = locked_len / block_size;
cur_len = cur_blocks * block_size;
break;
}
// got less than one block locked - unlock and retry
SHOW_FLOW0(3, "too few bytes locked - trying again with fewer bytes");
blkman_unlock_iovecs(cur_vecs, cur_vec_count, cur_vec_offset, locked_len,
B_DMA_IO | (write ? 0 : B_READ_DEVICE));
}
if (cur_len == 0) {
// didn't manage to lock at least one block
// -> fall back to buffered transfer
SHOW_FLOW0(3, "locking failed");
if (need_buffer) {
// error locking transfer buffer?
// that's impossible - it is locked already!
panic("Cannot lock scratch buffer\n");
res = B_ERROR;
goto err3;
}
need_buffer = true;
goto retry;
}
}
// data is locked and all restrictions are obeyed now;
// time to setup sg list
SHOW_FLOW0(3, "Creating SG list");
res = blkman_map_iovecs(cur_vecs, cur_vec_count, cur_vec_offset, cur_len,
phys_vecs, device->params.max_sg_blocks, device->params.dma_boundary,
device->params.max_sg_block_size);
if (res < 0) {
SHOW_FLOW(3, "failed - %s", strerror(res));
goto cannot_map;
}
if (phys_vecs->total_len < cur_len) {
// we hit some sg limit - restrict transfer appropriately
cur_blocks = phys_vecs->total_len / block_size;
SHOW_FLOW(3, "transmission to complex - restricted to %d blocks", (int)cur_blocks);
if (cur_blocks == 0) {
// oh no - not even one block is left; use transfer buffer instead
SHOW_FLOW0(3, "SG too small to handle even one block");
if (need_locking) {
blkman_unlock_iovecs(cur_vecs, cur_vec_count, cur_vec_offset,
cur_len, B_DMA_IO | (write ? 0 : B_READ_DEVICE));
}
if (need_buffer) {
// we are already using the transfer buffer
// this case is impossible as transfer buffer is linear!
panic("Scratch buffer turned out to be too fragmented !?\n");
}
SHOW_FLOW0(3, "Falling back to buffered transfer");
need_buffer = true;
goto retry;
}
// reflect rounded len in sg list
phys_vecs->total_len = cur_blocks * block_size;
}
// at last - let the bytes flow
SHOW_FLOW(2, "Transmitting %d bytes @%Ld",
(int)phys_vecs->total_len, block_pos);
if (write) {
res = handle->device->interface->write(handle->cookie,
phys_vecs, block_pos, cur_blocks, block_size, &bytes_transferred);
} else {
res = handle->device->interface->read(handle->cookie,
phys_vecs, block_pos, cur_blocks, block_size, &bytes_transferred);
}
SHOW_FLOW(3, "Transfer of %d bytes completed (%s)",
(int)bytes_transferred, strerror(res));
cannot_map:
// unlock data
if (need_locking) {
blkman_unlock_iovecs(cur_vecs, cur_vec_count, cur_vec_offset,
cur_len, B_DMA_IO | (write ? 0 : B_READ_DEVICE));
}
if (res < 0)
goto err3;
if (need_buffer) {
// adjust transfer size by gap skipped at beginning of blocks
bytes_transferred -= block_ofs;
// if we had to round up to block size, adjust transfer as well
if (bytes_transferred > len)
bytes_transferred = len;
// if transfer buffer is used for read, copy result from it
if (!write) {
SHOW_FLOW(3, "copying data back from buffer (%ld bytes)",
bytes_transferred);
blkman_copy_buffer(blkman_buffer + block_ofs,
vec, vec_count, vec_offset, bytes_transferred, false);
}
release_sem(blkman_buffer_lock);
}
len -= bytes_transferred;
vec_offset += bytes_transferred;
pos += bytes_transferred;
}
locked_pool->free(handle->device->phys_vecs_pool, phys_vecs);
SHOW_FLOW0(3, "done");
return B_OK;
err3:
if (need_buffer)
release_sem(blkman_buffer_lock);
err2:
locked_pool->free(handle->device->phys_vecs_pool, phys_vecs);
err:
SHOW_FLOW(3, "done with error %s", strerror(res));
// we haven't transferred all the data - tell the caller how much we did
*total_len = orig_len - len;
return res;
}
static status_t
blkman_readv_int(blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len, bool need_locking)
{
return blkman_readwrite(handle, pos, vec, vec_count, len, need_locking, false);
}
static status_t
blkman_writev_int(blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len, bool need_locking)
{
return blkman_readwrite(handle, pos, vec, vec_count, len, need_locking, true);
}
/** generic read(v)/write(v) routine;
* iovecs are locked during transfer
* inlining it leads to overall code reduction as <write> is const
*/
static inline status_t
blkman_readwritev(blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len, bool write)
{
status_t res;
struct iovec *cur_vec;
size_t left;
size_t total_len;
if ((res = lock_memory(vec, vec_count * sizeof(vec[0]), 0)) < 0)
return res;
// there is an error in the BeBook: *len does _not_ contain the correct
// total length on call - you have to calculate that yourself
total_len = 0;
for (cur_vec = vec, left = vec_count; left > 0; ++cur_vec, --left) {
total_len += cur_vec->iov_len;
}
*len = total_len;
if (write)
res = blkman_writev_int(handle, pos, vec, vec_count, len, true);
else
res = blkman_readv_int(handle, pos, vec, vec_count, len, true);
unlock_memory(vec, vec_count * sizeof(vec[0]), 0);
return res;
}
status_t
blkman_readv(blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len)
{
/* SHOW_FLOW( 4, "len=%d", (int)*len );
for( cur_vec = vec, left = vec_count; left > 0; ++cur_vec, --left ) {
SHOW_FLOW( 4, "pos=%x, size=%d",
(int)cur_vec->iov_base, (int)cur_vec->iov_len );
}*/
return blkman_readwritev(handle, pos, vec, vec_count, len, false);
}
status_t
blkman_read(blkman_handle_info *handle, off_t pos, void *buf, size_t *len)
{
iovec vec[1];
vec[0].iov_base = buf;
vec[0].iov_len = *len;
SHOW_FLOW0( 3, "" );
// TBD: this assumes that the thread stack is not paged;
// otherwise you want to use blkman_readv
return blkman_readv_int(handle, pos, vec, 1, len, true);
}
status_t
blkman_writev(blkman_handle_info *handle, off_t pos, struct iovec *vec,
size_t vec_count, size_t *len)
{
return blkman_readwritev(handle, pos, vec, vec_count, len, true);
}
status_t
blkman_write(blkman_handle_info *handle, off_t pos, void *buf, size_t *len)
{
iovec vec[1];
vec[0].iov_base = buf;
vec[0].iov_len = *len;
// see blkman_read
return blkman_writev_int(handle, pos, vec, 1, len, true);
}
void
blkman_set_media_params(blkman_device_info *device, uint32 block_size,
uint32 ld_block_size, uint64 capacity)
{
SHOW_FLOW(3, "block_size = %lu, ld_block_size = %lu, capacity = %Lu\n", block_size,
ld_block_size, capacity);
ACQUIRE_BEN(&device->lock);
device->block_size = block_size;
device->ld_block_size = ld_block_size;
device->capacity = capacity;
RELEASE_BEN(&device->lock);
}
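
As a worked example of the boundary arithmetic in blkman_map_iovecs() above (all values invented): a 64 KB DMA boundary is passed as the mask 0xffff, and an s/g entry is cut at the next boundary crossing:

/* worked example for the split logic in blkman_map_iovecs() */
static void
example_boundary_split(void)
{
	addr_t address = 0x1f000;	/* start of an s/g entry */
	addr_t dma_boundary = 0xffff;	/* 64 KB boundary mask */
	addr_t max_len;

	/* space up to the next boundary crossing at 0x20000 */
	max_len = (dma_boundary + 1) - (address & dma_boundary);
	/* max_len == 0x1000 - an entry larger than 4 KB gets split here */
	dprintf("max_len = 0x%lx\n", max_len);
}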

View File

@ -0,0 +1,139 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
VM helper functions.
Important assumption: get_memory_map must combine adjacent
physical pages, so contiguous memory always leads to an S/G
list of length one.
*/
#include "KernelExport_ext.h"
#include "wrapper.h"
#include <vm.h>
#include <string.h>
/** get sg list of iovec
* TBD: this should be moved somewhere into the kernel
*/
status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
size_t cur_idx;
size_t left_len;
SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
vec_count, vec_offset, len, max_entries);
// skip iovec blocks if needed
while (vec_count > 0 && vec_offset >= vec->iov_len) {
vec_offset -= vec->iov_len;
--vec_count;
++vec;
}
for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
char *range_start;
size_t range_len;
status_t res;
size_t cur_num_entries, cur_mapped_len;
uint32 tmp_idx;
SHOW_FLOW( 3, "left_len=%d, vec_count=%d, cur_idx=%d",
(int)left_len, (int)vec_count, (int)cur_idx );
// map one iovec
range_start = (char *)vec->iov_base + vec_offset;
range_len = min( vec->iov_len - vec_offset, left_len );
SHOW_FLOW( 3, "range_start=%x, range_len=%x",
(int)range_start, (int)range_len );
vec_offset = 0;
if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
max_entries - cur_idx)) != B_OK) {
// according to the docs, no error is ever reported - argh!
SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
return res;
}
// annoying: get_memory_map tells us neither how many sg blocks
// were used nor whether there were enough sg blocks at all;
// -> determine that manually
cur_mapped_len = 0;
cur_num_entries = 0;
for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
if (map[tmp_idx].size == 0)
break;
cur_mapped_len += map[tmp_idx].size;
++cur_num_entries;
}
if (cur_mapped_len == 0) {
panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
(int)left_len, (int)cur_idx, (int)max_entries);
SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
(int)left_len, (int)cur_idx, (int)max_entries);
return B_ERROR;
}
SHOW_FLOW( 3, "cur_num_entries=%d, cur_mapped_len=%x",
(int)cur_num_entries, (int)cur_mapped_len );
// try to combine with previous sg block
if (cur_num_entries > 0 && cur_idx > 0
&& map[cur_idx].address == (char *)map[cur_idx - 1].address + map[cur_idx - 1].size) {
SHOW_FLOW0( 3, "combine with previous chunk" );
map[cur_idx - 1].size += map[cur_idx].size;
// the regions overlap, so this must be memmove, not memcpy
memmove(&map[cur_idx], &map[cur_idx + 1], (cur_num_entries - 1) * sizeof(map[0]));
--cur_num_entries;
}
cur_idx += cur_num_entries;
left_len -= cur_mapped_len;
// advance iovec if current one is described completely
if (cur_mapped_len == range_len) {
++vec;
--vec_count;
}
}
*num_entries = cur_idx;
*mapped_len = len - left_len;
SHOW_FLOW( 3, "num_entries=%d, mapped_len=%x",
(int)*num_entries, (int)*mapped_len );
return B_OK;
}
/** map main memory into virtual address space */
status_t
map_mainmemory(addr_t physicalAddress, void **_virtualAddress)
{
return vm_get_physical_page(physicalAddress, (addr_t *)_virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
// ToDo: check if CAN_WAIT is correct
}
/** unmap main memory from virtual address space */
status_t
unmap_mainmemory(void *virtualAddress)
{
return vm_put_physical_page((addr_t)virtualAddress);
}
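
A hypothetical caller of the two helpers above; the single-page restriction noted in the comment is an assumption of this sketch, since vm_get_physical_page() maps one page at a time:

/* hypothetical sketch - copy data out of one physical page */
static status_t
example_peek_physical(addr_t physical, void *out, size_t len)
{
	void *virt;
	status_t res;

	/* len must not cross the end of the mapped page */
	res = map_mainmemory(physical, &virt);
	if (res != B_OK)
		return res;

	memcpy(out, virt, len);
	return unmap_mainmemory(virt);
}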

View File

@ -0,0 +1,89 @@
#ifndef _WRAPPER_H
#define _WRAPPER_H
#include <KernelExport.h>
#include <lock.h>
// benaphores
#define INIT_BEN(x, prefix) benaphore_init(x, prefix)
#define DELETE_BEN(x) benaphore_destroy(x)
#define ACQUIRE_BEN(x) benaphore_lock(x)
#define RELEASE_BEN(x) benaphore_unlock(x)
// debug output
#ifdef DEBUG_WAIT_ON_MSG
# define DEBUG_WAIT snooze( DEBUG_WAIT_ON_MSG );
#else
# define DEBUG_WAIT
#endif
#ifdef DEBUG_WAIT_ON_ERROR
# define DEBUG_WAIT_ERROR snooze( DEBUG_WAIT_ON_ERROR );
#else
# define DEBUG_WAIT_ERROR
#endif
#ifndef DEBUG_MAX_LEVEL_FLOW
# define DEBUG_MAX_LEVEL_FLOW 4
#endif
#ifndef DEBUG_MAX_LEVEL_INFO
# define DEBUG_MAX_LEVEL_INFO 4
#endif
#ifndef DEBUG_MAX_LEVEL_ERROR
# define DEBUG_MAX_LEVEL_ERROR 4
#endif
#ifndef DEBUG_MSG_PREFIX
# define DEBUG_MSG_PREFIX ""
#endif
#ifndef debug_level_flow
# define debug_level_flow 3
#endif
#ifndef debug_level_info
# define debug_level_info 2
#endif
#ifndef debug_level_error
# define debug_level_error 1
#endif
#define FUNC_NAME DEBUG_MSG_PREFIX __FUNCTION__ ": "
#define SHOW_FLOW(seriousness, format, param...) \
do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
dprintf( "%s"##format"\n", FUNC_NAME, param ); DEBUG_WAIT \
}} while( 0 )
#define SHOW_FLOW0(seriousness, format) \
do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
dprintf( "%s"##format"\n", FUNC_NAME); DEBUG_WAIT \
}} while( 0 )
#define SHOW_INFO(seriousness, format, param...) \
do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
dprintf( "%s"##format"\n", FUNC_NAME, param ); DEBUG_WAIT \
}} while( 0 )
#define SHOW_INFO0(seriousness, format) \
do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
dprintf( "%s"##format"\n", FUNC_NAME); DEBUG_WAIT \
}} while( 0 )
#define SHOW_ERROR(seriousness, format, param...) \
do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
dprintf( "%s"##format"\n", FUNC_NAME, param ); DEBUG_WAIT_ERROR \
}} while( 0 )
#define SHOW_ERROR0(seriousness, format) \
do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
dprintf( "%s"##format"\n", FUNC_NAME); DEBUG_WAIT_ERROR \
}} while( 0 )
#endif /* _WRAPPER_H */
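
Finally, a hypothetical usage snippet for these macros. A driver source defines the knobs before including wrapper.h; note that FUNC_NAME relies on the era's gcc treating __FUNCTION__ as a concatenable string literal:

/* hypothetical usage - in some driver source file */
#define DEBUG_MSG_PREFIX "blkman "	/* prepended to every message */
#define DEBUG_MAX_LEVEL_FLOW 3		/* flow messages above 3 vanish */
#include "wrapper.h"

static void
example_trace(void *device)
{
	SHOW_FLOW(2, "opened device %p", device);	/* printed: 2 <= 3 */
	SHOW_FLOW0(4, "very verbose detail");		/* compiled away: 4 > 3 */
}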