Fixing warnings under GCC 4 in preparation for enabling -Werror there as well:

* Replaced the use of offsetof() for structs that aren't PODs. Added an
  offset_of_member() macro to util/khash.h, because that's what it's used for
  in our cases.
* Changed the signature of add_debugger_command()/remove_debugger_command() on
  GCC > 2 to avoid the deprecated conversion from string constants to char *.
* Added some "suggested" parentheses. I know that not everyone likes that, but
  it pointed out at least one bug, which is fixed here as well (see the sketch
  below).
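
A minimal, self-contained sketch of that kind of precedence bug (the flag name
and value are made up for illustration; this is not code from the commit):

    #include <cstdio>

    #define MY_VIP_FLAG 0x02	// hypothetical flag, illustration only

    int main()
    {
        unsigned int flags = MY_VIP_FLAG;
        // Without parentheses, != binds tighter than &, so this parses as
        // flags & (MY_VIP_FLAG != 0), i.e. flags & 1 -- false for bit 0x02.
        bool buggy = flags & MY_VIP_FLAG != 0;
        // With the "suggested" parentheses the intended bit test is explicit.
        bool fixed = (flags & MY_VIP_FLAG) != 0;
        printf("buggy: %d, fixed: %d\n", buggy, fixed);
        return 0;
    }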


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@29113 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Lotz 2009-02-01 20:48:02 +00:00
parent 47bc666311
commit c33667d400
24 changed files with 112 additions and 79 deletions

View File

@ -174,9 +174,16 @@ extern void panic(const char *format, ...) _PRINTFLIKE(1, 2);
extern void kernel_debugger(const char *message);
extern uint64 parse_expression(const char *string);
#if __GNUC__ > 2
extern int add_debugger_command(const char *name,
debugger_command_hook hook, const char *help);
extern int remove_debugger_command(const char *name,
debugger_command_hook hook);
#else
extern int add_debugger_command(char *name, debugger_command_hook hook, char *help);
extern int remove_debugger_command(char *name,
debugger_command_hook hook);
#endif
/* Miscellaneous */
extern void spin(bigtime_t microseconds);
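
For context, a standalone sketch (hypothetical function names, not the real
kernel API) of the warning class those const char* prototypes avoid under
GCC 4:

    #include <cstdio>

    // Stand-ins for the two prototype styles; not the actual
    // add_debugger_command() declarations.
    static void old_style(char* name) { printf("old: %s\n", name); }
    static void new_style(const char* name) { printf("new: %s\n", name); }

    int main()
    {
        // Passing a string literal to old_style() is exactly what GCC 4
        // flags as "deprecated conversion from string constant to 'char*'"
        // (and what later standards reject outright); the cast is kept here
        // only so this sketch compiles, mirroring the casts the GCC 2 code
        // paths need.
        old_style((char*)"threads");
        new_style("threads");	// fine: literals convert to const char*
        return 0;
    }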

View File

@ -11,6 +11,12 @@
#include <SupportDefs.h>
// The use of offsetof() on non-PODs is invalid. Since many structs use
// templated members (e.g. DoublyLinkedList), which makes them non-PODs, we
// can't use offsetof() anymore. This macro does the same, but requires an
// instance of the object in question.
#define offset_of_member(OBJECT, MEMBER) \
((size_t)((char*)&OBJECT.MEMBER - (char*)&OBJECT))
// can be allocated on the stack
typedef struct hash_iterator {
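
A usage sketch of the new macro (standalone copy of the macro and a
hypothetical struct; the real definition lives in the header above):

    #include <cstddef>
    #include <cstdio>

    // Standalone copy of the macro for this sketch only.
    #define offset_of_member(OBJECT, MEMBER) \
        ((size_t)((char*)&OBJECT.MEMBER - (char*)&OBJECT))

    // Hypothetical non-POD: the constructor alone makes it a non-POD, so
    // offsetof(cached_item, next) would trigger -Winvalid-offsetof on GCC 4.
    struct cached_item {
        cached_item() : next(NULL), value(0) {}
        cached_item*	next;
        int				value;
    };

    int main()
    {
        cached_item dummy;
        printf("offset of 'next': %lu\n",
            (unsigned long)offset_of_member(dummy, next));
        return 0;
    }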

View File

@ -611,7 +611,7 @@ main(int argc, const char *const *argv)
// write the boot code
printf("Writing boot code to \"%s\" (partition offset: %lld bytes, "
"start offset = %d) "
"start offset = %lld) "
"...\n", fileName, partitionOffset, startOffset);
write_boot_code_part(fileName, fd, startOffset, bootCodeData, 0,

View File

@ -57,7 +57,7 @@ void
UserMessagingMessageSender::SendMessage(const KMessage* message, port_id port,
int32 token)
{
if (message != fMessage && fMessage != NULL
if ((message != fMessage && fMessage != NULL)
|| fTargetCount == MAX_MESSAGING_TARGET_COUNT) {
FlushMessage();
}

View File

@ -367,10 +367,10 @@ is_kernel_stack_address(struct thread* thread, addr_t address)
if (thread == NULL)
return IS_KERNEL_ADDRESS(address);
return address >= thread->kernel_stack_base
&& address < thread->kernel_stack_top
|| thread->cpu != NULL
&& is_double_fault_stack_address(thread->cpu->cpu_num, address);
return (address >= thread->kernel_stack_base
&& address < thread->kernel_stack_top)
|| (thread->cpu != NULL
&& is_double_fault_stack_address(thread->cpu->cpu_num, address));
}
@ -508,7 +508,8 @@ stack_trace(int argc, char **argv)
threadIndex++;
}
if (argc > threadIndex + 1 || argc == 2 && strcmp(argv[1], "--help") == 0) {
if (argc > threadIndex + 1
|| (argc == 2 && strcmp(argv[1], "--help") == 0)) {
kprintf(usage, argv[0]);
return 0;
}

View File

@ -47,7 +47,6 @@
// system, like out of memory situations - should only panic for debugging.
#define FATAL(x) panic x
static const bigtime_t kTransactionIdleTime = 2000000LL;
// a transaction is considered idle after 2 seconds of inactivity
@ -605,8 +604,8 @@ notify_transaction_listeners(block_cache* cache, cache_transaction* transaction,
while (iterator.HasNext()) {
cache_listener* listener = iterator.Next();
bool remove = isClosing && !is_written_event(listener->events)
|| isWritten && is_written_event(listener->events);
bool remove = (isClosing && !is_written_event(listener->events))
|| (isWritten && is_written_event(listener->events));
if (remove)
iterator.Remove();
@ -804,12 +803,14 @@ block_cache::Init()
if (buffer_cache == NULL)
return B_NO_MEMORY;
hash = hash_init(1024, offsetof(cached_block, next), &cached_block::Compare,
&cached_block::Hash);
cached_block dummyBlock;
hash = hash_init(1024, offset_of_member(dummyBlock, next),
&cached_block::Compare, &cached_block::Hash);
if (hash == NULL)
return B_NO_MEMORY;
transaction_hash = hash_init(16, offsetof(cache_transaction, next),
cache_transaction dummyTransaction;
transaction_hash = hash_init(16, offset_of_member(dummyTransaction, next),
&transaction_compare, &::transaction_hash);
if (transaction_hash == NULL)
return B_NO_MEMORY;

View File

@ -242,7 +242,7 @@ FileMap::_Add(file_io_vec* vecs, size_t vecCount, off_t& lastOffset)
if (lastExtent != NULL) {
if (lastExtent->disk.offset + lastExtent->disk.length
== vecs[i].offset
|| lastExtent->disk.offset == -1 && vecs[i].offset == -1) {
|| (lastExtent->disk.offset == -1 && vecs[i].offset == -1)) {
lastExtent->disk.length += vecs[i].length;
offset += vecs[i].length;
start--;
@ -368,8 +368,8 @@ FileMap::SetMode(uint32 mode)
MutexLocker _(fLock);
if (mode == FILE_MAP_CACHE_ALL && fCacheAll
|| mode == FILE_MAP_CACHE_ON_DEMAND && !fCacheAll)
if ((mode == FILE_MAP_CACHE_ALL && fCacheAll)
|| (mode == FILE_MAP_CACHE_ON_DEMAND && !fCacheAll))
return B_OK;
if (mode == FILE_MAP_CACHE_ALL) {

View File

@ -488,8 +488,8 @@ print_debugger_command_usage(const char* commandName)
kprintf_unfiltered("usage: %s ", command->name);
kputs_unfiltered(command->usage);
} else {
char* args[3] = { NULL, "--help", NULL };
invoke_debugger_command(command, 2, args);
const char* args[3] = { NULL, "--help", NULL };
invoke_debugger_command(command, 2, (char**)args);
}
return true;
@ -498,16 +498,24 @@ print_debugger_command_usage(const char* commandName)
// #pragma mark - public API
int
#if __GNUC__ > 2
add_debugger_command(const char *name, int (*func)(int, char **),
const char *desc)
#else
add_debugger_command(char *name, int (*func)(int, char **), char *desc)
#endif
{
return add_debugger_command_etc(name, func, desc, NULL, 0);
}
int
#if __GNUC__ > 2
remove_debugger_command(const char * name, int (*func)(int, char **))
#else
remove_debugger_command(char * name, int (*func)(int, char **))
#endif
{
struct debugger_command *cmd = sCommands;
struct debugger_command *prev = NULL;

View File

@ -513,7 +513,7 @@ struct OperationComparator {
off_t offsetA = a->Offset();
off_t offsetB = b->Offset();
return offsetA < offsetB
|| offsetA == offsetB && a->Length() > b->Length();
|| (offsetA == offsetB && a->Length() > b->Length());
}
};

View File

@ -1746,8 +1746,8 @@ devfs_io(fs_volume *volume, fs_vnode *_vnode, void *_cookie,
bool isWrite = request->IsWrite();
if (!S_ISCHR(vnode->stream.type)
|| ((isWrite && !vnode->stream.u.dev.device->HasWrite()
|| !isWrite && !vnode->stream.u.dev.device->HasRead())
|| (((isWrite && !vnode->stream.u.dev.device->HasWrite())
|| (!isWrite && !vnode->stream.u.dev.device->HasRead()))
&& !vnode->stream.u.dev.device->HasIO())
|| cookie == NULL) {
return B_NOT_ALLOWED;

View File

@ -481,7 +481,7 @@ find_driver(dev_t device, ino_t node)
while (true) {
driver = (legacy_driver *)hash_next(sDriverHash, &iterator);
if (driver == NULL
|| driver->device == device && driver->node == node)
|| (driver->device == device && driver->node == node))
break;
}
@ -1372,8 +1372,10 @@ legacy_driver_probe(const char* subPath)
extern "C" status_t
legacy_driver_init(void)
{
sDriverHash = hash_init(DRIVER_HASH_SIZE, offsetof(legacy_driver, next),
&driver_entry_compare, &driver_entry_hash);
legacy_driver dummyDriver;
sDriverHash = hash_init(DRIVER_HASH_SIZE,
offset_of_member(dummyDriver, next), &driver_entry_compare,
&driver_entry_hash);
if (sDriverHash == NULL)
return B_NO_MEMORY;

View File

@ -2049,9 +2049,9 @@ _user_read_kernel_image_symbols(image_id id, struct Elf32_Sym* symbolTable,
if (_symbolCount == NULL || _stringTableSize == NULL)
return B_BAD_VALUE;
if (!IS_USER_ADDRESS(_symbolCount) || !IS_USER_ADDRESS(_stringTableSize)
|| _imageDelta != NULL && !IS_USER_ADDRESS(_imageDelta)
|| symbolTable != NULL && !IS_USER_ADDRESS(symbolTable)
|| stringTable != NULL && !IS_USER_ADDRESS(stringTable)) {
|| (_imageDelta != NULL && !IS_USER_ADDRESS(_imageDelta))
|| (symbolTable != NULL && !IS_USER_ADDRESS(symbolTable))
|| (stringTable != NULL && !IS_USER_ADDRESS(stringTable))) {
return B_BAD_ADDRESS;
}
@ -2120,8 +2120,8 @@ _user_read_kernel_image_symbols(image_id id, struct Elf32_Sym* symbolTable,
if (user_memcpy(_symbolCount, &symbolCount, sizeof(symbolCount)) != B_OK
|| user_memcpy(_stringTableSize, &stringTableSize,
sizeof(stringTableSize)) != B_OK
|| _imageDelta != NULL && user_memcpy(_imageDelta, &imageDelta,
sizeof(imageDelta)) != B_OK) {
|| (_imageDelta != NULL && user_memcpy(_imageDelta, &imageDelta,
sizeof(imageDelta)) != B_OK)) {
return B_BAD_ADDRESS;
}

View File

@ -364,7 +364,7 @@ Inode::WriteDataToBuffer(const void *_data, size_t *_length, bool nonBlocking)
while (dataSize > 0) {
// Wait until enough space in the buffer is available.
while (!fActive
|| fBuffer.Writable() < minToWrite && fReaderCount > 0) {
|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
if (nonBlocking)
return B_WOULD_BLOCK;
@ -639,9 +639,9 @@ Inode::Select(uint8 event, selectsync *sync, int openMode)
// signal right away, if the condition holds already
if (writer) {
if (event == B_SELECT_WRITE
&& (fBuffer.Writable() > 0 || fReaderCount == 0)
|| event == B_SELECT_ERROR && fReaderCount == 0) {
if ((event == B_SELECT_WRITE
&& (fBuffer.Writable() > 0 || fReaderCount == 0))
|| (event == B_SELECT_ERROR && fReaderCount == 0)) {
return notify_select_event(sync, event);
}
} else {

View File

@ -119,9 +119,9 @@ copy_address_to_userland(const void* address, socklen_t addressLength,
// copy address size and address back to userland
if (user_memcpy(userAddressLength, &addressLength,
sizeof(socklen_t)) != B_OK
|| userAddress != NULL
|| (userAddress != NULL
&& user_memcpy(userAddress, address,
min_c(addressLength, userAddressBufferSize)) != B_OK) {
min_c(addressLength, userAddressBufferSize)) != B_OK)) {
return B_BAD_ADDRESS;
}
@ -973,10 +973,10 @@ _user_recvmsg(int socket, struct msghdr *userMessage, int flags)
message.msg_name = userAddress;
message.msg_iov = userVecs;
message.msg_control = userAncillary;
if (userAddress != NULL && user_memcpy(userAddress, address,
message.msg_namelen) != B_OK
|| userAncillary != NULL && user_memcpy(userAncillary, ancillary,
message.msg_controllen) != B_OK
if ((userAddress != NULL && user_memcpy(userAddress, address,
message.msg_namelen) != B_OK)
|| (userAncillary != NULL && user_memcpy(userAncillary, ancillary,
message.msg_controllen) != B_OK)
|| user_memcpy(userMessage, &message, sizeof(msghdr)) != B_OK) {
return B_BAD_ADDRESS;
}

View File

@ -1852,8 +1852,8 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
if (vnodeToDisconnect != NULL) {
if (vnode == vnodeToDisconnect)
disconnect_fd(descriptor);
} else if (vnode != NULL && vnode->mount == mount
|| vnode == NULL && descriptor->u.mount == mount)
} else if ((vnode != NULL && vnode->mount == mount)
|| (vnode == NULL && descriptor->u.mount == mount))
disconnect_fd(descriptor);
put_fd(descriptor);
@ -4842,15 +4842,17 @@ vfs_setrlimit(int resource, const struct rlimit * rlp)
status_t
vfs_init(kernel_args *args)
{
sVnodeTable = hash_init(VNODE_HASH_TABLE_SIZE, offsetof(struct vnode, next),
&vnode_compare, &vnode_hash);
struct vnode dummyVnode;
sVnodeTable = hash_init(VNODE_HASH_TABLE_SIZE,
offset_of_member(dummyVnode, next), &vnode_compare, &vnode_hash);
if (sVnodeTable == NULL)
panic("vfs_init: error creating vnode hash table\n");
list_init_etc(&sUnusedVnodeList, offsetof(struct vnode, unused_link));
list_init_etc(&sUnusedVnodeList, offset_of_member(dummyVnode, unused_link));
sMountsTable = hash_init(MOUNTS_HASH_TABLE_SIZE, offsetof(struct fs_mount, next),
&mount_compare, &mount_hash);
struct fs_mount dummyMount;
sMountsTable = hash_init(MOUNTS_HASH_TABLE_SIZE,
offset_of_member(dummyMount, next), &mount_compare, &mount_hash);
if (sMountsTable == NULL)
panic("vfs_init: error creating mounts hash table\n");
@ -5752,10 +5754,10 @@ common_fcntl(int fd, int op, uint32 argument, bool kernel)
status = release_advisory_lock(vnode, &flock);
} else {
// the open mode must match the lock type
if ((descriptor->open_mode & O_RWMASK) == O_RDONLY
&& flock.l_type == F_WRLCK
|| (descriptor->open_mode & O_RWMASK) == O_WRONLY
&& flock.l_type == F_RDLCK)
if (((descriptor->open_mode & O_RWMASK) == O_RDONLY
&& flock.l_type == F_WRLCK)
|| ((descriptor->open_mode & O_RWMASK) == O_WRONLY
&& flock.l_type == F_RDLCK))
status = B_FILE_ERROR;
else {
status = acquire_advisory_lock(vnode, -1,
@ -6725,7 +6727,7 @@ static dev_t
fs_mount(char* path, const char* device, const char* fsName, uint32 flags,
const char* args, bool kernel)
{
struct fs_mount* mount;
struct ::fs_mount* mount;
status_t status = 0;
FUNCTION(("fs_mount: entry. path = '%s', fs_name = '%s'\n", path, fsName));
@ -6840,7 +6842,7 @@ fs_mount(char* path, const char* device, const char* fsName, uint32 flags,
}
}
mount = new(std::nothrow) struct ::fs_mount;
mount = new(std::nothrow) (struct ::fs_mount);
if (mount == NULL)
return B_NO_MEMORY;
@ -8390,7 +8392,7 @@ _user_open_parent_dir(int fd, char *userName, size_t nameLength)
return B_BAD_ADDRESS;
// open the parent dir
int parentFD = dir_open(fd, "..", kernel);
int parentFD = dir_open(fd, (char*)"..", kernel);
if (parentFD < 0)
return parentFD;
FDCloser fdCloser(parentFD, kernel);

View File

@ -83,7 +83,7 @@ KernelDaemon::Register(daemon_hook function, void* arg, int frequency)
if (function == NULL || frequency < 1)
return B_BAD_VALUE;
struct daemon* daemon = new(std::nothrow) struct ::daemon;
struct ::daemon* daemon = new(std::nothrow) (struct ::daemon);
if (daemon == NULL)
return B_NO_MEMORY;

View File

@ -2182,8 +2182,8 @@ get_module(const char* path, module_info** _info)
module = (struct module*)hash_lookup(sModulesHash, path);
// if we don't have it cached yet, search for it
if (module == NULL || (module->flags & B_BUILT_IN_MODULE) == 0
&& module->module_image == NULL) {
if (module == NULL || ((module->flags & B_BUILT_IN_MODULE) == 0
&& module->module_image == NULL)) {
module = search_module(path, &moduleImage);
if (module == NULL) {
FATAL(("module: Search for %s failed.\n", path));

View File

@ -26,6 +26,7 @@
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>
#include <util/khash.h>
#include <vm.h>
#include <vm_address_space.h>
@ -597,8 +598,12 @@ object_cache_low_memory(void *_self, uint32 resources, int32 level)
static void
object_cache_return_object_wrapper(object_depot *depot, void *object)
{
// TODO: the offset calculation might be wrong because we hardcode a
// SmallObjectCache instead of a base object_cache. Also this must
// have an unacceptable overhead.
SmallObjectCache dummyCache;
object_cache *cache = (object_cache *)(((uint8 *)depot)
- offsetof(object_cache, depot));
- offset_of_member(dummyCache, depot));
object_cache_free(cache, object);
}
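
The wrapper above recovers the owning object_cache from the address of its
embedded depot member -- a container_of-style computation. A minimal
standalone sketch of that idiom, with hypothetical types:

    #include <cstddef>
    #include <cstdio>

    struct depot { int queue_count; };

    // Plain POD, so offsetof() is fine here; the kernel code needs the
    // offset_of_member() workaround only because its caches are non-PODs.
    struct cache {
        int		id;
        depot	embedded_depot;
    };

    // Subtract the member's offset from the member's address to get back
    // to the containing object.
    static cache*
    cache_from_depot(depot* d)
    {
        return (cache*)((char*)d - offsetof(cache, embedded_depot));
    }

    int main()
    {
        cache c;
        c.id = 42;
        printf("recovered id: %d\n", cache_from_depot(&c.embedded_depot)->id);
        return 0;
    }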

View File

@ -2724,8 +2724,8 @@ _user_spawn_thread(thread_creation_attributes* userAttributes)
thread_id threadID;
if (!IS_USER_ADDRESS(attributes.entry) || attributes.entry == NULL
|| attributes.stack_address != NULL
&& !IS_USER_ADDRESS(attributes.stack_address)
|| (attributes.stack_address != NULL
&& !IS_USER_ADDRESS(attributes.stack_address))
|| (attributes.name != NULL && (!IS_USER_ADDRESS(attributes.name)
|| user_strlcpy(name, attributes.name, B_OS_NAME_LENGTH) < 0)))
return B_BAD_ADDRESS;

View File

@ -611,7 +611,7 @@ VMAnonymousCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
}
// create our callback
WriteCallback* callback = (flags & B_VIP_IO_REQUEST != 0)
WriteCallback* callback = (flags & B_VIP_IO_REQUEST) != 0
? new(vip_io_alloc) WriteCallback(this, _callback)
: new(std::nothrow) WriteCallback(this, _callback);
if (callback == NULL) {
@ -1211,7 +1211,7 @@ swap_init_post_modules()
close(fd);
error = swap_file_add("/var/swap");
error = swap_file_add((char *)"/var/swap");
if (error != B_OK)
dprintf("Failed to add swap file /var/swap: %s\n", strerror(error));
}

View File

@ -418,7 +418,7 @@ AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID,
area = (vm_area *)hash_lookup(sAreaHash, &areaID);
if (area != NULL
&& (area->address_space->id == team
|| allowKernel && team == vm_kernel_address_space_id())) {
|| (allowKernel && team == vm_kernel_address_space_id()))) {
fSpace = area->address_space;
atomic_add(&fSpace->ref_count, 1);
}
@ -1358,9 +1358,9 @@ set_area_page_protection(vm_area* area, addr_t pageAddress, uint32 protection)
uint32 pageIndex = (pageAddress - area->base) / B_PAGE_SIZE;
uint8& entry = area->page_protections[pageIndex / 2];
if (pageIndex % 2 == 0)
entry = entry & 0xf0 | protection;
entry = (entry & 0xf0) | protection;
else
entry = entry & 0x0f | (protection << 4);
entry = (entry & 0x0f) | (protection << 4);
}
@ -2280,9 +2280,9 @@ _vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec
// The FD must open for reading at any rate. For shared mapping with write
// access, additionally the FD must be open for writing.
if ((openMode & O_ACCMODE) == O_WRONLY
|| mapping == REGION_NO_PRIVATE_MAP
|| (mapping == REGION_NO_PRIVATE_MAP
&& (protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0
&& (openMode & O_ACCMODE) == O_RDONLY) {
&& (openMode & O_ACCMODE) == O_RDONLY)) {
return EACCES;
}
@ -3459,7 +3459,7 @@ dump_caches_recursively(vm_cache* cache, cache_info& info, int level)
if (level == 0)
kprintf("/%lu", info.page_count);
if (cache->type == CACHE_TYPE_RAM || level == 0 && info.committed > 0) {
if (cache->type == CACHE_TYPE_RAM || (level == 0 && info.committed > 0)) {
kprintf(", committed: %lld", cache->committed_size);
if (level == 0)
@ -3704,9 +3704,9 @@ dump_area(int argc, char **argv)
hash_open(sAreaHash, &iter);
while ((area = (vm_area *)hash_next(sAreaHash, &iter)) != NULL) {
if ((area->name != NULL && !strcmp(argv[index], area->name))
|| num != 0
|| (num != 0
&& ((addr_t)area->id == num
|| area->base <= num && area->base + area->size > num)) {
|| (area->base <= num && area->base + area->size > num)))) {
dump_area_struct(area, mappings);
found = true;
}
@ -3736,8 +3736,8 @@ dump_area_list(int argc, char **argv)
hash_open(sAreaHash, &iter);
while ((area = (vm_area *)hash_next(sAreaHash, &iter)) != NULL) {
if (id != 0 && area->address_space->id != id
|| name != NULL && strstr(area->name, name) == NULL)
if ((id != 0 && area->address_space->id != id)
|| (name != NULL && strstr(area->name, name) == NULL))
continue;
kprintf("%p %5lx %p\t%p %4lx\t%4d %s\n", area, area->id, (void *)area->base,
@ -6347,7 +6347,7 @@ _user_sync_memory(void *_address, size_t size, int flags)
if (writeSync && writeAsync)
return B_BAD_VALUE;
if (size == 0 || !writeSync && !writeAsync)
if (size == 0 || (!writeSync && !writeAsync))
return B_OK;
// iterate through the range and sync all concerned areas

View File

@ -389,7 +389,8 @@ status_t
VMCache::Init(uint32 cacheType)
{
mutex_init(&fLock, "vm_cache");
list_init_etc(&consumers, offsetof(VMCache, consumer_link));
VMCache dummyCache;
list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
areas = NULL;
fRefCount = 1;
source = NULL;

View File

@ -1238,11 +1238,11 @@ page_writer(void* /*unused*/)
// until we're low on pages. Also avoid writing temporary pages that
// are active.
if (page->wired_count > 0
|| cache->temporary
|| (cache->temporary
#if ENABLE_SWAP_SUPPORT
&& (!lowOnPages /*|| page->usage_count > 0*/)
#endif
) {
)) {
continue;
}
@ -1444,9 +1444,9 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
InterruptsSpinLocker locker(sPageLock);
if (reserve && sReservedPages <= free_page_queue_count()
if ((reserve && sReservedPages <= free_page_queue_count())
|| count == 0
|| !reserve && (sInactivePageQueue.count > 0
|| ((!reserve && (sInactivePageQueue.count > 0))
|| free_page_queue_count() > sReservedPages))
return stolen;

View File

@ -564,7 +564,7 @@ common_poll(struct pollfd *fds, nfds_t numFDs, bigtime_t timeout, bool kernel)
int fd = fds[i].fd;
// initialize events masks
sync->set[i].selected_events = fds[i].events & ~POLLNVAL
sync->set[i].selected_events = (fds[i].events & ~POLLNVAL)
| POLLERR | POLLHUP;
sync->set[i].events = 0;