Fixed various warnings.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27841 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-10-02 21:22:03 +00:00
parent ef68c59b17
commit b50e620202
12 changed files with 74 additions and 62 deletions

View File

@ -122,7 +122,7 @@ typedef struct ioapic_s {
volatile uint32 io_register_select;
uint32 reserved[3];
volatile uint32 io_window_register;
} ioapic _PACKED;
} ioapic;
static ioapic *sIOAPIC = NULL;
static uint32 sIOAPICMaxRedirectionEntry = 23;

View File

@ -26,6 +26,8 @@
#include <string.h>
#include <stdio.h>
#include "timers/apic.h"
//#define TRACE_ARCH_SMP
#ifdef TRACE_ARCH_SMP
@ -76,7 +78,7 @@ setup_apic(kernel_args *args, int32 cpu)
config = (apic_read(APIC_LINT0) & 0xffff00ff);
config |= APIC_LVT_DM_ExtINT | APIC_LVT_IIPP | APIC_LVT_TM;
apic_write(APIC_LINT0, config);
/* setup LINT1 as NMI */
config = (apic_read(APIC_LINT1) & 0xffff00ff);
config |= APIC_LVT_DM_NMI | APIC_LVT_IIPP;

View File

@ -36,7 +36,7 @@
#ifdef TRACE_MTRR_ARCH_VM
# define TRACE_MTRR(x...) dprintf(x)
#else
# define TRACE_MTRR(x...)
# define TRACE_MTRR(x...)
#endif
@ -78,6 +78,7 @@ free_mtrr(int32 index)
}
#if 0
/*!
Checks if the provided range overlaps an existing mtrr range
If it actually extends an existing range, extendedIndex is filled
@ -93,20 +94,21 @@ is_memory_overlapping(uint64 base, uint64 length, int32 *extendedIndex)
x86_get_mtrr(index, &b, &l, &t);
// check first for write combining extensions
if (base <= b
if (base <= b
&& (base + length) >= (b + l)
&& t == IA32_MTR_WRITE_COMBINING) {
*extendedIndex = index;
return true;
}
if ((base >= b && base < (b + l))
|| ((base + length) > b
|| ((base + length) > b
&& (base + length) <= (b + l)))
return true;
}
}
return false;
}
#endif // 0
static uint64
@ -182,7 +184,7 @@ set_memory_type(int32 id, uint64 base, uint64 length, uint32 type)
#endif
// length must be a power of 2; just round it up to the next value
length = nearest_power(length);
length = nearest_power(length);
if (length + base <= base) {
// 4GB overflow
@ -198,7 +200,7 @@ set_memory_type(int32 id, uint64 base, uint64 length, uint32 type)
if (index < 0)
return B_ERROR;
TRACE_MTRR("allocate MTRR slot %ld, base = %Lx, length = %Lx, type=0x%lx\n",
TRACE_MTRR("allocate MTRR slot %ld, base = %Lx, length = %Lx, type=0x%lx\n",
index, base, length, type);
sMemoryTypeIDs[index] = id;

View File

@ -7,11 +7,6 @@
#include <SupportDefs.h>
/* Method Prototypes */
static int apic_get_prio(void);
static status_t apic_set_hardware_timer(bigtime_t relativeTimeout);
static status_t apic_clear_hardware_timer(void);
static status_t apic_init(struct kernel_args *args);
status_t apic_smp_init_timer(struct kernel_args *args, int32 cpu);
#endif /* _KERNEL_ARCH_x86_TIMERS_APIC_H */

View File

@ -18,6 +18,13 @@
#include "apic.h"
/* Method Prototypes */
static int apic_get_prio(void);
static status_t apic_set_hardware_timer(bigtime_t relativeTimeout);
static status_t apic_clear_hardware_timer(void);
static status_t apic_init(struct kernel_args *args);
static void *sApicPtr = NULL;
static uint32 sApicTicsPerSec = 0;

View File

@ -21,7 +21,6 @@
#define TRACE(x) ;
#endif
static uint32 sHPETAddr;
static struct hpet_regs *sHPETRegs;
struct timer_info gHPETTimer = {

View File

@ -30,6 +30,21 @@
#endif
// Out-of-line virtual destructor. Empty, but defined here so that
// subclasses destroyed through an IOCallback* are torn down correctly.
IOCallback::~IOCallback()
{
}
// I/O hook: this base implementation performs no work and simply
// reports failure (B_ERROR). The header declares DoIO() pure virtual,
// so concrete subclasses must override it with a real implementation.
status_t
IOCallback::DoIO(IOOperation* operation)
{
	return B_ERROR;
}
// #pragma mark -
void
IORequestOwner::Dump() const
{

View File

@ -19,7 +19,9 @@
class IOCallback {
public:
virtual status_t DoIO(IOOperation* operation);
virtual ~IOCallback();
virtual status_t DoIO(IOOperation* operation) = 0;
};
typedef status_t (*io_callback)(void* data, io_operation* operation);

View File

@ -154,20 +154,6 @@ get_child_partition(partition_id partitionID, int32 index)
}
// compare_partition_data_offset
/*!	qsort()-style comparator ordering partition_data entries by ascending
	on-disk offset. Both arguments point to partition_data* array elements.
	Returns a negative value, zero, or a positive value when the first
	partition starts before, at, or after the second, respectively.
*/
static int
compare_partition_data_offset(const void* _a, const void* _b)
{
	const partition_data* first = *(const partition_data**)_a;
	const partition_data* second = *(const partition_data**)_b;

	if (first->offset < second->offset)
		return -1;
	if (first->offset > second->offset)
		return 1;
	return 0;
}
// create_child_partition
partition_data *
create_child_partition(partition_id partitionID, int32 index,

View File

@ -1018,6 +1018,10 @@ public:
addr_t deltaFound = INT_MAX;
bool exactMatch = false;
// to get rid of the erroneous "uninitialized" warnings
symbolFound.st_name = 0;
symbolFound.st_value = 0;
for (uint32 i = 0; i < hashTabSize; i++) {
uint32 bucket;
if (!_Read(&hashBuckets[i], bucket))

View File

@ -656,7 +656,7 @@ smp_wake_up_non_boot_cpus()
void
smp_cpu_rendezvous(volatile uint32 *var, int current_cpu)
{
atomic_or(var, 1 << current_cpu);
atomic_or((vint32*)var, 1 << current_cpu);
while (*var != ((1 << sNumCPUs) - 1))
PAUSE();

View File

@ -29,8 +29,8 @@
* BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting
*
* This module implements a general bitmap allocator/deallocator. The
 * allocator eats around 2 bits per 'block'. The module does not
 * try to interpret the meaning of a 'block' other than to return
 * allocator eats around 2 bits per 'block'. The module does not
 * try to interpret the meaning of a 'block' other than to return
* SWAPBLK_NONE on an allocation failure.
*
* A radix tree is used to maintain the bitmap. Two radix constants are
@ -38,9 +38,9 @@
* 32), and one for the meta nodes (typically 16). Both meta and leaf
* nodes have a hint field. This field gives us a hint as to the largest
* free contiguous range of blocks under the node. It may contain a
* value that is too high, but will never contain a value that is too
* value that is too high, but will never contain a value that is too
* low. When the radix tree is searched, allocation failures in subtrees
* update the hint.
* update the hint.
*
* The radix tree also implements two collapsed states for meta nodes:
* the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is
@ -50,33 +50,33 @@
*
* The hinting greatly increases code efficiency for allocations while
* the general radix structure optimizes both allocations and frees. The
* radix tree should be able to operate well no matter how much
* radix tree should be able to operate well no matter how much
* fragmentation there is and no matter how large a bitmap is used.
*
* Unlike the rlist code, the blist code wires all necessary memory at
* creation time. Neither allocations nor frees require interaction with
* the memory subsystem. In contrast, the rlist code may allocate memory
* the memory subsystem. In contrast, the rlist code may allocate memory
* on an rlist_free() call. The non-blocking features of the blist code
* are used to great advantage in the swap code (vm/nswap_pager.c). The
* rlist code uses a little less overall memory then the blist code (but
* due to swap interleaving not all that much less), but the blist code
* due to swap interleaving not all that much less), but the blist code
* scales much, much better.
*
 * LAYOUT: The radix tree is laid out recursively using a
 * linear array. Each meta node is immediately followed (laid out
* sequentially in memory) by BLIST_META_RADIX lower level nodes. This
* is a recursive structure but one that can be easily scanned through
* a very simple 'skip' calculation. In order to support large radixes,
* portions of the tree may reside outside our memory allocation. We
* handle this with an early-termination optimization (when bighint is
* set to -1) on the scan. The memory allocation is only large enough
* a very simple 'skip' calculation. In order to support large radixes,
* portions of the tree may reside outside our memory allocation. We
* handle this with an early-termination optimization (when bighint is
* set to -1) on the scan. The memory allocation is only large enough
* to cover the number of blocks requested at creation time even if it
* must be encompassed in larger root-node radix.
*
 * NOTE: the allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too
 * large' if you try. This is an area that could use improvement. The
 * radix is large enough that this restriction does not affect the swap
 * NOTE: the allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too
 * large' if you try. This is an area that could use improvement. The
 * radix is large enough that this restriction does not affect the swap
 * system, though. Currently only the allocation code is affected by
* this algorithmic unfeature. The freeing code can handle arbitrary
* ranges.
@ -99,7 +99,7 @@
#include <stdlib.h>
#include <util/RadixBitmap.h>
#define TERMINATOR -1
#define TERMINATOR -1
static uint32
@ -132,15 +132,15 @@ radix_bitmap_init(radix_node *node, uint32 radix, uint32 skip, uint32 slots)
uint32 i;
for (i = 1; i <= skip; i += next_skip) {
if (slots >= radix) {
index = i + radix_bitmap_init(node ? &node[i] : NULL,
index = i + radix_bitmap_init(node ? &node[i] : NULL,
radix, next_skip - 1, radix);
slots -= radix;
} else if (slots > 0) {
index = i + radix_bitmap_init(node ? &node[i] : NULL,
index = i + radix_bitmap_init(node ? &node[i] : NULL,
radix, next_skip - 1, slots);
slots = 0;
} else { // add a terminator
if (node)
if (node)
node[i].big_hint = TERMINATOR;
break;
}
@ -172,7 +172,7 @@ radix_bitmap_create(uint32 slots)
bmp->skip = skip;
bmp->free_slots = slots;
bmp->root_size = 1 + radix_bitmap_init(NULL, radix, skip, slots);
bmp->root = (radix_node *)malloc(bmp->root_size * sizeof(radix_node));
if (bmp->root == NULL) {
free(bmp);
@ -193,10 +193,10 @@ radix_bitmap_destroy(radix_bitmap *bmp)
}
static swap_addr_t
static swap_addr_t
radix_leaf_alloc(radix_node *leaf, swap_addr_t slotIndex, int32 count)
{
if (count <= BITMAP_RADIX) {
if (count <= (int32)BITMAP_RADIX) {
bitmap_t bitmap = ~leaf->u.bitmap;
uint32 n = BITMAP_RADIX - count;
bitmap_t mask = (bitmap_t)-1 >> n;
@ -217,7 +217,7 @@ radix_leaf_alloc(radix_node *leaf, swap_addr_t slotIndex, int32 count)
static swap_addr_t
radix_node_alloc(radix_node *node, swap_addr_t slotIndex, int32 count,
radix_node_alloc(radix_node *node, swap_addr_t slotIndex, int32 count,
uint32 radix, uint32 skip)
{
uint32 next_skip = skip / NODE_RADIX;
@ -229,10 +229,10 @@ radix_node_alloc(radix_node *node, swap_addr_t slotIndex, int32 count,
if (count <= node[i].big_hint) {
swap_addr_t addr = SWAP_SLOT_NONE;
if (next_skip == 1)
if (next_skip == 1)
addr = radix_leaf_alloc(&node[i], slotIndex, count);
else
addr = radix_node_alloc(&node[i], slotIndex, count, radix,
addr = radix_node_alloc(&node[i], slotIndex, count, radix,
next_skip - 1);
if (addr != SWAP_SLOT_NONE) {
node->u.available -= count;
@ -273,7 +273,7 @@ static void
radix_leaf_dealloc(radix_node *leaf, swap_addr_t slotIndex, uint32 count)
{
uint32 n = slotIndex & (BITMAP_RADIX - 1);
bitmap_t mask = ((bitmap_t)-1 >> (BITMAP_RADIX - count - n))
bitmap_t mask = ((bitmap_t)-1 >> (BITMAP_RADIX - count - n))
& ((bitmap_t)-1 << n);
leaf->u.bitmap &= ~mask;
@ -281,8 +281,8 @@ radix_leaf_dealloc(radix_node *leaf, swap_addr_t slotIndex, uint32 count)
}
static void
radix_node_dealloc(radix_node *node, swap_addr_t slotIndex, uint32 count,
static void
radix_node_dealloc(radix_node *node, swap_addr_t slotIndex, uint32 count,
uint32 radix, uint32 skip, swap_addr_t index)
{
node->u.available += count;
@ -301,13 +301,13 @@ radix_node_dealloc(radix_node *node, swap_addr_t slotIndex, uint32 count,
if (next_skip == 1)
radix_leaf_dealloc(&node[i], slotIndex, v);
else
radix_node_dealloc(&node[i], slotIndex, v, radix,
else
radix_node_dealloc(&node[i], slotIndex, v, radix,
next_skip - 1, index);
if (node->big_hint < node[i].big_hint)
node->big_hint = node[i].big_hint;
count -= v;
slotIndex += v;
index += radix;
@ -316,13 +316,13 @@ radix_node_dealloc(radix_node *node, swap_addr_t slotIndex, uint32 count,
}
void
void
radix_bitmap_dealloc(radix_bitmap *bmp, swap_addr_t slotIndex, uint32 count)
{
if (bmp->radix == BITMAP_RADIX)
radix_leaf_dealloc(bmp->root, slotIndex, count);
else
radix_node_dealloc(bmp->root, slotIndex, count, bmp->radix,
radix_node_dealloc(bmp->root, slotIndex, count, bmp->radix,
bmp->skip, 0);
bmp->free_slots += count;