added a lot of debugging checks that should help with fixing bugs

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@2099 a95241bf-73f2-0310-859d-f6bbb57e9c96
beveloper 2002-11-28 02:25:04 +00:00
parent 363999a1eb
commit 7991b1a031
9 changed files with 89 additions and 24 deletions
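The recurring addition throughout this commit is a sanity check that panics when an allocator or locking primitive is entered with interrupts disabled, except during early boot while kernel_startup is still true (main.c sets kernel_startup = true at the top of _start() and clears it again right before enable_interrupts()). A minimal sketch of that pattern; check_can_block() is a hypothetical helper name used only for illustration, not something from the tree:

#include <int.h>	/* are_interrupts_enabled() and the kernel_startup flag */
#include <debug.h>	/* panic() */

/* Illustrates the check added to malloc(), free(), realloc(), mutex_lock(),
 * recursive_lock_lock() and acquire_sem_etc(): these may block, so calling
 * them with interrupts disabled (outside of kernel startup) is a caller bug. */
static inline void
check_can_block(const char *caller)
{
	if (!kernel_startup && !are_interrupts_enabled())
		panic("%s: called with interrupts disabled\n", caller);
}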

View File

@@ -65,4 +65,7 @@ long remove_io_interrupt_handler (long,
/** @} */
/* during kernel startup, interrupts are disabled */
extern bool kernel_startup;
#endif /* _KERNEL_INT_H */

View File

@@ -187,7 +187,9 @@ i386_handle_trap(struct iframe frame)
// get the old interrupt enable/disable state and restore to that
if(frame.flags & 0x200) {
// dprintf("page_fault: enabling interrupts\n");
dprintf("page_fault: enabling interrupts\n");
if (!kernel_startup)
dprintf("page_fault, but interrupts are disabled. touching address %p from eip %p\n", cr2, frame.eip);
enable_interrupts();
}
ret = vm_page_fault(cr2, frame.eip,
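The frame.flags & 0x200 test looks at bit 9 of the saved EFLAGS, the IF (interrupt enable) flag, so interrupts are only re-enabled if the interrupted context had them on. A tiny sketch of just that check; EFLAGS_IF is my own name for the constant, not one taken from the source:

#define EFLAGS_IF 0x200	/* bit 9 of EFLAGS: interrupts were enabled */

if (frame.flags & EFLAGS_IF)
	enable_interrupts();	/* restore the state the faulting code ran with */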

View File

@@ -7,6 +7,7 @@
#include <kernel.h>
#include <vm.h>
#include <lock.h>
#include <int.h>
#include <memheap.h>
#include <malloc.h>
#include <debug.h>
@@ -241,6 +242,9 @@ malloc(size_t size)
struct heap_page *page;
TRACE(("kmalloc: asked to allocate size %d\n", size));
if (!kernel_startup && !are_interrupts_enabled())
panic("malloc: called with interrupts disabled\n");
mutex_lock(&heap_lock);
@@ -257,7 +261,7 @@ malloc(size_t size)
if (bin_index == bin_count) {
// XXX fix the raw alloc later.
//address = raw_alloc(size, bin_index);
panic("kmalloc: asked to allocate too much for now!\n");
panic("malloc: asked to allocate too much for now!\n");
goto out;
} else {
if (bins[bin_index].free_list != NULL) {
@@ -328,22 +332,25 @@ free(void *address)
struct heap_bin *bin;
unsigned int i;
if (!kernel_startup && !are_interrupts_enabled())
panic("free: called with interrupts disabled\n");
if (address == NULL)
return;
if ((addr)address < heap_base || (addr)address >= (heap_base + heap_size))
panic("free(): asked to free invalid address %p\n", address);
panic("free: asked to free invalid address %p\n", address);
#if USE_WALL
{
uint32 *wall = (uint32 *)((uint8 *)address - 12);
uint32 size = wall[0];
if (wall[1] != 0xabadcafe || wall[2] != 0xabadcafe)
panic("kfree: front wall was overwritten (allocation at %p, %lu bytes): %08lx %08lx\n", address, size, wall[1], wall[2]);
panic("free: front wall was overwritten (allocation at %p, %lu bytes): %08lx %08lx\n", address, size, wall[1], wall[2]);
wall = (uint32 *)((uint8 *)address + size);
if (wall[0] != 0xabadcafe || wall[1] != 0xabadcafe)
panic("kfree: back wall was overwritten (allocation at %p, %lu bytes): %08lx %08lx\n", address, size, wall[0], wall[1]);
panic("free: back wall was overwritten (allocation at %p, %lu bytes): %08lx %08lx\n", address, size, wall[0], wall[1]);
address = (uint8 *)address - 12;
}
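From the wall checks in free() above, the USE_WALL layout around each allocation can be read off. A sketch of that layout as inferred from the panic messages (the allocation-side code that writes the walls is not part of this hunk):

/* Inferred USE_WALL layout around one allocation:
 *
 *   user_ptr - 12 : uint32 size        (user-visible allocation size)
 *   user_ptr -  8 : uint32 0xabadcafe  (front wall)
 *   user_ptr -  4 : uint32 0xabadcafe  (front wall)
 *   user_ptr +  0 : size bytes of user data
 *   user_ptr + size     : uint32 0xabadcafe  (back wall)
 *   user_ptr + size + 4 : uint32 0xabadcafe  (back wall)
 *
 * free() checks all four markers and panics if any were overwritten,
 * then rewinds the pointer by 12 bytes before releasing the block. */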
@@ -410,6 +417,9 @@ realloc(void *address, size_t newSize)
void *newAddress = NULL;
size_t maxSize = 0, minSize;
if (!kernel_startup && !are_interrupts_enabled())
panic("realloc(): called with interrupts disabled\n");
if (address != NULL && ((addr)address < heap_base || (addr)address >= (heap_base + heap_size)))
panic("realloc(): asked to realloc invalid address %p\n", address);

View File

@@ -42,6 +42,11 @@ hash_init(unsigned int table_size, int next_ptr_offset,
return NULL;
t->table = (struct hash_elem **)malloc(sizeof(void *) * table_size);
if (t->table == NULL) {
free(t);
return NULL;
}
for (i = 0; i<table_size; i++)
t->table[i] = NULL;
t->table_size = table_size;
@@ -243,20 +248,28 @@ static void nhash_this(hash_table_index *hi, const void **key, ssize_t *klen,
new_hash_table *
hash_make(void)
{
status_t rv;
new_hash_table *nn;
nn = (new_hash_table *)malloc(sizeof(new_hash_table));
if (!nn)
if (nn == NULL)
return NULL;
nn->count = 0;
nn->max = MAX_INITIAL;
nn->array = (hash_entry **)malloc(sizeof(hash_entry) * (nn->max + 1));
memset(nn->array, 0, sizeof(hash_entry) * (nn->max +1));
pool_init(&nn->pool, sizeof(hash_entry));
if (!nn->pool)
if (nn->array == NULL) {
free(nn);
return NULL;
}
memset(nn->array, 0, sizeof(hash_entry) * (nn->max +1));
rv = pool_init(&nn->pool, sizeof(hash_entry));
if (rv < B_OK || nn->pool == NULL) {
free(nn->array);
free(nn);
return NULL;
}
return nn;
}
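The error paths above free exactly what has been allocated so far (the array before the table itself). For comparison, a sketch of the same cleanup written with the common goto-unwind idiom; this is an alternative style, not what the commit uses, and it assumes the same types and MAX_INITIAL constant as the code above:

new_hash_table *
hash_make(void)
{
	new_hash_table *nn;

	nn = (new_hash_table *)malloc(sizeof(new_hash_table));
	if (nn == NULL)
		return NULL;
	nn->count = 0;
	nn->max = MAX_INITIAL;

	nn->array = (hash_entry **)malloc(sizeof(hash_entry) * (nn->max + 1));
	if (nn->array == NULL)
		goto err_table;
	memset(nn->array, 0, sizeof(hash_entry) * (nn->max + 1));

	if (pool_init(&nn->pool, sizeof(hash_entry)) < B_OK || nn->pool == NULL)
		goto err_array;

	return nn;

err_array:
	free(nn->array);
err_table:
	free(nn);
	return NULL;
}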
@@ -291,11 +304,13 @@ expand_array(new_hash_table *nh)
{
hash_index *hi;
hash_entry **new_array;
int new_max = nh->max * 2 +1;
int new_max = (nh->max + 1) * 2 - 1;
int i;
new_array = (hash_entry **)malloc(sizeof(hash_entry) * new_max);
memset(new_array, 0, sizeof(hash_entry) * new_max);
new_array = (hash_entry **)malloc(sizeof(hash_entry) * (new_max + 1));
if (new_array == NULL)
panic("khash, expand_array failed\n"); // XXX stupid, this function should return an error if it failes
memset(new_array, 0, sizeof(hash_entry) * (new_max + 1));
for (hi = new_hash_first(nh); hi; hi = new_hash_next(hi)) {
i = hi->this_idx->hash & new_max;
hi->this_idx->next = new_array[i];
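The table size is kept of the form 2^k - 1 so that `hash & nh->max` selects a bucket without a division; both doubling expressions preserve that, since nh->max * 2 + 1 and (nh->max + 1) * 2 - 1 compute the same value. A short illustration (the initial value 15 for MAX_INITIAL is an assumption made for the example, not taken from the source):

/* Example of the mask growth, assuming MAX_INITIAL == 15:
 *   15 = 0b01111  ->  31 = 0b11111  ->  63 = 0b111111
 * so `hash & nh->max` always maps a hash onto indices 0 .. nh->max. */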
@@ -323,6 +338,7 @@ find_entry(new_hash_table *nh, const void *key, ssize_t klen, const void *val)
hash = hash * 33 + *p;
for (hep = &nh->array[hash & nh->max], he = *hep; he; hep = &he->next, he = *hep) {
dprintf("khash, find_entry looking at hep %p, he %p\n", hep, he);
if (he->hash == hash && he->klen == klen
&& memcmp(he->key, key, klen) == 0) {
break;
@@ -348,12 +364,19 @@ find_entry(new_hash_table *nh, const void *key, ssize_t klen, const void *val)
void *
hash_get(new_hash_table *nh, const void *key, ssize_t klen)
{
hash_entry *he;
he = *find_entry(nh, key, klen, NULL);
if (he)
return (void*)he->val;
hash_entry **hepp;
hash_entry *hep;
hepp = find_entry(nh, key, klen, NULL);
dprintf("khash, find_entry returned %p\n", hepp);
if (hepp == NULL)
return NULL;
return NULL;
hep = *hepp;
if (hep == NULL)
return NULL;
return (void *) hep->val; /* XXX casting away the const */
}

View File

@@ -8,6 +8,7 @@
#include <kernel.h>
#include <OS.h>
#include <lock.h>
#include <int.h>
#include <debug.h>
#include <arch/cpu.h>
#include <Errors.h>
@@ -58,6 +59,9 @@ recursive_lock_lock(recursive_lock *lock)
{
thread_id thid = thread_get_current_thread_id();
bool retval = false;
if (!kernel_startup && !are_interrupts_enabled())
panic("recursive_lock_lock: called with interrupts disabled for lock %p, sem %#lx\n", lock, lock->sem);
if (thid != lock->holder) {
acquire_sem(lock->sem);
@@ -130,6 +134,9 @@ mutex_lock(mutex *mutex)
{
thread_id me = thread_get_current_thread_id();
if (!kernel_startup && !are_interrupts_enabled())
panic("mutex_lock: called with interrupts disabled for mutex %p, sem %#lx\n", mutex, mutex->sem);
if (me == mutex->holder)
panic("mutex_lock failure: mutex %p acquired twice by thread 0x%lx\n", mutex, me);

View File

@@ -41,6 +41,7 @@
# define TRACE(x) ;
#endif
bool kernel_startup;
static kernel_args ka;
@@ -51,6 +52,8 @@ int _start(kernel_args *oldka, int cpu); /* keep compiler happy */
int
_start(kernel_args *oldka, int cpu_num)
{
kernel_startup = true;
memcpy(&ka, oldka, sizeof(kernel_args));
smp_set_num_cpus(ka.num_cpus);
@@ -87,6 +90,10 @@ _start(kernel_args *oldka, int cpu_num)
sem_init(&ka);
dprintf("##################################################################\n");
dprintf("semaphores now available\n");
dprintf("##################################################################\n");
// now we can create and use semaphores
vm_init_postsem(&ka);
cbuf_init();
@@ -112,6 +119,10 @@ _start(kernel_args *oldka, int cpu_num)
// this is run per cpu for each AP processor after they've been set loose
thread_init_percpu(cpu_num);
}
dprintf("##################################################################\n");
dprintf("interrupts now enabled\n");
dprintf("##################################################################\n");
kernel_startup = false;
enable_interrupts();
TRACE(("main: done... begin idle loop on cpu %d\n", cpu_num));

View File

@@ -946,13 +946,15 @@ module_init(kernel_args *ka, module_info **sys_module_headers)
status_t
get_module(const char *path, module_info **vec)
{
module *m = (module *)hash_get(modules_list, path, strlen(path));
module *m;
loaded_module *lm;
int res = B_NO_ERROR;
*vec = NULL;
dprintf("*** get_module: %s\n", path);
m = (module *)hash_get(modules_list, path, strlen(path));
/* If m == NULL we didn't find any record of the module
* in our hash. We'll now call search_modules which will do
* a scan of the possible directories that may contain it.
@@ -1012,8 +1014,11 @@ get_module(const char *path, module_info **vec)
status_t
put_module(const char *path)
{
module *m = (module *)hash_get(modules_list, path, strlen(path));
module *m;
dprintf("*** put_module: path %s\n", path);
m = (module *)hash_get(modules_list, path, strlen(path));
if (!m) {
dprintf("We don't seem to have a reference to module %s\n", path);
return EINVAL;

View File

@@ -60,10 +60,7 @@ void pool_debug_walk(struct pool_ctl *p)
void pool_debug(struct pool_ctl *p, char *name)
{
p->debug = 1;
if (strlen(name) < POOL_DEBUG_NAME_SZ)
strncpy(p->name, name, strlen(name));
else
strncpy(p->name, name, POOL_DEBUG_NAME_SZ);
strlcpy(p->name, name, POOL_DEBUG_NAME_SZ);
}
static struct pool_mem *get_mem_block(struct pool_ctl *pool)
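strlcpy() copies at most size - 1 characters, always NUL-terminates, and returns strlen(src) so callers can detect truncation; the replaced strncpy() calls could leave p->name unterminated when the name filled the buffer. A minimal usage sketch (the 8-byte buffer is hypothetical, standing in for p->name):

char name[8];	/* hypothetical buffer standing in for p->name */

/* always writes a terminating '\0'; a return value >= sizeof(name)
 * means the source string did not fit and was truncated */
if (strlcpy(name, "a_fairly_long_pool_name", sizeof(name)) >= sizeof(name))
	dprintf("pool name truncated\n");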
@@ -128,6 +125,9 @@ static struct pool_mem *get_mem_block(struct pool_ctl *pool)
int32 pool_init(struct pool_ctl **_newPool, size_t size)
{
struct pool_ctl *pool = NULL;
/* if the init fails, the new pool will be set to NULL */
*_newPool = NULL;
if (init_sem == -1)
create_sem(1, "pool_init_sem");

View File

@@ -371,9 +371,13 @@ acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
int slot = id % MAX_SEMS;
int state;
status_t status = B_OK;
if (gSemsActive == false)
return B_NO_MORE_SEMS;
if (!kernel_startup && !are_interrupts_enabled())
panic("acquire_sem_etc: called with interrupts disabled for sem %#lx\n", id);
if (id < 0)
return B_BAD_SEM_ID;
if (count <= 0)