* Added an "iospace" debugger command.

* Fixed the broken "full sem" mechanism (the new wake-up hand-off is sketched below).
* Fixed recycling of previously mapped IO space chunks. This should avoid
  the panic() call that Marcus introduced earlier.

(tracked down by axeld and bonefish over the course of three days
(in between skiing) and finally nailed on the bus back home)
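
The "full sem" rework replaces the old scheme, where iospace_full_sem was
released whenever a chunk became recyclable regardless of whether anyone was
waiting, with a waiting counter: a thread that finds no free chunk registers
itself while still holding the lock, and a thread that frees a chunk releases
the semaphore only if a waiter is registered. A minimal sketch of that
hand-off, using the names from the diff below; get_chunk()/put_chunk() and the
elided bookkeeping are illustrative only:

	static mutex sMutex;
	static sem_id sChunkAvailableSem;
	static int32 sChunkAvailableWaitingCounter;
	static queue mapped_paddr_lru;

	static status_t
	get_chunk(bool canWait)
	{
	restart:
		mutex_lock(&sMutex);

		if (queue_peek(&mapped_paddr_lru) == NULL) {
			// no chunk can be recycled right now
			if (!canWait) {
				mutex_unlock(&sMutex);
				return B_NO_MEMORY;
			}
			// register as a waiter before dropping the lock, so that
			// put_chunk() wakes us up exactly once
			sChunkAvailableWaitingCounter++;
			mutex_unlock(&sMutex);
			acquire_sem(sChunkAvailableSem);
			goto restart;
		}

		// ... take the least recently used chunk off the LRU queue ...
		mutex_unlock(&sMutex);
		return B_OK;
	}

	static void
	put_chunk(void)
	{
		mutex_lock(&sMutex);
		// ... put the now unused chunk back on the LRU queue ...
		if (sChunkAvailableWaitingCounter > 0) {
			sChunkAvailableWaitingCounter--;
			release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
		}
		mutex_unlock(&sMutex);
	}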



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16511 a95241bf-73f2-0310-859d-f6bbb57e9c96
Stephan Aßmus 2006-02-26 17:44:27 +00:00
parent b270799715
commit fb39ecb534
1 changed file with 120 additions and 30 deletions


@@ -1,5 +1,5 @@
/*
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -11,9 +11,10 @@
#include <vm_address_space.h>
#include <vm_page.h>
#include <vm_priv.h>
#include <smp.h>
//#include <smp.h>
//#include <memheap.h>
#include <thread.h>
#include <util/queue.h>
#include <memheap.h>
#include <string.h>
#include <stdlib.h>
@@ -25,12 +26,17 @@
# define TRACE(x) ;
#endif
#define DEBUG_IO_SPACE
// data and structures used to represent physical pages mapped into iospace
typedef struct paddr_chunk_descriptor {
struct paddr_chunk_descriptor *next_q;
// must remain first in structure, queue code uses it
int ref_count;
addr_t va;
int32 ref_count;
addr_t va;
#ifdef DEBUG_IO_SPACE
thread_id last_ref;
#endif
} paddr_chunk_desc;
static paddr_chunk_desc *paddr_desc; // will be one per physical chunk
@@ -38,8 +44,9 @@ static paddr_chunk_desc **virtual_pmappings; // will be one ptr per virtual chun
static int first_free_vmapping;
static int num_virtual_chunks;
static queue mapped_paddr_lru;
static mutex iospace_mutex;
static sem_id iospace_full_sem;
static mutex sMutex;
static sem_id sChunkAvailableSem;
static int32 sChunkAvailableWaitingCounter;
static generic_map_iospace_chunk_func sMapIOSpaceChunk;
static addr_t sIOSpaceBase;
@@ -54,7 +61,7 @@ generic_get_physical_page(addr_t pa, addr_t *va, uint32 flags)
paddr_chunk_desc *replaced_pchunk;
restart:
mutex_lock(&iospace_mutex);
mutex_lock(&sMutex);
// see if the page is already mapped
index = pa / sIOSpaceChunkSize;
@@ -64,7 +71,7 @@ restart:
queue_remove_item(&mapped_paddr_lru, &paddr_desc[index]);
}
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
mutex_unlock(&iospace_mutex);
mutex_unlock(&sMutex);
return B_OK;
}
@@ -80,12 +87,12 @@ restart:
// push up the first_free_vmapping pointer
for (; first_free_vmapping < num_virtual_chunks;
first_free_vmapping++) {
if(virtual_pmappings[first_free_vmapping] == NULL)
if (virtual_pmappings[first_free_vmapping] == NULL)
break;
}
sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
mutex_unlock(&iospace_mutex);
mutex_unlock(&sMutex);
return B_OK;
}
@@ -94,12 +101,14 @@ restart:
if (queue_peek(&mapped_paddr_lru) == NULL) {
// no free slots available
if (flags == PHYSICAL_PAGE_NO_WAIT) {
// punt back to the caller and let them handle this
mutex_unlock(&iospace_mutex);
// put back to the caller and let them handle this
mutex_unlock(&sMutex);
return B_NO_MEMORY;
} else {
mutex_unlock(&iospace_mutex);
acquire_sem(iospace_full_sem);
sChunkAvailableWaitingCounter++;
mutex_unlock(&sMutex);
acquire_sem(sChunkAvailableSem);
goto restart;
}
}
@@ -109,10 +118,15 @@ restart:
replaced_pchunk->va = 0;
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
paddr_desc[index].ref_count++;
#ifdef DEBUG_IO_SPACE
paddr_desc[index].last_ref = thread_get_current_thread_id();
#endif
virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
= paddr_desc + index;
sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
mutex_unlock(&iospace_mutex);
mutex_unlock(&sMutex);
return B_OK;
}
@@ -126,11 +140,11 @@ generic_put_physical_page(addr_t va)
panic("someone called put_physical_page on an invalid va 0x%lx\n", va);
va -= sIOSpaceBase;
mutex_lock(&iospace_mutex);
mutex_lock(&sMutex);
desc = virtual_pmappings[va / sIOSpaceChunkSize];
if (desc == NULL) {
mutex_unlock(&iospace_mutex);
mutex_unlock(&sMutex);
panic("put_physical_page called on page at va 0x%lx which is not checked out\n", va);
return B_ERROR;
}
@@ -138,18 +152,90 @@ generic_put_physical_page(addr_t va)
if (--desc->ref_count == 0) {
// put it on the mapped lru list
queue_enqueue(&mapped_paddr_lru, desc);
// no sense rescheduling on this one, there's likely a race in the waiting
// thread to grab the iospace_mutex, which would block and eventually get back to
// this thread. waste of time.
release_sem_etc(iospace_full_sem, 1, B_DO_NOT_RESCHEDULE);
}
mutex_unlock(&iospace_mutex);
if (sChunkAvailableWaitingCounter > 0) {
sChunkAvailableWaitingCounter--;
release_sem_etc(sChunkAvailableSem, 1, B_DO_NOT_RESCHEDULE);
}
}
if (desc->ref_count < 0)
panic("generic_put_physical_page(): ref count < 0: %ld\n", desc->ref_count);
mutex_unlock(&sMutex);
return B_OK;
}
#ifdef DEBUG_IO_SPACE
static int
dump_iospace(int argc, char** argv)
{
if (argc < 2) {
kprintf("usage: iospace <physical|virtual|queue>\n");
return 0;
}
int32 i;
if (strchr(argv[1], 'p')) {
// physical address descriptors
kprintf("I/O space physical descriptors (%p)\n", paddr_desc);
int32 max = 1024;
if (argc == 3)
max = strtol(argv[2], NULL, 0);
for (i = 0; i < max; i++) {
kprintf("[%03lx %p %3ld %3ld] ", i, (void *)paddr_desc[i].va,
paddr_desc[i].ref_count, paddr_desc[i].last_ref);
if (i % 8 == 7)
kprintf("\n");
}
if (i % 8)
kprintf("\n");
}
if (strchr(argv[1], 'v')) {
// virtual mappings
kprintf("I/O space virtual chunk mappings (%p, first free: %d)\n",
virtual_pmappings, first_free_vmapping);
for (i = 0; i < num_virtual_chunks; i++) {
kprintf("[%2ld. %03lx] ", i,
(virtual_pmappings[i] - paddr_desc) / sizeof(paddr_desc[0]));
if (i % 12 == 11)
kprintf("\n");
}
if (i % 12)
kprintf("\n");
}
if (strchr(argv[1], 'q')) {
// unused queue
kprintf("I/O space mapped queue:\n");
paddr_chunk_descriptor* descriptor
= (paddr_chunk_descriptor *)queue_peek(&mapped_paddr_lru);
i = 0;
while (descriptor != NULL) {
kprintf("[%03lx %p] ",
(descriptor - paddr_desc) / sizeof(paddr_desc[0]), descriptor);
if (i++ % 12 == 11)
kprintf("\n");
descriptor = descriptor->next_q;
}
if (i % 12)
kprintf("\n");
}
return 0;
}
#endif
// #pragma mark -
@@ -195,9 +281,9 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
first_free_vmapping = 0;
queue_init(&mapped_paddr_lru);
iospace_mutex.sem = -1;
iospace_mutex.holder = -1;
iospace_full_sem = -1;
sMutex.sem = -1;
sMutex.holder = -1;
sChunkAvailableSem = -1;
TRACE(("generic_vm_physical_page_mapper_init: done\n"));
@@ -234,6 +320,10 @@ generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
TRACE(("generic_vm_physical_page_mapper_init_post_area: done\n"));
#ifdef DEBUG_IO_SPACE
add_debugger_command("iospace", &dump_iospace, "Shows info about the I/O space area.");
#endif
return B_OK;
}
@@ -241,8 +331,8 @@ generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
status_t
generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
{
mutex_init(&iospace_mutex, "iospace_mutex");
iospace_full_sem = create_sem(1, "iospace_full_sem");
mutex_init(&sMutex, "iospace_mutex");
sChunkAvailableSem = create_sem(1, "iospace chunk available");
return iospace_full_sem >= B_OK ? B_OK : iospace_full_sem;
return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;
}
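
For reference, the "iospace" command added above takes a mode string and an
optional count; going by the dump_iospace() code, it can be invoked from the
kernel debugger roughly like this (illustrative invocations, not captured
output):

	iospace p 32   - dump the first 32 physical chunk descriptors (va, ref_count, last_ref)
	iospace v      - dump the virtual chunk mappings and the first free slot
	iospace q      - walk the LRU queue of currently unused mappings

Without a count the physical dump defaults to 1024 descriptors, and the mode
letters can be combined in a single argument (e.g. "pvq"), since the command
only checks for the presence of each letter.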