* Made vm_alloc_virtual_from_kernel_args() available to other kernel
  parts, too. Fixed a potential overflow.
* The generic physical page mapper reserves the virtual address range
  for the IO space now, so that no one can interfere until an area has
  been created. The location of the IO space is no longer fixed; it
  didn't look to me like it was necessary for x86, and we definitely
  need to be flexible for PPC.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15855 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2006-01-07 17:37:21 +00:00
parent 4f7d12cf6a
commit 3b36b30fef
4 changed files with 29 additions and 18 deletions

View File

@@ -23,6 +23,8 @@ status_t vm_page_init(struct kernel_args *args);
status_t vm_page_init_post_area(struct kernel_args *args);
status_t vm_page_init_post_thread(struct kernel_args *args);
addr_t vm_alloc_virtual_from_kernel_args(kernel_args *ka, size_t size);
status_t vm_mark_page_inuse(addr_t page);
status_t vm_mark_page_range_inuse(addr_t startPage, addr_t length);
status_t vm_page_set_state(vm_page *page, int state);

View File

@@ -71,19 +71,20 @@ restart:
// map it
if (first_free_vmapping < num_virtual_chunks) {
// there's a free hole
paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize + sIOSpaceBase;
paddr_desc[index].va = first_free_vmapping * sIOSpaceChunkSize
+ sIOSpaceBase;
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
virtual_pmappings[first_free_vmapping] = &paddr_desc[index];
paddr_desc[index].ref_count++;
// push up the first_free_vmapping pointer
for (; first_free_vmapping < num_virtual_chunks; first_free_vmapping++) {
for (; first_free_vmapping < num_virtual_chunks;
first_free_vmapping++) {
if(virtual_pmappings[first_free_vmapping] == NULL)
break;
}
sMapIOSpaceChunk(paddr_desc[index].va,
index * sIOSpaceChunkSize);
sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
mutex_unlock(&iospace_mutex);
return B_OK;
@@ -150,7 +151,6 @@ generic_put_physical_page(addr_t va)
// #pragma mark -
// VM API
status_t
@@ -161,10 +161,25 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
TRACE(("generic_vm_physical_page_mapper_init: entry\n"));
sMapIOSpaceChunk = mapIOSpaceChunk;
sIOSpaceBase = *ioSpaceBase;
sIOSpaceSize = ioSpaceSize;
sIOSpaceChunkSize = ioSpaceChunkSize;
// reserve virtual space for the IO space
// We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
// can guarantee to align the base address to ioSpaceChunkSize.
sIOSpaceBase = vm_alloc_virtual_from_kernel_args(args,
sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE);
if (sIOSpaceBase == 0) {
panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
"space in virtual address space!");
return B_ERROR;
}
// align the base address to chunk size
sIOSpaceBase = (sIOSpaceBase + ioSpaceChunkSize - 1) / ioSpaceChunkSize
* ioSpaceChunkSize;
*ioSpaceBase = sIOSpaceBase;
// allocate some space to hold physical page mapping info
paddr_desc = (paddr_chunk_desc *)vm_alloc_from_kernel_args(args,
sizeof(paddr_chunk_desc) * 1024, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
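
Why the over-reservation in the hunk above works: the reservation base is
page aligned, so rounding it up to ioSpaceChunkSize consumes at most
(ioSpaceChunkSize - B_PAGE_SIZE) bytes of slack. A minimal standalone
sketch of that argument (not part of this commit; align_up, the constants,
and the base addresses are illustrative stand-ins):

#include <assert.h>
#include <stdint.h>

#define B_PAGE_SIZE 4096	/* stand-in for Haiku's page size */

static uintptr_t
align_up(uintptr_t address, uintptr_t alignment)
{
	return (address + alignment - 1) / alignment * alignment;
}

int
main(void)
{
	uintptr_t chunkSize = 4 * 1024 * 1024;	/* cf. IOSPACE_CHUNK_SIZE */
	uintptr_t size = 256 * 1024 * 1024;	/* cf. IOSPACE_SIZE */

	/* for every page-aligned reservation base within one chunk, the
	 * chunk-aligned IO space still fits into the reserved range */
	for (uintptr_t base = 0x80000000; base < 0x80000000 + chunkSize;
			base += B_PAGE_SIZE) {
		uintptr_t reservedEnd = base + size + chunkSize - B_PAGE_SIZE;
		uintptr_t aligned = align_up(base, chunkSize);
		assert(aligned + size <= reservedEnd);
	}
	return 0;
}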
@@ -211,10 +226,6 @@ generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
temp = (void *)sIOSpaceBase;
area_id ioSpaceArea = vm_create_null_area(vm_kernel_address_space_id(),
"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize);
// TODO: We don't reserve the virtual address space for the IO space in
// generic_vm_physical_page_mapper_init() yet. So theoretically it could
// happen that a part of that space has been reserved by someone else in
// the meantime.
if (ioSpaceArea < 0) {
panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
"create null area for IO space!\n");

View File

@@ -30,8 +30,6 @@
// 256 MB of iospace
#define IOSPACE_SIZE (256*1024*1024)
// put it 256 MB into kernel space
#define IOSPACE_BASE (KERNEL_BASE + IOSPACE_SIZE)
// 4 MB chunks, to optimize for 4 MB pages
#define IOSPACE_CHUNK_SIZE (4*1024*1024)
@@ -808,7 +806,6 @@ arch_vm_translation_map_init(kernel_args *args)
TRACE(("iospace_pgtables %p\n", iospace_pgtables));
// init physical page mapper
sIOSpaceBase = IOSPACE_BASE;
error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk,
&sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
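
Note on the hunk above: with the fixed IOSPACE_BASE gone, ioSpaceBase has
become an in/out parameter, and the generic mapper decides where the IO
space actually lands. A minimal standalone sketch of that calling
convention (not Haiku code; all names and the fake reservation address
are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t addr_t;

/* stand-in for generic_vm_physical_page_mapper_init(): pretend the
 * kernel_args allocator handed back a page-aligned range, then align
 * its base to the chunk size and report it to the caller */
static int
physical_page_mapper_init(addr_t *ioSpaceBase, addr_t ioSpaceSize,
	addr_t chunkSize)
{
	addr_t reserved = 0x80001000;	/* whatever the allocator returned */
	(void)ioSpaceSize;
	*ioSpaceBase = (reserved + chunkSize - 1) / chunkSize * chunkSize;
	return 0;
}

int
main(void)
{
	addr_t ioSpaceBase = 0;	/* no fixed IOSPACE_BASE anymore */
	physical_page_mapper_init(&ioSpaceBase, 256 * 1024 * 1024,
		4 * 1024 * 1024);
	printf("IO space placed at %#x\n", (unsigned)ioSpaceBase);
	return 0;
}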

View File

@@ -1023,7 +1023,7 @@ static int dump_free_page_table(int argc, char **argv)
#endif
static addr_t
addr_t
vm_alloc_virtual_from_kernel_args(kernel_args *ka, size_t size)
{
addr_t spot = 0;
@@ -1048,10 +1048,11 @@ vm_alloc_virtual_from_kernel_args(kernel_args *ka, size_t size)
if (spot == 0) {
// we hadn't found one between allocation ranges. this is ok.
// see if there's a gap after the last one
if (ka->virtual_allocated_range[last_valloc_entry].start
+ ka->virtual_allocated_range[last_valloc_entry].size + size
<= KERNEL_BASE + (KERNEL_SIZE - 1)) {
spot = ka->virtual_allocated_range[last_valloc_entry].start + ka->virtual_allocated_range[last_valloc_entry].size;
addr_t lastRangeEnd
= ka->virtual_allocated_range[last_valloc_entry].start
+ ka->virtual_allocated_range[last_valloc_entry].size;
if (KERNEL_BASE + (KERNEL_SIZE - 1) - lastRangeEnd >= size) {
spot = lastRangeEnd;
ka->virtual_allocated_range[last_valloc_entry].size += size;
goto out;
}
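
The rewritten check in the hunk above is the overflow fix mentioned in
the commit message: instead of adding the requested size to the end of
the last allocated range, which can wrap around near the top of the
address space, it subtracts that end from the kernel end address, which
cannot overflow. A minimal standalone demonstration (not Haiku code; the
constants are made-up 32-bit values):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t kernelEnd = 0xffffffff;	/* KERNEL_BASE + (KERNEL_SIZE - 1) */
	uint32_t lastRangeEnd = 0xfff00000;	/* end of last allocated range */
	uint32_t size = 0x00200000;	/* 2 MB request that does not fit */

	/* old form: the sum wraps to 0x00100000, so the test wrongly
	 * reports that the request fits */
	int oldCheck = lastRangeEnd + size <= kernelEnd;

	/* new form: no addition on the overflow-prone side */
	int newCheck = kernelEnd - lastRangeEnd >= size;

	printf("old: %d (wrong), new: %d (correct)\n", oldCheck, newCheck);
	return 0;
}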