* Moved the early startup VM allocation functions from vm_page.c to vm.cpp.
* Renamed them and made everything static besides vm_allocate_early()
  (previously vm_alloc_from_kernel_args()), which now lets you specify a
  virtual size different from the physical size. That makes
  vm_alloc_virtual_from_kernel_args() superfluous; it is no longer exported
  and is now called allocate_early_virtual().
* Enabled printing a stack trace on serial output on team crash - it
  doesn't hurt for now, anyway.
* Cleanup.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20244 a95241bf-73f2-0310-859d-f6bbb57e9c96
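In practice the two old helpers collapse into the single new call; a minimal sketch of both calling patterns (the sizes are placeholders, the attribute constants are the ones used in the hunks below):

	// Fully backed block (replaces vm_alloc_from_kernel_args()):
	// virtualSize == physicalSize, so every page gets mapped.
	addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	// Pure virtual reservation (replaces vm_alloc_virtual_from_kernel_args()):
	// physicalSize == 0, so the mapping loop runs zero times.
	addr_t ioSpaceBase = vm_allocate_early(args, ioSpaceSize, 0, 0);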
parent bb377cd514
commit 3eca858515
@@ -21,13 +21,15 @@ struct team;
 extern "C" {
 #endif
 
 //void vm_dump_areas(vm_address_space *aspace);
 
 // startup only
 status_t vm_init(kernel_args *args);
 status_t vm_init_post_sem(struct kernel_args *args);
 status_t vm_init_post_thread(struct kernel_args *args);
 status_t vm_init_post_modules(struct kernel_args *args);
 void vm_free_kernel_args(kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
+addr_t vm_allocate_early(kernel_args *args, size_t virtualSize,
+	size_t physicalSize, uint32 attributes);
 
 // to protect code regions with interrupts turned on
 void permit_page_faults(void);
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -24,8 +24,6 @@ status_t vm_page_init(struct kernel_args *args);
 status_t vm_page_init_post_area(struct kernel_args *args);
 status_t vm_page_init_post_thread(struct kernel_args *args);
 
-addr_t vm_alloc_virtual_from_kernel_args(kernel_args *ka, size_t size);
-
 status_t vm_mark_page_inuse(addr_t page);
 status_t vm_mark_page_range_inuse(addr_t startPage, addr_t length);
 status_t vm_page_set_state(vm_page *page, int state);
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2005, The Haiku Team. All rights reserved.
+ * Copyright 2002-2007, Haiku. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -53,9 +53,6 @@ status_t vm_daemon_init(void);
 void vm_address_space_walk_start(struct hash_iterator *i);
 vm_address_space *vm_address_space_walk_next(struct hash_iterator *i);
 
-// allocates memory from the kernel_args structure
-addr_t vm_alloc_from_kernel_args(kernel_args *args, size_t size, uint32 lock);
-
 #ifdef __cplusplus
 }
 #endif
@@ -1,18 +1,17 @@
 /*
- * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
  * Distributed under the terms of the NewOS License.
  */
 
-
 #include "generic_vm_physical_page_mapper.h"
 
 #include <vm_address_space.h>
 #include <vm_page.h>
 #include <vm_priv.h>
 //#include <smp.h>
 //#include <memheap.h>
 #include <thread.h>
 #include <util/queue.h>
@@ -253,8 +252,8 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
 	// reserve virtual space for the IO space
 	// We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
 	// can guarantee to align the base address to ioSpaceChunkSize.
-	sIOSpaceBase = vm_alloc_virtual_from_kernel_args(args,
-		sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE);
+	sIOSpaceBase = vm_allocate_early(args,
+		sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0);
 	if (sIOSpaceBase == 0) {
 		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
 			"space in virtual address space!");
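The over-reservation in this hunk is what makes a chunk-aligned base possible: reserving ioSpaceChunkSize - B_PAGE_SIZE extra guarantees that an aligned block still fits inside the returned range. A sketch of the align-up this enables (variable names assumed, not taken from the file):

	// Reserve chunkSize - B_PAGE_SIZE more than needed, then round up;
	// an aligned block of `size` bytes is guaranteed to fit.
	addr_t base = vm_allocate_early(args, size + chunkSize - B_PAGE_SIZE, 0, 0);
	addr_t aligned = (base + chunkSize - 1) & ~((addr_t)chunkSize - 1);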
@@ -267,11 +266,13 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
 	*ioSpaceBase = sIOSpaceBase;
 
 	// allocate some space to hold physical page mapping info
-	paddr_desc = (paddr_chunk_desc *)vm_alloc_from_kernel_args(args,
-		sizeof(paddr_chunk_desc) * 1024, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
+		sizeof(paddr_chunk_desc) * 1024, ~0L,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
-	virtual_pmappings = (paddr_chunk_desc **)vm_alloc_from_kernel_args(args,
-		sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
+		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
 	TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
 		paddr_desc, virtual_pmappings/*, iospace_pgtables*/));
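Passing ~0L as physicalSize works because of the clamp at the top of vm_allocate_early() (see the vm.cpp hunk further down); any oversized request is reduced to virtualSize, making ~0L shorthand for "back the whole range":

	// From vm_allocate_early(): ~0L exceeds any virtualSize, so the
	// request degenerates to physicalSize == virtualSize (fully backed).
	if (physicalSize > virtualSize)
		physicalSize = virtualSize;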
@@ -845,8 +845,9 @@ arch_vm_translation_map_init(kernel_args *args)
 	tmap_list = NULL;
 
 	// allocate some space to hold physical page mapping info
-	iospace_pgtables = (page_table_entry *)vm_alloc_from_kernel_args(args,
-		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)), B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
+		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)), ~0L,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
 	TRACE(("iospace_pgtables %p\n", iospace_pgtables));
@@ -962,9 +962,8 @@ err1:
 
 
 area_id
-vm_map_physical_memory(team_id areaID, const char *name, void **_address,
-	uint32 addressSpec, addr_t size, uint32 protection,
-	addr_t physicalAddress)
+vm_map_physical_memory(team_id aspaceID, const char *name, void **_address,
+	uint32 addressSpec, addr_t size, uint32 protection, addr_t physicalAddress)
 {
 	vm_cache_ref *cacheRef;
 	vm_area *area;
@@ -975,13 +974,13 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 
 	TRACE(("vm_map_physical_memory(aspace = %ld, \"%s\", virtual = %p, spec = %ld,"
 		" size = %lu, protection = %ld, phys = %p)\n",
-		areaID, name, _address, addressSpec, size, protection,
+		aspaceID, name, _address, addressSpec, size, protection,
 		(void *)physicalAddress));
 
 	if (!arch_vm_supports_protection(protection))
 		return B_NOT_SUPPORTED;
 
-	vm_address_space *addressSpace = vm_get_address_space_by_id(areaID);
+	vm_address_space *addressSpace = vm_get_address_space_by_id(aspaceID);
 	if (addressSpace == NULL)
 		return B_BAD_TEAM_ID;
@@ -2421,6 +2420,128 @@ reserve_boot_loader_ranges(kernel_args *args)
 }
 
 
+static addr_t
+allocate_early_virtual(kernel_args *args, size_t size)
+{
+	addr_t spot = 0;
+	uint32 i;
+	int last_valloc_entry = 0;
+
+	size = PAGE_ALIGN(size);
+	// find a slot in the virtual allocation addr range
+	for (i = 1; i < args->num_virtual_allocated_ranges; i++) {
+		addr_t previousRangeEnd = args->virtual_allocated_range[i - 1].start
+			+ args->virtual_allocated_range[i - 1].size;
+		last_valloc_entry = i;
+		// check to see if the space between this one and the last is big enough
+		if (previousRangeEnd >= KERNEL_BASE
+			&& args->virtual_allocated_range[i].start
+				- previousRangeEnd >= size) {
+			spot = previousRangeEnd;
+			args->virtual_allocated_range[i - 1].size += size;
+			goto out;
+		}
+	}
+	if (spot == 0) {
+		// we hadn't found one between allocation ranges. this is ok.
+		// see if there's a gap after the last one
+		addr_t lastRangeEnd
+			= args->virtual_allocated_range[last_valloc_entry].start
+				+ args->virtual_allocated_range[last_valloc_entry].size;
+		if (KERNEL_BASE + (KERNEL_SIZE - 1) - lastRangeEnd >= size) {
+			spot = lastRangeEnd;
+			args->virtual_allocated_range[last_valloc_entry].size += size;
+			goto out;
+		}
+		// see if there's a gap before the first one
+		if (args->virtual_allocated_range[0].start > KERNEL_BASE) {
+			if (args->virtual_allocated_range[0].start - KERNEL_BASE >= size) {
+				args->virtual_allocated_range[0].start -= size;
+				spot = args->virtual_allocated_range[0].start;
+				goto out;
+			}
+		}
+	}
+
+out:
+	return spot;
+}
+
+
+static bool
+is_page_in_physical_memory_range(kernel_args *args, addr_t address)
+{
+	// TODO: horrible brute-force method of determining if the page can be allocated
+	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
+		if (address >= args->physical_memory_range[i].start
+			&& address < args->physical_memory_range[i].start
+				+ args->physical_memory_range[i].size)
+			return true;
+	}
+	return false;
+}
+
+
+static addr_t
+allocate_early_physical_page(kernel_args *args)
+{
+	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
+		addr_t nextPage;
+
+		nextPage = args->physical_allocated_range[i].start
+			+ args->physical_allocated_range[i].size;
+		// see if the page after the next allocated paddr run can be allocated
+		if (i + 1 < args->num_physical_allocated_ranges
+			&& args->physical_allocated_range[i + 1].size != 0) {
+			// see if the next page will collide with the next allocated range
+			if (nextPage >= args->physical_allocated_range[i + 1].start)
+				continue;
+		}
+		// see if the next physical page fits in the memory block
+		if (is_page_in_physical_memory_range(args, nextPage)) {
+			// we got one!
+			args->physical_allocated_range[i].size += B_PAGE_SIZE;
+			return nextPage / B_PAGE_SIZE;
+		}
+	}
+
+	return 0;
+		// could not allocate a block
+}
+
+
+/*!
+	This one uses the kernel_args' physical and virtual memory ranges to
+	allocate some pages before the VM is completely up.
+*/
+addr_t
+vm_allocate_early(kernel_args *args, size_t virtualSize, size_t physicalSize,
+	uint32 attributes)
+{
+	if (physicalSize > virtualSize)
+		physicalSize = virtualSize;
+
+	// find the vaddr to allocate at
+	addr_t virtualBase = allocate_early_virtual(args, virtualSize);
+	//dprintf("vm_allocate_early: vaddr 0x%lx\n", virtualAddress);
+
+	// map the pages
+	for (uint32 i = 0; i < PAGE_ALIGN(physicalSize) / B_PAGE_SIZE; i++) {
+		addr_t physicalAddress = allocate_early_physical_page(args);
+		if (physicalAddress == 0)
+			panic("error allocating early page!\n");
+
+		//dprintf("vm_allocate_early: paddr 0x%lx\n", physicalAddress);
+
+		arch_vm_translation_map_early_map(args, virtualBase + i * B_PAGE_SIZE,
+			physicalAddress * B_PAGE_SIZE, attributes,
+			&allocate_early_physical_page);
+	}
+
+	return virtualBase;
+}
+
+
 status_t
 vm_init(kernel_args *args)
 {
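Note the unit convention in the new code: allocate_early_physical_page() returns a page number (nextPage / B_PAGE_SIZE), not a byte address, so the mapping loop has to multiply by B_PAGE_SIZE again. Reduced to its core (a sketch, not the complete loop):

	addr_t pageNumber = allocate_early_physical_page(args);
		// a page number, not an address
	arch_vm_translation_map_early_map(args, virtualBase + i * B_PAGE_SIZE,
		pageNumber * B_PAGE_SIZE, attributes,	// back to a byte address
		&allocate_early_physical_page);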
@@ -2449,7 +2570,7 @@ vm_init(kernel_args *args)
 		heapSize /= 2;
 
 	// map in the new heap and initialize it
-	addr_t heapBase = vm_alloc_from_kernel_args(args, heapSize,
+	addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 	TRACE(("heap at 0x%lx\n", heapBase));
 	heap_init(heapBase, heapSize);
@@ -2628,7 +2749,7 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
 		area ? area->name : "???", faultAddress - (area ? area->base : 0x0));
 
 	// We can print a stack trace of the userland thread here.
-#if 0
+#if 1
 	if (area) {
 		struct stack_frame {
 #ifdef __INTEL__
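The block switched on here is the stack trace mentioned in the commit message: it walks the userland frame-pointer chain. A minimal sketch of the idea on x86 - the struct mirrors the `struct stack_frame` visible above, while the loop and the framePointer variable are assumptions:

	struct stack_frame {
		struct stack_frame	*previous;
		void				*return_address;
	};

	// Follow the saved frame pointer chain, printing each return address.
	for (struct stack_frame *frame = (struct stack_frame *)framePointer;
			frame != NULL; frame = frame->previous)
		dprintf("  %p\n", frame->return_address);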
@@ -3099,15 +3220,18 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 
 		atomic_add(&page->ref_count, 1);
 
 		//vm_page_map(area, page, newProtection);
 		map->ops->lock(map);
 		map->ops->map(map, address, page->physical_page_number * B_PAGE_SIZE,
 			newProtection);
 		map->ops->unlock(map);
 	}
 
+	vm_page_set_state(page, area->wiring == B_NO_LOCK
+		? PAGE_STATE_ACTIVE : PAGE_STATE_WIRED);
+
 	release_sem_etc(addressSpace->sem, READ_COUNT, 0);
 
-	vm_page_set_state(page, PAGE_STATE_ACTIVE);
 	mutex_unlock(&pageSourceRef->lock);
 	vm_cache_release_ref(pageSourceRef);
@@ -3405,8 +3529,8 @@ get_memory_map(const void *address, ulong numBytes, physical_entry *table, long
 
 		if (interrupts) {
 			uint32 flags;
-			status = map->ops->query(map, (addr_t)address + offset, &physicalAddress,
-				&flags);
+			status = map->ops->query(map, (addr_t)address + offset,
+				&physicalAddress, &flags);
 		} else {
 			status = map->ops->query_interrupt(map, (addr_t)address + offset,
 				&physicalAddress);
@@ -3421,7 +3545,8 @@ get_memory_map(const void *address, ulong numBytes, physical_entry *table, long
 		}
 
 		// need to switch to the next physical_entry?
-		if (index < 0 || (addr_t)table[index].address != physicalAddress - table[index].size) {
+		if (index < 0 || (addr_t)table[index].address
+				!= physicalAddress - table[index].size) {
 			if (++index + 1 > numEntries) {
 				// table to small
 				status = B_BUFFER_OVERFLOW;
@@ -3604,12 +3729,15 @@ resize_area(area_id areaID, size_t newSize)
 	// We need to check if all areas of this cache can be resized
 
 	for (current = cacheRef->areas; current; current = current->cache_next) {
-		if (current->address_space_next && current->address_space_next->base <= (current->base + newSize)) {
+		if (current->address_space_next
+			&& current->address_space_next->base <= (current->base
+				+ newSize)) {
 			// if the area was created inside a reserved area, it can also be
 			// resized in that area
 			// ToDo: if there is free space after the reserved area, it could be used as well...
 			vm_area *next = current->address_space_next;
-			if (next->id == RESERVED_AREA_ID && next->cache_offset <= current->base
+			if (next->id == RESERVED_AREA_ID
+				&& next->cache_offset <= current->base
 				&& next->base - 1 + next->size >= current->base - 1 + newSize)
 				continue;
 
@@ -3622,9 +3750,11 @@ resize_area(area_id areaID, size_t newSize)
 	// Okay, looks good so far, so let's do it
 
 	for (current = cacheRef->areas; current; current = current->cache_next) {
-		if (current->address_space_next && current->address_space_next->base <= (current->base + newSize)) {
+		if (current->address_space_next
+			&& current->address_space_next->base <= (current->base + newSize)) {
 			vm_area *next = current->address_space_next;
-			if (next->id == RESERVED_AREA_ID && next->cache_offset <= current->base
+			if (next->id == RESERVED_AREA_ID
+				&& next->cache_offset <= current->base
 				&& next->base - 1 + next->size >= current->base - 1 + newSize) {
 				// resize reserved area
 				addr_t offset = current->base + newSize - next->base;
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -65,8 +65,7 @@ static void clear_page(addr_t pa);
 static int32 page_scrubber(void *);
 
 
-/** Dequeues a page from the tail of the given queue */
-
+/*! Dequeues a page from the tail of the given queue */
 static vm_page *
 dequeue_page(page_queue *q)
 {
@@ -87,8 +86,7 @@ dequeue_page(page_queue *q)
 }
 
 
-/** Enqueues a page to the head of the given queue */
-
+/*! Enqueues a page to the head of the given queue */
 static void
 enqueue_page(page_queue *q, vm_page *page)
 {
@@ -165,10 +163,14 @@ write_page(vm_page *page, bool fsReenter)
 }
 
 
+/*!
+	You need to hold the vm_cache lock when calling this function.
+*/
 status_t
 vm_page_write_modified(vm_cache *cache, bool fsReenter)
 {
 	vm_page *page = cache->page_list;
+	vm_cache_ref *ref = cache->ref;
 
 	// ToDo: join adjacent pages into one vec list
@@ -202,7 +204,7 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
 
 		pageOffset = (off_t)page->cache_offset << PAGE_SHIFT;
 
-		for (area = page->cache->ref->areas; area; area = area->cache_next) {
+		for (area = ref->areas; area; area = area->cache_next) {
 			if (pageOffset >= area->cache_offset
 				&& pageOffset < area->cache_offset + area->size) {
 				vm_translation_map *map = &area->address_space->translation_map;
@@ -231,9 +233,12 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
 		if (!gotPage)
 			continue;
 
-		mutex_unlock(&cache->ref->lock);
+		mutex_unlock(&ref->lock);
+
 		status = write_page(page, fsReenter);
-		mutex_lock(&cache->ref->lock);
+
+		mutex_lock(&ref->lock);
+		cache = ref->cache;
 
 		if (status == B_OK) {
 			if (dequeuedPage) {
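The pattern in this hunk: grab the vm_cache_ref before dropping its lock, and re-read ref->cache after re-acquiring it, because the cache a ref points to can be swapped out while the lock is not held. In isolation:

	vm_cache_ref *ref = cache->ref;	// taken while the pointer is still valid

	mutex_unlock(&ref->lock);
	status = write_page(page, fsReenter);
	mutex_lock(&ref->lock);

	cache = ref->cache;	// the underlying cache may have changed meanwhile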
@@ -386,8 +391,8 @@ vm_page_init(kernel_args *args)
 	page_active_queue.count = 0;
 
 	// map in the new free page table
-	sPages = (vm_page *)vm_alloc_from_kernel_args(args, sNumPages * sizeof(vm_page),
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	sPages = (vm_page *)vm_allocate_early(args, sNumPages * sizeof(vm_page),
+		~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
 	TRACE(("vm_init: putting free_page_table @ %p, # ents %d (size 0x%x)\n",
 		sPages, sNumPages, (unsigned int)(sNumPages * sizeof(vm_page))));
@@ -453,12 +458,12 @@ vm_page_init_post_thread(kernel_args *args)
 }
 
 
-/** This is a background thread that wakes up every now and then (every 100ms)
- *	and moves some pages from the free queue over to the clear queue.
- *	Given enough time, it will clear out all pages from the free queue - we
- *	could probably slow it down after having reached a certain threshold.
- */
-
+/*!
+	This is a background thread that wakes up every now and then (every 100ms)
+	and moves some pages from the free queue over to the clear queue.
+	Given enough time, it will clear out all pages from the free queue - we
+	could probably slow it down after having reached a certain threshold.
+*/
 static int32
 page_scrubber(void *unused)
 {
@@ -696,12 +701,12 @@ vm_page_allocate_page(int page_state)
 }
 
 
-/** Allocates a number of pages and puts their pointers into the provided
- *	array. All pages are marked busy.
- *	Returns B_OK on success, and B_NO_MEMORY when there aren't any free
- *	pages left to allocate.
- */
-
+/*!
+	Allocates a number of pages and puts their pointers into the provided
+	array. All pages are marked busy.
+	Returns B_OK on success, and B_NO_MEMORY when there aren't any free
+	pages left to allocate.
+*/
 status_t
 vm_page_allocate_pages(int pageState, vm_page **pages, uint32 numPages)
 {
@@ -1048,125 +1053,3 @@ static int dump_free_page_table(int argc, char **argv)
 }
 #endif
 
-
-addr_t
-vm_alloc_virtual_from_kernel_args(kernel_args *ka, size_t size)
-{
-	addr_t spot = 0;
-	uint32 i;
-	int last_valloc_entry = 0;
-
-	size = PAGE_ALIGN(size);
-	// find a slot in the virtual allocation addr range
-	for (i = 1; i < ka->num_virtual_allocated_ranges; i++) {
-		addr_t previousRangeEnd = ka->virtual_allocated_range[i-1].start
-			+ ka->virtual_allocated_range[i-1].size;
-		last_valloc_entry = i;
-		// check to see if the space between this one and the last is big enough
-		if (previousRangeEnd >= KERNEL_BASE
-			&& ka->virtual_allocated_range[i].start
-				- previousRangeEnd >= size) {
-			spot = previousRangeEnd;
-			ka->virtual_allocated_range[i-1].size += size;
-			goto out;
-		}
-	}
-	if (spot == 0) {
-		// we hadn't found one between allocation ranges. this is ok.
-		// see if there's a gap after the last one
-		addr_t lastRangeEnd
-			= ka->virtual_allocated_range[last_valloc_entry].start
-				+ ka->virtual_allocated_range[last_valloc_entry].size;
-		if (KERNEL_BASE + (KERNEL_SIZE - 1) - lastRangeEnd >= size) {
-			spot = lastRangeEnd;
-			ka->virtual_allocated_range[last_valloc_entry].size += size;
-			goto out;
-		}
-		// see if there's a gap before the first one
-		if (ka->virtual_allocated_range[0].start > KERNEL_BASE) {
-			if (ka->virtual_allocated_range[0].start - KERNEL_BASE >= size) {
-				ka->virtual_allocated_range[0].start -= size;
-				spot = ka->virtual_allocated_range[0].start;
-				goto out;
-			}
-		}
-	}
-
-out:
-	return spot;
-}
-
-
-static bool
-is_page_in_phys_range(kernel_args *ka, addr_t paddr)
-{
-	// XXX horrible brute-force method of determining if the page can be allocated
-	unsigned int i;
-
-	for (i = 0; i < ka->num_physical_memory_ranges; i++) {
-		if (paddr >= ka->physical_memory_range[i].start
-			&& paddr < ka->physical_memory_range[i].start
-				+ ka->physical_memory_range[i].size) {
-			return true;
-		}
-	}
-	return false;
-}
-
-
-static addr_t
-vm_alloc_physical_page_from_kernel_args(kernel_args *ka)
-{
-	uint32 i;
-
-	for (i = 0; i < ka->num_physical_allocated_ranges; i++) {
-		addr_t next_page;
-
-		next_page = ka->physical_allocated_range[i].start
-			+ ka->physical_allocated_range[i].size;
-		// see if the page after the next allocated paddr run can be allocated
-		if (i + 1 < ka->num_physical_allocated_ranges
-			&& ka->physical_allocated_range[i+1].size != 0) {
-			// see if the next page will collide with the next allocated range
-			if (next_page >= ka->physical_allocated_range[i+1].start)
-				continue;
-		}
-		// see if the next physical page fits in the memory block
-		if (is_page_in_phys_range(ka, next_page)) {
-			// we got one!
-			ka->physical_allocated_range[i].size += B_PAGE_SIZE;
-			return (next_page / B_PAGE_SIZE);
-		}
-	}
-
-	return 0; // could not allocate a block
-}
-
-
-/** This one uses the kernel_args' physical and virtual memory ranges to
- *	allocate some pages before the VM is completely up.
- */
-
-addr_t
-vm_alloc_from_kernel_args(kernel_args *args, size_t size, uint32 lock)
-{
-	addr_t virtualBase, physicalAddress;
-	uint32 i;
-
-	// find the vaddr to allocate at
-	virtualBase = vm_alloc_virtual_from_kernel_args(args, size);
-	//dprintf("alloc_from_ka_struct: vaddr 0x%lx\n", virtualAddress);
-
-	// map the pages
-	for (i = 0; i < PAGE_ALIGN(size) / B_PAGE_SIZE; i++) {
-		physicalAddress = vm_alloc_physical_page_from_kernel_args(args);
-		//dprintf("alloc_from_ka_struct: paddr 0x%lx\n", physicalAddress);
-
-		if (physicalAddress == 0)
-			panic("error allocating page from ka_struct!\n");
-		arch_vm_translation_map_early_map(args, virtualBase + i * B_PAGE_SIZE,
-			physicalAddress * B_PAGE_SIZE, lock, &vm_alloc_physical_page_from_kernel_args);
-	}
-
-	return virtualBase;
-}