* vm_allocate_early(): Replace the "bool blockAlign" parameter with a more
  flexible "addr_t alignment".
* X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(),
  generic_vm_physical_page_mapper_init(): Use vm_allocate_early()'s alignment
  feature instead of aligning by hand.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37070 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 2ea7b17cf3 (parent c8a1d3ad1e)
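Both call sites fixed below used the same workaround: over-allocate by
(alignment - B_PAGE_SIZE) bytes, then round the returned base up by hand. A
minimal, self-contained sketch of that removed idiom (not Haiku source; the
constants and the sample base address are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t addr_t;

#define B_PAGE_SIZE 4096u

/* Old idiom: the caller reserved (alignment - B_PAGE_SIZE) extra bytes,
 * so rounding the page-aligned base up to the next alignment boundary
 * always stayed inside the reservation. */
static addr_t
align_by_hand(addr_t base, addr_t alignment)
{
    return (base + alignment - 1) / alignment * alignment;
}

int
main(void)
{
    addr_t alignment = 1024 * B_PAGE_SIZE;  /* 4 MB, as for a page table range */
    addr_t base = 0x80021000;               /* page-aligned, not 4 MB-aligned */

    printf("rounded base: %#lx\n", (unsigned long)align_by_hand(base, alignment));
    /* New idiom: pass the alignment as vm_allocate_early()'s last
     * argument and let the allocator place the range itself. */
    return 0;
}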
@@ -63,7 +63,7 @@ status_t vm_init_post_modules(struct kernel_args *args);
 void vm_free_kernel_args(struct kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
 addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
-    size_t physicalSize, uint32 attributes, bool blockAlign);
+    size_t physicalSize, uint32 attributes, addr_t alignment);
 
 void slab_init(struct kernel_args *args);
 void slab_init_post_area();
@@ -248,29 +248,24 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
     sIOSpaceChunkSize = ioSpaceChunkSize;
 
     // reserve virtual space for the IO space
-    // We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
-    // can guarantee to align the base address to ioSpaceChunkSize.
-    sIOSpaceBase = vm_allocate_early(args,
-        sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0, false);
+    sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
+        ioSpaceChunkSize);
     if (sIOSpaceBase == 0) {
         panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
             "space in virtual address space!");
         return B_ERROR;
     }
 
-    // align the base address to chunk size
-    sIOSpaceBase = (sIOSpaceBase + ioSpaceChunkSize - 1) / ioSpaceChunkSize
-        * ioSpaceChunkSize;
     *ioSpaceBase = sIOSpaceBase;
 
     // allocate some space to hold physical page mapping info
     paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
         sizeof(paddr_chunk_desc) * 1024, ~0L,
-        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
     num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
     virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
         sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
-        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
     TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
         paddr_desc, virtual_pmappings/*, iospace_pgtables*/));
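Besides dropping the manual rounding, the new call no longer reserves the
(ioSpaceChunkSize - B_PAGE_SIZE) bytes of slack at all. A back-of-envelope
check with illustrative values (the real sIOSpaceSize and ioSpaceChunkSize
depend on the architecture):

#include <stdio.h>

int
main(void)
{
    unsigned long pageSize = 4096;
    unsigned long chunkSize = 512 * pageSize;       /* illustrative: 2 MB */
    unsigned long ioSpaceSize = 4 * 1024 * 1024;    /* illustrative: 4 MB */

    unsigned long oldReserve = ioSpaceSize + chunkSize - pageSize;
    unsigned long newReserve = ioSpaceSize;

    /* always chunkSize - pageSize, regardless of ioSpaceSize */
    printf("virtual address space saved: %lu KB\n",
        (oldReserve - newReserve) / 1024);
    return 0;
}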
@@ -1262,7 +1262,7 @@ m68k_vm_translation_map_init(kernel_args *args)
 
     iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
         B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
-        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
     TRACE(("iospace_pgtables %p\n", iospace_pgtables));
 
@@ -84,22 +84,19 @@ X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
 status_t
 X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
 {
-    // We reserve more, so that we can guarantee to align the base address
-    // to page table ranges.
-    addr_t virtualBase = vm_allocate_early(args,
-        1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, false);
+    // allocate a virtual address range for the pages to be mapped into
+    addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
+        kPageTableAlignment);
     if (virtualBase == 0) {
         panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
             "physical page pool space in virtual address space!");
         return B_ERROR;
     }
-    virtualBase = (virtualBase + kPageTableAlignment - 1)
-        / kPageTableAlignment * kPageTableAlignment;
 
     // allocate memory for the page table and data
     size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
     page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
-        areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
     // prepare the page table
     _EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
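For context, not part of the diff: on 32-bit x86 without PAE one page table
maps 1024 pages of 4 KB, i.e. 4 MB, so aligning the 1024-page pool to
kPageTableAlignment keeps all of its slots within a single page table range,
which is presumably why the one early page table allocated right afterwards
suffices. Illustrative constants under that assumption:

#define B_PAGE_SIZE             4096
/* one non-PAE x86 page table covers 1024 * 4 KB = 4 MB */
#define kPageTableAlignment     (1024 * B_PAGE_SIZE)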
@@ -1212,7 +1212,8 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
     } else {
         // no areas yet -- allocate raw memory
         area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
-            SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, true);
+            SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
+            SLAB_AREA_SIZE);
         if (area == NULL) {
             mutex_lock(&sLock);
             return B_NO_MEMORY;
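This is the one call site where blockAlign was true, i.e. "align to the
allocation size"; the new code spells that out by passing SLAB_AREA_SIZE as
the alignment. Going by the semantics in the commit message, the mechanical
translation for any caller would be (hypothetical, shown as comments):

/* blockAlign == true   ->  alignment == size of the allocation
 *   vm_allocate_early(args, size, physSize, attrs, true)
 *     becomes
 *   vm_allocate_early(args, size, physSize, attrs, size)
 *
 * blockAlign == false  ->  alignment == 0 (no constraint)
 *   vm_allocate_early(args, size, physSize, attrs, false)
 *     becomes
 *   vm_allocate_early(args, size, physSize, attrs, 0)
 */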
@@ -3432,7 +3432,7 @@ reserve_boot_loader_ranges(kernel_args* args)
 
 
 static addr_t
-allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
+allocate_early_virtual(kernel_args* args, size_t size, addr_t alignment)
 {
     size = PAGE_ALIGN(size);
 
@@ -3443,8 +3443,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
         addr_t previousRangeEnd = args->virtual_allocated_range[i - 1].start
             + args->virtual_allocated_range[i - 1].size;
 
-        addr_t base = blockAlign
-            ? ROUNDUP(previousRangeEnd, size) : previousRangeEnd;
+        addr_t base = alignment > 0
+            ? ROUNDUP(previousRangeEnd, alignment) : previousRangeEnd;
 
         if (base >= KERNEL_BASE && base < rangeStart
             && rangeStart - base >= size) {
@@ -3459,7 +3459,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
     int lastEntryIndex = args->num_virtual_allocated_ranges - 1;
     addr_t lastRangeEnd = args->virtual_allocated_range[lastEntryIndex].start
         + args->virtual_allocated_range[lastEntryIndex].size;
-    addr_t base = blockAlign ? ROUNDUP(lastRangeEnd, size) : lastRangeEnd;
+    addr_t base = alignment > 0
+        ? ROUNDUP(lastRangeEnd, alignment) : lastRangeEnd;
     if (KERNEL_BASE + (KERNEL_SIZE - 1) - base >= size) {
         args->virtual_allocated_range[lastEntryIndex].size
             += base + size - lastRangeEnd;
@@ -3470,8 +3471,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
     addr_t rangeStart = args->virtual_allocated_range[0].start;
     if (rangeStart > KERNEL_BASE && rangeStart - KERNEL_BASE >= size) {
         base = rangeStart - size;
-        if (blockAlign)
-            base = ROUNDDOWN(base, size);
+        if (alignment > 0)
+            base = ROUNDDOWN(base, alignment);
 
         if (base >= KERNEL_BASE) {
             args->virtual_allocated_range[0].start = base;
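All three placement paths in allocate_early_virtual() (a gap between two
existing ranges, the space after the last range, the space before the first
range) now share one rule: alignment 0 means no constraint, any other value
rounds the candidate base to that boundary, up when searching forward and
down when growing the range downward. A condensed stand-in for that shared
rule, with ROUNDUP/ROUNDDOWN modelled locally (not Haiku source):

typedef unsigned long addr_t;

#define ROUNDUP(a, b)   (((a) + ((b) - 1)) / (b) * (b))
#define ROUNDDOWN(a, b) ((a) / (b) * (b))

/* alignment == 0 means "no constraint"; otherwise round the candidate
 * base to the requested boundary, downward when the range grows down */
static addr_t
align_candidate(addr_t base, addr_t alignment, int growsDown)
{
    if (alignment == 0)
        return base;
    return growsDown ? ROUNDDOWN(base, alignment) : ROUNDUP(base, alignment);
}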
@@ -3532,13 +3533,13 @@ allocate_early_physical_page(kernel_args* args)
 */
 addr_t
 vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
-    uint32 attributes, bool blockAlign)
+    uint32 attributes, addr_t alignment)
 {
     if (physicalSize > virtualSize)
         physicalSize = virtualSize;
 
     // find the vaddr to allocate at
-    addr_t virtualBase = allocate_early_virtual(args, virtualSize, blockAlign);
+    addr_t virtualBase = allocate_early_virtual(args, virtualSize, alignment);
     //dprintf("vm_allocate_early: vaddr 0x%lx\n", virtualAddress);
 
     // map the pages
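After this commit callers pick between two patterns; fragments mirroring the
real call sites in the hunks above and below (not compilable on their own):

/* physical backing equal to the virtual size, no alignment constraint */
addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
    B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

/* virtual-only reservation, base aligned to the chunk size */
sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
    ioSpaceChunkSize);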
@@ -3587,7 +3588,7 @@ vm_init(kernel_args* args)
 #if !USE_SLAB_ALLOCATOR_FOR_MALLOC
     // map in the new heap and initialize it
     addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
-        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
     TRACE(("heap at 0x%lx\n", heapBase));
     heap_init(heapBase, heapSize);
 #endif
@@ -2849,7 +2849,7 @@ vm_page_init(kernel_args *args)
 
     // map in the new free page table
     sPages = (vm_page *)vm_allocate_early(args, sNumPages * sizeof(vm_page),
-        ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+        ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
     TRACE(("vm_init: putting free_page_table @ %p, # ents %ld (size 0x%x)\n",
         sPages, sNumPages, (unsigned int)(sNumPages * sizeof(vm_page))));