* vm_allocate_early(): Replace the "bool blockAlign" parameter with a more
  flexible "addr_t alignment".
* X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(),
  generic_vm_physical_page_mapper_init(): Use vm_allocate_early()'s alignment
  feature instead of aligning by hand.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37070 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 2ea7b17cf3
parent c8a1d3ad1e
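The hunks below all follow the same pattern: callers that used to over-allocate and round the returned base up by hand now pass the desired alignment straight to vm_allocate_early(). A minimal, self-contained sketch of that round-up arithmetic; the round_up() helper and the constants are illustrative, not kernel code:

#include <cstdint>
#include <cstdio>

typedef uintptr_t addr_t;

// Illustrative helper mirroring the "(x + align - 1) / align * align"
// pattern removed from the callers and the ROUNDUP() now used inside
// allocate_early_virtual().
static addr_t
round_up(addr_t value, addr_t alignment)
{
	return (value + alignment - 1) / alignment * alignment;
}

int
main()
{
	const addr_t ioSpaceChunkSize = 0x400000;	// e.g. a 4 MB IO space chunk
	const addr_t unalignedBase = 0x80123000;

	// Old: allocate (size + ioSpaceChunkSize - B_PAGE_SIZE) bytes, then
	// round the returned base up by hand.
	// New: vm_allocate_early(args, size, 0, 0, ioSpaceChunkSize) performs
	// the equivalent rounding internally, so the caller gets an aligned base.
	printf("0x%lx rounds up to 0x%lx\n", (unsigned long)unalignedBase,
		(unsigned long)round_up(unalignedBase, ioSpaceChunkSize));
	return 0;
}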
@@ -63,7 +63,7 @@ status_t vm_init_post_modules(struct kernel_args *args);
 void vm_free_kernel_args(struct kernel_args *args);
 void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
 addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
-	size_t physicalSize, uint32 attributes, bool blockAlign);
+	size_t physicalSize, uint32 attributes, addr_t alignment);
 
 void slab_init(struct kernel_args *args);
 void slab_init_post_area();
@@ -248,29 +248,24 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
 	sIOSpaceChunkSize = ioSpaceChunkSize;
 
 	// reserve virtual space for the IO space
-	// We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
-	// can guarantee to align the base address to ioSpaceChunkSize.
-	sIOSpaceBase = vm_allocate_early(args,
-		sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0, false);
+	sIOSpaceBase = vm_allocate_early(args, sIOSpaceSize, 0, 0,
+		ioSpaceChunkSize);
 	if (sIOSpaceBase == 0) {
 		panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
 			"space in virtual address space!");
 		return B_ERROR;
 	}
 
-	// align the base address to chunk size
-	sIOSpaceBase = (sIOSpaceBase + ioSpaceChunkSize - 1) / ioSpaceChunkSize
-		* ioSpaceChunkSize;
 	*ioSpaceBase = sIOSpaceBase;
 
 	// allocate some space to hold physical page mapping info
 	paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
 		sizeof(paddr_chunk_desc) * 1024, ~0L,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 	num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
 	virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
 		sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
 	TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
 		paddr_desc, virtual_pmappings/*, iospace_pgtables*/));
@@ -1262,7 +1262,7 @@ m68k_vm_translation_map_init(kernel_args *args)
 
 	iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
 		B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
 	TRACE(("iospace_pgtables %p\n", iospace_pgtables));
 
@@ -84,22 +84,19 @@ X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
 status_t
 X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
 {
-	// We reserve more, so that we can guarantee to align the base address
-	// to page table ranges.
-	addr_t virtualBase = vm_allocate_early(args,
-		1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, false);
+	// allocate a virtual address range for the pages to be mapped into
+	addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
+		kPageTableAlignment);
 	if (virtualBase == 0) {
 		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
 			"physical page pool space in virtual address space!");
 		return B_ERROR;
 	}
-	virtualBase = (virtualBase + kPageTableAlignment - 1)
-		/ kPageTableAlignment * kPageTableAlignment;
 
 	// allocate memory for the page table and data
 	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
 	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
-		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
 	// prepare the page table
 	_EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
@@ -1212,7 +1212,8 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
 	} else {
 		// no areas yet -- allocate raw memory
 		area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
-			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, true);
+			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
+			SLAB_AREA_SIZE);
 		if (area == NULL) {
 			mutex_lock(&sLock);
 			return B_NO_MEMORY;
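In the slab MemoryManager hunk above, the old blockAlign == true meant "align to the allocation size"; the new code spells that out by passing SLAB_AREA_SIZE explicitly. Size-alignment matters here because, presumably, the owning Area of an object can then be recovered by masking its address. A hedged sketch of that mask arithmetic, with illustrative values:

#include <cstdint>
#include <cstdio>

typedef uintptr_t addr_t;

int
main()
{
	// Illustrative value; the real SLAB_AREA_SIZE is defined by the slab
	// MemoryManager and must be a power of two for this mask trick to work.
	const addr_t kSlabAreaSize = 8 * 1024 * 1024;

	const addr_t areaBase = 0x81800000;		// base aligned to kSlabAreaSize
	const addr_t object = areaBase + 0x1234;	// some object inside the area

	// Because the base is aligned to the area size, masking any address in
	// the area with ~(size - 1) yields the base again.
	addr_t recovered = object & ~(kSlabAreaSize - 1);
	printf("recovered base: 0x%lx\n", (unsigned long)recovered);
	return 0;
}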
@@ -3432,7 +3432,7 @@ reserve_boot_loader_ranges(kernel_args* args)
 
 
 static addr_t
-allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
+allocate_early_virtual(kernel_args* args, size_t size, addr_t alignment)
 {
 	size = PAGE_ALIGN(size);
 
@@ -3443,8 +3443,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
 		addr_t previousRangeEnd = args->virtual_allocated_range[i - 1].start
 			+ args->virtual_allocated_range[i - 1].size;
 
-		addr_t base = blockAlign
-			? ROUNDUP(previousRangeEnd, size) : previousRangeEnd;
+		addr_t base = alignment > 0
+			? ROUNDUP(previousRangeEnd, alignment) : previousRangeEnd;
 
 		if (base >= KERNEL_BASE && base < rangeStart
 			&& rangeStart - base >= size) {
@@ -3459,7 +3459,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
 	int lastEntryIndex = args->num_virtual_allocated_ranges - 1;
 	addr_t lastRangeEnd = args->virtual_allocated_range[lastEntryIndex].start
 		+ args->virtual_allocated_range[lastEntryIndex].size;
-	addr_t base = blockAlign ? ROUNDUP(lastRangeEnd, size) : lastRangeEnd;
+	addr_t base = alignment > 0
+		? ROUNDUP(lastRangeEnd, alignment) : lastRangeEnd;
 	if (KERNEL_BASE + (KERNEL_SIZE - 1) - base >= size) {
 		args->virtual_allocated_range[lastEntryIndex].size
 			+= base + size - lastRangeEnd;
@@ -3470,8 +3471,8 @@ allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
 	addr_t rangeStart = args->virtual_allocated_range[0].start;
 	if (rangeStart > KERNEL_BASE && rangeStart - KERNEL_BASE >= size) {
 		base = rangeStart - size;
-		if (blockAlign)
-			base = ROUNDDOWN(base, size);
+		if (alignment > 0)
+			base = ROUNDDOWN(base, alignment);
 
 		if (base >= KERNEL_BASE) {
 			args->virtual_allocated_range[0].start = base;
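In this last fallback path the allocation is placed in the gap below the first already-allocated range, so the candidate base is computed from the top of that gap and must be aligned downward with ROUNDDOWN(base, alignment). A matching round-down sketch; the round_down() helper and the values are illustrative:

#include <cstdint>
#include <cstdio>

typedef uintptr_t addr_t;

// Illustrative counterpart to ROUNDDOWN(): align "value" down to a multiple
// of "alignment".
static addr_t
round_down(addr_t value, addr_t alignment)
{
	return value / alignment * alignment;
}

int
main()
{
	// On 32-bit x86, kPageTableAlignment is the range one page table covers:
	// 1024 pages of 4 KB, i.e. 4 MB.
	const addr_t kPageTableAlignment = 1024 * 4096;

	const addr_t candidateBase = 0x805ff000;
	printf("0x%lx rounds down to 0x%lx\n", (unsigned long)candidateBase,
		(unsigned long)round_down(candidateBase, kPageTableAlignment));
	return 0;
}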
@@ -3532,13 +3533,13 @@ allocate_early_physical_page(kernel_args* args)
 */
 addr_t
 vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
-	uint32 attributes, bool blockAlign)
+	uint32 attributes, addr_t alignment)
 {
 	if (physicalSize > virtualSize)
 		physicalSize = virtualSize;
 
 	// find the vaddr to allocate at
-	addr_t virtualBase = allocate_early_virtual(args, virtualSize, blockAlign);
+	addr_t virtualBase = allocate_early_virtual(args, virtualSize, alignment);
 	//dprintf("vm_allocate_early: vaddr 0x%lx\n", virtualAddress);
 
 	// map the pages
@@ -3587,7 +3588,7 @@ vm_init(kernel_args* args)
 #if !USE_SLAB_ALLOCATOR_FOR_MALLOC
 	// map in the new heap and initialize it
 	addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 	TRACE(("heap at 0x%lx\n", heapBase));
 	heap_init(heapBase, heapSize);
 #endif
@@ -2849,7 +2849,7 @@ vm_page_init(kernel_args *args)
 
 	// map in the new free page table
 	sPages = (vm_page *)vm_allocate_early(args, sNumPages * sizeof(vm_page),
-		~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+		~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
 
 	TRACE(("vm_init: putting free_page_table @ %p, # ents %ld (size 0x%x)\n",
 		sPages, sNumPages, (unsigned int)(sNumPages * sizeof(vm_page))));