* Introduced structures {virtual,physical}_address_restrictions, which specify
  restrictions for virtual/physical addresses.
* vm_page_allocate_page_run():
  - Fixed conversion of base/limit to array indexes. sPhysicalPageOffset was not
    taken into account.
  - Takes a physical_address_restrictions instead of base/limit and also
    supports alignment and boundary restrictions, now.
* map_backing_store(), VM[User,Kernel]AddressSpace::InsertArea()/
  ReserveAddressRange() take a virtual_address_restrictions parameter, now.
  They also support an alignment independent from the range size.
* create_area_etc(), vm_create_anonymous_area(): Take
  {virtual,physical}_address_restrictions parameters, now.
* Removed no longer needed B_PHYSICAL_BASE_ADDRESS.
* DMAResources:
  - Fixed potential overflows of uint32 when initializing from device node
    attributes.
  - Fixed bounce buffer creation TODOs: By using create_area_etc() with the new
    restrictions parameters we can directly support physical high address,
    boundary, and alignment.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37131 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: 8d2572165b
Commit: a8ad734f1c
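For orientation before the diff: a minimal sketch of the new calling convention, modeled on the call sites changed below (e.g. arch_cpu_init_post_vm() and DMAResource::CreateBounceBuffer()). The area name, size, and the 4 GiB high_address here are illustrative values only, not taken from the commit.

// Callers now fill in two restriction structs instead of passing an
// addressSpec/physicalAddress pair directly to create_area_etc().
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	// any kernel address; alignment left 0 (defaults to B_PAGE_SIZE)

physical_address_restrictions physicalRestrictions = {};
physicalRestrictions.high_address = 0x100000000LL;
	// example: keep the contiguous run below 4 GiB (0 would mean "no limit")
physicalRestrictions.alignment = B_PAGE_SIZE;	// run start alignment
physicalRestrictions.boundary = 0;				// no boundary restriction

void* address;
area_id area = create_area_etc(B_SYSTEM_TEAM, "example buffer",
	16 * B_PAGE_SIZE, B_CONTIGUOUS,
	B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
	&virtualRestrictions, &physicalRestrictions, &address);
if (area < 0)
	return area;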
@@ -85,7 +85,6 @@ typedef struct {

/* address specifications for mapping physical memory */
#define B_ANY_KERNEL_BLOCK_ADDRESS (B_ANY_KERNEL_ADDRESS + 1)
#define B_PHYSICAL_BASE_ADDRESS (B_ANY_KERNEL_ADDRESS + 2)

/* area protection flags for the kernel */
#define B_KERNEL_READ_AREA 16
@@ -17,6 +17,9 @@

#include <vm/VMTranslationMap.h>

struct virtual_address_restrictions;

struct VMAddressSpace {
public:
class AreaIterator;

@@ -73,9 +76,11 @@ public:
uint32 allocationFlags) = 0;
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags) = 0;
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area,
uint32 allocationFlags) = 0;
virtual status_t InsertArea(VMArea* area, size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 allocationFlags, void** _address)
= 0;
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags) = 0;

@@ -87,9 +92,11 @@ public:
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;

virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags, uint32 allocationFlags) = 0;
virtual status_t ReserveAddressRange(size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 flags, uint32 allocationFlags,
void** _address) = 0;
virtual status_t UnreserveAddressRange(addr_t address,
size_t size, uint32 allocationFlags) = 0;
virtual void UnreserveAllAddressRanges(
@@ -77,9 +77,11 @@ void permit_page_faults(void);
void forbid_page_faults(void);

// private kernel only extension (should be moved somewhere else):
area_id create_area_etc(team_id team, const char *name, void **address,
uint32 addressSpec, uint32 size, uint32 lock, uint32 protection,
phys_addr_t physicalAddress, uint32 flags);
area_id create_area_etc(team_id team, const char *name, uint32 size,
uint32 lock, uint32 protection, uint32 flags,
const virtual_address_restrictions* virtualAddressRestrictions,
const physical_address_restrictions* physicalAddressRestrictions,
void **_address);
area_id transfer_area(area_id id, void** _address, uint32 addressSpec,
team_id target, bool kernel);

@@ -87,9 +89,11 @@ status_t vm_block_address_range(const char* name, void* address, addr_t size);
status_t vm_unreserve_address_range(team_id team, void *address, addr_t size);
status_t vm_reserve_address_range(team_id team, void **_address,
uint32 addressSpec, addr_t size, uint32 flags);
area_id vm_create_anonymous_area(team_id team, const char *name, void **address,
uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection,
phys_addr_t physicalAddress, uint32 flags, bool kernel);
area_id vm_create_anonymous_area(team_id team, const char* name, addr_t size,
uint32 wiring, uint32 protection, uint32 flags,
const virtual_address_restrictions* virtualAddressRestrictions,
const physical_address_restrictions* physicalAddressRestrictions,
bool kernel, void** _address);
area_id vm_map_physical_memory(team_id team, const char *name, void **address,
uint32 addressSpec, addr_t size, uint32 protection,
phys_addr_t physicalAddress, bool alreadyWired);
@@ -60,8 +60,8 @@ bool vm_page_try_reserve_pages(vm_page_reservation* reservation, uint32 count,

struct vm_page *vm_page_allocate_page(vm_page_reservation* reservation,
uint32 flags);
struct vm_page *vm_page_allocate_page_run(uint32 flags, phys_addr_t base,
phys_addr_t limit, page_num_t length, int priority);
struct vm_page *vm_page_allocate_page_run(uint32 flags, page_num_t length,
const physical_address_restrictions* restrictions, int priority);
struct vm_page *vm_page_at_index(int32 index);
struct vm_page *vm_lookup_page(page_num_t pageNumber);
bool vm_page_is_dummy(struct vm_page *page);
@@ -30,6 +30,30 @@ struct VMCacheRef;
typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;

struct virtual_address_restrictions {
void* address;
// base or exact address, depending on address_specification
uint32 address_specification;
// address specification as passed to create_area()
size_t alignment;
// address alignment; overridden when
// address_specification == B_ANY_KERNEL_BLOCK_ADDRESS
};

struct physical_address_restrictions {
phys_addr_t low_address;
// lowest acceptable address
phys_addr_t high_address;
// lowest no longer acceptable address; for ranges: the
// highest acceptable non-inclusive end address
phys_size_t alignment;
// address alignment
phys_size_t boundary;
// multiples of which may not be crossed by the address
// range
};

typedef struct vm_page_mapping {
vm_page_mapping_link page_link;
vm_page_mapping_link area_link;
@@ -539,8 +539,9 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
uint32 count = size / B_PAGE_SIZE;

if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
physical_address_restrictions restrictions = {};
memory->page = vm_page_allocate_page_run(
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, 0, 0, count,
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, count, &restrictions,
VM_PRIORITY_SYSTEM);
if (memory->page == NULL)
return B_NO_MEMORY;
@@ -691,10 +691,14 @@ arch_cpu_init_post_vm(kernel_args *args)
//i386_selector_init(gGDT); // pass the new gdt

// allocate an area for the double fault stacks
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
create_area_etc(B_SYSTEM_TEAM, "double fault stacks",
(void**)&sDoubleFaultStacks, B_ANY_KERNEL_ADDRESS,
kDoubleFaultStackSize * smp_get_num_cpus(), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions,
(void**)&sDoubleFaultStacks);

X86PagingStructures* kernelPagingStructures
= static_cast<X86VMTranslationMap*>(
@@ -1371,9 +1371,12 @@ arch_int_init_post_vm(struct kernel_args *args)
if (cpuCount > 0) {
size_t areaSize = ROUNDUP(cpuCount * idtSize, B_PAGE_SIZE);
desc_table* idt;
area = create_area_etc(B_SYSTEM_TEAM, "idt", (void**)&idt,
B_ANY_KERNEL_ADDRESS, areaSize, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area = create_area_etc(B_SYSTEM_TEAM, "idt", areaSize, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, (void**)&idt);
if (area < 0)
return area;
@@ -189,9 +189,13 @@ X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
// structures
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
void* data;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
&data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
PAGE_ALIGN(areaSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, &data);
if (dataArea < 0)
return dataArea;
@@ -484,9 +484,13 @@ X86PagingMethodPAE::PhysicalPageSlotPool::AllocatePool(
size_t areaSize = B_PAGE_SIZE
+ sizeof(PhysicalPageSlot[kPAEPageTableEntryCount]);
void* data;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
&data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
PAGE_ALIGN(areaSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, &data);
if (dataArea < 0)
return dataArea;

@@ -800,11 +804,15 @@ X86PagingMethodPAE::Allocate32BitPage(phys_addr_t& _physicalAddress,
} else {
// no pages -- allocate one
locker.Unlock();
page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 0, 0x100000000LL, 1,
physical_address_restrictions restrictions = {};
restrictions.high_address = 0x100000000LL;
page = vm_page_allocate_page_run(PAGE_STATE_UNUSED, 1, &restrictions,
VM_PRIORITY_SYSTEM);
DEBUG_PAGE_ACCESS_END(page);
if (page == NULL)
return NULL;

DEBUG_PAGE_ACCESS_END(page);
}

// map the page
@@ -546,9 +546,14 @@ vm86_prepare(struct vm86_state *state, unsigned int ramSize)
if (ramSize < VM86_MIN_RAM_SIZE)
ramSize = VM86_MIN_RAM_SIZE;

void *address = (void *)0;
state->ram_area = create_area_etc(team->id, "dos", &address,
B_EXACT_ADDRESS, ramSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
void *address;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = NULL;
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
state->ram_area = create_area_etc(team->id, "dos", ramSize, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
&physicalRestrictions, &address);
if (state->ram_area < B_OK) {
ret = state->ram_area;
TRACE("Could not create RAM area\n");
@@ -1,5 +1,5 @@
/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de
* Distributed under the terms of the MIT License.
*/

@@ -291,9 +291,13 @@ debug_heap_init()
{
// create the heap area
void* base;
area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", (void**)&base,
B_ANY_KERNEL_ADDRESS, KDEBUG_HEAP, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP,
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
CREATE_AREA_DONT_WAIT, &virtualRestrictions, &physicalRestrictions,
(void**)&base);
if (area < 0)
return;
@@ -369,10 +369,14 @@ TracingMetaData::Create(TracingMetaData*& _metaData)
if (error != B_OK)
return error;

virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
(void**)&metaData->fTraceOutputBuffer, B_ANY_KERNEL_ADDRESS,
kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions,
(void**)&metaData->fTraceOutputBuffer);
if (area < 0)
return area;

@@ -411,13 +415,18 @@ TracingMetaData::_CreateMetaDataArea(bool findPrevious, area_id& _area,
{
// search meta data in memory (from previous session)
TracingMetaData* metaData;
addr_t metaDataAddress = kMetaDataBaseAddress;
phys_addr_t metaDataAddress = kMetaDataBaseAddress;
for (; metaDataAddress <= kMetaDataBaseEndAddress;
metaDataAddress += kMetaDataAddressIncrement) {
area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata",
(void**)&metaData, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE,
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
physicalRestrictions.low_address = metaDataAddress;
physicalRestrictions.high_address = metaDataAddress + B_PAGE_SIZE;
area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing metadata", B_PAGE_SIZE,
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
metaDataAddress, CREATE_AREA_DONT_CLEAR);
CREATE_AREA_DONT_CLEAR, &virtualRestrictions, &physicalRestrictions,
(void**)&metaData);
if (area < 0)
continue;

@@ -463,11 +472,17 @@ TracingMetaData::_InitPreviousTracingData()
}

// re-map the previous tracing buffer
void* buffer = fTraceOutputBuffer;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = fTraceOutputBuffer;
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
physicalRestrictions.low_address = fPhysicalAddress;
physicalRestrictions.high_address = fPhysicalAddress
+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE);
area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
&buffer, B_EXACT_ADDRESS, kTraceOutputBufferSize + MAX_TRACE_SIZE,
B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
fPhysicalAddress, CREATE_AREA_DONT_CLEAR);
kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR,
&virtualRestrictions, &physicalRestrictions, NULL);
if (area < 0) {
dprintf("Failed to init tracing meta data: Mapping tracing log "
"buffer failed: %s\n", strerror(area));

@@ -475,7 +490,7 @@ TracingMetaData::_InitPreviousTracingData()
}

dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
buffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);
fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

// verify/repair the tracing entry list
uint32 errorCount = 0;
@@ -11,6 +11,7 @@

#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>

#include "IORequest.h"

@@ -117,19 +118,19 @@ DMAResource::Init(device_node* node, generic_size_t blockSize,
uint32 value;
if (gDeviceManagerModule.get_attr_uint32(node,
B_DMA_ALIGNMENT, &value, true) == B_OK)
restrictions.alignment = value + 1;
restrictions.alignment = (generic_size_t)value + 1;

if (gDeviceManagerModule.get_attr_uint32(node,
B_DMA_BOUNDARY, &value, true) == B_OK)
restrictions.boundary = value + 1;
restrictions.boundary = (generic_size_t)value + 1;

if (gDeviceManagerModule.get_attr_uint32(node,
B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
restrictions.max_segment_size = value * blockSize;
restrictions.max_segment_size = (generic_size_t)value * blockSize;

if (gDeviceManagerModule.get_attr_uint32(node,
B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
restrictions.max_transfer_size = value * blockSize;
restrictions.max_transfer_size = (generic_size_t)value * blockSize;

if (gDeviceManagerModule.get_attr_uint32(node,
B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)

@@ -226,19 +227,16 @@ DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
area_id area = -1;
phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

if (fRestrictions.alignment > B_PAGE_SIZE) {
dprintf("dma buffer restrictions not yet implemented: alignment %"
B_PRIuGENADDR "\n", fRestrictions.alignment);
}
if (fRestrictions.boundary > B_PAGE_SIZE) {
dprintf("dma buffer restrictions not yet implemented: boundary %"
B_PRIuGENADDR "\n", fRestrictions.boundary);
}

bounceBuffer = (void*)fRestrictions.low_address;
// TODO: We also need to enforce the boundary restrictions.
area = create_area("dma buffer", &bounceBuffer, B_PHYSICAL_BASE_ADDRESS,
size, B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
physicalRestrictions.low_address = fRestrictions.low_address;
physicalRestrictions.high_address = fRestrictions.high_address;
physicalRestrictions.alignment = fRestrictions.alignment;
physicalRestrictions.boundary = fRestrictions.boundary;
area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &virtualRestrictions,
&physicalRestrictions, &bounceBuffer);
if (area < B_OK)
return area;

@@ -251,10 +249,7 @@ DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)

physicalBase = entry.address;

if (fRestrictions.high_address < physicalBase + size) {
delete_area(area);
return B_NO_MEMORY;
}
ASSERT(fRestrictions.high_address >= physicalBase + size);

DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
if (buffer == NULL) {
@@ -1881,9 +1881,13 @@ elf_load_user_image(const char *path, struct team *team, int flags,
snprintf(regionName, B_OS_NAME_LENGTH, "%s_bss%d", baseName, i);

regionAddress += fileUpperBound;
id = create_area_etc(team->id, regionName,
(void **)&regionAddress, B_EXACT_ADDRESS, bssSize,
B_NO_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = regionAddress;
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
id = create_area_etc(team->id, regionName, bssSize, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
&physicalRestrictions, (void**)&regionAddress);
if (id < B_OK) {
dprintf("error allocating bss area: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
@@ -645,9 +645,12 @@ port_init(kernel_args *args)
size_t size = sizeof(struct port_entry) * sMaxPorts;

// create and initialize ports table
sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", (void**)&sPorts,
B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
sPortArea = create_area_etc(B_SYSTEM_TEAM, "port_table", size, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, (void**)&sPorts);
if (sPortArea < 0) {
panic("unable to allocate kernel port table!\n");
return sPortArea;

@@ -1,5 +1,5 @@
/*
* Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@@ -417,9 +417,13 @@ haiku_sem_init(kernel_args *args)
sMaxSems <<= 1;

// create and initialize semaphore table
area = create_area_etc(B_SYSTEM_TEAM, "sem_table", (void **)&sSems,
B_ANY_KERNEL_ADDRESS, sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
if (area < 0)
panic("unable to allocate semaphore table!\n");
@@ -598,14 +598,18 @@ MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
return B_WOULD_BLOCK;

virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification
= (flags & CACHE_ALIGN_ON_SIZE) != 0
? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area_id area = create_area_etc(VMAddressSpace::KernelID(),
"slab large raw allocation", &_pages,
(flags & CACHE_ALIGN_ON_SIZE) != 0
? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS,
size, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
"slab large raw allocation", size, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
? CREATE_AREA_DONT_WAIT : 0)
| CREATE_AREA_DONT_CLEAR);
| CREATE_AREA_DONT_CLEAR,
&virtualRestrictions, &physicalRestrictions, &_pages);
return area >= 0 ? B_OK : area;
}
@@ -865,10 +865,15 @@ delete_team_struct(struct team* team)
static status_t
create_team_user_data(struct team* team)
{
void* address = (void*)KERNEL_USER_DATA_BASE;
void* address;
size_t size = 4 * B_PAGE_SIZE;
team->user_data_area = create_area_etc(team->id, "user area", &address,
B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
virtualRestrictions.address_specification = B_BASE_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
team->user_data_area = create_area_etc(team->id, "user area", size,
B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, &virtualRestrictions,
&physicalRestrictions, &address);
if (team->user_data_area < 0)
return team->user_data_area;

@@ -1048,9 +1053,13 @@ team_create_thread_start(void* args)
// the exact location at the end of the user stack area

sprintf(userStackName, "%s_main_stack", team->name);
thread->user_stack_area = create_area_etc(team->id, userStackName,
(void**)&thread->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = (void*)thread->user_stack_base;
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
thread->user_stack_area = create_area_etc(team->id, userStackName, sizeLeft,
B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
&virtualRestrictions, &physicalRestrictions, NULL);
if (thread->user_stack_area < 0) {
dprintf("team_create_thread_start: could not create default user stack "
"region: %s\n", strerror(thread->user_stack_area));
@@ -554,10 +554,15 @@ create_thread(thread_creation_attributes& attributes, bool kernel)

snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_stack",
attributes.name, thread->id);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = (void*)thread->user_stack_base;
virtualRestrictions.address_specification = B_BASE_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
thread->user_stack_area = create_area_etc(team->id, stack_name,
(void **)&thread->user_stack_base, B_BASE_ADDRESS,
thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0, 0);
thread->user_stack_size + TLS_SIZE, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0,
&virtualRestrictions, &physicalRestrictions,
(void**)&thread->user_stack_base);
if (thread->user_stack_area < B_OK
|| arch_thread_init_tls(thread) < B_OK) {
// great, we have a fully running thread without a (usable)
@@ -164,17 +164,20 @@ VMKernelAddressSpace::LookupArea(addr_t address) const
You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* _area, uint32 allocationFlags)
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 allocationFlags, void** _address)
{
TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
", %p \"%s\")\n", *_address, addressSpec, size, _area, _area->name);
", %p \"%s\")\n", addressRestrictions->address,
addressRestrictions->address_specification, size, _area, _area->name);

VMKernelArea* area = static_cast<VMKernelArea*>(_area);

Range* range;
status_t error = _AllocateRange((addr_t)*_address, addressSpec, size,
addressSpec == B_EXACT_ADDRESS, allocationFlags, range);
status_t error = _AllocateRange(addressRestrictions, size,
addressRestrictions->address_specification == B_EXACT_ADDRESS,
allocationFlags, range);
if (error != B_OK)
return error;

@@ -184,7 +187,8 @@ VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
area->SetBase(range->base);
area->SetSize(range->size);

*_address = (void*)area->Base();
if (_address != NULL)
*_address = (void*)area->Base();
fFreeSpace -= area->Size();

PARANOIA_CHECK_STRUCTURES();

@@ -356,11 +360,13 @@ VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,

status_t
VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
size_t size, uint32 flags, uint32 allocationFlags)
VMKernelAddressSpace::ReserveAddressRange(size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 flags, uint32 allocationFlags, void** _address)
{
TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
B_PRIxSIZE ", %#" B_PRIx32 ")\n", *_address, addressSpec, size, flags);
B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
addressRestrictions->address_specification, size, flags);

// Don't allow range reservations, if the address space is about to be
// deleted.
@@ -368,7 +374,7 @@ VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
return B_BAD_TEAM_ID;

Range* range;
status_t error = _AllocateRange((addr_t)*_address, addressSpec, size, false,
status_t error = _AllocateRange(addressRestrictions, size, false,
allocationFlags, range);
if (error != B_OK)
return error;

@@ -377,7 +383,8 @@ VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
range->reserved.base = range->base;
range->reserved.flags = flags;

*_address = (void*)range->base;
if (_address != NULL)
*_address = (void*)range->base;

Get();
PARANOIA_CHECK_STRUCTURES();

@@ -529,19 +536,23 @@ VMKernelAddressSpace::_RemoveRange(Range* range)

status_t
VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
VMKernelAddressSpace::_AllocateRange(
const virtual_address_restrictions* addressRestrictions,
size_t size, bool allowReservedRange, uint32 allocationFlags,
Range*& _range)
{
TRACE(" VMKernelAddressSpace::_AllocateRange(address: %#" B_PRIxADDR
", size: %#" B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved "
"allowed: %d)\n", address, size, addressSpec, allowReservedRange);
TRACE(" VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
addressRestrictions->address, size,
addressRestrictions->address_specification, allowReservedRange);

// prepare size, alignment and the base address for the range search
addr_t address = (addr_t)addressRestrictions->address;
size = ROUNDUP(size, B_PAGE_SIZE);
size_t alignment = B_PAGE_SIZE;
size_t alignment = addressRestrictions->alignment != 0
? addressRestrictions->alignment : B_PAGE_SIZE;

switch (addressSpec) {
switch (addressRestrictions->address_specification) {
case B_EXACT_ADDRESS:
{
if (address % B_PAGE_SIZE != 0)

@@ -574,10 +585,13 @@ VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
}

// find a range
Range* range = _FindFreeRange(address, size, alignment, addressSpec,
allowReservedRange, address);
if (range == NULL)
return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;
Range* range = _FindFreeRange(address, size, alignment,
addressRestrictions->address_specification, allowReservedRange,
address);
if (range == NULL) {
return addressRestrictions->address_specification == B_EXACT_ADDRESS
? B_BAD_VALUE : B_NO_MEMORY;
}

TRACE(" VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
@@ -27,9 +27,10 @@ public:
uint32 protection, uint32 allocationFlags);
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags);
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area,
uint32 allocationFlags);
virtual status_t InsertArea(VMArea* area, size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 allocationFlags, void** _address);
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags);

@@ -41,9 +42,11 @@ public:
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags);

virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags, uint32 allocationFlags);
virtual status_t ReserveAddressRange(size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 flags, uint32 allocationFlags,
void** _address);
virtual status_t UnreserveAddressRange(addr_t address,
size_t size, uint32 allocationFlags);
virtual void UnreserveAllAddressRanges(

@@ -67,9 +70,10 @@ private:
void _InsertRange(Range* range);
void _RemoveRange(Range* range);

status_t _AllocateRange(addr_t address,
uint32 addressSpec, size_t size,
bool allowReservedRange,
status_t _AllocateRange(
const virtual_address_restrictions*
addressRestrictions,
size_t size, bool allowReservedRange,
uint32 allocationFlags, Range*& _range);
Range* _FindFreeRange(addr_t start, size_t size,
size_t alignment, uint32 addressSpec,
@@ -121,22 +121,23 @@ VMUserAddressSpace::LookupArea(addr_t address) const
You need to hold the VMAddressSpace write lock.
*/
status_t
VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* _area, uint32 allocationFlags)
VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 allocationFlags, void** _address)
{
VMUserArea* area = static_cast<VMUserArea*>(_area);

addr_t searchBase, searchEnd;
status_t status;

switch (addressSpec) {
switch (addressRestrictions->address_specification) {
case B_EXACT_ADDRESS:
searchBase = (addr_t)*_address;
searchEnd = (addr_t)*_address + (size - 1);
searchBase = (addr_t)addressRestrictions->address;
searchEnd = (addr_t)addressRestrictions->address + (size - 1);
break;

case B_BASE_ADDRESS:
searchBase = (addr_t)*_address;
searchBase = (addr_t)addressRestrictions->address;
searchEnd = fEndAddress;
break;

@@ -155,10 +156,12 @@ VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,
return B_BAD_VALUE;
}

status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area,
allocationFlags);
status = _InsertAreaSlot(searchBase, size, searchEnd,
addressRestrictions->address_specification,
addressRestrictions->alignment, area, allocationFlags);
if (status == B_OK) {
*_address = (void*)area->Base();
if (_address != NULL)
*_address = (void*)area->Base();
fFreeSpace -= area->Size();
}

@@ -276,8 +279,9 @@ VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,

status_t
VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
size_t size, uint32 flags, uint32 allocationFlags)
VMUserAddressSpace::ReserveAddressRange(size_t size,
const virtual_address_restrictions* addressRestrictions,
uint32 flags, uint32 allocationFlags, void** _address)
{
// check to see if this address space has entered DELETE state
if (fDeleting) {

@@ -290,8 +294,8 @@ VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
if (area == NULL)
return B_NO_MEMORY;

status_t status = InsertArea(_address, addressSpec, size, area,
allocationFlags);
status_t status = InsertArea(area, size, addressRestrictions,
allocationFlags, _address);
if (status != B_OK) {
area->~VMUserArea();
free_etc(area, allocationFlags);

@@ -453,7 +457,8 @@ VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
/*! Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
uint32 addressSpec, size_t alignment, VMUserArea* area,
uint32 allocationFlags)
{
VMUserArea* last = NULL;
VMUserArea* next;

@@ -480,7 +485,8 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
// TODO: this could be further optimized.
}

size_t alignment = B_PAGE_SIZE;
if (alignment == 0)
alignment = B_PAGE_SIZE;
if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
// align the memory to the next power of two of the size
while (alignment < size)
@@ -25,9 +25,10 @@ public:
uint32 protection, uint32 allocationFlags);
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags);
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area,
uint32 allocationFlags);
virtual status_t InsertArea(VMArea* area, size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 allocationFlags, void** _address);
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags);

@@ -39,9 +40,11 @@ public:
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags);

virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags, uint32 allocationFlags);
virtual status_t ReserveAddressRange(size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 flags, uint32 allocationFlags,
void** _address);
virtual status_t UnreserveAddressRange(addr_t address,
size_t size, uint32 allocationFlags);
virtual void UnreserveAllAddressRanges(

@@ -55,7 +58,8 @@ private:
uint32 allocationFlags);
status_t _InsertAreaSlot(addr_t start, addr_t size,
addr_t end, uint32 addressSpec,
VMUserArea* area, uint32 allocationFlags);
size_t alignment, VMUserArea* area,
uint32 allocationFlags);

private:
VMUserAreaList fAreas;
@@ -267,9 +267,10 @@ static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
bool isWrite, bool isUser, vm_page** wirePage,
VMAreaWiredRange* wiredRange = NULL);
static status_t map_backing_store(VMAddressSpace* addressSpace,
VMCache* cache, void** _virtualAddress, off_t offset, addr_t size,
uint32 addressSpec, int wiring, int protection, int mapping,
VMArea** _area, const char* areaName, uint32 flags, bool kernel);
VMCache* cache, off_t offset, const char* areaName, addr_t size, int wiring,
int protection, int mapping, uint32 flags,
const virtual_address_restrictions* addressRestrictions, bool kernel,
VMArea** _area, void** _virtualAddress);

// #pragma mark -

@@ -670,12 +671,14 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
// first cache to it and resize the first cache.

// map the second area
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = (void*)secondBase;
addressRestrictions.address_specification = B_EXACT_ADDRESS;
VMArea* secondArea;
void* secondBaseAddress = (void*)secondBase;
error = map_backing_store(addressSpace, cache, &secondBaseAddress,
area->cache_offset + (secondBase - area->Base()), secondSize,
B_EXACT_ADDRESS, area->wiring, area->protection, REGION_NO_PRIVATE_MAP,
&secondArea, area->name, 0, kernel);
error = map_backing_store(addressSpace, cache,
area->cache_offset + (secondBase - area->Base()), area->name,
secondSize, area->wiring, area->protection, REGION_NO_PRIVATE_MAP, 0,
&addressRestrictions, kernel, &secondArea, NULL);
if (error != B_OK) {
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;

@@ -740,15 +743,16 @@ unmap_address_range(VMAddressSpace* addressSpace, addr_t address, addr_t size,
\a size) is wired.
*/
static status_t
map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
void** _virtualAddress, off_t offset, addr_t size, uint32 addressSpec,
int wiring, int protection, int mapping, VMArea** _area,
const char* areaName, uint32 flags, bool kernel)
map_backing_store(VMAddressSpace* addressSpace, VMCache* cache, off_t offset,
const char* areaName, addr_t size, int wiring, int protection, int mapping,
uint32 flags, const virtual_address_restrictions* addressRestrictions,
bool kernel, VMArea** _area, void** _virtualAddress)
{
TRACE(("map_backing_store: aspace %p, cache %p, virtual %p, offset 0x%Lx, "
"size %lu, addressSpec %ld, wiring %d, protection %d, area %p, areaName "
"'%s'\n", addressSpace, cache, *_virtualAddress, offset, size,
addressSpec, wiring, protection, _area, areaName));
"'%s'\n", addressSpace, cache, addressRestrictions->address, offset,
size, addressRestrictions->address_specification, wiring, protection,
_area, areaName));
cache->AssertLocked();

uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
@@ -808,16 +812,16 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
goto err2;
}

if (addressSpec == B_EXACT_ADDRESS
if (addressRestrictions->address_specification == B_EXACT_ADDRESS
&& (flags & CREATE_AREA_UNMAP_ADDRESS_RANGE) != 0) {
status = unmap_address_range(addressSpace, (addr_t)*_virtualAddress,
size, kernel);
status = unmap_address_range(addressSpace,
(addr_t)addressRestrictions->address, size, kernel);
if (status != B_OK)
goto err2;
}

status = addressSpace->InsertArea(_virtualAddress, addressSpec, size, area,
allocationFlags);
status = addressSpace->InsertArea(area, size, addressRestrictions,
allocationFlags, _virtualAddress);
if (status != B_OK) {
// TODO: wait and try again once this is working in the backend
#if 0

@@ -995,10 +999,12 @@ vm_block_address_range(const char* name, void* address, addr_t size)
cache->Lock();

VMArea* area;
void* areaAddress = address;
status = map_backing_store(addressSpace, cache, &areaAddress, 0, size,
B_EXACT_ADDRESS, B_ALREADY_WIRED, 0, REGION_NO_PRIVATE_MAP, &area, name,
0, true);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = address;
addressRestrictions.address_specification = B_EXACT_ADDRESS;
status = map_backing_store(addressSpace, cache, 0, name, size,
B_ALREADY_WIRED, B_ALREADY_WIRED, REGION_NO_PRIVATE_MAP, 0,
&addressRestrictions, true, &area, NULL);
if (status != B_OK) {
cache->ReleaseRefAndUnlock();
return status;

@@ -1035,18 +1041,23 @@ vm_reserve_address_range(team_id team, void** _address, uint32 addressSpec,
if (!locker.IsLocked())
return B_BAD_TEAM_ID;

virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *_address;
addressRestrictions.address_specification = addressSpec;
VMAddressSpace* addressSpace = locker.AddressSpace();
return addressSpace->ReserveAddressRange(_address, addressSpec,
size, flags,
return addressSpace->ReserveAddressRange(size, &addressRestrictions, flags,
addressSpace == VMAddressSpace::Kernel()
? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0);
? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0,
_address);
}

area_id
vm_create_anonymous_area(team_id team, const char* name, void** address,
uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection,
phys_addr_t physicalAddress, uint32 flags, bool kernel)
vm_create_anonymous_area(team_id team, const char *name, addr_t size,
uint32 wiring, uint32 protection, uint32 flags,
const virtual_address_restrictions* virtualAddressRestrictions,
const physical_address_restrictions* physicalAddressRestrictions,
bool kernel, void** _address)
{
VMArea* area;
VMCache* cache;
@@ -1075,24 +1086,22 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
#endif

// check parameters
switch (addressSpec) {
switch (virtualAddressRestrictions->address_specification) {
case B_ANY_ADDRESS:
case B_EXACT_ADDRESS:
case B_BASE_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
case B_ANY_KERNEL_BLOCK_ADDRESS:
break;
case B_PHYSICAL_BASE_ADDRESS:
physicalAddress = (addr_t)*address;
addressSpec = B_ANY_KERNEL_ADDRESS;
break;

default:
return B_BAD_VALUE;
}

if (physicalAddress != 0)
if (physicalAddressRestrictions->low_address != 0
&& physicalAddressRestrictions->high_address != 0) {
wiring = B_CONTIGUOUS;
}

bool doReserveMemory = false;
switch (wiring) {

@@ -1181,7 +1190,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
// we try to allocate the page run here upfront as this may easily
// fail for obvious reasons
page = vm_page_allocate_page_run(PAGE_STATE_WIRED | pageAllocFlags,
physicalAddress, 0, size / B_PAGE_SIZE, priority);
size / B_PAGE_SIZE, physicalAddressRestrictions, priority);
if (page == NULL) {
status = B_NO_MEMORY;
goto err0;

@@ -1197,10 +1206,11 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
goto err1;

addressSpace = locker.AddressSpace();
} while (addressSpec == B_EXACT_ADDRESS
} while (virtualAddressRestrictions->address_specification
== B_EXACT_ADDRESS
&& (flags & CREATE_AREA_UNMAP_ADDRESS_RANGE) != 0
&& wait_if_address_range_is_wired(addressSpace, (addr_t)*address, size,
&locker));
&& wait_if_address_range_is_wired(addressSpace,
(addr_t)virtualAddressRestrictions->address, size, &locker));

// create an anonymous cache
// if it's a stack, make sure that two pages are available at least
@@ -1232,9 +1242,9 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,

cache->Lock();

status = map_backing_store(addressSpace, cache, address, 0, size,
addressSpec, wiring, protection, REGION_NO_PRIVATE_MAP, &area, name,
flags, kernel);
status = map_backing_store(addressSpace, cache, 0, name, size, wiring,
protection, REGION_NO_PRIVATE_MAP, flags, virtualAddressRestrictions,
kernel, &area, _address);

if (status != B_OK) {
cache->ReleaseRefAndUnlock();

@@ -1432,9 +1442,12 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,

cache->Lock();

status = map_backing_store(locker.AddressSpace(), cache, _address,
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
REGION_NO_PRIVATE_MAP, &area, name, 0, true);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *_address;
addressRestrictions.address_specification = addressSpec & ~B_MTR_MASK;
status = map_backing_store(locker.AddressSpace(), cache, 0, name, size,
B_FULL_LOCK, protection, REGION_NO_PRIVATE_MAP, 0, &addressRestrictions,
true, &area, _address);

if (status < B_OK)
cache->ReleaseRefLocked();

@@ -1547,9 +1560,12 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
cache->Lock();

VMArea* area;
result = map_backing_store(locker.AddressSpace(), cache, _address,
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
REGION_NO_PRIVATE_MAP, &area, name, 0, true);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *_address;
addressRestrictions.address_specification = addressSpec & ~B_MTR_MASK;
result = map_backing_store(locker.AddressSpace(), cache, 0, name,
size, B_FULL_LOCK, protection, REGION_NO_PRIVATE_MAP, 0,
&addressRestrictions, true, &area, _address);

if (result != B_OK)
cache->ReleaseRefLocked();

@@ -1631,9 +1647,12 @@ vm_create_null_area(team_id team, const char* name, void** address,
cache->Lock();

VMArea* area;
status = map_backing_store(locker.AddressSpace(), cache, address, 0, size,
addressSpec, B_LAZY_LOCK, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP,
&area, name, flags, true);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *address;
addressRestrictions.address_specification = addressSpec;
status = map_backing_store(locker.AddressSpace(), cache, 0, name, size,
B_LAZY_LOCK, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, flags,
&addressRestrictions, true, &area, address);

if (status < B_OK) {
cache->ReleaseRefAndUnlock();
@@ -1713,8 +1732,13 @@ _vm_map_file(team_id team, const char* name, void** _address,

if (fd < 0) {
uint32 flags = unmapAddressRange ? CREATE_AREA_UNMAP_ADDRESS_RANGE : 0;
return vm_create_anonymous_area(team, name, _address, addressSpec, size,
B_NO_LOCK, protection, 0, flags, kernel);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = *_address;
virtualRestrictions.address_specification = addressSpec;
physical_address_restrictions physicalRestrictions = {};
return vm_create_anonymous_area(team, name, size, B_NO_LOCK, protection,
flags, &virtualRestrictions, &physicalRestrictions, kernel,
_address);
}

// get the open flags of the FD

@@ -1795,9 +1819,13 @@ _vm_map_file(team_id team, const char* name, void** _address,
cache->Lock();

VMArea* area;
status = map_backing_store(locker.AddressSpace(), cache, _address,
offset, size, addressSpec, 0, protection, mapping, &area, name,
unmapAddressRange ? CREATE_AREA_UNMAP_ADDRESS_RANGE : 0, kernel);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *_address;
addressRestrictions.address_specification = addressSpec;
status = map_backing_store(locker.AddressSpace(), cache, offset, name, size,
0, protection, mapping,
unmapAddressRange ? CREATE_AREA_UNMAP_ADDRESS_RANGE : 0,
&addressRestrictions, kernel, &area, _address);

if (status != B_OK || mapping == REGION_PRIVATE_MAP) {
// map_backing_store() cannot know we no longer need the ref

@@ -1936,9 +1964,13 @@ vm_clone_area(team_id team, const char* name, void** address,
if (sourceArea->cache_type == CACHE_TYPE_NULL)
status = B_NOT_ALLOWED;
else {
status = map_backing_store(targetAddressSpace, cache, address,
sourceArea->cache_offset, sourceArea->Size(), addressSpec,
sourceArea->wiring, protection, mapping, &newArea, name, 0, kernel);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *address;
addressRestrictions.address_specification = addressSpec;
status = map_backing_store(targetAddressSpace, cache,
sourceArea->cache_offset, name, sourceArea->Size(),
sourceArea->wiring, protection, mapping, 0, &addressRestrictions,
kernel, &newArea, address);
}
if (status == B_OK && mapping != REGION_PRIVATE_MAP) {
// If the mapping is REGION_PRIVATE_MAP, map_backing_store() needed
@@ -2227,10 +2259,14 @@ vm_copy_area(team_id team, const char* name, void** _address,
// existing one, if this is a shared area.

VMArea* target;
status = map_backing_store(targetAddressSpace, cache, _address,
source->cache_offset, source->Size(), addressSpec, source->wiring,
protection, sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
&target, name, writableCopy ? 0 : CREATE_AREA_DONT_COMMIT_MEMORY, true);
virtual_address_restrictions addressRestrictions = {};
addressRestrictions.address = *_address;
addressRestrictions.address_specification = addressSpec;
status = map_backing_store(targetAddressSpace, cache, source->cache_offset,
name, source->Size(), source->wiring, protection,
sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
writableCopy ? 0 : CREATE_AREA_DONT_COMMIT_MEMORY,
&addressRestrictions, true, &target, _address);
if (status < B_OK)
return status;

@@ -3659,11 +3695,14 @@ vm_init(kernel_args* args)

#if DEBUG_CACHE_LIST
if (vm_page_num_free_pages() >= 200 * 1024 * 1024 / B_PAGE_SIZE) {
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
create_area_etc(VMAddressSpace::KernelID(), "cache info table",
(void**)&sCacheInfoTable, B_ANY_KERNEL_ADDRESS,
ROUNDUP(kCacheInfoTableCount * sizeof(cache_info), B_PAGE_SIZE),
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
CREATE_AREA_DONT_WAIT);
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
CREATE_AREA_DONT_WAIT, &virtualRestrictions, &physicalRestrictions,
(void**)&sCacheInfoTable);
}
#endif // DEBUG_CACHE_LIST
@@ -5593,14 +5632,17 @@ clone_area(const char* name, void** _address, uint32 addressSpec,

area_id
create_area_etc(team_id team, const char* name, void** address,
uint32 addressSpec, uint32 size, uint32 lock, uint32 protection,
phys_addr_t physicalAddress, uint32 flags)
create_area_etc(team_id team, const char* name, uint32 size, uint32 lock,
uint32 protection, uint32 flags,
const virtual_address_restrictions* virtualAddressRestrictions,
const physical_address_restrictions* physicalAddressRestrictions,
void** _address)
{
fix_protection(&protection);

return vm_create_anonymous_area(team, (char*)name, address, addressSpec,
size, lock, protection, physicalAddress, flags, true);
return vm_create_anonymous_area(team, name, size, lock, protection, flags,
virtualAddressRestrictions, physicalAddressRestrictions, true,
_address);
}

@@ -5610,8 +5652,13 @@ create_area(const char* name, void** _address, uint32 addressSpec, size_t size,
{
fix_protection(&protection);

return vm_create_anonymous_area(VMAddressSpace::KernelID(), (char*)name,
_address, addressSpec, size, lock, protection, 0, 0, true);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = *_address;
virtualRestrictions.address_specification = addressSpec;
physical_address_restrictions physicalRestrictions = {};
return vm_create_anonymous_area(VMAddressSpace::KernelID(), name, size,
lock, protection, 0, &virtualRestrictions, &physicalRestrictions, true,
_address);
}

@@ -5849,9 +5896,13 @@ _user_create_area(const char* userName, void** userAddress, uint32 addressSpec,

fix_protection(&protection);

area_id area = vm_create_anonymous_area(VMAddressSpace::CurrentID(),
(char*)name, &address, addressSpec, size, lock, protection, 0, 0,
false);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = address;
virtualRestrictions.address_specification = addressSpec;
physical_address_restrictions physicalRestrictions = {};
area_id area = vm_create_anonymous_area(VMAddressSpace::CurrentID(), name,
size, lock, protection, 0, &virtualRestrictions, &physicalRestrictions,
false, &address);

if (area >= B_OK
&& user_memcpy(userAddress, &address, sizeof(address)) < B_OK) {
@@ -3337,24 +3337,56 @@ allocate_page_run(page_num_t start, page_num_t length, uint32 flags,
set the allocated pages to, whether the pages shall be marked busy
(VM_PAGE_ALLOC_BUSY), and whether the pages shall be cleared
(VM_PAGE_ALLOC_CLEAR).
\param base The first acceptable physical address where the page run may
start.
\param limit The last acceptable physical address where the page run may
end (i.e. it must hold runStartAddress + runSize <= limit). If \c 0,
the limit is ignored.
\param length The number of contiguous pages to allocate.
\param restrictions Restrictions to the physical addresses of the page run
to allocate, including \c low_address, the first acceptable physical
address where the page run may start, \c high_address, the last
acceptable physical address where the page run may end (i.e. it must
hold \code runStartAddress + length <= high_address \endcode),
\c alignment, the alignment of the page run start address, and
\c boundary, multiples of which the page run must not cross.
Values set to \c 0 are ignored.
\param priority The page reservation priority (as passed to
vm_page_reserve_pages()).
\return The first page of the allocated page run on success; \c NULL
when the allocation failed.
*/
vm_page*
vm_page_allocate_page_run(uint32 flags, phys_addr_t base, phys_addr_t limit,
page_num_t length, int priority)
vm_page_allocate_page_run(uint32 flags, page_num_t length,
const physical_address_restrictions* restrictions, int priority)
{
page_num_t start = base / B_PAGE_SIZE;
page_num_t end = std::min(limit > 0 ? limit / B_PAGE_SIZE : sNumPages,
sNumPages);
// compute start and end page index
page_num_t requestedStart
= std::max(restrictions->low_address / B_PAGE_SIZE, sPhysicalPageOffset)
- sPhysicalPageOffset;
page_num_t start = requestedStart;
page_num_t end;
if (restrictions->high_address > 0) {
end = std::max(restrictions->high_address / B_PAGE_SIZE,
sPhysicalPageOffset)
- sPhysicalPageOffset;
} else
end = sNumPages;

// compute alignment mask
page_num_t alignmentMask
= std::max(restrictions->alignment / B_PAGE_SIZE, (phys_addr_t)1) - 1;
ASSERT(((alignmentMask + 1) & alignmentMask) == 0);
// alignment must be a power of 2

// compute the boundary shift
uint32 boundaryShift = 0;
if (restrictions->boundary != 0) {
page_num_t boundary = restrictions->boundary / B_PAGE_SIZE;
// boundary must be a power of two and not less than alignment and
// length
ASSERT(((boundary - 1) & boundary) == 0);
ASSERT(boundary >= alignmentMask + 1);
ASSERT(boundary >= length);

while ((boundary >>= 1) > 0)
boundaryShift++;
}

vm_page_reservation reservation;
vm_page_reserve_pages(&reservation, length, priority);
@@ -3369,12 +3401,31 @@ vm_page_allocate_page_run(uint32 flags, phys_addr_t base, phys_addr_t limit,
int useCached = freePages > 0 && (page_num_t)freePages > 2 * length ? 0 : 1;

for (;;) {
if (alignmentMask != 0 || boundaryShift != 0) {
page_num_t offsetStart = start + sPhysicalPageOffset;

// enforce alignment
if ((offsetStart & alignmentMask) != 0) {
offsetStart = ((offsetStart + alignmentMask) & ~alignmentMask)
- sPhysicalPageOffset;
}

// enforce boundary
if (offsetStart << boundaryShift
!= (offsetStart + length - 1) << boundaryShift) {
offsetStart = (offsetStart + length - 1) << boundaryShift
>> boundaryShift;
}

start = offsetStart - sPhysicalPageOffset;
}

if (start + length > end) {
if (useCached == 0) {
// The first iteration with free pages only was unsuccessful.
// Try again also considering cached pages.
useCached = 1;
start = base >> PAGE_SHIFT;
start = requestedStart;
continue;
}