* Introduced {malloc,memalign,free}_etc(), which take an additional "flags"
  argument. They replace the previous special-purpose allocation functions
  (malloc_nogrow(), vip_io_request_malloc()).
* Moved the I/O VIP heap to heap.cpp accordingly.
* Added quite a bit of passing around of allocation flags in the VM,
  particularly in the VM*AddressSpace classes.
* Fixed IOBuffer::GetNextVirtualVec(): it was ignoring the VIP flag and always
  allocating from the normal heap.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35316 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 94a877f0e5
commit deee8524b7
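For orientation, here is a minimal usage sketch of the new flag-based allocation API before the diff itself. The call sites are hypothetical; only malloc_etc()/free_etc() and the HEAP_* flag constants come from this change, and callers of the removed malloc_nogrow()/vip_io_request_malloc() are expected to migrate along these lines.

// Hypothetical kernel call sites illustrating the {malloc,free}_etc() API
// introduced by this change. HEAP_DONT_WAIT_FOR_MEMORY and
// HEAP_DONT_LOCK_KERNEL_SPACE cover the old malloc_nogrow() case;
// HEAP_PRIORITY_VIP routes the allocation to the dedicated VIP heap.
#include <KernelExport.h>
#include <heap.h>

static status_t
example_allocations()
{
	const uint32 nogrowFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;

	// allocation from a VM path that must not wait for the heap to grow
	void* buffer = malloc_etc(256, nogrowFlags);
	if (buffer == NULL)
		return B_NO_MEMORY;

	// VIP allocation for the I/O path
	void* vipBuffer = malloc_etc(64, HEAP_PRIORITY_VIP);
	if (vipBuffer == NULL) {
		free_etc(buffer, nogrowFlags);
		return B_NO_MEMORY;
	}

	// free_etc() takes the same flags, so the matching heap is used
	free_etc(vipBuffer, HEAP_PRIORITY_VIP);
	free_etc(buffer, nogrowFlags);
	return B_OK;
}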
@ -23,6 +23,12 @@
|
||||
#define HEAP_AREA_USE_THRESHOLD 1 * 1024 * 1024
|
||||
|
||||
|
||||
// allocation/deallocation flags for {malloc,free}_etc()
|
||||
#define HEAP_DONT_WAIT_FOR_MEMORY 0x01
|
||||
#define HEAP_DONT_LOCK_KERNEL_SPACE 0x02
|
||||
#define HEAP_PRIORITY_VIP 0x04
|
||||
|
||||
|
||||
typedef struct heap_class_s {
|
||||
const char *name;
|
||||
uint32 initial_percentage;
|
||||
@ -41,10 +47,9 @@ typedef struct heap_allocator_s heap_allocator;
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// malloc- and memalign_nogrow disallow waiting for a grow to happen - only to
|
||||
// be used by vm functions that may deadlock on a triggered area creation.
|
||||
void* memalign_nogrow(size_t alignment, size_t size);
|
||||
void* malloc_nogrow(size_t size);
|
||||
|
||||
void* memalign_etc(size_t alignment, size_t size, uint32 flags);
|
||||
void free_etc(void* address, uint32 flags);
|
||||
|
||||
void* memalign(size_t alignment, size_t size);
|
||||
|
||||
@ -74,6 +79,13 @@ status_t heap_init_post_thread();
|
||||
#endif
|
||||
|
||||
|
||||
static inline void*
|
||||
malloc_etc(size_t size, uint32 flags)
|
||||
{
|
||||
return memalign_etc(0, size, flags);
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
#include <new>
|
||||
@ -81,21 +93,34 @@ status_t heap_init_post_thread();
|
||||
#include <util/SinglyLinkedList.h>
|
||||
|
||||
|
||||
static const struct nogrow_t {
|
||||
} nogrow = {};
|
||||
struct malloc_flags {
|
||||
uint32 flags;
|
||||
|
||||
malloc_flags(uint32 flags)
|
||||
:
|
||||
flags(flags)
|
||||
{
|
||||
}
|
||||
|
||||
malloc_flags(const malloc_flags& other)
|
||||
:
|
||||
flags(other.flags)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
inline void*
|
||||
operator new(size_t size, const nogrow_t& nogrow) throw()
|
||||
operator new(size_t size, const malloc_flags& flags) throw()
|
||||
{
|
||||
return malloc_nogrow(size);
|
||||
return malloc_etc(size, flags.flags);
|
||||
}
|
||||
|
||||
|
||||
inline void*
|
||||
operator new[](size_t size, const nogrow_t& nogrow) throw()
|
||||
operator new[](size_t size, const malloc_flags& flags) throw()
|
||||
{
|
||||
return malloc_nogrow(size);
|
||||
return malloc_etc(size, flags.flags);
|
||||
}
|
||||
|
||||
|
||||
|
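The heap.h hunk above also adds a malloc_flags wrapper plus matching placement operator new overloads for allocating C++ objects with flags. A brief, hypothetical sketch of the intended pattern (ExampleObject and both helpers are made up for illustration; <heap.h> is assumed to be included, and the VM classes later in this diff use the same construct/destroy sequence):

// Hypothetical example of allocating and releasing a C++ object with
// allocation flags, via the operator new(const malloc_flags&) added above.
struct ExampleObject {
	int32 value;

	ExampleObject() : value(0) {}
};

static ExampleObject*
create_example(uint32 allocationFlags)
{
	ExampleObject* object = new(malloc_flags(allocationFlags)) ExampleObject;
	if (object == NULL)
		return NULL;
	return object;
}

static void
delete_example(ExampleObject* object, uint32 allocationFlags)
{
	if (object == NULL)
		return;

	// mirror of DeleteArea()/UnreserveAddressRange() below: destroy the
	// object explicitly, then free it with the flags it was allocated with
	object->~ExampleObject();
	free_etc(object, allocationFlags);
}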
@ -8,24 +8,26 @@
|
||||
#define _SLAB_SLAB_H_
|
||||
|
||||
|
||||
#include <KernelExport.h>
|
||||
#include <OS.h>
|
||||
#include <heap.h>
|
||||
|
||||
|
||||
enum {
|
||||
/* create_object_cache_etc flags */
|
||||
CACHE_NO_DEPOT = 1 << 0,
|
||||
CACHE_UNLOCKED_PAGES = 1 << 1, // unsupported
|
||||
CACHE_LARGE_SLAB = 1 << 2,
|
||||
/* object_cache_{alloc,free}() flags */
|
||||
CACHE_DONT_WAIT_FOR_MEMORY = HEAP_DONT_WAIT_FOR_MEMORY,
|
||||
CACHE_DONT_LOCK_KERNEL_SPACE = HEAP_DONT_LOCK_KERNEL_SPACE,
|
||||
CACHE_PRIORITY_VIP = HEAP_PRIORITY_VIP,
|
||||
CACHE_ALLOC_FLAGS = CACHE_DONT_WAIT_FOR_MEMORY
|
||||
| CACHE_DONT_LOCK_KERNEL_SPACE
|
||||
| CACHE_PRIORITY_VIP,
|
||||
|
||||
/* object_cache_{alloc,free}() flags */
|
||||
CACHE_DONT_WAIT_FOR_MEMORY = 1 << 8,
|
||||
CACHE_DONT_LOCK_KERNEL_SPACE = 1 << 9,
|
||||
CACHE_PRIORITY_VIP = 1 << 10,
|
||||
/* create_object_cache_etc flags */
|
||||
CACHE_NO_DEPOT = 0x08000000,
|
||||
CACHE_UNLOCKED_PAGES = 0x10000000, // unsupported
|
||||
CACHE_LARGE_SLAB = 0x20000000,
|
||||
|
||||
/* internal */
|
||||
CACHE_ALIGN_ON_SIZE = 1 << 30,
|
||||
CACHE_DURING_BOOT = 1 << 31
|
||||
CACHE_ALIGN_ON_SIZE = 0x40000000,
|
||||
CACHE_DURING_BOOT = 0x80000000
|
||||
};
|
||||
|
||||
struct ObjectCache;
|
||||
|
@ -289,8 +289,6 @@ public:
|
||||
bool partialTransfer,
|
||||
size_t bytesTransferred) = 0;
|
||||
|
||||
void operator delete(void* address, size_t size);
|
||||
|
||||
static status_t IORequestCallback(void* data,
|
||||
io_request* request, status_t status,
|
||||
bool partialTransfer,
|
||||
|
@ -70,25 +70,31 @@ public:
|
||||
|
||||
virtual VMArea* LookupArea(addr_t address) const = 0;
|
||||
virtual VMArea* CreateArea(const char* name, uint32 wiring,
|
||||
uint32 protection) = 0;
|
||||
virtual void DeleteArea(VMArea* area) = 0;
|
||||
uint32 protection,
|
||||
uint32 allocationFlags) = 0;
|
||||
virtual void DeleteArea(VMArea* area,
|
||||
uint32 allocationFlags) = 0;
|
||||
virtual status_t InsertArea(void** _address, uint32 addressSpec,
|
||||
size_t size, VMArea* area) = 0;
|
||||
virtual void RemoveArea(VMArea* area) = 0;
|
||||
size_t size, VMArea* area,
|
||||
uint32 allocationFlags) = 0;
|
||||
virtual void RemoveArea(VMArea* area,
|
||||
uint32 allocationFlags) = 0;
|
||||
|
||||
virtual bool CanResizeArea(VMArea* area, size_t newSize) = 0;
|
||||
virtual status_t ResizeArea(VMArea* area, size_t newSize) = 0;
|
||||
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize)
|
||||
= 0;
|
||||
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize)
|
||||
= 0;
|
||||
virtual status_t ResizeArea(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags) = 0;
|
||||
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags) = 0;
|
||||
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags) = 0;
|
||||
|
||||
virtual status_t ReserveAddressRange(void** _address,
|
||||
uint32 addressSpec, size_t size,
|
||||
uint32 flags) = 0;
|
||||
uint32 flags, uint32 allocationFlags) = 0;
|
||||
virtual status_t UnreserveAddressRange(addr_t address,
|
||||
size_t size) = 0;
|
||||
virtual void UnreserveAllAddressRanges() = 0;
|
||||
size_t size, uint32 allocationFlags) = 0;
|
||||
virtual void UnreserveAllAddressRanges(
|
||||
uint32 allocationFlags) = 0;
|
||||
|
||||
virtual void Dump() const;
|
||||
|
||||
|
@ -53,7 +53,7 @@ protected:
|
||||
uint32 wiring, uint32 protection);
|
||||
~VMArea();
|
||||
|
||||
status_t Init(const char* name);
|
||||
status_t Init(const char* name, uint32 allocationFlags);
|
||||
|
||||
protected:
|
||||
friend class VMAddressSpace;
|
||||
|
@ -67,7 +67,7 @@ public:
|
||||
VMCache();
|
||||
virtual ~VMCache();
|
||||
|
||||
status_t Init(uint32 cacheType);
|
||||
status_t Init(uint32 cacheType, uint32 allocationFlags);
|
||||
|
||||
virtual void Delete();
|
||||
|
||||
|
@ -1369,7 +1369,7 @@ m68k_vm_translation_map_init_post_area(kernel_args *args)
|
||||
|
||||
area = vm_create_null_area(VMAddressSpace::KernelID(),
|
||||
"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
|
||||
B_PAGE_SIZE, CREATE_AREA_PRIORITY_VIP);
|
||||
B_PAGE_SIZE, 0);
|
||||
if (area < B_OK)
|
||||
return area;
|
||||
|
||||
|
@ -546,7 +546,7 @@ LargeMemoryPhysicalPageMapper::InitPostArea(kernel_args* args)
|
||||
temp = (void*)fInitialPool.virtualBase;
|
||||
area = vm_create_null_area(VMAddressSpace::KernelID(),
|
||||
"physical page pool space", &temp, B_EXACT_ADDRESS,
|
||||
1024 * B_PAGE_SIZE, CREATE_AREA_PRIORITY_VIP);
|
||||
1024 * B_PAGE_SIZE, 0);
|
||||
if (area < B_OK) {
|
||||
panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
|
||||
"create area for physical page pool space.");
|
||||
|
src/system/kernel/cache/vnode_store.cpp
@ -17,9 +17,9 @@
|
||||
|
||||
|
||||
status_t
|
||||
VMVnodeCache::Init(struct vnode *vnode)
|
||||
VMVnodeCache::Init(struct vnode *vnode, uint32 allocationFlags)
|
||||
{
|
||||
status_t error = VMCache::Init(CACHE_TYPE_VNODE);
|
||||
status_t error = VMCache::Init(CACHE_TYPE_VNODE, allocationFlags);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
|
src/system/kernel/cache/vnode_store.h
@ -15,7 +15,7 @@ struct file_cache_ref;
|
||||
|
||||
class VMVnodeCache : public VMCache {
|
||||
public:
|
||||
status_t Init(struct vnode *vnode);
|
||||
status_t Init(struct vnode *vnode, uint32 allocationFlags);
|
||||
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
||||
|
@ -28,8 +28,6 @@
|
||||
#endif
|
||||
|
||||
|
||||
#define VIP_HEAP_SIZE 1024 * 1024
|
||||
|
||||
// partial I/O operation phases
|
||||
enum {
|
||||
PHASE_READ_BEGIN = 0,
|
||||
@ -37,8 +35,6 @@ enum {
|
||||
PHASE_DO_ALL = 2
|
||||
};
|
||||
|
||||
heap_allocator* sVIPHeap;
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
|
||||
@ -56,13 +52,6 @@ IORequestChunk::~IORequestChunk()
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
IORequestChunk::operator delete(void* address, size_t size)
|
||||
{
|
||||
io_request_free(address);
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
|
||||
|
||||
@ -80,7 +69,7 @@ IOBuffer::Create(uint32 count, bool vip)
|
||||
{
|
||||
size_t size = sizeof(IOBuffer) + sizeof(iovec) * (count - 1);
|
||||
IOBuffer* buffer
|
||||
= (IOBuffer*)(vip ? vip_io_request_malloc(size) : malloc(size));
|
||||
= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
|
||||
if (buffer == NULL)
|
||||
return NULL;
|
||||
|
||||
@ -101,10 +90,7 @@ IOBuffer::Delete()
|
||||
if (this == NULL)
|
||||
return;
|
||||
|
||||
if (fVIP)
|
||||
vip_io_request_free(this);
|
||||
else
|
||||
free(this);
|
||||
free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
|
||||
}
|
||||
|
||||
|
||||
@ -130,7 +116,8 @@ IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
|
||||
{
|
||||
virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
|
||||
if (cookie == NULL) {
|
||||
cookie = new(std::nothrow) virtual_vec_cookie;
|
||||
cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
|
||||
virtual_vec_cookie;
|
||||
if (cookie == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
@ -207,7 +194,7 @@ IOBuffer::FreeVirtualVecCookie(void* _cookie)
|
||||
if (cookie->mapped_area >= 0)
|
||||
delete_area(cookie->mapped_area);
|
||||
|
||||
delete cookie;
|
||||
free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
|
||||
}
|
||||
|
||||
|
||||
@ -715,7 +702,9 @@ IORequest::~IORequest()
|
||||
/* static */ IORequest*
|
||||
IORequest::Create(bool vip)
|
||||
{
|
||||
return vip ? new(vip_io_alloc) IORequest : new(std::nothrow) IORequest;
|
||||
return vip
|
||||
? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
|
||||
: new(std::nothrow) IORequest;
|
||||
}
|
||||
|
||||
|
||||
@ -1309,106 +1298,3 @@ IORequest::Dump() const
|
||||
set_debug_variable("_buffer", (addr_t)fBuffer);
|
||||
set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark - allocator
|
||||
|
||||
|
||||
#if KERNEL_HEAP_LEAK_CHECK
|
||||
static addr_t
|
||||
get_caller()
|
||||
{
|
||||
// Find the first return address outside of the allocator code. Note, that
|
||||
// this makes certain assumptions about how the code for the functions
|
||||
// ends up in the kernel object.
|
||||
addr_t returnAddresses[5];
|
||||
int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1,
|
||||
STACK_TRACE_KERNEL | STACK_TRACE_USER);
|
||||
|
||||
// find the first return address inside the VIP allocator
|
||||
int32 i = 0;
|
||||
for (i = 0; i < depth; i++) {
|
||||
if (returnAddresses[i] >= (addr_t)&get_caller
|
||||
&& returnAddresses[i] < (addr_t)&vip_io_request_allocator_init) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// now continue until we have the first one outside
|
||||
for (; i < depth; i++) {
|
||||
if (returnAddresses[i] < (addr_t)&get_caller
|
||||
|| returnAddresses[i] > (addr_t)&vip_io_request_allocator_init) {
|
||||
return returnAddresses[i];
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void*
|
||||
vip_io_request_malloc(size_t size)
|
||||
{
|
||||
void* address = heap_memalign(sVIPHeap, 0, size);
|
||||
#if KDEBUG
|
||||
if (address == NULL)
|
||||
panic("vip_io_request_malloc(): VIP heap %p out of memory", sVIPHeap);
|
||||
#endif
|
||||
return address;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
vip_io_request_free(void* address)
|
||||
{
|
||||
heap_free(sVIPHeap, address);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
io_request_free(void* address)
|
||||
{
|
||||
if (heap_free(sVIPHeap, address) != B_OK)
|
||||
free(address);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
vip_io_request_allocator_init()
|
||||
{
|
||||
static const heap_class heapClass = {
|
||||
"VIP I/O", /* name */
|
||||
100, /* initial percentage */
|
||||
B_PAGE_SIZE / 8, /* max allocation size */
|
||||
B_PAGE_SIZE, /* page size */
|
||||
8, /* min bin size */
|
||||
4, /* bin alignment */
|
||||
8, /* min count per page */
|
||||
16 /* max waste per page */
|
||||
};
|
||||
|
||||
void* address = NULL;
|
||||
area_id area = create_area("VIP I/O heap", &address, B_ANY_KERNEL_ADDRESS,
|
||||
VIP_HEAP_SIZE, B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
|
||||
if (area < B_OK) {
|
||||
panic("vip_io_request_allocator_init(): couldn't allocate VIP I/O "
|
||||
"heap area");
|
||||
return;
|
||||
}
|
||||
|
||||
sVIPHeap = heap_create_allocator("VIP I/O heap", (addr_t)address,
|
||||
VIP_HEAP_SIZE, &heapClass, false);
|
||||
if (sVIPHeap == NULL) {
|
||||
panic("vip_io_request_allocator_init(): failed to create VIP I/O "
|
||||
"heap\n");
|
||||
return;
|
||||
}
|
||||
|
||||
#if KERNEL_HEAP_LEAK_CHECK
|
||||
heap_set_get_caller(sVIPHeap, &get_caller);
|
||||
#endif
|
||||
|
||||
dprintf("vip_io_request_allocator_init(): created VIP I/O heap: %p\n",
|
||||
sVIPHeap);
|
||||
}
|
||||
|
@ -102,8 +102,6 @@ public:
|
||||
DoublyLinkedListLink<IORequestChunk>*
|
||||
ListLink() { return &fListLink; }
|
||||
|
||||
void operator delete(void* address, size_t size);
|
||||
|
||||
protected:
|
||||
void SetStatus(status_t status)
|
||||
{ fStatus = status; }
|
||||
@ -352,33 +350,4 @@ private:
|
||||
typedef DoublyLinkedList<IORequest> IORequestList;
|
||||
|
||||
|
||||
// allocator for VIP I/O request memory
|
||||
void* vip_io_request_malloc(size_t size);
|
||||
void vip_io_request_free(void* address);
|
||||
|
||||
void io_request_free(void* address);
|
||||
// frees regardless of whether allocated with vip_io_request_malloc() or
|
||||
// malloc()
|
||||
|
||||
void vip_io_request_allocator_init();
|
||||
|
||||
|
||||
static const struct vip_io_alloc_t {
|
||||
} vip_io_alloc = {};
|
||||
|
||||
|
||||
inline void*
|
||||
operator new(size_t size, const vip_io_alloc_t& vip_io_alloc) throw ()
|
||||
{
|
||||
return vip_io_request_malloc(size);
|
||||
}
|
||||
|
||||
|
||||
inline void*
|
||||
operator new[](size_t size, const vip_io_alloc_t& vip_io_alloc) throw ()
|
||||
{
|
||||
return vip_io_request_malloc(size);
|
||||
}
|
||||
|
||||
|
||||
#endif // IO_REQUEST_H
|
||||
|
@ -2257,7 +2257,6 @@ device_manager_init(struct kernel_args* args)
|
||||
{
|
||||
TRACE(("device manager init\n"));
|
||||
|
||||
vip_io_request_allocator_init();
|
||||
IOSchedulerRoster::Init();
|
||||
|
||||
dm_init_id_generator();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*/
|
||||
|
||||
@ -14,6 +14,9 @@
|
||||
#endif
|
||||
|
||||
|
||||
#include <heap.h>
|
||||
|
||||
|
||||
// #pragma mark - AsyncIOCallback
|
||||
|
||||
|
||||
@ -22,13 +25,6 @@ AsyncIOCallback::~AsyncIOCallback()
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
AsyncIOCallback::operator delete(void* address, size_t size)
|
||||
{
|
||||
io_request_free(address);
|
||||
}
|
||||
|
||||
|
||||
/* static */ status_t
|
||||
AsyncIOCallback::IORequestCallback(void* data, io_request* request,
|
||||
status_t status, bool partialTransfer, size_t transferEndOffset)
|
||||
@ -61,9 +57,6 @@ struct iterative_io_cookie {
|
||||
off_t request_offset;
|
||||
io_request_finished_callback next_finished_callback;
|
||||
void* next_finished_cookie;
|
||||
|
||||
void operator delete(void* address, size_t size)
|
||||
{ io_request_free(address); }
|
||||
};
|
||||
|
||||
|
||||
@ -484,7 +477,7 @@ do_iterative_fd_io(int fd, io_request* request, iterative_io_get_vecs getVecs,
|
||||
|
||||
iterative_io_cookie* iterationCookie
|
||||
= (request->Flags() & B_VIP_IO_REQUEST) != 0
|
||||
? new(vip_io_alloc) iterative_io_cookie
|
||||
? new(malloc_flags(HEAP_PRIORITY_VIP)) iterative_io_cookie
|
||||
: new(std::nothrow) iterative_io_cookie;
|
||||
if (iterationCookie == NULL) {
|
||||
// no memory -- fall back to synchronous I/O
|
||||
|
@ -142,6 +142,10 @@ typedef SinglyLinkedList<DeferredFreeListEntry> DeferredFreeList;
|
||||
typedef SinglyLinkedList<DeferredDeletable> DeferredDeletableList;
|
||||
|
||||
|
||||
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
#define VIP_HEAP_SIZE 1024 * 1024
|
||||
|
||||
// Heap class configuration
|
||||
#define HEAP_CLASS_COUNT 3
|
||||
static heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
|
||||
@ -183,11 +187,15 @@ static heap_allocator *sHeaps[HEAP_CLASS_COUNT * B_MAX_CPU_COUNT];
|
||||
static uint32 *sLastGrowRequest[HEAP_CLASS_COUNT * B_MAX_CPU_COUNT];
|
||||
static uint32 *sLastHandledGrowRequest[HEAP_CLASS_COUNT * B_MAX_CPU_COUNT];
|
||||
|
||||
static heap_allocator *sVIPHeap;
|
||||
static heap_allocator *sGrowHeap = NULL;
|
||||
static thread_id sHeapGrowThread = -1;
|
||||
static sem_id sHeapGrowSem = -1;
|
||||
static sem_id sHeapGrownNotify = -1;
|
||||
static bool sAddGrowHeap = false;
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
static DeferredFreeList sDeferredFreeList;
|
||||
static DeferredDeletableList sDeferredDeletableList;
|
||||
static spinlock sDeferredFreeListLock;
|
||||
@ -271,6 +279,9 @@ class Free : public AbstractTraceEntry {
|
||||
// #pragma mark - Debug functions
|
||||
|
||||
|
||||
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
#if KERNEL_HEAP_LEAK_CHECK
|
||||
static addr_t
|
||||
get_caller()
|
||||
@ -809,6 +820,8 @@ dump_allocations_per_caller(int argc, char **argv)
|
||||
|
||||
#endif // KERNEL_HEAP_LEAK_CHECK
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
#if PARANOID_HEAP_VALIDATION
|
||||
static void
|
||||
@ -1797,9 +1810,6 @@ heap_realloc(heap_allocator *heap, void *address, void **newAddress,
|
||||
}
|
||||
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
inline uint32
|
||||
heap_index_for(size_t size, int32 cpu)
|
||||
{
|
||||
@ -1818,35 +1828,47 @@ heap_index_for(size_t size, int32 cpu)
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
deferred_deleter(void *arg, int iteration)
|
||||
static void *
|
||||
memalign_nogrow(size_t alignment, size_t size)
|
||||
{
|
||||
// move entries and deletables to on-stack lists
|
||||
InterruptsSpinLocker locker(sDeferredFreeListLock);
|
||||
if (sDeferredFreeList.IsEmpty() && sDeferredDeletableList.IsEmpty())
|
||||
return;
|
||||
// use dedicated memory in the grow thread by default
|
||||
if (thread_get_current_thread_id() == sHeapGrowThread) {
|
||||
void *result = heap_memalign(sGrowHeap, alignment, size);
|
||||
if (!sAddGrowHeap && heap_should_grow(sGrowHeap)) {
|
||||
// hopefully the heap grower will manage to create a new heap
|
||||
// before running out of private memory...
|
||||
dprintf("heap: requesting new grow heap\n");
|
||||
sAddGrowHeap = true;
|
||||
release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
|
||||
}
|
||||
|
||||
DeferredFreeList entries;
|
||||
entries.MoveFrom(&sDeferredFreeList);
|
||||
if (result != NULL)
|
||||
return result;
|
||||
}
|
||||
|
||||
DeferredDeletableList deletables;
|
||||
deletables.MoveFrom(&sDeferredDeletableList);
|
||||
// try public memory, there might be something available
|
||||
void *result = NULL;
|
||||
int32 cpuCount = MIN(smp_get_num_cpus(),
|
||||
(int32)sHeapCount / HEAP_CLASS_COUNT);
|
||||
int32 cpuNumber = smp_get_current_cpu();
|
||||
for (int32 i = 0; i < cpuCount; i++) {
|
||||
uint32 heapIndex = heap_index_for(size, cpuNumber++ % cpuCount);
|
||||
heap_allocator *heap = sHeaps[heapIndex];
|
||||
result = heap_memalign(heap, alignment, size);
|
||||
if (result != NULL)
|
||||
return result;
|
||||
}
|
||||
|
||||
locker.Unlock();
|
||||
// no memory available
|
||||
if (thread_get_current_thread_id() == sHeapGrowThread)
|
||||
panic("heap: all heaps have run out of memory while growing\n");
|
||||
else
|
||||
dprintf("heap: all heaps have run out of memory\n");
|
||||
|
||||
// free the entries
|
||||
while (DeferredFreeListEntry* entry = entries.RemoveHead())
|
||||
free(entry);
|
||||
|
||||
// delete the deletables
|
||||
while (DeferredDeletable* deletable = deletables.RemoveHead())
|
||||
delete deletable;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
|
||||
|
||||
static status_t
|
||||
heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
|
||||
{
|
||||
@ -1907,6 +1929,41 @@ heap_grow_thread(void *)
|
||||
}
|
||||
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
static void
|
||||
deferred_deleter(void *arg, int iteration)
|
||||
{
|
||||
// move entries and deletables to on-stack lists
|
||||
InterruptsSpinLocker locker(sDeferredFreeListLock);
|
||||
if (sDeferredFreeList.IsEmpty() && sDeferredDeletableList.IsEmpty())
|
||||
return;
|
||||
|
||||
DeferredFreeList entries;
|
||||
entries.MoveFrom(&sDeferredFreeList);
|
||||
|
||||
DeferredDeletableList deletables;
|
||||
deletables.MoveFrom(&sDeferredDeletableList);
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
// free the entries
|
||||
while (DeferredFreeListEntry* entry = entries.RemoveHead())
|
||||
free(entry);
|
||||
|
||||
// delete the deletables
|
||||
while (DeferredDeletable* deletable = deletables.RemoveHead())
|
||||
delete deletable;
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
|
||||
|
||||
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
status_t
|
||||
heap_init(addr_t base, size_t size)
|
||||
{
|
||||
@ -1988,9 +2045,13 @@ heap_init_post_sem()
|
||||
}
|
||||
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
|
||||
status_t
|
||||
heap_init_post_thread()
|
||||
{
|
||||
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
void *address = NULL;
|
||||
area_id growHeapArea = create_area("dedicated grow heap", &address,
|
||||
B_ANY_KERNEL_BLOCK_ADDRESS, HEAP_DEDICATED_GROW_SIZE, B_FULL_LOCK,
|
||||
@ -2039,11 +2100,43 @@ heap_init_post_thread()
|
||||
}
|
||||
}
|
||||
|
||||
// create the VIP heap
|
||||
static const heap_class heapClass = {
|
||||
"VIP I/O", /* name */
|
||||
100, /* initial percentage */
|
||||
B_PAGE_SIZE / 8, /* max allocation size */
|
||||
B_PAGE_SIZE, /* page size */
|
||||
8, /* min bin size */
|
||||
4, /* bin alignment */
|
||||
8, /* min count per page */
|
||||
16 /* max waste per page */
|
||||
};
|
||||
|
||||
area_id vipHeapArea = create_area("VIP heap", &address,
|
||||
B_ANY_KERNEL_ADDRESS, VIP_HEAP_SIZE, B_FULL_LOCK,
|
||||
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
|
||||
if (vipHeapArea < 0) {
|
||||
panic("heap_init_post_thread(): couldn't allocate VIP heap area");
|
||||
return B_ERROR;
|
||||
}
|
||||
|
||||
sVIPHeap = heap_create_allocator("VIP heap", (addr_t)address,
|
||||
VIP_HEAP_SIZE, &heapClass, false);
|
||||
if (sVIPHeap == NULL) {
|
||||
panic("heap_init_post_thread(): failed to create VIP heap\n");
|
||||
return B_ERROR;
|
||||
}
|
||||
|
||||
dprintf("heap_init_post_thread(): created VIP heap: %p\n", sVIPHeap);
|
||||
|
||||
send_signal_etc(sHeapGrowThread, SIGCONT, B_DO_NOT_RESCHEDULE);
|
||||
|
||||
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
|
||||
|
||||
// run the deferred deleter roughly once a second
|
||||
if (register_kernel_daemon(deferred_deleter, NULL, 10) != B_OK)
|
||||
panic("heap_init_post_thread(): failed to init deferred deleter");
|
||||
|
||||
send_signal_etc(sHeapGrowThread, SIGCONT, B_DO_NOT_RESCHEDULE);
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -2148,50 +2241,27 @@ memalign(size_t alignment, size_t size)
|
||||
|
||||
|
||||
void *
|
||||
memalign_nogrow(size_t alignment, size_t size)
|
||||
memalign_etc(size_t alignment, size_t size, uint32 flags)
|
||||
{
|
||||
// use dedicated memory in the grow thread by default
|
||||
if (thread_get_current_thread_id() == sHeapGrowThread) {
|
||||
void *result = heap_memalign(sGrowHeap, alignment, size);
|
||||
if (!sAddGrowHeap && heap_should_grow(sGrowHeap)) {
|
||||
// hopefully the heap grower will manage to create a new heap
|
||||
// before running out of private memory...
|
||||
dprintf("heap: requesting new grow heap\n");
|
||||
sAddGrowHeap = true;
|
||||
release_sem_etc(sHeapGrowSem, 1, B_DO_NOT_RESCHEDULE);
|
||||
}
|
||||
if ((flags & HEAP_PRIORITY_VIP) != 0)
|
||||
return heap_memalign(sVIPHeap, alignment, size);
|
||||
|
||||
if (result != NULL)
|
||||
return result;
|
||||
if ((flags & (HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE))
|
||||
!= 0) {
|
||||
return memalign_nogrow(alignment, size);
|
||||
}
|
||||
|
||||
// try public memory, there might be something available
|
||||
void *result = NULL;
|
||||
int32 cpuCount = MIN(smp_get_num_cpus(),
|
||||
(int32)sHeapCount / HEAP_CLASS_COUNT);
|
||||
int32 cpuNumber = smp_get_current_cpu();
|
||||
for (int32 i = 0; i < cpuCount; i++) {
|
||||
uint32 heapIndex = heap_index_for(size, cpuNumber++ % cpuCount);
|
||||
heap_allocator *heap = sHeaps[heapIndex];
|
||||
result = heap_memalign(heap, alignment, size);
|
||||
if (result != NULL)
|
||||
return result;
|
||||
}
|
||||
|
||||
// no memory available
|
||||
if (thread_get_current_thread_id() == sHeapGrowThread)
|
||||
panic("heap: all heaps have run out of memory while growing\n");
|
||||
else
|
||||
dprintf("heap: all heaps have run out of memory\n");
|
||||
|
||||
return NULL;
|
||||
return memalign(alignment, size);
|
||||
}
|
||||
|
||||
|
||||
void *
|
||||
malloc_nogrow(size_t size)
|
||||
void
|
||||
free_etc(void *address, uint32 flags)
|
||||
{
|
||||
return memalign_nogrow(0, size);
|
||||
if ((flags & HEAP_PRIORITY_VIP) != 0)
|
||||
heap_free(sVIPHeap, address);
|
||||
else
|
||||
free(address);
|
||||
}
|
||||
|
||||
|
||||
@ -2225,6 +2295,10 @@ free(void *address)
|
||||
if (heap_free(sGrowHeap, address) == B_OK)
|
||||
return;
|
||||
|
||||
// or maybe it was allocated from the VIP heap
|
||||
if (heap_free(sVIPHeap, address) == B_OK)
|
||||
return;
|
||||
|
||||
// or maybe it was a huge allocation using an area
|
||||
area_info areaInfo;
|
||||
area_id area = area_for(address);
|
||||
|
@ -213,19 +213,17 @@ memalign(size_t alignment, size_t size)
|
||||
}
|
||||
|
||||
|
||||
void*
|
||||
memalign_nogrow(size_t alignment, size_t size)
|
||||
void *
|
||||
memalign_etc(size_t alignment, size_t size, uint32 flags)
|
||||
{
|
||||
return block_alloc(size, alignment,
|
||||
CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
|
||||
return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
|
||||
}
|
||||
|
||||
|
||||
void*
|
||||
malloc_nogrow(size_t size)
|
||||
void
|
||||
free_etc(void *address, uint32 flags)
|
||||
{
|
||||
return block_alloc(size, 0,
|
||||
CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
|
||||
block_free(address, flags & CACHE_ALLOC_FLAGS);
|
||||
}
|
||||
|
||||
|
||||
|
@ -192,8 +192,10 @@ VMAddressSpace::Create(team_id teamID, addr_t base, size_t size, bool kernel,
|
||||
VMAddressSpace** _addressSpace)
|
||||
{
|
||||
VMAddressSpace* addressSpace = kernel
|
||||
? (VMAddressSpace*)new(nogrow) VMKernelAddressSpace(teamID, base, size)
|
||||
: (VMAddressSpace*)new(nogrow) VMUserAddressSpace(teamID, base, size);
|
||||
? (VMAddressSpace*)new(std::nothrow) VMKernelAddressSpace(teamID, base,
|
||||
size)
|
||||
: (VMAddressSpace*)new(std::nothrow) VMUserAddressSpace(teamID, base,
|
||||
size);
|
||||
if (addressSpace == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
|
@ -409,11 +409,6 @@ public:
|
||||
delete this;
|
||||
}
|
||||
|
||||
void operator delete(void* address, size_t size)
|
||||
{
|
||||
io_request_free(address);
|
||||
}
|
||||
|
||||
private:
|
||||
VMAnonymousCache* fCache;
|
||||
page_num_t fPageIndex;
|
||||
@ -447,13 +442,13 @@ VMAnonymousCache::~VMAnonymousCache()
|
||||
|
||||
status_t
|
||||
VMAnonymousCache::Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
int32 numGuardPages)
|
||||
int32 numGuardPages, uint32 allocationFlags)
|
||||
{
|
||||
TRACE("%p->VMAnonymousCache::Init(canOvercommit = %s, "
|
||||
"numPrecommittedPages = %ld, numGuardPages = %ld)\n", this,
|
||||
canOvercommit ? "yes" : "no", numPrecommittedPages, numGuardPages);
|
||||
|
||||
status_t error = VMCache::Init(CACHE_TYPE_RAM);
|
||||
status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
@ -647,7 +642,7 @@ VMAnonymousCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
|
||||
|
||||
// create our callback
|
||||
WriteCallback* callback = (flags & B_VIP_IO_REQUEST) != 0
|
||||
? new(vip_io_alloc) WriteCallback(this, _callback)
|
||||
? new(malloc_flags(HEAP_PRIORITY_VIP)) WriteCallback(this, _callback)
|
||||
: new(std::nothrow) WriteCallback(this, _callback);
|
||||
if (callback == NULL) {
|
||||
if (newSlot) {
|
||||
|
@ -35,7 +35,8 @@ public:
|
||||
|
||||
status_t Init(bool canOvercommit,
|
||||
int32 numPrecommittedPages,
|
||||
int32 numGuardPages);
|
||||
int32 numGuardPages,
|
||||
uint32 allocationFlags);
|
||||
|
||||
virtual status_t Commit(off_t size, int priority);
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
@ -39,12 +39,12 @@ VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
|
||||
|
||||
status_t
|
||||
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
int32 numGuardPages)
|
||||
int32 numGuardPages, uint32 allocationFlags)
|
||||
{
|
||||
TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = %ld) "
|
||||
"at %p\n", canOvercommit ? "yes" : "no", numGuardPages, store));
|
||||
|
||||
status_t error = VMCache::Init(CACHE_TYPE_RAM);
|
||||
status_t error = VMCache::Init(CACHE_TYPE_RAM, allocationFlags);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
|
@ -18,7 +18,7 @@ public:
|
||||
virtual ~VMAnonymousNoSwapCache();
|
||||
|
||||
status_t Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
int32 numGuardPages);
|
||||
int32 numGuardPages, uint32 allocationFlags);
|
||||
|
||||
virtual status_t Commit(off_t size, int priority);
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
@ -47,13 +47,17 @@ VMArea::VMArea(VMAddressSpace* addressSpace, uint32 wiring, uint32 protection)
|
||||
|
||||
VMArea::~VMArea()
|
||||
{
|
||||
free(page_protections);
|
||||
free(name);
|
||||
const uint32 flags = HEAP_DONT_WAIT_FOR_MEMORY
|
||||
| HEAP_DONT_LOCK_KERNEL_SPACE;
|
||||
// TODO: This might be stricter than necessary.
|
||||
|
||||
free_etc(page_protections, flags);
|
||||
free_etc(name, flags);
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
VMArea::Init(const char* name)
|
||||
VMArea::Init(const char* name, uint32 allocationFlags)
|
||||
{
|
||||
// restrict the area name to B_OS_NAME_LENGTH
|
||||
size_t length = strlen(name) + 1;
|
||||
@ -61,7 +65,7 @@ VMArea::Init(const char* name)
|
||||
length = B_OS_NAME_LENGTH;
|
||||
|
||||
// clone the name
|
||||
this->name = (char*)malloc_nogrow(length);
|
||||
this->name = (char*)malloc_etc(length, allocationFlags);
|
||||
if (this->name == NULL)
|
||||
return B_NO_MEMORY;
|
||||
strlcpy(this->name, name, length);
|
||||
|
@ -577,7 +577,7 @@ VMCache::~VMCache()
|
||||
|
||||
|
||||
status_t
|
||||
VMCache::Init(uint32 cacheType)
|
||||
VMCache::Init(uint32 cacheType, uint32 allocationFlags)
|
||||
{
|
||||
mutex_init(&fLock, "VMCache");
|
||||
VMCache dummyCache;
|
||||
@ -600,7 +600,7 @@ VMCache::Init(uint32 cacheType)
|
||||
// initialize in case the following fails
|
||||
#endif
|
||||
|
||||
fCacheRef = new(nogrow) VMCacheRef(this);
|
||||
fCacheRef = new(malloc_flags(allocationFlags)) VMCacheRef(this);
|
||||
if (fCacheRef == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
@ -1346,15 +1346,20 @@ VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
|
||||
int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
|
||||
int priority)
|
||||
{
|
||||
uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
|
||||
| HEAP_DONT_LOCK_KERNEL_SPACE;
|
||||
if (priority >= VM_PRIORITY_VIP)
|
||||
allocationFlags |= HEAP_PRIORITY_VIP;
|
||||
|
||||
#if ENABLE_SWAP_SUPPORT
|
||||
if (swappable) {
|
||||
// TODO: Respect priority!
|
||||
VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
|
||||
VMAnonymousCache* cache
|
||||
= new(malloc_flags(allocationFlags)) VMAnonymousCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = cache->Init(canOvercommit, numPrecommittedPages,
|
||||
numGuardPages);
|
||||
numGuardPages, allocationFlags);
|
||||
if (error != B_OK) {
|
||||
cache->Delete();
|
||||
return error;
|
||||
@ -1367,12 +1372,13 @@ VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
|
||||
}
|
||||
#endif
|
||||
|
||||
VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
|
||||
VMAnonymousNoSwapCache* cache
|
||||
= new(malloc_flags(allocationFlags)) VMAnonymousNoSwapCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = cache->Init(canOvercommit, numPrecommittedPages,
|
||||
numGuardPages);
|
||||
numGuardPages, allocationFlags);
|
||||
if (error != B_OK) {
|
||||
cache->Delete();
|
||||
return error;
|
||||
@ -1388,11 +1394,15 @@ VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
|
||||
/*static*/ status_t
|
||||
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
|
||||
{
|
||||
VMVnodeCache* cache = new(nogrow) VMVnodeCache;
|
||||
const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
|
||||
| HEAP_DONT_LOCK_KERNEL_SPACE;
|
||||
// Note: Vnode cache creation is never VIP.
|
||||
|
||||
VMVnodeCache* cache = new(malloc_flags(allocationFlags)) VMVnodeCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = cache->Init(vnode);
|
||||
status_t error = cache->Init(vnode, allocationFlags);
|
||||
if (error != B_OK) {
|
||||
cache->Delete();
|
||||
return error;
|
||||
@ -1408,11 +1418,15 @@ VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
|
||||
/*static*/ status_t
|
||||
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
|
||||
{
|
||||
VMDeviceCache* cache = new(nogrow) VMDeviceCache;
|
||||
const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
|
||||
| HEAP_DONT_LOCK_KERNEL_SPACE;
|
||||
// Note: Device cache creation is never VIP.
|
||||
|
||||
VMDeviceCache* cache = new(malloc_flags(allocationFlags)) VMDeviceCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = cache->Init(baseAddress);
|
||||
status_t error = cache->Init(baseAddress, allocationFlags);
|
||||
if (error != B_OK) {
|
||||
cache->Delete();
|
||||
return error;
|
||||
@ -1428,12 +1442,16 @@ VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
|
||||
/*static*/ status_t
|
||||
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
|
||||
{
|
||||
// TODO: Respect priority!
|
||||
VMNullCache* cache = new(nogrow) VMNullCache;
|
||||
uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
|
||||
| HEAP_DONT_LOCK_KERNEL_SPACE;
|
||||
if (priority >= VM_PRIORITY_VIP)
|
||||
allocationFlags |= HEAP_PRIORITY_VIP;
|
||||
|
||||
VMNullCache* cache = new(malloc_flags(allocationFlags)) VMNullCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = cache->Init();
|
||||
status_t error = cache->Init(allocationFlags);
|
||||
if (error != B_OK) {
|
||||
cache->Delete();
|
||||
return error;
|
||||
|
@ -10,10 +10,10 @@
|
||||
|
||||
|
||||
status_t
|
||||
VMDeviceCache::Init(addr_t baseAddress)
|
||||
VMDeviceCache::Init(addr_t baseAddress, uint32 allocationFlags)
|
||||
{
|
||||
fBaseAddress = baseAddress;
|
||||
return VMCache::Init(CACHE_TYPE_DEVICE);
|
||||
return VMCache::Init(CACHE_TYPE_DEVICE, allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
|
@ -15,7 +15,7 @@
|
||||
|
||||
class VMDeviceCache : public VMCache {
|
||||
public:
|
||||
status_t Init(addr_t baseAddress);
|
||||
status_t Init(addr_t baseAddress, uint32 allocationFlags);
|
||||
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
@ -86,11 +86,11 @@ VMKernelAddressSpace::InitObject()
|
||||
// create the free lists
|
||||
size_t size = fEndAddress - fBase + 1;
|
||||
fFreeListCount = ld(size) - PAGE_SHIFT + 1;
|
||||
fFreeLists = new(nogrow) RangeFreeList[fFreeListCount];
|
||||
fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
|
||||
if (fFreeLists == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
Range* range = new(nogrow) Range(fBase, size, Range::RANGE_FREE);
|
||||
Range* range = new(std::nothrow) Range(fBase, size, Range::RANGE_FREE);
|
||||
if (range == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
@ -127,18 +127,21 @@ VMKernelAddressSpace::NextArea(VMArea* _area) const
|
||||
|
||||
VMArea*
|
||||
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
|
||||
uint32 protection)
|
||||
uint32 protection, uint32 allocationFlags)
|
||||
{
|
||||
return VMKernelArea::Create(this, name, wiring, protection);
|
||||
return VMKernelArea::Create(this, name, wiring, protection,
|
||||
allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
VMKernelAddressSpace::DeleteArea(VMArea* area)
|
||||
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", area);
|
||||
|
||||
delete static_cast<VMKernelArea*>(area);
|
||||
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
|
||||
area->~VMKernelArea();
|
||||
free_etc(area, allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
@ -162,7 +165,7 @@ VMKernelAddressSpace::LookupArea(addr_t address) const
|
||||
*/
|
||||
status_t
|
||||
VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
|
||||
size_t size, VMArea* _area)
|
||||
size_t size, VMArea* _area, uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
|
||||
", %p \"%s\")\n", *_address, addressSpec, size, _area, _area->name);
|
||||
@ -171,7 +174,7 @@ VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
|
||||
|
||||
Range* range;
|
||||
status_t error = _AllocateRange((addr_t)*_address, addressSpec, size,
|
||||
addressSpec == B_EXACT_ADDRESS, range);
|
||||
addressSpec == B_EXACT_ADDRESS, allocationFlags, range);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
@ -192,13 +195,13 @@ VMKernelAddressSpace::InsertArea(void** _address, uint32 addressSpec,
|
||||
|
||||
//! You must hold the address space's write lock.
|
||||
void
|
||||
VMKernelAddressSpace::RemoveArea(VMArea* _area)
|
||||
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);
|
||||
|
||||
VMKernelArea* area = static_cast<VMKernelArea*>(_area);
|
||||
|
||||
_FreeRange(area->Range());
|
||||
_FreeRange(area->Range(), allocationFlags);
|
||||
|
||||
fFreeSpace += area->Size();
|
||||
|
||||
@ -230,7 +233,8 @@ VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
|
||||
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
|
||||
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
|
||||
uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
|
||||
newSize);
|
||||
@ -253,8 +257,9 @@ VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
|
||||
} else {
|
||||
// no free range following -- we need to allocate a new one and
|
||||
// insert it
|
||||
nextRange = new(nogrow) Range(range->base + newSize,
|
||||
range->size - newSize, Range::RANGE_FREE);
|
||||
nextRange = new(malloc_flags(allocationFlags)) Range(
|
||||
range->base + newSize, range->size - newSize,
|
||||
Range::RANGE_FREE);
|
||||
if (nextRange == NULL)
|
||||
return B_NO_MEMORY;
|
||||
_InsertRange(nextRange);
|
||||
@ -274,7 +279,7 @@ VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
|
||||
if (sizeDiff == nextRange->size) {
|
||||
// The next range is completely covered -- remove and delete it.
|
||||
_RemoveRange(nextRange);
|
||||
delete nextRange;
|
||||
free_etc(nextRange, allocationFlags);
|
||||
} else {
|
||||
// The next range is only partially covered -- shrink it.
|
||||
if (nextRange->type == Range::RANGE_FREE)
|
||||
@ -296,7 +301,8 @@ VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
|
||||
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize)
|
||||
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
|
||||
uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
|
||||
newSize);
|
||||
@ -323,8 +329,8 @@ VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize)
|
||||
} else {
|
||||
// no free range before -- we need to allocate a new one and
|
||||
// insert it
|
||||
previousRange = new(nogrow) Range(range->base, sizeDiff,
|
||||
Range::RANGE_FREE);
|
||||
previousRange = new(malloc_flags(allocationFlags)) Range(range->base,
|
||||
sizeDiff, Range::RANGE_FREE);
|
||||
if (previousRange == NULL)
|
||||
return B_NO_MEMORY;
|
||||
range->base += sizeDiff;
|
||||
@ -342,15 +348,16 @@ VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize)
|
||||
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize)
|
||||
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags)
|
||||
{
|
||||
return ResizeArea(area, newSize);
|
||||
return ResizeArea(area, newSize, allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
|
||||
size_t size, uint32 flags)
|
||||
size_t size, uint32 flags, uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
|
||||
B_PRIxSIZE ", %#" B_PRIx32 ")\n", *_address, addressSpec, size, flags);
|
||||
@ -362,7 +369,7 @@ VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
|
||||
|
||||
Range* range;
|
||||
status_t error = _AllocateRange((addr_t)*_address, addressSpec, size, false,
|
||||
range);
|
||||
allocationFlags, range);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
|
||||
@ -379,7 +386,8 @@ VMKernelAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
|
||||
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
|
||||
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
|
||||
uint32 allocationFlags)
|
||||
{
|
||||
TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
|
||||
B_PRIxSIZE ")\n", address, size);
|
||||
@ -401,7 +409,7 @@ VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
|
||||
nextRange = fRangeList.GetNext(nextRange);
|
||||
|
||||
if (range->type == Range::RANGE_RESERVED) {
|
||||
_FreeRange(range);
|
||||
_FreeRange(range, allocationFlags);
|
||||
Put();
|
||||
}
|
||||
|
||||
@ -414,7 +422,7 @@ VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
|
||||
|
||||
|
||||
void
|
||||
VMKernelAddressSpace::UnreserveAllAddressRanges()
|
||||
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
|
||||
{
|
||||
Range* range = fRangeList.Head();
|
||||
while (range != NULL) {
|
||||
@ -426,7 +434,7 @@ VMKernelAddressSpace::UnreserveAllAddressRanges()
|
||||
nextRange = fRangeList.GetNext(nextRange);
|
||||
|
||||
if (range->type == Range::RANGE_RESERVED) {
|
||||
_FreeRange(range);
|
||||
_FreeRange(range, allocationFlags);
|
||||
Put();
|
||||
}
|
||||
|
||||
@ -522,7 +530,8 @@ VMKernelAddressSpace::_RemoveRange(Range* range)
|
||||
|
||||
status_t
|
||||
VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
|
||||
size_t size, bool allowReservedRange, Range*& _range)
|
||||
size_t size, bool allowReservedRange, uint32 allocationFlags,
|
||||
Range*& _range)
|
||||
{
|
||||
TRACE(" VMKernelAddressSpace::_AllocateRange(address: %#" B_PRIxADDR
|
||||
", size: %#" B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved "
|
||||
@ -582,8 +591,8 @@ VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
|
||||
// allocation at the beginning of the range
|
||||
if (range->size > size) {
|
||||
// only partial -- split the range
|
||||
Range* leftOverRange = new(nogrow) Range(address + size,
|
||||
range->size - size, range);
|
||||
Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
|
||||
address + size, range->size - size, range);
|
||||
if (leftOverRange == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
@ -592,8 +601,8 @@ VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
|
||||
}
|
||||
} else if (address + size == range->base + range->size) {
|
||||
// allocation at the end of the range -- split the range
|
||||
Range* leftOverRange = new(nogrow) Range(range->base,
|
||||
range->size - size, range);
|
||||
Range* leftOverRange = new(malloc_flags(allocationFlags)) Range(
|
||||
range->base, range->size - size, range);
|
||||
if (leftOverRange == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
@ -602,14 +611,14 @@ VMKernelAddressSpace::_AllocateRange(addr_t address, uint32 addressSpec,
|
||||
_InsertRange(leftOverRange);
|
||||
} else {
|
||||
// allocation in the middle of the range -- split the range in three
|
||||
Range* leftOverRange1 = new(nogrow) Range(range->base,
|
||||
address - range->base, range);
|
||||
Range* leftOverRange1 = new(malloc_flags(allocationFlags)) Range(
|
||||
range->base, address - range->base, range);
|
||||
if (leftOverRange1 == NULL)
|
||||
return B_NO_MEMORY;
|
||||
Range* leftOverRange2 = new(nogrow) Range(address + size,
|
||||
range->size - size - leftOverRange1->size, range);
|
||||
Range* leftOverRange2 = new(malloc_flags(allocationFlags)) Range(
|
||||
address + size, range->size - size - leftOverRange1->size, range);
|
||||
if (leftOverRange2 == NULL) {
|
||||
delete leftOverRange1;
|
||||
free_etc(leftOverRange1, allocationFlags);
|
||||
return B_NO_MEMORY;
|
||||
}
|
||||
|
||||
@ -752,7 +761,7 @@ TRACE(" -> reserved range not allowed\n");
|
||||
|
||||
|
||||
void
|
||||
VMKernelAddressSpace::_FreeRange(Range* range)
|
||||
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
|
||||
{
|
||||
TRACE(" VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
|
||||
B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);
|
||||
@ -769,15 +778,15 @@ VMKernelAddressSpace::_FreeRange(Range* range)
|
||||
_RemoveRange(range);
|
||||
_RemoveRange(nextRange);
|
||||
previousRange->size += range->size + nextRange->size;
|
||||
delete range;
|
||||
delete nextRange;
|
||||
free_etc(range, allocationFlags);
|
||||
free_etc(nextRange, allocationFlags);
|
||||
_FreeListInsertRange(previousRange, previousRange->size);
|
||||
} else {
|
||||
// join with the previous range only, delete the supplied one
|
||||
_FreeListRemoveRange(previousRange, previousRange->size);
|
||||
_RemoveRange(range);
|
||||
previousRange->size += range->size;
|
||||
delete range;
|
||||
free_etc(range, allocationFlags);
|
||||
_FreeListInsertRange(previousRange, previousRange->size);
|
||||
}
|
||||
} else {
|
||||
@ -785,7 +794,7 @@ VMKernelAddressSpace::_FreeRange(Range* range)
|
||||
// join with the next range and delete it
|
||||
_RemoveRange(nextRange);
|
||||
range->size += nextRange->size;
|
||||
delete nextRange;
|
||||
free_etc(nextRange, allocationFlags);
|
||||
}
|
||||
|
||||
// mark the range free and add it to the respective free list
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*/
|
||||
#ifndef VM_KERNEL_ADDRESS_SPACE_H
|
||||
@ -24,23 +24,30 @@ public:
|
||||
|
||||
virtual VMArea* LookupArea(addr_t address) const;
|
||||
virtual VMArea* CreateArea(const char* name, uint32 wiring,
|
||||
uint32 protection);
|
||||
virtual void DeleteArea(VMArea* area);
|
||||
uint32 protection, uint32 allocationFlags);
|
||||
virtual void DeleteArea(VMArea* area,
|
||||
uint32 allocationFlags);
|
||||
virtual status_t InsertArea(void** _address, uint32 addressSpec,
|
||||
size_t size, VMArea* area);
|
||||
virtual void RemoveArea(VMArea* area);
|
||||
size_t size, VMArea* area,
|
||||
uint32 allocationFlags);
|
||||
virtual void RemoveArea(VMArea* area,
|
||||
uint32 allocationFlags);
|
||||
|
||||
virtual bool CanResizeArea(VMArea* area, size_t newSize);
|
||||
virtual status_t ResizeArea(VMArea* area, size_t newSize);
|
||||
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize);
|
||||
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize);
|
||||
virtual status_t ResizeArea(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags);
|
||||
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags);
|
||||
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
|
||||
uint32 allocationFlags);
|
||||
|
||||
virtual status_t ReserveAddressRange(void** _address,
|
||||
uint32 addressSpec, size_t size,
|
||||
uint32 flags);
|
||||
uint32 flags, uint32 allocationFlags);
|
||||
virtual status_t UnreserveAddressRange(addr_t address,
|
||||
size_t size);
|
||||
virtual void UnreserveAllAddressRanges();
|
||||
size_t size, uint32 allocationFlags);
|
||||
virtual void UnreserveAllAddressRanges(
|
||||
uint32 allocationFlags);
|
||||
|
||||
virtual void Dump() const;
|
||||
|
||||
@ -62,12 +69,14 @@ private:
|
||||
|
||||
status_t _AllocateRange(addr_t address,
|
||||
uint32 addressSpec, size_t size,
|
||||
bool allowReservedRange, Range*& _range);
|
||||
bool allowReservedRange,
|
||||
uint32 allocationFlags, Range*& _range);
|
||||
Range* _FindFreeRange(addr_t start, size_t size,
|
||||
size_t alignment, uint32 addressSpec,
|
||||
bool allowReservedRange,
|
||||
addr_t& _foundAddress);
|
||||
void _FreeRange(Range* range);
|
||||
void _FreeRange(Range* range,
|
||||
uint32 allocationFlags);
|
||||
|
||||
void _CheckStructures() const;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the NewOS License.
|
||||
*/
|
||||
|
||||
@ -25,14 +25,14 @@ VMKernelArea::~VMKernelArea()
|
||||
|
||||
/*static*/ VMKernelArea*
|
||||
VMKernelArea::Create(VMAddressSpace* addressSpace, const char* name,
|
||||
uint32 wiring, uint32 protection)
|
||||
uint32 wiring, uint32 protection, uint32 allocationFlags)
|
||||
{
|
||||
VMKernelArea* area = new(nogrow) VMKernelArea(addressSpace, wiring,
|
||||
protection);
|
||||
VMKernelArea* area = new(malloc_flags(allocationFlags)) VMKernelArea(
|
||||
addressSpace, wiring, protection);
|
||||
if (area == NULL)
|
||||
return NULL;
|
||||
|
||||
if (area->Init(name) != B_OK) {
|
||||
if (area->Init(name, allocationFlags) != B_OK) {
|
||||
delete area;
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the NewOS License.
|
||||
*/
|
||||
#ifndef VM_KERNEL_AREA_H
|
||||
@ -115,7 +115,7 @@ struct VMKernelArea : VMArea, AVLTreeNode {
|
||||
|
||||
static VMKernelArea* Create(VMAddressSpace* addressSpace,
|
||||
const char* name, uint32 wiring,
|
||||
uint32 protection);
|
||||
uint32 protection, uint32 allocationFlags);
|
||||
|
||||
VMKernelAddressRange* Range() const
|
||||
{ return fRange; }
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
|
||||
status_t
|
||||
VMNullCache::Init()
|
||||
VMNullCache::Init(uint32 allocationFlags)
|
||||
{
|
||||
return VMCache::Init(CACHE_TYPE_NULL);
|
||||
return VMCache::Init(CACHE_TYPE_NULL, allocationFlags);
|
||||
}
|
||||
|
@ -15,7 +15,7 @@
|
||||
|
||||
class VMNullCache : public VMCache {
|
||||
public:
|
||||
status_t Init();
|
||||
status_t Init(uint32 allocationFlags);
|
||||
};
|
||||
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
@ -75,16 +75,18 @@ VMUserAddressSpace::NextArea(VMArea* _area) const
|
||||
|
||||
VMArea*
|
||||
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
|
||||
uint32 protection)
|
||||
uint32 protection, uint32 allocationFlags)
|
||||
{
|
||||
return VMUserArea::Create(this, name, wiring, protection);
|
||||
return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
VMUserAddressSpace::DeleteArea(VMArea* area)
|
||||
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
|
||||
{
|
||||
delete static_cast<VMUserArea*>(area);
|
||||
VMUserArea* area = static_cast<VMUserArea*>(_area);
|
||||
area->~VMUserArea();
|
||||
free_etc(area, allocationFlags);
|
||||
}
|
||||
|
||||
|
||||
@ -118,7 +120,7 @@ VMUserAddressSpace::LookupArea(addr_t address) const
*/
status_t
VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* _area)
size_t size, VMArea* _area, uint32 allocationFlags)
{
VMUserArea* area = static_cast<VMUserArea*>(_area);

@ -151,7 +153,8 @@ VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,
return B_BAD_VALUE;
}

status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area);
status = _InsertAreaSlot(searchBase, size, searchEnd, addressSpec, area,
allocationFlags);
if (status == B_OK) {
*_address = (void*)area->Base();
fFreeSpace -= area->Size();
@ -163,7 +166,7 @@ VMUserAddressSpace::InsertArea(void** _address, uint32 addressSpec,

//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area)
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
VMUserArea* area = static_cast<VMUserArea*>(_area);

@ -206,7 +209,8 @@ VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)


status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
uint32 allocationFlags)
{
VMUserArea* area = static_cast<VMUserArea*>(_area);

@ -224,10 +228,12 @@ VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize)
// resize reserved area
addr_t offset = area->Base() + newSize - next->Base();
if (next->Size() <= offset) {
RemoveArea(next);
delete next;
RemoveArea(next, allocationFlags);
next->~VMUserArea();
free_etc(next, allocationFlags);
} else {
status_t error = ShrinkAreaHead(next, next->Size() - offset);
status_t error = ShrinkAreaHead(next, next->Size() - offset,
allocationFlags);
if (error != B_OK)
return error;
}
@ -239,7 +245,8 @@ VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize)


status_t
VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size)
VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
uint32 allocationFlags)
{
size_t oldSize = area->Size();
if (size == oldSize)
@ -253,7 +260,8 @@ VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size)


status_t
VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size)
VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
uint32 allocationFlags)
{
size_t oldSize = area->Size();
if (size == oldSize)
@ -267,7 +275,7 @@ VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size)

status_t
VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
size_t size, uint32 flags)
size_t size, uint32 flags, uint32 allocationFlags)
{
// check to see if this address space has entered DELETE state
if (fDeleting) {
@ -276,13 +284,15 @@ VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,
return B_BAD_TEAM_ID;
}

VMUserArea* area = VMUserArea::CreateReserved(this, flags);
VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
if (area == NULL)
return B_NO_MEMORY;

status_t status = InsertArea(_address, addressSpec, size, area);
status_t status = InsertArea(_address, addressSpec, size, area,
allocationFlags);
if (status != B_OK) {
delete area;
area->~VMUserArea();
free_etc(area, allocationFlags);
return status;
}

@ -295,7 +305,8 @@ VMUserAddressSpace::ReserveAddressRange(void** _address, uint32 addressSpec,


status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
uint32 allocationFlags)
{
// check to see if this address space has entered DELETE state
if (fDeleting) {
@ -313,9 +324,10 @@ VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size)
break;
if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
// remove reserved range
RemoveArea(area);
RemoveArea(area, allocationFlags);
Put();
delete area;
area->~VMUserArea();
free_etc(area, allocationFlags);
}
}

@ -324,14 +336,15 @@ VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size)


void
VMUserAddressSpace::UnreserveAllAddressRanges()
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
for (VMUserAreaList::Iterator it = fAreas.GetIterator();
VMUserArea* area = it.Next();) {
if (area->id == RESERVED_AREA_ID) {
RemoveArea(area);
RemoveArea(area, allocationFlags);
Put();
delete area;
area->~VMUserArea();
free_etc(area, allocationFlags);
}
}
}
@ -362,7 +375,7 @@ VMUserAddressSpace::Dump() const
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
VMUserArea* area)
VMUserArea* area, uint32 allocationFlags)
{
VMUserArea* next;

@ -395,7 +408,8 @@ VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
// the new area fully covers the reversed range
fAreas.Remove(next);
Put();
delete next;
next->~VMUserArea();
free_etc(next, allocationFlags);
} else {
// resize the reserved range behind the area
next->SetBase(next->Base() + size);
@ -411,7 +425,7 @@ VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
// the area splits the reserved range into two separate ones
// we need a new reserved area to cover this space
VMUserArea* reserved = VMUserArea::CreateReserved(this,
next->protection);
next->protection, allocationFlags);
if (reserved == NULL)
return B_NO_MEMORY;

@ -437,7 +451,7 @@ VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
/*! Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
uint32 addressSpec, VMUserArea* area)
uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
{
VMUserArea* last = NULL;
VMUserArea* next;
@ -454,7 +468,8 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,

if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
// search for a reserved area
status_t status = _InsertAreaIntoReservedRegion(start, size, area);
status_t status = _InsertAreaIntoReservedRegion(start, size, area,
allocationFlags);
if (status == B_OK || status == B_BAD_VALUE)
return status;

@ -555,7 +570,8 @@ second_chance:

foundSpot = true;
area->SetBase(alignedBase);
delete next;
next->~VMUserArea();
free_etc(next, allocationFlags);
break;
}

@ -22,32 +22,40 @@ public:

virtual VMArea* LookupArea(addr_t address) const;
virtual VMArea* CreateArea(const char* name, uint32 wiring,
uint32 protection);
virtual void DeleteArea(VMArea* area);
uint32 protection, uint32 allocationFlags);
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags);
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area);
virtual void RemoveArea(VMArea* area);
size_t size, VMArea* area,
uint32 allocationFlags);
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags);

virtual bool CanResizeArea(VMArea* area, size_t newSize);
virtual status_t ResizeArea(VMArea* area, size_t newSize);
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize);
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize);
virtual status_t ResizeArea(VMArea* area, size_t newSize,
uint32 allocationFlags);
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
uint32 allocationFlags);
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags);

virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags);
uint32 flags, uint32 allocationFlags);
virtual status_t UnreserveAddressRange(addr_t address,
size_t size);
virtual void UnreserveAllAddressRanges();
size_t size, uint32 allocationFlags);
virtual void UnreserveAllAddressRanges(
uint32 allocationFlags);

virtual void Dump() const;

private:
status_t _InsertAreaIntoReservedRegion(addr_t start,
size_t size, VMUserArea* area);
size_t size, VMUserArea* area,
uint32 allocationFlags);
status_t _InsertAreaSlot(addr_t start, addr_t size,
addr_t end, uint32 addressSpec,
VMUserArea* area);
VMUserArea* area, uint32 allocationFlags);

private:
VMUserAreaList fAreas;

@ -1,5 +1,5 @@
/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the NewOS License.
*/

@ -25,14 +25,14 @@ VMUserArea::~VMUserArea()

/*static*/ VMUserArea*
VMUserArea::Create(VMAddressSpace* addressSpace, const char* name,
uint32 wiring, uint32 protection)
uint32 wiring, uint32 protection, uint32 allocationFlags)
{
VMUserArea* area = new(nogrow) VMUserArea(addressSpace, wiring,
protection);
VMUserArea* area = new(malloc_flags(allocationFlags)) VMUserArea(
addressSpace, wiring, protection);
if (area == NULL)
return NULL;

if (area->Init(name) != B_OK) {
if (area->Init(name, allocationFlags) != B_OK) {
delete area;
return NULL;
}
@ -42,9 +42,11 @@ VMUserArea::Create(VMAddressSpace* addressSpace, const char* name,


/*static*/ VMUserArea*
VMUserArea::CreateReserved(VMAddressSpace* addressSpace, uint32 flags)
VMUserArea::CreateReserved(VMAddressSpace* addressSpace, uint32 flags,
uint32 allocationFlags)
{
VMUserArea* area = new(nogrow) VMUserArea(addressSpace, 0, 0);
VMUserArea* area = new(malloc_flags(allocationFlags)) VMUserArea(
addressSpace, 0, 0);
if (area != NULL) {
area->id = RESERVED_AREA_ID;
area->protection = flags;

@ -1,5 +1,5 @@
/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the NewOS License.
*/
#ifndef VM_USER_AREA_H
@ -19,9 +19,9 @@ struct VMUserArea : VMArea {

static VMUserArea* Create(VMAddressSpace* addressSpace,
const char* name, uint32 wiring,
uint32 protection);
uint32 protection, uint32 allocationFlags);
static VMUserArea* CreateReserved(VMAddressSpace* addressSpace,
uint32 flags);
uint32 flags, uint32 allocationFlags);

DoublyLinkedListLink<VMUserArea>& AddressSpaceLink()
{ return fAddressSpaceLink; }

@ -565,6 +565,17 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
return B_OK;
}

int priority;
uint32 allocationFlags;
if (addressSpace == VMAddressSpace::Kernel()) {
priority = VM_PRIORITY_SYSTEM;
allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
| HEAP_DONT_LOCK_KERNEL_SPACE;
} else {
priority = VM_PRIORITY_USER;
allocationFlags = 0;
}

VMCache* cache = vm_area_get_locked_cache(area);
VMCacheChainLocker cacheChainLocker(cache);
cacheChainLocker.LockAllSourceCaches();
@ -574,7 +585,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
size_t oldSize = area->Size();
size_t newSize = address - area->Base();

status_t error = addressSpace->ShrinkAreaTail(area, newSize);
status_t error = addressSpace->ShrinkAreaTail(area, newSize,
allocationFlags);
if (error != B_OK)
return error;

@ -587,9 +599,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
// Since VMCache::Resize() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
cacheChainLocker.Unlock(cache);
cache->Resize(cache->virtual_base + newSize,
addressSpace == VMAddressSpace::Kernel()
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
cache->Resize(cache->virtual_base + newSize, priority);
cache->ReleaseRefAndUnlock();
}

@ -606,7 +616,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
unmap_pages(area, oldBase, newBase - oldBase);

// resize the area
status_t error = addressSpace->ShrinkAreaHead(area, newSize);
status_t error = addressSpace->ShrinkAreaHead(area, newSize,
allocationFlags);
if (error != B_OK)
return error;

@ -630,7 +641,8 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,

// resize the area
addr_t oldSize = area->Size();
status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize);
status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize,
allocationFlags);
if (error != B_OK)
return error;

@ -646,7 +658,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
B_EXACT_ADDRESS, area->wiring, area->protection, REGION_NO_PRIVATE_MAP,
&secondArea, area->name, 0, kernel);
if (error != B_OK) {
addressSpace->ShrinkAreaTail(area, oldSize);
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
return error;
}

@ -715,7 +727,19 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
addressSpec, wiring, protection, _area, areaName));
cache->AssertLocked();

VMArea* area = addressSpace->CreateArea(areaName, wiring, protection);
uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
| HEAP_DONT_LOCK_KERNEL_SPACE;
int priority;
if (addressSpace != VMAddressSpace::Kernel()) {
priority = VM_PRIORITY_USER;
} else if ((flags & CREATE_AREA_PRIORITY_VIP) != 0) {
priority = VM_PRIORITY_VIP;
allocationFlags |= HEAP_PRIORITY_VIP;
} else
priority = VM_PRIORITY_SYSTEM;

VMArea* area = addressSpace->CreateArea(areaName, wiring, protection,
allocationFlags);
if (area == NULL)
return B_NO_MEMORY;

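The map_backing_store() hunk above now picks the VM priority and the heap allocation flags together, before the area object is created. For illustration only, the selection could be written as the following helper (the function name is made up; the real code keeps the logic inline):

static void
pick_priority_and_flags(VMAddressSpace* addressSpace, uint32 flags,
	int& priority, uint32& allocationFlags)
{
	// area metadata is allocated while VM locks are held, so the allocation
	// is marked to not wait for memory and not lock the kernel address space
	allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE;

	if (addressSpace != VMAddressSpace::Kernel()) {
		priority = VM_PRIORITY_USER;
	} else if ((flags & CREATE_AREA_PRIORITY_VIP) != 0) {
		priority = VM_PRIORITY_VIP;
		// VIP requests are additionally tagged for the VIP allocation path
		allocationFlags |= HEAP_PRIORITY_VIP;
	} else
		priority = VM_PRIORITY_SYSTEM;
}
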
@ -745,14 +769,6 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
cache = newCache;
}

int priority;
if (addressSpace != VMAddressSpace::Kernel())
priority = VM_PRIORITY_USER;
else if ((flags & CREATE_AREA_PRIORITY_VIP) != 0)
priority = VM_PRIORITY_VIP;
else
priority = VM_PRIORITY_SYSTEM;

status = cache->SetMinimalCommitment(size, priority);
if (status != B_OK)
goto err2;
@ -773,7 +789,8 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
goto err2;
}

status = addressSpace->InsertArea(_virtualAddress, addressSpec, size, area);
status = addressSpace->InsertArea(_virtualAddress, addressSpec, size, area,
allocationFlags);
if (status != B_OK) {
// TODO: wait and try again once this is working in the backend
#if 0
@ -816,7 +833,7 @@ err2:
sourceCache->Lock();
}
err1:
addressSpace->DeleteArea(area);
addressSpace->DeleteArea(area, allocationFlags);
return status;
}

@ -869,7 +886,10 @@ vm_unreserve_address_range(team_id team, void* address, addr_t size)
if (!locker.IsLocked())
return B_BAD_TEAM_ID;

return locker.AddressSpace()->UnreserveAddressRange((addr_t)address, size);
VMAddressSpace* addressSpace = locker.AddressSpace();
return addressSpace->UnreserveAddressRange((addr_t)address, size,
addressSpace == VMAddressSpace::Kernel()
? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0);
}


@ -884,8 +904,11 @@ vm_reserve_address_range(team_id team, void** _address, uint32 addressSpec,
if (!locker.IsLocked())
return B_BAD_TEAM_ID;

return locker.AddressSpace()->ReserveAddressRange(_address, addressSpec,
size, flags);
VMAddressSpace* addressSpace = locker.AddressSpace();
return addressSpace->ReserveAddressRange(_address, addressSpec,
size, flags,
addressSpace == VMAddressSpace::Kernel()
? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0);
}

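vm_unreserve_address_range() and vm_reserve_address_range() above compute the allocation flags with the same kernel-vs-user check. Purely as an illustration of that choice (the helper below is hypothetical and not part of this change):

static inline uint32
address_space_allocation_flags(VMAddressSpace* addressSpace)
{
	// operations on the kernel address space use the restricted allocation
	// path; user address spaces may use the default (possibly waiting) path
	return addressSpace == VMAddressSpace::Kernel()
		? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0;
}
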
@ -1852,13 +1875,13 @@ delete_area(VMAddressSpace* addressSpace, VMArea* area,
area->cache->WriteModified();

arch_vm_unset_memory_type(area);
addressSpace->RemoveArea(area);
addressSpace->RemoveArea(area, 0);
addressSpace->Put();

area->cache->RemoveArea(area);
area->cache->ReleaseRef();

addressSpace->DeleteArea(area);
addressSpace->DeleteArea(area, 0);
}


@ -2882,7 +2905,7 @@ vm_delete_areas(struct VMAddressSpace* addressSpace, bool deletingAddressSpace)
addressSpace->WriteLock();

// remove all reserved areas in this address space
addressSpace->UnreserveAllAddressRanges();
addressSpace->UnreserveAllAddressRanges(0);

// delete all the areas in this address space
while (VMArea* area = addressSpace->FirstArea())
@ -3267,11 +3290,13 @@ vm_init(kernel_args* args)

slab_init(args);

#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
// map in the new heap and initialize it
addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("heap at 0x%lx\n", heapBase));
heap_init(heapBase, heapSize);
#endif

// initialize the free page list and physical page mapper
vm_page_init(args);
@ -3297,9 +3322,11 @@ vm_init(kernel_args* args)

// allocate areas to represent stuff that already exists

#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
address = (void*)ROUNDDOWN(heapBase, B_PAGE_SIZE);
create_area("kernel heap", &address, B_EXACT_ADDRESS, heapSize,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
#endif

allocate_kernel_args(args);

@ -3384,7 +3411,12 @@ vm_init_post_sem(kernel_args* args)
VMAddressSpace::InitPostSem();

slab_init_post_sem();
return heap_init_post_sem();

#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
heap_init_post_sem();
#endif

return B_OK;
}


@ -4230,6 +4262,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)

int priority = kernel && anyKernelArea
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
uint32 allocationFlags = kernel && anyKernelArea
? HEAP_DONT_WAIT_FOR_MEMORY | HEAP_DONT_LOCK_KERNEL_SPACE : 0;

if (oldSize < newSize) {
// Growing the cache can fail, so we do it first.
@ -4240,7 +4274,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)

for (VMArea* current = cache->areas; current != NULL;
current = current->cache_next) {
status = current->address_space->ResizeArea(current, newSize);
status = current->address_space->ResizeArea(current, newSize,
allocationFlags);
if (status != B_OK)
break;

@ -4266,8 +4301,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
// This can fail, too, in which case we're seriously screwed.
for (VMArea* current = cache->areas; current != NULL;
current = current->cache_next) {
if (current->address_space->ResizeArea(current, oldSize)
!= B_OK) {
if (current->address_space->ResizeArea(current, oldSize,
allocationFlags) != B_OK) {
panic("vm_resize_area(): Failed and not being able to restore "
"original state.");
}

@ -1731,8 +1731,12 @@ vm_page_write_modified_page_range(struct VMCache* cache, uint32 firstPage,
if (maxPages < 0 || maxPages > kMaxPages)
maxPages = kMaxPages;

const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
| HEAP_DONT_LOCK_KERNEL_SPACE;

PageWriteWrapper stackWrappers[2];
PageWriteWrapper* wrapperPool = new(nogrow) PageWriteWrapper[maxPages + 1];
PageWriteWrapper* wrapperPool
= new(malloc_flags(allocationFlags)) PageWriteWrapper[maxPages + 1];
if (wrapperPool == NULL) {
// don't fail, just limit our capabilities
wrapperPool = stackWrappers;
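The wrapper pool above is allocated with the restricted flags, and a NULL result only limits how many pages are handled per pass rather than failing the call. The same "don't fail, just degrade" idea expressed with plain malloc_etc()/free_etc(), as a sketch with made-up names (a trivial struct stands in for PageWriteWrapper; this is not the kernel's code):

struct Wrapper {
	int dummy;
};

static void
write_range_example(int32 maxItems)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;

	Wrapper stackItems[2];
	Wrapper* pool = (Wrapper*)malloc_etc(sizeof(Wrapper) * (maxItems + 1),
		allocationFlags);
	if (pool == NULL) {
		// the restricted allocation may fail; fall back to a small on-stack
		// buffer and simply process fewer items at a time
		pool = stackItems;
		maxItems = 1;
	}

	// ... use pool[0 .. maxItems] ...

	if (pool != stackItems)
		free_etc(pool, allocationFlags);
}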