Moved the Area structure from the beginning of the area to one page inside
the area. The first page is left unmapped, so code that writes past the end
of the previous area faults immediately instead of silently corrupting the
next area's bookkeeping.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37701 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 93a56baf09
parent 1ed801bbd7
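In outline: slab areas are SLAB_AREA_SIZE-aligned, and the Area bookkeeping structure now lives SLAB_AREA_STRUCT_OFFSET (one page) past the area's base rather than at the base itself, with that first page left unmapped as a guard. Below is a minimal user-land sketch of the resulting address arithmetic, using stand-in names and constants that assume 4 KB pages; it is an illustration, not the kernel code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-in constants mirroring MemoryManager.h, assuming 4 KB pages.
static const uintptr_t kPageSize = 4096;
static const uintptr_t kAreaSize = 2048 * kPageSize;	// SLAB_AREA_SIZE
static const uintptr_t kStructOffset = kPageSize;	// SLAB_AREA_STRUCT_OFFSET

// ROUNDDOWN(address, SLAB_AREA_SIZE): since areas are size-aligned, any
// address within an area rounds down to that area's base address.
static uintptr_t AreaBaseForAddress(uintptr_t address)
{
	return address - address % kAreaSize;
}

// The Area structure sits one (unmapped) guard page above the base; this is
// what the new _AreaForAddress() computes.
static uintptr_t AreaStructForAddress(uintptr_t address)
{
	return AreaBaseForAddress(address) + kStructOffset;
}

int main()
{
	uintptr_t address = 5 * kAreaSize + 123456;	// some address in "area 5"
	assert(AreaBaseForAddress(address) == 5 * kAreaSize);
	assert(AreaStructForAddress(address) == 5 * kAreaSize + kPageSize);
	printf("base %#zx, struct at %#zx\n",
		(size_t)AreaBaseForAddress(address),
		(size_t)AreaStructForAddress(address));
	return 0;
}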
--- a/src/system/kernel/slab/MemoryManager.cpp
+++ b/src/system/kernel/slab/MemoryManager.cpp
@@ -9,7 +9,6 @@
 #include <algorithm>
 
 #include <debug.h>
-#include <kernel.h>
 #include <tracing.h>
 #include <util/AutoLock.h>
 #include <vm/vm.h>
@@ -567,7 +566,7 @@ MemoryManager::Free(void* pages, uint32 flags)
 	T(Free(pages, flags));
 
 	// get the area and the meta chunk
-	Area* area = (Area*)ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);
+	Area* area = _AreaForAddress((addr_t)pages);
 	MetaChunk* metaChunk = &area->metaChunks[
 		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];
 
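The meta chunk lookup just above is worth a second look: `(addr_t)pages % SLAB_AREA_SIZE` is the offset from the area's base address, not from the Area structure, so the index computation is unaffected by the structure moving one page in. A stand-in sketch of that index math, with illustrative constants (4 KB pages, SLAB_CHUNK_SIZE_LARGE of 128 pages):

#include <cassert>
#include <cstdint>

static const uintptr_t kPageSize = 4096;
static const uintptr_t kAreaSize = 2048 * kPageSize;	// SLAB_AREA_SIZE
static const uintptr_t kLargeChunkSize = 128 * kPageSize;	// SLAB_CHUNK_SIZE_LARGE

// Offset within the *area* (not within the Area struct), counted in
// large-chunk units -- the index into area->metaChunks[].
static uint32_t MetaChunkIndexForAddress(uintptr_t address)
{
	return (uint32_t)((address % kAreaSize) / kLargeChunkSize);
}

int main()
{
	// an address in the third large chunk of its area maps to index 2
	assert(MetaChunkIndexForAddress(2 * kLargeChunkSize + 42) == 2);
	return 0;
}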
@@ -672,7 +671,7 @@ MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
 	T(FreeRawOrReturnCache(pages, flags));
 
 	// get the area
-	addr_t areaBase = ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);
+	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);
 
 	ReadLocker readLocker(sAreaTableLock);
 	Area* area = sAreaTable.Lookup(areaBase);
@@ -740,10 +739,8 @@ MemoryManager::AcceptableChunkSize(size_t size)
 MemoryManager::GetAllocationInfo(void* address, size_t& _size)
 {
 	// get the area
-	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
-
 	ReadLocker readLocker(sAreaTableLock);
-	Area* area = sAreaTable.Lookup(areaBase);
+	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
 	readLocker.Unlock();
 
 	if (area == NULL) {
@@ -782,10 +779,8 @@ MemoryManager::GetAllocationInfo(void* address, size_t& _size)
 MemoryManager::CacheForAddress(void* address)
 {
 	// get the area
-	addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
-
 	ReadLocker readLocker(sAreaTableLock);
-	Area* area = sAreaTable.Lookup(areaBase);
+	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
 	readLocker.Unlock();
 
 	if (area == NULL)
@@ -1156,8 +1151,9 @@ MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
 
 	if (metaChunk == area->metaChunks) {
 		// the first chunk is shorter
-		size_t unusableSize = ROUNDUP(kAreaAdminSize, chunkSize);
-		metaChunk->chunkBase = (addr_t)area + unusableSize;
+		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
+			chunkSize);
+		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
 	}
 
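With the structure one page in, the unusable prefix of the first meta chunk grows from ROUNDUP(kAreaAdminSize, chunkSize) to ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize, chunkSize), and the usable part is now computed relative to area->BaseAddress(). A worked sketch of that arithmetic; the admin size of four pages is an assumption for illustration only:

#include <cstddef>
#include <cstdio>

// Stand-in for the kernel's ROUNDUP macro: round up to a multiple of align.
static size_t RoundUp(size_t size, size_t align)
{
	return (size + align - 1) / align * align;
}

int main()
{
	const size_t kPageSize = 4096;
	const size_t kStructOffset = kPageSize;		// SLAB_AREA_STRUCT_OFFSET
	const size_t kAreaAdminSize = 4 * kPageSize;	// assumed, for illustration
	const size_t chunkSize = 8 * kPageSize;		// this meta chunk's chunk size

	// The first usable chunk must start on a chunkSize boundary past the
	// guard page and the admin structures.
	size_t unusableSize = RoundUp(kStructOffset + kAreaAdminSize, chunkSize);
	printf("unusable prefix: %zu bytes (%zu chunks)\n",
		unusableSize, unusableSize / chunkSize);	// 32768 bytes, 1 chunk
	return 0;
}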
@@ -1203,6 +1199,7 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
 	mutex_unlock(&sLock);
 
 	size_t pagesNeededToMap = 0;
+	void* areaBase;
 	Area* area;
 	VMArea* vmArea = NULL;
 
@@ -1211,19 +1208,21 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
 		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
 			? CREATE_AREA_PRIORITY_VIP : 0;
 		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
-			(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
+			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
 			areaCreationFlags);
 		if (areaID < 0) {
 			mutex_lock(&sLock);
 			return areaID;
 		}
 
+		area = _AreaForAddress((addr_t)areaBase);
+
 		// map the memory for the administrative structure
 		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
 		VMTranslationMap* translationMap = addressSpace->TranslationMap();
 
-		pagesNeededToMap = translationMap->MaxPagesNeededToMap((addr_t)area,
-			(addr_t)area + SLAB_AREA_SIZE - 1);
+		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
+			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);
 
 		vmArea = VMAreaHash::Lookup(areaID);
 		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
@@ -1238,13 +1237,14 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
 			areaID);
 	} else {
 		// no areas yet -- allocate raw memory
-		area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
+		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
 			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
 			SLAB_AREA_SIZE);
-		if (area == NULL) {
+		if (areaBase == NULL) {
 			mutex_lock(&sLock);
 			return B_NO_MEMORY;
 		}
+		area = _AreaForAddress((addr_t)areaBase);
 
 		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
 			area);
@@ -1260,7 +1260,7 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
 		MetaChunk* metaChunk = area->metaChunks + i;
 		metaChunk->chunkSize = 0;
-		metaChunk->chunkBase = (addr_t)area + i * SLAB_CHUNK_SIZE_LARGE;
+		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
 		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
 			// Note: chunkBase and totalSize aren't correct for the first
 			// meta chunk. They will be set in _PrepareMetaChunk().
@@ -1458,6 +1458,12 @@ MemoryManager::_UnmapFreeChunksEarly(Area* area)
 
 	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);
 
+	// unmap the space before the Area structure
+#if SLAB_AREA_STRUCT_OFFSET > 0
+	_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
+		0);
+#endif
+
 	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
 		MetaChunk* metaChunk = area->metaChunks + i;
 		if (metaChunk->chunkSize == 0) {
@@ -1467,7 +1473,7 @@ MemoryManager::_UnmapFreeChunksEarly(Area* area)
 					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
 			} else {
 				_UnmapChunk(area->vmArea,
-					(addr_t)area + i * SLAB_CHUNK_SIZE_LARGE,
+					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
 					SLAB_CHUNK_SIZE_LARGE, 0);
 			}
 		} else {
@@ -1496,7 +1502,7 @@ MemoryManager::_UnmapFreeChunksEarly(Area* area)
 /*static*/ void
 MemoryManager::_ConvertEarlyArea(Area* area)
 {
-	void* address = area;
+	void* address = (void*)area->BaseAddress();
 	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
 		SLAB_AREA_SIZE, B_ALREADY_WIRED,
 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@@ -1565,7 +1571,8 @@ MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
 		return;
 	}
 
-	addr_t expectedBase = (addr_t)area + metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
+	addr_t expectedBase = area->BaseAddress()
+		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
 	if (metaChunk->chunkBase < expectedBase
 		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
 			> SLAB_CHUNK_SIZE_LARGE) {
@@ -1778,7 +1785,7 @@ MemoryManager::_DumpMetaChunk(int argc, char** argv)
 	if (!evaluate_debug_expression(argv[1], &address, false))
 		return 0;
 
-	Area* area = (Area*)(addr_t)ROUNDDOWN(address, SLAB_AREA_SIZE);
+	Area* area = _AreaForAddress(address);
 
 	MetaChunk* metaChunk;
 	if ((addr_t)address >= (addr_t)area->metaChunks
@@ -1851,9 +1858,7 @@ MemoryManager::_DumpArea(int argc, char** argv)
 	if (!evaluate_debug_expression(argv[argi], &address, false))
 		return 0;
 
-	address = ROUNDDOWN(address, SLAB_AREA_SIZE);
-
-	Area* area = (Area*)(addr_t)address;
+	Area* area = _AreaForAddress((addr_t)address);
 
 	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
 		MetaChunk* metaChunk = area->metaChunks + k;
--- a/src/system/kernel/slab/MemoryManager.h
+++ b/src/system/kernel/slab/MemoryManager.h
@@ -25,6 +25,10 @@ struct VMArea;
 #define SLAB_CHUNK_SIZE_LARGE	(128 * B_PAGE_SIZE)
 #define SLAB_AREA_SIZE			(2048 * B_PAGE_SIZE)
 	// TODO: These sizes have been chosen with 4 KB pages in mind.
+#define SLAB_AREA_STRUCT_OFFSET	B_PAGE_SIZE
+	// The offset from the start of the area to the Area structure. This space
+	// is not mapped and will trip code writing beyond the previous area's
+	// bounds.
 
 #define SLAB_META_CHUNKS_PER_AREA	(SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_LARGE)
 #define SLAB_SMALL_CHUNKS_PER_AREA	(SLAB_AREA_SIZE / SLAB_CHUNK_SIZE_SMALL)
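The comment added above is the core of the commit: the page at the very start of each area is never mapped, so an overrun off the end of the previous area hits unmapped memory and faults instead of silently corrupting the next area's Area structure. A user-land analogue of the same guard-page trick, using POSIX mmap (an illustrative sketch, not kernel code):

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

struct Bookkeeping {
	long used;	// stands in for the Area structure
};

int main()
{
	const size_t kPage = 4096;
	const size_t kRegion = 16 * kPage;

	// reserve the whole region without access rights ...
	char* base = (char*)mmap(NULL, kRegion, PROT_NONE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;

	// ... then map everything except the first (guard) page
	if (mprotect(base + kPage, kRegion - kPage,
			PROT_READ | PROT_WRITE) != 0)
		return 1;

	// the bookkeeping structure lives one page in, behind the guard
	Bookkeeping* info = (Bookkeeping*)(base + kPage);
	info->used = 0;
	printf("guard page %p, bookkeeping %p\n", (void*)base, (void*)info);

	// base[0] = 1;	// a stray write here would fault immediately

	munmap(base, kRegion);
	return 0;
}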
@@ -80,6 +84,7 @@ private:
 			Area*				GetArea() const;
 	};
 
+	friend class MetaChunk;
 	typedef DoublyLinkedList<MetaChunk> MetaChunkList;
 
 	struct Area : DoublyLinkedListLinkImpl<Area> {
@@ -89,6 +94,11 @@ private:
 		uint16				usedMetaChunkCount;
 		bool				fullyMapped;
 		MetaChunk			metaChunks[SLAB_META_CHUNKS_PER_AREA];
+
+		addr_t BaseAddress() const
+		{
+			return (addr_t)this - SLAB_AREA_STRUCT_OFFSET;
+		}
 	};
 
 	typedef DoublyLinkedList<Area> AreaList;
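BaseAddress() is the inverse of _AreaForAddress(): the structure knows it was placed SLAB_AREA_STRUCT_OFFSET past the area's start, so subtracting that offset from `this` recovers the base, guard page included. A stand-in round-trip check (simplified struct, illustrative only):

#include <cassert>
#include <cstdint>

static const uintptr_t kStructOffset = 4096;	// SLAB_AREA_STRUCT_OFFSET

struct AreaSketch {
	// this == base + kStructOffset, so subtracting recovers the base
	uintptr_t BaseAddress() const
	{
		return (uintptr_t)this - kStructOffset;
	}
};

int main()
{
	// place a pretend Area structure one "page" into a backing buffer;
	// BaseAddress() then points back at the buffer's start
	static char backing[2 * 4096];
	AreaSketch* area = (AreaSketch*)(backing + kStructOffset);
	assert(area->BaseAddress() == (uintptr_t)backing);
	return 0;
}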
@@ -104,12 +114,12 @@ private:
 
 		size_t Hash(const Area* value) const
 		{
-			return HashKey((addr_t)value);
+			return HashKey(value->BaseAddress());
 		}
 
 		bool Compare(addr_t key, const Area* value) const
 		{
-			return key == (addr_t)value;
+			return key == value->BaseAddress();
 		}
 
 		Area*& GetLink(Area* value) const
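These hash hooks are why the Lookup() call sites above switched from `(addr_t)area` to the base address: the table now keys areas by BaseAddress(), which no longer equals the address of the Area structure itself. A sketch of the lookup contract, with std::unordered_map standing in for the kernel's hash table (illustrative only):

#include <cassert>
#include <cstdint>
#include <unordered_map>

static const uintptr_t kPageSize = 4096;
static const uintptr_t kAreaSize = 2048 * kPageSize;	// SLAB_AREA_SIZE
static const uintptr_t kStructOffset = kPageSize;	// SLAB_AREA_STRUCT_OFFSET

struct AreaSketch {
	uintptr_t base;
	uintptr_t BaseAddress() const { return base; }
};

int main()
{
	std::unordered_map<uintptr_t, AreaSketch*> areaTable;

	// insert an area under its *base* address, as Hash()/Compare() now do
	AreaSketch area = { 5 * kAreaSize };
	areaTable[area.BaseAddress()] = &area;

	// to look up an arbitrary address, round down to the base address
	// (what _AreaBaseAddressForAddress() does), not to the struct address
	uintptr_t address = 5 * kAreaSize + kStructOffset + 12345;
	uintptr_t key = address - address % kAreaSize;
	assert(areaTable.at(key) == &area);
	return 0;
}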
@@ -158,6 +168,8 @@ private:
 
 	static	void				_RequestMaintenance();
 
+	static	addr_t				_AreaBaseAddressForAddress(addr_t address);
+	static	Area*				_AreaForAddress(addr_t address);
 	static	uint32				_ChunkIndexForAddress(
 									const MetaChunk* metaChunk, addr_t address);
 	static	addr_t				_ChunkAddress(const MetaChunk* metaChunk,
@@ -207,6 +219,21 @@ MemoryManager::MaintenanceNeeded()
 }
 
 
+/*static*/ inline addr_t
+MemoryManager::_AreaBaseAddressForAddress(addr_t address)
+{
+	return ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
+}
+
+
+/*static*/ inline MemoryManager::Area*
+MemoryManager::_AreaForAddress(addr_t address)
+{
+	return (Area*)(_AreaBaseAddressForAddress(address)
+		+ SLAB_AREA_STRUCT_OFFSET);
+}
+
+
 /*static*/ inline uint32
 MemoryManager::_ChunkIndexForAddress(const MetaChunk* metaChunk, addr_t address)
 {
@@ -234,7 +261,7 @@ MemoryManager::_IsChunkFree(const MetaChunk* metaChunk, const Chunk* chunk)
 inline MemoryManager::Area*
 MemoryManager::MetaChunk::GetArea() const
 {
-	return (Area*)ROUNDDOWN((addr_t)this, SLAB_AREA_SIZE);
+	return _AreaForAddress((addr_t)this);
 }
 
 