ramfs: Drop now-unused Block* classes.

Augustin Cavalier 2019-08-31 18:47:49 -04:00
parent cbc0726819
commit d2ab19b331
10 changed files with 0 additions and 1975 deletions

View File: Block.h

@@ -1,319 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_H
#define BLOCK_H
class Block;
class BlockHeader;
class BlockReference;
class TFreeBlock;
#include <SupportDefs.h>
// debugging
//#define inline
#define BA_DEFINE_INLINES 1
// BlockHeader
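// Encoding: fSize stores the block's gross size (which doubles as the
// offset to the next block) in its lower 31 bits and HAS_NEXT_FLAG in the
// top bit; fBackSkip stores the offset back to the previous block (0 if
// there is none) in its lower 31 bits and FREE_FLAG in the top bit.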
class BlockHeader {
public:
inline Block *ToBlock() { return (Block*)this; }
inline TFreeBlock *ToFreeBlock() { return (TFreeBlock*)this; }
inline void SetPreviousBlock(Block *block);
inline Block *GetPreviousBlock();
inline void SetNextBlock(Block *block);
inline Block *GetNextBlock();
inline bool HasNextBlock() { return (fSize & HAS_NEXT_FLAG); }
inline void SetSize(size_t size, bool hasNext = false);
inline size_t GetSize() const;
static inline size_t GetUsableSizeFor(size_t size);
inline size_t GetUsableSize() const;
inline void *GetData();
inline void SetFree(bool flag);
inline bool IsFree() const;
inline void SetReference(BlockReference *ref);
inline BlockReference *GetReference() const { return fReference; }
inline void FixReference();
inline void SetTo(Block *previous, size_t size, bool isFree, bool hasNext,
BlockReference *reference = NULL);
private:
enum {
FREE_FLAG = 0x80000000,
BACK_SKIP_MASK = 0x7fffffff,
};
enum {
HAS_NEXT_FLAG = 0x80000000,
SIZE_MASK = 0x7fffffff,
};
protected:
BlockHeader();
~BlockHeader();
protected:
size_t fBackSkip;
size_t fSize;
BlockReference *fReference;
};
// Block
class Block : public BlockHeader {
public:
static inline Block *MakeBlock(void *address, ssize_t offset,
Block *previous, size_t size, bool isFree,
bool hasNext,
BlockReference *reference = NULL);
protected:
Block();
~Block();
};
// TFreeBlock
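// A free block keeps its free-list links (fPrevious/fNext) in what would
// otherwise be the block's data area, which is why no block may ever be
// smaller than sizeof(TFreeBlock) (see kMinBlockSize in
// BlockAllocatorMisc.h).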
class TFreeBlock : public Block {
public:
inline void SetPreviousFreeBlock(TFreeBlock *block) { fPrevious = block; }
inline void SetNextFreeBlock(TFreeBlock *block) { fNext = block; }
inline TFreeBlock *GetPreviousFreeBlock() { return fPrevious; }
inline TFreeBlock *GetNextFreeBlock() { return fNext; }
inline void SetTo(Block *previous, size_t size, bool hasNext,
TFreeBlock *previousFree, TFreeBlock *nextFree);
// static inline TFreeBlock *MakeFreeBlock(void *address, ssize_t offset,
// Block *previous, size_t size, bool hasNext, TFreeBlock *previousFree,
// TFreeBlock *nextFree);
protected:
TFreeBlock();
~TFreeBlock();
private:
TFreeBlock *fPrevious;
TFreeBlock *fNext;
};
// BlockReference
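// A BlockReference is a stable handle to a movable Block: defragmentation
// moves blocks around in memory, and FixReference() lets a moved block
// update the handle, so code holding the BlockReference remains valid
// across moves.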
class BlockReference {
public:
inline BlockReference() : fBlock(NULL) {}
inline BlockReference(Block *block) : fBlock(block) {}
inline void SetBlock(Block *block) { fBlock = block; }
inline Block *GetBlock() const { return fBlock; }
inline void *GetData() const { return fBlock->GetData(); }
inline void *GetDataAt(ssize_t offset) const;
private:
Block *fBlock;
};
// ---------------------------------------------------------------------------
// inline methods
// debugging
#if BA_DEFINE_INLINES
// BlockHeader
// SetPreviousBlock
inline
void
BlockHeader::SetPreviousBlock(Block *block)
{
size_t offset = (block ? (char*)this - (char*)block : 0);
fBackSkip = (fBackSkip & FREE_FLAG) | offset;
}
// GetPreviousBlock
inline
Block *
BlockHeader::GetPreviousBlock()
{
if (fBackSkip & BACK_SKIP_MASK)
return (Block*)((char*)this - (fBackSkip & BACK_SKIP_MASK));
return NULL;
}
// SetNextBlock
inline
void
BlockHeader::SetNextBlock(Block *block)
{
if (block)
fSize = ((char*)block - (char*)this) | HAS_NEXT_FLAG;
else
fSize &= SIZE_MASK;
}
// GetNextBlock
inline
Block *
BlockHeader::GetNextBlock()
{
if (fSize & HAS_NEXT_FLAG)
return (Block*)((char*)this + (SIZE_MASK & fSize));
return NULL;
}
// SetSize
inline
void
BlockHeader::SetSize(size_t size, bool hasNext)
{
fSize = size;
if (hasNext)
fSize |= HAS_NEXT_FLAG;
}
// GetSize
inline
size_t
BlockHeader::GetSize() const
{
return (fSize & SIZE_MASK);
}
// GetUsableSizeFor
inline
size_t
BlockHeader::GetUsableSizeFor(size_t size)
{
return (size - sizeof(BlockHeader));
}
// GetUsableSize
inline
size_t
BlockHeader::GetUsableSize() const
{
return GetUsableSizeFor(GetSize());
}
// GetData
inline
void *
BlockHeader::GetData()
{
return (char*)this + sizeof(BlockHeader);
}
// SetFree
inline
void
BlockHeader::SetFree(bool flag)
{
if (flag)
fBackSkip |= FREE_FLAG;
else
fBackSkip &= ~FREE_FLAG;
}
// IsFree
inline
bool
BlockHeader::IsFree() const
{
return (fBackSkip & FREE_FLAG);
}
// SetTo
inline
void
BlockHeader::SetTo(Block *previous, size_t size, bool isFree, bool hasNext,
BlockReference *reference)
{
SetPreviousBlock(previous);
SetSize(size, hasNext);
SetFree(isFree);
SetReference(reference);
}
// SetReference
inline
void
BlockHeader::SetReference(BlockReference *ref)
{
fReference = ref;
FixReference();
}
// FixReference
inline
void
BlockHeader::FixReference()
{
if (fReference)
fReference->SetBlock(ToBlock());
}
// Block
// MakeBlock
/*inline
Block *
Block::MakeBlock(void *address, ssize_t offset, Block *previous, size_t size,
bool isFree, bool hasNext, BlockReference *reference)
{
Block *block = (Block*)((char*)address + offset);
block->SetTo(previous, size, isFree, hasNext, reference);
return block;
}*/
// TFreeBlock
// SetTo
inline
void
TFreeBlock::SetTo(Block *previous, size_t size, bool hasNext,
TFreeBlock *previousFree, TFreeBlock *nextFree)
{
Block::SetTo(previous, size, true, hasNext, NULL);
SetPreviousFreeBlock(previousFree);
SetNextFreeBlock(nextFree);
}
// MakeFreeBlock
/*inline
TFreeBlock *
TFreeBlock::MakeFreeBlock(void *address, ssize_t offset, Block *previous,
size_t size, bool hasNext, TFreeBlock *previousFree,
TFreeBlock *nextFree)
{
TFreeBlock *block = (TFreeBlock*)((char*)address + offset);
block->SetTo(previous, size, hasNext, previousFree, nextFree);
if (hasNext)
block->GetNextBlock()->SetPreviousBlock(block);
return block;
}*/
// BlockReference
// GetDataAt
inline
void *
BlockReference::GetDataAt(ssize_t offset) const
{
return (char*)fBlock->GetData() + offset;
}
#endif // BA_DEFINE_INLINES
#endif // BLOCK_H

View File: BlockAllocator.cpp

@@ -1,429 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
// debugging
#define BA_DEFINE_INLINES 1
#include "AllocationInfo.h"
#include "BlockAllocator.h"
#include "BlockAllocatorArea.h"
#include "BlockAllocatorAreaBucket.h"
#include "DebugSupport.h"
// BlockAllocator
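// Areas are kept in buckets indexed by the amount of free space they have
// left: with kMinNetBlockSize == 8, bucket i covers free byte counts in
// [(1 << (i - 1)) * 8, (1 << i) * 8), e.g. bucket 0 covers [0, 8) and
// bucket 3 covers [32, 64). _AllocateBlock() can thus jump straight to the
// first bucket whose areas all have at least the requested free bytes.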
// constructor
BlockAllocator::BlockAllocator(size_t areaSize)
: fReferenceManager(),
fBuckets(NULL),
fBucketCount(0),
fAreaSize(areaSize),
fAreaCount(0),
fFreeBytes(0)
{
// create and init buckets
fBucketCount = bucket_containing_size(areaSize) + 1;
fBuckets = new(nothrow) AreaBucket[fBucketCount];
size_t minSize = 0;
for (int32 i = 0; i < fBucketCount; i++) {
size_t maxSize = (1 << i) * kMinNetBlockSize;
fBuckets[i].SetIndex(i);
fBuckets[i].SetSizeLimits(minSize, maxSize);
minSize = maxSize;
}
}
// destructor
BlockAllocator::~BlockAllocator()
{
if (fBuckets)
delete[] fBuckets;
}
// InitCheck
status_t
BlockAllocator::InitCheck() const
{
RETURN_ERROR(fBuckets ? B_OK : B_NO_MEMORY);
}
// AllocateBlock
BlockReference *
BlockAllocator::AllocateBlock(size_t usableSize)
{
#if ENABLE_BA_PANIC
if (fPanic)
return NULL;
#endif
//PRINT(("BlockAllocator::AllocateBlock(%lu)\n", usableSize));
Block *block = NULL;
if (usableSize > 0 && usableSize <= Area::GetMaxFreeBytesFor(fAreaSize)) {
// get a block reference
BlockReference *reference = fReferenceManager.AllocateReference();
if (reference) {
block = _AllocateBlock(usableSize);
// set reference / cleanup on failure
if (block)
block->SetReference(reference);
else
fReferenceManager.FreeReference(reference);
}
D(SanityCheck(false));
}
//PRINT(("BlockAllocator::AllocateBlock() done: %p\n", block));
return (block ? block->GetReference() : NULL);
}
// FreeBlock
void
BlockAllocator::FreeBlock(BlockReference *blockReference)
{
#if ENABLE_BA_PANIC
if (fPanic)
return;
#endif
D(if (!CheckBlock(blockReference)) return;);
Block *block = (blockReference ? blockReference->GetBlock() : NULL);
//PRINT(("BlockAllocator::FreeBlock(%p)\n", block));
Area *area = NULL;
if (block && !block->IsFree() && (area = _AreaForBlock(block)) != NULL) {
_FreeBlock(area, block, true);
D(SanityCheck(false));
if (_DefragmentingRecommended())
_Defragment();
}
//PRINT(("BlockAllocator::FreeBlock() done\n"));
}
// ResizeBlock
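// Resizing first lets the block's area try to resize the block in place;
// failing that, a new block is allocated, the data copied over, and the old
// block freed. In both cases the BlockReference is carried over to the
// resulting block, so handles held by callers remain valid.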
BlockReference *
BlockAllocator::ResizeBlock(BlockReference *blockReference, size_t usableSize)
{
#if ENABLE_BA_PANIC
if (fPanic)
return NULL;
#endif
D(if (!CheckBlock(blockReference)) return NULL;);
//PRINT(("BlockAllocator::ResizeBlock(%p, %lu)\n", blockReference, usableSize));
Block *block = (blockReference ? blockReference->GetBlock() : NULL);
Block *resultBlock = NULL;
Area *area = NULL;
if (block && !block->IsFree() && (area = _AreaForBlock(block)) != NULL) {
//PRINT(("BlockAllocator::ResizeBlock(%p, %lu)\n", block, usableSize));
if (usableSize) {
// try to let the area resize the block
size_t blockSize = block->GetSize();
size_t areaFreeBytes = area->GetFreeBytes();
bool needsDefragmenting = area->NeedsDefragmenting();
//PRINT((" block reference: %p / %p\n", blockReference, block->GetReference()));
resultBlock = area->ResizeBlock(block, usableSize);
block = blockReference->GetBlock();
if (resultBlock) {
//PRINT((" area succeeded in resizing the block\n"));
//PRINT((" block reference now: %p\n", resultBlock->GetReference()));
// the area was able to resize the block
_RethinkAreaBucket(area, area->GetBucket(),
needsDefragmenting);
fFreeBytes = fFreeBytes + area->GetFreeBytes() - areaFreeBytes;
// Defragment only, if the area was able to resize the block,
// the new block is smaller than the old one and defragmenting
// is recommended.
if (blockSize > resultBlock->GetSize()
&& _DefragmentingRecommended()) {
_Defragment();
}
} else {
//PRINT((" area failed to resize the block\n"));
// the area failed: allocate a new block, copy the data, and
// free the old one
resultBlock = _AllocateBlock(usableSize);
block = blockReference->GetBlock();
if (resultBlock) {
memcpy(resultBlock->GetData(), block->GetData(),
block->GetUsableSize());
resultBlock->SetReference(block->GetReference());
_FreeBlock(area, block, false);
}
}
} else
FreeBlock(blockReference);
D(SanityCheck(false));
//PRINT(("BlockAllocator::ResizeBlock() done: %p\n", resultBlock));
}
return (resultBlock ? resultBlock->GetReference() : NULL);
}
// SanityCheck
bool
BlockAllocator::SanityCheck(bool deep) const
{
// iterate through all areas of all buckets
int32 areaCount = 0;
size_t freeBytes = 0;
for (int32 i = 0; i < fBucketCount; i++) {
AreaBucket *bucket = fBuckets + i;
if (deep) {
if (!bucket->SanityCheck(deep))
return false;
}
for (Area *area = bucket->GetFirstArea();
area;
area = bucket->GetNextArea(area)) {
areaCount++;
freeBytes += area->GetFreeBytes();
}
}
// area count
if (areaCount != fAreaCount) {
FATAL("fAreaCount is %" B_PRId32 ", but should be %" B_PRId32 "\n", fAreaCount,
areaCount);
BA_PANIC("BlockAllocator: Bad free bytes.");
return false;
}
// free bytes
if (fFreeBytes != freeBytes) {
FATAL("fFreeBytes is %lu, but should be %lu\n", fFreeBytes,
freeBytes);
BA_PANIC("BlockAllocator: Bad free bytes.");
return false;
}
return true;
}
// CheckArea
bool
BlockAllocator::CheckArea(Area *checkArea)
{
for (int32 i = 0; i < fBucketCount; i++) {
AreaBucket *bucket = fBuckets + i;
for (Area *area = bucket->GetFirstArea();
area;
area = bucket->GetNextArea(area)) {
if (area == checkArea)
return true;
}
}
FATAL("Area %p is not a valid Area!\n", checkArea);
BA_PANIC("Invalid Area.");
return false;
}
// CheckBlock
bool
BlockAllocator::CheckBlock(Block *block, size_t minSize)
{
Area *area = _AreaForBlock(block);
return (area/* && CheckArea(area)*/ && area->CheckBlock(block, minSize));
}
// CheckBlock
bool
BlockAllocator::CheckBlock(BlockReference *reference, size_t minSize)
{
return (fReferenceManager.CheckReference(reference)
&& CheckBlock(reference->GetBlock(), minSize));
}
// GetAllocationInfo
void
BlockAllocator::GetAllocationInfo(AllocationInfo &info)
{
fReferenceManager.GetAllocationInfo(info);
info.AddOtherAllocation(sizeof(AreaBucket), fBucketCount);
info.AddAreaAllocation(fAreaSize, fAreaCount);
}
// _AreaForBlock
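// Each Area object sits at the base address of the kernel area it manages
// (see Area::Create()), so mapping a block pointer back to its Area is just
// area_for() plus get_area_info().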
inline
BlockAllocator::Area *
BlockAllocator::_AreaForBlock(Block *block)
{
Area *area = NULL;
area_id id = area_for(block);
area_info info;
if (id >= 0 && get_area_info(id, &info) == B_OK)
area = (Area*)info.address;
D(if (!CheckArea(area)) return NULL;);
return area;
}
// _AllocateBlock
Block *
BlockAllocator::_AllocateBlock(size_t usableSize, bool dontCreateArea)
{
Block *block = NULL;
// Get the last area (the one with the most free space) and try
// to let it allocate a block. If that fails, allocate a new area.
// find a bucket for the allocation
// TODO: optimize
AreaBucket *bucket = NULL;
int32 index = bucket_containing_min_size(usableSize);
for (; index < fBucketCount; index++) {
if (!fBuckets[index].IsEmpty()) {
bucket = fBuckets + index;
break;
}
}
// get an area: if we have one, from the bucket, else create a new
// area
Area *area = NULL;
if (bucket)
area = bucket->GetFirstArea();
else if (!dontCreateArea) {
area = Area::Create(fAreaSize);
if (area) {
fAreaCount++;
fFreeBytes += area->GetFreeBytes();
bucket = fBuckets + area->GetBucketIndex();
bucket->AddArea(area);
PRINT("New area allocated. area count now: %" B_PRId32 ", free bytes: %lu\n",
fAreaCount, fFreeBytes);
}
}
// allocate a block
if (area) {
size_t areaFreeBytes = area->GetFreeBytes();
bool needsDefragmenting = area->NeedsDefragmenting();
block = area->AllocateBlock(usableSize);
// move the area into another bucket, if necessary
if (block) {
_RethinkAreaBucket(area, bucket, needsDefragmenting);
fFreeBytes = fFreeBytes + area->GetFreeBytes() - areaFreeBytes;
}
#if ENABLE_BA_PANIC
else if (!fPanic) {
FATAL("Block allocation failed unexpectedly.\n");
PRINT(" usableSize: %lu, areaFreeBytes: %lu\n", usableSize, areaFreeBytes);
BA_PANIC("Block allocation failed unexpectedly.");
//block = area->AllocateBlock(usableSize);
}
#endif
}
return block;
}
// _FreeBlock
void
BlockAllocator::_FreeBlock(Area *area, Block *block, bool freeReference)
{
size_t areaFreeBytes = area->GetFreeBytes();
AreaBucket *bucket = area->GetBucket();
bool needsDefragmenting = area->NeedsDefragmenting();
// free the block and the block reference
BlockReference *reference = block->GetReference();
area->FreeBlock(block);
if (reference && freeReference)
fReferenceManager.FreeReference(reference);
// move the area into another bucket, if necessary
_RethinkAreaBucket(area, bucket, needsDefragmenting);
fFreeBytes = fFreeBytes + area->GetFreeBytes() - areaFreeBytes;
}
// _RethinkAreaBucket
inline
void
BlockAllocator::_RethinkAreaBucket(Area *area, AreaBucket *bucket,
bool needsDefragmenting)
{
AreaBucket *newBucket = fBuckets + area->GetBucketIndex();
if (newBucket != bucket
|| needsDefragmenting != area->NeedsDefragmenting()) {
bucket->RemoveArea(area);
newBucket->AddArea(area);
}
}
// _DefragmentingRecommended
inline
bool
BlockAllocator::_DefragmentingRecommended()
{
// Not sure whether this makes much sense: We only try to defragment when
// at least a complete area could be freed afterwards, with some tolerance
// to spare (a fixed value plus 1/32 of the used bytes).
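// For example, with 1 MB areas and 2 MB of used bytes, defragmenting is
// recommended once the free bytes exceed
// 1 MB + 10 KB (kDefragmentingTolerance) + 64 KB (2 MB / 32).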
size_t usedBytes = fAreaCount * Area::GetMaxFreeBytesFor(fAreaSize)
- fFreeBytes;
return (fFreeBytes > fAreaSize + kDefragmentingTolerance + usedBytes / 32);
}
// _Defragment
bool
BlockAllocator::_Defragment()
{
bool success = false;
// We try to empty the least populated area by moving its blocks to other
// areas.
if (fFreeBytes > fAreaSize) {
// find the least populated area
// find the bucket with the least populated areas
AreaBucket *bucket = NULL;
for (int32 i = fBucketCount - 1; i >= 0; i--) {
if (!fBuckets[i].IsEmpty()) {
bucket = fBuckets + i;
break;
}
}
// find the area in the bucket
Area *area = NULL;
if (bucket) {
area = bucket->GetFirstArea();
Area *bucketArea = area;
while ((bucketArea = bucket->GetNextArea(bucketArea)) != NULL) {
if (bucketArea->GetFreeBytes() > area->GetFreeBytes())
area = bucketArea;
}
}
if (area) {
// remove the area from the bucket
bucket->RemoveArea(area);
fFreeBytes -= area->GetFreeBytes();
// iterate through the blocks in the area and try to find a new
// home for them
success = true;
while (Block *block = area->GetFirstUsedBlock()) {
Block *newBlock = _AllocateBlock(block->GetUsableSize(), true);
if (newBlock) {
// got a new block: copy the data to it and free the old
// one
memcpy(newBlock->GetData(), block->GetData(),
block->GetUsableSize());
newBlock->SetReference(block->GetReference());
block->SetReference(NULL);
area->FreeBlock(block, true);
#if ENABLE_BA_PANIC
if (fPanic) {
PRINT("Panicked while trying to free block %p\n",
block);
success = false;
break;
}
#endif
} else {
success = false;
break;
}
}
// delete the area
if (success && area->IsEmpty()) {
area->Delete();
fAreaCount--;
PRINT("defragmenting: area deleted\n");
} else {
PRINT("defragmenting: failed to empty area\n");
// failed: re-add the area
fFreeBytes += area->GetFreeBytes();
AreaBucket *newBucket = fBuckets + area->GetBucketIndex();
newBucket->AddArea(area);
}
}
D(SanityCheck(false));
}
return success;
}
#if ENABLE_BA_PANIC
bool BlockAllocator::fPanic = false;
#endif

View File: BlockAllocator.h

@@ -1,72 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_ALLOCATOR_H
#define BLOCK_ALLOCATOR_H
#include <OS.h>
#include "Block.h"
#include "BlockReferenceManager.h"
#include "DebugSupport.h"
#include "List.h"
#define ENABLE_BA_PANIC 1
#if ENABLE_BA_PANIC
#define BA_PANIC(x) { PANIC(x); BlockAllocator::fPanic = true; }
#endif
class AllocationInfo;
// BlockAllocator
class BlockAllocator {
public:
BlockAllocator(size_t areaSize);
~BlockAllocator();
status_t InitCheck() const;
BlockReference *AllocateBlock(size_t usableSize);
void FreeBlock(BlockReference *block);
BlockReference *ResizeBlock(BlockReference *block, size_t usableSize);
size_t GetAvailableBytes() const { return fAreaCount * fAreaSize; }
size_t GetFreeBytes() const { return fFreeBytes; }
size_t GetUsedBytes() const { return fAreaCount * fAreaSize
- fFreeBytes; }
public:
class Area;
class AreaBucket;
// debugging only
bool SanityCheck(bool deep = false) const;
bool CheckArea(Area *area);
bool CheckBlock(Block *block, size_t minSize = 0);
bool CheckBlock(BlockReference *reference, size_t minSize = 0);
void GetAllocationInfo(AllocationInfo &info);
private:
inline Area *_AreaForBlock(Block *block);
Block *_AllocateBlock(size_t usableSize, bool dontCreateArea = false);
void _FreeBlock(Area *area, Block *block, bool freeReference);
inline void _RethinkAreaBucket(Area *area, AreaBucket *bucket,
bool needsDefragmenting);
inline bool _DefragmentingRecommended();
bool _Defragment();
private:
BlockReferenceManager fReferenceManager;
AreaBucket *fBuckets;
int32 fBucketCount;
size_t fAreaSize;
int32 fAreaCount;
size_t fFreeBytes;
#if ENABLE_BA_PANIC
public:
static bool fPanic;
#endif
};
#endif // BLOCK_ALLOCATOR_H

View File: BlockAllocatorArea.cpp

@@ -1,599 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#include "BlockAllocatorArea.h"
#include "DebugSupport.h"
// constructor
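// An Area object lives at the very start of the kernel area it manages:
// Create() constructs it in place at the area's base address, and this
// constructor turns everything after the (block-aligned) header into a
// single free block.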
BlockAllocator::Area::Area(area_id id, size_t size)
: fBucket(NULL),
fID(id),
fSize(size),
fFreeBytes(0),
fFreeBlockCount(1),
fUsedBlockCount(0),
fFirstBlock(NULL),
fLastBlock(NULL),
fFirstFree(NULL),
fLastFree(NULL)
{
size_t headerSize = block_align_ceil(sizeof(Area));
fFirstFree = (TFreeBlock*)((char*)this + headerSize);
fFirstFree->SetTo(NULL, block_align_floor(fSize - headerSize), false, NULL,
NULL);
fFirstBlock = fLastBlock = fLastFree = fFirstFree;
fFreeBytes = fFirstFree->GetUsableSize();
}
// Create
BlockAllocator::Area *
BlockAllocator::Area::Create(size_t size)
{
Area *area = NULL;
void *base = NULL;
#if USER
area_id id = create_area("block alloc", &base, B_ANY_ADDRESS,
size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
#else
area_id id = create_area("block alloc", &base, B_ANY_KERNEL_ADDRESS,
size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA);
#endif
if (id >= 0) {
area = new(base) Area(id, size);
} else {
ERROR("BlockAllocator::Area::Create(%lu): Failed to create area: %s\n",
size, strerror(id));
}
return area;
}
// Delete
void
BlockAllocator::Area::Delete()
{
delete_area(fID);
}
// AllocateBlock
Block *
BlockAllocator::Area::AllocateBlock(size_t usableSize, bool dontDefragment)
{
if (kMinBlockSize != block_align_ceil(sizeof(TFreeBlock))) {
FATAL("kMinBlockSize is not correctly initialized! Is %lu, but should be: "
"%lu\n", kMinBlockSize, block_align_ceil(sizeof(TFreeBlock)));
BA_PANIC("kMinBlockSize not correctly initialized.");
return NULL;
}
if (usableSize == 0)
return NULL;
Block *newBlock = NULL;
size_t size = max(usableSize + sizeof(BlockHeader), kMinBlockSize);
size = block_align_ceil(size);
if (size <= _GetBlockFreeBytes()) {
// find first fit
TFreeBlock *block = _FindFreeBlock(size);
if (!block && !dontDefragment) {
// defragmenting is necessary
_Defragment();
block = _FindFreeBlock(size);
if (!block) {
// no free block
// Our data structures seem to be corrupted, since
// _GetBlockFreeBytes() promised that we would have enough
// free space.
FATAL("Couldn't find free block of min size %lu after "
"defragmenting, although we should have %lu usable free "
"bytes!\n", size, _GetBlockFreeBytes());
BA_PANIC("Bad area free bytes.");
}
}
if (block) {
// found a free block
size_t remainder = block->GetSize() - size;
if (remainder >= kMinBlockSize) {
// enough space left for a free block
Block *freePrev = block->GetPreviousBlock();
// TFreeBlock *prevFree = block->GetPreviousFreeBlock();
// TFreeBlock *nextFree = block->GetNextFreeBlock();
// newBlock = block;
_MoveResizeFreeBlock(block, size, remainder);
// setup the new block
// newBlock->SetSize(size, true);
// newBlock->SetFree(false);
newBlock = _MakeUsedBlock(block, 0, freePrev, size, true);
} else {
// not enough space left: take the free block over completely
// remove the block from the free list
_RemoveFreeBlock(block);
newBlock = block;
newBlock->SetFree(false);
}
if (fFreeBlockCount)
fFreeBytes -= newBlock->GetSize();
else
fFreeBytes = 0;
fUsedBlockCount++;
}
}
D(SanityCheck());
return newBlock;
}
// FreeBlock
void
BlockAllocator::Area::FreeBlock(Block *block, bool dontDefragment)
{
if (block) {
// mark the block free and insert it into the free list
block->SetFree(true);
TFreeBlock *freeBlock = (TFreeBlock*)block;
_InsertFreeBlock(freeBlock);
fUsedBlockCount--;
if (fFreeBlockCount == 1)
fFreeBytes += freeBlock->GetUsableSize();
else
fFreeBytes += freeBlock->GetSize();
// try coalescing with the next and the previous free block
D(SanityCheck());
_CoalesceWithNext(freeBlock);
D(SanityCheck());
_CoalesceWithNext(freeBlock->GetPreviousFreeBlock());
// defragment, if sensible
if (!dontDefragment && _DefragmentingRecommended())
_Defragment();
D(SanityCheck());
}
}
// ResizeBlock
Block *
BlockAllocator::Area::ResizeBlock(Block *block, size_t newUsableSize,
bool dontDefragment)
{
//PRINT(("Area::ResizeBlock(%p, %lu)\n", block, newUsableSize));
// newUsableSize must be >0 !
if (newUsableSize == 0)
return NULL;
Block *resultBlock = NULL;
if (block) {
size_t size = block->GetSize();
size_t newSize = max(newUsableSize + sizeof(BlockHeader),
kMinBlockSize);
newSize = block_align_ceil(newSize);
if (newSize == size) {
// size doesn't change: nothing to do
resultBlock = block;
} else if (newSize < size) {
// shrink the block
size_t sizeDiff = size - newSize;
Block *nextBlock = block->GetNextBlock();
if (nextBlock && nextBlock->IsFree()) {
// join the space with the adjoining free block
TFreeBlock *freeBlock = nextBlock->ToFreeBlock();
_MoveResizeFreeBlock(freeBlock, -sizeDiff,
freeBlock->GetSize() + sizeDiff);
// resize the block and we're done
block->SetSize(newSize, true);
fFreeBytes += sizeDiff;
} else if (sizeDiff >= sizeof(TFreeBlock)) {
// the freed space is large enough for a free block
TFreeBlock *newFree = _MakeFreeBlock(block, newSize, block,
sizeDiff, nextBlock, NULL, NULL);
_InsertFreeBlock(newFree);
block->SetSize(newSize, true);
if (fFreeBlockCount == 1)
fFreeBytes += newFree->GetUsableSize();
else
fFreeBytes += newFree->GetSize();
if (!dontDefragment && _DefragmentingRecommended())
_Defragment();
} // else: insufficient space for a free block: no changes
resultBlock = block;
} else {
//PRINT((" grow...\n"));
// grow the block
size_t sizeDiff = newSize - size;
Block *nextBlock = block->GetNextBlock();
if (nextBlock && nextBlock->IsFree()
&& nextBlock->GetSize() >= sizeDiff) {
//PRINT((" adjoining free block\n"));
// there is an adjoining free block and it is large enough
TFreeBlock *freeBlock = nextBlock->ToFreeBlock();
size_t freeSize = freeBlock->GetSize();
if (freeSize - sizeDiff >= sizeof(TFreeBlock)) {
// the remaining space is still large enough for a free
// block
_MoveResizeFreeBlock(freeBlock, sizeDiff,
freeSize - sizeDiff);
block->SetSize(newSize, true);
fFreeBytes -= sizeDiff;
} else {
// the remaining free space wouldn't be large enough for
// a free block: consume the free block completely
Block *freeNext = freeBlock->GetNextBlock();
_RemoveFreeBlock(freeBlock);
block->SetSize(size + freeSize, freeNext);
_FixBlockList(block, block->GetPreviousBlock(), freeNext);
if (fFreeBlockCount == 0)
fFreeBytes = 0;
else
fFreeBytes -= freeSize;
}
resultBlock = block;
} else {
//PRINT((" no adjoining free block\n"));
// no (large enough) adjoining free block: allocate
// a new block and copy the data to it
BlockReference *reference = block->GetReference();
resultBlock = AllocateBlock(newUsableSize, dontDefragment);
block = reference->GetBlock();
if (resultBlock) {
resultBlock->SetReference(reference);
memcpy(resultBlock->GetData(), block->GetData(),
block->GetUsableSize());
FreeBlock(block, dontDefragment);
resultBlock = reference->GetBlock();
}
}
}
}
D(SanityCheck());
//PRINT(("Area::ResizeBlock() done: %p\n", resultBlock));
return resultBlock;
}
// SanityCheck
bool
BlockAllocator::Area::SanityCheck() const
{
// area ID
if (fID < 0) {
FATAL("Area ID < 0: %" B_PRIx32 "\n", fID);
BA_PANIC("Bad area ID.");
return false;
}
// size
size_t areaHeaderSize = block_align_ceil(sizeof(Area));
if (fSize < areaHeaderSize + sizeof(TFreeBlock)) {
FATAL("Area too small to contain area header and at least one free "
"block: %lu bytes\n", fSize);
BA_PANIC("Bad area size.");
return false;
}
// free bytes
if (fFreeBytes > fSize) {
FATAL("Free size greater than area size: %lu vs %lu\n", fFreeBytes,
fSize);
BA_PANIC("Bad area free bytes.");
return false;
}
// block count
if (fFreeBlockCount + fUsedBlockCount == 0) {
FATAL("Area contains no blocks at all.\n");
BA_PANIC("Bad area block count.");
return false;
}
// block list
uint32 usedBlockCount = 0;
uint32 freeBlockCount = 0;
size_t freeBytes = 0;
if (!fFirstBlock || !fLastBlock) {
FATAL("Invalid block list: first or last block NULL: first: %p, "
"last: %p\n", fFirstBlock, fLastBlock);
BA_PANIC("Bad area block list.");
return false;
} else {
// iterate through block list and also check free list
int32 blockCount = fFreeBlockCount + fUsedBlockCount;
Block *block = fFirstBlock;
Block *prevBlock = NULL;
Block *prevFree = NULL;
Block *nextFree = fFirstFree;
bool blockListOK = true;
for (int32 i = 0; i < blockCount; i++) {
blockListOK = false;
if (!block) {
FATAL("Encountered NULL in block list at index %" B_PRId32 ", although "
"list should have %" B_PRId32 " blocks\n", i, blockCount);
BA_PANIC("Bad area block list.");
return false;
}
addr_t address = (addr_t)block;
// block within area?
if (address < (addr_t)this + areaHeaderSize
|| address + sizeof(TFreeBlock) > (addr_t)this + fSize) {
FATAL("Utterly mislocated block: %p, area: %p, "
"size: %lu\n", block, this, fSize);
BA_PANIC("Bad area block.");
return false;
}
// block too large for area?
size_t blockSize = block->GetSize();
if (blockSize < sizeof(TFreeBlock)
|| address + blockSize > (addr_t)this + fSize) {
FATAL("Mislocated block: %p, size: %lu, area: %p, "
"size: %lu\n", block, blockSize, this, fSize);
BA_PANIC("Bad area block.");
return false;
}
// alignment
if (block_align_floor(address) != address
|| block_align_floor(blockSize) != blockSize) {
FATAL("Block %" B_PRId32 " not properly aligned: %p, size: %lu\n",
i, block, blockSize);
BA_PANIC("Bad area block.");
return false;
}
// previous block
if (block->GetPreviousBlock() != prevBlock) {
FATAL("Previous block of block %" B_PRId32 " was not the previous "
"block in list: %p vs %p\n", i,
block->GetPreviousBlock(), prevBlock);
BA_PANIC("Bad area block list.");
return false;
}
// additional checks for free block list
if (block->IsFree()) {
freeBlockCount++;
TFreeBlock *freeBlock = block->ToFreeBlock();
if (prevFree)
freeBytes += freeBlock->GetSize();
else
freeBytes += freeBlock->GetUsableSize();
// block == next free block of previous free block
if (freeBlock != nextFree) {
FATAL("Free block %" B_PRId32 " is not the next block in free "
"list: %p vs %p\n", i, freeBlock, nextFree);
BA_PANIC("Bad area free list.");
return false;
}
// previous free block
if (freeBlock->GetPreviousFreeBlock() != prevFree) {
FATAL("Previous free block of block %" B_PRId32 " was not the "
" previous block in free list: %p vs %p\n", i,
freeBlock->GetPreviousFreeBlock(), prevFree);
BA_PANIC("Bad area free list.");
return false;
}
prevFree = freeBlock;
nextFree = freeBlock->GetNextFreeBlock();
} else
usedBlockCount++;
prevBlock = block;
block = block->GetNextBlock();
blockListOK = true;
}
// final checks on block list
if (blockListOK) {
if (block) {
FATAL("More blocks in block list than expected\n");
BA_PANIC("Bad area block count.");
return false;
} else if (fLastBlock != prevBlock) {
FATAL("last block in block list was %p, but should be "
"%p\n", fLastBlock, prevBlock);
BA_PANIC("Bad area last block.");
return false;
} else if (prevFree != fLastFree) {
FATAL("last block in free list was %p, but should be %p\n",
fLastFree, prevFree);
BA_PANIC("Bad area last free block.");
return false;
}
// block counts (a bit redundant)
if (freeBlockCount != fFreeBlockCount) {
FATAL("Free block count is %" B_PRIuSIZE ", but should be %" B_PRIu32 "\n",
fFreeBlockCount, freeBlockCount);
BA_PANIC("Bad area free block count.");
return false;
}
if (usedBlockCount != fUsedBlockCount) {
FATAL("Used block count is %" B_PRIuSIZE ", but should be %" B_PRIu32 "\n",
fUsedBlockCount, usedBlockCount);
BA_PANIC("Bad area used block count.");
return false;
}
// free bytes
if (fFreeBytes != freeBytes) {
FATAL("Free bytes is %lu, but should be %lu\n",
fFreeBytes, freeBytes);
BA_PANIC("Bad area free bytes.");
return false;
}
}
}
return true;
}
// CheckBlock
bool
BlockAllocator::Area::CheckBlock(Block *checkBlock, size_t minSize)
{
for (Block *block = fFirstBlock; block; block = block->GetNextBlock()) {
if (block == checkBlock)
return (block->GetUsableSize() >= minSize);
}
FATAL("Block %p is not in area %p!\n", checkBlock, this);
BA_PANIC("Invalid Block.");
return false;
}
// _FindFreeBlock
TFreeBlock *
BlockAllocator::Area::_FindFreeBlock(size_t minSize)
{
// first fit
for (TFreeBlock *block = GetFirstFreeBlock();
block;
block = block->GetNextFreeBlock()) {
if (block->GetSize() >= minSize)
return block;
}
return NULL;
}
// _InsertFreeBlock
void
BlockAllocator::Area::_InsertFreeBlock(TFreeBlock *block)
{
if (block) {
// find the free block before which this one has to be inserted
TFreeBlock *nextFree = NULL;
for (nextFree = GetFirstFreeBlock();
nextFree;
nextFree = nextFree->GetNextFreeBlock()) {
if ((addr_t)nextFree > (addr_t)block)
break;
}
// get the previous block and insert the block between the two
TFreeBlock *prevFree
= (nextFree ? nextFree->GetPreviousFreeBlock() : fLastFree);
_FixFreeList(block, prevFree, nextFree);
fFreeBlockCount++;
}
}
// _RemoveFreeBlock
void
BlockAllocator::Area::_RemoveFreeBlock(TFreeBlock *block)
{
if (block) {
TFreeBlock *prevFree = block->GetPreviousFreeBlock();
TFreeBlock *nextFree = block->GetNextFreeBlock();
if (prevFree)
prevFree->SetNextFreeBlock(nextFree);
else
fFirstFree = nextFree;
if (nextFree)
nextFree->SetPreviousFreeBlock(prevFree);
else
fLastFree = prevFree;
}
fFreeBlockCount--;
}
// _MoveResizeFreeBlock
TFreeBlock *
BlockAllocator::Area::_MoveResizeFreeBlock(TFreeBlock *freeBlock,
ssize_t offset, size_t newSize)
{
TFreeBlock *movedFree = NULL;
if (freeBlock && offset) {
// move the header of the free block
Block *freePrev = freeBlock->GetPreviousBlock();
TFreeBlock *prevFree = freeBlock->GetPreviousFreeBlock();
TFreeBlock *nextFree = freeBlock->GetNextFreeBlock();
movedFree = _MakeFreeBlock(freeBlock, offset, freePrev, newSize,
freeBlock->HasNextBlock(), prevFree, nextFree);
// update the free list
_FixFreeList(movedFree, prevFree, nextFree);
}
return movedFree;
}
// _MakeFreeBlock
inline
TFreeBlock *
BlockAllocator::Area::_MakeFreeBlock(void *address, ssize_t offset,
Block *previous, size_t size,
bool hasNext, TFreeBlock *previousFree,
TFreeBlock *nextFree)
{
TFreeBlock *block = (TFreeBlock*)((char*)address + offset);
block->SetTo(previous, size, hasNext, previousFree, nextFree);
if (hasNext)
block->GetNextBlock()->SetPreviousBlock(block);
else
fLastBlock = block;
return block;
}
// _CoalesceWithNext
bool
BlockAllocator::Area::_CoalesceWithNext(TFreeBlock *block)
{
bool result = false;
TFreeBlock *nextFree = NULL;
if (block && (nextFree = block->GetNextFreeBlock()) != NULL
&& block->GetNextBlock() == nextFree) {
_RemoveFreeBlock(nextFree);
Block *nextBlock = nextFree->GetNextBlock();
block->SetSize(block->GetSize() + nextFree->GetSize(), nextBlock);
if (nextBlock)
nextBlock->SetPreviousBlock(block);
else
fLastBlock = block;
result = true;
}
return result;
}
// _MakeUsedBlock
inline
Block *
BlockAllocator::Area::_MakeUsedBlock(void *address, ssize_t offset,
Block *previous, size_t size,
bool hasNext)
{
Block *block = (Block*)((char*)address + offset);
block->SetTo(previous, size, false, hasNext, NULL);
if (hasNext)
block->GetNextBlock()->SetPreviousBlock(block);
else
fLastBlock = block;
return block;
}
// _Defragment
void
BlockAllocator::Area::_Defragment()
{
D(SanityCheck());
//PRINT(("BlockAllocator::Area::_Defragment()\n"));
// A trivial strategy for now: Keep the last free block and move the
// others so that they can be joined with it. This is done iteratively
// by moving the first free block so that it adjoins the second one and
// coalescing the two. A free block is moved by moving the data blocks in
// between.
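// Schematically (U = used, F = free):
//   | F1 | U1 U2 U3 | F2 | ...  ->  | U1 U2 U3 | F1+F2 | ...
// The used run between the first two free blocks is moved down via
// memmove() and the two free blocks are then coalesced.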
TFreeBlock *nextFree = NULL;
while (fFirstFree && (nextFree = fFirstFree->GetNextFreeBlock()) != NULL) {
Block *prevBlock = fFirstFree->GetPreviousBlock();
Block *nextBlock = fFirstFree->GetNextBlock();
size_t size = fFirstFree->GetSize();
// Used blocks are position independent relative to each other. We can move
// them en bloc and only need to adjust the previous pointer of the first
// one.
if (!nextBlock->IsFree()) {
// move the used blocks
size_t chunkSize = (char*)nextFree - (char*)nextBlock;
Block *nextFreePrev = nextFree->GetPreviousBlock();
Block *movedBlock = fFirstFree;
memmove(movedBlock, nextBlock, chunkSize);
movedBlock->SetPreviousBlock(prevBlock);
// init the first free block
Block *movedNextFreePrev = (Block*)((char*)nextFreePrev - size);
fFirstFree = _MakeFreeBlock(movedBlock, chunkSize,
movedNextFreePrev, size, true, NULL, nextFree);
nextFree->SetPreviousFreeBlock(fFirstFree);
// fix the references of the moved blocks
for (Block *block = movedBlock;
block != fFirstFree;
block = block->GetNextBlock()) {
block->FixReference();
}
} else {
// uncoalesced adjoining free block: That should never happen,
// since we always coalesce as early as possible.
INFORM(("Warning: Found uncoalesced adjoining free blocks!\n"));
}
// coalesce the first two blocks
D(SanityCheck());
_CoalesceWithNext(fFirstFree);
D(SanityCheck());
}
//D(SanityCheck());
//PRINT(("BlockAllocator::Area::_Defragment() done\n"));
}

View File: BlockAllocatorArea.h

@@ -1,179 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_ALLOCATOR_AREA_H
#define BLOCK_ALLOCATOR_AREA_H
#include <util/DoublyLinkedList.h>
#include "BlockAllocator.h"
#include "BlockAllocatorMisc.h"
class BlockAllocator::Area : public DoublyLinkedListLinkImpl<Area> {
public:
static Area *Create(size_t size);
void Delete();
inline void SetBucket(AreaBucket *bucket) { fBucket = bucket; }
inline AreaBucket *GetBucket() const { return fBucket; }
inline Block *GetFirstBlock() const { return fFirstBlock; }
inline Block *GetLastBlock() const { return fLastBlock; }
inline TFreeBlock *GetFirstFreeBlock() const { return fFirstFree; }
inline TFreeBlock *GetLastFreeBlock() const { return fLastFree; }
inline bool IsEmpty() const { return (fUsedBlockCount == 0); }
inline Block *GetFirstUsedBlock() const;
static inline size_t GetMaxFreeBytesFor(size_t areaSize);
inline size_t GetFreeBytes() const { return fFreeBytes; }
inline bool NeedsDefragmenting() const { return (fFreeBlockCount > 1); }
inline int32 GetBucketIndex();
Block *AllocateBlock(size_t usableSize, bool dontDefragment = false);
void FreeBlock(Block *block, bool dontDefragment = false);
Block *ResizeBlock(Block *block, size_t newSize,
bool dontDefragment = false);
// debugging only
bool SanityCheck() const;
bool CheckBlock(Block *block, size_t minSize = 0);
private:
inline size_t _GetBlockFreeBytes()
{ return fFreeBytes + sizeof(BlockHeader); }
Area(area_id id, size_t size);
~Area();
inline void _FixBlockList(Block *block, Block *prevBlock,
Block *nextBlock);
inline void _FixFreeList(TFreeBlock *block, TFreeBlock *prevFree,
TFreeBlock *nextFree);
TFreeBlock *_FindFreeBlock(size_t minSize);
void _InsertFreeBlock(TFreeBlock *block);
void _RemoveFreeBlock(TFreeBlock *block);
TFreeBlock * _MoveResizeFreeBlock(TFreeBlock *freeBlock, ssize_t offset,
size_t newSize);
inline TFreeBlock *_MakeFreeBlock(void *address, ssize_t offset,
Block *previous, size_t size, bool hasNext, TFreeBlock *previousFree,
TFreeBlock *nextFree);
bool _CoalesceWithNext(TFreeBlock *block);
inline Block *_MakeUsedBlock(void *address, ssize_t offset,
Block *previous, size_t size, bool hasNext);
inline bool _DefragmentingRecommended();
void _Defragment();
private:
AreaBucket *fBucket;
area_id fID;
size_t fSize;
size_t fFreeBytes;
size_t fFreeBlockCount;
size_t fUsedBlockCount;
Block *fFirstBlock;
Block *fLastBlock;
TFreeBlock *fFirstFree;
TFreeBlock *fLastFree;
};
typedef BlockAllocator::Area Area;
// inline methods
// debugging
#if BA_DEFINE_INLINES
// GetFirstUsedBlock
inline
Block *
BlockAllocator::Area::GetFirstUsedBlock() const
{
// Two assumptions:
// 1) There is always a first block. If that isn't so, our structures are
// corrupt.
// 2) If the first block is free, the second (if any) is not. Otherwise
// there would be adjoining free blocks, which our coalescing strategy
// prevents.
return (fFirstBlock->IsFree() ? fFirstBlock->GetNextBlock() : fFirstBlock);
}
// GetMaxFreeBytesFor
inline
size_t
BlockAllocator::Area::GetMaxFreeBytesFor(size_t areaSize)
{
size_t headerSize = block_align_ceil(sizeof(Area));
return Block::GetUsableSizeFor(block_align_floor(areaSize - headerSize));
}
// GetBucketIndex
inline
int32
BlockAllocator::Area::GetBucketIndex()
{
return bucket_containing_size(GetFreeBytes());
}
// _FixBlockList
inline
void
BlockAllocator::Area::_FixBlockList(Block *block, Block *prevBlock,
Block *nextBlock)
{
if (block) {
if (prevBlock)
prevBlock->SetNextBlock(block);
else
fFirstBlock = block;
if (nextBlock)
nextBlock->SetPreviousBlock(block);
else
fLastBlock = block;
}
}
// _FixFreeList
inline
void
BlockAllocator::Area::_FixFreeList(TFreeBlock *block, TFreeBlock *prevFree,
TFreeBlock *nextFree)
{
if (block) {
if (prevFree)
prevFree->SetNextFreeBlock(block);
else
fFirstFree = block;
if (nextFree)
nextFree->SetPreviousFreeBlock(block);
else
fLastFree = block;
block->SetPreviousFreeBlock(prevFree);
block->SetNextFreeBlock(nextFree);
}
}
// _DefragmentingRecommended
inline
bool
BlockAllocator::Area::_DefragmentingRecommended()
{
// Defragmenting condition: more than 5 free blocks and a free/used block
// ratio greater than 1/10. Not sure whether that makes much sense. ;-)
return (fFreeBlockCount > 5 && fUsedBlockCount / fFreeBlockCount < 10);
}
#endif // BA_DEFINE_INLINES
#endif // BLOCK_ALLOCATOR_AREA_H

View File: BlockAllocatorAreaBucket.cpp

@@ -1,53 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#include "BlockAllocatorAreaBucket.h"
// constructor
BlockAllocator::AreaBucket::AreaBucket()
: fAreas(),
fIndex(-1),
fMinSize(0),
fMaxSize(0)
{
}
// destructor
BlockAllocator::AreaBucket::~AreaBucket()
{
while (Area *area = fAreas.First()) {
RemoveArea(area);
area->Delete();
}
}
// SanityCheck
bool
BlockAllocator::AreaBucket::SanityCheck(bool deep) const
{
// check area list
for (Area *area = GetFirstArea(); area; area = GetNextArea(area)) {
if (deep) {
if (!area->SanityCheck())
return false;
}
// bucket
if (area->GetBucket() != this) {
FATAL("Area %p is in bucket %p, but thinks it is in bucket %p\n",
area, this, area->GetBucket());
BA_PANIC("Wrong area bucket.");
return false;
}
// size
size_t areaSize = area->GetFreeBytes();
if (areaSize < fMinSize || areaSize >= fMaxSize) {
FATAL("Area is in wrong bucket: free: %lu, min: %lu, max: %lu\n",
areaSize, fMinSize, fMaxSize);
BA_PANIC("Area in wrong bucket.");
return false;
}
}
return true;
}

View File: BlockAllocatorAreaBucket.h

@@ -1,101 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_ALLOCATOR_AREA_BUCKET_H
#define BLOCK_ALLOCATOR_AREA_BUCKET_H
#include <util/DoublyLinkedList.h>
#include "BlockAllocator.h"
#include "BlockAllocatorArea.h"
#include "DebugSupport.h"
class BlockAllocator::AreaBucket {
public:
AreaBucket();
~AreaBucket();
inline void SetIndex(int32 index) { fIndex = index; }
inline int32 GetIndex() const { return fIndex; }
inline void SetSizeLimits(size_t minSize, size_t maxSize);
inline size_t GetMinSize() const { return fMinSize; } // incl.
inline size_t GetMaxSize() const { return fMaxSize; } // excl.
inline void AddArea(Area *area);
inline void RemoveArea(Area *area);
inline Area *GetFirstArea() const { return fAreas.First(); }
inline Area *GetLastArea() const { return fAreas.Last(); }
inline Area *GetNextArea(Area* area) const;
inline bool IsEmpty() const { return fAreas.IsEmpty(); }
// debugging only
bool SanityCheck(bool deep = false) const;
private:
DoublyLinkedList<Area> fAreas;
int32 fIndex;
size_t fMinSize;
size_t fMaxSize;
};
typedef BlockAllocator::AreaBucket AreaBucket;
// inline methods
// debugging
#if BA_DEFINE_INLINES
// SetSizeLimits
/*! \brief Sets the size limits for areas this bucket may contain.
\param minSize Minimal area size. Inclusive.
\param maxSize Maximal area size. Exclusive.
*/
inline
void
BlockAllocator::AreaBucket::SetSizeLimits(size_t minSize, size_t maxSize)
{
fMinSize = minSize;
fMaxSize = maxSize;
}
// AddArea
inline
void
BlockAllocator::AreaBucket::AddArea(Area *area)
{
if (area) {
fAreas.Insert(area, area->NeedsDefragmenting());
area->SetBucket(this);
D(SanityCheck(false));
}
}
// RemoveArea
inline
void
BlockAllocator::AreaBucket::RemoveArea(Area *area)
{
if (area) {
fAreas.Remove(area);
area->SetBucket(NULL);
D(SanityCheck(false));
}
}
// GetNextArea
inline
Area *
BlockAllocator::AreaBucket::GetNextArea(Area* area) const
{
return fAreas.GetNext(area);
}
#endif // BA_DEFINE_INLINES
#endif // BLOCK_ALLOCATOR_AREA_BUCKET_H

View File: BlockAllocatorMisc.h

@@ -1,38 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_ALLOCATOR_MISC_H
#define BLOCK_ALLOCATOR_MISC_H
#include "Block.h"
#include "Misc.h"
// block alignment -- start offsets and size
static const size_t kBlockAlignment = 4; // must be a power of 2
// block_align_{floor,ceil}
static inline size_t block_align_floor(size_t value)
{ return value & ~(kBlockAlignment - 1); }
static inline size_t block_align_ceil(size_t value)
{ return (value + kBlockAlignment - 1) & ~(kBlockAlignment - 1); }
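// For example, with kBlockAlignment == 4: block_align_floor(13) == 12 and
// block_align_ceil(13) == 16; already-aligned values pass through
// unchanged.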
// minimal size of a gross/net block
// BAD DOG: No initializers in the kernel!
//static const size_t kMinBlockSize = block_align_ceil(sizeof(TFreeBlock));
#define kMinBlockSize (block_align_ceil(sizeof(TFreeBlock)))
static const size_t kMinNetBlockSize = 8;
static const size_t kDefragmentingTolerance = 10240;
// bucket_containing_size -- bucket for an area with the given free size
static inline int bucket_containing_size(size_t size)
{ return fls(size / kMinNetBlockSize) + 1; }
// bucket_containing_min_size -- bucket containing areas >= size
static inline int
bucket_containing_min_size(size_t size)
{ return (size ? bucket_containing_size(size - 1) + 1 : 0); }
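// Example (assuming fls() returns the zero-based index of the highest set
// bit and -1 for 0): for a size of 100, 100 / 8 == 12 and fls(12) == 3, so
// bucket_containing_size(100) == 4, whose range [64, 128) indeed contains
// 100.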
#endif // BLOCK_ALLOCATOR_MISC_H

View File: BlockReferenceManager.cpp

@@ -1,133 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#include "AllocationInfo.h"
#include "Block.h"
#include "BlockAllocator.h" // only for BA_PANIC
#include "BlockReferenceManager.h"
#include "DebugSupport.h"
static const int kBlockReferenceTableSize = 128;
// constructor
BlockReferenceManager::BlockReferenceManager()
: fTables(10),
fFreeList(NULL)
{
}
// destructor
BlockReferenceManager::~BlockReferenceManager()
{
}
// AllocateReference
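// The free list is threaded through the unused BlockReference slots
// themselves: each free slot's storage is reinterpreted as a pointer to the
// next free slot, so list management needs no extra memory.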
BlockReference *
BlockReferenceManager::AllocateReference()
{
BlockReference *reference = NULL;
if (!fFreeList)
_AddTable();
if (fFreeList) {
reference = fFreeList;
fFreeList = *(BlockReference**)fFreeList;
}
return reference;
}
// FreeReference
void
BlockReferenceManager::FreeReference(BlockReference *reference)
{
if (reference) {
*(BlockReference**)reference = fFreeList;
fFreeList = reference;
}
}
// CheckReference
bool
BlockReferenceManager::CheckReference(BlockReference *reference)
{
if (reference) {
addr_t address = (addr_t)reference;
int32 tableCount = fTables.CountItems();
for (int32 i = 0; i < tableCount; i++) {
Table *table = &fTables.ItemAt(i);
addr_t first = (addr_t)table->GetReferences();
addr_t last = (addr_t)(table->GetReferences() + table->GetSize());
if (first <= address && address < last)
return true;
}
}
FATAL("BlockReference %p does not exist!\n", reference);
BA_PANIC("BlockReference doesn't exist.");
return false;
}
// GetAllocationInfo
void
BlockReferenceManager::GetAllocationInfo(AllocationInfo &info)
{
info.AddListAllocation(fTables.GetCapacity(), sizeof(Table));
int32 count = fTables.CountItems();
for (int32 i = 0; i < count; i++) {
Table &table = fTables.ItemAt(i);
info.AddOtherAllocation(table.GetSize() * sizeof(BlockReference));
}
}
// _AddTable
status_t
BlockReferenceManager::_AddTable()
{
status_t error = B_OK;
// add a new table
Table dummy;
if (fTables.AddItem(dummy)) {
int32 index = fTables.CountItems() - 1;
Table &table = fTables.ItemAt(index);
error = table.Init(kBlockReferenceTableSize);
if (error == B_OK) {
// add the references to the free list
uint32 count = table.GetSize();
BlockReference *references = table.GetReferences();
for (uint32 i = 0; i < count; i++) {
BlockReference *reference = references + i;
*(BlockReference**)reference = fFreeList;
fFreeList = reference;
}
} else
fTables.RemoveItem(index);
} else
SET_ERROR(error, B_NO_MEMORY);
return error;
}
// Table
// destructor
BlockReferenceManager::Table::~Table()
{
if (fReferences)
delete[] fReferences;
}
// Init
status_t
BlockReferenceManager::Table::Init(int32 size)
{
status_t error = (size > 0 ? B_OK : B_BAD_VALUE);
if (error == B_OK) {
fReferences = new(std::nothrow) BlockReference[size];
if (fReferences)
fSize = size;
else
SET_ERROR(error, B_NO_MEMORY);
}
return error;
}

View File: BlockReferenceManager.h

@@ -1,52 +0,0 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* All rights reserved. Distributed under the terms of the MIT license.
*/
#ifndef BLOCK_REFERENCE_MANAGER_H
#define BLOCK_REFERENCE_MANAGER_H
#include <new>
#include "List.h"
class AllocationInfo;
class BlockReference;
class BlockReferenceManager {
public:
BlockReferenceManager();
~BlockReferenceManager();
BlockReference *AllocateReference();
void FreeReference(BlockReference *reference);
// debugging only
bool CheckReference(BlockReference *reference);
void GetAllocationInfo(AllocationInfo &info);
private:
status_t _AddTable();
private:
class Table {
public:
Table() : fSize(0), fReferences(NULL) {}
Table(int) : fSize(0), fReferences(NULL) {}
~Table();
status_t Init(int32 size);
BlockReference *GetReferences() { return fReferences; }
int32 GetSize() const { return fSize; }
private:
uint32 fSize;
BlockReference *fReferences;
};
List<Table> fTables;
BlockReference *fFreeList;
};
#endif // BLOCK_REFERENCE_MANAGER_H