BTRFS: Implement ExtentAllocator

There are four new classes/structs:
* CachedExtent is an AVLTreeNode that caches an extent located in the
extent tree; an extent can be free or allocated, and either a tree block
(metadata) or a data extent. It also holds a reference count, which is
incremented each time another extent refers to it (COW), and the item
data, which is only set for allocated extents (NULL for free ones).
* CachedExtentTree is an AVLTree that caches the whole extent tree and
uses CachedExtent as its node type.
* BlockGroup represents the group of extents covering the region
reserved for each type of allocated extent, e.g. the region for data
extents or for metadata blocks. It is responsible for inserting nodes
into the CachedExtentTree.
* Finally, ExtentAllocator knows how to allocate and deallocate extents,
but for now only allocation is hooked up. The actual allocation and
deallocation work is already implemented in _AddFreeExtent and
_AddAllocatedExtent in CachedExtentTree; however, deallocation is not
needed yet and will be finished later. A rough usage sketch follows
below.
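
As a rough usage sketch (illustration only; the helper function and the
64KiB size below are hypothetical and not part of this commit), a caller
could drive the allocator of a mounted volume like this:

#include "ExtentAllocator.h"

static status_t
AllocateExampleBlocks(Volume* volume)
{
	// Volume::Mount() already created the allocator and called Initialize().
	ExtentAllocator* allocator = volume->GetAllocator();

	// Allocate one tree block (metadata); "found" receives its start offset.
	uint64 found;
	status_t status = allocator->AllocateTreeBlock(found);
	if (status != B_OK)
		return status;

	// Allocate a 64KiB data extent anywhere in the data block group.
	return allocator->AllocateDataBlock(found, 64 * 1024);
}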

Signed-off-by: Augustin Cavalier <waddlesplash@gmail.com>
hyche 2017-08-13 16:42:23 +07:00 committed by Augustin Cavalier
parent bfd7a4fb42
commit af419b0e98
7 changed files with 865 additions and 1 deletion


@@ -0,0 +1,692 @@
#include "ExtentAllocator.h"
#include "Chunk.h"
CachedExtent*
CachedExtent::Create(uint64 offset, uint64 length, uint64 flags)
{
CachedExtent* self = new(std::nothrow) CachedExtent();
if (self == NULL)
return NULL;
self->offset = offset;
self->length = length;
self->refCount = 0;
if ((flags & BTRFS_EXTENT_FLAG_ALLOCATED) != 0)
self->refCount = 1;
self->flags = flags;
self->item = NULL;
return self;
}
void
CachedExtent::Delete()
{
if (item != NULL)
free(item);
item = NULL;
delete this;
}
bool
CachedExtent::IsAllocated() const
{
return (flags & BTRFS_EXTENT_FLAG_ALLOCATED) != 0;
}
bool
CachedExtent::IsData() const
{
return (flags & BTRFS_EXTENT_FLAG_DATA) != 0;
}
void
CachedExtent::Info() const
{
const char* extentType = "allocated tree block";
if (IsAllocated() == false && IsData() == false)
extentType = "free tree block";
else if (IsAllocated() == false && IsData() == true)
extentType = "free data extent";
else if (IsAllocated() == true && IsData() == true)
extentType = "allocated data extent";
TRACE("%s at %" B_PRIu64 " length %" B_PRIu64 " refCount %i\n",
extentType, offset, length, refCount);
}
// ExtentTree Implementation
CachedExtentTree::CachedExtentTree(const TreeDefinition& definition)
:
AVLTree<TreeDefinition>(definition)
{
}
CachedExtentTree::~CachedExtentTree()
{
Delete();
}
/* Find the first extent that covers or lies after "offset", has
 * length >= "size", and also satisfies the condition "type".
 */
status_t
CachedExtentTree::FindNext(CachedExtent** chosen, uint64 offset, uint64 size,
uint64 type)
{
CachedExtent* found = Find(offset);
while (found != NULL && (found->flags != type || found->length < size))
found = Next(found);
if (found == NULL)
return B_ENTRY_NOT_FOUND;
*chosen = found;
return B_OK;
}
/* Insert free extents for all the holes left between the existing
 * allocated extents in the tree, from lowerBound to upperBound.
 */
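// For example (hypothetical numbers): with allocated extents at [4K, 8K)
// and [12K, 16K) and bounds 0..20K, this inserts the free extents
// [0, 4K), [8K, 12K) and [16K, 20K).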
status_t
CachedExtentTree::FillFreeExtents(uint64 lowerBound, uint64 upperBound)
{
CachedExtent* node = FindClosest(lowerBound, false);
CachedExtent* hole = NULL;
status_t status = B_OK;
uint64 flags = node->flags & (~BTRFS_EXTENT_FLAG_ALLOCATED);
if (lowerBound < node->offset) {
hole = CachedExtent::Create(lowerBound, node->offset - lowerBound,
flags);
status = _AddFreeExtent(hole);
if (status != B_OK)
return status;
}
CachedExtent* next = NULL;
while ((next = Next(node)) != NULL && next->End() < upperBound) {
if (node->End() == next->offset) {
node = next;
continue;
}
hole = CachedExtent::Create(node->End(), next->offset - node->End(),
flags);
status = _AddFreeExtent(hole);
if (status != B_OK)
return status;
node = next;
}
// the final node should be the right-most node
if (upperBound > node->End()) {
hole = CachedExtent::Create(node->End(), upperBound - node->End(),
flags);
status = _AddFreeExtent(hole);
}
return status;
}
void
CachedExtentTree::_RemoveExtent(CachedExtent* node)
{
node->refCount--;
if (node->refCount <= 0) {
Remove(node);
node->Delete();
}
}
status_t
CachedExtentTree::_AddAllocatedExtent(CachedExtent* node)
{
if (node == NULL || node->IsAllocated() == false)
return B_BAD_VALUE;
CachedExtent* found = Find(node->offset);
if (found == NULL) {
Insert(node);
return B_OK;
}
if ((found->IsData() && !node->IsData())
|| (!found->IsData() && node->IsData())) {
// this shouldn't happen, because each extent type is
// located in its own region
node->Delete();
return B_BAD_VALUE;
}
if (found->IsAllocated() == false) {
// split free extents (found)
if (node->End() > found->End()) {
// TODO: when to handle this ?
node->Delete();
return B_BAD_VALUE;
}
// |----- found ------|
// |-- node ---|
uint64 diff = node->offset - found->offset;
found->offset += diff + node->length;
found->length -= diff + node->length;
// diff < 0 couldn't happen because of the Compare function
if (diff > 0) {
CachedExtent* leftEmpty = CachedExtent::Create(
node->offset - diff, diff, found->flags);
_AddFreeExtent(leftEmpty);
}
if (found->length == 0) {
// free-extent that has no space
_RemoveExtent(found);
}
Insert(node);
return B_OK;
}
if (found->length == node->length) {
found->refCount++;
} else {
// TODO: What should we do in this case ?
return B_BAD_VALUE;
}
node->Delete();
return B_OK;
}
status_t
CachedExtentTree::_AddFreeExtent(CachedExtent* node)
{
if (node == NULL || node->IsAllocated() == true)
return B_BAD_VALUE;
CachedExtent* found = Find(node->offset);
if (found == NULL) {
Insert(node);
_CombineFreeExtent(node);
return B_OK;
}
if ((found->IsData() && !node->IsData())
|| (!found->IsData() && node->IsData())) {
// this shouldn't happen, because each extent type is
// located in its own region
node->Delete();
return B_BAD_VALUE;
}
if (found->End() < node->End()) {
// |---- found-----|
// |--- node ------|
CachedExtent* rightEmpty = CachedExtent::Create(found->End(),
node->End() - found->End(), node->flags);
_AddFreeExtent(rightEmpty);
node->length -= node->End() - found->End();
}
if (found->IsAllocated() == true) {
// free part of the allocated extent (found)
// |----- found ------|
// |-- node ---|
uint64 diff = node->offset - found->offset;
found->offset += diff + node->length;
found->length -= diff + node->length;
// diff < 0 couldn't happen because of the Compare function
if (diff > 0) {
CachedExtent* left = CachedExtent::Create(node->offset - diff,
diff, found->flags);
_AddAllocatedExtent(left);
}
if (found->length == 0)
_RemoveExtent(found);
Insert(node);
_CombineFreeExtent(node);
}
return B_OK;
}
status_t
CachedExtentTree::AddExtent(CachedExtent* extent)
{
if (extent->IsAllocated() == true)
return _AddAllocatedExtent(extent);
return _AddFreeExtent(extent);
}
void
CachedExtentTree::_CombineFreeExtent(CachedExtent* node)
{
// the node must be inserted into the tree before calling this function,
// otherwise it would be deleted one time too many
if (node->IsAllocated() == true)
return;
CachedExtent* other = Next(node);
if (other != NULL) {
if (node->End() == other->offset && node->flags == other->flags) {
node->length += other->length;
_RemoveExtent(other);
}
}
other = Previous(node);
if (other != NULL) {
if (other->End() == node->offset && node->flags == other->flags) {
other->length += node->length;
_RemoveExtent(node);
}
}
}
void
CachedExtentTree::_DumpInOrder(CachedExtent* node) const
{
if (node == NULL)
return;
_DumpInOrder(_GetValue(node->left));
node->Info();
_DumpInOrder(_GetValue(node->right));
}
void
CachedExtentTree::DumpInOrder() const
{
TRACE("\n");
_DumpInOrder(RootNode());
TRACE("\n");
}
void
CachedExtentTree::Delete()
{
_Delete(RootNode());
Clear();
}
void
CachedExtentTree::_Delete(CachedExtent* node)
{
if (node == NULL)
return;
_Delete(_GetValue(node->left));
_Delete(_GetValue(node->right));
node->Delete();
}
// BlockGroup
BlockGroup::BlockGroup(BTree* extentTree)
:
fItem(NULL)
{
fCurrentExtentTree = new(std::nothrow) BTree(extentTree->SystemVolume(),
extentTree->RootBlock());
}
BlockGroup::~BlockGroup()
{
if (fItem != NULL) {
free(fItem);
fItem = NULL;
}
delete fCurrentExtentTree;
fCurrentExtentTree = NULL;
}
status_t
BlockGroup::SetExtentTree(off_t rootAddress)
{
status_t status = fCurrentExtentTree->SetRoot(rootAddress, NULL);
if (status != B_OK)
return status;
if (fItem != NULL) {
// re-initialize the BlockGroup
uint64 flags = Flags();
return Initialize(flags);
}
return B_OK;
}
/* Initialize the BlockGroup that has the given flag.
 */
status_t
BlockGroup::Initialize(uint64 flag)
{
if (fCurrentExtentTree == NULL)
return B_NO_MEMORY;
if (fItem != NULL)
free(fItem);
TRACE("BlockGroup::Initialize() find block group has flag: %"
B_PRIu64 "\n", flag);
BTree::Path path(fCurrentExtentTree);
fKey.SetObjectID(0);
// find the first item in the extent tree to get its ObjectID;
// ignore the type, because a block group item is not always the first item
fKey.SetType(BTRFS_KEY_TYPE_ANY);
fCurrentExtentTree->FindNext(&path, fKey, NULL);
fKey.SetType(BTRFS_KEY_TYPE_BLOCKGROUP_ITEM);
fKey.SetOffset(0);
status_t status;
while (true) {
status = fCurrentExtentTree->FindNext(&path, fKey, (void**)&fItem);
if ((Flags() & flag) == flag || status != B_OK)
break;
fKey.SetObjectID(End());
fKey.SetOffset(0);
}
if (status != B_OK)
ERROR("BlockGroup::Initialize() cannot find block group\n");
return status;
}
status_t
BlockGroup::LoadExtent(CachedExtentTree* tree, bool inverse)
{
if (fItem == NULL)
return B_NO_INIT;
uint64 flags = (Flags() & BTRFS_BLOCKGROUP_FLAG_DATA) != 0 ?
BTRFS_EXTENT_FLAG_DATA : BTRFS_EXTENT_FLAG_TREE_BLOCK;
if (inverse == false)
flags |= BTRFS_EXTENT_FLAG_ALLOCATED;
uint64 start = Start();
CachedExtent* insert;
void* data;
uint64 extentSize = fCurrentExtentTree->SystemVolume()->BlockSize();
btrfs_key key;
key.SetType(BTRFS_KEY_TYPE_METADATA_ITEM);
if ((flags & BTRFS_EXTENT_FLAG_DATA) != 0)
key.SetType(BTRFS_KEY_TYPE_EXTENT_ITEM);
key.SetObjectID(start);
key.SetOffset(0);
TreeIterator iterator(fCurrentExtentTree, key);
status_t status;
while (true) {
status = iterator.GetNextEntry(&data);
key = iterator.Key();
if (status != B_OK) {
if (key.ObjectID() != Start())
break;
// the item wasn't found, but the key still has
// objectid == the block group's objectID because the
// key's type is < BLOCKGROUP_ITEM, so keep iterating
continue;
}
if (inverse == false) {
if ((flags & BTRFS_EXTENT_FLAG_DATA) != 0)
extentSize = key.Offset();
insert = CachedExtent::Create(key.ObjectID(), extentSize, flags);
insert->item = (btrfs_extent*)data;
} else {
extentSize = key.ObjectID() - start;
insert = CachedExtent::Create(start, extentSize, flags);
free(data); // free extent doesn't have data;
}
_InsertExtent(tree, insert);
start = key.ObjectID() + extentSize;
}
if (inverse == true)
_InsertExtent(tree, start, End() - start, flags);
return B_OK;
}
status_t
BlockGroup::_InsertExtent(CachedExtentTree* tree, uint64 start, uint64 length,
uint64 flags)
{
CachedExtent* extent = CachedExtent::Create(start, length, flags);
return _InsertExtent(tree, extent);
}
status_t
BlockGroup::_InsertExtent(CachedExtentTree* tree, CachedExtent* extent)
{
if (extent->length == 0)
return B_BAD_VALUE;
status_t status = tree->AddExtent(extent);
if (status != B_OK) {
return status;
}
return B_OK;
}
// ExtentAllocator
ExtentAllocator::ExtentAllocator(Volume* volume)
:
fVolume(volume),
fTree(NULL),
fStart((uint64)-1),
fEnd(0)
{
fTree = new(std::nothrow) CachedExtentTree(TreeDefinition());
}
ExtentAllocator::~ExtentAllocator()
{
delete fTree;
fTree = NULL;
}
status_t
ExtentAllocator::_LoadExtentTree(uint64 flags)
{
TRACE("ExtentAllocator::_LoadExtentTree() flags: %" B_PRIu64 "\n", flags);
BlockGroup blockGroup(fVolume->ExtentTree());
status_t status = blockGroup.Initialize(flags);
if (status != B_OK)
return status;
for (int i = 0; i < BTRFS_NUM_ROOT_BACKUPS; i++) {
uint64 extentRootAddr =
fVolume->SuperBlock().backup_roots[i].ExtentRoot();
if (extentRootAddr == 0)
continue; // new device has 0 root address
status = blockGroup.SetExtentTree(extentRootAddr);
if (status != B_OK)
return status;
status = blockGroup.LoadExtent(fTree, false);
if (status != B_OK)
return status;
}
if (fTree->IsEmpty()) // all 4 backup roots are 0
return B_OK;
uint64 lowerBound = blockGroup.Start();
uint64 upperBound = blockGroup.End();
status = fTree->FillFreeExtents(lowerBound, upperBound);
if (status != B_OK) {
ERROR("ExtentAllocator::_LoadExtentTree() could not fill free extents"
"start %" B_PRIu64 " end %" B_PRIu64 "\n", lowerBound,
upperBound);
return status;
}
if (fStart > lowerBound)
fStart = lowerBound;
if (fEnd < upperBound)
fEnd = upperBound;
return B_OK;
}
status_t
ExtentAllocator::Initialize()
{
TRACE("ExtentAllocator::Initialize()\n");
if (fTree == NULL)
return B_NO_MEMORY;
status_t status = _LoadExtentTree(BTRFS_BLOCKGROUP_FLAG_DATA);
if (status != B_OK) {
ERROR("ExtentAllocator:: could not load exent tree (data)\n");
return status;
}
status = _LoadExtentTree(BTRFS_BLOCKGROUP_FLAG_SYSTEM);
if (status != B_OK) {
ERROR("ExtentAllocator:: could not load exent tree (system)\n");
return status;
}
status = _LoadExtentTree(BTRFS_BLOCKGROUP_FLAG_METADATA);
if (status != B_OK) {
ERROR("ExtentAllocator:: could not load exent tree (metadata)\n");
return status;
}
fTree->DumpInOrder();
return B_OK;
}
/* Allocate an extent that starts at or after "start" and has
 * size >= "size". For now the allocation strategy is "first fit".
 */
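// For example (hypothetical numbers): with free extents at [0, 4K) and
// [16K, 64K), a request with start = 0 and size = 8K skips the first
// extent because it is too small and places the allocation at 16K.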
status_t
ExtentAllocator::_Allocate(uint64& found, uint64 start, uint64 size,
uint64 type)
{
TRACE("ExtentAllocator::_Allocate() start %" B_PRIu64 " size %" B_PRIu64 " type %"
B_PRIu64 "\n", start, size, type);
CachedExtent* chosen;
status_t status;
while (true) {
status = fTree->FindNext(&chosen, start, size, type);
if (status != B_OK)
return status;
if (TreeDefinition().Compare(start, chosen) == 0) {
if (chosen->End() - start >= size) {
// the chosen extent covers "start" and has enough space for the allocation
break;
}
start = chosen->End(); // set new start and retry
} else
start = chosen->offset;
}
TRACE("ExtentAllocator::_Allocate() found %" B_PRIu64 "\n", start);
chosen = CachedExtent::Create(start, size,
type | BTRFS_EXTENT_FLAG_ALLOCATED);
status = fTree->AddExtent(chosen);
if (status != B_OK)
return status;
found = start;
return B_OK;
}
// Allocate a block for metadata
// "flags" is the block group's flags
status_t
ExtentAllocator::AllocateTreeBlock(uint64& found, uint64 start, uint64 flags)
{
// TODO: implement more features here with flags, e.g DUP, RAID, etc
BlockGroup blockGroup(fVolume->ExtentTree());
status_t status = blockGroup.Initialize(flags);
if (status != B_OK)
return status;
if (start == (uint64)-1)
start = blockGroup.Start();
// normalize inputs
uint64 remainder = start % fVolume->BlockSize();
if (remainder != 0)
start += fVolume->BlockSize() - remainder;
status = _Allocate(found, start, fVolume->BlockSize(),
BTRFS_EXTENT_FLAG_TREE_BLOCK);
if (status != B_OK)
return status;
// check here because tree blocks live in two block groups (system and
// metadata), and the allocation might have gone past this group's limit
if (found >= blockGroup.End())
return B_BAD_DATA;
return B_OK;
}
// Allocate block for file data
status_t
ExtentAllocator::AllocateDataBlock(uint64& found, uint64 size, uint64 start,
uint64 flags)
{
// TODO: implement more features here with flags, e.g DUP, RAID, etc
if (start == (uint64)-1) {
BlockGroup blockGroup(fVolume->ExtentTree());
status_t status = blockGroup.Initialize(flags);
if (status != B_OK)
return status;
start = blockGroup.Start();
}
// normalize inputs
uint64 remainder = start % fVolume->SectorSize();
if (remainder != 0)
start += fVolume->SectorSize() - remainder;
size = size / fVolume->SectorSize() * fVolume->SectorSize();
return _Allocate(found, start, size, BTRFS_EXTENT_FLAG_DATA);
}


@@ -0,0 +1,153 @@
#ifndef EXTENT_ALLOCATOR_H
#define EXTENT_ALLOCATOR_H
#include "Volume.h"
#include "BTree.h"
//#define TRACE_BTRFS
#ifdef TRACE_BTRFS
# define TRACE(x...) dprintf("\33[34mbtrfs:\33[0m " x)
#else
# define TRACE(x...) ;
#endif
#define ERROR(x...) dprintf("\33[34mbtrfs:\33[0m " x)
struct CachedExtent : AVLTreeNode {
uint64 offset;
uint64 length;
int refCount;
uint64 flags;
btrfs_extent* item;
static CachedExtent* Create(uint64 offset, uint64 length,
uint64 flags);
void Delete();
uint64 End() const { return offset + length; }
bool IsAllocated() const;
bool IsData() const;
void Info() const;
private:
CachedExtent() {}
CachedExtent(const CachedExtent& other);
};
struct TreeDefinition {
typedef uint64 Key;
typedef CachedExtent Value;
AVLTreeNode* GetAVLTreeNode(Value* value) const
{
return value;
}
Value* GetValue(AVLTreeNode* node) const
{
return static_cast<Value*>(node);
}
int Compare(const Key& a, const Value* b) const
{
if (a < b->offset)
return -1;
if (a >= b->End())
return 1;
return 0;
}
int Compare(const Value* a, const Value* b) const
{
int comp = Compare(a->offset, b);
//TODO: check more conditions here if necessary
return comp;
}
};
struct CachedExtentTree : AVLTree<TreeDefinition> {
public:
CachedExtentTree(
const TreeDefinition& definition);
~CachedExtentTree();
status_t FindNext(CachedExtent** chosen, uint64 offset,
uint64 size, uint64 type);
status_t AddExtent(CachedExtent* extent);
status_t FillFreeExtents(uint64 lowerBound,
uint64 upperBound);
void DumpInOrder() const;
void Delete();
private:
status_t _AddAllocatedExtent(CachedExtent* node);
status_t _AddFreeExtent(CachedExtent* node);
void _CombineFreeExtent(CachedExtent* node);
void _RemoveExtent(CachedExtent* node);
void _DumpInOrder(CachedExtent* node) const;
void _Delete(CachedExtent* node);
};
class BlockGroup {
public:
BlockGroup(BTree* extentTree);
~BlockGroup();
status_t SetExtentTree(off_t rootAddress);
status_t Initialize(uint64 flag);
status_t LoadExtent(CachedExtentTree* tree,
bool inverse = false);
uint64 Flags() const { return fItem->Flags(); }
uint64 Start() const { return fKey.ObjectID(); }
uint64 End() const
{ return fKey.ObjectID() + fKey.Offset(); }
private:
BlockGroup(const BlockGroup&);
BlockGroup& operator=(const BlockGroup&);
status_t _InsertExtent(CachedExtentTree* tree,
uint64 start, uint64 length, uint64 flags);
status_t _InsertExtent(CachedExtentTree* tree,
CachedExtent* extent);
private:
btrfs_key fKey;
btrfs_block_group* fItem;
BTree* fCurrentExtentTree;
};
class ExtentAllocator {
public:
ExtentAllocator(Volume* volume);
~ExtentAllocator();
status_t Initialize();
status_t AllocateTreeBlock(uint64& found,
uint64 start = (uint64)-1,
uint64 flags = BTRFS_BLOCKGROUP_FLAG_METADATA);
status_t AllocateDataBlock(uint64& found, uint64 size,
uint64 start = (uint64)-1,
uint64 flags = BTRFS_BLOCKGROUP_FLAG_DATA);
private:
ExtentAllocator(const ExtentAllocator&);
ExtentAllocator& operator=(const ExtentAllocator&);
status_t _LoadExtentTree(uint64 flags);
status_t _Allocate(uint64& found, uint64 start,
uint64 size, uint64 type);
private:
Volume* fVolume;
CachedExtentTree* fTree;
uint64 fStart;
uint64 fEnd;
};
#endif // EXTENT_ALLOCATOR_H


@@ -16,6 +16,7 @@ KernelAddon btrfs :
Chunk.cpp
CRCTable.cpp
DirectoryIterator.cpp
ExtentAllocator.cpp
Inode.cpp
Volume.cpp
: kernel_libz.a


@@ -13,6 +13,7 @@
#include "CachedBlock.h"
#include "Chunk.h"
#include "Inode.h"
#include "ExtentAllocator.h"
//#define TRACE_BTRFS
@@ -386,6 +387,16 @@ Volume::Mount(const char* deviceName, uint32 flags)
fChecksumTree->SetRoot(root->LogicalAddress(), NULL);
free(root);
// Initialize ExtentAllocator;
fExtentAllocator = new(std::nothrow) ExtentAllocator(this);
if (fExtentAllocator == NULL)
return B_NO_MEMORY;
status = fExtentAllocator->Initialize();
if (status != B_OK) {
ERROR("could not initalize extent allocator!\n");
return status;
}
// ready
status = get_vnode(fFSVolume, BTRFS_FIRST_SUBVOLUME,
(void**)&fRootNode);
@@ -432,10 +443,12 @@ Volume::Unmount()
delete fChecksumTree;
delete fFSTree;
delete fDevTree;
delete fExtentAllocator;
fExtentTree = NULL;
fChecksumTree = NULL;
fFSTree = NULL;
fDevTree = NULL;
fExtentAllocator = NULL;
TRACE("Volume::Unmount(): Putting root node\n");
put_vnode(fFSVolume, RootNode()->ID());


@@ -17,6 +17,7 @@ enum volume_flags {
class BTree;
class Chunk;
class Inode;
class ExtentAllocator;
class Volume {
@@ -45,6 +46,7 @@ public:
uint32 SectorSize() const { return fSectorSize; }
uint32 BlockSize() const { return fBlockSize; }
Chunk* SystemChunk() const { return fChunk; }
ExtentAllocator* GetAllocator() const { return fExtentAllocator; }
btrfs_super_block& SuperBlock() { return fSuperBlock; }
@@ -72,6 +74,7 @@ private:
void* fBlockCache;
Inode* fRootNode;
ExtentAllocator* fExtentAllocator;
Chunk* fChunk;
BTree* fChunkTree;
BTree* fRootTree;


@@ -449,10 +449,11 @@ struct btrfs_extent_data_ref {
#define BTRFS_EXTENT_DATA_PRE 2
#define BTRFS_EXTENT_FLAG_DATA 1
#define BTRFS_EXTENT_FLAG_TREE_BLOCK 2
#define BTRFS_EXTENT_FLAG_ALLOCATED 4
#define BTRFS_BLOCKGROUP_FLAG_DATA 1
#define BTRFS_BLOCKGROUP_FLAG_SYSTEM 2
#define BTRFS_BLOCKGROUP_FLAG_METADA 4
#define BTRFS_BLOCKGROUP_FLAG_METADATA 4
#define BTRFS_BLOCKGROUP_FLAG_RAID0 8
#define BTRFS_BLOCKGROUP_FLAG_RAID1 16
#define BTRFS_BLOCKGROUP_FLAG_DUP 32


@@ -42,6 +42,7 @@ local btrfsSources =
Chunk.cpp
CRCTable.cpp
DirectoryIterator.cpp
ExtentAllocator.cpp
Inode.cpp
Volume.cpp
kernel_interface.cpp