bonefish + mmlr + axeld:

* Fixed a bug that could easily corrupt your disks (yeah, one of those
  again): bfs_get_file_map() truncated the last vec incorrectly and
  could return a vec that was too long -- the extra blocks were later
  happily overwritten with file data, but could have belonged to
  anything but that file (like inodes, B+trees, etc.). See the first
  sketch after this list.
* Renamed previous round_up() function to key_align().
* Added a round_up() template function, and used it where appropriate.
* The latter actually fixed two bugs where the AND mask was computed in
  32 bit although it should have been computed in 64 bit (see the
  second sketch after this list).
* Inode::FindBlockRun() should have checked the max indirect size
  instead of the direct size - this didn't cause any problems, though.
* White space cleanup.
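
The clamping error, as a minimal standalone sketch with made-up numbers
(the constants and the local round_up() helper are assumptions, not the
BFS sources): the old code measured the last vec from the start of the
block run (fileOffset) rather than from the position the vec actually
covers (offset), so the vec could extend past the file's last block.

#include <cstdint>
#include <cstdio>

// local stand-in for the helper; "to" must be a power of 2
static int64_t
round_up(int64_t value, int64_t to)
{
	return (value + to - 1) & ~(to - 1);
}

int
main()
{
	const int64_t blockSize = 2048;
	const int64_t fileSize = 10 * blockSize + 100;
		// last block is only partially used
	const int64_t fileOffset = 4 * blockSize;
		// file position where the block run starts
	const int64_t offset = 9 * blockSize;
		// file position this vec actually covers

	// old: measured from the run's start -- too long whenever offset > fileOffset
	int64_t oldLength = round_up(fileSize - fileOffset, blockSize);
	// new: measured from the vec's start -- ends with the file's last block
	int64_t newLength = round_up(fileSize - offset, blockSize);

	printf("old clamp: %lld bytes (%lld blocks)\n", (long long)oldLength,
		(long long)(oldLength / blockSize));	// 14336 bytes, 7 blocks
	printf("new clamp: %lld bytes (%lld blocks)\n", (long long)newLength,
		(long long)(newLength / blockSize));	// 4096 bytes, 2 blocks
	return 0;
}

With these assumed numbers only two blocks past "offset" still belong to
the file; the old length covered five more blocks beyond the file's last
block, which whatever wrote the mapped extent would then overwrite.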
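
And the mask-width issue the round_up() template fixes, again as a
minimal sketch under assumed values (not taken from the sources): when
"to" is a 32-bit type, ~(to - 1) is only a 32-bit mask, and ANDing it
with a 64-bit offset silently clears the upper 32 bits; casting to the
value's type before inverting, as the template does, keeps the mask
64 bits wide.

#include <cstdint>
#include <cstdio>

int
main()
{
	int64_t value = 0x200000005LL;	// an offset beyond 4 GiB
	uint32_t to = 8;				// power-of-two alignment

	// 32-bit mask: ~(to - 1) is 0xFFFFFFF8, zero-extends to 64 bit and
	// wipes out everything above 4 GiB
	int64_t broken = (value + (to - 1)) & ~(to - 1);

	// 64-bit mask: cast before inverting, like ~((IntType)to - 1) in the
	// template
	int64_t fixed = (value + (to - 1)) & ~((int64_t)to - 1);

	printf("broken: %#llx\n", (unsigned long long)broken);	// 0x8
	printf("fixed:  %#llx\n", (unsigned long long)fixed);	// 0x200000008
	return 0;
}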


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26531 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-07-20 23:59:32 +00:00
parent a0a18e64f0
commit 9083840f34
6 changed files with 37 additions and 30 deletions


@@ -1036,7 +1036,7 @@ BPlusTree::_SplitNode(bplustree_node *node, off_t nodeOffset,
}
out++;
- if (round_up(sizeof(bplustree_node) + bytesBefore + bytesAfter + bytes)
+ if (key_align(sizeof(bplustree_node) + bytesBefore + bytesAfter + bytes)
+ out * (sizeof(uint16) + sizeof(off_t)) >= size) {
// we have found the number of keys in the new node!
break;
@@ -1303,7 +1303,7 @@ BPlusTree::Insert(Transaction &transaction, const uint8 *key, uint16 keyLength,
return B_IO_ERROR;
// is the node big enough to hold the pair?
- if (int32(round_up(sizeof(bplustree_node)
+ if (int32(key_align(sizeof(bplustree_node)
+ writableNode->AllKeyLength() + keyLength)
+ (writableNode->NumKeys() + 1) * (sizeof(uint16)
+ sizeof(off_t))) < fNodeSize) {


@@ -115,7 +115,7 @@ struct bplustree_node {
enum bplustree_traversing {
BPLUSTREE_FORWARD = 1,
BPLUSTREE_BACKWARD = -1,
BPLUSTREE_BEGIN = 0,
BPLUSTREE_END = 1
};
@@ -239,7 +239,7 @@ class BPlusTree {
status_t _SeekDown(Stack<node_and_key> &stack, const uint8 *key,
uint16 keyLength);
- status_t _FindFreeDuplicateFragment(Transaction &transaction,
+ status_t _FindFreeDuplicateFragment(Transaction &transaction,
const bplustree_node *node, CachedNode &cached,
off_t *_offset, bplustree_node **_fragment,
uint32 *_index);
@@ -424,7 +424,7 @@ TreeIterator::GetPreviousEntry(void *key, uint16 *keyLength, uint16 maxLength,
// #pragma mark - bplustree_header inline functions
- inline bool
+ inline bool
bplustree_header::CheckNode(bplustree_node *node) const
{
// sanity checks (links, all_key_count)
@@ -450,7 +450,7 @@ bplustree_header::IsValidLink(off_t link) const
inline uint16 *
bplustree_node::KeyLengths() const
{
- return (uint16 *)(((char *)this) + round_up(sizeof(bplustree_node)
+ return (uint16 *)(((char *)this) + key_align(sizeof(bplustree_node)
+ AllKeyLength()));
}
@@ -472,12 +472,12 @@ bplustree_node::Keys() const
inline int32
bplustree_node::Used() const
{
- return round_up(sizeof(bplustree_node) + AllKeyLength()) + NumKeys()
+ return key_align(sizeof(bplustree_node) + AllKeyLength()) + NumKeys()
* (sizeof(uint16) + sizeof(off_t));
}
- inline bool
+ inline bool
bplustree_node::IsLeaf() const
{
return OverflowLink() == BPLUSTREE_NULL;
@@ -514,7 +514,7 @@ bplustree_node::MakeLink(uint8 type, off_t link, uint32 fragmentIndex)
}
- inline bool
+ inline bool
bplustree_node::IsDuplicate(off_t link)
{
return (LinkType(link)


@@ -1280,7 +1280,7 @@ Inode::FindBlockRun(off_t pos, block_run &run, off_t &offset)
// find matching block run
- if (data->MaxDirectRange() > 0 && pos >= data->MaxDirectRange()) {
+ if (data->MaxIndirectRange() > 0 && pos >= data->MaxDirectRange()) {
if (data->MaxDoubleIndirectRange() > 0
&& pos >= data->MaxIndirectRange()) {
// access to double indirect blocks
@@ -1581,7 +1581,7 @@ Inode::_GrowStream(Transaction &transaction, off_t size)
blocksRequested = blocksNeeded;
if (minimum > 1) {
// make sure that "blocks" is a multiple of minimum
- blocksRequested = (blocksRequested + minimum - 1) & ~(minimum - 1);
+ blocksRequested = round_up(blocksRequested, minimum);
}
// Direct block range
@@ -1694,8 +1694,7 @@ Inode::_GrowStream(Transaction &transaction, off_t size)
return status;
blocksNeeded += rest;
- blocksRequested = (blocksNeeded + NUM_ARRAY_BLOCKS - 1)
- & ~(NUM_ARRAY_BLOCKS - 1);
+ blocksRequested = round_up(blocksNeeded, NUM_ARRAY_BLOCKS);
minimum = NUM_ARRAY_BLOCKS;
// we make sure here that we have at minimum
// NUM_ARRAY_BLOCKS allocated, so if the allocation
@@ -2042,8 +2041,7 @@ Inode::NeedsTrimming()
if (IsIndex() || IsDeleted())
return false;
- off_t roundedSize = (Size() + fVolume->BlockSize() - 1)
- & ~(fVolume->BlockSize() - 1);
+ off_t roundedSize = round_up(Size(), fVolume->BlockSize());
return Node().data.MaxDirectRange() > roundedSize
|| Node().data.MaxIndirectRange() > roundedSize


@@ -33,4 +33,14 @@ sorted_array::Find(off_t value) const
return _FindInternal(value, i) ? i : -1;
}
+ /*! \a to must be a power of 2.
+ */
+ template<typename IntType, typename RoundType>
+ inline IntType
+ round_up(const IntType& value, const RoundType& to)
+ {
+ return (value + (to - 1)) & ~((IntType)to - 1);
+ }
#endif /* UTILITY_H */


@@ -160,8 +160,8 @@ struct small_data {
// the file name is part of the small_data structure
#define FILE_NAME_TYPE 'CSTR'
- #define FILE_NAME_NAME 0x13
- #define FILE_NAME_NAME_LENGTH 1
+ #define FILE_NAME_NAME 0x13
+ #define FILE_NAME_NAME_LENGTH 1
//**************************************
@@ -193,7 +193,7 @@ struct bfs_inode {
int32 pad[4];
small_data small_data_start[0];
int32 Magic1() const { return BFS_ENDIAN_TO_HOST_INT32(magic1); }
int32 UserID() const { return BFS_ENDIAN_TO_HOST_INT32(uid); }
int32 GroupID() const { return BFS_ENDIAN_TO_HOST_INT32(gid); }
@@ -207,7 +207,7 @@ struct bfs_inode {
status_t InitCheck(Volume *volume);
// defined in Inode.cpp
- } _PACKED;
+ } _PACKED;
#define INODE_MAGIC1 0x3bbe0ad9
#define INODE_TIME_SHIFT 16
@@ -267,8 +267,8 @@ get_shift(uint64 i)
return c;
}
- inline int32
- round_up(uint32 data)
+ inline uint32
+ key_align(uint32 data)
{
// rounds up to the next off_t boundary
return (data + sizeof(off_t) - 1) & ~(sizeof(off_t) - 1);
@@ -304,7 +304,7 @@ block_run::IsZero() const
}
- inline bool
+ inline bool
block_run::MergeableWith(block_run run) const
{
// 65535 is the maximum allowed run size for BFS
@@ -352,7 +352,7 @@ small_data::Data() const
}
- inline uint32
+ inline uint32
small_data::Size() const
{
return sizeof(small_data) + NameSize() + 3 + DataSize() + 1;


@@ -431,7 +431,7 @@ bfs_get_file_map(fs_volume *_volume, fs_vnode *_node, off_t offset, size_t size,
Inode *inode = (Inode *)_node->private_node;
int32 blockShift = volume->BlockShift();
- size_t index = 0, max = *_count;
+ uint32 index = 0, max = *_count;
block_run run;
off_t fileOffset;
@@ -445,21 +445,20 @@ bfs_get_file_map(fs_volume *_volume, fs_vnode *_node, off_t offset, size_t size,
vecs[index].offset = volume->ToOffset(run) + offset - fileOffset;
vecs[index].length = (run.Length() << blockShift) - offset + fileOffset;
- offset += vecs[index].length;
// are we already done?
if (size <= vecs[index].length
- || offset >= inode->Size()) {
- if (offset > inode->Size()) {
+ || offset + vecs[index].length >= inode->Size()) {
+ if (offset + vecs[index].length > inode->Size()) {
// make sure the extent ends with the last official file
// block (without taking any preallocations into account)
- vecs[index].length = (inode->Size() - fileOffset
- + volume->BlockSize() - 1) & ~(volume->BlockSize() - 1);
+ vecs[index].length = round_up(inode->Size() - offset,
+ volume->BlockSize());
}
*_count = index + 1;
return B_OK;
}
+ offset += vecs[index].length;
size -= vecs[index].length;
index++;