* When shrinking the stream size after a node had been removed, its
  parent node could temporarily become invalid (i.e. CheckNode() would
  fail).
* Since shrinking the stream was only done in edge cases anyway, we no
  longer do it, which works around the problem. This fixes bug #1716.
* Minor cleanup.
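
To make the failure mode concrete, here is a minimal, self-contained sketch of the link rule that CheckNode()/IsValidLink() enforces (see the header diff below); the node size, the values, and the exact sequence of events are illustrative assumptions, not the real code path:

    // Illustration only -- simplified, hypothetical values; not the BFS sources.
    #include <cstdint>
    #include <cstdio>

    static const int64_t BPLUSTREE_NULL = -1;

    // Same rule as bplustree_header::IsValidLink(): a node link must not
    // point past maximum_size - node_size, i.e. past the last node.
    static bool IsValidLink(int64_t link, int64_t maximumSize, int64_t nodeSize)
    {
        return link == BPLUSTREE_NULL
            || (link > 0 && link <= maximumSize - nodeSize);
    }

    int main()
    {
        const int64_t nodeSize = 1024;
        int64_t maximumSize = 8 * nodeSize;          // stream holds 8 nodes
        int64_t link = maximumSize - nodeSize;       // a link to the last node

        printf("before shrink: %d\n", IsValidLink(link, maximumSize, nodeSize));
        maximumSize -= nodeSize;                     // the stream is shrunk...
        // ...so a not-yet-updated link to the old last node now fails the check.
        printf("after shrink:  %d\n", IsValidLink(link, maximumSize, nodeSize));
        return 0;
    }

Built as a standalone program this prints 1 and then 0: the very same link becomes invalid once maximum_size has been reduced by one node, which is the kind of transiently invalid state the commit message describes.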


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23656 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-01-19 23:12:11 +00:00
parent f7a5d9c583
commit 1204649234
2 changed files with 30 additions and 16 deletions


@@ -1,5 +1,5 @@
/*
* Copyright 2001-2007, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2001-2008, Axel Dörfler, axeld@pinc-software.de.
* This file may be used under the terms of the MIT License.
*
* Roughly based on 'btlib' written by Marcus J. Ranum - it shares
@@ -109,8 +109,8 @@ CachedNode::SetTo(off_t offset, bool check)
if (InternalSetTo(NULL, offset) != NULL && check) {
// sanity checks (links, all_key_count)
if (!fTree->fHeader->CheckNode(fNode)) {
FATAL(("invalid node (%p) read from offset %Ld, inode at %Ld\n",
fNode, offset, fTree->fStream->ID()));
FATAL(("invalid node [%p] read from offset %Ld (block %Ld), inode "
"at %Ld\n", fNode, offset, fBlockNumber, fTree->fStream->ID()));
return NULL;
}
}
@@ -139,8 +139,8 @@ CachedNode::SetToWritable(Transaction &transaction, off_t offset, bool check)
if (InternalSetTo(&transaction, offset) != NULL && check) {
// sanity checks (links, all_key_count)
if (!fTree->fHeader->CheckNode(fNode)) {
FATAL(("invalid node [%p] read from offset %Ld, inode at %Ld\n",
fNode, offset, fTree->fStream->ID()));
FATAL(("invalid node [%p] read from offset %Ld (block %Ld), inode "
"at %Ld\n", fNode, offset, fBlockNumber, fTree->fStream->ID()));
return NULL;
}
}
@@ -250,6 +250,9 @@ CachedNode::Free(Transaction &transaction, off_t offset)
if (header == NULL)
return B_IO_ERROR;
#if 0
// TODO: temporarily disabled because CheckNode() doesn't like this...
// Also, it's such an edge case that it's almost useless, anyway.
// if the node is the last one in the tree, we shrink
// the tree and file size by one node
off_t lastOffset = header->MaximumSize() - fTree->fNodeSize;
@@ -261,6 +264,7 @@ CachedNode::Free(Transaction &transaction, off_t offset)
header->maximum_size = HOST_ENDIAN_TO_BFS_INT64(lastOffset);
return B_OK;
}
#endif
// add the node to the free nodes list
fNode->left_link = header->free_node_pointer;
@@ -1670,7 +1674,8 @@ BPlusTree::Remove(Transaction &transaction, const uint8 *key, uint16 keyLength,
#endif
if (node->IsLeaf()) {
// first round, check for duplicate entries
status_t status = _FindKey(node, key, keyLength, &nodeAndKey.keyIndex);
status_t status = _FindKey(node, key, keyLength,
&nodeAndKey.keyIndex);
if (status < B_OK)
RETURN_ERROR(status);


@@ -1,5 +1,5 @@
/*
* Copyright 2001-2007, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2001-2008, Axel Dörfler, axeld@pinc-software.de.
* This file may be used under the terms of the MIT License.
*/
#ifndef B_PLUS_TREE_H
@@ -30,8 +30,10 @@ struct bplustree_header {
uint32 Magic() const { return BFS_ENDIAN_TO_HOST_INT32(magic); }
uint32 NodeSize() const { return BFS_ENDIAN_TO_HOST_INT32(node_size); }
uint32 DataType() const { return BFS_ENDIAN_TO_HOST_INT32(data_type); }
off_t RootNode() const { return BFS_ENDIAN_TO_HOST_INT64(root_node_pointer); }
off_t FreeNode() const { return BFS_ENDIAN_TO_HOST_INT64(free_node_pointer); }
off_t RootNode() const
{ return BFS_ENDIAN_TO_HOST_INT64(root_node_pointer); }
off_t FreeNode() const
{ return BFS_ENDIAN_TO_HOST_INT64(free_node_pointer); }
off_t MaximumSize() const { return BFS_ENDIAN_TO_HOST_INT64(maximum_size); }
uint32 MaxNumberOfLevels() const
{ return BFS_ENDIAN_TO_HOST_INT32(max_number_of_levels); }
@@ -67,9 +69,11 @@ struct bplustree_node {
off_t LeftLink() const { return BFS_ENDIAN_TO_HOST_INT64(left_link); }
off_t RightLink() const { return BFS_ENDIAN_TO_HOST_INT64(right_link); }
off_t OverflowLink() const { return BFS_ENDIAN_TO_HOST_INT64(overflow_link); }
off_t OverflowLink() const
{ return BFS_ENDIAN_TO_HOST_INT64(overflow_link); }
uint16 NumKeys() const { return BFS_ENDIAN_TO_HOST_INT16(all_key_count); }
uint16 AllKeyLength() const { return BFS_ENDIAN_TO_HOST_INT16(all_key_length); }
uint16 AllKeyLength() const
{ return BFS_ENDIAN_TO_HOST_INT16(all_key_length); }
inline uint16 *KeyLengths() const;
inline off_t *Values() const;
@@ -87,7 +91,8 @@ struct bplustree_node {
inline duplicate_array *DuplicateArray() const;
static inline uint8 LinkType(off_t link);
static inline off_t MakeLink(uint8 type, off_t link, uint32 fragmentIndex = 0);
static inline off_t MakeLink(uint8 type, off_t link,
uint32 fragmentIndex = 0);
static inline bool IsDuplicate(off_t link);
static inline off_t FragmentOffset(off_t link);
static inline uint32 FragmentIndex(off_t link);
@@ -403,14 +408,16 @@ inline status_t
TreeIterator::GetNextEntry(void *key, uint16 *keyLength, uint16 maxLength,
off_t *value, uint16 *duplicate)
{
return Traverse(BPLUSTREE_FORWARD, key, keyLength, maxLength, value, duplicate);
return Traverse(BPLUSTREE_FORWARD, key, keyLength, maxLength, value,
duplicate);
}
inline status_t
TreeIterator::GetPreviousEntry(void *key, uint16 *keyLength, uint16 maxLength,
off_t *value, uint16 *duplicate)
{
return Traverse(BPLUSTREE_BACKWARD, key, keyLength, maxLength, value, duplicate);
return Traverse(BPLUSTREE_BACKWARD, key, keyLength, maxLength, value,
duplicate);
}
@@ -424,14 +431,16 @@ bplustree_header::CheckNode(bplustree_node *node) const
return IsValidLink(node->LeftLink())
&& IsValidLink(node->RightLink())
&& IsValidLink(node->OverflowLink())
&& (int8 *)node->Values() + node->NumKeys() * sizeof(off_t) <= (int8 *)node + NodeSize();
&& (int8 *)node->Values() + node->NumKeys() * sizeof(off_t)
<= (int8 *)node + NodeSize();
}
inline bool
bplustree_header::IsValidLink(off_t link) const
{
return link == BPLUSTREE_NULL || (link > 0 && link <= MaximumSize() - NodeSize());
return link == BPLUSTREE_NULL
|| (link > 0 && link <= MaximumSize() - NodeSize());
}