Fixed two crucial bugs:

* When aborting the first sub-transaction, BFS didn't recognize this
  correctly, and aborted it via cache_abort_transaction() instead of
  cache_abort_sub_transaction() - that not only reversed the whole
  transaction, but also let subsequent cache_start_sub_transaction()
  fail (as there was no transaction to continue anymore). Reported
  in a reproducible way by Jerome Duval.
* Haiku didn't want to replay logs anymore - the off-by-one fix I did
  in r14428 was not complete, it only let Be's BFS replay our logs,
  but ours rejected them afterwards.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16018 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2006-01-21 13:35:08 +00:00
parent 4af3d066e1
commit 923e3aaf6f
2 changed files with 10 additions and 3 deletions

View File

@ -300,7 +300,8 @@ Journal::Journal(Volume *volume)
fLogSize(volume->Log().length), fLogSize(volume->Log().length),
fMaxTransactionSize(fLogSize / 4 - 5), fMaxTransactionSize(fLogSize / 4 - 5),
fUsed(0), fUsed(0),
fUnwrittenTransactions(0) fUnwrittenTransactions(0),
fHasSubtransaction(false)
{ {
if (fMaxTransactionSize > fLogSize / 2) if (fMaxTransactionSize > fLogSize / 2)
fMaxTransactionSize = fLogSize / 2 - 5; fMaxTransactionSize = fLogSize / 2 - 5;
@ -330,10 +331,13 @@ Journal::InitCheck()
status_t status_t
Journal::_CheckRunArray(const run_array *array) Journal::_CheckRunArray(const run_array *array)
{ {
int32 maxRuns = run_array::MaxRuns(fVolume->BlockSize()); int32 maxRuns = run_array::MaxRuns(fVolume->BlockSize()) - 1;
// the -1 works around an off-by-one bug in Be's BFS implementation,
// same as in run_array::MaxRuns()
if (array->MaxRuns() != maxRuns if (array->MaxRuns() != maxRuns
|| array->CountRuns() > maxRuns || array->CountRuns() > maxRuns
|| array->CountRuns() <= 0) { || array->CountRuns() <= 0) {
dprintf("run count: %ld, array max: %ld, max runs: %ld\n", array->CountRuns(), array->MaxRuns(), maxRuns);
FATAL(("Log entry has broken header!\n")); FATAL(("Log entry has broken header!\n"));
return B_ERROR; return B_ERROR;
} }
@ -507,6 +511,7 @@ Journal::_WriteTransactionToLog()
// changed blocks back to disk immediately // changed blocks back to disk immediately
fUnwrittenTransactions = 0; fUnwrittenTransactions = 0;
fHasSubtransaction = false;
int32 blockShift = fVolume->BlockShift(); int32 blockShift = fVolume->BlockShift();
off_t logOffset = fVolume->ToBlock(fVolume->Log()) << blockShift; off_t logOffset = fVolume->ToBlock(fVolume->Log()) << blockShift;
@ -721,6 +726,7 @@ Journal::Lock(Transaction *owner)
if (fUnwrittenTransactions > 0) { if (fUnwrittenTransactions > 0) {
// start a sub transaction // start a sub transaction
cache_start_sub_transaction(fVolume->BlockCache(), fTransactionID); cache_start_sub_transaction(fVolume->BlockCache(), fTransactionID);
fHasSubtransaction = true;
} else } else
fTransactionID = cache_start_transaction(fVolume->BlockCache()); fTransactionID = cache_start_transaction(fVolume->BlockCache());

View File

@ -60,7 +60,7 @@ class Journal {
inline uint32 FreeLogBlocks() const; inline uint32 FreeLogBlocks() const;
private: private:
bool _HasSubTransaction() { return fUnwrittenTransactions > 1; } bool _HasSubTransaction() { return fHasSubtransaction; }
uint32 _TransactionSize() const; uint32 _TransactionSize() const;
status_t _WriteTransactionToLog(); status_t _WriteTransactionToLog();
status_t _CheckRunArray(const run_array *array); status_t _CheckRunArray(const run_array *array);
@ -77,6 +77,7 @@ class Journal {
LogEntryList fEntries; LogEntryList fEntries;
bigtime_t fTimestamp; bigtime_t fTimestamp;
int32 fTransactionID; int32 fTransactionID;
bool fHasSubtransaction;
}; };