Fixed two crucial bugs:
* When aborting the first sub-transaction, BFS didn't recognize this correctly, and aborted it via cache_abort_transaction() instead of cache_abort_sub_transaction() - that not only reversed the whole transaction, but also let subsequent cache_start_sub_transaction() fail (as there was no transaction to continue anymore). Reported in a reproducible way by Jerome Duval.
* Haiku didn't want to replay logs anymore - the off-by-one fix I did in r14428 was not complete, it only let Be's BFS replay our logs, but ours rejected them afterwards.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16018 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent
4af3d066e1
commit
923e3aaf6f
|
@ -300,7 +300,8 @@ Journal::Journal(Volume *volume)
|
|||
fLogSize(volume->Log().length),
|
||||
fMaxTransactionSize(fLogSize / 4 - 5),
|
||||
fUsed(0),
|
||||
fUnwrittenTransactions(0)
|
||||
fUnwrittenTransactions(0),
|
||||
fHasSubtransaction(false)
|
||||
{
|
||||
if (fMaxTransactionSize > fLogSize / 2)
|
||||
fMaxTransactionSize = fLogSize / 2 - 5;
|
||||
|
@ -330,10 +331,13 @@ Journal::InitCheck()
|
|||
status_t
|
||||
Journal::_CheckRunArray(const run_array *array)
|
||||
{
|
||||
int32 maxRuns = run_array::MaxRuns(fVolume->BlockSize());
|
||||
int32 maxRuns = run_array::MaxRuns(fVolume->BlockSize()) - 1;
|
||||
// the -1 works around an off-by-one bug in Be's BFS implementation,
|
||||
// same as in run_array::MaxRuns()
|
||||
if (array->MaxRuns() != maxRuns
|
||||
|| array->CountRuns() > maxRuns
|
||||
|| array->CountRuns() <= 0) {
|
||||
dprintf("run count: %ld, array max: %ld, max runs: %ld\n", array->CountRuns(), array->MaxRuns(), maxRuns);
|
||||
FATAL(("Log entry has broken header!\n"));
|
||||
return B_ERROR;
|
||||
}
|
||||
|
@ -507,6 +511,7 @@ Journal::_WriteTransactionToLog()
|
|||
// changed blocks back to disk immediately
|
||||
|
||||
fUnwrittenTransactions = 0;
|
||||
fHasSubtransaction = false;
|
||||
|
||||
int32 blockShift = fVolume->BlockShift();
|
||||
off_t logOffset = fVolume->ToBlock(fVolume->Log()) << blockShift;
|
||||
|
@ -721,6 +726,7 @@ Journal::Lock(Transaction *owner)
|
|||
if (fUnwrittenTransactions > 0) {
|
||||
// start a sub transaction
|
||||
cache_start_sub_transaction(fVolume->BlockCache(), fTransactionID);
|
||||
fHasSubtransaction = true;
|
||||
} else
|
||||
fTransactionID = cache_start_transaction(fVolume->BlockCache());
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ class Journal {
|
|||
inline uint32 FreeLogBlocks() const;
|
||||
|
||||
private:
|
||||
bool _HasSubTransaction() { return fUnwrittenTransactions > 1; }
|
||||
bool _HasSubTransaction() { return fHasSubtransaction; }
|
||||
uint32 _TransactionSize() const;
|
||||
status_t _WriteTransactionToLog();
|
||||
status_t _CheckRunArray(const run_array *array);
|
||||
|
@ -77,6 +77,7 @@ class Journal {
|
|||
LogEntryList fEntries;
|
||||
bigtime_t fTimestamp;
|
||||
int32 fTransactionID;
|
||||
bool fHasSubtransaction;
|
||||
};
|
||||
|
||||
|
||||
|
|
Loading…
Reference in New Issue