axeld + bonefish: X86VMTranslationMap::Protect():
* Removed rounding up the end address to page alignment. It's not necessary
  and could cause an overflow.
* Fixed a possible infinite loop triggered by a rare race condition: When two
  threads of a team accessed the same unmapped page at the same time, each
  would trigger a page fault. One thread would map the page again; the second
  would wait until the first was done and then update the page protection
  (unnecessarily, but harmlessly). If the first thread accessed the page again
  at an unfortunate time, it would implicitly change the accessed/dirty flags
  of the page's PTE -- a situation the loop in Protect() didn't consider, so
  it ran forever. Seen the problem twice today in the form of an app server
  freeze.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36197 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 50e4dd9328
parent ac8b5878d7
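The infinite-loop fix is the standard lock-free read-modify-write retry: when
the compare-and-swap fails, fold the value actually observed back into the
expected value and try again (the second hunk below shows the real change). A
minimal sketch of that pattern in standard C++ atomics -- the names and the
std::atomic wrapper are illustrative stand-ins, not Haiku's code, which
operates on live PTEs:

    #include <atomic>
    #include <cstdint>

    typedef uint32_t page_table_entry;

    // Real x86 PTE bit positions, used here only for illustration.
    static const uint32_t kPteWritable = 1 << 1;   // X86_PTE_WRITABLE
    static const uint32_t kPteUser = 1 << 2;       // X86_PTE_USER

    // Atomically replace the protection bits of *pte while tolerating
    // concurrent changes to other bits (e.g. the CPU setting the
    // accessed/dirty flags when the page is touched).
    void
    set_protection(std::atomic<page_table_entry>* pte, uint32_t protectionFlags)
    {
        page_table_entry entry = pte->load();
        while (true) {
            page_table_entry newEntry
                = (entry & ~(kPteWritable | kPteUser)) | protectionFlags;
            // On failure, compare_exchange_weak() stores the value it found
            // into `entry` -- exactly the `entry = oldEntry;` step the commit
            // adds -- so the next attempt starts from the current state.
            if (pte->compare_exchange_weak(entry, newEntry))
                break;
        }
    }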
@@ -976,7 +976,6 @@ X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes)
 	page_directory_entry *pd = fArchData->pgdir_virt;
 
 	start = ROUNDDOWN(start, B_PAGE_SIZE);
-	end = ROUNDUP(end, B_PAGE_SIZE);
 
 	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
 		attributes);
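Why the removed ROUNDUP() was dangerous: near the top of a 32-bit address
space, the addition inside a typical round-up macro wraps around, leaving
`end` smaller than `start`. A small sketch of the arithmetic, assuming the
usual power-of-two ROUNDUP definition:

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t addr_t;           // 32-bit addresses, as on x86
    static const addr_t B_PAGE_SIZE = 4096;

    // The usual round-up-to-a-power-of-two macro (an assumption here).
    #define ROUNDUP(a, b)  (((a) + ((b) - 1)) & ~((b) - 1))

    int
    main()
    {
        addr_t end = 0xfffff123;       // an address inside the topmost page
        addr_t rounded = ROUNDUP(end, B_PAGE_SIZE);
        // 0xfffff123 + 0xfff wraps to 0x00000122; masking then yields 0,
        // so a `while (start < end)` walk over the range does nothing (or
        // worse, a size computed as `end - start` underflows).
        printf("0x%08x -> 0x%08x\n", (unsigned)end, (unsigned)rounded);
        return 0;
    }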
@@ -1022,12 +1021,15 @@ restart:
 		// set the new protection flags -- we want to do that atomically,
 		// without changing the accessed or dirty flag
 		page_table_entry oldEntry;
-		do {
+		while (true) {
 			oldEntry = test_and_set_page_table_entry(&pt[index],
 				(entry & ~(X86_PTE_WRITABLE | X86_PTE_USER))
 					| newProtectionFlags,
 				entry);
-		} while (oldEntry != entry);
+			if (oldEntry == entry)
+				break;
+			entry = oldEntry;
+		}
 
 		if ((oldEntry & X86_PTE_ACCESSED) != 0) {
 			// Note, that we only need to invalidate the address, if the
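For the loop itself, test_and_set_page_table_entry() behaves (judging by its
use in this hunk) like a compare-and-swap that returns the value it actually
found in the entry. A hypothetical stand-in with those assumed semantics:

    #include <atomic>
    #include <cstdint>

    typedef uint32_t page_table_entry;

    // Stand-in for test_and_set_page_table_entry() (assumed semantics, not
    // Haiku's code): write newValue only if *pte still equals `expected`,
    // and return the value actually observed.
    static page_table_entry
    test_and_set(std::atomic<page_table_entry>* pte,
        page_table_entry newValue, page_table_entry expected)
    {
        pte->compare_exchange_strong(expected, newValue);
        return expected;    // on failure: the value found in *pte
    }

Once the CPU sets X86_PTE_ACCESSED or X86_PTE_DIRTY behind the loop's back,
every attempt returns `entry` plus those bits, which never equals the stale
`entry`, so the old `} while (oldEntry != entry)` condition could never become
false. The new loop feeds `oldEntry` back into `entry`, so the next comparison
uses the current state and succeeds.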