Add debug helper functions to mark pages present.

They can be used to mark pages as present/non-present without actually
unmapping them. Marking pages as non-present causes every access to
fault. We can use that for debugging as it allows us to "read protect"
individual kernel pages.
Michael Lotz 2011-12-03 19:36:01 +01:00
parent f990399943
commit 643cf35ee8
6 changed files with 130 additions and 0 deletions
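
As a rough usage sketch (not part of this commit), read-protecting a single kernel page with the new hook could look like the following. The sketch assumes kernel context and that the kernel address space's translation map is reachable via VMAddressSpace::Kernel()->TranslationMap(); the helper name debug_read_protect_page is made up for illustration, and the B_NOT_SUPPORTED check covers architectures that keep the default implementation.

// Hypothetical sketch only: "read protect" one kernel page so that every
// access to it faults, then restore it without ever unmapping the page.
static void
debug_read_protect_page(addr_t pageAddress)
{
	VMTranslationMap* map = VMAddressSpace::Kernel()->TranslationMap();

	// Mark the page non-present; any access now causes a page fault.
	status_t status = map->DebugMarkRangePresent(pageAddress,
		pageAddress + B_PAGE_SIZE, false);
	if (status == B_NOT_SUPPORTED) {
		// This architecture's translation map doesn't override the hook.
		return;
	}

	// ... let the suspected accessor run and fault here ...

	// Make the page accessible again.
	map->DebugMarkRangePresent(pageAddress, pageAddress + B_PAGE_SIZE, true);
}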


@@ -36,6 +36,9 @@ struct VMTranslationMap {
									vm_page_reservation* reservation) = 0;
	virtual	status_t			Unmap(addr_t start, addr_t end) = 0;
	virtual	status_t			DebugMarkRangePresent(addr_t start, addr_t end,
									bool markPresent);
									// map not locked
	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue) = 0;


@@ -261,6 +261,61 @@ X86VMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
}


status_t
X86VMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & X86_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & X86_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & X86_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
					X86_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				page_table_entry oldEntry
					= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
						X86_PTE_PRESENT);

				if ((oldEntry & X86_PTE_ACCESSED) != 0) {
					// Note, that we only need to invalidate the address, if the
					// accessed flags was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/

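For context (not part of the diff), the directory/table index math used by the 32-bit walk above follows the standard non-PAE x86 layout. A minimal sketch, assuming the usual Haiku definitions of VADDR_TO_PDENT, VADDR_TO_PTENT and kPageTableAlignment in the 32-bit paging headers:

// Sketch of the assumed 32-bit (non-PAE) geometry: 1024 page directory
// entries, each referring to a page table of 1024 4 KB pages, so one page
// table covers 4 MB of virtual address space.
static const uint32 kPageTableAlignment = 1024 * B_PAGE_SIZE;	// 4 MB

static inline int
vaddr_to_pdent(addr_t address)
{
	return address / B_PAGE_SIZE / 1024;	// top 10 bits: directory index
}

static inline int
vaddr_to_ptent(addr_t address)
{
	return address / B_PAGE_SIZE % 1024;	// next 10 bits: table index
}

This is why ROUNDUP(start + 1, kPageTableAlignment) jumps straight to the first address served by the next page table whenever a directory entry is not present.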

@@ -27,6 +27,9 @@ struct X86VMTranslationMap32Bit : X86VMTranslationMap {
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);
	virtual	status_t			DebugMarkRangePresent(addr_t start, addr_t end,
									bool markPresent);
	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);
	virtual	void				UnmapPages(VMArea* area, addr_t base,


@@ -301,6 +301,64 @@ X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
}


status_t
X86VMTranslationMapPAE::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	do {
		pae_page_directory_entry* pageDirEntry
			= X86PagingMethodPAE::PageDirEntryForAddress(
				fPagingStructures->VirtualPageDirs(), start);
		if ((*pageDirEntry & X86_PAE_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPAEPageTableRange);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pae_page_table_entry* pageTable
			= (pae_page_table_entry*)fPageMapper->GetPageTableAt(
				*pageDirEntry & X86_PAE_PDE_ADDRESS_MASK);

		uint32 index = start / B_PAGE_SIZE % kPAEPageTableEntryCount;
		for (; index < kPAEPageTableEntryCount && start < end;
				index++, start += B_PAGE_SIZE) {
			if ((pageTable[index] & X86_PAE_PTE_PRESENT) == 0) {
				if (!markPresent)
					continue;

				X86PagingMethodPAE::SetPageTableEntryFlags(
					&pageTable[index], X86_PAE_PTE_PRESENT);
			} else {
				if (markPresent)
					continue;

				pae_page_table_entry oldEntry
					= X86PagingMethodPAE::ClearPageTableEntryFlags(
						&pageTable[index], X86_PAE_PTE_PRESENT);

				if ((oldEntry & X86_PAE_PTE_ACCESSED) != 0) {
					// Note, that we only need to invalidate the address, if the
					// accessed flags was set, since only then the entry could
					// have been in any TLB.
					InvalidatePage(start);
				}
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}


/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/

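The PAE variant above differs from the 32-bit one only in table geometry. A short sketch of the constants its loop assumes (the real definitions live in the PAE paging headers):

// Assumed PAE geometry: page table entries are 64 bits wide, so a 4 KB page
// table holds 512 entries and spans 2 MB of virtual address space.
static const uint32 kPAEPageTableEntryCount = 512;
static const uint32 kPAEPageTableRange
	= kPAEPageTableEntryCount * B_PAGE_SIZE;	// 2 MB

// Hence the table index used above, start / B_PAGE_SIZE % kPAEPageTableEntryCount,
// i.e. bits 12..20 of the virtual address.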

@@ -30,6 +30,9 @@ struct X86VMTranslationMapPAE : X86VMTranslationMap {
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);
	virtual	status_t			DebugMarkRangePresent(addr_t start, addr_t end,
									bool markPresent);
	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);
	virtual	void				UnmapPages(VMArea* area, addr_t base,


@@ -31,6 +31,14 @@ VMTranslationMap::~VMTranslationMap()
}


status_t
VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	return B_NOT_SUPPORTED;
}


/*!	Unmaps a range of pages of an area.
	The default implementation just iterates over all virtual pages of the