Introduce debug page-wise kernel area protection functions.

This adds a pair of functions, vm_prepare_kernel_area_debug_protection()
and vm_set_kernel_area_debug_protection(), to set a kernel area up for
page-wise protection and to actually protect individual pages,
respectively.

It was already possible to read- and write-protect full areas via the area
protection flags and by not mapping any actual pages. For areas that do
have mapped pages this doesn't work, however, as no fault, at which the
permissions could be checked, is generated on access.

These new functions use the debug helpers of the translation map to mark
individual pages as non-present without unmapping them. This allows them
to be "protected", i.e. to cause a fault on read and write access. Since
they aren't actually unmapped, they can later be marked present again.

Note that these are debug helpers with quite a few restrictions, as
described in the comment above the function, and they are only useful for
some very specific and constrained use cases.
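
As a rough usage sketch (the helper names and the wired-area setup below are illustrative and not part of this commit; only the two vm_* functions are), a kernel component owning a fully locked area could toggle access like this:

static void* sGuardCookie;

status_t
init_area_guard(area_id wiredArea)
{
	// One-time setup: allocates the per-page protections for the area and
	// returns a cookie for later vm_set_kernel_area_debug_protection() calls.
	return vm_prepare_kernel_area_debug_protection(wiredArea, &sGuardCookie);
}

void
guard_pages(void* pageAlignedAddress, size_t size)
{
	// Protection 0 marks the pages non-present in the translation map, so
	// any read or write access faults instead of silently succeeding.
	vm_set_kernel_area_debug_protection(sGuardCookie, pageAlignedAddress,
		size, 0);
}

void
unguard_pages(void* pageAlignedAddress, size_t size)
{
	// The pages were never unmapped, so they can simply be made accessible
	// again.
	vm_set_kernel_area_debug_protection(sGuardCookie, pageAlignedAddress,
		size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
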
Michael Lotz 2011-12-03 19:49:18 +01:00
parent 643cf35ee8
commit 7418dbd908
2 changed files with 127 additions and 12 deletions


@@ -86,6 +86,10 @@ area_id transfer_area(area_id id, void** _address, uint32 addressSpec,
const char* vm_cache_type_to_string(int32 type);
status_t vm_prepare_kernel_area_debug_protection(area_id id, void** cookie);
status_t vm_set_kernel_area_debug_protection(void* cookie, void* _address,
	size_t size, uint32 protection);
status_t vm_block_address_range(const char* name, void* address, addr_t size);
status_t vm_unreserve_address_range(team_id team, void *address, addr_t size);
status_t vm_reserve_address_range(team_id team, void **_address,


@@ -447,6 +447,26 @@ lookup_area(VMAddressSpace* addressSpace, area_id id)
}
static status_t
allocate_area_page_protections(VMArea* area)
{
	// In the page protections we store only the three user protections,
	// so we use 4 bits per page.
	uint32 bytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
	area->page_protections = (uint8*)malloc_etc(bytes,
		HEAP_DONT_LOCK_KERNEL_SPACE);
	if (area->page_protections == NULL)
		return B_NO_MEMORY;

	// init the page protections for all pages to that of the area
	uint32 areaProtection = area->protection
		& (B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
	memset(area->page_protections, areaProtection | (areaProtection << 4),
		bytes);

	return B_OK;
}
static inline void
set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection)
{
@@ -473,6 +493,17 @@ get_area_page_protection(VMArea* area, addr_t pageAddress)
	else
		protection >>= 4;

	// If this is a kernel area we translate the user flags to kernel flags.
	if (area->address_space == VMAddressSpace::Kernel()) {
		uint32 kernelProtection = 0;
		if ((protection & B_READ_AREA) != 0)
			kernelProtection |= B_KERNEL_READ_AREA;
		if ((protection & B_WRITE_AREA) != 0)
			kernelProtection |= B_KERNEL_WRITE_AREA;

		return kernelProtection;
	}

	return protection | B_KERNEL_READ_AREA
		| (protection & B_WRITE_AREA ? B_KERNEL_WRITE_AREA : 0);
}
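
For reference, the 4-bits-per-page packing these helpers rely on can be read back roughly as follows (a sketch restating the indexing used by get_area_page_protection() above; the helper name is made up for illustration):

static inline uint32
debug_read_page_protection_sketch(VMArea* area, addr_t pageAddress)
{
	// Two pages share one byte of page_protections: the even page sits in
	// the low nibble, the odd page in the high nibble.
	uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
	uint8 byte = area->page_protections[pageIndex / 2];
	return pageIndex % 2 == 0 ? (byte & 0x0f) : (byte >> 4);
}
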
@@ -976,6 +1007,95 @@ wait_if_address_range_is_wired(VMAddressSpace* addressSpace, addr_t base,
}
/*!	Prepares an area to be used for vm_set_kernel_area_debug_protection().
	It must be called in a situation where the kernel address space may be
	locked.
*/
status_t
vm_prepare_kernel_area_debug_protection(area_id id, void** cookie)
{
	AddressSpaceReadLocker locker;
	VMArea* area;
	status_t status = locker.SetFromArea(id, area);
	if (status != B_OK)
		return status;

	if (area->page_protections == NULL) {
		status = allocate_area_page_protections(area);
		if (status != B_OK)
			return status;
	}

	*cookie = (void*)area;
	return B_OK;
}
/*!	This is a debug helper function that can only be used with very specific
	use cases.
	Sets protection for the given address range to the protection specified.
	If \a protection is 0 then the involved pages will be marked non-present
	in the translation map to cause a fault on access. The pages aren't
	actually unmapped however so that they can be marked present again with
	additional calls to this function. For this to work the area must be
	fully locked in memory so that the pages aren't otherwise touched.
	This function does not lock the kernel address space and needs to be
	supplied with a \a cookie retrieved from a successful call to
	vm_prepare_kernel_area_debug_protection().
*/
status_t
vm_set_kernel_area_debug_protection(void* cookie, void* _address, size_t size,
	uint32 protection)
{
	// check address range
	addr_t address = (addr_t)_address;
	size = PAGE_ALIGN(size);

	if ((address % B_PAGE_SIZE) != 0
		|| (addr_t)address + size < (addr_t)address
		|| !IS_KERNEL_ADDRESS(address)
		|| !IS_KERNEL_ADDRESS((addr_t)address + size)) {
		return B_BAD_VALUE;
	}

	// Translate the kernel protection to user protection as we only store that.
	if ((protection & B_KERNEL_READ_AREA) != 0)
		protection |= B_READ_AREA;
	if ((protection & B_KERNEL_WRITE_AREA) != 0)
		protection |= B_WRITE_AREA;

	VMAddressSpace* addressSpace = VMAddressSpace::GetKernel();
	VMTranslationMap* map = addressSpace->TranslationMap();
	VMArea* area = (VMArea*)cookie;

	addr_t offset = address - area->Base();
	if (area->Size() - offset < size) {
		panic("protect range not fully within supplied area");
		return B_BAD_VALUE;
	}

	if (area->page_protections == NULL) {
		panic("area has no page protections");
		return B_BAD_VALUE;
	}
	// Invalidate the mapping entries so any access to them will fault or
	// restore the mapping entries unchanged so that lookup will succeed again.
	map->Lock();
	map->DebugMarkRangePresent(address, address + size, protection != 0);
	map->Unlock();

	// And set the proper page protections so that the fault case will actually
	// fail and not simply try to map a new page.
	for (addr_t pageAddress = address; pageAddress < address + size;
			pageAddress += B_PAGE_SIZE) {
		set_area_page_protection(area, pageAddress, protection);
	}

	return B_OK;
}
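
The two-step sequence above exists because the page fault path consults the stored per-page protection before deciding whether to map a page. Conceptually (a simplified sketch with placeholder variables, not the actual fault handler code) the check this relies on looks like:

	// Sketch of the fault-side check; the real logic lives in the page
	// fault handler and differs in detail.
	uint32 pageProtection = get_area_page_protection(area, faultAddress);
	uint32 required = isWrite ? B_KERNEL_WRITE_AREA : B_KERNEL_READ_AREA;
	if ((pageProtection & required) == 0) {
		// With the page protection set to 0 the access is rejected here
		// instead of the handler simply (re)mapping the page.
		return B_PERMISSION_DENIED;
	}
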
status_t
vm_block_address_range(const char* name, void* address, addr_t size)
{
@@ -6182,18 +6302,9 @@ _user_set_memory_protection(void* _address, size_t size, uint32 protection)
			if (area->protection == protection)
				continue;

			// In the page protections we store only the three user protections,
			// so we use 4 bits per page.
			uint32 bytes = (area->Size() / B_PAGE_SIZE + 1) / 2;
			area->page_protections = (uint8*)malloc(bytes);
			if (area->page_protections == NULL)
				return B_NO_MEMORY;

			// init the page protections for all pages to that of the area
			uint32 areaProtection = area->protection
				& (B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			memset(area->page_protections,
				areaProtection | (areaProtection << 4), bytes);

			status_t status = allocate_area_page_protections(area);
			if (status != B_OK)
				return status;
		}
// We need to lock the complete cache chain, since we potentially unmap