kernel/vm: Make vm_copy_area take page protections into account.

When copying an area with vm_copy_area, only the new protection was
applied and any page protections possibly present on the source area
were ignored.

For areas with stricter area protection than page protection, this led
to faults when accessing the copy. In the opposite case it led to
overly relaxed protection. Currently the only user of vm_copy_area is
fork_team, which goes through all areas of the parent and copies them
to the new team. Hence page protections were ignored in all forked
teams.
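
As an illustration of the failure mode, here is a hypothetical repro
sketch (not part of this change; it assumes that mprotect is what
establishes per-page protections on a Haiku area):

	#include <OS.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int
	main()
	{
		void* base;
		// Area protection: the whole area is readable and writable.
		area_id area = create_area("demo", &base, B_ANY_ADDRESS,
			B_PAGE_SIZE * 4, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (area < B_OK)
			return 1;

		// Page protection: restrict the first page to read-only, so the
		// area's page_protections now differ from its area protection.
		if (mprotect(base, B_PAGE_SIZE, PROT_READ) != 0)
			return 1;

		if (fork() == 0) {
			// The child's copy was made by fork_team via vm_copy_area.
			// Previously only the area protection survived the copy, so
			// the read-only page came out writable again in the child.
			return 0;
		}
		return 0;
	}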

Remove the protection argument and instead always carry over the source
area protection and duplicate the page protections when present.
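
For context, page protections are stored packed, one 4-bit nibble per
page and thus two pages per byte, which is why the duplication below
allocates (source->Size() / B_PAGE_SIZE + 1) / 2 bytes (rounding up for
an odd page count). A rough sketch of that layout; the helper name is
illustrative, not an existing kernel function:

	#include <SupportDefs.h>

	// Illustrative only: extract a page's protection nibble from the
	// packed page_protections array. Even-numbered pages use the low
	// nibble, odd-numbered pages the high nibble.
	static uint32
	page_protection_at(const uint8* pageProtections, size_t pageIndex)
	{
		uint8 entry = pageProtections[pageIndex / 2];
		return (pageIndex % 2 == 0) ? (entry & 0x0f) : (entry >> 4);
	}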

Also make sure to take the page protections into account when deciding
whether the copy is writable and therefore needs to have copy-on-write
semantics.
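
For reference, the new writability decision condensed into one place
(taken from the change below; B_WRITE_AREA << 4 tests the second page
packed into the high nibble of each byte):

	bool writableCopy
		= (source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0;
	if (!writableCopy && source->page_protections != NULL) {
		size_t bytes = (source->Size() / B_PAGE_SIZE + 1) / 2;
		for (size_t i = 0; i < bytes; i++) {
			// Either page of this byte being writable makes the copy
			// writable and hence in need of copy-on-write.
			if ((source->page_protections[i]
					& (B_WRITE_AREA | B_WRITE_AREA << 4)) != 0) {
				writableCopy = true;
				break;
			}
		}
	}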

Change-Id: I52f295f2aaa66e31b4900b754343b3be9a19ba30
Reviewed-on: https://review.haiku-os.org/c/haiku/+/3166
Reviewed-by: waddlesplash <waddlesplash@gmail.com>

commit 75a10a74e8 (parent bafb111359)
Author: Michael Lotz, 2020-08-23 02:27:42 +02:00
Committed by: waddlesplash
3 changed files with 35 additions and 15 deletions

@@ -113,7 +113,7 @@ void vm_area_put_locked_cache(struct VMCache *cache);
 area_id vm_create_null_area(team_id team, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 flags);
 area_id vm_copy_area(team_id team, const char *name, void **_address,
-	uint32 addressSpec, uint32 protection, area_id sourceID);
+	uint32 addressSpec, area_id sourceID);
 area_id vm_clone_area(team_id team, const char *name, void **address,
 	uint32 addressSpec, uint32 protection, uint32 mapping,
 	area_id sourceArea, bool kernel);

@@ -2138,7 +2138,7 @@ fork_team(void)
 		} else {
 			void* address;
 			area_id area = vm_copy_area(team->address_space->ID(), info.name,
-				&address, B_CLONE_ADDRESS, info.protection, info.area);
+				&address, B_CLONE_ADDRESS, info.area);
 			if (area < B_OK) {
 				status = area;
 				break;

@@ -2535,17 +2535,8 @@ vm_copy_on_write_area(VMCache* lowerCache,
 
 area_id
 vm_copy_area(team_id team, const char* name, void** _address,
-	uint32 addressSpec, uint32 protection, area_id sourceID)
+	uint32 addressSpec, area_id sourceID)
 {
-	bool writableCopy = (protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0;
-
-	if ((protection & B_KERNEL_PROTECTION) == 0) {
-		// set the same protection for the kernel as for userland
-		protection |= B_KERNEL_READ_AREA;
-		if (writableCopy)
-			protection |= B_KERNEL_WRITE_AREA;
-	}
-
 	// Do the locking: target address space, all address spaces associated with
 	// the source cache, and the cache itself.
 	MultiAddressSpaceLocker locker;
@@ -2618,6 +2609,30 @@ vm_copy_area(team_id team, const char* name, void** _address,
 		vm_page_reservation* fReservation;
 	} pagesUnreserver(wiredPages > 0 ? &wiredPagesReservation : NULL);
 
+	bool writableCopy
+		= (source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0;
+	uint8* targetPageProtections = NULL;
+
+	if (source->page_protections != NULL) {
+		size_t bytes = (source->Size() / B_PAGE_SIZE + 1) / 2;
+		targetPageProtections = (uint8*)malloc_etc(bytes,
+			HEAP_DONT_LOCK_KERNEL_SPACE);
+		if (targetPageProtections == NULL)
+			return B_NO_MEMORY;
+
+		memcpy(targetPageProtections, source->page_protections, bytes);
+
+		if (!writableCopy) {
+			for (size_t i = 0; i < bytes; i++) {
+				if ((targetPageProtections[i]
+						& (B_WRITE_AREA | B_WRITE_AREA << 4)) != 0) {
+					writableCopy = true;
+					break;
+				}
+			}
+		}
+	}
+
 	if (addressSpec == B_CLONE_ADDRESS) {
 		addressSpec = B_EXACT_ADDRESS;
 		*_address = (void*)source->Base();
@@ -2631,12 +2646,17 @@ vm_copy_area(team_id team, const char* name, void** _address,
 	addressRestrictions.address = *_address;
 	addressRestrictions.address_specification = addressSpec;
 	status = map_backing_store(targetAddressSpace, cache, source->cache_offset,
-		name, source->Size(), source->wiring, protection,
+		name, source->Size(), source->wiring, source->protection,
 		sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
 		writableCopy ? 0 : CREATE_AREA_DONT_COMMIT_MEMORY,
 		&addressRestrictions, true, &target, _address);
-	if (status < B_OK)
+	if (status < B_OK) {
+		free_etc(targetPageProtections, HEAP_DONT_LOCK_KERNEL_SPACE);
 		return status;
+	}
+
+	if (targetPageProtections != NULL)
+		target->page_protections = targetPageProtections;
 
 	if (sharedArea) {
 		// The new area uses the old area's cache, but map_backing_store()
@@ -2647,7 +2667,7 @@ vm_copy_area(team_id team, const char* name, void** _address,
 
 	// If the source area is writable, we need to move it one layer up as well
 	if (!sharedArea) {
-		if ((source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0) {
+		if (writableCopy) {
 			// TODO: do something more useful if this fails!
 			if (vm_copy_on_write_area(cache,
 				wiredPages > 0 ? &wiredPagesReservation : NULL) < B_OK) {