kernel/vm: Introduce and use VMAddressSpace::AreaRangeIterator.

It iterates over all areas intersecting a given address range and
removes the need for manually skipping uninteresting initial areas. It
uses VMAddressSpace::FindClosestArea() to efficiently find the starting
area.

This speeds up the two iterations in unmap_address_range and one in
wait_if_address_range_is_wired and resolves a TODO in the latter hinting
at such a solution.

Change-Id: Iba1d39942db4e4b27e17706be194496f9d4279ed
Reviewed-on: https://review.haiku-os.org/c/haiku/+/2841
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
This commit is contained in:
Michael Lotz 2020-05-29 14:33:55 +02:00 committed by waddlesplash
parent a626bdab77
commit a6926d4287
2 changed files with 73 additions and 21 deletions

View File

@@ -23,6 +23,7 @@ struct virtual_address_restrictions;
struct VMAddressSpace {
public:
class AreaIterator;
class AreaRangeIterator;
public:
VMAddressSpace(team_id id, addr_t base,
@@ -67,6 +68,8 @@ public:
{ fRandomizingEnabled = enabled; }
inline AreaIterator GetAreaIterator();
inline AreaRangeIterator GetAreaRangeIterator(addr_t address,
addr_t size);
VMAddressSpace*& HashTableLink() { return fHashTableLink; }
@@ -202,6 +205,54 @@ private:
};
// Iterates over all areas of an address space that intersect the address
// range [fAddress, fEndAddress]. Rewind() uses
// VMAddressSpace::FindClosestArea() to locate the first candidate area
// efficiently instead of linearly skipping uninteresting initial areas.
class VMAddressSpace::AreaRangeIterator : public VMAddressSpace::AreaIterator {
public:
	AreaRangeIterator()
		:
		// Value-initialize all members: the original default constructor left
		// them indeterminate, making HasNext()/Next() on a default-constructed
		// iterator undefined behavior.
		fAddressSpace(NULL),
		fNext(NULL),
		fAddress(0),
		fEndAddress(0)
	{
	}

	AreaRangeIterator(VMAddressSpace* addressSpace, addr_t address, addr_t size)
		:
		fAddressSpace(addressSpace),
		fNext(NULL),
		fAddress(address),
		fEndAddress(address + size - 1)
			// NOTE(review): a size of 0 would wrap fEndAddress around;
			// callers are expected to pass size > 0 — confirm.
	{
		Rewind();
	}

	// Returns whether Next() will yield another intersecting area.
	bool HasNext() const
	{
		return fNext != NULL;
	}

	// Returns the current area and advances. The successor is kept only while
	// its base still lies within the range; otherwise iteration ends.
	VMArea* Next()
	{
		VMArea* result = fNext;
		if (fNext != NULL) {
			fNext = fAddressSpace->NextArea(fNext);
			if (fNext != NULL && fNext->Base() > fEndAddress)
				fNext = NULL;
		}

		return result;
	}

	// Repositions the iterator at the first area intersecting the range.
	// Must only be called on an iterator constructed with an address space.
	void Rewind()
	{
		fNext = fAddressSpace->FindClosestArea(fAddress, true);
		if (fNext != NULL && !fNext->ContainsAddress(fAddress)) {
			// The closest area at or before fAddress does not intersect the
			// range start; advance to the following area (Next() discards it
			// again if its base lies beyond fEndAddress).
			Next();
		}
	}

private:
	VMAddressSpace*	fAddressSpace;
	VMArea*			fNext;
	addr_t			fAddress;
	addr_t			fEndAddress;
};
inline VMAddressSpace::AreaIterator
VMAddressSpace::GetAreaIterator()
{
@@ -209,6 +260,13 @@ VMAddressSpace::GetAreaIterator()
}
inline VMAddressSpace::AreaRangeIterator
VMAddressSpace::GetAreaRangeIterator(addr_t address, addr_t size)
{
	// Hand out an iterator over all areas intersecting the given range.
	AreaRangeIterator iterator(this, address, size);
	return iterator;
}
#ifdef __cplusplus
extern "C" {
#endif

View File

@@ -833,26 +833,26 @@ unmap_address_range(VMAddressSpace* addressSpace, addr_t address, addr_t size,
bool kernel)
{
size = PAGE_ALIGN(size);
addr_t lastAddress = address + (size - 1);
// Check, whether the caller is allowed to modify the concerned areas.
if (!kernel) {
for (VMAddressSpace::AreaIterator it = addressSpace->GetAreaIterator();
VMArea* area = it.Next();) {
addr_t areaLast = area->Base() + (area->Size() - 1);
if (area->Base() < lastAddress && address < areaLast) {
if ((area->protection & B_KERNEL_AREA) != 0) {
dprintf("unmap_address_range: team %" B_PRId32 " tried to "
"unmap range of kernel area %" B_PRId32 " (%s)\n",
team_get_current_team_id(), area->id, area->name);
return B_NOT_ALLOWED;
}
for (VMAddressSpace::AreaRangeIterator it
= addressSpace->GetAreaRangeIterator(address, size);
VMArea* area = it.Next();) {
if ((area->protection & B_KERNEL_AREA) != 0) {
dprintf("unmap_address_range: team %" B_PRId32 " tried to "
"unmap range of kernel area %" B_PRId32 " (%s)\n",
team_get_current_team_id(), area->id, area->name);
return B_NOT_ALLOWED;
}
}
}
for (VMAddressSpace::AreaIterator it = addressSpace->GetAreaIterator();
VMArea* area = it.Next();) {
for (VMAddressSpace::AreaRangeIterator it
= addressSpace->GetAreaRangeIterator(address, size);
VMArea* area = it.Next();) {
status_t error = cut_area(addressSpace, area, address, size, NULL,
kernel);
if (error != B_OK)
@@ -1092,15 +1092,9 @@ static inline bool
wait_if_address_range_is_wired(VMAddressSpace* addressSpace, addr_t base,
size_t size, LockerType* locker)
{
addr_t end = base + size - 1;
for (VMAddressSpace::AreaIterator it = addressSpace->GetAreaIterator();
for (VMAddressSpace::AreaRangeIterator it
= addressSpace->GetAreaRangeIterator(base, size);
VMArea* area = it.Next();) {
// TODO: Introduce a VMAddressSpace method to get a close iterator!
if (area->Base() > end)
return false;
if (base >= area->Base() + area->Size() - 1)
continue;
AreaCacheLocker cacheLocker(vm_area_get_locked_cache(area));