diff --git a/headers/private/kernel/vm_types.h b/headers/private/kernel/vm_types.h
index a198b4bcc9..1051daf1e5 100644
--- a/headers/private/kernel/vm_types.h
+++ b/headers/private/kernel/vm_types.h
@@ -314,7 +314,7 @@ enum {
 struct vm_address_space {
 	struct vm_area *areas;
 	struct vm_area *area_hint;
-	sem_id sem;
+	rw_lock lock;
 	addr_t base;
 	addr_t size;
 	int32 change_count;
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index 2483447e0f..753dafd29f 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -271,7 +271,7 @@ AddressSpaceReadLocker::SetTo(team_id team)
 	if (fSpace == NULL)
 		return B_BAD_TEAM_ID;
 
-	acquire_sem_etc(fSpace->sem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&fSpace->lock);
 	fLocked = true;
 	return B_OK;
 }
@@ -282,7 +282,7 @@ void
 AddressSpaceReadLocker::SetTo(vm_address_space* space)
 {
 	fSpace = space;
-	acquire_sem_etc(fSpace->sem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&fSpace->lock);
 	fLocked = true;
 }
 
@@ -294,14 +294,14 @@ AddressSpaceReadLocker::SetFromArea(area_id areaID, vm_area*& area)
 	if (fSpace == NULL)
 		return B_BAD_TEAM_ID;
 
-	acquire_sem_etc(fSpace->sem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&fSpace->lock);
 
 	acquire_sem_etc(sAreaHashLock, READ_COUNT, 0, 0);
 	area = (vm_area *)hash_lookup(sAreaHash, &areaID);
 	release_sem_etc(sAreaHashLock, READ_COUNT, 0);
 
 	if (area == NULL || area->address_space != fSpace) {
-		release_sem_etc(fSpace->sem, READ_COUNT, 0);
+		rw_lock_read_unlock(&fSpace->lock);
 		return B_BAD_VALUE;
 	}
 
@@ -314,7 +314,7 @@ void
 AddressSpaceReadLocker::Unlock()
 {
 	if (fLocked) {
-		release_sem_etc(fSpace->sem, READ_COUNT, 0);
+		rw_lock_read_unlock(&fSpace->lock);
 		fLocked = false;
 	}
 }
@@ -364,7 +364,7 @@ AddressSpaceWriteLocker::SetTo(team_id team)
 	if (fSpace == NULL)
 		return B_BAD_TEAM_ID;
 
-	acquire_sem_etc(fSpace->sem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&fSpace->lock);
 	fLocked = true;
 	return B_OK;
 }
@@ -377,14 +377,14 @@ AddressSpaceWriteLocker::SetFromArea(area_id areaID, vm_area*& area)
 	if (fSpace == NULL)
 		return B_BAD_VALUE;
 
-	acquire_sem_etc(fSpace->sem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&fSpace->lock);
 
 	acquire_sem_etc(sAreaHashLock, READ_COUNT, 0, 0);
 	area = (vm_area*)hash_lookup(sAreaHash, &areaID);
 	release_sem_etc(sAreaHashLock, READ_COUNT, 0);
 
 	if (area == NULL || area->address_space != fSpace) {
-		release_sem_etc(fSpace->sem, WRITE_COUNT, 0);
+		rw_lock_write_unlock(&fSpace->lock);
 		return B_BAD_VALUE;
 	}
 
@@ -415,14 +415,14 @@ AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID,
 	// Second try to get the area -- this time with the address space
 	// write lock held
 
-	acquire_sem_etc(fSpace->sem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&fSpace->lock);
 
 	acquire_sem_etc(sAreaHashLock, READ_COUNT, 0, 0);
 	area = (vm_area *)hash_lookup(sAreaHash, &areaID);
 	release_sem_etc(sAreaHashLock, READ_COUNT, 0);
 
 	if (area == NULL) {
-		release_sem_etc(fSpace->sem, WRITE_COUNT, 0);
+		rw_lock_write_unlock(&fSpace->lock);
 		return B_BAD_VALUE;
 	}
 
@@ -443,7 +443,10 @@ void
 AddressSpaceWriteLocker::Unlock()
 {
 	if (fLocked) {
-		release_sem_etc(fSpace->sem, fDegraded ? READ_COUNT : WRITE_COUNT, 0);
+		if (fDegraded)
+			rw_lock_read_unlock(&fSpace->lock);
+		else
+			rw_lock_write_unlock(&fSpace->lock);
 		fLocked = false;
 		fDegraded = false;
 	}
@@ -453,7 +456,9 @@ AddressSpaceWriteLocker::Unlock()
 void
 AddressSpaceWriteLocker::DegradeToReadLock()
 {
-	release_sem_etc(fSpace->sem, WRITE_COUNT - READ_COUNT, 0);
+	// TODO: the current R/W lock implementation just keeps the write lock here
+	rw_lock_read_lock(&fSpace->lock);
+	rw_lock_write_unlock(&fSpace->lock);
 	fDegraded = true;
 }
 
@@ -586,12 +591,18 @@ MultiAddressSpaceLocker::Lock()
 	qsort(fItems, fCount, sizeof(lock_item), &_CompareItems);
 
 	for (int32 i = 0; i < fCount; i++) {
-		status_t status = acquire_sem_etc(fItems[i].space->sem,
-			fItems[i].write_lock ? WRITE_COUNT : READ_COUNT, 0, 0);
+		status_t status;
+		if (fItems[i].write_lock)
+			status = rw_lock_write_lock(&fItems[i].space->lock);
+		else
+			status = rw_lock_read_lock(&fItems[i].space->lock);
+
 		if (status < B_OK) {
 			while (--i >= 0) {
-				release_sem_etc(fItems[i].space->sem,
-					fItems[i].write_lock ? WRITE_COUNT : READ_COUNT, 0);
+				if (fItems[i].write_lock)
+					rw_lock_write_unlock(&fItems[i].space->lock);
+				else
+					rw_lock_read_unlock(&fItems[i].space->lock);
 			}
 			return status;
 		}
@@ -609,8 +620,10 @@ MultiAddressSpaceLocker::Unlock()
 		return;
 
 	for (int32 i = 0; i < fCount; i++) {
-		release_sem_etc(fItems[i].space->sem,
-			fItems[i].write_lock ? WRITE_COUNT : READ_COUNT, 0);
+		if (fItems[i].write_lock)
+			rw_lock_write_unlock(&fItems[i].space->lock);
+		else
+			rw_lock_read_unlock(&fItems[i].space->lock);
 	}
 
 	fLocked = false;
@@ -3416,7 +3429,7 @@ vm_delete_areas(struct vm_address_space *addressSpace)
 	TRACE(("vm_delete_areas: called on address space 0x%lx\n",
 		addressSpace->id));
 
-	acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&addressSpace->lock);
 
 	// remove all reserved areas in this address space
 
@@ -3445,7 +3458,7 @@ vm_delete_areas(struct vm_address_space *addressSpace)
 		delete_area(addressSpace, area);
 	}
 
-	release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
+	rw_lock_write_unlock(&addressSpace->lock);
 	return B_OK;
 }
 
@@ -3976,7 +3989,7 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
 	vm_address_space *addressSpace = vm_get_current_user_address_space();
 	vm_area *area;
 
-	acquire_sem_etc(addressSpace->sem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&addressSpace->lock);
 	// TODO: The user_memcpy() below can cause a deadlock, if it causes a page
 	// fault and someone is already waiting for a write lock on the same address
 	// space. This thread will then try to acquire the semaphore again and will
@@ -4038,7 +4051,7 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
 	}
 #endif	// 0 (stack trace)
 
-	release_sem_etc(addressSpace->sem, READ_COUNT, 0);
+	rw_lock_read_unlock(&addressSpace->lock);
 	vm_put_address_space(addressSpace);
 #endif
 	struct thread *thread = thread_get_current_thread();
@@ -4756,7 +4769,7 @@ static status_t
 test_lock_memory(vm_address_space *addressSpace, addr_t address,
 	bool &needsLocking)
 {
-	acquire_sem_etc(addressSpace->sem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&addressSpace->lock);
 
 	vm_area *area = vm_area_lookup(addressSpace, address);
 	if (area != NULL) {
@@ -4767,7 +4780,7 @@ test_lock_memory(vm_address_space *addressSpace, addr_t address,
 			&& area->wiring != B_CONTIGUOUS;
 	}
 
-	release_sem_etc(addressSpace->sem, READ_COUNT, 0);
+	rw_lock_read_unlock(&addressSpace->lock);
 
 	if (area == NULL)
 		return B_BAD_ADDRESS;
diff --git a/src/system/kernel/vm/vm_address_space.cpp b/src/system/kernel/vm/vm_address_space.cpp
index bfa3e3871a..1bf10dd65d 100644
--- a/src/system/kernel/vm/vm_address_space.cpp
+++ b/src/system/kernel/vm/vm_address_space.cpp
@@ -31,7 +31,7 @@ static vm_address_space *sKernelAddressSpace;
 
 #define ASPACE_HASH_TABLE_SIZE 1024
 static struct hash_table *sAddressSpaceTable;
-static sem_id sAddressSpaceHashSem;
+static rw_lock sAddressSpaceTableLock;
 
 
 static void
@@ -47,7 +47,6 @@ _dump_aspace(vm_address_space *aspace)
 	dprintf("base: 0x%lx\n", aspace->base);
 	dprintf("size: 0x%lx\n", aspace->size);
 	dprintf("change_count: 0x%lx\n", aspace->change_count);
-	dprintf("sem: 0x%lx\n", aspace->sem);
 	dprintf("area_hint: %p\n", aspace->area_hint);
 	dprintf("area_list:\n");
 	for (area = aspace->areas; area != NULL; area = area->address_space_next) {
@@ -154,9 +153,11 @@ delete_address_space(vm_address_space *addressSpace)
 	if (addressSpace == sKernelAddressSpace)
 		panic("tried to delete the kernel aspace!\n");
 
-	acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
-	(*addressSpace->translation_map.ops->destroy)(&addressSpace->translation_map);
-	delete_sem(addressSpace->sem);
+	rw_lock_write_lock(&addressSpace->lock);
+
+	addressSpace->translation_map.ops->destroy(&addressSpace->translation_map);
+
+	rw_lock_destroy(&addressSpace->lock);
 
 	free(addressSpace);
 }
@@ -169,11 +170,11 @@ vm_get_address_space(team_id id)
 {
 	vm_address_space *addressSpace;
 
-	acquire_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0, 0);
+	rw_lock_read_lock(&sAddressSpaceTableLock);
 	addressSpace = (vm_address_space *)hash_lookup(sAddressSpaceTable, &id);
 	if (addressSpace)
 		atomic_add(&addressSpace->ref_count, 1);
-	release_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0);
+	rw_lock_read_unlock(&sAddressSpaceTableLock);
 
 	return addressSpace;
 }
@@ -236,12 +237,12 @@ vm_put_address_space(vm_address_space *addressSpace)
 {
 	bool remove = false;
 
-	acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&sAddressSpaceTableLock);
 	if (atomic_add(&addressSpace->ref_count, -1) == 1) {
 		hash_remove(sAddressSpaceTable, addressSpace);
 		remove = true;
 	}
-	release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
+	rw_lock_write_unlock(&sAddressSpaceTableLock);
 
 	if (remove)
 		delete_address_space(addressSpace);
@@ -258,9 +259,9 @@ vm_put_address_space(vm_address_space *addressSpace)
 void
 vm_delete_address_space(vm_address_space *addressSpace)
 {
-	acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&addressSpace->lock);
 	addressSpace->state = VM_ASPACE_STATE_DELETION;
-	release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
+	rw_lock_write_unlock(&addressSpace->lock);
 
 	vm_delete_areas(addressSpace);
 	vm_put_address_space(addressSpace);
@@ -286,15 +287,8 @@ vm_create_address_space(team_id id, addr_t base, addr_t size,
 	addressSpace->areas = NULL;
 	addressSpace->area_hint = NULL;
 	addressSpace->change_count = 0;
-	if (!kernel) {
-		// the kernel address space will create its semaphore later
-		addressSpace->sem = create_sem(WRITE_COUNT, "address space");
-		if (addressSpace->sem < B_OK) {
-			status_t status = addressSpace->sem;
-			free(addressSpace);
-			return status;
-		}
-	}
+	rw_lock_init(&addressSpace->lock,
+		kernel ? "kernel address space" : "address space");
 	addressSpace->id = id;
 	addressSpace->ref_count = 1;
 
@@ -310,9 +304,9 @@ vm_create_address_space(team_id id, addr_t base, addr_t size,
 	}
 
 	// add the aspace to the global hash table
-	acquire_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0, 0);
+	rw_lock_write_lock(&sAddressSpaceTableLock);
 	hash_insert(sAddressSpaceTable, addressSpace);
-	release_sem_etc(sAddressSpaceHashSem, WRITE_COUNT, 0);
+	rw_lock_write_unlock(&sAddressSpaceTableLock);
 
 	*_addressSpace = addressSpace;
 	return B_OK;
@@ -322,7 +316,7 @@ vm_create_address_space(team_id id, addr_t base, addr_t size,
 status_t
 vm_address_space_init(void)
 {
-	sAddressSpaceHashSem = -1;
+	rw_lock_init(&sAddressSpaceTableLock, "address spaces table");
 
 	// create the area and address space hash tables
 	{
@@ -334,8 +328,6 @@ vm_address_space_init(void)
 			panic("vm_init: error creating aspace hash table\n");
 	}
 
-	sKernelAddressSpace = NULL;
-
 	// create the initial kernel address space
 	if (vm_create_address_space(1, KERNEL_BASE, KERNEL_SIZE, true,
 			&sKernelAddressSpace) != B_OK)
@@ -358,14 +350,5 @@ vm_address_space_init_post_sem(void)
 	if (status < B_OK)
 		return status;
 
-	status = sKernelAddressSpace->sem = create_sem(WRITE_COUNT,
-		"kernel_aspacelock");
-	if (status < B_OK)
-		return status;
-
-	status = sAddressSpaceHashSem = create_sem(WRITE_COUNT, "aspace_hash_sem");
-	if (status < B_OK)
-		return status;
-
 	return B_OK;
 }
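
For context, the locking pattern the patch migrates to is sketched below. This is a minimal, hypothetical illustration, not code from the patch: the example_space struct and the example_* functions are invented for the sketch, and it assumes the kernel rw_lock API (rw_lock_init, rw_lock_read_lock/_unlock, rw_lock_write_lock/_unlock, rw_lock_destroy) from the private kernel <lock.h> behaves as it is used in the hunks above.

/*
 * Minimal sketch (hypothetical, not part of the patch): the rw_lock usage
 * pattern that replaces the READ_COUNT/WRITE_COUNT semaphore scheme.
 */
#include <lock.h>
#include <SupportDefs.h>

struct example_space {
	rw_lock	lock;		// replaces the former sem_id member
	int32	area_count;	// hypothetical state guarded by the lock
};

static void
example_space_init(struct example_space *space)
{
	// replaces create_sem(WRITE_COUNT, "address space")
	rw_lock_init(&space->lock, "example space");
	space->area_count = 0;
}

static int32
example_space_count_areas(struct example_space *space)
{
	// readers may hold the lock concurrently, like the former
	// acquire_sem_etc(sem, READ_COUNT, 0, 0)
	rw_lock_read_lock(&space->lock);
	int32 count = space->area_count;
	rw_lock_read_unlock(&space->lock);
	return count;
}

static void
example_space_add_area(struct example_space *space)
{
	// writers get exclusive access, like the former
	// acquire_sem_etc(sem, WRITE_COUNT, 0, 0)
	rw_lock_write_lock(&space->lock);
	space->area_count++;
	rw_lock_write_unlock(&space->lock);
}

static void
example_space_destroy(struct example_space *space)
{
	// replaces delete_sem()
	rw_lock_destroy(&space->lock);
}

The sketch does not attempt to model lock degradation: as the new TODO in AddressSpaceWriteLocker::DegradeToReadLock() notes, taking the read lock before releasing the write lock appears to rely on the current rw_lock implementation simply keeping the write lock in that case.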