* Merged vm_virtual_map with vm_address_space - there was no reason to keep
  them separate (this even saves a pointer from vm_virtual_map to its address space)
* aspace -> address_space
* vm_create_address_space() did not check if creating the semaphore succeeded (see the sketch below)
* Removed team::kaspace - was not really needed (introduced a new vm_kernel_address_space()
  function that doesn't grab a reference to the address space)
* Removed vm_address_space::name - it was just a copy of the team name, anyway,
  and there is always only one address space per team
* Removed aspace_id - the address space now uses the team_id
* Some cleanup.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15609 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2005-12-20 13:29:11 +00:00
parent 470dbda0b0
commit 96e01a27bf
14 changed files with 528 additions and 539 deletions
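
The semaphore check called out in the third commit-message bullet, condensed from the vm_create_address_space() hunk further down; the constants and field names are taken from that hunk, and the surrounding allocation and cleanup code is omitted:

	if (!kernel) {
		// the kernel address space will create its semaphore later,
		// in vm_address_space_init_post_sem()
		addressSpace->sem = create_sem(WRITE_COUNT, "address space");
		if (addressSpace->sem < B_OK) {
			status_t status = addressSpace->sem;
			free(addressSpace);
			return status;
		}
	}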

View File

@ -111,8 +111,7 @@ struct team {
bigtime_t kernel_time;
bigtime_t user_time;
} dead_children;
struct vm_address_space *aspace;
struct vm_address_space *kaspace;
struct vm_address_space *address_space;
struct thread *main_thread;
struct thread *thread_list;
struct team_loading_info *loading_info;

View File

@ -25,22 +25,23 @@ status_t vm_init(kernel_args *args);
status_t vm_init_post_sem(struct kernel_args *args);
status_t vm_init_post_thread(struct kernel_args *args);
status_t vm_init_post_modules(struct kernel_args *args);
status_t vm_aspace_init(void);
status_t vm_aspace_init_post_sem(void);
status_t vm_address_space_init(void);
status_t vm_address_space_init_post_sem(void);
void vm_free_kernel_args(kernel_args *args);
void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
void vm_delete_aspace(vm_address_space *aspace);
status_t vm_create_aspace(const char *name, team_id id, addr_t base, addr_t size,
void vm_delete_address_space(vm_address_space *aspace);
status_t vm_create_address_space(team_id id, addr_t base, addr_t size,
bool kernel, vm_address_space **_aspace);
status_t vm_delete_areas(struct vm_address_space *aspace);
vm_address_space *vm_get_kernel_aspace(void);
aspace_id vm_get_kernel_aspace_id(void);
vm_address_space *vm_get_current_user_aspace(void);
aspace_id vm_get_current_user_aspace_id(void);
vm_address_space *vm_get_aspace_by_id(aspace_id aid);
void vm_put_aspace(vm_address_space *aspace);
#define vm_aspace_swap(aspace) arch_vm_aspace_swap(aspace)
vm_address_space *vm_get_kernel_address_space(void);
vm_address_space *vm_kernel_address_space(void);
team_id vm_kernel_address_space_id(void);
vm_address_space *vm_get_current_user_address_space(void);
team_id vm_current_user_address_space_id(void);
vm_address_space *vm_get_address_space_by_id(team_id aid);
void vm_put_address_space(vm_address_space *aspace);
#define vm_swap_address_space(aspace) arch_vm_aspace_swap(aspace)
// private kernel only extension (should be moved somewhere else):
struct team;
@ -48,29 +49,29 @@ area_id create_area_etc(struct team *team, const char *name, void **address,
uint32 addressSpec, uint32 size, uint32 lock, uint32 protection);
status_t delete_area_etc(struct team *team, area_id area);
status_t vm_unreserve_address_range(aspace_id aid, void *address, addr_t size);
status_t vm_reserve_address_range(aspace_id aid, void **_address,
status_t vm_unreserve_address_range(team_id aid, void *address, addr_t size);
status_t vm_reserve_address_range(team_id aid, void **_address,
uint32 addressSpec, addr_t size, uint32 flags);
area_id vm_create_anonymous_area(aspace_id aid, const char *name, void **address,
area_id vm_create_anonymous_area(team_id aid, const char *name, void **address,
uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection);
area_id vm_map_physical_memory(aspace_id aid, const char *name, void **address,
area_id vm_map_physical_memory(team_id aid, const char *name, void **address,
uint32 addressSpec, addr_t size, uint32 protection, addr_t phys_addr);
area_id vm_map_file(aspace_id aid, const char *name, void **address,
area_id vm_map_file(team_id aid, const char *name, void **address,
uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
const char *path, off_t offset);
area_id vm_create_null_area(aspace_id aid, const char *name, void **address,
area_id vm_create_null_area(team_id aid, const char *name, void **address,
uint32 addressSpec, addr_t size);
area_id vm_copy_area(aspace_id addressSpaceID, const char *name, void **_address,
area_id vm_copy_area(team_id addressSpaceID, const char *name, void **_address,
uint32 addressSpec, uint32 protection, area_id sourceID);
area_id vm_clone_area(aspace_id aid, const char *name, void **address,
area_id vm_clone_area(team_id aid, const char *name, void **address,
uint32 addressSpec, uint32 protection, uint32 mapping,
area_id sourceArea);
status_t vm_delete_area(aspace_id aid, area_id id);
status_t vm_delete_area(team_id aid, area_id id);
status_t vm_create_vnode_cache(void *vnode, vm_cache_ref **_cacheRef);
status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
status_t vm_get_page_mapping(aspace_id aid, addr_t vaddr, addr_t *paddr);
status_t vm_get_page_mapping(team_id aid, addr_t vaddr, addr_t *paddr);
status_t vm_get_physical_page(addr_t paddr, addr_t *vaddr, int flags);
status_t vm_put_physical_page(addr_t vaddr);
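
As an illustration of the renamed interface above, a hypothetical caller (the area name and size are made up; the call pattern mirrors the VM test code later in this commit): the area functions now take a team_id, and vm_kernel_address_space_id() replaces vm_get_kernel_aspace_id().

	void *address;
	area_id area = vm_create_anonymous_area(vm_kernel_address_space_id(),
		"example_area", &address, B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 4,
		B_NO_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area >= B_OK) {
		// ... use the memory ...
		vm_delete_area(vm_kernel_address_space_id(), area);
	}

Likewise, vm_kernel_address_space() hands out the kernel address space without grabbing a reference, so callers that used to pair vm_get_kernel_aspace() with vm_put_aspace() can drop the put, as the block cache hunk below does.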

View File

@ -15,8 +15,6 @@
#include <arch/vm_translation_map.h>
typedef int32 aspace_id;
// vm page
typedef struct vm_page {
struct vm_page *queue_prev;
@ -90,25 +88,13 @@ typedef struct vm_area {
struct vm_cache_ref *cache_ref;
off_t cache_offset;
struct vm_address_space *aspace;
struct vm_area *aspace_next;
struct vm_virtual_map *map;
struct vm_address_space *address_space;
struct vm_area *address_space_next;
struct vm_area *cache_next;
struct vm_area *cache_prev;
struct vm_area *hash_next;
} vm_area;
// virtual map (1 per address space)
typedef struct vm_virtual_map {
vm_area *areas;
vm_area *area_hint;
int change_count;
sem_id sem;
struct vm_address_space *aspace;
addr_t base;
addr_t size;
} vm_virtual_map;
enum {
VM_ASPACE_STATE_NORMAL = 0,
VM_ASPACE_STATE_DELETION
@ -116,13 +102,17 @@ enum {
// address space
typedef struct vm_address_space {
vm_virtual_map virtual_map;
vm_area *areas;
vm_area *area_hint;
sem_id sem;
addr_t base;
addr_t size;
int32 change_count;
vm_translation_map translation_map;
char *name;
aspace_id id;
team_id id;
int32 ref_count;
int32 fault_count;
int state;
int32 state;
addr_t scan_va;
addr_t working_set_size;
addr_t max_working_set;
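
Collected from the added lines of the hunk above, the merged structure appears to end up roughly like this (the hunk cuts off after max_working_set, so the remaining working-set fields are omitted):

	typedef struct vm_address_space {
		vm_area *areas;
		vm_area *area_hint;
		sem_id sem;
		addr_t base;
		addr_t size;
		int32 change_count;
		vm_translation_map translation_map;
		team_id id;
		int32 ref_count;
		int32 fault_count;
		int32 state;
		addr_t scan_va;
		addr_t working_set_size;
		addr_t max_working_set;
		// ... further working-set fields unchanged by this hunk
	} vm_address_space;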

View File

@ -115,23 +115,23 @@ i386_get_user_iframe(void)
inline void *
x86_next_page_directory(struct thread *from, struct thread *to)
{
if (from->team->aspace != NULL && to->team->aspace != NULL) {
// they are both uspace threads
if (from->team->address_space != NULL && to->team->address_space != NULL) {
// they are both user space threads
if (from->team == to->team) {
// dont change the pgdir, same address space
return NULL;
}
// switching to a new address space
return i386_translation_map_get_pgdir(&to->team->aspace->translation_map);
} else if (from->team->aspace == NULL && to->team->aspace == NULL) {
// they must both be kspace threads
return i386_translation_map_get_pgdir(&to->team->address_space->translation_map);
} else if (from->team->address_space == NULL && to->team->address_space == NULL) {
// they must both be kernel space threads
return NULL;
} else if (to->team->aspace == NULL) {
// the one we're switching to is kspace
return i386_translation_map_get_pgdir(&to->team->kaspace->translation_map);
} else if (to->team->address_space == NULL) {
// the one we're switching to is kernel space
return i386_translation_map_get_pgdir(&vm_kernel_address_space()->translation_map);
}
return i386_translation_map_get_pgdir(&to->team->aspace->translation_map);
return i386_translation_map_get_pgdir(&to->team->address_space->translation_map);
}
@ -257,7 +257,7 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
dprintf("arch_thread_context_switch: cpu %d 0x%x -> 0x%x, aspace 0x%x -> 0x%x, old stack = 0x%x:0x%x, stack = 0x%x:0x%x\n",
smp_get_current_cpu(), t_from->id, t_to->id,
t_from->team->aspace, t_to->team->aspace,
t_from->team->address_space, t_to->team->address_space,
t_from->arch_info.current_stack.ss, t_from->arch_info.current_stack.esp,
t_to->arch_info.current_stack.ss, t_to->arch_info.current_stack.esp);
#endif
@ -279,7 +279,7 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
// reinit debugging; necessary, if the thread was preempted after
// initializing debugging before returning to userland
if (to->team->aspace != NULL)
if (to->team->address_space != NULL)
i386_reinit_user_debug_after_context_switch(to);
i386_fsave_swap(from->arch_info.fpu_state, to->arch_info.fpu_state);

View File

@ -833,7 +833,7 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
recursive_lock_destroy(&map->lock);
return B_NO_MEMORY;
}
vm_get_page_mapping(vm_get_kernel_aspace_id(),
vm_get_page_mapping(vm_kernel_address_space_id(),
(addr_t)map->arch_data->pgdir_virt, (addr_t *)&map->arch_data->pgdir_phys);
} else {
// kernel
@ -995,7 +995,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
TRACE(("vm_translation_map_init_post_area: creating iospace\n"));
temp = (void *)IOSPACE_BASE;
vm_create_null_area(vm_get_kernel_aspace_id(), "iospace", &temp,
vm_create_null_area(vm_kernel_address_space_id(), "iospace", &temp,
B_EXACT_ADDRESS, IOSPACE_SIZE);
TRACE(("vm_translation_map_init_post_area: done\n"));

View File

@ -92,8 +92,8 @@ BlockAddressPool::BlockAddressPool()
fBase = 0xa0000000;
// directly after the I/O space area
fArea = vm_create_null_area(vm_get_kernel_aspace_id(), "block cache", (void **)&fBase,
B_BASE_ADDRESS, kBlockAddressSize);
fArea = vm_create_null_area(vm_kernel_address_space_id(), "block cache",
(void **)&fBase, B_BASE_ADDRESS, kBlockAddressSize);
fFirstFree = fBase;
fNextFree = -1;
@ -216,15 +216,13 @@ block_range::Delete(block_cache *cache, block_range *range)
// unmap the memory
vm_address_space *addressSpace = vm_get_kernel_aspace();
vm_address_space *addressSpace = vm_kernel_address_space();
vm_translation_map *map = &addressSpace->translation_map;
map->ops->lock(map);
map->ops->unmap(map, range->base, range->base + kBlockRangeSize - 1);
map->ops->unlock(map);
vm_put_aspace(addressSpace);
sBlockAddressPool.Put(range->base);
// free pages
@ -355,7 +353,7 @@ block_range::Allocate(block_cache *cache, block_chunk **_chunk)
// map the memory
vm_address_space *addressSpace = vm_get_kernel_aspace();
vm_address_space *addressSpace = vm_kernel_address_space();
vm_translation_map *map = &addressSpace->translation_map;
map->ops->lock(map);
@ -366,7 +364,6 @@ block_range::Allocate(block_cache *cache, block_chunk **_chunk)
}
map->ops->unlock(map);
vm_put_aspace(addressSpace);
chunks[chunk].mapped = true;
}

View File

@ -1008,7 +1008,7 @@ symbol_found:
status_t
elf_load_user_image(const char *path, struct team *p, int flags, addr_t *entry)
elf_load_user_image(const char *path, struct team *team, int flags, addr_t *entry)
{
struct Elf32_Ehdr eheader;
struct Elf32_Phdr *pheaders = NULL;
@ -1018,7 +1018,7 @@ elf_load_user_image(const char *path, struct team *p, int flags, addr_t *entry)
int i;
ssize_t len;
TRACE(("elf_load: entry path '%s', team %p\n", path, p));
TRACE(("elf_load: entry path '%s', team %p\n", path, team));
fd = _kern_open(-1, path, O_RDONLY, 0);
if (fd < 0)
@ -1095,7 +1095,7 @@ elf_load_user_image(const char *path, struct team *p, int flags, addr_t *entry)
sprintf(regionName, "%s_seg%drw", baseName, i);
id = vm_map_file(p->aspace->id, regionName,
id = vm_map_file(team->id, regionName,
(void **)&regionAddress,
B_EXACT_ADDRESS,
fileUpperBound,
@ -1128,7 +1128,7 @@ elf_load_user_image(const char *path, struct team *p, int flags, addr_t *entry)
sprintf(regionName, "%s_bss%d", baseName, i);
regionAddress += fileUpperBound;
id = create_area_etc(p, regionName, (void **)&regionAddress,
id = create_area_etc(team, regionName, (void **)&regionAddress,
B_EXACT_ADDRESS, bss_size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
if (id < B_OK) {
dprintf("error allocating bss area: %s!\n", strerror(id));
@ -1142,7 +1142,7 @@ elf_load_user_image(const char *path, struct team *p, int flags, addr_t *entry)
*/
sprintf(regionName, "%s_seg%dro", baseName, i);
id = vm_map_file(p->aspace->id, regionName,
id = vm_map_file(team->id, regionName,
(void **)&regionAddress,
B_EXACT_ADDRESS,
ROUNDUP(pheaders[i].p_memsz + (pheaders[i].p_vaddr % B_PAGE_SIZE), B_PAGE_SIZE),
@ -1279,7 +1279,7 @@ load_kernel_add_on(const char *path)
}
// reserve that space and allocate the areas from that one
if (vm_reserve_address_range(vm_get_kernel_aspace_id(), &reservedAddress,
if (vm_reserve_address_range(vm_kernel_address_space_id(), &reservedAddress,
B_ANY_KERNEL_ADDRESS, reservedSize, 0) < B_OK)
goto error3;
@ -1394,7 +1394,7 @@ error5:
delete_area(image->data_region.id);
delete_area(image->text_region.id);
error4:
vm_unreserve_address_range(vm_get_kernel_aspace_id(), reservedAddress, reservedSize);
vm_unreserve_address_range(vm_kernel_address_space_id(), reservedAddress, reservedSize);
error3:
free(pheaders);
error2:

View File

@ -105,9 +105,8 @@ _dump_team_info(struct team *team)
kprintf("state: %d\n", team->state);
kprintf("pending_signals: 0x%x\n", team->pending_signals);
kprintf("io_context: %p\n", team->io_context);
if (team->aspace)
kprintf("aspace: %p (id = 0x%lx)\n", team->aspace, team->aspace->id);
kprintf("kaspace: %p\n", team->kaspace);
if (team->address_space)
kprintf("address_space: %p (id = 0x%lx)\n", team->address_space, team->address_space->id);
kprintf("main_thread: %p\n", team->main_thread);
kprintf("thread_list: %p\n", team->thread_list);
}
@ -661,8 +660,7 @@ team_get_address_space(team_id id, vm_address_space **_addressSpace)
if (id == 1) {
// we're the kernel team, so we don't have to go through all
// the hassle (locking and hash lookup)
atomic_add(&kernel_team->kaspace->ref_count, 1);
*_addressSpace = kernel_team->kaspace;
*_addressSpace = vm_get_kernel_address_space();
return B_OK;
}
@ -671,8 +669,8 @@ team_get_address_space(team_id id, vm_address_space **_addressSpace)
team = team_get_team_struct_locked(id);
if (team != NULL) {
atomic_add(&team->aspace->ref_count, 1);
*_addressSpace = team->aspace;
atomic_add(&team->address_space->ref_count, 1);
*_addressSpace = team->address_space;
status = B_OK;
} else
status = B_BAD_VALUE;
@ -696,9 +694,7 @@ create_team_struct(const char *name, bool kernel)
strlcpy(team->name, name, B_OS_NAME_LENGTH);
team->num_threads = 0;
team->io_context = NULL;
team->aspace = NULL;
team->kaspace = vm_get_kernel_aspace();
vm_put_aspace(team->kaspace);
team->address_space = NULL;
team->thread_list = NULL;
team->main_thread = NULL;
team->loading_info = NULL;
@ -885,7 +881,7 @@ team_delete_team(struct team *team)
// free team resources
vm_delete_aspace(team->aspace);
vm_delete_address_space(team->address_space);
delete_owned_ports(teamID);
sem_delete_owned_sems(teamID);
remove_images(team);
@ -1149,7 +1145,8 @@ load_image_etc(int32 argCount, char * const *args, int32 envCount, char * const
}
// create an address space for this team
status = vm_create_aspace(team->name, team->id, USER_BASE, USER_SIZE, false, &team->aspace);
status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
&team->address_space);
if (status < B_OK)
goto err3;
@ -1211,7 +1208,7 @@ load_image_etc(int32 argCount, char * const *args, int32 envCount, char * const
return thread;
err4:
vm_put_aspace(team->aspace);
vm_put_address_space(team->address_space);
err3:
vfs_free_io_context(team->io_context);
err2:
@ -1306,7 +1303,7 @@ exec_team(const char *path, int32 argCount, char * const *args,
user_debug_prepare_for_exec();
vm_delete_areas(team->aspace);
vm_delete_areas(team->address_space);
delete_owned_ports(team->id);
sem_delete_owned_sems(team->id);
remove_images(team);
@ -1422,7 +1419,8 @@ fork_team(void)
}
// create an address space for this team
status = vm_create_aspace(team->name, team->id, USER_BASE, USER_SIZE, false, &team->aspace);
status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
&team->address_space);
if (status < B_OK)
goto err3;
@ -1433,7 +1431,7 @@ fork_team(void)
cookie = 0;
while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
void *address;
area_id area = vm_copy_area(team->aspace->id, info.name, &address, B_CLONE_ADDRESS,
area_id area = vm_copy_area(team->address_space->id, info.name, &address, B_CLONE_ADDRESS,
info.protection, info.area);
if (area < B_OK) {
status = area;
@ -1469,7 +1467,7 @@ fork_team(void)
return threadID;
err4:
vm_delete_aspace(team->aspace);
vm_delete_address_space(team->address_space);
err3:
vfs_free_io_context(team->io_context);
err2:

View File

@ -918,7 +918,7 @@ thread_exit(void)
cancel_timer(&thread->alarm);
// delete the user stack area first, we won't need it anymore
if (team->aspace != NULL && thread->user_stack_area >= 0) {
if (team->address_space != NULL && thread->user_stack_area >= 0) {
area_id area = thread->user_stack_area;
thread->user_stack_area = -1;
delete_area_etc(team, area);
@ -977,7 +977,7 @@ thread_exit(void)
}
RELEASE_TEAM_LOCK();
// swap address spaces, to make sure we're running on the kernel's pgdir
vm_aspace_swap(team_get_kernel_team()->kaspace);
vm_swap_address_space(vm_kernel_address_space());
restore_interrupts(state);
TRACE(("thread_exit: thread 0x%lx now a kernel thread!\n", thread->id));

File diff suppressed because it is too large

View File

@ -58,20 +58,19 @@ _dump_aspace(vm_address_space *aspace)
vm_area *area;
dprintf("dump of address space at %p:\n", aspace);
dprintf("name: '%s'\n", aspace->name);
dprintf("id: 0x%lx\n", aspace->id);
dprintf("ref_count: %ld\n", aspace->ref_count);
dprintf("fault_count: %ld\n", aspace->fault_count);
dprintf("working_set_size: 0x%lx\n", aspace->working_set_size);
dprintf("translation_map: %p\n", &aspace->translation_map);
dprintf("virtual_map.base: 0x%lx\n", aspace->virtual_map.base);
dprintf("virtual_map.size: 0x%lx\n", aspace->virtual_map.size);
dprintf("virtual_map.change_count: 0x%x\n", aspace->virtual_map.change_count);
dprintf("virtual_map.sem: 0x%lx\n", aspace->virtual_map.sem);
dprintf("virtual_map.region_hint: %p\n", aspace->virtual_map.area_hint);
dprintf("virtual_map.region_list:\n");
for (area = aspace->virtual_map.areas; area != NULL; area = area->aspace_next) {
dprintf(" region 0x%lx: ", area->id);
dprintf("base: 0x%lx\n", aspace->base);
dprintf("size: 0x%lx\n", aspace->size);
dprintf("change_count: 0x%lx\n", aspace->change_count);
dprintf("sem: 0x%lx\n", aspace->sem);
dprintf("area_hint: %p\n", aspace->area_hint);
dprintf("area_list:\n");
for (area = aspace->areas; area != NULL; area = area->address_space_next) {
dprintf(" area 0x%lx: ", area->id);
dprintf("base_addr = 0x%lx ", area->base);
dprintf("size = 0x%lx ", area->size);
dprintf("name = '%s' ", area->name);
@ -91,9 +90,9 @@ dump_aspace(int argc, char **argv)
}
// if the argument looks like a number, treat it as such
if (isdigit(argv[1][0])) {
unsigned long num = strtoul(argv[1], NULL, 0);
aspace_id id = num;
{
team_id id = strtoul(argv[1], NULL, 0);
aspace = hash_lookup(aspace_table, &id);
if (aspace == NULL) {
@ -102,16 +101,6 @@ dump_aspace(int argc, char **argv)
_dump_aspace(aspace);
}
return 0;
} else {
// walk through the aspace list, looking for the arguments as a name
struct hash_iterator iter;
hash_open(aspace_table, &iter);
while ((aspace = hash_next(aspace_table, &iter)) != NULL) {
if(aspace->name != NULL && strcmp(argv[1], aspace->name) == 0) {
_dump_aspace(aspace);
}
}
}
return 0;
}
@ -123,12 +112,12 @@ dump_aspace_list(int argc, char **argv)
vm_address_space *as;
struct hash_iterator iter;
dprintf("addr\tid\t%32s\tbase\t\tsize\n", "name");
dprintf("addr\tid\tbase\t\tsize\n");
hash_open(aspace_table, &iter);
while ((as = hash_next(aspace_table, &iter)) != NULL) {
dprintf("%p\t0x%lx\t%32s\t0x%lx\t\t0x%lx\n",
as, as->id, as->name, as->virtual_map.base, as->virtual_map.size);
dprintf("%p\t0x%lx\t0x%lx\t\t0x%lx\n",
as, as->id, as->base, as->size);
}
hash_close(aspace_table, &iter, false);
return 0;
@ -139,7 +128,7 @@ static int
aspace_compare(void *_a, const void *key)
{
vm_address_space *aspace = _a;
const aspace_id *id = key;
const team_id *id = key;
if (aspace->id == *id)
return 0;
@ -152,7 +141,7 @@ static uint32
aspace_hash(void *_a, const void *key, uint32 range)
{
vm_address_space *aspace = _a;
const aspace_id *id = key;
const team_id *id = key;
if (aspace != NULL)
return aspace->id % range;
@ -166,24 +155,23 @@ aspace_hash(void *_a, const void *key, uint32 range)
*/
static void
delete_address_space(vm_address_space *aspace)
delete_address_space(vm_address_space *addressSpace)
{
TRACE(("delete_address_space: called on aspace 0x%lx\n", aspace->id));
TRACE(("delete_address_space: called on aspace 0x%lx\n", addressSpace->id));
if (aspace == kernel_aspace)
if (addressSpace == kernel_aspace)
panic("tried to delete the kernel aspace!\n");
// put this aspace in the deletion state
// this guarantees that no one else will add regions to the list
acquire_sem_etc(aspace->virtual_map.sem, WRITE_COUNT, 0, 0);
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
aspace->state = VM_ASPACE_STATE_DELETION;
addressSpace->state = VM_ASPACE_STATE_DELETION;
(*aspace->translation_map.ops->destroy)(&aspace->translation_map);
(*addressSpace->translation_map.ops->destroy)(&addressSpace->translation_map);
free(aspace->name);
delete_sem(aspace->virtual_map.sem);
free(aspace);
delete_sem(addressSpace->sem);
free(addressSpace);
}
@ -191,7 +179,7 @@ delete_address_space(vm_address_space *aspace)
vm_address_space *
vm_get_aspace_by_id(aspace_id aid)
vm_get_address_space_by_id(team_id aid)
{
vm_address_space *aspace;
@ -206,44 +194,49 @@ vm_get_aspace_by_id(aspace_id aid)
vm_address_space *
vm_get_kernel_aspace(void)
vm_get_kernel_address_space(void)
{
/* we can treat this one a little differently since it can't be deleted */
acquire_sem_etc(aspace_hash_sem, READ_COUNT, 0, 0);
atomic_add(&kernel_aspace->ref_count, 1);
release_sem_etc(aspace_hash_sem, READ_COUNT, 0);
return kernel_aspace;
}
aspace_id
vm_get_kernel_aspace_id(void)
vm_address_space *
vm_kernel_address_space(void)
{
return kernel_aspace;
}
team_id
vm_kernel_address_space_id(void)
{
return kernel_aspace->id;
}
vm_address_space *
vm_get_current_user_aspace(void)
vm_get_current_user_address_space(void)
{
return vm_get_aspace_by_id(vm_get_current_user_aspace_id());
return vm_get_address_space_by_id(vm_current_user_address_space_id());
}
aspace_id
vm_get_current_user_aspace_id(void)
team_id
vm_current_user_address_space_id(void)
{
struct thread *thread = thread_get_current_thread();
if (thread != NULL && thread->team->aspace != NULL)
return thread->team->aspace->id;
if (thread != NULL && thread->team->address_space != NULL)
return thread->team->id;
return B_ERROR;
}
void
vm_put_aspace(vm_address_space *aspace)
vm_put_address_space(vm_address_space *aspace)
{
bool remove = false;
@ -268,69 +261,69 @@ vm_put_aspace(vm_address_space *aspace)
*/
void
vm_delete_aspace(vm_address_space *aspace)
vm_delete_address_space(vm_address_space *addressSpace)
{
acquire_sem_etc(aspace->virtual_map.sem, WRITE_COUNT, 0, 0);
aspace->state = VM_ASPACE_STATE_DELETION;
release_sem_etc(aspace->virtual_map.sem, WRITE_COUNT, 0);
acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
addressSpace->state = VM_ASPACE_STATE_DELETION;
release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
vm_delete_areas(aspace);
vm_put_aspace(aspace);
vm_delete_areas(addressSpace);
vm_put_address_space(addressSpace);
}
status_t
vm_create_aspace(const char *name, team_id id, addr_t base, addr_t size, bool kernel, vm_address_space **_aspace)
vm_create_address_space(team_id id, addr_t base, addr_t size,
bool kernel, vm_address_space **_addressSpace)
{
vm_address_space *aspace;
vm_address_space *addressSpace;
status_t status;
aspace = (vm_address_space *)malloc(sizeof(vm_address_space));
if (aspace == NULL)
addressSpace = (vm_address_space *)malloc(sizeof(vm_address_space));
if (addressSpace == NULL)
return B_NO_MEMORY;
TRACE(("vm_create_aspace: %s: %lx bytes starting at 0x%lx => %p\n", name, size, base, aspace));
TRACE(("vm_create_aspace: %s: %lx bytes starting at 0x%lx => %p\n",
name, size, base, addressSpace));
aspace->name = (char *)malloc(strlen(name) + 1);
if (aspace->name == NULL ) {
free(aspace);
return B_NO_MEMORY;
addressSpace->base = base;
addressSpace->size = size;
addressSpace->areas = NULL;
addressSpace->area_hint = NULL;
addressSpace->change_count = 0;
if (!kernel) {
// the kernel address space will create its semaphore later
addressSpace->sem = create_sem(WRITE_COUNT, "address space");
if (addressSpace->sem < B_OK) {
status_t status = addressSpace->sem;
free(addressSpace);
return status;
}
}
strcpy(aspace->name, name);
aspace->id = id;
aspace->ref_count = 1;
aspace->state = VM_ASPACE_STATE_NORMAL;
aspace->fault_count = 0;
aspace->scan_va = base;
aspace->working_set_size = kernel ? DEFAULT_KERNEL_WORKING_SET : DEFAULT_WORKING_SET;
aspace->max_working_set = DEFAULT_MAX_WORKING_SET;
aspace->min_working_set = DEFAULT_MIN_WORKING_SET;
aspace->last_working_set_adjust = system_time();
addressSpace->id = id;
addressSpace->ref_count = 1;
addressSpace->state = VM_ASPACE_STATE_NORMAL;
addressSpace->fault_count = 0;
addressSpace->scan_va = base;
addressSpace->working_set_size = kernel ? DEFAULT_KERNEL_WORKING_SET : DEFAULT_WORKING_SET;
addressSpace->max_working_set = DEFAULT_MAX_WORKING_SET;
addressSpace->min_working_set = DEFAULT_MIN_WORKING_SET;
addressSpace->last_working_set_adjust = system_time();
// initialize the corresponding translation map
status = arch_vm_translation_map_init_map(&aspace->translation_map, kernel);
status = arch_vm_translation_map_init_map(&addressSpace->translation_map, kernel);
if (status < B_OK) {
free(aspace->name);
free(aspace);
free(addressSpace);
return status;
}
// initialize the virtual map
aspace->virtual_map.base = base;
aspace->virtual_map.size = size;
aspace->virtual_map.areas = NULL;
aspace->virtual_map.area_hint = NULL;
aspace->virtual_map.change_count = 0;
aspace->virtual_map.sem = create_sem(WRITE_COUNT, "aspacelock");
aspace->virtual_map.aspace = aspace;
// add the aspace to the global hash table
acquire_sem_etc(aspace_hash_sem, WRITE_COUNT, 0, 0);
hash_insert(aspace_table, aspace);
hash_insert(aspace_table, addressSpace);
release_sem_etc(aspace_hash_sem, WRITE_COUNT, 0);
*_aspace = aspace;
*_addressSpace = addressSpace;
return B_OK;
}
@ -358,7 +351,7 @@ vm_aspace_walk_next(struct hash_iterator *i)
status_t
vm_aspace_init(void)
vm_address_space_init(void)
{
aspace_hash_sem = -1;
@ -374,7 +367,8 @@ vm_aspace_init(void)
kernel_aspace = NULL;
// create the initial kernel address space
if (vm_create_aspace("kernel_land", 1, KERNEL_BASE, KERNEL_SIZE, true, &kernel_aspace) != B_OK)
if (vm_create_address_space(1, KERNEL_BASE, KERNEL_SIZE,
true, &kernel_aspace) != B_OK)
panic("vm_init: error creating kernel address space!\n");
add_debugger_command("aspaces", &dump_aspace_list, "Dump a list of all address spaces");
@ -385,13 +379,13 @@ vm_aspace_init(void)
status_t
vm_aspace_init_post_sem(void)
vm_address_space_init_post_sem(void)
{
status_t status = arch_vm_translation_map_init_kernel_map_post_sem(&kernel_aspace->translation_map);
if (status < B_OK)
return status;
status = kernel_aspace->virtual_map.sem = create_sem(WRITE_COUNT, "kernel_aspacelock");
status = kernel_aspace->sem = create_sem(WRITE_COUNT, "kernel_aspacelock");
if (status < B_OK)
return status;

View File

@ -37,17 +37,17 @@ scan_pages(vm_address_space *aspace, addr_t free_target)
// dprintf("scan_pages called on aspace 0x%x, id 0x%x, free_target %d\n", aspace, aspace->id, free_target);
acquire_sem_etc(aspace->virtual_map.sem, READ_COUNT, 0, 0);
acquire_sem_etc(aspace->sem, READ_COUNT, 0, 0);
firstArea = aspace->virtual_map.areas;
firstArea = aspace->areas;
while (firstArea && (firstArea->base + (firstArea->size - 1)) < aspace->scan_va)
firstArea = firstArea->aspace_next;
firstArea = firstArea->address_space_next;
if (!firstArea)
firstArea = aspace->virtual_map.areas;
firstArea = aspace->areas;
if (!firstArea) {
release_sem_etc(aspace->virtual_map.sem, READ_COUNT, 0);
release_sem_etc(aspace->sem, READ_COUNT, 0);
return;
}
@ -55,7 +55,7 @@ scan_pages(vm_address_space *aspace, addr_t free_target)
for (;;) {
// ignore reserved ranges
while (area != NULL && area->id == RESERVED_AREA_ID)
area = area->aspace_next;
area = area->address_space_next;
if (area == NULL)
break;
@ -120,7 +120,7 @@ scan_pages(vm_address_space *aspace, addr_t free_target)
}
mutex_unlock(&area->cache_ref->lock);
// move to the next area, wrapping around and stopping if we get back to the first area
area = area->aspace_next ? area->aspace_next : aspace->virtual_map.areas;
area = area->address_space_next ? area->address_space_next : aspace->areas;
if (area == firstArea)
break;
@ -128,8 +128,8 @@ scan_pages(vm_address_space *aspace, addr_t free_target)
break;
}
aspace->scan_va = area ? (firstArea->base + firstArea->size) : aspace->virtual_map.base;
release_sem_etc(aspace->virtual_map.sem, READ_COUNT, 0);
aspace->scan_va = area ? (firstArea->base + firstArea->size) : aspace->base;
release_sem_etc(aspace->sem, READ_COUNT, 0);
// dprintf("exiting scan_pages\n");
}
@ -199,7 +199,7 @@ page_daemon(void *unused)
// otherwise the iterator becomes out of date.
old_aspace = aspace;
aspace = vm_aspace_walk_next(&i);
vm_put_aspace(old_aspace);
vm_put_address_space(old_aspace);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -191,7 +191,7 @@ vm_page_write_modified(vm_cache *cache)
for (area = page->cache->ref->areas; area; area = area->cache_next) {
if (page->offset >= area->cache_offset
&& page->offset < area->cache_offset + area->size) {
vm_translation_map *map = &area->aspace->translation_map;
vm_translation_map *map = &area->address_space->translation_map;
map->ops->lock(map);
if (!gotPage) {

View File

@ -1,10 +1,10 @@
/*
** Copyright 2004, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the Haiku License.
**
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <kernel.h>
#include <vm.h>
@ -28,7 +28,7 @@ vm_test(void)
area_id region;
addr_t region_addr;
region = vm_create_anonymous_area(vm_get_kernel_aspace_id(), "test_region", (void **)&region_addr,
region = vm_create_anonymous_area(vm_kernel_address_space_id(), "test_region", (void **)&region_addr,
B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 16, B_NO_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (region < 0)
panic("vm_test 1: failed to create test region\n");
@ -37,7 +37,7 @@ vm_test(void)
memset((void *)region_addr, 0, B_PAGE_SIZE * 16);
dprintf("memsetted the region\n");
if (vm_delete_area(vm_get_kernel_aspace_id(), region) < 0)
if (vm_delete_area(vm_kernel_address_space_id(), region) < 0)
panic("vm_test 1: error deleting test region\n");
dprintf("deleted the region\n");
}
@ -50,7 +50,7 @@ vm_test(void)
char *ptr;
int i;
region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "test_physical_region", (void **)&ptr,
region = vm_map_physical_memory(vm_kernel_address_space_id(), "test_physical_region", (void **)&ptr,
B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 16, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0xb8000);
if (region < 0)
panic("vm_test 2: failed to create test region\n");
@ -63,7 +63,7 @@ vm_test(void)
for(i=0; i<64; i++) {
ptr[i] = 'a';
}
if (vm_delete_area(vm_get_kernel_aspace_id(), region) < 0)
if (vm_delete_area(vm_kernel_address_space_id(), region) < 0)
panic("vm_test 2: error deleting test region\n");
dprintf("deleted the region\n");
}
@ -74,12 +74,12 @@ vm_test(void)
addr_t va, pa;
addr_t va2;
vm_get_page_mapping(vm_get_kernel_aspace_id(), 0x80000000, &pa);
vm_get_page_mapping(vm_kernel_address_space_id(), 0x80000000, &pa);
vm_get_physical_page(pa, &va, PHYSICAL_PAGE_CAN_WAIT);
dprintf("pa 0x%lx va 0x%lx\n", pa, va);
dprintf("%d\n", memcmp((void *)0x80000000, (void *)va, B_PAGE_SIZE));
vm_get_page_mapping(vm_get_kernel_aspace_id(), 0x80001000, &pa);
vm_get_page_mapping(vm_kernel_address_space_id(), 0x80001000, &pa);
vm_get_physical_page(pa, &va2, PHYSICAL_PAGE_CAN_WAIT);
dprintf("pa 0x%lx va 0x%lx\n", pa, va2);
dprintf("%d\n", memcmp((void *)0x80001000, (void *)va2, B_PAGE_SIZE));
@ -87,7 +87,7 @@ vm_test(void)
vm_put_physical_page(va);
vm_put_physical_page(va2);
vm_get_page_mapping(vm_get_kernel_aspace_id(), 0x80000000, &pa);
vm_get_page_mapping(vm_kernel_address_space_id(), 0x80000000, &pa);
vm_get_physical_page(pa, &va, PHYSICAL_PAGE_CAN_WAIT);
dprintf("pa 0x%lx va 0x%lx\n", pa, va);
dprintf("%d\n", memcmp((void *)0x80000000, (void *)va, B_PAGE_SIZE));
@ -108,7 +108,7 @@ vm_test(void)
panic("vm_test 4: error finding region 'vid_mem'\n");
dprintf("vid_mem region = 0x%lx\n", region);
region2 = vm_clone_area(vm_get_kernel_aspace_id(), "vid_mem2",
region2 = vm_clone_area(vm_kernel_address_space_id(), "vid_mem2",
&ptr, B_ANY_KERNEL_ADDRESS, region, REGION_NO_PRIVATE_MAP, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (region2 < 0)
panic("vm_test 4: error cloning region 'vid_mem'\n");
@ -120,7 +120,7 @@ vm_test(void)
panic("vm_test 4: regions are not identical\n");
else
dprintf("vm_test 4: comparison ok\n");
if (vm_delete_area(vm_get_kernel_aspace_id(), region2) < 0)
if (vm_delete_area(vm_kernel_address_space_id(), region2) < 0)
panic("vm_test 4: error deleting cloned region\n");
}
#endif
@ -137,7 +137,7 @@ vm_test(void)
panic("vm_test 5: error finding region 'vid_mem'\n");
dprintf("vid_mem region = 0x%lx\n", region);
region2 = vm_clone_area(vm_get_kernel_aspace_id(), "vid_mem3",
region2 = vm_clone_area(vm_kernel_address_space_id(), "vid_mem3",
&ptr, B_ANY_KERNEL_ADDRESS, region, REGION_NO_PRIVATE_MAP, B_KERNEL_READ_AREA);
if (region2 < 0)
panic("vm_test 5: error cloning region 'vid_mem'\n");
@ -150,7 +150,7 @@ vm_test(void)
else
dprintf("vm_test 5: comparison ok\n");
if (vm_delete_area(vm_get_kernel_aspace_id(), region2) < 0)
if (vm_delete_area(vm_kernel_address_space_id(), region2) < 0)
panic("vm_test 5: error deleting cloned region\n");
}
#endif
@ -162,7 +162,7 @@ vm_test(void)
void *ptr;
int rc;
region = vm_create_anonymous_area(vm_get_kernel_aspace_id(), "test_region", &region_addr,
region = vm_create_anonymous_area(vm_kernel_address_space_id(), "test_region", &region_addr,
B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 16, B_NO_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (region < 0)
panic("vm_test 6: error creating test region\n");
@ -172,7 +172,7 @@ vm_test(void)
dprintf("memsetted the region\n");
region2 = vm_clone_area(vm_get_kernel_aspace_id(), "test_region2",
region2 = vm_clone_area(vm_kernel_address_space_id(), "test_region2",
&ptr, B_ANY_KERNEL_ADDRESS, region, REGION_NO_PRIVATE_MAP, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if(region2 < 0)
panic("vm_test 6: error cloning test region\n");
@ -184,10 +184,10 @@ vm_test(void)
else
dprintf("vm_test 6: comparison ok\n");
if(vm_delete_area(vm_get_kernel_aspace_id(), region) < 0)
if(vm_delete_area(vm_kernel_address_space_id(), region) < 0)
panic("vm_test 6: error deleting test region\n");
if(vm_delete_area(vm_get_kernel_aspace_id(), region2) < 0)
if(vm_delete_area(vm_kernel_address_space_id(), region2) < 0)
panic("vm_test 6: error deleting cloned region\n");
}
#endif
@ -201,18 +201,18 @@ vm_test(void)
fd = _kern_open("/boot/beos/system/kernel_" OBOS_ARCH, 0);
rid = vm_map_file(vm_get_kernel_aspace_id(), "mmap_test", &ptr, B_ANY_KERNEL_ADDRESS,
rid = vm_map_file(vm_kernel_address_space_id(), "mmap_test", &ptr, B_ANY_KERNEL_ADDRESS,
B_PAGE_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, REGION_NO_PRIVATE_MAP, "/boot/kernel", 0);
rid2 = vm_map_file(vm_get_kernel_aspace_id(), "mmap_test2", &ptr2, B_ANY_KERNEL_ADDRESS,
rid2 = vm_map_file(vm_kernel_address_space_id(), "mmap_test2", &ptr2, B_ANY_KERNEL_ADDRESS,
B_PAGE_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, REGION_NO_PRIVATE_MAP, "/boot/kernel", 0);
dprintf("diff %d\n", memcmp(ptr, ptr2, B_PAGE_SIZE));
dprintf("removing regions\n");
vm_delete_area(vm_get_kernel_aspace_id(), rid);
vm_delete_area(vm_get_kernel_aspace_id(), rid2);
vm_delete_area(vm_kernel_address_space_id(), rid);
vm_delete_area(vm_kernel_address_space_id(), rid2);
dprintf("regions deleted\n");
@ -231,7 +231,7 @@ vm_test(void)
dprintf("vm_test 8: creating test region...\n");
region = vm_create_anonymous_area(vm_get_kernel_aspace_id(), "test_region", &region_addr,
region = vm_create_anonymous_area(vm_kernel_address_space_id(), "test_region", &region_addr,
B_ANY_KERNEL_ADDRESS, B_PAGE_SIZE * 16, B_NO_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if(region < 0)
panic("vm_test 8: error creating test region\n");
@ -243,7 +243,7 @@ vm_test(void)
dprintf("vm_test 8: cloning test region with PRIVATE_MAP\n");
region2 = vm_clone_area(vm_get_kernel_aspace_id(), "test_region2",
region2 = vm_clone_area(vm_kernel_address_space_id(), "test_region2",
&ptr, B_ANY_KERNEL_ADDRESS, region, REGION_PRIVATE_MAP, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if(region2 < 0)
panic("vm_test 8: error cloning test region\n");
@ -296,10 +296,10 @@ vm_test(void)
else
panic("vm_test 8: comparison shows not private mapping\n");
if(vm_delete_area(vm_get_kernel_aspace_id(), region) < 0)
if(vm_delete_area(vm_kernel_address_space_id(), region) < 0)
panic("vm_test 8: error deleting test region\n");
if(vm_delete_area(vm_get_kernel_aspace_id(), region2) < 0)
if(vm_delete_area(vm_kernel_address_space_id(), region2) < 0)
panic("vm_test 8: error deleting cloned region\n");
}
#endif
@ -312,10 +312,10 @@ vm_test(void)
dprintf("vm_test 9: mapping /boot/kernel twice\n");
rid = vm_map_file(vm_get_kernel_aspace_id(), "mmap_test", &ptr, B_ANY_KERNEL_ADDRESS,
rid = vm_map_file(vm_kernel_address_space_id(), "mmap_test", &ptr, B_ANY_KERNEL_ADDRESS,
B_PAGE_SIZE*4, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, REGION_NO_PRIVATE_MAP, "/boot/kernel", 0);
rid2 = vm_map_file(vm_get_kernel_aspace_id(), "mmap_test2", &ptr2, B_ANY_KERNEL_ADDRESS,
rid2 = vm_map_file(vm_kernel_address_space_id(), "mmap_test2", &ptr2, B_ANY_KERNEL_ADDRESS,
B_PAGE_SIZE*4, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, REGION_PRIVATE_MAP, "/boot/kernel", 0);
err = memcmp(ptr, ptr2, B_PAGE_SIZE);
@ -348,8 +348,8 @@ vm_test(void)
dprintf("vm_test 9: removing regions\n");
vm_delete_area(vm_get_kernel_aspace_id(), rid);
vm_delete_area(vm_get_kernel_aspace_id(), rid2);
vm_delete_area(vm_kernel_address_space_id(), rid);
vm_delete_area(vm_kernel_address_space_id(), rid2);
dprintf("vm_test 9: regions deleted\n");
@ -371,7 +371,7 @@ vm_test(void)
ta[2048] = 0xabcd;
ta[3072] = 0xefef;
b = vm_copy_area(vm_get_kernel_aspace_id(), "copy of source", &address, B_ANY_KERNEL_ADDRESS,
b = vm_copy_area(vm_kernel_address_space_id(), "copy of source", &address, B_ANY_KERNEL_ADDRESS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, a);
tb = (uint32 *)address;
if (tb[0] != 0x1234 || tb[1024] != 0x5678 || tb[2048] != 0xabcd || tb[3072] != 0xefef)
@ -387,8 +387,8 @@ vm_test(void)
dprintf("vm_test 10: remove areas\n");
vm_delete_area(vm_get_kernel_aspace_id(), a);
vm_delete_area(vm_get_kernel_aspace_id(), b);
vm_delete_area(vm_kernel_address_space_id(), a);
vm_delete_area(vm_kernel_address_space_id(), b);
}
dprintf("vm_test 10: passed\n");
#endif
@ -417,7 +417,7 @@ vm_test(void)
dprintf("vm_test 11: remove areas\n");
vm_delete_area(vm_get_kernel_aspace_id(), a);
vm_delete_area(vm_kernel_address_space_id(), a);
}
dprintf("vm_test 11: passed\n");
#endif