add a new flag VM_MAP_DYING, which is set before we start
tearing down a vm_map.  use this to skip the pmap_update()
at the end of all the removes, which allows pmaps to optimize
pmap tear-down.  also, use the new pmap_remove_all() hook to
let the pmap implementation know what we're up to.
chs 2002-09-22 07:21:29 +00:00
parent 2b73cf7ece
commit 94a62d45d6
3 changed files with 39 additions and 72 deletions
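For illustration only, not part of this commit: a rough sketch of how a
machine-dependent pmap might take advantage of the new hook.  The PM_DYING
flag and the pmap_tlb_asid_release()/pmap_free_ptes()/pmap_tlb_shootdown()
helpers are hypothetical names invented for this sketch, not existing NetBSD
interfaces.  The point is only that pmap_remove_all() lets the pmap stop
doing per-range TLB work once it knows the whole address space is going
away, and that the VM_MAP_DYING check added to uvm_unmap_remove() below
skips the matching pmap_update().

/*
 * hypothetical pmap-side sketch; the names marked XXX are made up
 * for illustration and do not exist in the tree.
 */
void
pmap_remove_all(struct pmap *pmap)
{

	/* the whole address space is about to be torn down */
	pmap->pm_flags |= PM_DYING;		/* XXX hypothetical flag */

	/* drop this address space's TLB entries once, up front */
	pmap_tlb_asid_release(pmap);		/* XXX hypothetical helper */
}

void
pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
{

	/* free PTEs and page tables for [sva, eva) as usual */
	pmap_free_ptes(pmap, sva, eva);		/* XXX hypothetical helper */

	if ((pmap->pm_flags & PM_DYING) == 0) {
		/*
		 * only queue per-range TLB shootdowns for live pmaps;
		 * a dying pmap's TLB entries were already dropped above
		 * and uvm_unmap_remove() will skip the final pmap_update().
		 */
		pmap_tlb_shootdown(pmap, sva, eva);	/* XXX hypothetical */
	}
}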

uvm_map.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.119 2002/09/15 16:54:31 chs Exp $ */
/* $NetBSD: uvm_map.c,v 1.120 2002/09/22 07:21:29 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.119 2002/09/15 16:54:31 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.120 2002/09/22 07:21:29 chs Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -1165,7 +1165,9 @@ uvm_unmap_remove(map, start, end, entry_list)
first_entry = entry;
entry = next;
}
pmap_update(vm_map_pmap(map));
if ((map->flags & VM_MAP_DYING) == 0) {
pmap_update(vm_map_pmap(map));
}
/*
* now we've cleaned up the map and are ready for the caller to drop
@@ -1209,14 +1211,11 @@ uvm_unmap_detach(first_entry, flags)
* drop reference to our backing object, if we've got one
*/
if (UVM_ET_ISSUBMAP(first_entry)) {
/* ... unlikely to happen, but play it safe */
uvm_map_deallocate(first_entry->object.sub_map);
} else {
if (UVM_ET_ISOBJ(first_entry) &&
first_entry->object.uvm_obj->pgops->pgo_detach)
first_entry->object.uvm_obj->pgops->
pgo_detach(first_entry->object.uvm_obj);
KASSERT(!UVM_ET_ISSUBMAP(first_entry));
if (UVM_ET_ISOBJ(first_entry) &&
first_entry->object.uvm_obj->pgops->pgo_detach) {
(*first_entry->object.uvm_obj->pgops->pgo_detach)
(first_entry->object.uvm_obj);
}
next_entry = first_entry->next;
uvm_mapent_free(first_entry);
@@ -2853,24 +2852,21 @@ uvmspace_exec(p, start, end)
* when a process execs another program image.
*/
vm_map_lock(map);
vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
vm_map_unlock(map);
/*
* now unmap the old program
*/
pmap_remove_all(map->pmap);
uvm_unmap(map, map->min_offset, map->max_offset);
/*
* resize the map
*/
vm_map_lock(map);
map->min_offset = start;
map->max_offset = end;
vm_map_unlock(map);
} else {
/*
@@ -2904,35 +2900,35 @@ uvmspace_free(vm)
struct vmspace *vm;
{
struct vm_map_entry *dead_entries;
struct vm_map *map;
UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
if (--vm->vm_refcnt == 0) {
/*
* lock the map, to wait out all other references to it. delete
* all of the mappings and pages they hold, then call the pmap
* module to reclaim anything left.
*/
#ifdef SYSVSHM
/* Get rid of any SYSV shared memory segments. */
if (vm->vm_shm != NULL)
shmexit(vm);
#endif
vm_map_lock(&vm->vm_map);
if (vm->vm_map.nentries) {
uvm_unmap_remove(&vm->vm_map,
vm->vm_map.min_offset, vm->vm_map.max_offset,
&dead_entries);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
}
pmap_destroy(vm->vm_map.pmap);
vm->vm_map.pmap = NULL;
pool_put(&uvm_vmspace_pool, vm);
if (--vm->vm_refcnt > 0) {
return;
}
UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
/*
* at this point, there should be no other references to the map.
* delete all of the mappings, then destroy the pmap.
*/
map = &vm->vm_map;
map->flags |= VM_MAP_DYING;
pmap_remove_all(map->pmap);
#ifdef SYSVSHM
/* Get rid of any SYSV shared memory segments. */
if (vm->vm_shm != NULL)
shmexit(vm);
#endif
if (map->nentries) {
uvm_unmap_remove(map, map->min_offset, map->max_offset,
&dead_entries);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
}
pmap_destroy(map->pmap);
pool_put(&uvm_vmspace_pool, vm);
}
/*

uvm_map.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.h,v 1.31 2001/10/03 13:32:23 christos Exp $ */
/* $NetBSD: uvm_map.h,v 1.32 2002/09/22 07:21:31 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -220,6 +220,7 @@ struct vm_map {
#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
#define VM_MAP_BUSY 0x08 /* rw: map is busy */
#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
#define VM_MAP_DYING 0x20 /* rw: map is being destroyed */
/* XXX: number of kernel maps and entries to statically allocate */

uvm_map_i.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map_i.h,v 1.22 2001/06/26 17:55:15 thorpej Exp $ */
/* $NetBSD: uvm_map_i.h,v 1.23 2002/09/22 07:21:31 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -178,36 +178,6 @@ uvm_map_reference(map)
map->ref_count++;
simple_unlock(&map->ref_lock);
}
/*
* uvm_map_deallocate: drop reference to a map
*
* => caller must not lock map
* => we will zap map if ref count goes to zero
*/
MAP_INLINE void
uvm_map_deallocate(map)
struct vm_map *map;
{
int c;
simple_lock(&map->ref_lock);
c = --map->ref_count;
simple_unlock(&map->ref_lock);
if (c > 0) {
return;
}
/*
* all references gone. unmap and free.
*/
uvm_unmap(map, map->min_offset, map->max_offset);
pmap_destroy(map->pmap);
FREE(map, M_VMMAP);
}
#endif /* defined(UVM_MAP_INLINE) || defined(UVM_MAP) */
#endif /* _UVM_UVM_MAP_I_H_ */