- uvmspace_exec(), uvmspace_free(): if pmap_remove_all() returns true, the
  pmap has been emptied.  Pass UVM_FLAG_VAONLY when clearing out the map in
  that case and avoid the needless extra work of tearing down each mapping
  individually (a condensed sketch follows this list).

- uvm_map_lookup_entry(): remove the code that did a linear scan of map
  entries for small maps instead of using the RB tree.  Its value is
  questionable, and I think it was almost never triggered, because the
  average number of map entries has probably exceeded the hard-coded
  threshold for quite some time (see the lookup sketch below).

- vm_map_entry: align it on a cacheline boundary, and cluster the fields
  used during RB tree lookup at the beginning (illustrated in the last
  sketch below).
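
In outline, the teardown path in both functions now looks like the following
(a minimal sketch condensed from the uvm_map.c hunks below; locking,
assertions and the SYSV shm cleanup are omitted, and the helper name is
hypothetical):

	/*
	 * Sketch only, not the committed code verbatim.  If
	 * pmap_remove_all() reports that it already emptied the pmap,
	 * UVM_FLAG_VAONLY tells the unmap path to skip the per-entry
	 * pmap work and only tear down the VA bookkeeping.
	 */
	static void
	example_clear_map(struct vm_map *map)
	{
		int flags;

		flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
		uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
	}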
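
With the linear scan gone, uvm_map_lookup_entry() reduces to a hint check
followed by an RB tree descent.  Roughly, again condensed from the diff
(event counters, history logging and asserts dropped; this assumes
uvm_map_lookup_entry_bytree() leaves *entry at the predecessor on failure,
as the existing helper does):

	/* Sketch of the simplified lookup, not the committed code verbatim. */
	static bool
	example_lookup(struct vm_map *map, vaddr_t address,
	    struct vm_map_entry **entry)
	{
		struct vm_map_entry *cur = map->hint;

		/* Fast path: the hint covers the address (the common case). */
		if (cur != &map->header &&
		    address >= cur->start && cur->end > address) {
			*entry = cur;
			return true;
		}

		/* Otherwise a plain RB tree lookup; no linear scan any more. */
		if (uvm_map_lookup_entry_bytree(map, address, entry)) {
			SAVE_HINT(map, map->hint, *entry);
			return true;
		}
		SAVE_HINT(map, map->hint, *entry);
		return false;
	}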
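
The layout change itself is in the uvm_map.h hunk at the end of this diff:
rb_node, start/end and gap/maxgap now lead the structure, and
uvm_map_init_caches() creates the entry pool cache with coherency_unit
alignment so every entry starts on a cacheline boundary.  A small standalone
illustration of the idea (hypothetical demo, not kernel code; a 64-byte line
size is assumed):

	#include <stddef.h>
	#include <stdio.h>

	#define CACHE_LINE	64	/* assumed; COHERENCY_UNIT in the kernel */

	/* Fields used during tree lookup first, cold fields after. */
	struct demo_entry {
		void		*rb_left, *rb_right, *rb_parent; /* stand-in for rb_node */
		unsigned long	 start, end;	/* address range */
		unsigned long	 gap, maxgap;	/* free space bookkeeping */
		void		*prev, *next;	/* list linkage (cold) */
		/* ... remaining cold fields ... */
	} __attribute__((aligned(CACHE_LINE)));

	int
	main(void)
	{
		/* With 64-bit pointers the lookup fields fit in one line. */
		printf("lookup fields end at offset %zu, sizeof(entry) = %zu\n",
		    offsetof(struct demo_entry, maxgap) + sizeof(unsigned long),
		    sizeof(struct demo_entry));
		return 0;
	}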
Author: ad
Date:   2020-03-14 14:15:43 +00:00
Parent: be9c6147a4
Commit: 3b6112efc8

2 changed files with 48 additions and 99 deletions

uvm_map.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.372 2020/02/23 15:46:43 ad Exp $ */
/* $NetBSD: uvm_map.c,v 1.373 2020/03/14 14:15:43 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.372 2020/02/23 15:46:43 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.373 2020/03/14 14:15:43 ad Exp $");
#include "opt_ddb.h"
#include "opt_pax.h"
@@ -141,10 +141,8 @@ UVMMAP_EVCNT_DEFINE(knomerge)
UVMMAP_EVCNT_DEFINE(map_call)
UVMMAP_EVCNT_DEFINE(mlk_call)
UVMMAP_EVCNT_DEFINE(mlk_hint)
UVMMAP_EVCNT_DEFINE(mlk_list)
UVMMAP_EVCNT_DEFINE(mlk_tree)
UVMMAP_EVCNT_DEFINE(mlk_treeloop)
UVMMAP_EVCNT_DEFINE(mlk_listloop)
const char vmmapbsy[] = "vmmapbsy";
@@ -823,8 +821,8 @@ static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{
memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
((char *)src));
memcpy(dst, src, sizeof(*dst));
dst->flags = 0;
}
#if defined(DEBUG)
@@ -940,7 +938,7 @@ uvm_map_init_caches(void)
*/
pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
coherency_unit, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}
@@ -1679,7 +1677,6 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
struct vm_map_entry **entry /* OUT */)
{
struct vm_map_entry *cur;
bool use_tree = false;
UVMHIST_FUNC("uvm_map_lookup_entry");
UVMHIST_CALLED(maphist);
@@ -1687,95 +1684,41 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
(uintptr_t)map, address, (uintptr_t)entry, 0);
/*
* start looking either from the head of the
* list, or from the hint.
* make a quick check to see if we are already looking at
* the entry we want (which is usually the case). note also
* that we don't need to save the hint here... it is the
* same hint (unless we are at the header, in which case the
* hint didn't buy us anything anyway).
*/
cur = map->hint;
if (cur == &map->header)
cur = cur->next;
UVMMAP_EVCNT_INCR(mlk_call);
if (address >= cur->start) {
/*
* go from hint to end of list.
*
* but first, make a quick check to see if
* we are already looking at the entry we
* want (which is usually the case).
* note also that we don't need to save the hint
* here... it is the same hint (unless we are
* at the header, in which case the hint didn't
* buy us anything anyway).
*/
if (cur != &map->header && cur->end > address) {
UVMMAP_EVCNT_INCR(mlk_hint);
*entry = cur;
UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
(uintptr_t)cur, 0, 0, 0);
uvm_mapent_check(*entry);
return (true);
}
if (map->nentries > 15)
use_tree = true;
} else {
/*
* invalid hint. use tree.
*/
use_tree = true;
if (cur != &map->header &&
address >= cur->start && cur->end > address) {
UVMMAP_EVCNT_INCR(mlk_hint);
*entry = cur;
UVMHIST_LOG(maphist,"<- got it via hint (%#jx)",
(uintptr_t)cur, 0, 0, 0);
uvm_mapent_check(*entry);
return (true);
}
uvm_map_check(map, __func__);
if (use_tree) {
/*
* Simple lookup in the tree. Happens when the hint is
* invalid, or nentries reach a threshold.
*/
UVMMAP_EVCNT_INCR(mlk_tree);
if (uvm_map_lookup_entry_bytree(map, address, entry)) {
goto got;
} else {
goto failed;
}
}
/*
* search linearly
* lookup in the tree.
*/
UVMMAP_EVCNT_INCR(mlk_list);
while (cur != &map->header) {
UVMMAP_EVCNT_INCR(mlk_listloop);
if (cur->end > address) {
if (address >= cur->start) {
/*
* save this lookup for future
* hints, and return
*/
*entry = cur;
got:
SAVE_HINT(map, map->hint, *entry);
UVMHIST_LOG(maphist,"<- search got it (%#jx)",
(uintptr_t)cur, 0, 0, 0);
KDASSERT((*entry)->start <= address);
KDASSERT(address < (*entry)->end);
uvm_mapent_check(*entry);
return (true);
}
break;
}
cur = cur->next;
UVMMAP_EVCNT_INCR(mlk_tree);
if (__predict_true(uvm_map_lookup_entry_bytree(map, address, entry))) {
SAVE_HINT(map, map->hint, *entry);
UVMHIST_LOG(maphist,"<- search got it (%#jx)",
(uintptr_t)cur, 0, 0, 0);
KDASSERT((*entry)->start <= address);
KDASSERT(address < (*entry)->end);
uvm_mapent_check(*entry);
return (true);
}
*entry = cur->prev;
failed:
SAVE_HINT(map, map->hint, *entry);
UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
KDASSERT((*entry) == &map->header || (*entry)->end <= address);
KDASSERT((*entry)->next == &map->header ||
@@ -4176,6 +4119,7 @@ uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
struct proc *p = l->l_proc;
struct vmspace *nvm, *ovm = p->p_vmspace;
struct vm_map *map;
int flags;
KASSERT(ovm != NULL);
#ifdef __HAVE_CPU_VMSPACE_EXEC
@@ -4214,8 +4158,8 @@ uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end, bool topdown)
* now unmap the old program
*/
pmap_remove_all(map->pmap);
uvm_unmap(map, vm_map_min(map), vm_map_max(map));
flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
uvm_unmap1(map, vm_map_min(map), vm_map_max(map), flags);
KASSERT(map->header.prev == &map->header);
KASSERT(map->nentries == 0);
@@ -4271,6 +4215,7 @@ uvmspace_free(struct vmspace *vm)
{
struct vm_map_entry *dead_entries;
struct vm_map *map = &vm->vm_map;
int flags;
UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
@@ -4285,7 +4230,7 @@ uvmspace_free(struct vmspace *vm)
*/
map->flags |= VM_MAP_DYING;
pmap_remove_all(map->pmap);
flags = pmap_remove_all(map->pmap) ? UVM_FLAG_VAONLY : 0;
/* Get rid of any SYSV shared memory segments. */
if (uvm_shmexit && vm->vm_shm != NULL)
@@ -4293,7 +4238,7 @@ uvmspace_free(struct vmspace *vm)
if (map->nentries) {
uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
&dead_entries, 0);
&dead_entries, flags);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
}

uvm_map.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.h,v 1.78 2020/02/23 15:46:43 ad Exp $ */
/* $NetBSD: uvm_map.h,v 1.79 2020/03/14 14:15:43 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -125,36 +125,40 @@
* a VM object (or sharing map) and offset into that object,
* and user-exported inheritance and protection information.
* Also included is control information for virtual copy operations.
*
* At runtime this is aligned on a cacheline boundary, with fields
* used during fault processing to do RB tree lookup clustered at
* the beginning.
*/
struct vm_map_entry {
struct rb_node rb_node; /* tree information */
vaddr_t start; /* start address */
vaddr_t end; /* end address */
vsize_t gap; /* free space after */
vsize_t maxgap; /* space in subtree */
struct vm_map_entry *prev; /* previous entry */
struct vm_map_entry *next; /* next entry */
vaddr_t start; /* start address */
vaddr_t end; /* end address */
union {
struct uvm_object *uvm_obj; /* uvm object */
struct vm_map *sub_map; /* belongs to another map */
} object; /* object I point to */
voff_t offset; /* offset into object */
int etype; /* entry type */
uint8_t etype; /* entry type */
uint8_t flags; /* flags */
uint8_t advice; /* madvise advice */
uint8_t unused; /* unused */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if == 0 */
struct vm_aref aref; /* anonymous overlay */
int advice; /* madvise advice */
#define uvm_map_entry_stop_copy flags
u_int8_t flags; /* flags */
};
/* flags */
#define UVM_MAP_KERNEL 0x01 /* kernel map entry */
#define UVM_MAP_STATIC 0x04 /* special static entries */
#define UVM_MAP_NOMERGE 0x08 /* this entry is not mergable */
};
#define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0)
/*