PR kern/55071 (Panic shortly after running X11 due to kernel diagnostic assertion "mutex_owned(&pp->pp_lock)")

- Fix a locking bug in pmap_pp_clear_attrs(), and in pmap_pp_remove() do the
  TLB shootdown while still holding the target pmap's lock.

Also:

- Finish PV list locking for x86 & update comments around same.

- Keep track of the min/max index of PTEs inserted into each PTP, and use
  that to clip ranges of VAs passed to pmap_remove_ptes().

- Based on the above, implement a pmap_remove_all() for x86 that clears out
  the pmap in a single pass.  Makes exit() / fork() much cheaper.  (See the
  sketch below.)
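The min/max tracking in the items above is what makes the single-pass
pmap_remove_all() cheap: each PTP remembers the lowest and highest PTE slot
ever written, so removal never sweeps slots that were never populated.  What
follows is a minimal, self-contained userspace sketch of that idea only; it
ignores locking, PV lists and TLB shootdown entirely, and toy_ptp, toy_enter
and toy_remove_range are invented names for illustration, not NetBSD
functions.

#include <stdint.h>
#include <stdio.h>

#define NPTE 512                        /* PTEs per page table page */

struct toy_ptp {
        uint64_t pte[NPTE];             /* the page table page itself */
        int lo, hi;                     /* min/max slot ever inserted */
};

static void
toy_ptp_init(struct toy_ptp *p)
{
        for (int i = 0; i < NPTE; i++)
                p->pte[i] = 0;
        p->lo = NPTE;                   /* empty PTP: lo > hi */
        p->hi = -1;
}

/* On insertion, widen the tracked [lo, hi] window. */
static void
toy_enter(struct toy_ptp *p, int idx, uint64_t pte)
{
        p->pte[idx] = pte;
        if (idx < p->lo)
                p->lo = idx;
        if (idx > p->hi)
                p->hi = idx;
}

/*
 * Remove PTEs in [sidx, eidx).  Clip the request against [lo, hi]:
 * slots outside that window were never written, so they need no work.
 */
static int
toy_remove_range(struct toy_ptp *p, int sidx, int eidx)
{
        int removed = 0;

        if (sidx < p->lo)
                sidx = p->lo;
        if (eidx > p->hi + 1)
                eidx = p->hi + 1;
        for (int i = sidx; i < eidx; i++) {
                if (p->pte[i] != 0) {
                        p->pte[i] = 0;
                        removed++;
                }
        }
        return removed;
}

int
main(void)
{
        struct toy_ptp p;

        toy_ptp_init(&p);
        toy_enter(&p, 7, 0x1007);
        toy_enter(&p, 9, 0x2007);

        /* A "remove all" visits slots 7..9 only, not 0..511. */
        printf("scanned %d slots, removed %d PTEs\n",
            p.hi - p.lo + 1, toy_remove_range(&p, 0, NPTE));
        return 0;
}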
Author: ad
Date:   2020-03-14 18:24:10 +00:00
Parent: 16d4fad635
Commit: 0faf5aacb0

3 changed files with 487 additions and 216 deletions

pmap.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.112 2020/03/14 14:05:44 ad Exp $	*/
+/*	$NetBSD: pmap.h,v 1.113 2020/03/14 18:24:10 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -248,6 +248,8 @@ extern struct pool_cache pmap_cache;
  * (the other object locks are only used when uvm_pagealloc is called)
  */
 
+struct pv_page;
+
 struct pmap {
 	struct uvm_object pm_obj[PTP_LEVELS-1];/* objects for lvl >= 1) */
 	LIST_ENTRY(pmap) pm_list;	/* list of all pmaps */
@@ -256,11 +258,11 @@ struct pmap {
 	struct vm_page *pm_ptphint[PTP_LEVELS-1];
 					/* pointer to a PTP in our pmap */
 	struct pmap_statistics pm_stats;  /* pmap stats */
-	struct pv_entry *pm_pve;	/* spare pv_entry */
 #if !defined(__x86_64__)
 	vaddr_t pm_hiexec;		/* highest executable mapping */
 #endif /* !defined(__x86_64__) */
+	struct lwp *pm_remove_all;	/* who's emptying the pmap */
 	union descriptor *pm_ldt;	/* user-set LDT */
 	size_t pm_ldt_len;		/* size of LDT in bytes */

pmap_pv.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_pv.h,v 1.13 2020/03/10 22:38:41 ad Exp $	*/
+/*	$NetBSD: pmap_pv.h,v 1.14 2020/03/14 18:24:10 ad Exp $	*/
 
 /*-
  * Copyright (c)2008 YAMAMOTO Takashi,
@@ -34,6 +34,7 @@
 #include <sys/rbtree.h>
 
 struct vm_page;
+struct pmap_page;
 
 /*
  * structures to track P->V mapping
@@ -51,14 +52,14 @@ struct pv_pte {
 };
 
 /*
- * pv_entry: plug pv_pte into lists.
+ * pv_entry: plug pv_pte into lists.  32 bytes on i386, 64 on amd64.
  */
 
 struct pv_entry {
 	struct pv_pte pve_pte;		/* should be the first member */
 	LIST_ENTRY(pv_entry) pve_list;	/* on pmap_page::pp_pvlist */
 	rb_node_t pve_rb;		/* red-black tree node */
-	uintptr_t pve_padding;		/* unused */
+	struct pmap_page *pve_pp;	/* backpointer to mapped page */
 };
 
 #define pve_next pve_list.le_next
@@ -71,16 +72,14 @@ struct pmap_page {
 		/* PTPs */
 		rb_tree_t rb;
-		/* PTPs */
+		/* PTPs, when being freed */
 		LIST_ENTRY(vm_page) link;
-		/* Non-PTPs */
+		/* Non-PTPs (i.e. normal pages) */
 		struct {
-			/* PP_EMBEDDED */
 			struct pv_pte pte;
 			LIST_HEAD(, pv_entry) pvlist;
-			uint8_t flags;
+			uint8_t embedded;
 			uint8_t attrs;
 		} s;
 	} pp_u;
@@ -89,7 +88,7 @@ struct pmap_page {
 #define	pp_link		pp_u.link
 #define	pp_pte		pp_u.s.pte
 #define	pp_pvlist	pp_u.s.pvlist
-#define	pp_pflags	pp_u.s.flags
+#define	pp_embedded	pp_u.s.embedded
 #define	pp_attrs	pp_u.s.attrs
 };
@@ -97,10 +96,6 @@ struct pmap_page {
 #define	PP_ATTRS_A	0x02	/* Accessed */
 #define	PP_ATTRS_W	0x04	/* Writable */
 
-/* pp_flags */
-#define	PP_EMBEDDED	1
-#define	PP_FREEING	2
-
 #define	PMAP_PAGE_INIT(pp) \
 do { \
 	LIST_INIT(&(pp)->pp_pvlist); \

File diff suppressed because it is too large.
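A note on the pmap_page changes above: the first P->V mapping of a physical
page lives in the pmap_page itself (pp_pte), and only additional mappings
allocate pv_entry structures onto pp_pvlist; this commit turns what was the
PP_EMBEDDED bit in pp_pflags into a dedicated pp_embedded byte.  The sketch
below shows roughly how such an embedded-first-mapping scheme works.  It is
not the kernel's actual interface: every toy_* name is invented, and it
assumes a BSD-style <sys/queue.h>.

#include <stdbool.h>
#include <stdlib.h>
#include <sys/queue.h>

struct toy_pte_ref {                    /* cf. pv_pte: identifies one mapping */
        void *pmap;                     /* which address space */
        unsigned long va;               /* at which virtual address */
};

struct toy_pv_entry {                   /* cf. pv_entry: overflow mappings */
        struct toy_pte_ref pte;
        LIST_ENTRY(toy_pv_entry) list;
};

struct toy_page {                       /* cf. pmap_page */
        struct toy_pte_ref pte;         /* first mapping, stored in place */
        bool embedded;                  /* cf. pp_embedded: slot in use? */
        LIST_HEAD(, toy_pv_entry) pvlist;
};

/*
 * Record a new mapping of the page.  The common case -- a page mapped
 * exactly once -- costs no allocation; only additional mappings
 * (e.g. of shared pages) allocate an overflow entry.
 */
static int
toy_enter_pv(struct toy_page *pg, void *pmap, unsigned long va)
{
        struct toy_pv_entry *pve;

        if (!pg->embedded) {
                pg->pte.pmap = pmap;
                pg->pte.va = va;
                pg->embedded = true;
                return 0;
        }
        if ((pve = malloc(sizeof(*pve))) == NULL)
                return -1;              /* out of memory */
        pve->pte.pmap = pmap;
        pve->pte.va = va;
        LIST_INSERT_HEAD(&pg->pvlist, pve, list);
        return 0;
}

int
main(void)
{
        struct toy_page pg = { .embedded = false,
            .pvlist = LIST_HEAD_INITIALIZER(pg.pvlist) };

        toy_enter_pv(&pg, (void *)0x1, 0x1000); /* embedded, no allocation */
        toy_enter_pv(&pg, (void *)0x2, 0x2000); /* overflow, allocated */
        return 0;
}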