Go back to freeing struct vm_anon one by one. There may have been an advantage circa ~2008 but there isn't now.
ad 2020-03-20 19:08:54 +00:00
parent 1ffecadffe
commit 0622217a01
7 changed files with 76 additions and 146 deletions
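
For context: the change replaces the deferred-free scheme, in which anons whose reference count reached zero were chained through an_link and released in one batch by uvm_anon_freelst() (which also unlocked the amap), with an immediate uvm_anfree() at the point the last reference drops, followed by a plain amap_unlock(). The sketch below is illustrative only and is not the NetBSD code: struct anon, struct amap, free_anon(), amap_unlock_sketch(), wipe_deferred() and wipe_immediate() are simplified stand-ins for struct vm_anon, struct vm_amap and the real amap_wipeout()/uvm_anfree()/uvm_anon_freelst() paths.

/*
 * Illustrative sketch only -- simplified stand-ins, not the NetBSD sources.
 */
#include <stdlib.h>

struct anon {
	unsigned int	 ref;	/* reference count (cf. an_ref) */
	struct anon	*link;	/* old scheme only: deferred-free chain (cf. an_link) */
};

struct amap {
	struct anon	**slots;	/* anons held by this amap (cf. am_anon) */
	unsigned int	  nslot;	/* number of slots (cf. am_nslot) */
};

static void amap_unlock_sketch(struct amap *am) { (void)am; /* drop the amap lock */ }
static void free_anon(struct anon *an) { free(an); /* stands in for releasing page/swap, then the pool entry */ }

/* Old scheme: chain unreferenced anons, unlock, then free the whole batch. */
static void
wipe_deferred(struct amap *am)
{
	struct anon *tofree = NULL, *next;

	for (unsigned int i = 0; i < am->nslot; i++) {
		struct anon *an = am->slots[i];
		if (an != NULL && --an->ref == 0) {
			an->link = tofree;	/* defer: push onto the free list */
			tofree = an;
		}
	}
	amap_unlock_sketch(am);			/* uvm_anon_freelst() unlocked here */
	for (; tofree != NULL; tofree = next) {
		next = tofree->link;
		free_anon(tofree);		/* batch free after the unlock */
	}
}

/* New scheme (this commit): free each anon as its last reference goes away. */
static void
wipe_immediate(struct amap *am)
{
	for (unsigned int i = 0; i < am->nslot; i++) {
		struct anon *an = am->slots[i];
		if (an != NULL && --an->ref == 0)
			free_anon(an);		/* immediate; no an_link chain needed */
	}
	amap_unlock_sketch(am);			/* callers now just amap_unlock() */
}

int
main(void)
{
	struct anon *a = calloc(1, sizeof(*a));
	struct amap am = { .slots = &a, .nslot = 1 };

	if (a == NULL)
		return 1;
	a->ref = 1;
	wipe_immediate(&am);	/* drops the only reference and frees 'a' */
	(void)wipe_deferred;	/* old scheme, kept for comparison only */
	return 0;
}

The diff bears this out mechanically: each uvm_anon_freelst(amap, tofree) call becomes a uvm_anfree() where the reference drops plus a plain amap_unlock(), the tofree locals disappear, and the an_ref/an_link union in struct vm_anon collapses back to a plain reference count.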

sys/uvm/uvm_amap.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.c,v 1.118 2020/03/14 20:23:51 ad Exp $ */
/* $NetBSD: uvm_amap.c,v 1.119 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.118 2020/03/14 20:23:51 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.119 2020/03/20 19:08:54 ad Exp $");
#include "opt_uvmhist.h"
@@ -358,7 +358,7 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
int *newppref, *oldppref;
#endif
int i, *newsl, *newbck, *oldsl, *oldbck;
struct vm_anon **newover, **oldover, *tofree;
struct vm_anon **newover, **oldover;
const km_flag_t kmflags =
(flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
@@ -386,7 +386,6 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
slotadj = slotadd - slotoff;
slotarea = amap->am_maxslot - slotmapped;
}
tofree = NULL;
/*
* case 1: we already have enough slots in the map and thus
@@ -399,10 +398,10 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff + slotmapped,
slotadd, 1, &tofree);
slotadd, 1);
}
#endif
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
UVMHIST_LOG(maphist,
"<- done (case 1f), amap = %#jx, sltneed=%jd",
(uintptr_t)amap, slotneed, 0, 0);
@@ -414,11 +413,10 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
entry->aref.ar_pageoff = slotoff;
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff, slotadd, 1,
&tofree);
amap_pp_adjref(amap, slotoff, slotadd, 1);
}
#endif
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
UVMHIST_LOG(maphist,
"<- done (case 1b), amap = %#jx, sltneed=%jd",
(uintptr_t)amap, slotneed, 0, 0);
@@ -439,14 +437,13 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
amap_pp_adjref(amap,
slotoff + slotmapped,
(amap->am_nslot -
(slotoff + slotmapped)), 1,
&tofree);
(slotoff + slotmapped)), 1);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
#endif
amap->am_nslot = slotneed;
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
/*
* no need to zero am_anon since that was done at
@@ -620,8 +617,7 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
if ((flags & AMAP_EXTEND_FORWARDS) &&
(slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)), 1,
&tofree);
(amap->am_nslot - (slotoff + slotmapped)), 1);
if (flags & AMAP_EXTEND_FORWARDS)
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
@@ -646,8 +642,7 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
}
oldnslots = amap->am_maxslot;
amap->am_maxslot = slotalloc;
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
kmem_free(oldsl, oldnslots * sizeof(*oldsl));
kmem_free(oldbck, oldnslots * sizeof(*oldbck));
@@ -727,7 +722,6 @@ amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
void
amap_wipeout(struct vm_amap *amap)
{
struct vm_anon *tofree = NULL;
u_int lcv;
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
@@ -757,14 +751,13 @@ amap_wipeout(struct vm_amap *amap)
(uintptr_t)anon, anon->an_ref, 0, 0);
/*
* Drop the reference. Defer freeing.
* Drop the reference.
*/
if (--anon->an_ref == 0) {
anon->an_link = tofree;
tofree = anon;
if (__predict_true(--anon->an_ref == 0)) {
uvm_anfree(anon);
}
if ((lcv & 31) == 31) {
if (__predict_false((lcv & 31) == 31)) {
preempt_point();
}
}
@@ -774,7 +767,7 @@ amap_wipeout(struct vm_amap *amap)
*/
amap->am_nused = 0;
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
amap_free(amap);
UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
}
@@ -799,7 +792,6 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
{
const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
struct vm_amap *amap, *srcamap;
struct vm_anon *tofree;
u_int slots, lcv;
krwlock_t *oldlock;
vsize_t len;
@@ -958,15 +950,14 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0) {
srcamap->am_flags &= ~AMAP_SHARED;
}
tofree = NULL;
#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
len >> PAGE_SHIFT, -1, &tofree);
len >> PAGE_SHIFT, -1);
}
#endif
uvm_anon_freelst(srcamap, tofree);
amap_unlock(srcamap);
/*
* Install new amap.
@@ -1078,7 +1069,7 @@ ReStart:
nanon->an_lock = NULL;
nanon->an_ref--;
KASSERT(nanon->an_ref == 0);
uvm_anon_free(nanon);
uvm_anfree(nanon);
}
uvm_wait("cownowpage");
goto ReStart;
@@ -1173,8 +1164,7 @@ amap_pp_establish(struct vm_amap *amap, vaddr_t offset)
* => map and amap must be locked.
*/
void
amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval,
struct vm_anon **tofree)
amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
{
int stopslot, *ppref, lcv, prevlcv;
int ref, len, prevref, prevlen;
@@ -1232,7 +1222,7 @@ amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval,
pp_setreflen(ppref, lcv, ref, len);
}
if (ref == 0) {
amap_wiperange(amap, lcv, len, tofree);
amap_wiperange(amap, lcv, len);
}
}
}
@@ -1244,8 +1234,7 @@ amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval,
* => Both map and amap must be locked by caller.
*/
void
amap_wiperange(struct vm_amap *amap, int slotoff, int slots,
struct vm_anon **tofree)
amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
{
u_int lcv, stop, slotend;
bool byanon;
@@ -1307,12 +1296,7 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots,
KASSERT(anon->an_lock == amap->am_lock);
if (--anon->an_ref == 0) {
/*
* Eliminated the last reference to an anon - defer
* freeing as uvm_anon_freelst() will unlock the amap.
*/
anon->an_link = *tofree;
*tofree = anon;
uvm_anfree(anon);
}
}
}
@@ -1570,7 +1554,6 @@ static void
amap_adjref_anons(struct vm_amap *amap, vaddr_t offset, vsize_t len,
int refv, bool all)
{
struct vm_anon *tofree = NULL;
#ifdef UVM_AMAP_PPREF
KASSERT(rw_write_held(amap->am_lock));
@@ -1590,13 +1573,13 @@ amap_adjref_anons(struct vm_amap *amap, vaddr_t offset, vsize_t len,
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if (all) {
amap_pp_adjref(amap, 0, amap->am_nslot, refv, &tofree);
amap_pp_adjref(amap, 0, amap->am_nslot, refv);
} else {
amap_pp_adjref(amap, offset, len, refv, &tofree);
amap_pp_adjref(amap, offset, len, refv);
}
}
#endif
uvm_anon_freelst(amap, tofree);
amap_unlock(amap);
}
/*

sys/uvm/uvm_amap.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.h,v 1.40 2020/02/23 15:46:43 ad Exp $ */
/* $NetBSD: uvm_amap.h,v 1.41 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -266,12 +266,11 @@ struct vm_amap {
#define PPREF_NONE ((int *) -1) /* not using ppref */
void amap_pp_adjref /* adjust references */
(struct vm_amap *, int, vsize_t, int,
struct vm_anon **);
(struct vm_amap *, int, vsize_t, int);
void amap_pp_establish /* establish ppref */
(struct vm_amap *, vaddr_t);
void amap_wiperange /* wipe part of an amap */
(struct vm_amap *, int, int, struct vm_anon **);
(struct vm_amap *, int, int);
#endif /* UVM_AMAP_PPREF */
#endif /* _KERNEL */

sys/uvm/uvm_anon.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $ */
/* $NetBSD: uvm_anon.c,v 1.76 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.76 2020/03/20 19:08:54 ad Exp $");
#include "opt_uvmhist.h"
@@ -95,26 +95,28 @@ uvm_analloc(void)
}
/*
* uvm_anon_dispose: free any resident page or swap resources of anon.
* uvm_anfree: free a single anon structure
*
* => anon must be removed from the amap (if anon was in an amap).
* => amap must be locked; we may drop and re-acquire the lock here.
* => amap must be locked, if anon was owned by amap.
* => we may drop and re-acquire the lock here (to break loans).
*/
static bool
uvm_anon_dispose(struct vm_anon *anon)
void
uvm_anfree(struct vm_anon *anon)
{
struct vm_page *pg = anon->an_page;
struct vm_page *pg = anon->an_page, *pg2 __diagused;
UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
KASSERT(rw_write_held(anon->an_lock));
KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
KASSERT(anon->an_ref == 0);
/*
* Dispose the page, if it is resident.
* Dispose of the page, if it is resident.
*/
if (pg) {
if (__predict_true(pg != NULL)) {
KASSERT(anon->an_lock != NULL);
/*
@@ -123,8 +125,9 @@ uvm_anon_dispose(struct vm_anon *anon)
* identify and lock the real owner of the page.
*/
if (pg->loan_count) {
pg = uvm_anon_lockloanpg(anon);
if (__predict_false(pg->loan_count != 0)) {
pg2 = uvm_anon_lockloanpg(anon);
KASSERT(pg2 == pg);
}
/*
@@ -133,7 +136,7 @@ uvm_anon_dispose(struct vm_anon *anon)
* and release the object lock.
*/
if (pg->uobject) {
if (__predict_false(pg->uobject != NULL)) {
mutex_enter(&pg->interlock);
KASSERT(pg->loan_count > 0);
pg->loan_count--;
@@ -155,25 +158,26 @@ uvm_anon_dispose(struct vm_anon *anon)
* that uvm_anon_release(9) would release it later.
*/
if (pg->flags & PG_BUSY) {
if (__predict_false((pg->flags & PG_BUSY) != 0)) {
pg->flags |= PG_RELEASED;
rw_obj_hold(anon->an_lock);
return false;
return;
}
uvm_pagefree(pg);
UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
"freed now!", (uintptr_t)anon, (uintptr_t)pg,
0, 0);
}
}
} else {
#if defined(VMSWAP)
if (pg == NULL && anon->an_swslot > 0) {
/* This page is no longer only in swap. */
KASSERT(uvmexp.swpgonly > 0);
atomic_dec_uint(&uvmexp.swpgonly);
}
if (anon->an_swslot > 0) {
/* This page is no longer only in swap. */
KASSERT(uvmexp.swpgonly > 0);
atomic_dec_uint(&uvmexp.swpgonly);
}
#endif
}
anon->an_lock = NULL;
/*
* Free any swap resources, leave a page replacement hint.
@@ -182,53 +186,9 @@ uvm_anon_dispose(struct vm_anon *anon)
uvm_anon_dropswap(anon);
uvmpdpol_anfree(anon);
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
return true;
}
/*
* uvm_anon_free: free a single anon.
*
* => anon must be already disposed.
*/
void
uvm_anon_free(struct vm_anon *anon)
{
KASSERT(anon->an_ref == 0);
KASSERT(anon->an_lock == NULL);
KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
KASSERT(anon->an_swslot == 0);
#endif
pool_cache_put(&uvm_anon_cache, anon);
}
/*
* uvm_anon_freelst: free a linked list of anon structures.
*
* => amap must be locked, we will unlock it.
*/
void
uvm_anon_freelst(struct vm_amap *amap, struct vm_anon *anonlst)
{
struct vm_anon *next;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(rw_write_held(amap->am_lock));
for (; anonlst != NULL; anonlst = next) {
next = anonlst->an_link;
/* Note: clears an_ref as well. */
anonlst->an_link = NULL;
if (uvm_anon_dispose(anonlst)) {
anonlst->an_lock = NULL;
uvm_anon_free(anonlst);
}
}
amap_unlock(amap);
}
/*
* uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
*
@@ -398,7 +358,7 @@ void
uvm_anon_release(struct vm_anon *anon)
{
struct vm_page *pg = anon->an_page;
bool success __diagused;
krwlock_t *lock;
KASSERT(rw_write_held(anon->an_lock));
KASSERT(pg != NULL);
@@ -411,12 +371,9 @@ uvm_anon_release(struct vm_anon *anon)
uvm_pagefree(pg);
KASSERT(anon->an_page == NULL);
/* dispose should succeed as no one can reach this anon anymore. */
success = uvm_anon_dispose(anon);
KASSERT(success);
rw_exit(anon->an_lock);
lock = anon->an_lock;
uvm_anfree(anon);
rw_exit(lock);
/* Note: extra reference is held for PG_RELEASED case. */
rw_obj_free(anon->an_lock);
anon->an_lock = NULL;
uvm_anon_free(anon);
rw_obj_free(lock);
}

sys/uvm/uvm_anon.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.h,v 1.31 2020/02/23 15:46:43 ad Exp $ */
/* $NetBSD: uvm_anon.h,v 1.32 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -46,12 +46,7 @@
struct vm_anon {
krwlock_t *an_lock; /* Lock for an_ref */
union {
uintptr_t au_ref; /* Reference count [an_lock] */
struct vm_anon *au_link; /* Link for deferred free */
} an_u;
#define an_ref an_u.au_ref
#define an_link an_u.au_link
uintptr_t an_ref; /* Reference count [an_lock] */
struct vm_page *an_page; /* If in RAM [an_lock] */
#if defined(VMSWAP) || 1 /* XXX libkvm */
/*
@@ -100,8 +95,7 @@ struct vm_aref {
*/
struct vm_anon *uvm_analloc(void);
void uvm_anon_free(struct vm_anon *);
void uvm_anon_freelst(struct vm_amap *, struct vm_anon *);
void uvm_anfree(struct vm_anon *);
void uvm_anon_init(void);
struct vm_page *uvm_anon_lockloanpg(struct vm_anon *);
#if defined(VMSWAP)

sys/uvm/uvm_fault.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault.c,v 1.220 2020/03/20 18:50:09 ad Exp $ */
/* $NetBSD: uvm_fault.c,v 1.221 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.220 2020/03/20 18:50:09 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.221 2020/03/20 19:08:54 ad Exp $");
#include "opt_uvmhist.h"
@@ -910,7 +910,7 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
flt.anon_spare->an_ref--;
KASSERT(flt.anon_spare->an_ref == 0);
KASSERT(flt.anon_spare->an_lock == NULL);
uvm_anon_free(flt.anon_spare);
uvm_anfree(flt.anon_spare);
}
return error;
}

sys/uvm/uvm_loan.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.c,v 1.98 2020/03/17 18:31:39 ad Exp $ */
/* $NetBSD: uvm_loan.c,v 1.99 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.98 2020/03/17 18:31:39 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.99 2020/03/20 19:08:54 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -808,7 +808,7 @@ fail:
uvmfault_unlockall(ufi, amap, uobj, NULL);
if (anon) {
anon->an_ref--;
uvm_anon_free(anon);
uvm_anfree(anon);
}
#endif /* notdef */
return (-1);
@@ -943,11 +943,10 @@ uvm_unloananon(struct vm_anon **aloans, int nanons)
while (nanons-- > 0) {
anon = *aloans++;
if (--anon->an_ref == 0) {
anon->an_link = to_free;
to_free = anon;
uvm_anfree(anon);
}
}
uvm_anon_freelst(amap, to_free);
amap_unlock(amap);
#endif /* notdef */
}

sys/uvm/uvm_map.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.374 2020/03/14 17:29:53 ad Exp $ */
/* $NetBSD: uvm_map.c,v 1.375 2020/03/20 19:08:54 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.374 2020/03/14 17:29:53 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.375 2020/03/20 19:08:54 ad Exp $");
#include "opt_ddb.h"
#include "opt_pax.h"
@@ -3796,7 +3796,7 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
struct vm_map_entry *current, *entry;
struct uvm_object *uobj;
struct vm_amap *amap;
struct vm_anon *anon, *anon_tofree;
struct vm_anon *anon;
struct vm_page *pg;
vaddr_t offset;
vsize_t size;
@@ -3857,7 +3857,6 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
offset = start - current->start;
size = MIN(end, current->end) - start;
anon_tofree = NULL;
amap_lock(amap, RW_WRITER);
for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
@@ -3917,13 +3916,12 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
amap_unadd(&current->aref, offset);
refs = --anon->an_ref;
if (refs == 0) {
anon->an_link = anon_tofree;
anon_tofree = anon;
uvm_anfree(anon);
}
continue;
}
}
uvm_anon_freelst(amap, anon_tofree);
amap_unlock(amap);
flush_object:
/*