Pull up following revision(s) (requested by chs in ticket #1598):

sys/uvm/uvm_amap.c: revision 1.123 (via patch)

fix amap_extend() to handle amaps where we previously failed to allocate
the ppref memory.
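
In short, the problem case is an amap whose per-page reference (ppref) array could not be allocated, leaving am_ppref set to PPREF_NONE, so amap_extend() cannot assume that anons outside the one remaining map entry's range were already freed. The fragment below condenses the new pre-extension step from the patch; it is only a sketch, and the declarations, amap locking, and the rest of amap_extend() from the surrounding code are assumed:

	/*
	 * Condensed from the patch below: with no ppref array, anons
	 * outside the single remaining entry's range could never have
	 * been freed, so wipe them before the rest of amap_extend() runs.
	 */
	slotendoff = slotoff + slotmapped;
	if (amap->am_ppref == PPREF_NONE) {
		amap_wiperange(amap, 0, slotoff, &tofree);
		amap_wiperange(amap, slotendoff, amap->am_nslot - slotendoff, &tofree);
	}

The KASSERT loops added by the patch then verify that no anons remain outside the slotoff..slotendoff range.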
This commit is contained in:
martin 2020-08-19 18:39:18 +00:00
parent 6c5161bb05
commit 0843322e04
1 changed file with 33 additions and 3 deletions


@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.107.32.1 2017/11/02 21:29:53 snj Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.107.32.2 2020/08/19 18:39:18 martin Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.107.32.1 2017/11/02 21:29:53 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.107.32.2 2020/08/19 18:39:18 martin Exp $");
 
 #include "opt_uvmhist.h"
@@ -317,7 +317,7 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
 	struct vm_amap *amap = entry->aref.ar_amap;
 	int slotoff = entry->aref.ar_pageoff;
 	int slotmapped, slotadd, slotneed, slotadded, slotalloc;
-	int slotadj, slotspace;
+	int slotadj, slotspace, slotendoff;
 	int oldnslots;
 #ifdef UVM_AMAP_PPREF
 	int *newppref, *oldppref;
@@ -354,6 +354,36 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
 	}
 	tofree = NULL;
 
+	/*
+	 * Because this amap only has 1 ref, we know that there is
+	 * only one vm_map_entry pointing to it, and the one entry is
+	 * using slots between slotoff and slotoff + slotmapped. If
+	 * we have been using ppref then we know that only slots in
+	 * the one map entry's range can have anons, since ppref
+	 * allowed us to free any anons outside that range as other map
+	 * entries which used this amap were removed. But without ppref,
+	 * we couldn't know which slots were still needed by other map
+	 * entries, so we couldn't free any anons as we removed map
+	 * entries, and so any slot from 0 to am_nslot can have an
+	 * anon. But now that we know there is only one map entry
+	 * left and we know its range, we can free up any anons
+	 * outside that range. This is necessary because the rest of
+	 * this function assumes that there are no anons in the amap
+	 * outside of the one map entry's range.
+	 */
+
+	slotendoff = slotoff + slotmapped;
+	if (amap->am_ppref == PPREF_NONE) {
+		amap_wiperange(amap, 0, slotoff, &tofree);
+		amap_wiperange(amap, slotendoff, amap->am_nslot - slotendoff, &tofree);
+	}
+	for (i = 0; i < slotoff; i++) {
+		KASSERT(amap->am_anon[i] == NULL);
+	}
+	for (i = slotendoff; i < amap->am_nslot - slotendoff; i++) {
+		KASSERT(amap->am_anon[i] == NULL);
+	}
+
 	/*
 	 * case 1: we already have enough slots in the map and thus
 	 * only need to bump the reference counts on the slots we are