Implement backwards extension of amaps. There are three cases to deal
with:

Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.

Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition.  This optimizes for hitting case #1 again on
the next small extension.

Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size.  This also
optimizes for hitting case #1 on the next backwards extension.
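
In terms of the slot arithmetic introduced in uvm_amap.c below (slotoff is
the aref's page offset, slotmapped the number of slots currently mapped,
slotadd the number of slots being added, am_maxslot the allocated array
size), the case selection reduces to two comparisons.  The following is a
standalone illustration only -- the helper name and sample values are made
up, and the real code also deals with ppref bookkeeping, locking, and
allocation failure:

#include <stdio.h>

/* illustrative helper, not kernel code */
static const char *
extend_backwards_case(int slotoff, int slotmapped, int maxslot, int slotadd)
{
	int slotadj = slotadd - slotoff;	/* slots still missing in front */
	int slotneed = slotadd + slotmapped;	/* total slots required */

	if (slotadj <= 0)
		return "case #1: just decrement the slot offset";
	if (maxslot >= slotneed)
		return "case #2: slide mapped slots to the end, adjust offset";
	return "case #3: reallocate, copy slots to the end, adjust offset";
}

int
main(void)
{
	/* an amap with 16 slots allocated, 4 of them mapped at slot offset 2 */
	printf("%s\n", extend_backwards_case(2, 4, 16, 1));	/* case #1 */
	printf("%s\n", extend_backwards_case(2, 4, 16, 8));	/* case #2 */
	printf("%s\n", extend_backwards_case(2, 4, 16, 20));	/* case #3 */
	return 0;
}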

This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.

Not many applications will make use of this at this time (except for
JVMs and perhaps gcc3), but a "top-down" memory allocator will use it
extensively.
atatat 2002-11-14 17:58:48 +00:00
parent 56f6918fef
commit 42c2fe641b
3 changed files with 240 additions and 77 deletions

uvm_amap.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.c,v 1.45 2002/09/15 16:54:27 chs Exp $ */
/* $NetBSD: uvm_amap.c,v 1.46 2002/11/14 17:58:49 atatat Exp $ */
/*
*
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.45 2002/09/15 16:54:27 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.46 2002/11/14 17:58:49 atatat Exp $");
#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
@@ -284,21 +284,24 @@ amap_free(amap)
* one (thus it can't be shared)
*/
int
amap_extend(entry, addsize)
amap_extend(entry, addsize, forwards)
struct vm_map_entry *entry;
vsize_t addsize;
int forwards;
{
struct vm_amap *amap = entry->aref.ar_amap;
int slotoff = entry->aref.ar_pageoff;
int slotmapped, slotadd, slotneed, slotadded, slotalloc;
int slotadj, slotspace;
#ifdef UVM_AMAP_PPREF
int *newppref, *oldppref;
#endif
int *newsl, *newbck, *oldsl, *oldbck;
int i, *newsl, *newbck, *oldsl, *oldbck;
struct vm_anon **newover, **oldover;
UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x)", entry,addsize,0,0);
UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, forwards=%d)",
entry, addsize, forwards, 0);
/*
* first, determine how many slots we need in the amap. don't
@@ -309,7 +312,16 @@ amap_extend(entry, addsize)
amap_lock(amap);
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
slotneed = slotoff + slotmapped + slotadd;
if (forwards) {
slotneed = slotoff + slotmapped + slotadd;
slotadj = 0;
slotspace = 0;
}
else {
slotneed = slotadd + slotmapped;
slotadj = slotadd - slotoff;
slotspace = amap->am_maxslot - slotmapped;
}
/*
* case 1: we already have enough slots in the map and thus
@@ -317,16 +329,35 @@ amap_extend(entry, addsize)
* adding.
*/
if (amap->am_nslot >= slotneed) {
if (forwards) {
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
}
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff + slotmapped,
slotadd, 1);
}
#endif
amap_unlock(amap);
UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
return 0;
amap_unlock(amap);
UVMHIST_LOG(maphist,
"<- done (case 1f), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
return 0;
}
} else {
if (slotadj <= 0) {
slotoff -= slotadd;
entry->aref.ar_pageoff = slotoff;
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff, slotadd, 1);
}
#endif
amap_unlock(amap);
UVMHIST_LOG(maphist,
"<- done (case 1b), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
return 0;
}
}
/*
@@ -335,27 +366,87 @@ amap_extend(entry, addsize)
*/
if (amap->am_maxslot >= slotneed) {
if (forwards) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)),
1);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap,
slotoff + slotmapped,
(amap->am_nslot -
(slotoff + slotmapped)), 1);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
#endif
amap->am_nslot = slotneed;
amap_unlock(amap);
amap->am_nslot = slotneed;
amap_unlock(amap);
/*
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
*/
/*
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
*/
UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, "
"slotneed=%d", amap, slotneed, 0, 0);
return 0;
UVMHIST_LOG(maphist,"<- done (case 2f), amap = 0x%x, "
"slotneed=%d", amap, slotneed, 0, 0);
return 0;
} else {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
/*
* Slide up the ref counts on the pages that
* are actually in use.
*/
memmove(amap->am_ppref + slotspace,
amap->am_ppref + slotoff,
slotmapped * sizeof(int));
/*
* Mark the (adjusted) gap at the front as
* referenced/not referenced.
*/
pp_setreflen(amap->am_ppref,
0, 0, slotspace - slotadd);
pp_setreflen(amap->am_ppref,
slotspace - slotadd, 1, slotadd);
}
#endif
/*
* Slide the anon pointers up and clear out
* the space we just made.
*/
memmove(amap->am_anon + slotspace,
amap->am_anon + slotoff,
slotmapped * sizeof(struct vm_anon*));
memset(amap->am_anon + slotoff, 0,
(slotspace - slotoff) * sizeof(struct vm_anon *));
/*
* Slide the backpointers up, but don't bother
* wiping out the old slots.
*/
memmove(amap->am_bckptr + slotspace,
amap->am_bckptr + slotoff,
slotmapped * sizeof(int));
/*
* Adjust all the useful active slot numbers.
*/
for (i = 0; i < amap->am_nused; i++)
amap->am_slots[i] += (slotspace - slotoff);
/*
* We just filled all the empty space in the
* front of the amap by activating a few new
* slots.
*/
amap->am_nslot = amap->am_maxslot;
entry->aref.ar_pageoff = slotspace - slotadd;
amap_unlock(amap);
UVMHIST_LOG(maphist,"<- done (case 2b), amap = 0x%x, "
"slotneed=%d", amap, slotneed, 0, 0);
return 0;
}
}
/*
@@ -385,6 +476,11 @@ amap_extend(entry, addsize)
newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
M_WAITOK | M_CANFAIL);
if (newsl == NULL || newbck == NULL || newover == NULL) {
#ifdef UVM_AMAP_PPREF
if (newppref != NULL) {
free(amap->am_ppref, M_UVMAMAP);
}
#endif
if (newsl != NULL) {
free(newsl, M_UVMAMAP);
}
@@ -404,41 +500,78 @@ amap_extend(entry, addsize)
*/
slotadded = slotalloc - amap->am_nslot;
if (!forwards)
slotspace = slotalloc - slotmapped;
/* do am_slots */
oldsl = amap->am_slots;
memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
if (forwards)
memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
else
for (i = 0; i < amap->am_nused; i++)
newsl[i] = oldsl[i] + slotspace - slotoff;
amap->am_slots = newsl;
/* do am_anon */
oldover = amap->am_anon;
memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
slotadded);
if (forwards) {
memcpy(newover, oldover,
sizeof(struct vm_anon *) * amap->am_nslot);
memset(newover + amap->am_nslot, 0,
sizeof(struct vm_anon *) * slotadded);
} else {
memcpy(newover + slotspace, oldover + slotoff,
sizeof(struct vm_anon *) * slotmapped);
memset(newover, 0,
sizeof(struct vm_anon *) * slotspace);
}
amap->am_anon = newover;
/* do am_bckptr */
oldbck = amap->am_bckptr;
memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
if (forwards)
memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
else
memcpy(newbck + slotspace, oldbck + slotoff,
sizeof(int) * slotmapped);
amap->am_bckptr = newbck;
#ifdef UVM_AMAP_PPREF
/* do ppref */
oldppref = amap->am_ppref;
if (newppref) {
memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
if (forwards) {
memcpy(newppref, oldppref,
sizeof(int) * amap->am_nslot);
memset(newppref + amap->am_nslot, 0,
sizeof(int) * slotadded);
} else {
memcpy(newppref + slotspace, oldppref + slotoff,
sizeof(int) * slotmapped);
}
amap->am_ppref = newppref;
if ((slotoff + slotmapped) < amap->am_nslot)
if (forwards && (slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)), 1);
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
if (forwards)
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
else {
pp_setreflen(newppref, 0, 0,
slotalloc - slotneed);
pp_setreflen(newppref, slotalloc - slotneed, 1,
slotneed - slotmapped);
}
}
#endif
/* update master values */
amap->am_nslot = slotneed;
if (forwards)
amap->am_nslot = slotneed;
else {
entry->aref.ar_pageoff = slotspace - slotadd;
amap->am_nslot = slotalloc;
}
amap->am_maxslot = slotalloc;
amap_unlock(amap);
@@ -1039,6 +1172,7 @@ amap_wiperange(amap, slotoff, slots)
byanon = TRUE;
lcv = slotoff;
stop = slotoff + slots;
slotend = 0;
} else {
byanon = FALSE;
lcv = 0;

uvm_amap.h

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.h,v 1.18 2002/09/15 16:54:29 chs Exp $ */
/* $NetBSD: uvm_amap.h,v 1.19 2002/11/14 17:58:48 atatat Exp $ */
/*
*
@@ -92,7 +92,7 @@ void amap_copy /* clear amap needs-copy flag */
void amap_cow_now /* resolve all COW faults now */
__P((struct vm_map *, struct vm_map_entry *));
int amap_extend /* make amap larger */
__P((struct vm_map_entry *, vsize_t));
__P((struct vm_map_entry *, vsize_t, int));
int amap_flags /* get amap's flags */
__P((struct vm_amap *));
void amap_free /* free amap */
@@ -136,6 +136,12 @@ void amap_wipeout /* remove all anons from amap */
#define AMAP_SHARED 0x1 /* amap is shared */
#define AMAP_REFALL 0x2 /* amap_ref: reference entire amap */
/*
* amap_extend directions
*/
#define AMAP_EXTEND_BACKWARDS 0 /* add "size" to start of map */
#define AMAP_EXTEND_FORWARDS 1 /* add "size" to end of map */
#endif /* _KERNEL */
/**********************************************************************/
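
For illustration, the two call forms now look like this (a sketch mirroring
the uvm_map.c call sites below; prev_entry and size stand for whatever
vm_map_entry and length the caller is merging):

	/* grow an existing amap at its end (the old behaviour) */
	error = amap_extend(prev_entry, size, AMAP_EXTEND_FORWARDS);

	/* pull the next entry's amap backwards over a new allocation */
	error = amap_extend(prev_entry->next, size, AMAP_EXTEND_BACKWARDS);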

uvm_map.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.124 2002/11/02 07:40:47 perry Exp $ */
/* $NetBSD: uvm_map.c,v 1.125 2002/11/14 17:58:48 atatat Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.124 2002/11/02 07:40:47 perry Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.125 2002/11/14 17:58:48 atatat Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -101,9 +101,9 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.124 2002/11/02 07:40:47 perry Exp $");
extern struct vm_map *pager_map;
struct uvm_cnt map_ubackmerge, map_uforwmerge;
struct uvm_cnt map_ubimerge, map_uforwmerge_fail, map_unomerge;
struct uvm_cnt map_ubimerge, map_unomerge;
struct uvm_cnt map_kbackmerge, map_kforwmerge;
struct uvm_cnt map_kbimerge, map_kforwmerge_fail, map_knomerge;
struct uvm_cnt map_kbimerge, map_knomerge;
struct uvm_cnt uvm_map_call, uvm_mlk_call, uvm_mlk_hint;
const char vmmapbsy[] = "vmmapbsy";
@@ -354,8 +354,6 @@ uvm_map_init()
"# uvm_map() forward umerges", 0);
UVMCNT_INIT(map_ubimerge, UVMCNT_CNT, 0,
"# uvm_map() dual umerge", 0);
UVMCNT_INIT(map_uforwmerge_fail, UVMCNT_CNT, 0,
"# uvm_map() forward umerge fails", 0);
UVMCNT_INIT(map_unomerge, UVMCNT_CNT, 0,
"# uvm_map() no umerge", 0);
@@ -365,8 +363,6 @@ uvm_map_init()
"# uvm_map() forward kmerges", 0);
UVMCNT_INIT(map_kbimerge, UVMCNT_CNT, 0,
"# uvm_map() dual kmerge", 0);
UVMCNT_INIT(map_kforwmerge_fail, UVMCNT_CNT, 0,
"# uvm_map() forward kmerge fails", 0);
UVMCNT_INIT(map_knomerge, UVMCNT_CNT, 0,
"# uvm_map() no kmerge", 0);
@@ -684,7 +680,8 @@ uvm_map(map, startp, size, uobj, uoffset, align, flags)
}
if (prev_entry->aref.ar_amap) {
error = amap_extend(prev_entry, size);
error = amap_extend(prev_entry, size,
AMAP_EXTEND_FORWARDS);
if (error) {
vm_map_unlock(map);
if (new_entry) {
@@ -751,6 +748,12 @@ forwardmerge:
* merged with the previous entry which has an amap,
* and the next entry also has an amap, we give up.
*
* Interesting cases:
* amap, new, amap -> give up second merge (single fwd extend)
* amap, new, none -> double forward extend (extend again here)
* none, new, amap -> double backward extend (done here)
* uobj, new, amap -> single backward extend (done here)
*
* XXX should we attempt to deal with someone refilling
* the deallocated region between two entries that are
* backed by the same amap (ie, arefs is 2, "prev" and
@@ -765,27 +768,6 @@ forwardmerge:
goto nomerge;
}
/* got it...almost */
if (prev_entry->next->aref.ar_amap) {
/*
* XXX if not for this, we could have merged
* forwards, so the number of times we missed
* a *possible* chance to merge more. note,
* however, that only processes use amaps,
* and they only *VERY* rarely present solely
* forward mergeable allocations. -- @@@
*/
if (kmap)
UVMCNT_INCR(map_kforwmerge_fail);
else
UVMCNT_INCR(map_uforwmerge_fail);
goto nomerge;
}
/*
* XXX call amap_extend() to merge backwards here if needed. -- @@@
*/
if (merged) {
/*
* Try to extend the amap of the previous entry to
@@ -793,10 +775,47 @@ forwardmerge:
* just skip on, don't actually give up, since we've
* already completed the back merge.
*/
if (prev_entry->aref.ar_amap &&
amap_extend(prev_entry, prev_entry->next->end -
prev_entry->next->start))
if (prev_entry->aref.ar_amap) {
if (amap_extend(prev_entry,
prev_entry->next->end -
prev_entry->next->start,
AMAP_EXTEND_FORWARDS))
goto nomerge;
}
/*
* Try to extend the amap of the *next* entry
* back to cover the new allocation *and* the
* previous entry as well (the previous merge
* didn't have an amap already otherwise we
* wouldn't be checking here for an amap). If
* it doesn't work just skip on, again, don't
* actually give up, since we've already
* completed the back merge.
*/
else if (prev_entry->next->aref.ar_amap) {
if (amap_extend(prev_entry->next,
prev_entry->end -
prev_entry->start + size,
AMAP_EXTEND_BACKWARDS))
goto nomerge;
}
} else {
/*
* Pull the next entry's amap backwards to cover this
* new allocation.
*/
if (prev_entry->next->aref.ar_amap) {
error = amap_extend(prev_entry->next, size,
AMAP_EXTEND_BACKWARDS);
if (error) {
vm_map_unlock(map);
if (new_entry) {
uvm_mapent_free(new_entry);
}
return error;
}
}
}
if (merged) {
@@ -827,6 +846,10 @@ forwardmerge:
struct vm_map_entry *dead = prev_entry->next;
prev_entry->end = dead->end;
uvm_map_entry_unlink(map, dead);
if (dead->aref.ar_amap != NULL) {
prev_entry->aref = dead->aref;
dead->aref.ar_amap = NULL;
}
uvm_mapent_free(dead);
} else {
prev_entry->next->start -= size;