When initially allocating or extending arrays in struct uvm_amap,
adjust the allocation size using malloc_roundup().  This eliminates many
unnecessary malloc/memcpy calls.
enami 2001-12-05 01:33:09 +00:00
parent 996ee10484
commit 76858f7620
1 changed file with 19 additions and 13 deletions
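The rounding trick is easiest to see in isolation.  The sketch below is a
userland illustration, not kernel code: roundup_bucket() stands in for
malloc_roundup() under the assumption of power-of-two allocator buckets
with a 16-byte minimum.  A request for 33 int-sized slots already pays for
a 256-byte bucket, i.e. 64 slots, so recording the rounded-up count in
am_maxslot lets a later amap_extend() grow in place whenever slotneed
still fits (the diff's KASSERT(amap->am_maxslot < slotneed) guards the
slow path).

	#include <stdio.h>
	#include <stddef.h>

	/*
	 * Illustrative stand-in for the kernel's malloc_roundup():
	 * round a request up to the bucket size the allocator would
	 * consume anyway.  Power-of-two buckets and the 16-byte
	 * minimum are assumptions of this sketch.
	 */
	static size_t
	roundup_bucket(size_t size)
	{
		size_t bucket = 16;

		while (bucket < size)
			bucket <<= 1;
		return (bucket);
	}

	int
	main(void)
	{
		/* 33 int-sized slots request 132 bytes... */
		size_t slotneed = 33;
		/* ...but the 256-byte bucket actually holds 64 slots. */
		size_t slotalloc = roundup_bucket(slotneed * sizeof(int)) /
		    sizeof(int);

		printf("need %zu slots, bucket holds %zu\n",
		    slotneed, slotalloc);
		return (0);
	}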


@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.c,v 1.39 2001/12/05 00:34:05 enami Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.40 2001/12/05 01:33:09 enami Exp $	*/

 /*
  *
@@ -42,7 +42,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.39 2001/12/05 00:34:05 enami Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.40 2001/12/05 01:33:09 enami Exp $");

 #undef UVM_AMAP_INLINE		/* enable/disable amap inlines */
@@ -177,12 +177,14 @@ amap_alloc1(slots, padslots, waitf)
 	int slots, padslots, waitf;
 {
 	struct vm_amap *amap;
-	int totalslots = slots + padslots;
+	int totalslots;

 	amap = pool_get(&uvm_amap_pool, (waitf == M_WAITOK) ? PR_WAITOK : 0);
 	if (amap == NULL)
 		return(NULL);

+	totalslots = malloc_roundup((slots + padslots) * sizeof(int)) /
+	    sizeof(int);
 	simple_lock_init(&amap->am_l);
 	amap->am_ref = 1;
 	amap->am_flags = 0;
@@ -241,7 +243,7 @@ amap_alloc(sz, padsz, waitf)
 	amap = amap_alloc1(slots, padslots, waitf);
 	if (amap)
 		memset(amap->am_anon, 0,
-		    (slots + padslots) * sizeof(struct vm_anon *));
+		    amap->am_maxslot * sizeof(struct vm_anon *));

 	UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
 	return(amap);
@@ -291,13 +293,12 @@ amap_extend(entry, addsize)
 {
 	struct vm_amap *amap = entry->aref.ar_amap;
 	int slotoff = entry->aref.ar_pageoff;
-	int slotmapped, slotadd, slotneed;
+	int slotmapped, slotadd, slotneed, slotadded, slotalloc;
 #ifdef UVM_AMAP_PPREF
 	int *newppref, *oldppref;
 #endif
 	int *newsl, *newbck, *oldsl, *oldbck;
 	struct vm_anon **newover, **oldover;
-	int slotadded;
 	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);

 	UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x)", entry,addsize,0,0);
@@ -366,10 +367,12 @@ amap_extend(entry, addsize)
 	 */
 	amap_unlock(amap);	/* unlock in case we sleep in malloc */
+	slotalloc = malloc_roundup(slotneed * sizeof(int)) / sizeof(int);
 #ifdef UVM_AMAP_PPREF
 	newppref = NULL;
 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
-		newppref = malloc(slotneed * sizeof(int), M_UVMAMAP, M_NOWAIT);
+		newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
+		    M_NOWAIT);
 		if (newppref == NULL) {
 			/* give up if malloc fails */
 			free(amap->am_ppref, M_UVMAMAP);
@@ -377,9 +380,9 @@
 		}
 	}
 #endif
-	newsl = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
-	newbck = malloc(slotneed * sizeof(int), M_UVMAMAP, M_WAITOK);
-	newover = malloc(slotneed * sizeof(struct vm_anon *),
+	newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
+	newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
+	newover = malloc(slotalloc * sizeof(struct vm_anon *),
 	    M_UVMAMAP, M_WAITOK);
 	amap_lock(amap);	/* re-lock! */
 	KASSERT(amap->am_maxslot < slotneed);
@@ -388,7 +391,7 @@
 	/*
 	 * now copy everything over to new malloc'd areas...
 	 */

-	slotadded = slotneed - amap->am_nslot;
+	slotadded = slotalloc - amap->am_nslot;

 	/* do am_slots */
 	oldsl = amap->am_slots;
@@ -416,13 +419,14 @@
 		if ((slotoff + slotmapped) < amap->am_nslot)
 			amap_pp_adjref(amap, slotoff + slotmapped,
 			    (amap->am_nslot - (slotoff + slotmapped)), 1);
-		pp_setreflen(newppref, amap->am_nslot, 1, slotadded);
+		pp_setreflen(newppref, amap->am_nslot, 1,
+		    slotneed - amap->am_nslot);
 	}
 #endif

 	/* update master values */
 	amap->am_nslot = slotneed;
-	amap->am_maxslot = slotneed;
+	amap->am_maxslot = slotalloc;

 	amap_unlock(amap);
 	free(oldsl, M_UVMAMAP);
@@ -682,6 +686,8 @@ amap_copy(map, entry, waitf, canchunk, startva, endva)
 		amap->am_slots[amap->am_nused] = lcv;
 		amap->am_nused++;
 	}
+	memset(&amap->am_anon[lcv], 0,
+	    (amap->am_maxslot - lcv) * sizeof(struct vm_anon *));

 	/*
 	 * drop our reference to the old amap (srcamap) and unlock.
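
Taken together, the diff maintains one invariant, summarized in this
hedged sketch (field names come from the diff; the struct and helper are
illustrative, not NetBSD's definitions): am_nslot counts the valid
slots, am_maxslot the bucket-rounded capacity.  Zeroing the tail is
therefore sized by the allocated capacity (slotadded = slotalloc -
am_nslot), while the ppref bookkeeping covers only the newly valid
slots (slotneed - am_nslot).

	/*
	 * Sketch of the capacity invariant (illustrative, not kernel
	 * source): am_nslot counts valid slots, am_maxslot counts
	 * allocated slots.
	 */
	struct amap_sketch {
		int am_nslot;	/* slots currently in use */
		int am_maxslot;	/* slots allocated (bucket-rounded) */
	};

	/*
	 * Grow to slotneed slots.  Returns 1 when the spare capacity
	 * left by malloc_roundup() covers the growth, so no
	 * malloc/memcpy is needed.
	 */
	static int
	extend_in_place(struct amap_sketch *a, int slotneed)
	{
		if (slotneed <= a->am_maxslot) {
			a->am_nslot = slotneed;
			return (1);
		}
		return (0);	/* must take the malloc/copy path above */
	}

	int
	main(void)
	{
		struct amap_sketch a = { 33, 64 };

		/* Growing to 48 slots fits the 64-slot bucket: cheap. */
		return (extend_in_place(&a, 48) ? 0 : 1);
	}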