make amap use kmem_alloc, rather than malloc.
(i.e., make it use kernel_map rather than kmem_map.) kmem_map is more restricted than kernel_map, and there is no reason for amap to use it.
commit d447115889 (parent bc4977819f)
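The change is purely an allocator swap; the amap logic itself is untouched. As a minimal sketch of the before/after pattern the diff applies throughout (assuming the NetBSD malloc(9) and kmem(9) interfaces of this era; M_TEMP and the function names here are illustrative, not taken from the commit):

#include <sys/malloc.h>	/* old interface, backed by kmem_map */
#include <sys/kmem.h>	/* new interface, backed by kernel_map */

/* Old style: allocations are tagged with a malloc type, and
 * free() recovers the size from the allocator's bookkeeping. */
static void *
old_style_alloc(size_t sz)
{

	return malloc(sz, M_TEMP, M_WAITOK);	/* later: free(p, M_TEMP) */
}

/* New style: no malloc type, but kmem_free() must be passed the
 * allocation size, which is why the diff has amap_free() and
 * amap_extend() recompute sizes from am_maxslot/oldnslots. */
static void *
new_style_alloc(size_t sz)
{

	return kmem_alloc(sz, KM_SLEEP);	/* later: kmem_free(p, sz) */
}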
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.c,v 1.73 2006/04/21 14:04:45 yamt Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.74 2006/06/25 08:03:46 yamt Exp $	*/
 
 /*
  *
@@ -42,15 +42,15 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.73 2006/04/21 14:04:45 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.74 2006/06/25 08:03:46 yamt Exp $");
 
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
-#include <sys/malloc.h>
 #include <sys/kernel.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 
 #include <uvm/uvm.h>
@@ -66,8 +66,6 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.73 2006/04/21 14:04:45 yamt Exp $");
 POOL_INIT(uvm_amap_pool, sizeof(struct vm_amap), 0, 0, 0, "amappl",
     &pool_allocator_nointr);
 
-MALLOC_DEFINE(M_UVMAMAP, "UVM amap", "UVM amap and related structures");
-
 static struct simplelock amap_list_lock = SIMPLELOCK_INITIALIZER;
 static LIST_HEAD(, vm_amap) amap_list;
 
@@ -93,6 +91,13 @@ amap_list_remove(struct vm_amap *amap)
 	simple_unlock(&amap_list_lock);
 }
 
+static int
+amap_roundup_slots(int slots)
+{
+
+	return kmem_roundup_size(slots * sizeof(int)) / sizeof(int);
+}
+
 #ifdef UVM_AMAP_PPREF
 /*
  * what is ppref?   ppref is an _optional_ amap feature which is used
@@ -175,13 +180,15 @@ amap_alloc1(int slots, int padslots, int waitf)
 {
 	struct vm_amap *amap;
 	int totalslots;
+	km_flag_t kmflags;
 
-	amap = pool_get(&uvm_amap_pool, (waitf == M_WAITOK) ? PR_WAITOK : 0);
+	amap = pool_get(&uvm_amap_pool,
+	    ((waitf & UVM_FLAG_NOWAIT) != 0) ? PR_NOWAIT : PR_WAITOK);
 	if (amap == NULL)
 		return(NULL);
 
-	totalslots = malloc_roundup((slots + padslots) * sizeof(int)) /
-	    sizeof(int);
+	kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
+	totalslots = amap_roundup_slots(slots + padslots);
 	simple_lock_init(&amap->am_l);
 	amap->am_ref = 1;
 	amap->am_flags = 0;
@@ -192,26 +199,25 @@ amap_alloc1(int slots, int padslots, int waitf)
 	amap->am_nslot = slots;
 	amap->am_nused = 0;
 
-	amap->am_slots = malloc(totalslots * sizeof(int), M_UVMAMAP,
-	    waitf);
+	amap->am_slots = kmem_alloc(totalslots * sizeof(int), kmflags);
 	if (amap->am_slots == NULL)
 		goto fail1;
 
-	amap->am_bckptr = malloc(totalslots * sizeof(int), M_UVMAMAP, waitf);
+	amap->am_bckptr = kmem_alloc(totalslots * sizeof(int), kmflags);
 	if (amap->am_bckptr == NULL)
 		goto fail2;
 
-	amap->am_anon = malloc(totalslots * sizeof(struct vm_anon *),
-	    M_UVMAMAP, waitf);
+	amap->am_anon = kmem_alloc(totalslots * sizeof(struct vm_anon *),
+	    kmflags);
 	if (amap->am_anon == NULL)
 		goto fail3;
 
 	return(amap);
 
 fail3:
-	free(amap->am_bckptr, M_UVMAMAP);
+	kmem_free(amap->am_bckptr, totalslots * sizeof(int));
 fail2:
-	free(amap->am_slots, M_UVMAMAP);
+	kmem_free(amap->am_slots, totalslots * sizeof(int));
 fail1:
 	pool_put(&uvm_amap_pool, amap);
 
@@ -219,7 +225,7 @@ fail1:
 	 * XXX hack to tell the pagedaemon how many pages we need,
 	 * since we can need more than it would normally free.
 	 */
-	if (waitf == M_NOWAIT) {
+	if ((waitf & UVM_FLAG_NOWAIT) != 0) {
 		extern int uvm_extrapages;
 		uvm_extrapages += ((sizeof(int) * 2 +
 		    sizeof(struct vm_anon *)) *
@@ -267,17 +273,20 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf)
 void
 amap_free(struct vm_amap *amap)
 {
+	int slots;
+
 	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
 
 	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
 	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
 	LOCK_ASSERT(!simple_lock_held(&amap->am_l));
-	free(amap->am_slots, M_UVMAMAP);
-	free(amap->am_bckptr, M_UVMAMAP);
-	free(amap->am_anon, M_UVMAMAP);
+	slots = amap->am_maxslot;
+	kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
+	kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
+	kmem_free(amap->am_anon, slots * sizeof(*amap->am_anon));
 #ifdef UVM_AMAP_PPREF
 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
-		free(amap->am_ppref, M_UVMAMAP);
+		kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
 #endif
 	pool_put(&uvm_amap_pool, amap);
 	UVMHIST_LOG(maphist,"<- done, freed amap = 0x%x", amap, 0, 0, 0);
@@ -299,13 +308,14 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
 	int slotoff = entry->aref.ar_pageoff;
 	int slotmapped, slotadd, slotneed, slotadded, slotalloc;
 	int slotadj, slotspace;
+	int oldnslots;
 #ifdef UVM_AMAP_PPREF
 	int *newppref, *oldppref;
 #endif
 	int i, *newsl, *newbck, *oldsl, *oldbck;
 	struct vm_anon **newover, **oldover;
-	int mflag = (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT :
-	    (M_WAITOK | M_CANFAIL);
+	const km_flag_t kmflags =
+	    (flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
 
 	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
 
@@ -474,30 +484,29 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
 		return E2BIG;
 	}
 
-	slotalloc = malloc_roundup(slotneed * sizeof(int)) / sizeof(int);
+	slotalloc = amap_roundup_slots(slotneed);
 #ifdef UVM_AMAP_PPREF
 	newppref = NULL;
 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
-		newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
+		newppref = kmem_alloc(slotalloc * sizeof(*newppref), kmflags);
 #endif
-	newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
-	newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
-	newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
-	    mflag);
+	newsl = kmem_alloc(slotalloc * sizeof(*newsl), kmflags);
+	newbck = kmem_alloc(slotalloc * sizeof(*newbck), kmflags);
+	newover = kmem_alloc(slotalloc * sizeof(*newover), kmflags);
 	if (newsl == NULL || newbck == NULL || newover == NULL) {
 #ifdef UVM_AMAP_PPREF
 		if (newppref != NULL) {
-			free(newppref, M_UVMAMAP);
+			kmem_free(newppref, slotalloc * sizeof(*newppref));
 		}
 #endif
 		if (newsl != NULL) {
-			free(newsl, M_UVMAMAP);
+			kmem_free(newsl, slotalloc * sizeof(*newsl));
 		}
 		if (newbck != NULL) {
-			free(newbck, M_UVMAMAP);
+			kmem_free(newbck, slotalloc * sizeof(*newbck));
 		}
 		if (newover != NULL) {
-			free(newover, M_UVMAMAP);
+			kmem_free(newover, slotalloc * sizeof(*newover));
 		}
 		return ENOMEM;
 	}
@@ -585,15 +594,16 @@ amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
 		entry->aref.ar_pageoff = slotspace - slotadd;
 		amap->am_nslot = slotalloc;
 	}
+	oldnslots = amap->am_maxslot;
 	amap->am_maxslot = slotalloc;
 
 	amap_unlock(amap);
-	free(oldsl, M_UVMAMAP);
-	free(oldbck, M_UVMAMAP);
-	free(oldover, M_UVMAMAP);
+	kmem_free(oldsl, oldnslots * sizeof(*oldsl));
+	kmem_free(oldbck, oldnslots * sizeof(*oldbck));
+	kmem_free(oldover, oldnslots * sizeof(*oldover));
 #ifdef UVM_AMAP_PPREF
 	if (oldppref && oldppref != PPREF_NONE)
-		free(oldppref, M_UVMAMAP);
+		kmem_free(oldppref, oldnslots * sizeof(*oldppref));
 #endif
 	UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
 	    amap, slotneed, 0, 0);
@@ -731,7 +741,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
 	struct vm_amap *amap, *srcamap;
 	int slots, lcv;
 	vaddr_t chunksize;
-	const int waitf = (flags & AMAP_COPY_NOWAIT) ? M_NOWAIT : M_WAITOK;
+	const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
 	const boolean_t canchunk = (flags & AMAP_COPY_NOCHUNK) == 0;
 	UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist, "  (map=%p, entry=%p, flags=%d)",
@@ -1076,8 +1086,9 @@ amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
 void
 amap_pp_establish(struct vm_amap *amap, vaddr_t offset)
 {
-	amap->am_ppref = malloc(sizeof(int) * amap->am_maxslot,
-	    M_UVMAMAP, M_NOWAIT);
+
+	amap->am_ppref = kmem_alloc(amap->am_maxslot * sizeof(*amap->am_ppref),
+	    KM_NOSLEEP);
 
 	/*
 	 * if we fail then we just won't use ppref for this amap
--- a/sys/uvm/uvm_amap.h
+++ b/sys/uvm/uvm_amap.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.h,v 1.29 2006/02/15 14:06:45 yamt Exp $	*/
+/*	$NetBSD: uvm_amap.h,v 1.30 2006/06/25 08:03:46 yamt Exp $	*/
 
 /*
  *
@@ -240,8 +240,6 @@ struct vm_amap {
 #define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */
 
 #ifdef _KERNEL
-#include <sys/mallocvar.h>
-MALLOC_DECLARE(M_UVMAMAP);
 
 /*
  * macros
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.226 2006/05/25 14:27:28 yamt Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.227 2006/06/25 08:03:46 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.226 2006/05/25 14:27:28 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.227 2006/06/25 08:03:46 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -1367,7 +1367,7 @@ nomerge:
 		vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
 		    UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
 		struct vm_amap *amap = amap_alloc(size, to_add,
-		    (flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
+		    (flags & UVM_FLAG_NOWAIT));
 		if (__predict_false(amap == NULL)) {
 			error = ENOMEM;
 			goto done;
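One knock-on effect shows up in the uvm_map.c hunk above: amap_alloc()'s waitf argument is now a UVM flag (UVM_FLAG_NOWAIT or 0) rather than a malloc(9) flag (M_NOWAIT/M_WAITOK), and amap_alloc1() translates it into kmem(9) flags internally. A sketch of that translation, with the helper name waitf_to_kmflags being illustrative rather than from the source:

#include <sys/kmem.h>
#include <uvm/uvm_extern.h>	/* UVM_FLAG_NOWAIT */

/* Translate the UVM-level wait flag into a kmem(9) flag, mirroring
 * the expression amap_alloc1() now uses inline. */
static inline km_flag_t
waitf_to_kmflags(int waitf)
{

	return ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
}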