call pmap_growkernel once after the kmem_arena is created,
to make the pmap cover its address space;
assert on the growth in uvm_km_kmem_alloc
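
in outline, the growth call moves out of the allocation path and into
bootstrap, and only an assertion stays behind; a minimal sketch, condensed
from the diff below (PMAP_GROWKERNEL configurations only, identifiers as in
uvm_km.c):

        /* uvm_km_bootstrap(): grow the kernel pmap once, right after the
         * kmem_arena is created, so the pmap covers its whole VA range. */
        if (uvm_maxkaddr < kmembase + kmemsize)
                uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);

        /* uvm_km_kmem_alloc(): no longer grows the kernel itself; it only
         * asserts that bootstrap (or uvm_map_prepare) already covered the
         * range [va, va + size) it is about to hand out. */
        KASSERT(uvm_maxkaddr >= va + size);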

for the 3rd uvm_map_entry, uvm_map_prepare will grow the kernel,
but we might call into uvm_km_kmem_alloc earlier than that, through
imports to the kmem_meta_arena
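
roughly, the problematic early path looks like this; the arena wiring below
is a sketch recalled from subr_vmem.c, and the meta arena names are
assumptions -- only vmem_create(), uvm_km_kmem_alloc() and uvm_km_kmem_free()
are taken from this change and its surroundings:

        /*
         * illustrative only: the vmem metadata arena is wired so that an
         * import pulls VA and backing pages through uvm_km_kmem_alloc().
         */
        kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
            uvm_km_kmem_alloc,          /* import function */
            uvm_km_kmem_free,           /* release function */
            kmem_va_meta_arena,         /* source arena (assumed name) */
            0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

        /*
         * the first metadata allocation imports from the source arena,
         * i.e. calls uvm_km_kmem_alloc(), and that can happen before
         * uvm_map_prepare() has ever raised uvm_maxkaddr -- hence the
         * one-time pmap_growkernel() at bootstrap time.
         */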

while here, guard uvm_km_va_starved_p against the kmem_arena not having been created yet

thanks to everyone involved for tracking this down
para 2012-09-07 06:45:04 +00:00
parent 9af1ccf1bf
commit 75ebc1f88a
1 changed file with 23 additions and 14 deletions


@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.134 2012/09/04 13:37:41 matt Exp $ */
+/* $NetBSD: uvm_km.c,v 1.135 2012/09/07 06:45:04 para Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.134 2012/09/04 13:37:41 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.135 2012/09/07 06:45:04 para Exp $");
 
 #include "opt_uvmhist.h"
@@ -202,7 +202,7 @@ int nkmempages = 0;
 vaddr_t kmembase;
 vsize_t kmemsize;
 
-vmem_t *kmem_arena;
+vmem_t *kmem_arena = NULL;
 vmem_t *kmem_va_arena;
 
 /*
@@ -329,6 +329,18 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t end)
 	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
 	    NULL, NULL, NULL,
 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+#ifdef PMAP_GROWKERNEL
+	/*
+	 * kmem_arena VA allocations happen independently of uvm_map.
+	 * grow kernel to accommodate the kmem_arena.
+	 */
+	if (uvm_maxkaddr < kmembase + kmemsize) {
+		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
+		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
+		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
+		    uvm_maxkaddr, kmembase, kmemsize);
+	}
+#endif
 
 	vmem_init(kmem_arena);
@@ -782,18 +794,12 @@ again:
 
 #ifdef PMAP_GROWKERNEL
 	/*
-	 * These VA allocations happen independently of uvm_map so if this allocation
-	 * extends beyond the current limit, then allocate more resources for it.
-	 * This can only happen while the kmem_map is the only map entry in the
-	 * kernel_map because as soon as another map entry is created, uvm_map_prepare
-	 * will set uvm_maxkaddr to an address beyond the kmem_map.
+	 * These VA allocations happen independently of uvm_map
+	 * so this allocation must not extend beyond the current limit.
 	 */
-	if (uvm_maxkaddr < va + size) {
-		uvm_maxkaddr = pmap_growkernel(va + size);
-		KASSERTMSG(uvm_maxkaddr >= va + size,
-		    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
-		    uvm_maxkaddr, va, size);
-	}
+	KASSERTMSG(uvm_maxkaddr >= va + size,
+	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
+	    uvm_maxkaddr, va, size);
 #endif
 
 	loopva = va;
@@ -864,6 +870,9 @@ uvm_km_va_starved_p(void)
 	vmem_size_t total;
 	vmem_size_t free;
 
+	if (kmem_arena == NULL)
+		return false;
+
 	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
 	free = vmem_size(kmem_arena, VMEM_FREE);
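
the hunk above is cut off before the function's return; with the new guard,
the whole function reads roughly as follows (a sketch: the trailing threshold
comparison is assumed from the surrounding, unchanged code and is not part of
this diff):

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	/* new guard: during early bootstrap the kmem_arena may not exist
	 * yet; report "not starved" instead of dereferencing NULL. */
	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	/* assumed unchanged heuristic: starved when less than about
	 * 10% of the arena is free. */
	return (free < (total / 10));
}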