user maps are always pageable.

This commit is contained in:
chs 2001-08-16 01:37:50 +00:00
parent 79dbd4ba43
commit e9fbc91f95
3 changed files with 12 additions and 17 deletions

View File

@@ -1,4 +1,4 @@
/* $NetBSD: init_main.c,v 1.194 2001/07/18 00:51:54 matt Exp $ */
/* $NetBSD: init_main.c,v 1.195 2001/08/16 01:44:53 chs Exp $ */
/*
* Copyright (c) 1995 Christopher G. Demetriou. All rights reserved.
@@ -300,7 +300,7 @@ main(void)
* share proc0's vmspace, and thus, the kernel pmap.
*/
uvmspace_init(&vmspace0, pmap_kernel(), round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAX_ADDRESS), TRUE);
trunc_page(VM_MAX_ADDRESS));
p->p_vmspace = &vmspace0;
p->p_addr = proc0paddr; /* XXX */

View File

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.65 2001/06/02 18:09:26 chs Exp $ */
/* $NetBSD: uvm_extern.h,v 1.66 2001/08/16 01:37:50 chs Exp $ */
/*
*
@@ -600,10 +600,9 @@ boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t,
vaddr_t, vm_prot_t));
int uvm_map_protect __P((struct vm_map *, vaddr_t,
vaddr_t, vm_prot_t, boolean_t));
struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t,
boolean_t));
struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t));
void uvmspace_init __P((struct vmspace *, struct pmap *,
vaddr_t, vaddr_t, boolean_t));
vaddr_t, vaddr_t));
void uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t));
struct vmspace *uvmspace_fork __P((struct vmspace *));
void uvmspace_free __P((struct vmspace *));

View File

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.100 2001/07/22 13:34:12 wiz Exp $ */
/* $NetBSD: uvm_map.c,v 1.101 2001/08/16 01:37:50 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -2683,15 +2683,14 @@ uvm_map_checkprot(map, start, end, protection)
* - refcnt set to 1, rest must be init'd by caller
*/
struct vmspace *
uvmspace_alloc(min, max, pageable)
uvmspace_alloc(min, max)
vaddr_t min, max;
int pageable;
{
struct vmspace *vm;
UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
vm = pool_get(&uvm_vmspace_pool, PR_WAITOK);
uvmspace_init(vm, NULL, min, max, pageable);
uvmspace_init(vm, NULL, min, max);
UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
return (vm);
}
@@ -2703,16 +2702,15 @@ uvmspace_alloc(min, max, pageable)
* - refcnt set to 1, rest must be init'd by caller
*/
void
uvmspace_init(vm, pmap, min, max, pageable)
uvmspace_init(vm, pmap, min, max)
struct vmspace *vm;
struct pmap *pmap;
vaddr_t min, max;
boolean_t pageable;
{
UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
memset(vm, 0, sizeof(*vm));
uvm_map_setup(&vm->vm_map, min, max, pageable ? VM_MAP_PAGEABLE : 0);
uvm_map_setup(&vm->vm_map, min, max, VM_MAP_PAGEABLE);
if (pmap)
pmap_reference(pmap);
else
@@ -2833,8 +2831,7 @@ uvmspace_exec(p, start, end)
* for p
*/
nvm = uvmspace_alloc(start, end,
(map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
nvm = uvmspace_alloc(start, end);
/*
* install new vmspace and drop our ref to the old one.
@@ -2915,8 +2912,7 @@ uvmspace_fork(vm1)
vm_map_lock(old_map);
vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
(old_map->flags & VM_MAP_PAGEABLE) ? TRUE : FALSE);
vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset);
memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
(caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
new_map = &vm2->vm_map; /* XXX */