change uvm_uarea_alloc() to indicate whether the returned uarea is already
backed by physical pages (ie. because it reused a previously-freed one),
so that we can skip a bunch of useless work in that case.
this fixes the underlying problem behind PR 18543, and also speeds up fork()
quite a bit (eg. 7% on my pc, 1% on my ultra2) when we get a cache hit.
chs 2002-11-17 08:32:43 +00:00
parent fcdaf149b3
commit 4b2625143d
6 changed files with 50 additions and 36 deletions
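
In caller terms, the new contract looks roughly like the sketch below. This is illustrative only: the function name alloc_and_wire_uarea and the ENOMEM handling are not part of the commit, and the uvm_fault_wire() call simply mirrors the kern_fork.c/uvm_glue.c hunks that follow.

static int
alloc_and_wire_uarea(vaddr_t *uaddrp)
{
	boolean_t inmem;
	int error;

	/* Old interface: *uaddrp = uvm_uarea_alloc(); callers always wired. */
	inmem = uvm_uarea_alloc(uaddrp);
	if (*uaddrp == 0)
		return ENOMEM;		/* no kernel virtual space left */

	if (inmem == FALSE) {
		/*
		 * Cache miss: only virtual space was allocated, so the
		 * backing pages still have to be faulted in and wired.
		 */
		error = uvm_fault_wire(kernel_map, *uaddrp, *uaddrp + USPACE,
		    VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
		if (error)
			return error;
	}

	/* Cache hit: a recycled u-area is already backed by wired pages. */
	return 0;
}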


@@ -1,4 +1,4 @@
.\" $NetBSD: uvm.9,v 1.36 2002/09/27 07:52:48 wiz Exp $
.\" $NetBSD: uvm.9,v 1.37 2002/11/17 08:32:44 chs Exp $
.\"
.\" Copyright (c) 1998 Matthew R. Green
.\" All rights reserved.
@@ -133,10 +133,10 @@ initialises the swap sub-system.
.Fn uvmspace_share "struct proc *p1" "struct proc *p2"
.Ft void
.Fn uvmspace_unshare "struct proc *p"
.Ft vaddr_t
.Fn uvm_uarea_alloc "void"
.Ft boolean_t
.Fn uvm_uarea_alloc "vaddr_t *uaddrp"
.Ft void
.Fn uvm_uarea_free "vaddr_t va"
.Fn uvm_uarea_free "vaddr_t uaddr"
.nr nS 0
.Pp
.Fn uvm_map
@@ -359,8 +359,13 @@ necessary by calling
.Fn uvmspace_fork .
.Pp
.Fn uvm_uarea_alloc
allocates virtual space for a u-area (i.e., a kernel stack) and returns
its virtual address.
allocates virtual space for a u-area (i.e., a kernel stack) and stores
its virtual address in
.Fa *uaddrp .
The return value is
.Dv TRUE
if the u-area is already backed by wired physical memory, otherwise
.Dv FALSE .
.Pp
.Fn uvm_uarea_free
frees a u-area allocated with

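The free side of the interface is unchanged by this commit; the point of the pairing is that a u-area released with uvm_uarea_free() keeps its backing pages, so a later uvm_uarea_alloc() can hand it straight back and return TRUE. A condensed lifecycle sketch (caller shape illustrative, following the hunks in this commit):

	vaddr_t uaddr;
	boolean_t inmem;

	inmem = uvm_uarea_alloc(&uaddr);	/* TRUE on a cache hit */
	if (!inmem) {
		/* fresh virtual space only: fault in and wire USPACE bytes at uaddr */
	}

	/* ... the u-area backs the process for its lifetime ... */

	uvm_uarea_free(uaddr);			/* returned to the cache, pages stay in place */
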

@@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.45 2002/10/13 21:14:28 chris Exp $ */
/* $NetBSD: cpu.c,v 1.46 2002/11/17 08:32:43 chs Exp $ */
/*
* Copyright (c) 1995 Mark Brinicombe.
@@ -45,7 +45,7 @@
#include <sys/param.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.45 2002/10/13 21:14:28 chris Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.46 2002/11/17 08:32:43 chs Exp $");
#include <sys/systm.h>
#include <sys/malloc.h>
@@ -527,11 +527,12 @@ cpu_alloc_idlepcb(struct cpu_info *ci)
* Generate a kernel stack and PCB (in essence, a u-area) for the
* new CPU.
*/
uaddr = uvm_uarea_alloc();
if (uvm_uarea_alloc(&uaddr)) {
error = uvm_fault_wire(kernel_map, uaddr, uaddr + USPACE,
VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
if (error)
return error;
}
ci->ci_idlepcb = pcb = (struct pcb *)uaddr;
/*


@@ -1,4 +1,4 @@
/* $NetBSD: kern_fork.c,v 1.98 2002/11/13 00:51:02 provos Exp $ */
/* $NetBSD: kern_fork.c,v 1.99 2002/11/17 08:32:44 chs Exp $ */
/*-
* Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
@@ -78,7 +78,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.98 2002/11/13 00:51:02 provos Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.99 2002/11/17 08:32:44 chs Exp $");
#include "opt_ktrace.h"
#include "opt_systrace.h"
@@ -198,6 +198,7 @@ fork1(struct proc *p1, int flags, int exitsig, void *stack, size_t stacksize,
uid_t uid;
int count, s;
vaddr_t uaddr;
boolean_t inmem;
static int nextpid, pidchecked;
/*
@@ -231,10 +232,10 @@ fork1(struct proc *p1, int flags, int exitsig, void *stack, size_t stacksize,
* Allocate virtual address space for the U-area now, while it
* is still easy to abort the fork operation if we're out of
* kernel virtual address space. The actual U-area pages will
* be allocated and wired in uvm_fork().
* be allocated and wired in uvm_fork() if needed.
*/
uaddr = uvm_uarea_alloc();
inmem = uvm_uarea_alloc(&uaddr);
if (__predict_false(uaddr == 0)) {
(void)chgproccnt(uid, -1);
nprocs--;
@@ -280,7 +281,7 @@ fork1(struct proc *p1, int flags, int exitsig, void *stack, size_t stacksize,
* Increase reference counts on shared objects.
* The p_stats and p_sigacts substructs are set in uvm_fork().
*/
p2->p_flag = P_INMEM | (p1->p_flag & P_SUGID);
p2->p_flag = (inmem ? P_INMEM : 0) | (p1->p_flag & P_SUGID);
p2->p_emul = p1->p_emul;
p2->p_execsw = p1->p_execsw;


@@ -1,4 +1,4 @@
/* $NetBSD: kern_kthread.c,v 1.13 2002/06/01 23:51:05 lukem Exp $ */
/* $NetBSD: kern_kthread.c,v 1.14 2002/11/17 08:32:44 chs Exp $ */
/*-
* Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.13 2002/06/01 23:51:05 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.14 2002/11/17 08:32:44 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -82,7 +82,7 @@ kthread_create1(void (*func)(void *), void *arg,
* to init(8) when they exit. init(8) can easily wait them
* out for us.
*/
p2->p_flag |= P_INMEM | P_SYSTEM | P_NOCLDWAIT; /* XXX */
p2->p_flag |= P_SYSTEM | P_NOCLDWAIT;
/* Name it as specified. */
va_start(ap, fmt);


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.73 2002/09/22 07:20:31 chs Exp $ */
/* $NetBSD: uvm_extern.h,v 1.74 2002/11/17 08:32:45 chs Exp $ */
/*
*
@@ -572,7 +572,7 @@ void uvm_init_limits __P((struct proc *));
boolean_t uvm_kernacc __P((caddr_t, size_t, int));
__dead void uvm_scheduler __P((void)) __attribute__((noreturn));
void uvm_swapin __P((struct proc *));
vaddr_t uvm_uarea_alloc(void);
boolean_t uvm_uarea_alloc(vaddr_t *);
void uvm_uarea_free(vaddr_t);
boolean_t uvm_useracc __P((caddr_t, size_t, int));
int uvm_vslock __P((struct proc *, caddr_t, size_t,


@@ -1,4 +1,4 @@
/* $NetBSD: uvm_glue.c,v 1.60 2002/09/22 07:20:32 chs Exp $ */
/* $NetBSD: uvm_glue.c,v 1.61 2002/11/17 08:32:45 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.60 2002/09/22 07:20:32 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.61 2002/11/17 08:32:45 chs Exp $");
#include "opt_kgdb.h"
#include "opt_kstack.h"
@@ -290,15 +290,20 @@ uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
* Wire down the U-area for the process, which contains the PCB
* and the kernel stack. Wired state is stored in p->p_flag's
* P_INMEM bit rather than in the vm_map_entry's wired count
* to prevent kernel_map fragmentation.
* to prevent kernel_map fragmentation. If we reused a cached U-area,
* P_INMEM will already be set and we don't need to do anything.
*
* Note the kernel stack gets read/write accesses right off
* the bat.
* Note the kernel stack gets read/write accesses right off the bat.
*/
error = uvm_fault_wire(kernel_map, (vaddr_t)up, (vaddr_t)up + USPACE,
VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
if ((p2->p_flag & P_INMEM) == 0) {
error = uvm_fault_wire(kernel_map, (vaddr_t)up,
(vaddr_t)up + USPACE, VM_FAULT_WIRE,
VM_PROT_READ | VM_PROT_WRITE);
if (error)
panic("uvm_fork: uvm_fault_wire failed: %d", error);
p2->p_flag |= P_INMEM;
}
#ifdef KSTACK_CHECK_MAGIC
/*
@@ -354,8 +359,8 @@ uvm_exit(p)
* uvm_uarea_alloc: allocate a u-area
*/
vaddr_t
uvm_uarea_alloc(void)
boolean_t
uvm_uarea_alloc(vaddr_t *uaddrp)
{
vaddr_t uaddr;
@@ -367,10 +372,12 @@ uvm_uarea_alloc(void)
if (uaddr) {
uvm_uareas = *(void **)uvm_uareas;
uvm_nuarea--;
*uaddrp = uaddr;
return TRUE;
} else {
uaddr = uvm_km_valloc_align(kernel_map, USPACE, USPACE_ALIGN);
*uaddrp = uvm_km_valloc_align(kernel_map, USPACE, USPACE_ALIGN);
return FALSE;
}
return uaddr;
}
/*
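
The cache is a LIFO list threaded through the first word of each freed u-area, which is exactly why a cached entry is guaranteed to be backed: linking it required writing into it. The push side is not part of this hunk; the sketch below is the presumed shape of the counterpart, reconstructed from the pop shown above rather than copied from the source, and the real uvm_uarea_free() in uvm_glue.c may additionally bound the cache via uvm_nuarea and release excess u-areas back to kernel_map.

void
uvm_uarea_free(vaddr_t uaddr)
{

	*(void **)uaddr = uvm_uareas;	/* thread the old list head through the u-area */
	uvm_uareas = (void *)uaddr;	/* this u-area becomes the new head */
	uvm_nuarea++;
}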