diff --git a/sys/sys/userret.h b/sys/sys/userret.h
index 58f0742e9539..01e43880ea3f 100644
--- a/sys/sys/userret.h
+++ b/sys/sys/userret.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: userret.h,v 1.22 2011/02/25 22:37:12 yamt Exp $	*/
+/*	$NetBSD: userret.h,v 1.23 2011/04/08 10:36:58 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000, 2003, 2006, 2008 The NetBSD Foundation, Inc.
@@ -94,6 +94,13 @@ mi_userret(struct lwp *l)
 	if (__predict_false(((l->l_flag & LW_USERRET) | p->p_timerpend) != 0))
 		lwp_userret(l);
 	l->l_kpriority = false;
+	/*
+	 * cpu_set_curpri(prio) is a MD optimized version of:
+	 *
+	 *	kpreempt_disable();
+	 *	curcpu()->ci_schedstate.spc_curpriority = prio;
+	 *	kpreempt_enable();
+	 */
 	cpu_set_curpri(l->l_priority);	/* XXX this needs to die */
 #else
 	ci = l->l_cpu;
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 6f0fbcc72be3..904f6031e566 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.295 2011/02/02 15:25:27 chuck Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.296 2011/04/08 10:38:36 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.295 2011/02/02 15:25:27 chuck Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.296 2011/04/08 10:38:36 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -3398,11 +3398,12 @@ uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
 		KASSERT(entry != &map->header);
 		KASSERT(start < entry->end);
 		/*
-		 * XXX IMPLEMENT ME.
-		 * Should invent a "weak" mode for uvm_fault()
-		 * which would only do the PGO_LOCKED pgo_get().
+		 * For now, we handle only the easy but commonly-requested case.
+		 * ie. start prefetching of backing uobj pages.
 		 *
-		 * for now, we handle only the easy but common case.
+		 * XXX It might be useful to pmap_enter() the already-in-core
+		 * pages by inventing a "weak" mode for uvm_fault() which would
+		 * only do the PGO_LOCKED pgo_get().
 		 */
 		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
 			off_t offset;
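
Note on the userret.h hunk: the new comment documents cpu_set_curpri() as a
machine-dependent shortcut for a kpreempt-protected store to the current
CPU's scheduler state. As a rough illustration (not part of the patch), an
MI fallback for a port without an optimized version could look like the
sketch below; the __HAVE_CPU_SET_CURPRI guard name is hypothetical.

	#include <sys/types.h>		/* pri_t */
	#include <sys/cpu.h>		/* curcpu(), kpreempt_*() */

	/*
	 * MI fallback matching the behaviour described in the new
	 * comment in mi_userret().  A port that can update
	 * spc_curpriority with a single instruction would provide its
	 * own MD version instead; the guard name here is hypothetical.
	 */
	#ifndef __HAVE_CPU_SET_CURPRI
	static inline void
	cpu_set_curpri(pri_t prio)
	{

		kpreempt_disable();	/* pin the LWP to this CPU */
		curcpu()->ci_schedstate.spc_curpriority = prio;
		kpreempt_enable();
	}
	#endif

The kpreempt_disable()/kpreempt_enable() pair keeps the LWP from migrating
between evaluating curcpu() and performing the store; an MD version that
reaches the per-CPU area in one instruction (e.g. a segment-relative store)
needs no such bracketing, which is presumably the point of the optimization.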
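
Note on the uvm_map.c hunk: it only rewrites the comment. The easy case it
describes is an object-backed mapping with no anonymous overlay, where
madvise(MADV_WILLNEED) simply starts read-ahead on the backing uvm_object;
the pmap_enter()/weak-fault idea stays an XXX. The branch body is outside
the hunk, so the following sketch of that case, built around
uvm_readahead(9), is an assumption rather than a quote of the file:

	#include <sys/param.h>		/* MIN() */
	#include <uvm/uvm.h>
	#include <uvm/uvm_readahead.h>

	/*
	 * Hypothetical helper showing the "easy but commonly-requested
	 * case" of uvm_map_willneed(): prefetch the backing pages of an
	 * object-backed, non-anonymous map entry.  The offset arithmetic
	 * is an assumption about the code below the rewritten comment.
	 */
	static void
	willneed_readahead(struct vm_map_entry *entry, vaddr_t start,
	    vaddr_t end)
	{
		struct vm_amap * const amap = entry->aref.ar_amap;
		struct uvm_object * const uobj = entry->object.uvm_obj;

		if (UVM_ET_ISOBJ(entry) && amap == NULL && uobj != NULL) {
			/* translate the VA range into object offsets */
			const off_t offset =
			    entry->offset + (start - entry->start);
			const off_t size = MIN(entry->end, end) - start;

			/* start read-ahead of the backing uobj pages */
			uvm_readahead(uobj, offset, size);
		}
	}

The amap == NULL test is what keeps the case easy: once an anonymous
overlay exists, pages may be shadowed by copy-on-write anons, and
prefetching the underlying object could fetch pages that are never used.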