- Hide the details of SPCF_SHOULDYIELD and related behind a couple of small
  functions: preempt_point() and preempt_needed().

- preempt(): if the LWP has exceeded its timeslice in kernel, strip it of
  any priority boost gained earlier from blocking.
Committed by ad on 2020-03-14 18:08:38 +00:00
parent bc9936a48c
commit 16d4fad635
22 changed files with 144 additions and 104 deletions
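
For orientation before the per-file diffs: a minimal sketch (not part of the commit) of the two calling patterns the changed files adopt. The foo_* names and the sc_lock/sc_work fields are hypothetical; preempt_point(), preempt_needed() and preempt() are the interfaces this change adds or reworks, and <sys/sched.h> is the header that declares them.

/*
 * Illustrative sketch only -- not from this commit.  All foo_* names are
 * hypothetical.
 */
#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/sched.h>

struct foo_softc {
	kmutex_t	sc_lock;	/* hypothetical per-device lock */
	int		sc_work;	/* hypothetical amount of work left */
};

/* Unlocked loop: the pattern used by uiomove() and the directory scans. */
static void
foo_scan(struct foo_softc *sc)
{

	while (sc->sc_work > 0) {
		sc->sc_work--;		/* stands in for real per-item work */
		preempt_point();	/* switches away only if the scheduler asked */
	}
}

/* Locked loop: drop the lock before preempt(), as kern_ktrace.c and vfs_bio.c do. */
static void
foo_scan_locked(struct foo_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_work > 0) {
		sc->sc_work--;
		if (preempt_needed()) {
			mutex_exit(&sc->sc_lock);
			preempt();
			mutex_enter(&sc->sc_lock);
		}
	}
	mutex_exit(&sc->sc_lock);
}

The first form costs one cheap flag test per iteration; the second lets a caller that holds a lock release it before the involuntary context switch, mirroring the ktr_io() and allocbuf() hunks below.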


@@ -1,7 +1,7 @@
-/*	$NetBSD: bus_dma.c,v 1.120 2020/02/22 08:22:09 skrll Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.121 2020/03/14 18:08:38 ad Exp $	*/
 
 /*-
- * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * Copyright (c) 1996, 1997, 1998, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -36,7 +36,7 @@
 #include "opt_cputypes.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.120 2020/02/22 08:22:09 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.121 2020/03/14 18:08:38 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -1790,10 +1790,8 @@ _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
 			continue;
 		cnt = MIN(resid, iov->iov_len);
-		if (!VMSPACE_IS_KERNEL_P(vm) &&
-		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
+		if (!VMSPACE_IS_KERNEL_P(vm)) {
+			preempt_point();
 		}
 
 		if (direction == UIO_READ) {
 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);


@@ -1,7 +1,7 @@
-/*	$NetBSD: bus_dma.c,v 1.39 2020/03/13 03:49:39 thorpej Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.40 2020/03/14 18:08:38 ad Exp $	*/
 
 /*-
- * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1998, 2001, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -32,7 +32,7 @@
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.39 2020/03/13 03:49:39 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.40 2020/03/14 18:08:38 ad Exp $");
 
 #define _MIPS_BUS_DMA_PRIVATE
@@ -1291,10 +1291,8 @@ _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
 			continue;
 		cnt = MIN(resid, iov->iov_len);
-		if (!VMSPACE_IS_KERNEL_P(vm) &&
-		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
+		if (!VMSPACE_IS_KERNEL_P(vm)) {
+			preempt_point();
 		}
 
 		if (direction == UIO_READ) {
 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);


@@ -1,7 +1,7 @@
-/*	$NetBSD: bus_dma.c,v 1.81 2019/11/14 16:23:52 maxv Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.82 2020/03/14 18:08:38 ad Exp $	*/
 
 /*-
- * Copyright (c) 1996, 1997, 1998, 2007 The NetBSD Foundation, Inc.
+ * Copyright (c) 1996, 1997, 1998, 2007, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.81 2019/11/14 16:23:52 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.82 2020/03/14 18:08:38 ad Exp $");
 
 /*
  * The following is included because _bus_dma_uiomove is derived from
@@ -1064,10 +1064,8 @@ _bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
 			continue;
 		cnt = MIN(resid, iov->iov_len);
-		if (!VMSPACE_IS_KERNEL_P(vm) &&
-		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
+		if (!VMSPACE_IS_KERNEL_P(vm)) {
+			preempt_point();
 		}
 
 		if (direction == UIO_READ) {
 			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);


@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_futex.c,v 1.37 2017/04/10 15:04:32 dholland Exp $ */
+/*	$NetBSD: linux_futex.c,v 1.38 2020/03/14 18:08:38 ad Exp $ */
 
 /*-
  * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.37 2017/04/10 15:04:32 dholland Exp $");
+__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.38 2020/03/14 18:08:38 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/time.h>
@@ -45,6 +45,7 @@ __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.37 2017/04/10 15:04:32 dholland Ex
 #include <sys/kmem.h>
 #include <sys/kernel.h>
 #include <sys/atomic.h>
+#include <sys/sched.h>
 
 #include <compat/linux/common/linux_types.h>
 #include <compat/linux/common/linux_emuldata.h>
@@ -801,7 +802,7 @@ release_futexes(struct lwp *l)
 
 		if (!--limit)
 			break;
-		yield();	/* XXX why? */
+		preempt_point();
 	}
 
 	if (pending)


@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_x86_svm.c,v 1.56 2020/02/21 00:26:22 joerg Exp $	*/
+/*	$NetBSD: nvmm_x86_svm.c,v 1.57 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
- * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.56 2020/02/21 00:26:22 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.57 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1441,10 +1441,7 @@ svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
 		}
 
 		/* If no reason to return to userland, keep rolling. */
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
-			break;
-		}
-		if (curcpu()->ci_data.cpu_softints != 0) {
+		if (preempt_needed()) {
 			break;
 		}
 		if (curlwp->l_flag & LW_USERRET) {


@@ -1,7 +1,7 @@
-/*	$NetBSD: nvmm_x86_vmx.c,v 1.50 2020/03/12 13:01:59 tnn Exp $	*/
+/*	$NetBSD: nvmm_x86_vmx.c,v 1.51 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
- * Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.50 2020/03/12 13:01:59 tnn Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.51 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -2063,10 +2063,7 @@ vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
 		}
 
 		/* If no reason to return to userland, keep rolling. */
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
-			break;
-		}
-		if (curcpu()->ci_data.cpu_softints != 0) {
+		if (preempt_needed()) {
 			break;
 		}
 		if (curlwp->l_flag & LW_USERRET) {


@@ -1,4 +1,4 @@
-/*	$NetBSD: qatvar.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/
+/*	$NetBSD: qatvar.h,v 1.2 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
  * Copyright (c) 2019 Internet Initiative Japan, Inc.
@@ -889,7 +889,7 @@ struct qat_softc {
  * and the configroot threads, which is running for qat_init(),
  * takes kernel_lock and the uvm_scheduler is not working at that point.
  */
-#define QAT_YIELD()		yield()
+#define QAT_YIELD()		preempt_point()
 
 extern int qat_dump;


@@ -1,4 +1,4 @@
-/*	$NetBSD: sched.h,v 1.13 2019/09/28 15:13:08 christos Exp $	*/
+/*	$NetBSD: sched.h,v 1.14 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -87,8 +87,7 @@ static inline void
 cond_resched(void)
 {
 
-	if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		preempt();
+	preempt_point();
 }
 
 #endif	/* _LINUX_SCHED_H_ */


@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_ktrace.c,v 1.175 2020/02/21 00:26:22 joerg Exp $	*/
+/*	$NetBSD: kern_ktrace.c,v 1.176 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
- * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.175 2020/02/21 00:26:22 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.176 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -720,7 +720,7 @@ ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
 	 */
 	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
 	if (resid > 0) {
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
+		if (preempt_needed()) {
 			(void)ktrenter(l);
 			preempt();
 			ktrexit(l);


@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.342 2020/02/23 16:27:09 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.343 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.342 2020/02/23 16:27:09 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.343 2020/03/14 18:08:39 ad Exp $");
 
 #include "opt_kstack.h"
 #include "opt_dtrace.h"
@@ -306,8 +306,7 @@ wakeup(wchan_t ident)
 
 /*
  * General yield call.  Puts the current LWP back on its run queue and
- * performs a voluntary context switch.  Should only be called when the
- * current LWP explicitly requests it (eg sched_yield(2)).
+ * performs a context switch.
  */
 void
 yield(void)
@@ -329,7 +328,12 @@ yield(void)
 
 /*
  * General preemption call.  Puts the current LWP back on its run queue
- * and performs an involuntary context switch.
+ * and performs an involuntary context switch.  Different from yield()
+ * in that:
+ *
+ * - It's counted differently (involuntary vs. voluntary).
+ * - Realtime threads go to the head of their runqueue vs. tail for yield().
+ * - Priority boost is retained unless LWP has exceeded timeslice.
  */
 void
 preempt(void)
@@ -342,13 +346,56 @@ preempt(void)
 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
 	KASSERT(l->l_stat == LSONPROC);
 
-	/* Involuntary - keep kpriority boost. */
-	l->l_pflag |= LP_PREEMPTING;
 	spc_lock(l->l_cpu);
+	/* Involuntary - keep kpriority boost unless a CPU hog. */
+	if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) != 0) {
+		l->l_kpriority = false;
+	}
+	l->l_pflag |= LP_PREEMPTING;
 	mi_switch(l);
 	KERNEL_LOCK(l->l_biglocks, l);
 }
 
+/*
+ * A breathing point for long running code in kernel.
+ */
+void
+preempt_point(void)
+{
+	lwp_t *l = curlwp;
+	int needed;
+
+	KPREEMPT_DISABLE(l);
+	needed = l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD;
+#ifndef __HAVE_FAST_SOFTINTS
+	needed |= l->l_cpu->ci_data.cpu_softints;
+#endif
+	KPREEMPT_ENABLE(l);
+
+	if (__predict_false(needed)) {
+		preempt();
+	}
+}
+
+/*
+ * Check the SPCF_SHOULDYIELD flag.
+ */
+bool
+preempt_needed(void)
+{
+	lwp_t *l = curlwp;
+	int needed;
+
+	KPREEMPT_DISABLE(l);
+	needed = l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD;
+#ifndef __HAVE_FAST_SOFTINTS
+	needed |= l->l_cpu->ci_data.cpu_softints;
+#endif
+	KPREEMPT_ENABLE(l);
+
+	return (bool)needed;
+}
+
 /*
  * Handle a request made by another agent to preempt the current LWP
  * in-kernel.  Usually called when l_dopreempt may be non-zero.


@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_copy.c,v 1.12 2020/02/22 21:59:30 chs Exp $	*/
+/*	$NetBSD: subr_copy.c,v 1.13 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
@@ -80,7 +80,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.12 2020/02/22 21:59:30 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.13 2020/03/14 18:08:39 ad Exp $");
 
 #define	__UFETCHSTORE_PRIVATE
 #define	__UCAS_PRIVATE
@@ -123,9 +123,7 @@ uiomove(void *buf, size_t n, struct uio *uio)
 		if (cnt > n)
 			cnt = n;
 		if (!VMSPACE_IS_KERNEL_P(vm)) {
-			if (curcpu()->ci_schedstate.spc_flags &
-			    SPCF_SHOULDYIELD)
-				preempt();
+			preempt_point();
 		}
 
 		if (uio->uio_rw == UIO_READ) {


@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_bio.c,v 1.289 2020/02/21 02:04:40 riastradh Exp $	*/
+/*	$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
@@ -123,7 +123,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.289 2020/02/21 02:04:40 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_bufcache.h"
@@ -1378,8 +1378,7 @@ allocbuf(buf_t *bp, int size, int preserve)
 	 * Need to trim overall memory usage.
 	 */
 	while (buf_canrelease()) {
-		if (curcpu()->ci_schedstate.spc_flags &
-		    SPCF_SHOULDYIELD) {
+		if (preempt_needed()) {
 			mutex_exit(&bufcache_lock);
 			preempt();
 			mutex_enter(&bufcache_lock);


@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.89 2020/03/14 15:34:24 ad Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.90 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.89 2020/03/14 15:34:24 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.90 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1047,8 +1047,7 @@ retry:
 		 * a preempt point.
 		 */
-		if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
+		if (preempt_needed()) {
 			nextoff = pg->offset; /* visit this page again */
 			rw_exit(slock);
 			preempt();


@@ -1,4 +1,4 @@
-/*	$NetBSD: nfs_syscalls.c,v 1.161 2019/02/03 03:19:28 mrg Exp $	*/
+/*	$NetBSD: nfs_syscalls.c,v 1.162 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
  * Copyright (c) 1989, 1993
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.161 2019/02/03 03:19:28 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.162 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -635,10 +635,8 @@ nfssvc_nfsd(struct nfssvc_copy_ops *ops, struct nfsd_srvargs *nsd,
 	for (;;) {
 		bool dummy;
 
-		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
-		}
+		preempt_point();
+
 		if (nfsd->nfsd_slp == NULL) {
 			mutex_enter(&nfsd_lock);
 			while (nfsd->nfsd_slp == NULL &&


@@ -1,4 +1,4 @@
-/*	$NetBSD: scheduler.c,v 1.50 2020/02/15 18:12:15 ad Exp $	*/
+/*	$NetBSD: scheduler.c,v 1.51 2020/03/14 18:08:39 ad Exp $	*/
 
 /*
  * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.50 2020/02/15 18:12:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.51 2020/03/14 18:08:39 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -597,3 +597,16 @@ sched_dequeue(struct lwp *l)
 
 	panic("sched_dequeue not implemented");
 }
+
+void
+preempt_point(void)
+{
+
+}
+
+bool
+preempt_needed(void)
+{
+
+	return false;
+}


@@ -1,4 +1,4 @@
-/*	$NetBSD: sched.h,v 1.87 2020/01/12 22:03:23 ad Exp $	*/
+/*	$NetBSD: sched.h,v 1.88 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008, 2019, 2020
@@ -268,6 +268,8 @@ void sched_print_runqueue(void (*pr)(const char *, ...)
 /* Dispatching */
 bool	kpreempt(uintptr_t);
 void	preempt(void);
+bool	preempt_needed(void);
+void	preempt_point(void);
 void	yield(void);
 void	mi_switch(struct lwp *);
 void	updatertime(lwp_t *, const struct bintime *);


@@ -1,4 +1,4 @@
-/*	$NetBSD: ext2fs_lookup.c,v 1.88 2016/08/23 06:40:25 christos Exp $	*/
+/*	$NetBSD: ext2fs_lookup.c,v 1.89 2020/03/14 18:08:40 ad Exp $	*/
 
 /*
  * Modified for NetBSD 1.2E
@@ -48,7 +48,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ext2fs_lookup.c,v 1.88 2016/08/23 06:40:25 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ext2fs_lookup.c,v 1.89 2020/03/14 18:08:40 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -418,8 +418,8 @@ ext2fs_lookup(void *v)
 
 searchloop:
 	while (results->ulr_offset < endsearch) {
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-			preempt();
+		preempt_point();
+
 		/*
 		 * If necessary, get the next directory block.
 		 */


@@ -1,4 +1,4 @@
-/*	$NetBSD: ulfs_dirhash.c,v 1.17 2016/06/20 01:53:38 dholland Exp $	*/
+/*	$NetBSD: ulfs_dirhash.c,v 1.18 2020/03/14 18:08:40 ad Exp $	*/
 /*  from NetBSD: ufs_dirhash.c,v 1.37 2014/12/20 00:28:05 christos Exp  */
 
 /*
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ulfs_dirhash.c,v 1.17 2016/06/20 01:53:38 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ulfs_dirhash.c,v 1.18 2020/03/14 18:08:40 ad Exp $");
 
 /*
  * This implements a hash-based lookup scheme for ULFS directories.
@@ -214,10 +214,8 @@ ulfsdirhash_build(struct inode *ip)
 	bmask = VFSTOULFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
 	pos = 0;
 	while (pos < ip->i_size) {
-		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
-		}
+		preempt_point();
+
 		/* If necessary, get the next directory block. */
 		if ((pos & bmask) == 0) {
 			if (bp != NULL)


@@ -1,4 +1,4 @@
-/*	$NetBSD: ulfs_lookup.c,v 1.41 2017/06/10 05:29:36 maya Exp $	*/
+/*	$NetBSD: ulfs_lookup.c,v 1.42 2020/03/14 18:08:40 ad Exp $	*/
 /*  from NetBSD: ufs_lookup.c,v 1.135 2015/07/11 11:04:48 mlelstv  */
 
 /*
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ulfs_lookup.c,v 1.41 2017/06/10 05:29:36 maya Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ulfs_lookup.c,v 1.42 2020/03/14 18:08:40 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_lfs.h"
@@ -292,8 +292,8 @@ ulfs_lookup(void *v)
 
 searchloop:
 	while (results->ulr_offset < endsearch) {
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-			preempt();
+		preempt_point();
+
 		/*
 		 * If necessary, get the next directory block.
 		 */


@@ -1,4 +1,4 @@
-/*	$NetBSD: ufs_dirhash.c,v 1.38 2020/03/08 00:23:59 chs Exp $	*/
+/*	$NetBSD: ufs_dirhash.c,v 1.39 2020/03/14 18:08:40 ad Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002 Ian Dowse.  All rights reserved.
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ufs_dirhash.c,v 1.38 2020/03/08 00:23:59 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ufs_dirhash.c,v 1.39 2020/03/14 18:08:40 ad Exp $");
 
 /*
  * This implements a hash-based lookup scheme for UFS directories.
@@ -212,10 +212,8 @@ ufsdirhash_build(struct inode *ip)
 	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
 	pos = 0;
 	while (pos < ip->i_size) {
-		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-		    != 0) {
-			preempt();
-		}
+		preempt_point();
+
 		/* If necessary, get the next directory block. */
 		if ((pos & bmask) == 0) {
 			if (bp != NULL)


@@ -1,4 +1,4 @@
-/*	$NetBSD: ufs_lookup.c,v 1.150 2019/05/05 15:07:12 christos Exp $	*/
+/*	$NetBSD: ufs_lookup.c,v 1.151 2020/03/14 18:08:40 ad Exp $	*/
 
 /*
  * Copyright (c) 1989, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ufs_lookup.c,v 1.150 2019/05/05 15:07:12 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ufs_lookup.c,v 1.151 2020/03/14 18:08:40 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ffs.h"
@@ -453,8 +453,8 @@ ufs_lookup(void *v)
 
 searchloop:
	while (results->ulr_offset < endsearch) {
-		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
-			preempt();
+		preempt_point();
+
 		/*
 		 * If necessary, get the next directory block.
 		 */


@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.c,v 1.116 2020/02/24 12:38:57 rin Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.117 2020/03/14 18:08:40 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.116 2020/02/24 12:38:57 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.117 2020/03/14 18:08:40 ad Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -764,8 +764,8 @@ amap_wipeout(struct vm_amap *amap)
 			anon->an_link = tofree;
 			tofree = anon;
 		}
-		if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
-			preempt();
+		if ((lcv & 31) == 31) {
+			preempt_point();
 		}
 	}