Switch vfs_vnode_lock_print() and printlockedvnodes() to _mountlist_next().

Switch sched_sync() and sysctl_kern_vnode() to mountlist iterator.
Author: hannken
Date:   2017-04-12 10:26:33 +00:00
parent 5ff843c227
commit 3137e0cee1


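For the syncer and the sysctl handler, the hand-rolled walk of &mountlist under mountlist_lock, with its per-mount vfs_busy()/vfs_unbusy() calls and the nmp lookahead pointer, is replaced by the mountlist iterator. Below is a minimal sketch of the pattern the hunks adopt, using only the calls that appear in the diff (mountlist_iterator_init(), mountlist_iterator_next(), mountlist_iterator_destroy()); the function name, headers, and comments are illustrative assumptions, not part of the commit:

#include <sys/param.h>
#include <sys/mount.h>	/* assumed to declare mount_iterator_t and the iterator calls */

static void
walk_mounts_sketch(void)
{
	mount_iterator_t *iter;
	struct mount *mp;

	mountlist_iterator_init(&iter);
	while ((mp = mountlist_iterator_next(iter)) != NULL) {
		/*
		 * Per-mount work goes here.  Note that the callers below no
		 * longer take mountlist_lock or pair vfs_busy()/vfs_unbusy()
		 * around this body themselves.
		 */
	}
	mountlist_iterator_destroy(iter);
}

sched_sync() keeps its IMNT_ONWORKLIST / syncer_delayno filter inside the loop body and simply continues past mounts it is not interested in.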
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_subr.c,v 1.461 2017/03/31 08:38:14 msaitoh Exp $ */
+/* $NetBSD: vfs_subr.c,v 1.462 2017/04/12 10:26:33 hannken Exp $ */
 /*-
  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008 The NetBSD Foundation, Inc.
@@ -68,7 +68,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.461 2017/03/31 08:38:14 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.462 2017/04/12 10:26:33 hannken Exp $");
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -759,9 +759,10 @@ lazy_sync_vnode(struct vnode *vp)
 void
 sched_sync(void *arg)
 {
+	mount_iterator_t *iter;
 	synclist_t *slp;
 	struct vnode *vp;
-	struct mount *mp, *nmp;
+	struct mount *mp;
 	time_t starttime;
 	bool synced;
@@ -773,23 +774,19 @@ sched_sync(void *arg)
 		/*
 		 * Sync mounts whose dirty time has expired.
 		 */
-		mutex_enter(&mountlist_lock);
-		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
+		mountlist_iterator_init(&iter);
+		while ((mp = mountlist_iterator_next(iter)) != NULL) {
 			if ((mp->mnt_iflag & IMNT_ONWORKLIST) == 0 ||
 			    mp->mnt_synclist_slot != syncer_delayno) {
-				nmp = TAILQ_NEXT(mp, mnt_list);
 				continue;
 			}
 			mp->mnt_synclist_slot = sync_delay_slot(sync_delay(mp));
-			if (vfs_busy(mp, &nmp))
-				continue;
 			if (fstrans_start_nowait(mp, FSTRANS_SHARED) == 0) {
 				VFS_SYNC(mp, MNT_LAZY, curlwp->l_cred);
 				fstrans_done(mp);
 			}
-			vfs_unbusy(mp, false, &nmp);
 		}
-		mutex_exit(&mountlist_lock);
+		mountlist_iterator_destroy(iter);
 		mutex_enter(&syncer_data_lock);
@@ -967,8 +964,9 @@ sysctl_kern_vnode(SYSCTLFN_ARGS)
 {
 	char *where = oldp;
 	size_t *sizep = oldlenp;
-	struct mount *mp, *nmp;
+	struct mount *mp;
 	vnode_t *vp, vbuf;
+	mount_iterator_t *iter;
 	struct vnode_iterator *marker;
 	char *bp = where;
 	char *ewhere;
@@ -988,17 +986,14 @@ sysctl_kern_vnode(SYSCTLFN_ARGS)
 	ewhere = where + *sizep;
 	sysctl_unlock();
-	mutex_enter(&mountlist_lock);
-	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
-		if (vfs_busy(mp, &nmp)) {
-			continue;
-		}
+	mountlist_iterator_init(&iter);
+	while ((mp = mountlist_iterator_next(iter)) != NULL) {
 		vfs_vnode_iterator_init(mp, &marker);
 		while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
 			if (bp + VPTRSZ + VNODESZ > ewhere) {
 				vrele(vp);
 				vfs_vnode_iterator_destroy(marker);
-				vfs_unbusy(mp, false, NULL);
+				mountlist_iterator_destroy(iter);
 				sysctl_relock();
 				*sizep = bp - where;
 				return (ENOMEM);
@@ -1008,7 +1003,7 @@ sysctl_kern_vnode(SYSCTLFN_ARGS)
 			    (error = copyout(&vbuf, bp + VPTRSZ, VNODESZ))) {
 				vrele(vp);
 				vfs_vnode_iterator_destroy(marker);
-				vfs_unbusy(mp, false, NULL);
+				mountlist_iterator_destroy(iter);
 				sysctl_relock();
 				return (error);
 			}
@@ -1016,9 +1011,8 @@ sysctl_kern_vnode(SYSCTLFN_ARGS)
 			bp += VPTRSZ + VNODESZ;
 		}
 		vfs_vnode_iterator_destroy(marker);
-		vfs_unbusy(mp, false, &nmp);
 	}
-	mutex_exit(&mountlist_lock);
+	mountlist_iterator_destroy(iter);
 	sysctl_relock();
 	*sizep = bp - where;
@@ -1549,7 +1543,7 @@ vfs_vnode_lock_print(void *vlock, int full, void (*pr)(const char *, ...))
 	struct mount *mp;
 	vnode_impl_t *vip;
-	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
+	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
 		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
 			if (&vip->vi_lock != vlock)
 				continue;
@@ -1656,25 +1650,18 @@ void printlockedvnodes(void);
 void
 printlockedvnodes(void)
 {
-	struct mount *mp, *nmp;
+	struct mount *mp;
 	vnode_t *vp;
 	vnode_impl_t *vip;
 	printf("Locked vnodes\n");
-	mutex_enter(&mountlist_lock);
-	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
-		if (vfs_busy(mp, &nmp)) {
-			continue;
-		}
+	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
 		TAILQ_FOREACH(vip, &mp->mnt_vnodelist, vi_mntvnodes) {
 			vp = VIMPL_TO_VNODE(vip);
 			if (VOP_ISLOCKED(vp))
 				vprint(NULL, vp);
 		}
-		mutex_enter(&mountlist_lock);
-		vfs_unbusy(mp, false, &nmp);
 	}
-	mutex_exit(&mountlist_lock);
 }
 #endif /* DDB || DEBUGPRINT */
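
The two DDB/DEBUGPRINT helpers go a different way: they drop the mountlist_lock and vfs_busy() handling entirely and walk the list with _mountlist_next(), where passing NULL yields the first mount and passing a mount yields its successor. An illustrative loop in the same shape as the hunks above; the function name, headers, and comments are assumptions, not part of the commit:

#include <sys/param.h>
#include <sys/mount.h>	/* _mountlist_next() assumed visible here for kernel code */

static void
ddb_walk_mounts_sketch(void)
{
	struct mount *mp;

	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp)) {
		/*
		 * Inspect mp here.  No locks are taken, matching what the
		 * helpers above can safely do when run from the debugger.
		 */
	}
}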