TRUE -> true, FALSE -> false

thorpej 2007-02-22 06:05:00 +00:00
parent ab5a292aa4
commit b3667ada6d
34 changed files with 372 additions and 372 deletions
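The change across the files below is mechanical: the kernel's old TRUE/FALSE spellings are replaced by the lowercase true/false used with the C99-style bool type. As a minimal sketch of the pattern (a hypothetical helper, not taken from any file in this commit):

#include <stdbool.h>	/* in the kernel, bool/true/false come from the system headers */

/* Hypothetical example of the respelling; mirrors the diffs below. */
static bool
flag_from_count(int count)
{
	bool gotiod = false;	/* was: gotiod = FALSE; */

	if (count > 0)
		gotiod = true;	/* was: gotiod = TRUE; */

	return gotiod;
}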


@ -1,4 +1,4 @@
/* $NetBSD: nfs_bio.c,v 1.148 2007/02/21 23:00:08 thorpej Exp $ */
/* $NetBSD: nfs_bio.c,v 1.149 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.148 2007/02/21 23:00:08 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.149 2007/02/22 06:14:28 thorpej Exp $");
#include "opt_nfs.h"
#include "opt_ddb.h"
@ -749,7 +749,7 @@ nfs_asyncio(bp)
again:
if (nmp->nm_flag & NFSMNT_INT)
slpflag = PCATCH;
gotiod = FALSE;
gotiod = false;
/*
* Find a free iod to process this request.
@ -770,7 +770,7 @@ again:
simple_lock(&nmp->nm_slock);
simple_unlock(&iod->nid_slock);
nmp->nm_bufqiods++;
gotiod = TRUE;
gotiod = true;
break;
}
simple_unlock(&iod->nid_slock);
@ -784,7 +784,7 @@ again:
if (!gotiod) {
simple_lock(&nmp->nm_slock);
if (nmp->nm_bufqiods > 0)
gotiod = TRUE;
gotiod = true;
}
LOCK_ASSERT(simple_lock_held(&nmp->nm_slock));
@ -809,7 +809,7 @@ again:
/* Enque for later, to avoid free-page deadlock */
(void) 0;
} else while (nmp->nm_bufqlen >= 2*nfs_numasync) {
nmp->nm_bufqwant = TRUE;
nmp->nm_bufqwant = true;
error = ltsleep(&nmp->nm_bufq,
slpflag | PRIBIO | PNORELOCK,
"nfsaio", slptimeo, &nmp->nm_slock);
@ -944,13 +944,13 @@ nfs_doio_write(bp, uiop)
struct nfsnode *np = VTONFS(vp);
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
int iomode;
bool stalewriteverf = FALSE;
bool stalewriteverf = false;
int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct vm_page *pgs[npages];
#ifndef NFS_V2_ONLY
bool needcommit = TRUE; /* need only COMMIT RPC */
bool needcommit = true; /* need only COMMIT RPC */
#else
bool needcommit = FALSE; /* need only COMMIT RPC */
bool needcommit = false; /* need only COMMIT RPC */
#endif
bool pageprotected;
struct uvm_object *uobj = &vp->v_uobj;
@ -992,11 +992,11 @@ again:
* we need do WRITE RPC.
*/
if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
needcommit = FALSE;
needcommit = false;
simple_unlock(&uobj->vmobjlock);
} else {
iomode = NFSV3WRITE_FILESYNC;
needcommit = FALSE;
needcommit = false;
}
}
if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
@ -1006,9 +1006,9 @@ again:
pmap_page_protect(pgs[i], VM_PROT_READ);
}
simple_unlock(&uobj->vmobjlock);
pageprotected = TRUE; /* pages can't be modified during i/o. */
pageprotected = true; /* pages can't be modified during i/o. */
} else
pageprotected = FALSE;
pageprotected = false;
/*
* Send the data to the server if necessary,
@ -1033,11 +1033,11 @@ again:
if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
bool pushedrange;
if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
pushedrange = TRUE;
pushedrange = true;
off = np->n_pushlo;
cnt = np->n_pushhi - np->n_pushlo;
} else {
pushedrange = FALSE;
pushedrange = false;
}
error = nfs_commit(vp, off, cnt, curlwp);
if (error == 0) {
@ -1099,7 +1099,7 @@ again:
nfs_del_tobecommitted_range(vp, off, cnt);
}
if (error == NFSERR_STALEWRITEVERF) {
stalewriteverf = TRUE;
stalewriteverf = true;
error = 0; /* it isn't a real error */
}
} else {
@ -1169,7 +1169,7 @@ nfs_doio_phys(bp, uiop)
uiop->uio_rw = UIO_WRITE;
nfsstats.write_physios++;
rw_enter(&nmp->nm_writeverflock, RW_READER);
error = nfs_writerpc(vp, uiop, &iomode, FALSE, &stalewriteverf);
error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
rw_exit(&nmp->nm_writeverflock);
if (stalewriteverf) {
nfs_clearcommit(bp->b_vp->v_mount);


@ -1,4 +1,4 @@
/* $NetBSD: nfs_serv.c,v 1.124 2007/02/20 16:27:21 pooka Exp $ */
/* $NetBSD: nfs_serv.c,v 1.125 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -55,7 +55,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_serv.c,v 1.124 2007/02/20 16:27:21 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_serv.c,v 1.125 2007/02/22 06:14:28 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -123,7 +123,7 @@ nfsrv3_access(nfsd, slp, lwp, mrq)
nfsm_srvmtofh(&nsfh);
nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam, &rdonly,
(nfsd->nd_flag & ND_KERBAUTH), FALSE);
(nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(NFSX_UNSIGNED);
nfsm_srvpostop_attr(1, (struct vattr *)0);
@ -189,7 +189,7 @@ nfsrv_getattr(nfsd, slp, lwp, mrq)
nfsm_srvmtofh(&nsfh);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam, &rdonly,
(nfsd->nd_flag & ND_KERBAUTH), FALSE);
(nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(0);
return (0);
@ -285,7 +285,7 @@ nfsrv_setattr(nfsd, slp, lwp, mrq)
* Now that we have all the fields, lets do it.
*/
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam, &rdonly,
(nfsd->nd_flag & ND_KERBAUTH), FALSE);
(nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(2 * NFSX_UNSIGNED);
nfsm_srvwcc_data(preat_ret, &preat, postat_ret, &va);
@ -523,7 +523,7 @@ nfsrv_readlink(nfsd, slp, lwp, mrq)
uiop->uio_rw = UIO_READ;
UIO_SETUP_SYSSPACE(uiop);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
m_freem(mp3);
nfsm_reply(2 * NFSX_UNSIGNED);
@ -605,7 +605,7 @@ nfsrv_read(nfsd, slp, lwp, mrq)
reqlen = fxdr_unsigned(uint32_t, *tl);
reqlen = MIN(reqlen, NFS_SRVMAXDATA(nfsd));
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(2 * NFSX_UNSIGNED);
nfsm_srvpostop_attr(1, (struct vattr *)0);
@ -882,7 +882,7 @@ nfsrv_write(nfsd, slp, lwp, mrq)
return (0);
}
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(2 * NFSX_UNSIGNED);
nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, &va);
@ -1187,7 +1187,7 @@ loop1:
forat_ret = aftat_ret = 1;
error = nfsrv_fhtovp(&nfsd->nd_fh, 1, &vp, cred, slp,
nfsd->nd_nam, &rdonly, (nfsd->nd_flag & ND_KERBAUTH),
FALSE);
false);
if (!error) {
if (v3)
forat_ret = VOP_GETATTR(vp, &forat, cred, lwp);
@ -1422,7 +1422,7 @@ nfsrv_create(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -1645,7 +1645,7 @@ nfsrv_mknod(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp)
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
if (error) {
@ -1719,7 +1719,7 @@ abort:
out:
vp = nd.ni_vp;
if (!error) {
error = nfsrv_composefh(vp, &nsfh, TRUE);
error = nfsrv_composefh(vp, &nsfh, true);
if (!error)
error = VOP_GETATTR(vp, &va, cred, lwp);
vput(vp);
@ -1728,7 +1728,7 @@ out:
diraft_ret = VOP_GETATTR(dirp, &diraft, cred, lwp);
vrele(dirp);
}
nfsm_reply(NFSX_SRVFH(&nsfh, TRUE) + NFSX_POSTOPATTR(1) +
nfsm_reply(NFSX_SRVFH(&nsfh, true) + NFSX_POSTOPATTR(1) +
NFSX_WCCDATA(1));
if (!error) {
nfsm_srvpostop_fh(&nsfh);
@ -1791,7 +1791,7 @@ nfsrv_remove(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = DELETE;
nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -1889,7 +1889,7 @@ nfsrv_rename(nfsd, slp, lwp, mrq)
fromnd.ni_cnd.cn_nameiop = DELETE;
fromnd.ni_cnd.cn_flags = LOCKPARENT | SAVESTART;
error = nfs_namei(&fromnd, &fnsfh, len, slp, nam, &md,
&dpos, &fdirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dpos, &fdirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (fdirp && v3) {
fdirfor_ret = VOP_GETATTR(fdirp, &fdirfor, cred, lwp);
}
@ -1921,7 +1921,7 @@ nfsrv_rename(nfsd, slp, lwp, mrq)
tond.ni_cnd.cn_nameiop = RENAME;
tond.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART;
error = nfs_namei(&tond, &tnsfh, len2, slp, nam, &md,
&dpos, &tdirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dpos, &tdirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (tdirp && v3) {
tdirfor_ret = VOP_GETATTR(tdirp, &tdirfor, cred, lwp);
}
@ -2088,8 +2088,8 @@ nfsrv_link(nfsd, slp, lwp, mrq)
vn_start_write(NULL, &mp, V_WAIT);
nfsm_srvmtofh(&dnsfh);
nfsm_srvnamesiz(len);
error = nfsrv_fhtovp(&nsfh, FALSE, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
error = nfsrv_fhtovp(&nsfh, false, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_WCCDATA(v3));
nfsm_srvpostop_attr(getret, &at);
@ -2104,7 +2104,7 @@ nfsrv_link(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT;
error = nfs_namei(&nd, &dnsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -2194,7 +2194,7 @@ nfsrv_symlink(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -2331,7 +2331,7 @@ nfsrv_mkdir(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = CREATE;
nd.ni_cnd.cn_flags = LOCKPARENT;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -2445,7 +2445,7 @@ nfsrv_rmdir(nfsd, slp, lwp, mrq)
nd.ni_cnd.cn_nameiop = DELETE;
nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
error = nfs_namei(&nd, &nsfh, len, slp, nam, &md, &dpos,
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&dirp, lwp, (nfsd->nd_flag & ND_KERBAUTH), false);
if (dirp && v3) {
dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, lwp);
}
@ -2596,7 +2596,7 @@ nfsrv_readdir(nfsd, slp, lwp, mrq)
siz = xfer;
fullsiz = siz;
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (!error && vp->v_type != VDIR) {
error = ENOTDIR;
vput(vp);
@ -2854,7 +2854,7 @@ nfsrv_readdirplus(nfsd, slp, lwp, mrq)
siz = xfer;
fullsiz = siz;
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (!error && vp->v_type != VDIR) {
error = ENOTDIR;
vput(vp);
@ -3003,7 +3003,7 @@ again:
*/
if (VFS_VGET(vp->v_mount, dp->d_fileno, &nvp))
goto invalid;
if (nfsrv_composefh(nvp, &nnsfh, TRUE)) {
if (nfsrv_composefh(nvp, &nnsfh, true)) {
vput(nvp);
goto invalid;
}
@ -3169,7 +3169,7 @@ nfsrv_commit(nfsd, slp, lwp, mrq)
tl += 2;
cnt = fxdr_unsigned(uint32_t, *tl);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(2 * NFSX_UNSIGNED);
nfsm_srvwcc_data(for_ret, &bfor, aft_ret, &aft);
@ -3229,7 +3229,7 @@ nfsrv_statfs(nfsd, slp, lwp, mrq)
nfsm_srvmtofh(&nsfh);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(NFSX_UNSIGNED);
nfsm_srvpostop_attr(getret, &at);
@ -3303,7 +3303,7 @@ nfsrv_fsinfo(nfsd, slp, lwp, mrq)
nfsm_srvmtofh(&nsfh);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(NFSX_UNSIGNED);
nfsm_srvpostop_attr(getret, &at);
@ -3376,7 +3376,7 @@ nfsrv_pathconf(nfsd, slp, lwp, mrq)
nfsm_srvmtofh(&nsfh);
error = nfsrv_fhtovp(&nsfh, 1, &vp, cred, slp, nam,
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), FALSE);
&rdonly, (nfsd->nd_flag & ND_KERBAUTH), false);
if (error) {
nfsm_reply(NFSX_UNSIGNED);
nfsm_srvpostop_attr(getret, &at);


@ -1,4 +1,4 @@
/* $NetBSD: nfs_socket.c,v 1.146 2007/02/21 23:00:08 thorpej Exp $ */
/* $NetBSD: nfs_socket.c,v 1.147 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1991, 1993, 1995
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_socket.c,v 1.146 2007/02/21 23:00:08 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_socket.c,v 1.147 2007/02/22 06:14:28 thorpej Exp $");
#include "fs_nfs.h"
#include "opt_nfs.h"
@ -976,7 +976,7 @@ nfs_request(np, mrest, procnum, lwp, cred, mrp, mdp, dposp, rexmitp)
kauth_cred_t acred;
struct mbuf *mrest_backup = NULL;
kauth_cred_t origcred = NULL; /* XXX: gcc */
bool retry_cred = TRUE;
bool retry_cred = true;
bool use_opencred = (np->n_flag & NUSEOPENCRED) != 0;
if (rexmitp != NULL)
@ -1020,7 +1020,7 @@ kerbauth:
return (error);
}
}
retry_cred = FALSE;
retry_cred = false;
} else {
/* AUTH_UNIX */
uid_t uid;
@ -1045,7 +1045,7 @@ kerbauth:
gid = np->n_vattr->va_gid;
if (kauth_cred_geteuid(cred) == uid &&
kauth_cred_getegid(cred) == gid) {
retry_cred = FALSE;
retry_cred = false;
break;
}
if (use_opencred)
@ -1059,7 +1059,7 @@ kerbauth:
cred = acred;
break;
default:
retry_cred = FALSE;
retry_cred = false;
break;
}
/*
@ -1259,7 +1259,7 @@ tryagain:
mrest_backup = NULL;
cred = origcred;
error = 0;
retry_cred = FALSE;
retry_cred = false;
goto tryagain_cred;
case NFSERR_EXIST:
@ -2445,7 +2445,7 @@ nfsrv_dorec(slp, nfsd, ndp)
*ndp = NULL;
if (nfsdsock_lock(slp, TRUE)) {
if (nfsdsock_lock(slp, true)) {
return ENOBUFS;
}
m = slp->ns_rec;
@ -2470,7 +2470,7 @@ nfsrv_dorec(slp, nfsd, ndp)
nd->nd_md = nd->nd_mrep = m;
nd->nd_nam2 = nam;
nd->nd_dpos = mtod(m, caddr_t);
error = nfs_getreq(nd, nfsd, TRUE);
error = nfs_getreq(nd, nfsd, true);
if (error) {
m_freem(nam);
nfsdreq_free(nd);


@ -1,4 +1,4 @@
/* $NetBSD: nfs_srvcache.c,v 1.36 2007/02/05 11:55:45 yamt Exp $ */
/* $NetBSD: nfs_srvcache.c,v 1.37 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.36 2007/02/05 11:55:45 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.37 2007/02/22 06:14:28 thorpej Exp $");
#include "opt_iso.h"
@ -95,51 +95,51 @@ static void nfsrv_unlockcache(struct nfsrvcache *rp);
* Static array that defines which nfs rpc's are nonidempotent
*/
const int nonidempotent[NFS_NPROCS] = {
FALSE, /* NULL */
FALSE, /* GETATTR */
TRUE, /* SETATTR */
FALSE, /* LOOKUP */
FALSE, /* ACCESS */
FALSE, /* READLINK */
FALSE, /* READ */
TRUE, /* WRITE */
TRUE, /* CREATE */
TRUE, /* MKDIR */
TRUE, /* SYMLINK */
TRUE, /* MKNOD */
TRUE, /* REMOVE */
TRUE, /* RMDIR */
TRUE, /* RENAME */
TRUE, /* LINK */
FALSE, /* READDIR */
FALSE, /* READDIRPLUS */
FALSE, /* FSSTAT */
FALSE, /* FSINFO */
FALSE, /* PATHCONF */
FALSE, /* COMMIT */
FALSE, /* NOOP */
false, /* NULL */
false, /* GETATTR */
true, /* SETATTR */
false, /* LOOKUP */
false, /* ACCESS */
false, /* READLINK */
false, /* READ */
true, /* WRITE */
true, /* CREATE */
true, /* MKDIR */
true, /* SYMLINK */
true, /* MKNOD */
true, /* REMOVE */
true, /* RMDIR */
true, /* RENAME */
true, /* LINK */
false, /* READDIR */
false, /* READDIRPLUS */
false, /* FSSTAT */
false, /* FSINFO */
false, /* PATHCONF */
false, /* COMMIT */
false, /* NOOP */
};
/* True iff the rpc reply is an nfs status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
FALSE, /* NULL */
FALSE, /* GETATTR */
FALSE, /* SETATTR */
FALSE, /* NOOP */
FALSE, /* LOOKUP */
FALSE, /* READLINK */
FALSE, /* READ */
FALSE, /* Obsolete WRITECACHE */
FALSE, /* WRITE */
FALSE, /* CREATE */
TRUE, /* REMOVE */
TRUE, /* RENAME */
TRUE, /* LINK */
TRUE, /* SYMLINK */
FALSE, /* MKDIR */
TRUE, /* RMDIR */
FALSE, /* READDIR */
FALSE, /* STATFS */
false, /* NULL */
false, /* GETATTR */
false, /* SETATTR */
false, /* NOOP */
false, /* LOOKUP */
false, /* READLINK */
false, /* READ */
false, /* Obsolete WRITECACHE */
false, /* WRITE */
false, /* CREATE */
true, /* REMOVE */
true, /* RENAME */
true, /* LINK */
true, /* SYMLINK */
false, /* MKDIR */
true, /* RMDIR */
false, /* READDIR */
false, /* STATFS */
};
static void


@ -1,4 +1,4 @@
/* $NetBSD: nfs_subs.c,v 1.181 2007/02/21 23:00:09 thorpej Exp $ */
/* $NetBSD: nfs_subs.c,v 1.182 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.181 2007/02/21 23:00:09 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.182 2007/02/22 06:14:28 thorpej Exp $");
#include "fs_nfs.h"
#include "opt_nfs.h"
@ -1502,8 +1502,8 @@ nfs_init0(void)
rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4);
nfs_prog = txdr_unsigned(NFS_PROG);
nfs_true = txdr_unsigned(TRUE);
nfs_false = txdr_unsigned(FALSE);
nfs_true = txdr_unsigned(true);
nfs_false = txdr_unsigned(false);
nfs_xdrneg1 = txdr_unsigned(-1);
nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
if (nfs_ticks < 1)
@ -2095,7 +2095,7 @@ nfs_namei(ndp, nsfh, len, slp, nam, mdp, dposp, retdirp, l, kerbflag, pubflag)
/*
* Extract and set starting directory.
*/
error = nfsrv_fhtovp(nsfh, FALSE, &dp, ndp->ni_cnd.cn_cred, slp,
error = nfsrv_fhtovp(nsfh, false, &dp, ndp->ni_cnd.cn_cred, slp,
nam, &rdonly, kerbflag, pubflag);
if (error)
goto out;
@ -2556,22 +2556,22 @@ nfs_ispublicfh(const nfsrvfh_t *nsfh)
int i;
if (NFSRVFH_SIZE(nsfh) == 0) {
return TRUE;
return true;
}
if (NFSRVFH_SIZE(nsfh) != NFSX_V2FH) {
return FALSE;
return false;
}
for (i = 0; i < NFSX_V2FH; i++)
if (*cp++ != 0)
return FALSE;
return TRUE;
return false;
return true;
}
#endif /* NFSSERVER */
/*
* This function compares two net addresses by family and returns TRUE
* This function compares two net addresses by family and returns true
* if they are the same host.
* If there is any doubt, return FALSE.
* If there is any doubt, return false.
* The AF_INET family is handled as a special case so that address mbufs
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
*/


@ -1,4 +1,4 @@
/* $NetBSD: nfs_syscalls.c,v 1.105 2007/02/09 21:55:37 ad Exp $ */
/* $NetBSD: nfs_syscalls.c,v 1.106 2007/02/22 06:14:28 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.105 2007/02/09 21:55:37 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.106 2007/02/22 06:14:28 thorpej Exp $");
#include "fs_nfs.h"
#include "opt_nfs.h"
@ -700,13 +700,13 @@ nfssvc_nfsd(nsd, argp, l)
}
if (error) {
nfsstats.srv_errs++;
nfsrv_updatecache(nd, FALSE, mreq);
nfsrv_updatecache(nd, false, mreq);
if (nd->nd_nam2)
m_freem(nd->nd_nam2);
break;
}
nfsstats.srvrpccnt[nd->nd_procnum]++;
nfsrv_updatecache(nd, TRUE, mreq);
nfsrv_updatecache(nd, true, mreq);
nd->nd_mrep = (struct mbuf *)0;
case RC_REPLY:
m = mreq;
@ -790,7 +790,7 @@ done:
free((caddr_t)nfsd, M_NFSD);
nsd->nsd_nfsd = (struct nfsd *)0;
if (--nfs_numnfsd == 0)
nfsrv_init(TRUE); /* Reinitialize everything */
nfsrv_init(true); /* Reinitialize everything */
return (error);
}
@ -1028,7 +1028,7 @@ nfssvc_iod(l)
* Just loop around doing our stuff until SIGKILL
*/
for (;;) {
while (/*CONSTCOND*/ TRUE) {
while (/*CONSTCOND*/ true) {
simple_lock(&myiod->nid_slock);
nmp = myiod->nid_mount;
if (nmp) {
@ -1055,7 +1055,7 @@ nfssvc_iod(l)
nmp->nm_bufqlen--;
if (nmp->nm_bufqwant &&
nmp->nm_bufqlen < 2 * nfs_numasync) {
nmp->nm_bufqwant = FALSE;
nmp->nm_bufqwant = false;
wakeup(&nmp->nm_bufq);
}
simple_unlock(&nmp->nm_slock);


@ -1,4 +1,4 @@
/* $NetBSD: nfs_vfsops.c,v 1.172 2007/02/15 16:01:51 yamt Exp $ */
/* $NetBSD: nfs_vfsops.c,v 1.173 2007/02/22 06:14:29 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993, 1995
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_vfsops.c,v 1.172 2007/02/15 16:01:51 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_vfsops.c,v 1.173 2007/02/22 06:14:29 thorpej Exp $");
#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
@ -708,7 +708,7 @@ mountnfs(argp, mp, nam, pth, hst, vpp, l)
if (nfs_niothreads < 0) {
nfs_niothreads = NFS_DEFAULT_NIOTHREADS;
nfs_getset_niothreads(TRUE);
nfs_getset_niothreads(true);
}
if (mp->mnt_flag & MNT_UPDATE) {


@ -1,4 +1,4 @@
/* $NetBSD: nfs_vnops.c,v 1.250 2007/02/21 23:00:09 thorpej Exp $ */
/* $NetBSD: nfs_vnops.c,v 1.251 2007/02/22 06:14:29 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.250 2007/02/21 23:00:09 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.251 2007/02/22 06:14:29 thorpej Exp $");
#include "opt_inet.h"
#include "opt_nfs.h"
@ -727,7 +727,7 @@ nfs_setattrrpc(vp, vap, cred, l)
nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
if (v3) {
nfsm_v3attrbuild(vap, TRUE);
nfsm_v3attrbuild(vap, true);
nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
*tl = nfs_false;
} else {
@ -754,7 +754,7 @@ nfs_setattrrpc(vp, vap, cred, l)
nfsm_request(np, NFSPROC_SETATTR, l, cred);
#ifndef NFS_V2_ONLY
if (v3) {
nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
} else
#endif
nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
@ -1340,7 +1340,7 @@ retry:
while (tsiz > 0) {
uint32_t datalen; /* data bytes need to be allocated in mbuf */
uint32_t backup;
bool stalewriteverf = FALSE;
bool stalewriteverf = false;
nfsstats.rpccnt[NFSPROC_WRITE]++;
len = min(tsiz, nmp->nm_wsize);
@ -1453,7 +1453,7 @@ retry:
*/
if ((nmp->nm_iflag &
NFSMNT_STALEWRITEVERF) == 0) {
stalewriteverf = TRUE;
stalewriteverf = true;
nmp->nm_iflag |=
NFSMNT_STALEWRITEVERF;
}
@ -1471,8 +1471,8 @@ retry:
tsiz -= len;
byte_count += len;
if (stalewriteverf) {
*stalewriteverfp = TRUE;
stalewriteverf = FALSE;
*stalewriteverfp = true;
stalewriteverf = false;
if (committed == NFSV3WRITE_UNSTABLE &&
len != origresid) {
/*
@ -1557,7 +1557,7 @@ nfs_mknodrpc(dvp, vpp, cnp, vap)
if (v3) {
nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
*tl++ = vtonfsv3_type(vap->va_type);
nfsm_v3attrbuild(vap, FALSE);
nfsm_v3attrbuild(vap, false);
if (vap->va_type == VCHR || vap->va_type == VBLK) {
nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
*tl++ = txdr_unsigned(major(vap->va_rdev));
@ -1699,7 +1699,7 @@ again:
*tl = ++create_verf;
} else {
*tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
nfsm_v3attrbuild(vap, FALSE);
nfsm_v3attrbuild(vap, false);
}
} else
#endif
@ -1833,7 +1833,7 @@ nfs_remove(v)
error = nfs_removerpc(dvp, cnp->cn_nameptr,
cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp);
} else if (!np->n_sillyrename)
error = nfs_sillyrename(dvp, vp, cnp, FALSE);
error = nfs_sillyrename(dvp, vp, cnp, false);
PNBUF_PUT(cnp->cn_pnbuf);
if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
vattr.va_nlink == 1) {
@ -1956,7 +1956,7 @@ nfs_rename(v)
* that there's no window when the "to" file doesn't exist.
*/
if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, TRUE)) {
tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
VN_KNOTE(tvp, NOTE_DELETE);
vput(tvp);
tvp = NULL;
@ -2204,7 +2204,7 @@ nfs_symlink(v)
nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONlY
if (v3)
nfsm_v3attrbuild(vap, FALSE);
nfsm_v3attrbuild(vap, false);
#endif
nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
#ifndef NFS_V2_ONlY
@ -2296,7 +2296,7 @@ nfs_mkdir(v)
nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
if (v3) {
nfsm_v3attrbuild(vap, FALSE);
nfsm_v3attrbuild(vap, false);
} else
#endif
{
@ -3165,7 +3165,7 @@ nfs_commit(vp, offset, cnt, l)
tl += 2;
*tl = txdr_unsigned(cnt);
nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
if (!error) {
nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
simple_lock(&nmp->nm_slock);


@ -1,4 +1,4 @@
/* $NetBSD: nfsm_subs.h,v 1.48 2007/02/21 23:00:09 thorpej Exp $ */
/* $NetBSD: nfsm_subs.h,v 1.49 2007/02/22 06:14:29 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -250,7 +250,7 @@
* NFSV3_WCCCHK return true if pre_op_attr's mtime is the same
* as our n_mtime. (ie. our cache isn't stale.)
* flags: (IN) flags for nfsm_loadattrcache
* docheck: (IN) TRUE if timestamp change is expected
* docheck: (IN) true if timestamp change is expected
*/
/* Used as (f) for nfsm_wcc_data() */
@ -261,10 +261,10 @@
{ int ttattrf, ttretf = 0, renewctime = 0, renewnctime = 0; \
struct timespec ctime, mtime; \
struct nfsnode *nfsp = VTONFS(v); \
bool haspreopattr = FALSE; \
bool haspreopattr = false; \
nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); \
if (*tl == nfs_true) { \
haspreopattr = TRUE; \
haspreopattr = true; \
nfsm_dissect(tl, u_int32_t *, 6 * NFSX_UNSIGNED); \
fxdr_nfsv3time(tl + 2, &mtime); \
fxdr_nfsv3time(tl + 4, &ctime); \


@ -1,4 +1,4 @@
/* $NetBSD: ffs_softdep.c,v 1.84 2007/02/21 23:00:10 thorpej Exp $ */
/* $NetBSD: ffs_softdep.c,v 1.85 2007/02/22 06:10:48 thorpej Exp $ */
/*
* Copyright 1998 Marshall Kirk McKusick. All Rights Reserved.
@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ffs_softdep.c,v 1.84 2007/02/21 23:00:10 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: ffs_softdep.c,v 1.85 2007/02/22 06:10:48 thorpej Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@ -1901,7 +1901,7 @@ setup_allocindir_phase2(bp, ip, aip)
if (newindirdep) {
if (indirdep->ir_savebp != NULL) {
brelse(newindirdep->ir_savebp);
softdep_trackbufs(ip->i_devvp, -1, FALSE);
softdep_trackbufs(ip->i_devvp, -1, false);
}
WORKITEM_FREE(newindirdep, D_INDIRDEP);
}
@ -1918,7 +1918,7 @@ setup_allocindir_phase2(bp, ip, aip)
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
NULL);
}
softdep_trackbufs(ip->i_devvp, 1, TRUE);
softdep_trackbufs(ip->i_devvp, 1, true);
newindirdep->ir_savebp =
getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0);
newindirdep->ir_savebp->b_flags |= B_ASYNC;
@ -2554,7 +2554,7 @@ indir_trunc(freeblks, dbn, level, lbn, countp)
FREE_LOCK(&lk);
} else {
FREE_LOCK(&lk);
softdep_trackbufs(devvp, 1, FALSE);
softdep_trackbufs(devvp, 1, false);
error = bread(devvp, dbn, (int)fs->fs_bsize, NOCRED, &bp);
if (error)
return (error);
@ -2591,7 +2591,7 @@ indir_trunc(freeblks, dbn, level, lbn, countp)
}
bp->b_flags |= B_INVAL | B_NOCACHE;
brelse(bp);
softdep_trackbufs(devvp, -1, FALSE);
softdep_trackbufs(devvp, -1, false);
return (allerror);
}
@ -3430,7 +3430,7 @@ softdep_disk_io_initiation(bp)
if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) {
indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
brelse(indirdep->ir_savebp);
softdep_trackbufs(NULL, -1, FALSE);
softdep_trackbufs(NULL, -1, false);
/* inline expand WORKLIST_REMOVE(wk); */
wk->wk_state &= ~ONWORKLIST;


@ -1,4 +1,4 @@
/* $NetBSD: lfs_vfsops.c,v 1.230 2007/02/21 23:00:11 thorpej Exp $ */
/* $NetBSD: lfs_vfsops.c,v 1.231 2007/02/22 06:10:49 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007 The NetBSD Foundation, Inc.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.230 2007/02/21 23:00:11 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.231 2007/02/22 06:10:49 thorpej Exp $");
#if defined(_KERNEL_OPT)
#include "opt_quota.h"
@ -1485,15 +1485,15 @@ lfs_issequential_hole(const struct ufsmount *ump,
* treat UNWRITTENs and all resident blocks as 'contiguous'
*/
if (daddr0 != 0 && daddr1 != 0)
return TRUE;
return true;
/*
* both are in hole?
*/
if (daddr0 == 0 && daddr1 == 0)
return TRUE; /* all holes are 'contiguous' for us. */
return true; /* all holes are 'contiguous' for us. */
return FALSE;
return false;
}
/*


@ -1,4 +1,4 @@
/* $NetBSD: ufs_bmap.c,v 1.43 2007/02/21 23:00:11 thorpej Exp $ */
/* $NetBSD: ufs_bmap.c,v 1.44 2007/02/22 06:10:49 thorpej Exp $ */
/*
* Copyright (c) 1989, 1991, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_bmap.c,v 1.43 2007/02/21 23:00:11 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: ufs_bmap.c,v 1.44 2007/02/22 06:10:49 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -62,7 +62,7 @@ ufs_issequential(const struct ufsmount *ump, daddr_t daddr0, daddr_t daddr1)
/* for ufs, blocks in a hole is not 'contiguous'. */
if (daddr0 == 0)
return FALSE;
return false;
return (daddr0 + ump->um_seqinc == daddr1);
}


@ -1,4 +1,4 @@
/* $NetBSD: ufs_readwrite.c,v 1.75 2007/02/21 23:00:11 thorpej Exp $ */
/* $NetBSD: ufs_readwrite.c,v 1.76 2007/02/22 06:10:49 thorpej Exp $ */
/*-
* Copyright (c) 1993
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: ufs_readwrite.c,v 1.75 2007/02/21 23:00:11 thorpej Exp $");
__KERNEL_RCSID(1, "$NetBSD: ufs_readwrite.c,v 1.76 2007/02/22 06:10:49 thorpej Exp $");
#ifdef LFS_READWRITE
#define BLKSIZE(a, b, c) blksize(a, b, c)
@ -79,7 +79,7 @@ READ(void *v)
off_t bytesinfile;
long size, xfersize, blkoffset;
int error, flags, ioflag;
bool usepc = FALSE;
bool usepc = false;
vp = ap->a_vp;
ip = VTOI(vp);
@ -220,9 +220,9 @@ WRITE(void *v)
void *win;
vsize_t bytelen;
bool async;
bool usepc = FALSE;
bool usepc = false;
#ifdef LFS_READWRITE
bool need_unreserve = FALSE;
bool need_unreserve = false;
#endif
struct ufsmount *ump;
@ -292,7 +292,7 @@ WRITE(void *v)
usepc = vp->v_type == VREG;
#ifdef LFS_READWRITE
async = TRUE;
async = true;
lfs_check(vp, LFS_UNUSED_LBN, 0);
#endif /* !LFS_READWRITE */
if (!usepc)
@ -443,7 +443,7 @@ WRITE(void *v)
btofsb(fs, (NIADDR + 1) << fs->lfs_bshift));
if (error)
break;
need_unreserve = TRUE;
need_unreserve = true;
#endif
error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
ap->a_cred, flags, &bp);
@ -476,7 +476,7 @@ WRITE(void *v)
(void)VOP_BWRITE(bp);
lfs_reserve(fs, vp, NULL,
-btofsb(fs, (NIADDR + 1) << fs->lfs_bshift));
need_unreserve = FALSE;
need_unreserve = false;
#else
if (ioflag & IO_SYNC)
(void)bwrite(bp);


@ -1,4 +1,4 @@
/* $NetBSD: uvm_amap.c,v 1.78 2007/02/21 23:00:12 thorpej Exp $ */
/* $NetBSD: uvm_amap.c,v 1.79 2007/02/22 06:05:00 thorpej Exp $ */
/*
*
@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.78 2007/02/21 23:00:12 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.79 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
@ -978,7 +978,7 @@ ReStart:
if (pg->flags & PG_BUSY) {
pg->flags |= PG_WANTED;
amap_unlock(amap);
UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, FALSE,
UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, false,
"cownow", 0);
goto ReStart;
}
@ -1196,12 +1196,12 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
*/
if (slots < amap->am_nused) {
byanon = TRUE;
byanon = true;
lcv = slotoff;
stop = slotoff + slots;
slotend = 0;
} else {
byanon = FALSE;
byanon = false;
lcv = 0;
stop = amap->am_nused;
slotend = slotoff + slots;
@ -1267,7 +1267,7 @@ amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
* => called with swap_syscall_lock held.
* => note that we don't always traverse all anons.
* eg. amaps being wiped out, released anons.
* => return TRUE if failed.
* => return true if failed.
*/
bool
@ -1278,7 +1278,7 @@ amap_swap_off(int startslot, int endslot)
struct vm_amap marker_prev;
struct vm_amap marker_next;
struct lwp *l = curlwp;
bool rv = FALSE;
bool rv = false;
#if defined(DIAGNOSTIC)
memset(&marker_prev, 0, sizeof(marker_prev));


@ -1,4 +1,4 @@
/* $NetBSD: uvm_anon.c,v 1.42 2007/02/21 23:00:12 thorpej Exp $ */
/* $NetBSD: uvm_anon.c,v 1.43 2007/02/22 06:05:00 thorpej Exp $ */
/*
*
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.42 2007/02/21 23:00:12 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.43 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
@ -272,7 +272,7 @@ struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
struct vm_page *pg;
bool locked = FALSE;
bool locked = false;
LOCK_ASSERT(simple_lock_held(&anon->an_lock));
@ -301,7 +301,7 @@ uvm_anon_lockloanpg(struct vm_anon *anon)
simple_lock_try(&pg->uobject->vmobjlock);
} else {
/* object disowned before we got PQ lock */
locked = TRUE;
locked = true;
}
uvm_unlock_pageq();
@ -345,7 +345,7 @@ uvm_anon_lockloanpg(struct vm_anon *anon)
* fetch an anon's page.
*
* => anon must be locked, and is unlocked upon return.
* => returns TRUE if pagein was aborted due to lack of memory.
* => returns true if pagein was aborted due to lack of memory.
*/
bool
@ -378,10 +378,10 @@ uvm_anon_pagein(struct vm_anon *anon)
* so again there's nothing to do.
*/
return FALSE;
return false;
default:
return TRUE;
return true;
}
/*
@ -419,7 +419,7 @@ uvm_anon_pagein(struct vm_anon *anon)
if (uobj) {
simple_unlock(&uobj->vmobjlock);
}
return FALSE;
return false;
}
#endif /* defined(VMSWAP) */


@ -1,4 +1,4 @@
/* $NetBSD: uvm_aobj.c,v 1.86 2007/02/22 04:38:07 matt Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.87 2007/02/22 06:05:00 thorpej Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.86 2007/02/22 04:38:07 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.87 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
@ -293,7 +293,7 @@ uao_find_swslot(struct uvm_object *uobj, int pageidx)
*/
if (UAO_USES_SWHASH(aobj)) {
elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
elt = uao_find_swhash_elt(aobj, pageidx, false);
if (elt)
return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
else
@ -550,7 +550,7 @@ uao_init(void)
if (uao_initialized)
return;
uao_initialized = TRUE;
uao_initialized = true;
LIST_INIT(&uao_list);
simple_lock_init(&uao_list_lock);
}
@ -664,7 +664,7 @@ uao_detach_locked(struct uvm_object *uobj)
if (pg->flags & PG_BUSY) {
pg->flags |= PG_WANTED;
uvm_unlock_pageq();
UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
"uao_det", 0);
simple_lock(&uobj->vmobjlock);
uvm_lock_pageq();
@ -942,7 +942,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
* time through).
*/
done = TRUE; /* be optimistic */
done = true; /* be optimistic */
gotpages = 0; /* # of pages we got so far */
for (lcv = 0, current_offset = offset ; lcv < maxpages ;
lcv++, current_offset += PAGE_SIZE) {
@ -976,7 +976,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
if (lcv == centeridx ||
(flags & PGO_ALLPAGES) != 0)
/* need to do a wait or I/O! */
done = FALSE;
done = false;
continue;
}
@ -1088,7 +1088,7 @@ gotpage:
"sleeping, ptmp->flags 0x%x\n",
ptmp->flags,0,0,0);
UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
FALSE, "uao_get", 0);
false, "uao_get", 0);
simple_lock(&uobj->vmobjlock);
continue;
}
@ -1235,7 +1235,7 @@ uao_dropswap(struct uvm_object *uobj, int pageidx)
* page in every page in every aobj that is paged-out to a range of swslots.
*
* => nothing should be locked.
* => returns TRUE if pagein was aborted due to lack of memory.
* => returns true if pagein was aborted due to lack of memory.
*/
bool
@ -1303,7 +1303,7 @@ restart:
* done with traversal, unlock the list
*/
simple_unlock(&uao_list_lock);
return FALSE;
return false;
}
@ -1311,7 +1311,7 @@ restart:
* page in any pages from aobj in the given range.
*
* => aobj must be locked and is returned locked.
* => returns TRUE if pagein was aborted due to lack of memory.
* => returns true if pagein was aborted due to lack of memory.
*/
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
@ -1382,12 +1382,12 @@ restart:
}
}
return FALSE;
return false;
}
/*
* page in a page from an aobj. used for swap_off.
* returns TRUE if pagein was aborted due to lack of memory.
* returns true if pagein was aborted due to lack of memory.
*
* => aobj must be locked and is returned locked.
*/
@ -1423,10 +1423,10 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
* so again there's nothing to do.
*/
return FALSE;
return false;
default:
return TRUE;
return true;
}
/*
@ -1449,7 +1449,7 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
return FALSE;
return false;
}
/*


@ -1,4 +1,4 @@
/* $NetBSD: uvm_bio.c,v 1.55 2007/02/21 23:00:12 thorpej Exp $ */
/* $NetBSD: uvm_bio.c,v 1.56 2007/02/22 06:05:00 thorpej Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.55 2007/02/21 23:00:12 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.56 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
#include "opt_ubc.h"
@ -588,9 +588,9 @@ ubc_release(void *va, int flags)
simple_lock(&uobj->vmobjlock);
uvm_page_unbusy(pgs, npages);
simple_unlock(&uobj->vmobjlock);
unmapped = TRUE;
unmapped = true;
} else {
unmapped = FALSE;
unmapped = false;
}
simple_lock(&ubc_object.uobj.vmobjlock);


@ -1,4 +1,4 @@
/* $NetBSD: uvm_device.c,v 1.48 2006/09/03 21:37:22 christos Exp $ */
/* $NetBSD: uvm_device.c,v 1.49 2007/02/22 06:05:00 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.48 2006/09/03 21:37:22 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.49 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
@ -187,7 +187,7 @@ udv_attach(void *arg, vm_prot_t accessprot,
if (lcv->u_flags & UVM_DEVICE_HOLD) {
lcv->u_flags |= UVM_DEVICE_WANTED;
UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, FALSE,
UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
"udv_attach",0);
continue;
}
@ -315,7 +315,7 @@ again:
if (udv->u_flags & UVM_DEVICE_HOLD) {
udv->u_flags |= UVM_DEVICE_WANTED;
simple_unlock(&uobj->vmobjlock);
UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE, "udv_detach",0);
UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
goto again;
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.126 2007/02/21 23:00:12 thorpej Exp $ */
/* $NetBSD: uvm_extern.h,v 1.127 2007/02/22 06:05:00 thorpej Exp $ */
/*
*
@ -204,7 +204,7 @@ typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */
#ifdef PMAP_CACHE_VIVT
#define UBC_WANT_UNMAP(vp) (((vp)->v_flag & VTEXT) != 0)
#else
#define UBC_WANT_UNMAP(vp) FALSE
#define UBC_WANT_UNMAP(vp) false
#endif
/*


@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault.c,v 1.118 2007/02/21 23:00:12 thorpej Exp $ */
/* $NetBSD: uvm_fault.c,v 1.119 2007/02/22 06:05:00 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.118 2007/02/21 23:00:12 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.119 2007/02/22 06:05:00 thorpej Exp $");
#include "opt_uvmhist.h"
@ -233,7 +233,7 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
* no mapping? give up.
*/
if (uvmfault_lookup(ufi, TRUE) == FALSE)
if (uvmfault_lookup(ufi, true) == false)
return;
/*
@ -249,7 +249,7 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
uvmfault_unlockmaps(ufi, TRUE);
uvmfault_unlockmaps(ufi, true);
uvm_wait("fltamapcopy");
continue;
}
@ -258,7 +258,7 @@ uvmfault_amapcopy(struct uvm_faultinfo *ufi)
* got it! unlock and return.
*/
uvmfault_unlockmaps(ufi, TRUE);
uvmfault_unlockmaps(ufi, true);
return;
}
/*NOTREACHED*/
@ -305,7 +305,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
*/
for (;;) {
we_own = FALSE; /* TRUE if we set PG_BUSY on a page */
we_own = false; /* true if we set PG_BUSY on a page */
pg = anon->an_page;
/*
@ -348,7 +348,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
0,0,0);
UVM_UNLOCK_AND_WAIT(pg,
&pg->uobject->vmobjlock,
FALSE, "anonget1",0);
false, "anonget1",0);
} else {
/* anon owns page */
uvmfault_unlockall(ufi, amap, NULL, NULL);
@ -376,7 +376,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
uvm_wait("flt_noram1");
} else {
/* we set the PG_BUSY bit */
we_own = TRUE;
we_own = true;
uvmfault_unlockall(ufi, amap, NULL, anon);
/*
@ -727,10 +727,10 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
wire_fault = (fault_flag > 0);
if (wire_fault)
narrow = TRUE; /* don't look for neighborhood
narrow = true; /* don't look for neighborhood
* pages on wire */
else
narrow = FALSE; /* normal fault */
narrow = false; /* normal fault */
/*
* "goto ReFault" means restart the page fault from ground zero.
@ -741,7 +741,7 @@ ReFault:
* lookup and lock the maps
*/
if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
if (uvmfault_lookup(&ufi, false) == false) {
UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
error = EFAULT;
goto done;
@ -768,7 +768,7 @@ ReFault:
UVMHIST_LOG(maphist,
"<- protection failure (prot=0x%x, access=0x%x)",
ufi.entry->protection, access_type, 0, 0);
uvmfault_unlockmaps(&ufi, FALSE);
uvmfault_unlockmaps(&ufi, false);
error = EACCES;
goto done;
}
@ -802,7 +802,7 @@ ReFault:
/* need to clear */
UVMHIST_LOG(maphist,
" need to clear needs_copy and refault",0,0,0,0);
uvmfault_unlockmaps(&ufi, FALSE);
uvmfault_unlockmaps(&ufi, false);
uvmfault_amapcopy(&ufi);
uvmexp.fltamcopy++;
goto ReFault;
@ -831,7 +831,7 @@ ReFault:
*/
if (amap == NULL && uobj == NULL) {
uvmfault_unlockmaps(&ufi, FALSE);
uvmfault_unlockmaps(&ufi, false);
UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
error = EFAULT;
goto done;
@ -844,7 +844,7 @@ ReFault:
* ReFault we will disable this by setting "narrow" to true.
*/
if (narrow == FALSE) {
if (narrow == false) {
/* wide fault (!narrow) */
KASSERT(uvmadvice[ufi.entry->advice].advice ==
@ -862,7 +862,7 @@ ReFault:
npages = nback + nforw + 1;
centeridx = nback;
narrow = TRUE; /* ensure only once per-fault */
narrow = true; /* ensure only once per-fault */
} else {
@ -935,7 +935,7 @@ ReFault:
*/
currva = startva;
shadowed = FALSE;
shadowed = false;
for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
/*
@ -962,7 +962,7 @@ ReFault:
pages[lcv] = PGO_DONTCARE;
if (lcv == centeridx) { /* save center for later! */
shadowed = TRUE;
shadowed = true;
continue;
}
anon = anons[lcv];
@ -997,9 +997,9 @@ ReFault:
/* locked: maps(read), amap(if there) */
LOCK_ASSERT(amap == NULL || simple_lock_held(&amap->am_l));
/* (shadowed == TRUE) if there is an anon at the faulting address */
/* (shadowed == true) if there is an anon at the faulting address */
UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
(uobj && shadowed == FALSE),0,0);
(uobj && shadowed == false),0,0);
/*
* note that if we are really short of RAM we could sleep in the above
@ -1017,7 +1017,7 @@ ReFault:
* providing a pgo_fault routine.
*/
if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
if (uobj && shadowed == false && uobj->pgops->pgo_fault != NULL) {
simple_lock(&uobj->vmobjlock);
/* locked: maps(read), amap (if there), uobj */
@ -1042,7 +1042,7 @@ ReFault:
* (PGO_LOCKED).
*/
if (uobj && shadowed == FALSE) {
if (uobj && shadowed == false) {
simple_lock(&uobj->vmobjlock);
/* locked (!shadowed): maps(read), amap (if there), uobj */
@ -1177,7 +1177,7 @@ ReFault:
* redirect case 2: if we are not shadowed, go to case 2.
*/
if (shadowed == FALSE)
if (shadowed == false)
goto Case2;
/* locked: maps(read), amap */
@ -1472,7 +1472,7 @@ Case2:
if (uobj == NULL) {
uobjpage = PGO_DONTCARE;
promote = TRUE; /* always need anon here */
promote = true; /* always need anon here */
} else {
KASSERT(uobjpage != PGO_DONTCARE);
promote = cow_now && UVM_ET_ISCOPYONWRITE(ufi.entry);
@ -1558,14 +1558,14 @@ Case2:
ufi.orig_rvaddr - ufi.entry->start))) {
if (locked)
uvmfault_unlockall(&ufi, amap, NULL, NULL);
locked = FALSE;
locked = false;
}
/*
* didn't get the lock? release the page and retry.
*/
if (locked == FALSE) {
if (locked == false) {
UVMHIST_LOG(maphist,
" wasn't able to relock after fault: retry",
0,0,0,0);
@ -1610,7 +1610,7 @@ Case2:
KASSERT(uobj == NULL || uobj == uobjpage->uobject);
KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
(uobjpage->flags & PG_CLEAN) != 0);
if (promote == FALSE) {
if (promote == false) {
/*
* we are not promoting. if the mapping is COW ensure that we
@ -1932,11 +1932,11 @@ uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
*/
KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
if (uvm_map_lookup_entry(map, start, &entry) == false)
panic("uvm_fault_unwire_locked: address not in map");
for (va = start; va < end; va += PAGE_SIZE) {
if (pmap_extract(pmap, va, &pa) == FALSE)
if (pmap_extract(pmap, va, &pa) == false)
continue;
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault_i.h,v 1.22 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_fault_i.h,v 1.23 2007/02/22 06:05:01 thorpej Exp $ */
/*
*
@ -81,7 +81,7 @@ uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
simple_unlock(&uobj->vmobjlock);
if (amap)
amap_unlock(amap);
uvmfault_unlockmaps(ufi, FALSE);
uvmfault_unlockmaps(ufi, false);
}
/*
@ -91,7 +91,7 @@ uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* params properly filled in
* => we will lookup the map entry (handling submaps) as we go
* => if the lookup is a success we will return with the maps locked
* => if "write_lock" is TRUE, we write_lock the map, otherwise we only
* => if "write_lock" is true, we write_lock the map, otherwise we only
* get a read lock.
* => note that submaps can only appear in the kernel and they are
* required to use the same virtual addresses as the map they
@ -124,7 +124,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
* a fault.
*/
if (ufi->map->flags & VM_MAP_INTRSAFE)
return (FALSE);
return (false);
/*
* lock map
@ -141,7 +141,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
&ufi->entry)) {
uvmfault_unlockmaps(ufi, write_lock);
return(FALSE);
return(false);
}
/*
@ -170,7 +170,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
*/
ufi->mapv = ufi->map->timestamp;
return(TRUE);
return(true);
} /* while loop */
@ -181,7 +181,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
* uvmfault_relock: attempt to relock the same version of the map
*
* => fault data structures should be unlocked before calling.
* => if a success (TRUE) maps will be locked after call.
* => if a success (true) maps will be locked after call.
*/
static __inline bool
@ -193,7 +193,7 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
*/
if (ufi == NULL) {
return TRUE;
return true;
}
uvmexp.fltrelck++;
@ -206,11 +206,11 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
vm_map_lock_read(ufi->map);
if (ufi->mapv != ufi->map->timestamp) {
vm_map_unlock_read(ufi->map);
return(FALSE);
return(false);
}
uvmexp.fltrelckok++;
return(TRUE);
return(true);
}
#endif /* _UVM_UVM_FAULT_I_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: uvm_glue.c,v 1.102 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_glue.c,v 1.103 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.102 2007/02/21 23:00:13 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.103 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_coredump.h"
#include "opt_kgdb.h"
@ -155,7 +155,7 @@ uvm_chgkprot(caddr_t addr, size_t len, int rw)
/*
* Extract physical address for the page.
*/
if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
if (pmap_extract(pmap_kernel(), sva, &pa) == false)
panic("chgkprot: invalid page");
pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
}
@ -207,7 +207,7 @@ void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{
if (shared == TRUE) {
if (shared == true) {
p2->p_vmspace = NULL;
uvmspace_share(p1, p2);
} else {
@ -299,12 +299,12 @@ uvm_uarea_alloc(vaddr_t *uaddrp)
uvm_nuarea--;
simple_unlock(&uvm_uareas_slock);
*uaddrp = uaddr;
return TRUE;
return true;
} else {
simple_unlock(&uvm_uareas_slock);
*uaddrp = uvm_km_alloc(kernel_map, USPACE, USPACE_ALIGN,
UVM_KMF_PAGEABLE);
return FALSE;
return false;
}
}
@ -460,11 +460,11 @@ void
uvm_kick_scheduler(void)
{
if (uvm.swap_running == FALSE)
if (uvm.swap_running == false)
return;
mutex_enter(&uvm.scheduler_mutex);
uvm.scheduler_kicked = TRUE;
uvm.scheduler_kicked = true;
cv_signal(&uvm.scheduler_cv);
mutex_exit(&uvm.scheduler_mutex);
}
@ -522,10 +522,10 @@ uvm_scheduler(void)
*/
if ((l = ll) == NULL) {
mutex_enter(&uvm.scheduler_mutex);
if (uvm.scheduler_kicked == FALSE)
if (uvm.scheduler_kicked == false)
cv_wait(&uvm.scheduler_cv,
&uvm.scheduler_mutex);
uvm.scheduler_kicked = FALSE;
uvm.scheduler_kicked = false;
mutex_exit(&uvm.scheduler_mutex);
continue;
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.c,v 1.64 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_loan.c,v 1.65 2007/02/22 06:05:01 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.64 2007/02/21 23:00:13 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.65 2007/02/22 06:05:01 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -216,7 +216,7 @@ uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
if (aref->ar_amap)
amap_unlock(aref->ar_amap);
uvmfault_unlockmaps(ufi, FALSE);
uvmfault_unlockmaps(ufi, false);
UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
return (result);
}
@ -280,7 +280,7 @@ uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
* an unmapped region (an error)
*/
if (!uvmfault_lookup(&ufi, FALSE)) {
if (!uvmfault_lookup(&ufi, false)) {
error = ENOENT;
goto fail;
}
@ -702,14 +702,14 @@ uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
ufi->orig_rvaddr - ufi->entry->start))) {
if (locked)
uvmfault_unlockall(ufi, amap, NULL, NULL);
locked = FALSE;
locked = false;
}
/*
* didn't get the lock? release the page and retry.
*/
if (locked == FALSE) {
if (locked == false) {
if (pg->flags & PG_WANTED) {
wakeup(pg);
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.c,v 1.233 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_map.c,v 1.234 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.233 2007/02/21 23:00:13 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.234 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@ -418,21 +418,21 @@ _uvm_map_check(struct vm_map *map, const char *name,
int
_uvm_map_sanity(struct vm_map *map)
{
bool first_free_found = FALSE;
bool hint_found = FALSE;
bool first_free_found = false;
bool hint_found = false;
const struct vm_map_entry *e;
e = &map->header;
for (;;) {
if (map->first_free == e) {
first_free_found = TRUE;
first_free_found = true;
} else if (!first_free_found && e->next->start > e->end) {
printf("first_free %p should be %p\n",
map->first_free, e);
return -1;
}
if (map->hint == e) {
hint_found = TRUE;
hint_found = true;
}
e = e->next;
@ -992,7 +992,7 @@ uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
*/
retry:
if (vm_map_lock_try(map) == FALSE) {
if (vm_map_lock_try(map) == false) {
if (flags & UVM_FLAG_TRYLOCK) {
return EAGAIN;
}
@ -1426,7 +1426,7 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
struct vm_map_entry **entry /* OUT */)
{
struct vm_map_entry *cur;
bool use_tree = FALSE;
bool use_tree = false;
UVMHIST_FUNC("uvm_map_lookup_entry");
UVMHIST_CALLED(maphist);
@ -1466,17 +1466,17 @@ uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
cur, 0, 0, 0);
uvm_mapent_check(*entry);
return (TRUE);
return (true);
}
if (map->nentries > 30)
use_tree = TRUE;
use_tree = true;
} else {
/*
* invalid hint. use tree.
*/
use_tree = TRUE;
use_tree = true;
}
uvm_map_check(map, __func__);
@ -1524,7 +1524,7 @@ got:
KDASSERT((*entry)->start <= address);
KDASSERT(address < (*entry)->end);
uvm_mapent_check(*entry);
return (TRUE);
return (true);
}
break;
}
@ -1537,7 +1537,7 @@ failed:
KDASSERT((*entry) == &map->header || (*entry)->end <= address);
KDASSERT((*entry)->next == &map->header ||
address < (*entry)->next->start);
return (FALSE);
return (false);
}
/*
@ -1961,7 +1961,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
* find first entry
*/
if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
/* clip and go... */
entry = first_entry;
UVM_MAP_CLIP_START(map, entry, start, umr);
@ -2236,11 +2236,11 @@ uvm_map_reserve(struct vm_map *map, vsize_t size,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
return (FALSE);
return (false);
}
UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
return (TRUE);
return (true);
}
/*
@ -2248,7 +2248,7 @@ uvm_map_reserve(struct vm_map *map, vsize_t size,
* real mappings.
*
* => caller must WRITE-LOCK the map
* => we return TRUE if replacement was a success
* => we return true if replacement was a success
* => we expect the newents chain to have nnewents entrys on it and
* we expect newents->prev to point to the last entry on the list
* => note newents is allowed to be NULL
@ -2267,7 +2267,7 @@ uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
*/
if (!uvm_map_lookup_entry(map, start, &oldent)) {
return (FALSE);
return (false);
}
/*
@ -2279,7 +2279,7 @@ uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
}
if (oldent->start != start || oldent->end != end ||
oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
return (FALSE);
return (false);
}
#ifdef DIAGNOSTIC
@ -2364,7 +2364,7 @@ uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
*/
uvm_mapent_free(oldent);
return (TRUE);
return (true);
}
/*
@ -2585,7 +2585,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
* 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
*/
if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
copy_ok = 1;
if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
nchain)) {
@ -2678,7 +2678,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
nchain);
vm_map_unlock(dstmap);
if (error == FALSE) {
if (error == false) {
error = EIO;
goto bad2;
}
@ -2897,7 +2897,7 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
old_prot == VM_PROT_NONE &&
new_prot != VM_PROT_NONE) {
if (uvm_map_pageable(map, entry->start,
entry->end, FALSE,
entry->end, false,
UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
/*
@ -3034,7 +3034,7 @@ uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
* => regions specified as not pageable require lock-down (wired) memory
* and page tables.
* => map must never be read-locked
* => if islocked is TRUE, map is already write-locked
* => if islocked is true, map is already write-locked
* => we always unlock the map, since we must downgrade to a read-lock
* to call uvm_fault_wire()
* => XXXCDC: check this and try and clean it up.
@ -3066,7 +3066,7 @@ uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
* making any changes.
*/
if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
if ((lockflags & UVM_LK_EXIT) == 0)
vm_map_unlock(map);
@ -3544,7 +3544,7 @@ uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
if (uvm_map_lookup_entry(map, start, &entry) == false) {
vm_map_unlock_read(map);
return EFAULT;
}
@ -3698,12 +3698,12 @@ uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
struct vm_map_entry *tmp_entry;
if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
return (FALSE);
return (false);
}
entry = tmp_entry;
while (start < end) {
if (entry == &map->header) {
return (FALSE);
return (false);
}
/*
@ -3711,7 +3711,7 @@ uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
*/
if (start < entry->start) {
return (FALSE);
return (false);
}
/*
@ -3719,12 +3719,12 @@ uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
*/
if ((entry->protection & protection) != protection) {
return (FALSE);
return (false);
}
start = entry->end;
entry = entry->next;
}
return (TRUE);
return (true);
}
/*
@ -4932,11 +4932,11 @@ vm_map_starved_p(struct vm_map *map)
{
if ((map->flags & VM_MAP_WANTVA) != 0) {
return TRUE;
return true;
}
/* XXX */
if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
return TRUE;
return true;
}
return FALSE;
return false;
}
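
The uvm_map_checkprot hunks above spell out the walk this conversion touches: scan the entries covering [start, end), bail out with false on any gap or on an entry that lacks the requested protection, and return true only once the whole range has been covered. Below is a minimal standalone model of that walk using the C99 <stdbool.h> names the commit adopts; the entry array, protection bits, and addresses are invented for illustration and are not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

#define PROT_READ	0x1
#define PROT_WRITE	0x2

/* Simplified stand-in for struct vm_map_entry: sorted, non-overlapping. */
struct entry {
	unsigned long start, end;
	int protection;
};

/*
 * Model of the uvm_map_checkprot() loop: true only if the entries cover
 * [start, end) without a gap and every entry grants the protection asked for.
 */
static bool
check_prot(const struct entry *ents, int nents,
    unsigned long start, unsigned long end, int protection)
{
	int i = 0;

	while (start < end) {
		if (i == nents)
			return false;		/* ran off the entry list */
		if (start < ents[i].start)
			return false;		/* hole in the range */
		if ((ents[i].protection & protection) != protection)
			return false;		/* entry lacks the protection */
		start = ents[i].end;
		i++;
	}
	return true;
}

int
main(void)
{
	struct entry map[] = {
		{ 0x1000, 0x3000, PROT_READ | PROT_WRITE },
		{ 0x3000, 0x5000, PROT_READ },
	};

	printf("%d\n", check_prot(map, 2, 0x1000, 0x4000, PROT_READ));	/* 1 */
	printf("%d\n", check_prot(map, 2, 0x1000, 0x4000, PROT_WRITE));	/* 0 */
	return 0;
}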

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_map.h,v 1.55 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_map.h,v 1.56 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -417,7 +417,7 @@ vm_map_lock_try(struct vm_map *map)
simple_lock(&map->flags_lock);
if (map->flags & VM_MAP_BUSY) {
simple_unlock(&map->flags_lock);
return (FALSE);
return (false);
}
rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
&map->flags_lock) == 0);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_mmap.c,v 1.106 2007/02/21 23:00:13 thorpej Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.107 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -51,7 +51,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.106 2007/02/21 23:00:13 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_mmap.c,v 1.107 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_compat_netbsd.h"
#include "opt_pax.h"
@ -177,7 +177,7 @@ sys_mincore(struct lwp *l, void *v, register_t *retval)
}
vm_map_lock_read(map);
if (uvm_map_lookup_entry(map, start, &entry) == FALSE) {
if (uvm_map_lookup_entry(map, start, &entry) == false) {
error = ENOMEM;
goto out;
}
@ -607,12 +607,12 @@ sys___msync13(struct lwp *l, void *v, register_t *retval)
vm_map_lock_read(map);
rv = uvm_map_lookup_entry(map, addr, &entry);
if (rv == TRUE) {
if (rv == true) {
addr = entry->start;
size = entry->end - entry->start;
}
vm_map_unlock_read(map);
if (rv == FALSE)
if (rv == false)
return (EINVAL);
}
@ -736,7 +736,7 @@ sys_mprotect(struct lwp *l, void *v, register_t *retval)
size = round_page(size);
error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
FALSE);
false);
return error;
}
@ -923,7 +923,7 @@ sys_mlock(struct lwp *l, void *v, register_t *retval)
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
return (EAGAIN);
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, false,
0);
if (error == EFAULT)
error = ENOMEM;
@ -966,7 +966,7 @@ sys_munlock(struct lwp *l, void *v, register_t *retval)
if (addr + size < addr)
return (EINVAL);
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, true,
0);
if (error == EFAULT)
error = ENOMEM;
@ -1216,7 +1216,7 @@ uvm_mmap(map, addr, size, prot, maxprot, flags, handle, foff, locklimit)
*/
error = uvm_map_pageable(map, *addr, *addr + size,
FALSE, UVM_LK_ENTER);
false, UVM_LK_ENTER);
if (error) {
uvm_unmap(map, *addr, *addr + size);
return error;
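
The sys_mlock and sys_munlock hunks above pass false and true respectively as the new_pageable argument to uvm_map_pageable; from userland these paths are reached through the standard mlock(2) and munlock(2) calls. A minimal usage sketch follows (the buffer size is arbitrary, and the calls can fail with EAGAIN or ENOMEM if RLIMIT_MEMLOCK is too low):

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	size_t len = 64 * 1024;
	char *buf = malloc(len);

	if (buf == NULL)
		return 1;
	memset(buf, 0, len);

	/* Wire the range: ends up in uvm_map_pageable(..., false, 0). */
	if (mlock(buf, len) == -1) {
		perror("mlock");
		return 1;
	}

	/* ... use memory that must not be paged out ... */

	/* Unwire the range: uvm_map_pageable(..., true, 0). */
	if (munlock(buf, len) == -1) {
		perror("munlock");
		return 1;
	}
	free(buf);
	return 0;
}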

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.118 2007/02/21 23:00:14 thorpej Exp $ */
/* $NetBSD: uvm_page.c,v 1.119 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.118 2007/02/21 23:00:14 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.119 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@ -107,7 +107,7 @@ int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
* XXX disabled until we can find a way to do this without causing
* problems for either CPU caches or DMA latency.
*/
bool vm_page_zero_enable = FALSE;
bool vm_page_zero_enable = false;
/*
* local variables
@ -138,7 +138,7 @@ static struct pglist uvm_bootbucket;
* uvm_pageboot_alloc().
*/
static bool have_recolored_pages /* = FALSE */;
static bool have_recolored_pages /* = false */;
MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
@ -431,7 +431,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
* done!
*/
uvm.page_init_done = TRUE;
uvm.page_init_done = true;
}
/*
@ -468,7 +468,7 @@ uvm_setpagesize(void)
vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
static bool initialized = FALSE;
static bool initialized = false;
vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
vaddr_t vaddr;
@ -478,14 +478,14 @@ uvm_pageboot_alloc(vsize_t size)
/*
* on first call to this function, initialize ourselves.
*/
if (initialized == FALSE) {
if (initialized == false) {
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
/* round it the way we like it */
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
initialized = TRUE;
initialized = true;
}
/* round to page size */
@ -577,7 +577,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
#endif
{
if (uvm.page_init_done == TRUE)
if (uvm.page_init_done == true)
panic("uvm_page_physget: called _after_ bootstrap");
if (vm_physmem[lcv].free_list != freelist)
@ -599,7 +599,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
/* structure copy */
vm_physmem[x] = vm_physmem[x+1];
}
return (TRUE);
return (true);
}
/* try from rear */
@ -618,7 +618,7 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
/* structure copy */
vm_physmem[x] = vm_physmem[x+1];
}
return (TRUE);
return (true);
}
}
@ -648,10 +648,10 @@ uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
/* structure copy */
vm_physmem[x] = vm_physmem[x+1];
}
return (TRUE);
return (true);
}
return (FALSE); /* whoops! */
return (false); /* whoops! */
}
bool
@ -661,9 +661,9 @@ uvm_page_physget(paddr_t *paddrp)
/* try in the order of freelist preference */
for (i = 0; i < VM_NFREELIST; i++)
if (uvm_page_physget_freelist(paddrp, i) == TRUE)
return (TRUE);
return (FALSE);
if (uvm_page_physget_freelist(paddrp, i) == true)
return (true);
return (false);
}
#endif /* PMAP_STEAL_MEMORY */
@ -908,7 +908,7 @@ uvm_page_recolor(int newncolors)
if (newncolors <= uvmexp.ncolors)
return;
if (uvm.page_init_done == FALSE) {
if (uvm.page_init_done == false) {
uvmexp.ncolors = newncolors;
return;
}
@ -963,7 +963,7 @@ uvm_page_recolor(int newncolors)
return;
}
have_recolored_pages = TRUE;
have_recolored_pages = true;
uvm_unlock_fpageq(s);
}
@ -1543,7 +1543,7 @@ uvm_pageidlezero(void)
if (sched_whichqs != 0)
goto quit;
if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
uvm.page_idle_zero = FALSE;
uvm.page_idle_zero = false;
goto quit;
}
for (free_list = 0; free_list < VM_NFREELIST; free_list++) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pager.c,v 1.80 2007/02/21 23:00:14 thorpej Exp $ */
/* $NetBSD: uvm_pager.c,v 1.81 2007/02/22 06:05:01 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.80 2007/02/21 23:00:14 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.81 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@ -92,16 +92,16 @@ uvm_pager_init(void)
sva = 0;
pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
FALSE, NULL);
false, NULL);
simple_lock_init(&pager_map_wanted_lock);
pager_map_wanted = FALSE;
pager_map_wanted = false;
emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
UVM_KMF_VAONLY);
#if defined(DEBUG)
if (emergva == 0)
panic("emergva");
#endif
emerginuse = FALSE;
emerginuse = false;
/*
* init ASYNC I/O queue
@ -158,11 +158,11 @@ ReStart:
simple_lock(&pager_map_wanted_lock);
if (emerginuse) {
UVM_UNLOCK_AND_WAIT(&emergva,
&pager_map_wanted_lock, FALSE,
&pager_map_wanted_lock, false,
"emergva", 0);
goto ReStart;
}
emerginuse = TRUE;
emerginuse = true;
simple_unlock(&pager_map_wanted_lock);
kva = emergva;
/* The shift implicitly truncates to PAGE_SIZE */
@ -174,9 +174,9 @@ ReStart:
return(0);
}
simple_lock(&pager_map_wanted_lock);
pager_map_wanted = TRUE;
pager_map_wanted = true;
UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
"pager_map", 0);
goto ReStart;
}
@ -218,7 +218,7 @@ uvm_pagermapout(vaddr_t kva, int npages)
pmap_kremove(kva, npages << PAGE_SHIFT);
if (kva == emergva) {
simple_lock(&pager_map_wanted_lock);
emerginuse = FALSE;
emerginuse = false;
wakeup(&emergva);
simple_unlock(&pager_map_wanted_lock);
return;
@ -228,7 +228,7 @@ uvm_pagermapout(vaddr_t kva, int npages)
uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
simple_lock(&pager_map_wanted_lock);
if (pager_map_wanted) {
pager_map_wanted = FALSE;
pager_map_wanted = false;
wakeup(pager_map);
}
simple_unlock(&pager_map_wanted_lock);
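
The pager_map hunks above all implement one handshake: a thread that finds the resource busy sets a "wanted" flag (pager_map_wanted = true, emerginuse = true) before sleeping, and whoever releases the resource clears the flag and issues a wakeup. Below is a userland analogue of that pattern, with pthreads standing in for simple_lock and UVM_UNLOCK_AND_WAIT; none of these names are the kernel's, and this is only a sketch of the handshake.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wanted_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wanted_cv   = PTHREAD_COND_INITIALIZER;
static bool map_busy = false;		/* is the shared resource in use? */
static bool map_wanted = false;		/* cf. pager_map_wanted */

static void
map_acquire(void)
{
	pthread_mutex_lock(&wanted_lock);
	while (map_busy) {
		map_wanted = true;		/* cf. pager_map_wanted = true */
		pthread_cond_wait(&wanted_cv, &wanted_lock);
	}
	map_busy = true;
	pthread_mutex_unlock(&wanted_lock);
}

static void
map_release(void)
{
	pthread_mutex_lock(&wanted_lock);
	map_busy = false;
	if (map_wanted) {
		map_wanted = false;		/* cf. pager_map_wanted = false */
		pthread_cond_broadcast(&wanted_cv);	/* cf. wakeup(pager_map) */
	}
	pthread_mutex_unlock(&wanted_lock);
}

static void *
worker(void *arg)
{
	map_acquire();
	printf("thread %ld holds the resource\n", (long)arg);
	map_release();
	return NULL;
}

int
main(void)
{
	pthread_t t[2];
	long i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}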

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdaemon.c,v 1.83 2007/02/21 23:00:14 thorpej Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.84 2007/02/22 06:05:01 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.83 2007/02/21 23:00:14 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.84 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@ -154,7 +154,7 @@ uvm_wait(const char *wmsg)
simple_lock(&uvm.pagedaemon_lock);
wakeup(&uvm.pagedaemon); /* wake the daemon! */
UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, false, wmsg,
timo);
splx(s);
@ -243,7 +243,7 @@ uvm_pageout(void *arg)
UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
&uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
&uvm.pagedaemon_lock, false, "pgdaemon", 0);
uvmexp.pdwoke++;
UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);
@ -307,7 +307,7 @@ uvm_pageout(void *arg)
/*
* free any cached u-areas we don't need
*/
uvm_uarea_drain(TRUE);
uvm_uarea_drain(true);
}
/*NOTREACHED*/
@ -416,7 +416,7 @@ swapcluster_allocslots(struct swapcluster *swc)
/* Even with strange MAXPHYS, the shift
implicitly rounds down to a page. */
npages = MAXPHYS >> PAGE_SHIFT;
slot = uvm_swap_alloc(&npages, TRUE);
slot = uvm_swap_alloc(&npages, true);
if (slot == 0) {
return ENOMEM;
}
@ -506,27 +506,27 @@ swapcluster_flush(struct swapcluster *swc, bool now)
* uvmpd_dropswap: free any swap allocated to this page.
*
* => called with owner locked.
* => return TRUE if a page had an associated slot.
* => return true if a page had an associated slot.
*/
static bool
uvmpd_dropswap(struct vm_page *pg)
{
bool result = FALSE;
bool result = false;
struct vm_anon *anon = pg->uanon;
if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
uvm_swap_free(anon->an_swslot, 1);
anon->an_swslot = 0;
pg->flags &= ~PG_CLEAN;
result = TRUE;
result = true;
} else if (pg->pqflags & PQ_AOBJ) {
int slot = uao_set_swslot(pg->uobject,
pg->offset >> PAGE_SHIFT, 0);
if (slot) {
uvm_swap_free(slot, 1);
pg->flags &= ~PG_CLEAN;
result = TRUE;
result = true;
}
}
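
The uvmpd_dropswap body above reduces to two cases: an anon-owned page releases the anon's swap slot, an aobj-owned page asks the object to clear its slot, and in either case the page loses PG_CLEAN (its contents now live only in RAM) and true is returned. A standalone model of that decision, with invented stand-in types rather than the kernel's struct vm_page or the uao interface:

#include <stdbool.h>
#include <stdio.h>

#define PQ_ANON		0x1
#define PQ_AOBJ		0x2
#define PG_CLEAN	0x4

struct anon { int swslot; };
struct page {
	int pqflags;
	int flags;
	struct anon *uanon;
	int obj_swslot;		/* stand-in for the object's slot bookkeeping */
};

static int swap_freed;		/* slots released, counted for the demo */

static void
swap_free(int slot)
{
	(void)slot;
	swap_freed++;
}

/* Model of uvmpd_dropswap(): true if the page had an associated slot. */
static bool
dropswap(struct page *pg)
{
	bool result = false;

	if ((pg->pqflags & PQ_ANON) && pg->uanon->swslot) {
		swap_free(pg->uanon->swslot);
		pg->uanon->swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = pg->obj_swslot;	/* cf. uao_set_swslot(..., 0) */

		pg->obj_swslot = 0;
		if (slot) {
			swap_free(slot);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}
	return result;
}

int
main(void)
{
	struct anon a = { .swslot = 7 };
	struct page pg = { .pqflags = PQ_ANON, .flags = PG_CLEAN, .uanon = &a };
	bool had_slot = dropswap(&pg);

	printf("had slot: %d, slots freed: %d\n", had_slot, swap_freed);
	return 0;
}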
@ -536,7 +536,7 @@ uvmpd_dropswap(struct vm_page *pg)
/*
* uvmpd_trydropswap: try to free any swap allocated to this page.
*
* => return TRUE if a slot is successfully freed.
* => return true if a slot is successfully freed.
*/
bool
@ -546,7 +546,7 @@ uvmpd_trydropswap(struct vm_page *pg)
bool result;
if ((pg->flags & PG_BUSY) != 0) {
return FALSE;
return false;
}
/*
@ -555,7 +555,7 @@ uvmpd_trydropswap(struct vm_page *pg)
slock = uvmpd_trylockowner(pg);
if (slock == NULL) {
return FALSE;
return false;
}
/*
@ -564,7 +564,7 @@ uvmpd_trydropswap(struct vm_page *pg)
if ((pg->flags & PG_BUSY) != 0) {
simple_unlock(slock);
return FALSE;
return false;
}
result = uvmpd_dropswap(pg);
@ -821,7 +821,7 @@ uvmpd_scan_queue(void)
}
simple_unlock(slock);
swapcluster_flush(&swc, FALSE);
swapcluster_flush(&swc, false);
uvm_lock_pageq();
/*
@ -839,7 +839,7 @@ uvmpd_scan_queue(void)
#if defined(VMSWAP)
uvm_unlock_pageq();
swapcluster_flush(&swc, TRUE);
swapcluster_flush(&swc, true);
uvm_lock_pageq();
#endif /* defined(VMSWAP) */
}
@ -908,7 +908,7 @@ uvmpd_scan(void)
/*
* uvm_reclaimable: decide whether to wait for pagedaemon.
*
* => return TRUE if it seems to be worth to do uvm_wait.
* => return true if it seems to be worth to do uvm_wait.
*
* XXX should be tunable.
* XXX should consider pools, etc?
@ -925,7 +925,7 @@ uvm_reclaimable(void)
*/
if (!uvm_swapisfull()) {
return TRUE;
return true;
}
/*
@ -942,14 +942,14 @@ uvm_reclaimable(void)
uvm_estimatepageable(&active, &inactive);
if (filepages >= MIN((active + inactive) >> 4,
5 * 1024 * 1024 >> PAGE_SHIFT)) {
return TRUE;
return true;
}
/*
* kill the process, fail allocation, etc..
*/
return FALSE;
return false;
}
void
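
The uvm_reclaimable hunks encode the heuristic described in the comment above: waiting for the pagedaemon is worthwhile if swap is not yet full, or if file-backed pages still amount to at least min((active + inactive) / 16, 5 MB worth of pages). A standalone model of that threshold arithmetic; the page counts and the 4 KB page size are assumptions made for the demo.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* assume 4 KB pages for the demo */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Model of the uvm_reclaimable() decision. */
static bool
reclaimable(bool swap_full, long filepages, long active, long inactive)
{
	if (!swap_full)
		return true;

	if (filepages >= MIN((active + inactive) >> 4,
	    5L * 1024 * 1024 >> PAGE_SHIFT))
		return true;

	return false;
}

int
main(void)
{
	/*
	 * With 16384 active + 16384 inactive pages the threshold is
	 * min(32768 / 16, 1280) = 1280 pages, i.e. 5 MB of 4 KB pages.
	 */
	printf("%d\n", reclaimable(true, 2000, 16384, 16384));	/* 1 */
	printf("%d\n", reclaimable(true,  100, 16384, 16384));	/* 0 */
	return 0;
}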

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdpolicy_clock.c,v 1.7 2007/02/21 23:00:14 thorpej Exp $ */
/* $NetBSD: uvm_pdpolicy_clock.c,v 1.8 2007/02/22 06:05:01 thorpej Exp $ */
/* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */
/*
@ -74,7 +74,7 @@
#else /* defined(PDSIM) */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.7 2007/02/21 23:00:14 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.8 2007/02/22 06:05:01 thorpej Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@ -160,13 +160,13 @@ uvmpdpol_scaninit(void)
filereact = fileunder || (!fileover && (anonover || execover));
execreact = execunder || (!execover && (anonover || fileover));
if (filereact && execreact && (anonreact || uvm_swapisfull())) {
anonreact = filereact = execreact = FALSE;
anonreact = filereact = execreact = false;
}
ss->ss_anonreact = anonreact;
ss->ss_filereact = filereact;
ss->ss_execreact = execreact;
ss->ss_first = TRUE;
ss->ss_first = true;
}
struct vm_page *
@ -183,7 +183,7 @@ uvmpdpol_selectvictim(void)
if (ss->ss_first) {
pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
ss->ss_first = FALSE;
ss->ss_first = false;
} else {
pg = ss->ss_nextpg;
if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.7 2007/02/21 23:00:14 thorpej Exp $ */
/* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.8 2007/02/22 06:05:01 thorpej Exp $ */
/*-
* Copyright (c)2005, 2006 YAMAMOTO Takashi,
@ -43,7 +43,7 @@
#else /* defined(PDSIM) */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.7 2007/02/21 23:00:14 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.8 2007/02/22 06:05:01 thorpej Exp $");
#include "opt_ddb.h"
@ -462,10 +462,10 @@ nonresident_lookupremove(objid_t obj, off_t idx)
for (i = 0; i < BUCKETSIZE; i++) {
if (b->pages[i] == cookie) {
b->pages[i] = NONRES_COOKIE_INVAL;
return TRUE;
return true;
}
}
return FALSE;
return false;
}
static objid_t
@ -713,7 +713,7 @@ clockpro_pageenqueue(struct vm_page *pg)
s->s_npages++;
pg->pqflags &= ~(PQ_HOT|PQ_TEST);
if (speculative) {
hot = FALSE;
hot = false;
PDPOL_EVCNT_INCR(speculativeenqueue);
} else {
hot = nonresident_pagelookupremove(pg);
@ -1215,9 +1215,9 @@ uvmpdpol_needsscan_p(void)
struct clockpro_state * const s = &clockpro;
if (s->s_ncold < s->s_coldtarget) {
return TRUE;
return true;
}
return FALSE;
return false;
}
void

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_swap.c,v 1.120 2007/02/22 04:38:07 matt Exp $ */
/* $NetBSD: uvm_swap.c,v 1.121 2007/02/22 06:05:02 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.120 2007/02/22 04:38:07 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.121 2007/02/22 06:05:02 thorpej Exp $");
#include "fs_nfs.h"
#include "opt_uvmhist.h"
@ -282,7 +282,7 @@ uvm_swap_init(void)
/*
* done!
*/
uvm.swap_running = TRUE;
uvm.swap_running = true;
UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_swap.h,v 1.15 2007/02/21 23:00:15 thorpej Exp $ */
/* $NetBSD: uvm_swap.h,v 1.16 2007/02/22 06:05:02 thorpej Exp $ */
/*
* Copyright (c) 1997 Matthew R. Green
@ -50,7 +50,7 @@ void uvm_swap_free(int, int);
void uvm_swap_markbad(int, int);
bool uvm_swapisfull(void);
#else /* defined(VMSWAP) */
#define uvm_swapisfull() TRUE
#define uvm_swapisfull() true
#endif /* defined(VMSWAP) */
void uvm_swap_stats(int, struct swapent *, int, register_t *);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_vnode.c,v 1.79 2007/02/21 23:00:15 thorpej Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.80 2007/02/22 06:05:02 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.79 2007/02/21 23:00:15 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.80 2007/02/22 06:05:02 thorpej Exp $");
#include "fs_nfs.h"
#include "opt_uvmhist.h"
@ -142,7 +142,7 @@ uvn_attach(void *arg, vm_prot_t accessprot)
while (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false,
"uvn_attach", 0);
simple_lock(&uobj->vmobjlock);
UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);