From b78a6618bd8e0d06dd1e0a63a6837090fc05490f Mon Sep 17 00:00:00 2001
From: ad
Date: Tue, 31 Dec 2019 12:40:27 +0000
Subject: [PATCH] Rename uvm_page_locked_p() -> uvm_page_owner_locked_p()

---
 sys/arch/hppa/hppa/pmap.c       |  8 ++++----
 sys/arch/x86/x86/pmap.c         | 18 +++++++++---------
 sys/miscfs/genfs/genfs_io.c     |  6 +++---
 sys/rump/librump/rumpkern/vm.c  |  8 ++++----
 sys/uvm/uvm_page.c              | 26 +++++++++++++-------------
 sys/uvm/uvm_page.h              |  6 +++---
 sys/uvm/uvm_pdaemon.c           |  6 +++---
 sys/uvm/uvm_pdpolicy_clock.c    |  6 +++---
 sys/uvm/uvm_pdpolicy_clockpro.c |  6 +++---
 9 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index d1691c55f386..2527b25c2d9a 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $ */
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_cputype.h"
 
@@ -579,7 +579,7 @@ pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
 	    __func__, pg, pve, pm, va, pdep, flags));
 
-	KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg));
+	KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg));
 
 	pve->pv_pmap = pm;
 	pve->pv_va = va | flags;
@@ -594,7 +594,7 @@ pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry **pve, *pv;
 
-	KASSERT(pmap == pmap_kernel() || uvm_page_locked_p(pg));
+	KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg));
 
 	for (pv = *(pve = &md->pvh_list); pv;
 	    pv = *(pve = &(*pve)->pv_next)) {
diff --git a/sys/arch/x86/x86/pmap.c b/sys/arch/x86/x86/pmap.c
index 41d12e6e6d3a..7de12d00a79a 100644
--- a/sys/arch/x86/x86/pmap.c
+++ b/sys/arch/x86/x86/pmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -3530,7 +3530,7 @@ pmap_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
 	}
 
 	if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-		KASSERT(uvm_page_locked_p(pg));
+		KASSERT(uvm_page_owner_locked_p(pg));
 		pp = VM_PAGE_TO_PP(pg);
 	} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
 		paddr_t pa = pmap_pte2pa(opte);
@@ -3868,7 +3868,7 @@ pmap_page_remove(struct vm_page *pg)
 	struct pmap_page *pp;
 	paddr_t pa;
 
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 
 	pp = VM_PAGE_TO_PP(pg);
 	pa = VM_PAGE_TO_PHYS(pg);
@@ -3909,7 +3909,7 @@ pmap_test_attrs(struct vm_page *pg, unsigned testbits)
 	u_int result;
 	paddr_t pa;
 
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 
 	pp = VM_PAGE_TO_PP(pg);
 	if ((pp->pp_attrs & testbits) != 0) {
@@ -3982,7 +3982,7 @@ pmap_clear_attrs(struct vm_page *pg, unsigned clearbits)
 	struct pmap_page *pp;
 	paddr_t pa;
 
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 
 	pp = VM_PAGE_TO_PP(pg);
 	pa = VM_PAGE_TO_PHYS(pg);
@@ -4374,7 +4374,7 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t va, paddr_t ma, paddr_t pa,
 	 */
 	if ((~opte & (PTE_P | PTE_PVLIST)) == 0) {
 		if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-			KASSERT(uvm_page_locked_p(old_pg));
+			KASSERT(uvm_page_owner_locked_p(old_pg));
 			old_pp = VM_PAGE_TO_PP(old_pg);
 		} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
 			panic("%s: PTE_PVLIST with pv-untracked page"
@@ -5271,7 +5271,7 @@ pmap_ept_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 	 */
 	if ((~opte & (EPT_R | EPT_PVLIST)) == 0) {
 		if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
-			KASSERT(uvm_page_locked_p(old_pg));
+			KASSERT(uvm_page_owner_locked_p(old_pg));
 			old_pp = VM_PAGE_TO_PP(old_pg);
 		} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
 			panic("%s: EPT_PVLIST with pv-untracked page"
@@ -5444,7 +5444,7 @@ pmap_ept_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
 	}
 
 	if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
-		KASSERT(uvm_page_locked_p(pg));
+		KASSERT(uvm_page_owner_locked_p(pg));
 		pp = VM_PAGE_TO_PP(pg);
 	} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
 		paddr_t pa = pmap_pte2pa(opte);
diff --git a/sys/miscfs/genfs/genfs_io.c b/sys/miscfs/genfs/genfs_io.c
index 8c40c64fccbe..be7efdc41e1d 100644
--- a/sys/miscfs/genfs/genfs_io.c
+++ b/sys/miscfs/genfs/genfs_io.c
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $ */
+/* $NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -75,7 +75,7 @@ genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
 		if (pg == NULL || pg == PGO_DONTCARE)
 			continue;
 
-		KASSERT(uvm_page_locked_p(pg));
+		KASSERT(uvm_page_owner_locked_p(pg));
 		if (pg->flags & PG_FAKE) {
 			pg->flags |= PG_RELEASED;
 		}
diff --git a/sys/rump/librump/rumpkern/vm.c b/sys/rump/librump/rumpkern/vm.c
index ecf1493346f3..3e5a3fd76930 100644
--- a/sys/rump/librump/rumpkern/vm.c
+++ b/sys/rump/librump/rumpkern/vm.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $ */
+/* $NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -238,12 +238,12 @@ uvm_pagezero(struct vm_page *pg)
 }
 
 /*
- * uvm_page_locked_p: return true if object associated with page is
+ * uvm_page_owner_locked_p: return true if object associated with page is
  * locked.  this is a weak check for runtime assertions only.
  */
 
 bool
-uvm_page_locked_p(struct vm_page *pg)
+uvm_page_owner_locked_p(struct vm_page *pg)
 {
 
 	return mutex_owned(pg->uobject->vmobjlock);
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index fa61ef364aa2..7ea1e220b0d1 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $ */
+/* $NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $ */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -1304,7 +1304,7 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
 	pg->offset = off;
 	pg->uobject = obj;
 	pg->uanon = anon;
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
 	if (anon) {
 		anon->an_page = pg;
@@ -1636,7 +1636,7 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
 			continue;
 		}
 
-		KASSERT(uvm_page_locked_p(pg));
+		KASSERT(uvm_page_owner_locked_p(pg));
 		KASSERT(pg->flags & PG_BUSY);
 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
 		if (pg->flags & PG_WANTED) {
@@ -1676,7 +1676,7 @@
 
 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
 	KASSERT((pg->flags & PG_WANTED) == 0);
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 
 	/* gain ownership? */
 	if (tag) {
@@ -1750,7 +1750,7 @@
 void
 uvm_pagewire(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 #if defined(READAHEAD_STATS)
 	if ((pg->flags & PG_READAHEAD) != 0) {
 		uvm_ra_hit.ev_count++;
@@ -1778,7 +1778,7 @@
 void
 uvm_pageunwire(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	KASSERT(pg->wire_count != 0);
 	KASSERT(!uvmpdpol_pageisqueued_p(pg));
 	mutex_enter(&pg->interlock);
@@ -1804,7 +1804,7 @@
 void
 uvm_pagedeactivate(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	if (pg->wire_count == 0) {
 		KASSERT(uvmpdpol_pageisqueued_p(pg));
 		uvmpdpol_pagedeactivate(pg);
@@ -1821,7 +1821,7 @@
 void
 uvm_pageactivate(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
#if defined(READAHEAD_STATS)
 	if ((pg->flags & PG_READAHEAD) != 0) {
 		uvm_ra_hit.ev_count++;
@@ -1842,7 +1842,7 @@
 void
 uvm_pagedequeue(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	if (uvmpdpol_pageisqueued_p(pg)) {
 		uvmpdpol_pagedequeue(pg);
 	}
@@ -1858,7 +1858,7 @@
 void
 uvm_pageenqueue(struct vm_page *pg)
 {
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
 		uvmpdpol_pageenqueue(pg);
 	}
@@ -1919,12 +1919,12 @@ uvm_page_lookup_freelist(struct vm_page *pg)
 }
 
 /*
- * uvm_page_locked_p: return true if object associated with page is
+ * uvm_page_owner_locked_p: return true if object associated with page is
  * locked.  this is a weak check for runtime assertions only.
 */
 
 bool
-uvm_page_locked_p(struct vm_page *pg)
+uvm_page_owner_locked_p(struct vm_page *pg)
 {
 
 	if (pg->uobject != NULL) {
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index f14ea44dbbd2..fd09e5fe1be9 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.h,v 1.90 2019/12/27 13:13:17 ad Exp $ */
+/* $NetBSD: uvm_page.h,v 1.91 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -112,7 +112,7 @@
  *	c:	cpu private
  *	s:	stable, does not change
  *
- * UVM and pmap(9) may use uvm_page_locked_p() to assert whether the
+ * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert whether the
  * page owner's lock is acquired.
  *
  * A page can have one of four identities:
@@ -311,7 +311,7 @@
 void uvm_pageunwire(struct vm_page *);
 void uvm_pagewire(struct vm_page *);
 void uvm_pagezero(struct vm_page *);
 bool uvm_pageismanaged(paddr_t);
-bool uvm_page_locked_p(struct vm_page *);
+bool uvm_page_owner_locked_p(struct vm_page *);
 void uvm_pgfl_lock(void);
 void uvm_pgfl_unlock(void);
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index f490d73c75fe..c7301b57f362 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.120 2019/12/31 12:40:27 ad Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.120 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -699,7 +699,7 @@ uvmpd_scan_queue(void)
 			break;
 		}
 		KASSERT(uvmpdpol_pageisqueued_p(p));
-		KASSERT(uvm_page_locked_p(p));
+		KASSERT(uvm_page_owner_locked_p(p));
 		KASSERT(p->wire_count == 0);
 
 		/*
diff --git a/sys/uvm/uvm_pdpolicy_clock.c b/sys/uvm/uvm_pdpolicy_clock.c
index c5f49321af43..f30d1123227a 100644
--- a/sys/uvm/uvm_pdpolicy_clock.c
+++ b/sys/uvm/uvm_pdpolicy_clock.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdpolicy_clock.c,v 1.25 2019/12/30 18:18:03 ad Exp $ */
+/* $NetBSD: uvm_pdpolicy_clock.c,v 1.26 2019/12/31 12:40:27 ad Exp $ */
 /* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */
 
 /*
@@ -69,7 +69,7 @@
 #else /* defined(PDSIM) */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.25 2019/12/30 18:18:03 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.26 2019/12/31 12:40:27 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -398,7 +398,7 @@
 static void
 uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
 {
 
-	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(uvm_page_owner_locked_p(pg));
 	if (pg->pqflags & PQ_ACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
diff --git a/sys/uvm/uvm_pdpolicy_clockpro.c b/sys/uvm/uvm_pdpolicy_clockpro.c
index 13eadc580b86..961c1240c8ff 100644
--- a/sys/uvm/uvm_pdpolicy_clockpro.c
+++ b/sys/uvm/uvm_pdpolicy_clockpro.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.20 2019/12/30 18:08:38 ad Exp $ */
+/* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.21 2019/12/31 12:40:27 ad Exp $ */
 
 /*-
  * Copyright (c)2005, 2006 YAMAMOTO Takashi,
@@ -43,7 +43,7 @@
 #else /* defined(PDSIM) */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.20 2019/12/30 18:08:38 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.21 2019/12/31 12:40:27 ad Exp $");
 
 #include "opt_ddb.h"
 
@@ -643,7 +643,7 @@ clockpro_movereferencebit(struct vm_page *pg, bool locked)
 	bool referenced;
 
 	KASSERT(mutex_owned(&clockpro.lock));
-	KASSERT(!locked || uvm_page_locked_p(pg));
+	KASSERT(!locked || uvm_page_owner_locked_p(pg));
 	if (!locked) {
 		/*
 		 * acquire interlock to stablize page identity.
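
A note on the rename, for readers following along outside the tree: the
predicate answers "is the lock of whatever owns this page held?" -- a page
carries no lock of its own; its owning uvm_object or vm_anon does -- which is
what the extra "owner" in the new name makes explicit.  The uvm_page.c hunk
above cuts off after the first line of the function body, so what follows is
only a sketch of its full shape, reconstructed from the visible context and
the pre-rename sources; the pg->uanon branch in particular is an inference,
not something this diff shows.

/*
 * Sketch only -- not part of the patch.  Assumes kernel headers; the
 * pg->uanon branch is reconstructed, not visible in the diff above.
 */
#include <sys/types.h>
#include <sys/mutex.h>
#include <uvm/uvm.h>

bool
uvm_page_owner_locked_p(struct vm_page *pg)
{

	if (pg->uobject != NULL) {
		/* Object-owned page: the owner's lock is vmobjlock. */
		return mutex_owned(pg->uobject->vmobjlock);
	}
	if (pg->uanon != NULL) {
		/* Anon-owned page: the owner's lock is an_lock. */
		return mutex_owned(pg->uanon->an_lock);
	}
	/* Unowned page: no owner lock to check against. */
	return true;
}

As the comment carried through the rename says, this is a weak check for
runtime assertions only: mutex_owned(9) is meant for diagnostic use by the
(presumed) lock holder, not for making locking decisions.  Accordingly,
every caller in the hunks above wraps it in KASSERT(), e.g.
KASSERT(uvm_page_owner_locked_p(pg)), before touching owner-serialized
page state.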