Rename uvm_page_locked_p() -> uvm_page_owner_locked_p()

This commit is contained in:
ad 2019-12-31 12:40:27 +00:00
parent 439fe91982
commit b78a6618bd
9 changed files with 45 additions and 45 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $ */ /* $NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $ */
/*- /*-
* Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@ -65,7 +65,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.101 2019/12/15 21:11:34 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.102 2019/12/31 12:40:27 ad Exp $");
#include "opt_cputype.h" #include "opt_cputype.h"
@ -579,7 +579,7 @@ pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
__func__, pg, pve, pm, va, pdep, flags)); __func__, pg, pve, pm, va, pdep, flags));
KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg)); KASSERT(pm == pmap_kernel() || uvm_page_owner_locked_p(pg));
pve->pv_pmap = pm; pve->pv_pmap = pm;
pve->pv_va = va | flags; pve->pv_va = va | flags;
@ -594,7 +594,7 @@ pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
struct vm_page_md * const md = VM_PAGE_TO_MD(pg); struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry **pve, *pv; struct pv_entry **pve, *pv;
KASSERT(pmap == pmap_kernel() || uvm_page_locked_p(pg)); KASSERT(pmap == pmap_kernel() || uvm_page_owner_locked_p(pg));
for (pv = *(pve = &md->pvh_list); for (pv = *(pve = &md->pvh_list);
pv; pv = *(pve = &(*pve)->pv_next)) { pv; pv = *(pve = &(*pve)->pv_next)) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $ */ /* $NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $ */
/* /*
* Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc. * Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
@ -130,7 +130,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.348 2019/12/22 15:15:20 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.349 2019/12/31 12:40:27 ad Exp $");
#include "opt_user_ldt.h" #include "opt_user_ldt.h"
#include "opt_lockdebug.h" #include "opt_lockdebug.h"
@ -3530,7 +3530,7 @@ pmap_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
} }
if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) { if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pp = VM_PAGE_TO_PP(pg); pp = VM_PAGE_TO_PP(pg);
} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) { } else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
paddr_t pa = pmap_pte2pa(opte); paddr_t pa = pmap_pte2pa(opte);
@ -3868,7 +3868,7 @@ pmap_page_remove(struct vm_page *pg)
struct pmap_page *pp; struct pmap_page *pp;
paddr_t pa; paddr_t pa;
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pp = VM_PAGE_TO_PP(pg); pp = VM_PAGE_TO_PP(pg);
pa = VM_PAGE_TO_PHYS(pg); pa = VM_PAGE_TO_PHYS(pg);
@ -3909,7 +3909,7 @@ pmap_test_attrs(struct vm_page *pg, unsigned testbits)
u_int result; u_int result;
paddr_t pa; paddr_t pa;
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pp = VM_PAGE_TO_PP(pg); pp = VM_PAGE_TO_PP(pg);
if ((pp->pp_attrs & testbits) != 0) { if ((pp->pp_attrs & testbits) != 0) {
@ -3982,7 +3982,7 @@ pmap_clear_attrs(struct vm_page *pg, unsigned clearbits)
struct pmap_page *pp; struct pmap_page *pp;
paddr_t pa; paddr_t pa;
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pp = VM_PAGE_TO_PP(pg); pp = VM_PAGE_TO_PP(pg);
pa = VM_PAGE_TO_PHYS(pg); pa = VM_PAGE_TO_PHYS(pg);
@ -4374,7 +4374,7 @@ pmap_enter_ma(struct pmap *pmap, vaddr_t va, paddr_t ma, paddr_t pa,
*/ */
if ((~opte & (PTE_P | PTE_PVLIST)) == 0) { if ((~opte & (PTE_P | PTE_PVLIST)) == 0) {
if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) { if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
KASSERT(uvm_page_locked_p(old_pg)); KASSERT(uvm_page_owner_locked_p(old_pg));
old_pp = VM_PAGE_TO_PP(old_pg); old_pp = VM_PAGE_TO_PP(old_pg);
} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) { } else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
panic("%s: PTE_PVLIST with pv-untracked page" panic("%s: PTE_PVLIST with pv-untracked page"
@ -5271,7 +5271,7 @@ pmap_ept_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
*/ */
if ((~opte & (EPT_R | EPT_PVLIST)) == 0) { if ((~opte & (EPT_R | EPT_PVLIST)) == 0) {
if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) { if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
KASSERT(uvm_page_locked_p(old_pg)); KASSERT(uvm_page_owner_locked_p(old_pg));
old_pp = VM_PAGE_TO_PP(old_pg); old_pp = VM_PAGE_TO_PP(old_pg);
} else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) { } else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
panic("%s: EPT_PVLIST with pv-untracked page" panic("%s: EPT_PVLIST with pv-untracked page"
@ -5444,7 +5444,7 @@ pmap_ept_remove_pte(struct pmap *pmap, struct vm_page *ptp, pt_entry_t *pte,
} }
if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) { if ((pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pp = VM_PAGE_TO_PP(pg); pp = VM_PAGE_TO_PP(pg);
} else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) { } else if ((pp = pmap_pv_tracked(pmap_pte2pa(opte))) == NULL) {
paddr_t pa = pmap_pte2pa(opte); paddr_t pa = pmap_pte2pa(opte);

View File

@ -1,4 +1,4 @@
/* $NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $ */ /* $NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $ */
/* /*
* Copyright (c) 1982, 1986, 1989, 1993 * Copyright (c) 1982, 1986, 1989, 1993
@ -31,7 +31,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.81 2019/12/16 18:17:32 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.82 2019/12/31 12:40:27 ad Exp $");
#include <sys/param.h> #include <sys/param.h>
#include <sys/systm.h> #include <sys/systm.h>
@ -75,7 +75,7 @@ genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
if (pg == NULL || pg == PGO_DONTCARE) if (pg == NULL || pg == PGO_DONTCARE)
continue; continue;
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
if (pg->flags & PG_FAKE) { if (pg->flags & PG_FAKE) {
pg->flags |= PG_RELEASED; pg->flags |= PG_RELEASED;
} }

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $ */ /* $NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $ */
/* /*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved. * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@ -41,7 +41,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.177 2019/12/21 12:59:12 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.178 2019/12/31 12:40:27 ad Exp $");
#include <sys/param.h> #include <sys/param.h>
#include <sys/atomic.h> #include <sys/atomic.h>
@ -238,12 +238,12 @@ uvm_pagezero(struct vm_page *pg)
} }
/* /*
* uvm_page_locked_p: return true if object associated with page is * uvm_page_owner_locked_p: return true if object associated with page is
* locked. this is a weak check for runtime assertions only. * locked. this is a weak check for runtime assertions only.
*/ */
bool bool
uvm_page_locked_p(struct vm_page *pg) uvm_page_owner_locked_p(struct vm_page *pg)
{ {
return mutex_owned(pg->uobject->vmobjlock); return mutex_owned(pg->uobject->vmobjlock);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $ */ /* $NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $ */
/*- /*-
* Copyright (c) 2019 The NetBSD Foundation, Inc. * Copyright (c) 2019 The NetBSD Foundation, Inc.
@ -95,7 +95,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.217 2019/12/30 17:45:53 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.218 2019/12/31 12:40:27 ad Exp $");
#include "opt_ddb.h" #include "opt_ddb.h"
#include "opt_uvm.h" #include "opt_uvm.h"
@ -1304,7 +1304,7 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
pg->offset = off; pg->offset = off;
pg->uobject = obj; pg->uobject = obj;
pg->uanon = anon; pg->uanon = anon;
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE; pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
if (anon) { if (anon) {
anon->an_page = pg; anon->an_page = pg;
@ -1636,7 +1636,7 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
continue; continue;
} }
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
KASSERT(pg->flags & PG_BUSY); KASSERT(pg->flags & PG_BUSY);
KASSERT((pg->flags & PG_PAGEOUT) == 0); KASSERT((pg->flags & PG_PAGEOUT) == 0);
if (pg->flags & PG_WANTED) { if (pg->flags & PG_WANTED) {
@ -1676,7 +1676,7 @@ uvm_page_own(struct vm_page *pg, const char *tag)
KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0); KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
KASSERT((pg->flags & PG_WANTED) == 0); KASSERT((pg->flags & PG_WANTED) == 0);
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
/* gain ownership? */ /* gain ownership? */
if (tag) { if (tag) {
@ -1750,7 +1750,7 @@ void
uvm_pagewire(struct vm_page *pg) uvm_pagewire(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
#if defined(READAHEAD_STATS) #if defined(READAHEAD_STATS)
if ((pg->flags & PG_READAHEAD) != 0) { if ((pg->flags & PG_READAHEAD) != 0) {
uvm_ra_hit.ev_count++; uvm_ra_hit.ev_count++;
@ -1778,7 +1778,7 @@ void
uvm_pageunwire(struct vm_page *pg) uvm_pageunwire(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
KASSERT(pg->wire_count != 0); KASSERT(pg->wire_count != 0);
KASSERT(!uvmpdpol_pageisqueued_p(pg)); KASSERT(!uvmpdpol_pageisqueued_p(pg));
mutex_enter(&pg->interlock); mutex_enter(&pg->interlock);
@ -1804,7 +1804,7 @@ void
uvm_pagedeactivate(struct vm_page *pg) uvm_pagedeactivate(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
if (pg->wire_count == 0) { if (pg->wire_count == 0) {
KASSERT(uvmpdpol_pageisqueued_p(pg)); KASSERT(uvmpdpol_pageisqueued_p(pg));
uvmpdpol_pagedeactivate(pg); uvmpdpol_pagedeactivate(pg);
@ -1821,7 +1821,7 @@ void
uvm_pageactivate(struct vm_page *pg) uvm_pageactivate(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
#if defined(READAHEAD_STATS) #if defined(READAHEAD_STATS)
if ((pg->flags & PG_READAHEAD) != 0) { if ((pg->flags & PG_READAHEAD) != 0) {
uvm_ra_hit.ev_count++; uvm_ra_hit.ev_count++;
@ -1842,7 +1842,7 @@ void
uvm_pagedequeue(struct vm_page *pg) uvm_pagedequeue(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
if (uvmpdpol_pageisqueued_p(pg)) { if (uvmpdpol_pageisqueued_p(pg)) {
uvmpdpol_pagedequeue(pg); uvmpdpol_pagedequeue(pg);
} }
@ -1858,7 +1858,7 @@ void
uvm_pageenqueue(struct vm_page *pg) uvm_pageenqueue(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) { if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
uvmpdpol_pageenqueue(pg); uvmpdpol_pageenqueue(pg);
} }
@ -1919,12 +1919,12 @@ uvm_page_lookup_freelist(struct vm_page *pg)
} }
/* /*
* uvm_page_locked_p: return true if object associated with page is * uvm_page_owner_locked_p: return true if object associated with page is
* locked. this is a weak check for runtime assertions only. * locked. this is a weak check for runtime assertions only.
*/ */
bool bool
uvm_page_locked_p(struct vm_page *pg) uvm_page_owner_locked_p(struct vm_page *pg)
{ {
if (pg->uobject != NULL) { if (pg->uobject != NULL) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.h,v 1.90 2019/12/27 13:13:17 ad Exp $ */ /* $NetBSD: uvm_page.h,v 1.91 2019/12/31 12:40:27 ad Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -112,7 +112,7 @@
* c: cpu private * c: cpu private
* s: stable, does not change * s: stable, does not change
* *
* UVM and pmap(9) may use uvm_page_locked_p() to assert whether the * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert whether the
* page owner's lock is acquired. * page owner's lock is acquired.
* *
* A page can have one of four identities: * A page can have one of four identities:
@ -311,7 +311,7 @@ void uvm_pageunwire(struct vm_page *);
void uvm_pagewire(struct vm_page *); void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *); void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t); bool uvm_pageismanaged(paddr_t);
bool uvm_page_locked_p(struct vm_page *); bool uvm_page_owner_locked_p(struct vm_page *);
void uvm_pgfl_lock(void); void uvm_pgfl_lock(void);
void uvm_pgfl_unlock(void); void uvm_pgfl_unlock(void);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $ */ /* $NetBSD: uvm_pdaemon.c,v 1.120 2019/12/31 12:40:27 ad Exp $ */
/* /*
* Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -66,7 +66,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.119 2019/12/30 18:08:37 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.120 2019/12/31 12:40:27 ad Exp $");
#include "opt_uvmhist.h" #include "opt_uvmhist.h"
#include "opt_readahead.h" #include "opt_readahead.h"
@ -699,7 +699,7 @@ uvmpd_scan_queue(void)
break; break;
} }
KASSERT(uvmpdpol_pageisqueued_p(p)); KASSERT(uvmpdpol_pageisqueued_p(p));
KASSERT(uvm_page_locked_p(p)); KASSERT(uvm_page_owner_locked_p(p));
KASSERT(p->wire_count == 0); KASSERT(p->wire_count == 0);
/* /*

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdpolicy_clock.c,v 1.25 2019/12/30 18:18:03 ad Exp $ */ /* $NetBSD: uvm_pdpolicy_clock.c,v 1.26 2019/12/31 12:40:27 ad Exp $ */
/* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */ /* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */
/* /*
@ -69,7 +69,7 @@
#else /* defined(PDSIM) */ #else /* defined(PDSIM) */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.25 2019/12/30 18:18:03 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.26 2019/12/31 12:40:27 ad Exp $");
#include <sys/param.h> #include <sys/param.h>
#include <sys/proc.h> #include <sys/proc.h>
@ -398,7 +398,7 @@ static void
uvmpdpol_pagedeactivate_locked(struct vm_page *pg) uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
{ {
KASSERT(uvm_page_locked_p(pg)); KASSERT(uvm_page_owner_locked_p(pg));
if (pg->pqflags & PQ_ACTIVE) { if (pg->pqflags & PQ_ACTIVE) {
TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue); TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.20 2019/12/30 18:08:38 ad Exp $ */ /* $NetBSD: uvm_pdpolicy_clockpro.c,v 1.21 2019/12/31 12:40:27 ad Exp $ */
/*- /*-
* Copyright (c)2005, 2006 YAMAMOTO Takashi, * Copyright (c)2005, 2006 YAMAMOTO Takashi,
@ -43,7 +43,7 @@
#else /* defined(PDSIM) */ #else /* defined(PDSIM) */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.20 2019/12/30 18:08:38 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clockpro.c,v 1.21 2019/12/31 12:40:27 ad Exp $");
#include "opt_ddb.h" #include "opt_ddb.h"
@ -643,7 +643,7 @@ clockpro_movereferencebit(struct vm_page *pg, bool locked)
bool referenced; bool referenced;
KASSERT(mutex_owned(&clockpro.lock)); KASSERT(mutex_owned(&clockpro.lock));
KASSERT(!locked || uvm_page_locked_p(pg)); KASSERT(!locked || uvm_page_owner_locked_p(pg));
if (!locked) { if (!locked) {
/* /*
* acquire interlock to stabilize page identity. * acquire interlock to stabilize page identity.