Consistently use %#jx instead of 0x%jx or just %jx in UVMHIST_LOG formats

Author: skrll
Date:   2021-03-13 15:29:55 +00:00
Parent: 5256ea236d
Commit: e9de112945
9 changed files with 49 additions and 49 deletions
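
Why the distinction matters: UVMHIST_LOG format strings follow printf(3) conventions, and each call records exactly four integer values, padding unused slots (usually with 0), as the hunks below show. With the # flag, the standard alternate-form hex conversion only prepends "0x" to nonzero results, so %#jx and a hand-written 0x%jx differ precisely in how zero is rendered, while a bare %jx gives no radix cue at all. A minimal userland sketch of the difference, using ordinary printf rather than the kernel history code:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uintmax_t vals[] = { 0, 0xdeadbeef };

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		/*
		 * "%#jx": the '#' (alternate form) flag prefixes
		 * nonzero results with "0x"; zero prints as a bare "0".
		 */
		printf("%%#jx  -> %#jx\n", vals[i]);
		/*
		 * "0x%jx": the prefix is literal text in the format
		 * string, so zero prints as "0x0".
		 */
		printf("0x%%jx -> 0x%jx\n", vals[i]);
	}
	return 0;
}

Standardizing on %#jx keeps hex values self-identifying in the history output without hard-coding the prefix into every format string.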

--- a/sys/uvm/pmap/pmap_segtab.c
+++ b/sys/uvm/pmap/pmap_segtab.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_segtab.c,v 1.26 2020/10/08 14:02:40 skrll Exp $ */
+/* $NetBSD: pmap_segtab.c,v 1.27 2021/03/13 15:29:55 skrll Exp $ */

 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.26 2020/10/08 14:02:40 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.27 2021/03/13 15:29:55 skrll Exp $");

 /*
  * Manages physical address maps.
@@ -265,7 +265,7 @@ pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx stpp=%#jx free=%jd",
 	    (uintptr_t)pmap, (uintptr_t)stp_p, free_stp, 0);
-	UVMHIST_LOG(pmapsegtabhist, " callback=%#jx flags=%jx va=%#jx vinc=%#jx",
+	UVMHIST_LOG(pmapsegtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
 	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);
 	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
 	    i < PMAP_SEGTABSIZE;

--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.125 2020/09/21 18:41:59 chs Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.125 2020/09/21 18:41:59 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.126 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_uvmhist.h"
@@ -827,7 +827,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
 	vsize_t len;

 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(maphist, " (map=%#j, entry=%#j, flags=%jd)",
+	UVMHIST_CALLARGS(maphist, " (map=%#jx, entry=%#jx, flags=%#jx)",
 	    (uintptr_t)map, (uintptr_t)entry, flags, -2);

 	KASSERT(map != kernel_map); /* we use nointr pool */
@@ -903,7 +903,7 @@ amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
 		return;
 	}

-	UVMHIST_LOG(maphist," amap=%#j, ref=%jd, must copy it",
+	UVMHIST_LOG(maphist," amap=%#jx, ref=%jd, must copy it",
 	    (uintptr_t)srcamap, srcamap->am_ref, 0, 0);

 	/*

--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.153 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.152 2020/11/04 01:30:19 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.153 2021/03/13 15:29:55 skrll Exp $");

 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -810,7 +810,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
 	struct uvm_page_array a;

 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
+	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
 	    (uintptr_t)uobj, offset, flags,0);

 	/*

--- a/sys/uvm/uvm_bio.c
+++ b/sys/uvm/uvm_bio.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_bio.c,v 1.124 2020/11/10 04:27:22 chs Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.125 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.124 2020/11/10 04:27:22 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.125 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -342,7 +342,7 @@ ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
 	 */
 	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
-	UVMHIST_LOG(ubchist, "va 0x%jx ubc_offset 0x%jx access_type %jd",
+	UVMHIST_LOG(ubchist, "va %#jx ubc_offset %#jx access_type %jd",
 	    va, ubc_offset, access_type, 0);

 	if ((access_type & VM_PROT_WRITE) != 0) {
@@ -374,9 +374,9 @@ again:
 	memset(pgs, 0, sizeof (pgs));

 	rw_enter(uobj->vmobjlock, RW_WRITER);
-	UVMHIST_LOG(ubchist, "slot_offset 0x%jx writeoff 0x%jx writelen 0x%jx ",
+	UVMHIST_LOG(ubchist, "slot_offset %#jx writeoff %#jx writelen %#jx ",
 	    slot_offset, umap->writeoff, umap->writelen, 0);
-	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset 0x%jx npages %jd",
+	UVMHIST_LOG(ubchist, "getpages uobj %#jx offset %#jx npages %jd",
 	    (uintptr_t)uobj, umap->offset + slot_offset, npages, 0);

 	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
@@ -409,7 +409,7 @@ again:
 	va = ufi->orig_rvaddr;
 	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);
-	UVMHIST_LOG(ubchist, "va 0x%jx eva 0x%jx", va, eva, 0, 0);
+	UVMHIST_LOG(ubchist, "va %#jx eva %#jx", va, eva, 0, 0);

 	/*
 	 * Note: normally all returned pages would have the same UVM object.
@@ -483,7 +483,7 @@ ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
 	voff_t umap_offset;
 	int error;
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
+	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset %#jx len %#jx",
 	    (uintptr_t)uobj, offset, *lenp, 0);

 	KASSERT(*lenp > 0);
@@ -560,7 +560,7 @@ again:
 	umap->refcount++;
 	umap->advice = advice;
 	rw_exit(ubc_object.uobj.vmobjlock);
-	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags 0x%jx",
+	UVMHIST_LOG(ubchist, "umap %#jx refs %jd va %#jx flags %#jx",
 	    (uintptr_t)umap, umap->refcount, (uintptr_t)va, flags);

 	if (flags & UBC_FAULTBUSY) {

--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $ */
+/* $NetBSD: uvm_device.c,v 1.72 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.72 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_uvmhist.h"
@@ -364,7 +364,7 @@ udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
 	dev_t device;
 	vm_prot_t mapprot;
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist," flags=%jd", flags,0,0,0);
+	UVMHIST_LOG(maphist," flags=%#jx", flags,0,0,0);

 	/*
 	 * we do not allow device mappings to be mapped copy-on-write

--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $ */
+/* $NetBSD: uvm_km.c,v 1.160 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.160 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_uvmhist.h"
@@ -618,7 +618,7 @@ uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
 	kva = vm_map_min(map); /* hint */
 	size = round_page(size);
 	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
-	UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%jd)",
+	UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
 	    (uintptr_t)map, (uintptr_t)obj, size, flags);

 	/*

--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $ */
+/* $NetBSD: uvm_map.c,v 1.386 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.386 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_ddb.h"
 #include "opt_pax.h"
@@ -806,7 +806,7 @@ static void
 uvm_mapent_free(struct vm_map_entry *me)
 {
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%jd]",
+	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%#jx]",
 	    (uintptr_t)me, me->flags, 0, 0);
 	pool_cache_put(&uvm_map_entry_cache, me);
 }
@@ -2115,9 +2115,9 @@ nextgap:
 	SAVE_HINT(map, map->hint, entry);
 	*result = hint;
 	UVMHIST_LOG(maphist,"<- got it! (result=%#jx)", hint, 0,0,0);
-	KASSERTMSG( topdown || hint >= orig_hint, "hint: %jx, orig_hint: %jx",
+	KASSERTMSG( topdown || hint >= orig_hint, "hint: %#jx, orig_hint: %#jx",
 	    (uintmax_t)hint, (uintmax_t)orig_hint);
-	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %jx, orig_hint: %jx",
+	KASSERTMSG(!topdown || hint <= orig_hint, "hint: %#jx, orig_hint: %#jx",
 	    (uintmax_t)hint, (uintmax_t)orig_hint);
 	KASSERT(entry->end <= hint);
 	KASSERT(hint + length <= entry->next->start);
@@ -4811,7 +4811,7 @@ uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
 	void (*unlock_fn)(struct vm_map *);

 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,va=%jx)", (uintptr_t)map, va, 0, 0);
+	UVMHIST_LOG(maphist,"(map=%#jx,va=%#jx)", (uintptr_t)map, va, 0, 0);

 	const vaddr_t start = trunc_page(va);
 	const vaddr_t end = round_page(va+1);
@@ -4969,7 +4969,7 @@ uvm_voaddr_acquire(struct vm_map * const map, vaddr_t const va,
 	if (result) {
 		UVMHIST_LOG(maphist,
-		    "<- done OK (type=%jd,owner=#%jx,offset=%jx)",
+		    "<- done OK (type=%jd,owner=%#jx,offset=%#jx)",
 		    UVM_VOADDR_GET_TYPE(voaddr),
 		    UVM_VOADDR_GET_OBJECT(voaddr),
 		    voaddr->offset, 0);

--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_swap.c,v 1.202 2021/02/19 13:20:43 hannken Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.203 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
@@ -30,7 +30,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.202 2021/02/19 13:20:43 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.203 2021/03/13 15:29:55 skrll Exp $");

 #include "opt_uvmhist.h"
 #include "opt_compat_netbsd.h"
@@ -933,7 +933,7 @@ swap_on(struct lwp *l, struct swapdev *sdp)
 		goto bad;
 	}
-	UVMHIST_LOG(pdhist, " dev=%jx: size=%jd addr=%jd", dev, size, addr, 0);
+	UVMHIST_LOG(pdhist, " dev=%#jx: size=%jd addr=%jd", dev, size, addr, 0);

 	/*
 	 * now we need to allocate an extent to manage this swap device
@@ -1052,7 +1052,7 @@ swap_off(struct lwp *l, struct swapdev *sdp)
 	int error = 0;

 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, " dev=%jx, npages=%jd", sdp->swd_dev,npages, 0, 0);
+	UVMHIST_CALLARGS(pdhist, " dev=%#jx, npages=%jd", sdp->swd_dev,npages, 0, 0);

 	KASSERT(rw_write_held(&swap_syscall_lock));
 	KASSERT(mutex_owned(&uvm_swap_data_lock));
@@ -1216,7 +1216,7 @@ swstrategy(struct buf *bp)
 	pageno -= sdp->swd_drumoffset; /* page # on swapdev */
 	bn = btodb((uint64_t)pageno << PAGE_SHIFT); /* convert to diskblock */
-	UVMHIST_LOG(pdhist, " Rd/Wr (0/1) %jd: mapoff=%jx bn=%jx bcount=%jd",
+	UVMHIST_LOG(pdhist, " Rd/Wr (0/1) %jd: mapoff=%#jx bn=%#jx bcount=%jd",
 	    ((bp->b_flags & B_READ) == 0) ? 1 : 0,
 	    sdp->swd_drumoffset, bn, bp->b_bcount);
@@ -1279,7 +1279,7 @@ static int
 swread(dev_t dev, struct uio *uio, int ioflag)
 {
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, " dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
+	UVMHIST_CALLARGS(pdhist, " dev=%#jx offset=%#jx", dev, uio->uio_offset, 0, 0);

 	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
 }
@@ -1292,7 +1292,7 @@ static int
 swwrite(dev_t dev, struct uio *uio, int ioflag)
 {
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, " dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
+	UVMHIST_CALLARGS(pdhist, " dev=%#jx offset=%#jx", dev, uio->uio_offset, 0, 0);

 	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
 }
@@ -1408,7 +1408,7 @@ sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
 			sz = resid;

 		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
-		    "vp %#jx/%#jx offset 0x%jx/0x%jx",
+		    "vp %#jx/%#jx offset %#jx/%#jx",
 		    (uintptr_t)sdp->swd_vp, (uintptr_t)vp, byteoff, nbn);

 		/*
@@ -1499,7 +1499,7 @@ sw_reg_start(struct swapdev *sdp)
 		sdp->swd_active++;

 		UVMHIST_LOG(pdhist,
-		    "sw_reg_start: bp %#jx vp %#jx blkno %#jx cnt %jx",
+		    "sw_reg_start: bp %#jx vp %#jx blkno %#jx cnt %#jx",
 		    (uintptr_t)bp, (uintptr_t)bp->b_vp, (uintptr_t)bp->b_blkno,
 		    bp->b_bcount);
 		vp = bp->b_vp;
@@ -1538,10 +1538,10 @@ sw_reg_iodone(struct work *wk, void *dummy)
 	int s, resid, error;
 	KASSERT(&vbp->vb_buf.b_work == wk);
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, " vbp=%#jx vp=%#jx blkno=%jx addr=%#jx",
+	UVMHIST_CALLARGS(pdhist, " vbp=%#jx vp=%#jx blkno=%#jx addr=%#jx",
 	    (uintptr_t)vbp, (uintptr_t)vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno,
 	    (uintptr_t)vbp->vb_buf.b_data);
-	UVMHIST_LOG(pdhist, " cnt=%jx resid=%jx",
+	UVMHIST_LOG(pdhist, " cnt=%#jx resid=%#jx",
 	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);

 	/*
@@ -1836,7 +1836,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
 	int error, mapinflags;
 	bool write, async, swap_encrypt;
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%jd",
+	UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%#jx",
 	    startslot, npages, flags, 0);

 	write = (flags & B_READ) == 0;
@@ -1967,7 +1967,7 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags)
 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
 	}
 	UVMHIST_LOG(pdhist,
-	    "about to start io: data = %#jx blkno = 0x%jx, bcount = %jd",
+	    "about to start io: data = %#jx blkno = %#jx, bcount = %jd",
 	    (uintptr_t)bp->b_data, bp->b_blkno, bp->b_bcount, 0);

 	/*

--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $ */

 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $");

 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -176,7 +176,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset,
 	int error;

 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
+	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset,
 	    0, 0);

 	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
@@ -290,7 +290,7 @@ uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
 {
 	struct vm_page *pg;
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
+	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset,
 	    0, 0);

 	/*
@@ -441,7 +441,7 @@ uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 	rw_enter(uobj->vmobjlock, RW_WRITER);
-	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
+	UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx",
 	    (uintptr_t)vp, vp->v_size, newsize, 0);

 	/*