Add evcnts for common pmap activities when PMAPCOUNTERS is defined
in the config file.
commit 8a75cdd09b
parent 5ba45ecc9c
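As background, a minimal sketch of the event-counter pattern the diff below uses; the counter, macro, and function names in the sketch (pmap_evcnt_example, pmap_example_init, pmap_example_operation) are illustrative, not taken from the commit. A counter is declared statically with EVCNT_INITIALIZER, registered once with evcnt_attach_static(), and bumped by incrementing its ev_count field; when PMAPCOUNTERS is not defined the counting macro compiles away. PMAPCOUNTERS itself is expected to come from an "options PMAPCOUNTERS" line in the kernel config file, which config(8) turns into a define in the generated opt_pmap.h.

#include "opt_pmap.h"		/* defines PMAPCOUNTERS when the config has "options PMAPCOUNTERS" */

#include <sys/param.h>
#include <sys/device.h>		/* struct evcnt, EVCNT_INITIALIZER, evcnt_attach_static */

#ifdef PMAPCOUNTERS
/* Static counter: event type, optional parent counter, group name, event name. */
struct evcnt pmap_evcnt_example =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "example events");

/* Counting is just an increment of ev_count ... */
#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#else
/* ... and disappears entirely when PMAPCOUNTERS is not configured. */
#define	PMAPCOUNT(ev)	((void) 0)
#endif

void
pmap_example_init(void)
{
#ifdef PMAPCOUNTERS
	/* Register the counter once so it is reported with the other event counters. */
	evcnt_attach_static(&pmap_evcnt_example);
#endif
}

void
pmap_example_operation(void)
{
	/* ... do the real work, then count it ... */
	PMAPCOUNT(example);
}

Once attached, the totals are reported alongside the kernel's other event counters (vmstat -e lists them on NetBSD), grouped under the "pmap" name passed to EVCNT_INITIALIZER.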

pmap.c
@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.55 2002/08/14 14:25:16 matt Exp $ */
/* $NetBSD: pmap.c,v 1.56 2002/08/18 19:18:33 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@ -67,12 +67,14 @@
*/

#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h> /* for evcnt */
#include <sys/systm.h>

#if __NetBSD_Version__ < 105010000
@ -92,8 +94,6 @@
#include <powerpc/bat.h>
#endif

/*#define PMAPCHECK*/

#if defined(DEBUG) || defined(PMAPCHECK)
#define STATIC
#else
@ -270,6 +270,64 @@ unsigned int pmapdebug = 0;
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
#define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
struct evcnt pmap_evcnt_mappings =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
"pmap", "pages mapped");
struct evcnt pmap_evcnt_unmappings =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
"pmap", "pages unmapped");

struct evcnt pmap_evcnt_kernel_mappings =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
"pmap", "kernel pages mapped");
struct evcnt pmap_evcnt_kernel_unmappings =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
"pmap", "kernel pages unmapped");

struct evcnt pmap_evcnt_mappings_replaced =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
"pmap", "pages mappings replaced");

struct evcnt pmap_evcnt_exec_mappings =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
"pmap", "exec pages mapped");
struct evcnt pmap_evcnt_exec_cached =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
"pmap", "exec pages cached");

struct evcnt pmap_evcnt_exec_synced =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages synced");
struct evcnt pmap_evcnt_exec_synced_clear_modify =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages synced (CM)");

struct evcnt pmap_evcnt_exec_uncached_page_protect =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages uncached (PP)");
struct evcnt pmap_evcnt_exec_uncached_clear_modify =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages uncached (CM)");
struct evcnt pmap_evcnt_exec_uncached_zero_page =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages uncached (ZP)");
struct evcnt pmap_evcnt_exec_uncached_copy_page =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
"pmap", "exec pages uncached (CP)");

/*
* From pmap_subr.c
*/
extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;
#else
#define PMAPCOUNT(ev) ((void) 0)
#endif

#define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va))
#define TLBSYNC() __asm __volatile("tlbsync")
#define SYNC() __asm __volatile("sync")
@ -791,6 +849,29 @@ pmap_init(void)

pmap_initialized = 1;
splx(s);

#ifdef PMAPCOUNTERS
evcnt_attach_static(&pmap_evcnt_mappings);
evcnt_attach_static(&pmap_evcnt_mappings_replaced);
evcnt_attach_static(&pmap_evcnt_unmappings);

evcnt_attach_static(&pmap_evcnt_kernel_mappings);
evcnt_attach_static(&pmap_evcnt_kernel_unmappings);

evcnt_attach_static(&pmap_evcnt_exec_mappings);
evcnt_attach_static(&pmap_evcnt_exec_cached);
evcnt_attach_static(&pmap_evcnt_exec_synced);
evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);

evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);

evcnt_attach_static(&pmap_evcnt_zeroed_pages);
evcnt_attach_static(&pmap_evcnt_copied_pages);
evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);
#endif
}

/*
@ -1182,7 +1263,6 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
panic("pmap_pvo_enter: called recursively!");
#endif

pmap_pvo_enter_calls++;
/*
* Compute the PTE Group index.
*/
@ -1213,6 +1293,7 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
#endif
}
#endif
PMAPCOUNT(mappings_replaced);
pmap_pvo_remove(pvo, -1);
break;
}
@ -1245,12 +1326,18 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
pvo->pvo_pmap = pm;
LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
pvo->pvo_vaddr &= ~ADDR_POFF;
if (flags & VM_PROT_EXECUTE)
if (flags & VM_PROT_EXECUTE) {
PMAPCOUNT(exec_mappings);
pvo->pvo_vaddr |= PVO_EXECUTABLE;
}
if (flags & PMAP_WIRED)
pvo->pvo_vaddr |= PVO_WIRED;
if (pvo_head != &pmap_pvo_kunmanaged)
if (pvo_head != &pmap_pvo_kunmanaged) {
pvo->pvo_vaddr |= PVO_MANAGED;
PMAPCOUNT(mappings);
} else {
PMAPCOUNT(kernel_mappings);
}
pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
@ -1334,12 +1421,14 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
* ... if we aren't going to reuse it.
*/
LIST_REMOVE(pvo, pvo_olink);
if (pvo->pvo_vaddr & PVO_MANAGED)
PMAPCOUNT(unmappings);
else
PMAPCOUNT(kernel_unmappings);
pool_put(pvo->pvo_vaddr & PVO_MANAGED
? &pmap_mpvo_pool
: &pmap_upvo_pool,
pvo);
pmap_pvo_entries--;
pmap_pvo_remove_calls++;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
pmap_pvo_remove_depth--;
#endif
@ -1449,9 +1538,11 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
(pte_lo & PTE_I) == 0 &&
was_exec == 0) {
DPRINTFN(ENTER, (" syncicache"));
PMAPCOUNT(exec_synced);
pmap_syncicache(pa, NBPG);
if (pg != NULL) {
pmap_attr_save(pg, PTE_EXEC);
PMAPCOUNT(exec_cached);
#if defined(DEBUG) || defined(PMAPDEBUG)
if (pmapdebug & PMAPDEBUG_ENTER)
printf(" marked-as-exec");
@ -1709,7 +1800,10 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
if ((prot & VM_PROT_READ) == 0) {
DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
pg->phys_addr));
pmap_attr_clear(pg, PTE_EXEC);
if (pmap_attr_fetch(pg) & PTE_EXEC) {
PMAPCOUNT(exec_uncached_page_protect);
pmap_attr_clear(pg, PTE_EXEC);
}
}

pvo_head = vm_page_to_pvoh(pg);
@ -1924,10 +2018,12 @@ pmap_clear_bit(struct vm_page *pg, int ptebit)
DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
pg->phys_addr));
pmap_attr_clear(pg, PTE_EXEC);
PMAPCOUNT(exec_uncached_clear_modify);
} else {
DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
pg->phys_addr));
pmap_syncicache(pg->phys_addr, NBPG);
PMAPCOUNT(exec_synced_clear_modify);
}
}
return (rv & ptebit) != 0;

pmap_subr.c
@ -1,4 +1,4 @@
/* $NetBSD: pmap_subr.c,v 1.4 2002/08/14 14:25:15 matt Exp $ */
/* $NetBSD: pmap_subr.c,v 1.5 2002/08/18 19:18:33 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@ -36,10 +36,12 @@
*/

#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/device.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>
@ -54,6 +56,22 @@
#define MFMSR() mfmsr()
#define MTMSR(psl) __asm __volatile("sync; mtmsr %0; isync" :: "r"(psl))

#ifdef PMAPCOUNTERS
struct evcnt pmap_evcnt_zeroed_pages =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
"pages zeroed");
struct evcnt pmap_evcnt_copied_pages =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
"pages copied");
struct evcnt pmap_evcnt_idlezeroed_pages =
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
"pages idle zeroed");
#ifdef PPC_MPC6XX
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
#endif
#endif /* PMAPCOUNTERS */

/*
* This file uses a sick & twisted method to deal with the common pmap
* operations of zero'ing, copying, and syncing the page with the
@ -73,7 +91,7 @@
* However while relocation is off, we MUST not access the kernel stack in
* any manner since it will probably no longer be mapped. This means no
* calls while relocation is off. The AltiVEC routines need to handle the
* MSR fiddling themselves so they save things on the stack.
* MSR fiddling themselves so they can save things on the stack.
*/

/*
@ -85,7 +103,7 @@ pmap_zero_page(paddr_t pa)
size_t linewidth;
register_t msr;

#if defined(PPC_MPC6XX) && !defined(OLDPMAP)
#if defined(PPC_MPC6XX)
{
/*
* If we are zeroing this page, we must clear the EXEC-ness
@ -94,9 +112,17 @@ pmap_zero_page(paddr_t pa)
struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
KDASSERT(pg != NULL);
KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
#ifdef PMAPCOUNTERS
if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
pmap_evcnt_exec_uncached_zero_page.ev_count++;
}
#endif
pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
}
#endif
#ifdef PMAPCOUNTERS
pmap_evcnt_zeroed_pages.ev_count++;
#endif
#ifdef ALTIVEC
if (pmap_use_altivec) {
vzeropage(pa);
@ -153,10 +179,10 @@ pmap_copy_page(paddr_t src, paddr_t dst)
{
const register_t *sp;
register_t *dp;
size_t i;
register_t msr;
size_t i;

#if defined(PPC_MPC6XX) && !defined(OLDPMAP)
#if defined(PPC_MPC6XX)
{
/*
* If we are copying to the destination page, we must clear
@ -166,9 +192,17 @@ pmap_copy_page(paddr_t src, paddr_t dst)
struct vm_page *pg = PHYS_TO_VM_PAGE(dst);
KDASSERT(pg != NULL);
KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
#ifdef PMAPCOUNTERS
if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
pmap_evcnt_exec_uncached_copy_page.ev_count++;
}
#endif
pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
}
#endif
#ifdef PMAPCOUNTERS
pmap_evcnt_copied_pages.ev_count++;
#endif
#ifdef ALTIVEC
if (pmap_use_altivec) {
vcopypage(dst, src);
@ -278,6 +312,9 @@ pmap_pageidlezero(paddr_t pa)
return FALSE;
*dp++ = 0;
}
#ifdef PMAPCOUNTERS
pmap_evcnt_idlezeroed_pages.ev_count++;
#endif
return TRUE;
}
#endif
@ -303,5 +340,9 @@ pmap_pageidlezero(paddr_t pa)
* Restore relocation (MMU on).
*/
MTMSR(msr);
#ifdef PMAPCOUNTERS
if (rv)
pmap_evcnt_idlezeroed_pages.ev_count++;
#endif
return rv;
}
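
A side note on the comment block in pmap_subr.c about running with relocation off: a hypothetical sketch of that MSR pattern follows, assuming the PSL_EE and PSL_DR bits from <powerpc/psl.h> and the MFMSR()/MTMSR() macros shown in the diff above; the real routines in pmap_subr.c additionally handle cache-line sizes and the AltiVec paths, which this sketch ignores.

/*
 * Hypothetical sketch (not part of the commit): zero a page through its
 * physical address with data relocation turned off.
 */
static void
phys_zero_page_sketch(paddr_t pa)
{
	long *dp = (long *) pa;	/* physical address used directly */
	register_t msr;
	size_t i;

	msr = MFMSR();
	/*
	 * Interrupts and data relocation off; until the MSR is restored,
	 * no function calls, since the kernel stack may be unmapped.
	 */
	MTMSR(msr & ~(PSL_EE|PSL_DR));
	for (i = 0; i < NBPG / sizeof(long); i++)
		*dp++ = 0;
	MTMSR(msr);		/* relocation and interrupts back on */
}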