From 31fa5c6f7838215ec9dae06e619b19822d6aa25f Mon Sep 17 00:00:00 2001
From: thorpej
Date: Wed, 28 Jul 1999 01:07:52 +0000
Subject: [PATCH] Don't inline pmap_kenter_pa(). It doesn't buy us much to do
 so, and it's nice to have it show up in stack traces.

---
 sys/arch/i386/i386/pmap.c     | 34 +++++++++++++++++++++++++++++++++-
 sys/arch/i386/include/pmap.h  | 35 +----------------------------------
 sys/arch/pc532/include/pmap.h | 35 +----------------------------------
 sys/arch/pc532/pc532/pmap.c   | 34 +++++++++++++++++++++++++++++++++-
 4 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 52aab3157e9a..d2d6a79935ff 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.74 1999/07/18 21:33:20 chs Exp $	*/
+/*	$NetBSD: pmap.c,v 1.75 1999/07/28 01:07:58 thorpej Exp $	*/
 
 /*
  *
@@ -3542,6 +3542,38 @@ enter_now:
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
+/*
+ * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
+ *
+ * => no need to lock anything, assume va is already allocated
+ * => should be faster than normal pmap enter function
+ */
+
+void
+pmap_kenter_pa(va, pa, prot)
+	vaddr_t va;
+	paddr_t pa;
+	vm_prot_t prot;
+{
+	struct pmap *pm = pmap_kernel();
+	pt_entry_t *pte, opte;
+	int s;
+
+	s = splimp();
+	simple_lock(&pm->pm_obj.vmobjlock);
+	pm->pm_stats.resident_count++;
+	pm->pm_stats.wired_count++;
+	simple_unlock(&pm->pm_obj.vmobjlock);
+	splx(s);
+
+	pte = vtopte(va);
+	opte = *pte;
+	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+	    PG_V | pmap_pg_g;	/* zap! */
+	if (pmap_valid_entry(opte))
+		pmap_update_pg(va);
+}
+
 /*
  * pmap_growkernel: increase usage of KVM space
  *
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index f4490419071b..52481e20a8e2 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.41 1999/07/18 21:33:21 chs Exp $	*/
+/*	$NetBSD: pmap.h,v 1.42 1999/07/28 01:07:59 thorpej Exp $	*/
 
 /*
  *
@@ -391,7 +391,6 @@ void		pmap_activate __P((struct proc *));
 void		pmap_bootstrap __P((vaddr_t));
 boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
 void		pmap_deactivate __P((struct proc *));
-static void	pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
 static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
 void		pmap_page_remove __P((struct vm_page *));
 static void	pmap_protect __P((struct pmap *, vaddr_t,
@@ -495,38 +494,6 @@ pmap_protect(pmap, sva, eva, prot)
 	}
 }
 
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-__inline static void
-pmap_kenter_pa(va, pa, prot)
-	vaddr_t va;
-	paddr_t pa;
-	vm_prot_t prot;
-{
-	struct pmap *pm = pmap_kernel();
-	pt_entry_t *pte, opte;
-	int s;
-
-	s = splimp();
-	simple_lock(&pm->pm_obj.vmobjlock);
-	pm->pm_stats.resident_count++;
-	pm->pm_stats.wired_count++;
-	simple_unlock(&pm->pm_obj.vmobjlock);
-	splx(s);
-
-	pte = vtopte(va);
-	opte = *pte;
-	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
-	    PG_V | pmap_pg_g;	/* zap! */
-	if (pmap_valid_entry(opte))
-		pmap_update_pg(va);
-}
-
 vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
 
 #if defined(USER_LDT)
diff --git a/sys/arch/pc532/include/pmap.h b/sys/arch/pc532/include/pmap.h
index d5a3b539ea8c..f21d0d3f36b5 100644
--- a/sys/arch/pc532/include/pmap.h
+++ b/sys/arch/pc532/include/pmap.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.26 1999/07/18 21:33:22 chs Exp $	*/
+/*	$NetBSD: pmap.h,v 1.27 1999/07/28 01:07:53 thorpej Exp $	*/
 
 /*
  *
@@ -371,7 +371,6 @@ void		pmap_activate __P((struct proc *));
 void		pmap_bootstrap __P((vaddr_t));
 boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
 void		pmap_deactivate __P((struct proc *));
-static void	pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
 static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
 void		pmap_page_remove __P((struct vm_page *));
 static void	pmap_protect __P((struct pmap *, vaddr_t,
@@ -463,38 +462,6 @@ pmap_protect(pmap, sva, eva, prot)
 	}
 }
 
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-__inline static void
-pmap_kenter_pa(va, pa, prot)
-	vaddr_t va;
-	paddr_t pa;
-	vm_prot_t prot;
-{
-	struct pmap *pm = pmap_kernel();
-	pt_entry_t *pte, opte;
-	int s;
-
-	s = splimp();
-	simple_lock(&pm->pm_obj.vmobjlock);
-	pm->pm_stats.resident_count++;
-	pm->pm_stats.wired_count++;
-	simple_unlock(&pm->pm_obj.vmobjlock);
-	splx(s);
-
-	pte = vtopte(va);
-	opte = *pte;
-	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
-	    PG_V;	/* zap! */
-	if (pmap_valid_entry(opte))
-		pmap_update_pg(va);
-}
-
 vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
 
 #endif /* _KERNEL */
diff --git a/sys/arch/pc532/pc532/pmap.c b/sys/arch/pc532/pc532/pmap.c
index ac4847c4a326..55c4782799db 100644
--- a/sys/arch/pc532/pc532/pmap.c
+++ b/sys/arch/pc532/pc532/pmap.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.40 1999/07/18 21:33:22 chs Exp $	*/
+/*	$NetBSD: pmap.c,v 1.41 1999/07/28 01:07:52 thorpej Exp $	*/
 
 /*
  *
@@ -3307,6 +3307,38 @@ enter_now:
 	cinv(ia, 0);
 }
 
+/*
+ * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
+ *
+ * => no need to lock anything, assume va is already allocated
+ * => should be faster than normal pmap enter function
+ */
+
+void
+pmap_kenter_pa(va, pa, prot)
+	vaddr_t va;
+	paddr_t pa;
+	vm_prot_t prot;
+{
+	struct pmap *pm = pmap_kernel();
+	pt_entry_t *pte, opte;
+	int s;
+
+	s = splimp();
+	simple_lock(&pm->pm_obj.vmobjlock);
+	pm->pm_stats.resident_count++;
+	pm->pm_stats.wired_count++;
+	simple_unlock(&pm->pm_obj.vmobjlock);
+	splx(s);
+
+	pte = vtopte(va);
+	opte = *pte;
+	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+	    PG_V;	/* zap! */
+	if (pmap_valid_entry(opte))
+		pmap_update_pg(va);
+}
+
 /*
  * pmap_growkernel: increase usage of KVM space
  *
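
For illustration, the shape of this change in miniature: a minimal sketch, assuming a hypothetical header foo.h, source file foo.c, and function widget_count(), none of which exist in the NetBSD tree. Only __P() (the prototype macro from <sys/cdefs.h>) is real; the K&R definition style matches the code above.

/*
 * Illustrative only.  Before: defined __inline static in foo.h.
 * The body is expanded into every caller, so the function has no
 * call frame of its own and never appears in a stack trace.
 */
__inline static int
widget_count(n)
	int n;
{

	return (n + 1);
}

/*
 * After: foo.h carries only the prototype ...
 */
int	widget_count __P((int));

/*
 * ... and foo.c carries the single out-of-line definition, which
 * gets a real call frame and shows up in stack traces and profiles.
 */
int
widget_count(n)
	int n;
{

	return (n + 1);
}

The cost is one extra function call per use; as the commit message notes, inlining pmap_kenter_pa() "doesn't buy us much", so trace visibility wins.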