From c44c5152c9527a1d968cee383063d7494fa2090c Mon Sep 17 00:00:00 2001
From: tsutsui <tsutsui@NetBSD.org>
Date: Wed, 20 Sep 2006 09:35:57 +0000
Subject: [PATCH] - move internal function declarations from pmap_pvt.h to
 pmap.c itself

- make some local functions static inline
- KNF a bit
---
 sys/arch/sun3/sun3x/pmap.c     | 378 +++++++++++++++++++--------------
 sys/arch/sun3/sun3x/pmap_pvt.h |  43 +---
 2 files changed, 219 insertions(+), 202 deletions(-)

diff --git a/sys/arch/sun3/sun3x/pmap.c b/sys/arch/sun3/sun3x/pmap.c
index 5b7299aac147..61debfe714a6 100644
--- a/sys/arch/sun3/sun3x/pmap.c
+++ b/sys/arch/sun3/sun3x/pmap.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.91 2006/09/16 03:35:50 tsutsui Exp $	*/
+/*	$NetBSD: pmap.c,v 1.92 2006/09/20 09:35:57 tsutsui Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -112,7 +112,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.91 2006/09/16 03:35:50 tsutsui Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.92 2006/09/20 09:35:57 tsutsui Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pmap_debug.h"
@@ -269,8 +269,10 @@ static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
  * Flags used to mark the safety/availability of certain operations or
  * resources.
  */
-static boolean_t bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
-int tmp_vpages_inuse;	/* Temporary virtual pages are in use */
+/* Safe to use pmap_bootstrap_alloc(). */
+static boolean_t bootstrap_alloc_enabled = FALSE;
+/* Temporary virtual pages are in use */
+int tmp_vpages_inuse;
 
 /*
  * XXX: For now, retain the traditional variables that were
@@ -374,10 +376,10 @@ static INLINE void *mmu_ptov(paddr_t);
 static INLINE paddr_t mmu_vtop(void *);
 
 #if 0
-static INLINE a_tmgr_t * mmuA2tmgr(mmu_long_dte_t *);
+static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
 #endif /* 0 */
-static INLINE b_tmgr_t * mmuB2tmgr(mmu_short_dte_t *);
-static INLINE c_tmgr_t * mmuC2tmgr(mmu_short_pte_t *);
+static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
+static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);
 static INLINE pv_t *pa2pv(paddr_t);
 static INLINE int pteidx(mmu_short_pte_t *);
@@ -399,7 +401,7 @@ mmu_ptov(paddr_t pa)
 	if ((va < KERNBASE) || (va >= virtual_contig_end))
 		panic("mmu_ptov");
 #endif
-	return ((void*)va);
+	return (void *)va;
 }
 
 static INLINE paddr_t
@@ -412,7 +414,7 @@ mmu_vtop(void *vva)
 	if ((va < KERNBASE) || (va >= virtual_contig_end))
 		panic("mmu_vtop");
 #endif
-	return (va - KERNBASE);
+	return va - KERNBASE;
 }
 
 /*
@@ -443,7 +445,7 @@ mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
 	if ((idx < 0) || (idx >= NUM_A_TABLES))
 		panic("mmuA2tmgr");
 #endif
-	return (&Atmgrbase[idx]);
+	return &Atmgrbase[idx];
 }
 #endif /* 0 */
 
@@ -458,7 +460,7 @@ mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
 	if ((idx < 0) || (idx >= NUM_B_TABLES))
 		panic("mmuB2tmgr");
 #endif
-	return (&Btmgrbase[idx]);
+	return &Btmgrbase[idx];
 }
 
 /* mmuC2tmgr			INTERNAL
@@ -477,7 +479,7 @@ mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
 	if ((idx < 0) || (idx >= NUM_C_TABLES))
 		panic("mmuC2tmgr");
 #endif
-	return (&Ctmgrbase[idx]);
+	return &Ctmgrbase[idx];
 }
 
 /* This is now a function call below.
@@ -519,7 +521,8 @@ pa2pv(paddr_t pa)
 static INLINE int
 pteidx(mmu_short_pte_t *pte)
 {
-	return (pte - kernCbase);
+
+	return pte - kernCbase;
 }
 
 /*
@@ -541,7 +544,7 @@ current_pmap(void)
 		pmap = vm_map_pmap(map);
 	}
 
-	return (pmap);
+	return pmap;
 }
 
 
@@ -550,18 +553,58 @@ current_pmap(void)
  * all function calls.
  * *************************************************************************/
 
-/** Internal functions
- ** Most functions used only within this module are defined in
- ** pmap_pvt.h (why not here if used only here?)
- **/
+/*
+ * Internal functions
+ */
+a_tmgr_t *get_a_table(void);
+b_tmgr_t *get_b_table(void);
+c_tmgr_t *get_c_table(void);
+int free_a_table(a_tmgr_t *, boolean_t);
+int free_b_table(b_tmgr_t *, boolean_t);
+int free_c_table(c_tmgr_t *, boolean_t);
+
+void pmap_bootstrap_aalign(int);
+void pmap_alloc_usermmu(void);
+void pmap_alloc_usertmgr(void);
+void pmap_alloc_pv(void);
+void pmap_init_a_tables(void);
+void pmap_init_b_tables(void);
+void pmap_init_c_tables(void);
+void pmap_init_pv(void);
+void pmap_clear_pv(paddr_t, int);
+static INLINE boolean_t is_managed(paddr_t);
+
+boolean_t pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
+boolean_t pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
+boolean_t pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
+void pmap_remove_pte(mmu_short_pte_t *);
+
+void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
+static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
+static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
+static INLINE boolean_t pmap_extract_kernel(vaddr_t, paddr_t *);
+vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
+static INLINE int pmap_dereference(pmap_t);
+
+boolean_t pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
+    mmu_short_pte_t **, int *, int *, int *);
+void pmap_bootstrap_copyprom(void);
+void pmap_takeover_mmu(void);
+void pmap_bootstrap_setprom(void);
 static void pmap_page_upload(void);
 
+#ifdef PMAP_DEBUG
+/* Debugging function definitions */
+void pv_list(paddr_t, int);
+#endif /* PMAP_DEBUG */
+
 /** Interface functions
  ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
  **   defined.
+ ** The new UVM doesn't require them so now INTERNAL.
  **/
-void pmap_pinit(pmap_t);
-void pmap_release(pmap_t);
+static INLINE void pmap_pinit(pmap_t);
+static INLINE void pmap_release(pmap_t);
 
 /********************************** CODE ********************************
  * Functions that are called from other parts of the kernel are labeled *
@@ -740,12 +783,12 @@ pmap_bootstrap(vaddr_t nextva)
 	 */
 	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
 		kernAbase[i].attr.raw =
-			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
+		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
 		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
 
-		for (j=0; j < MMU_B_TBL_SIZE; j++) {
-			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
-				| MMU_DT_SHORT;
+		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
+			kernBbase[b + j].attr.raw =
+			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
 			c += MMU_C_TBL_SIZE;
 		}
 		b += MMU_B_TBL_SIZE;
@@ -830,11 +873,13 @@ pmap_bootstrap(vaddr_t nextva)
 	 * It is non-cached, mostly due to paranoia.
 	 */
 	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
-	va += PAGE_SIZE; pa += PAGE_SIZE;
+	va += PAGE_SIZE;
+	pa += PAGE_SIZE;
 
 	/* Next page is used as the temporary stack. */
 	pmap_enter_kernel(va, pa, VM_PROT_ALL);
-	va += PAGE_SIZE; pa += PAGE_SIZE;
+	va += PAGE_SIZE;
+	pa += PAGE_SIZE;
 
 	/*
 	 * Map all of the kernel's text segment as read-only and cacheable.
@@ -880,6 +925,7 @@ pmap_bootstrap(vaddr_t nextva)
 void
 pmap_alloc_usermmu(void)
 {
+
 	/* XXX: Moved into caller.
 	 */
 }
 
@@ -912,13 +958,12 @@ pmap_alloc_pv(void)
 	total_mem = 0;
 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
 		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
-		total_mem += avail_mem[i].pmem_end -
-			avail_mem[i].pmem_start;
+		total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
 		if (avail_mem[i].pmem_next == NULL)
 			break;
 	}
-	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
-		m68k_btop(total_phys_mem));
+	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
+	    m68k_btop(total_phys_mem));
 }
 
 /* pmap_alloc_usertmgr			INTERNAL
@@ -933,19 +978,19 @@ pmap_alloc_usertmgr(void)
 	/* Allocate user MMU table managers */
 	/* It would be a lot simpler to just make these BSS, but */
 	/* we may want to change their size at boot time... -j */
-	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
-		* NUM_A_TABLES);
-	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
-		* NUM_B_TABLES);
-	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
-		* NUM_C_TABLES);
+	Atmgrbase =
+	    (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
+	Btmgrbase =
+	    (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
+	Ctmgrbase =
+	    (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);
 
 	/*
 	 * Allocate PV list elements for the physical to virtual
 	 * mapping system.
 	 */
-	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
-		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
+	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
+	    (NUM_USER_PTES + NUM_KERN_PTES));
 }
 
 /* pmap_bootstrap_copyprom()			INTERNAL
@@ -990,7 +1035,7 @@ pmap_bootstrap_copyprom(void)
 	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
 	kpte = &kernCbase[i];
 	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
-	for (i = (len-1); i < len; i++) {
+	for (i = (len - 1); i < len; i++) {
 		kpte[i].attr.raw = mon_ctbl[i];
 	}
 }
@@ -1022,7 +1067,7 @@ pmap_bootstrap_setprom(void)
 	extern struct mmu_rootptr mon_crp;
 	int i;
 
-	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
+	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
 	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
 		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
 		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
@@ -1040,6 +1085,7 @@ pmap_bootstrap_setprom(void)
 void
 pmap_init(void)
 {
+
 	/** Initialize the manager pools **/
 	TAILQ_INIT(&a_pool);
 	TAILQ_INIT(&b_pool);
@@ -1090,8 +1136,8 @@ pmap_init_a_tables(void)
 		 * or kernel, mapping.  This ensures that every process has
 		 * the kernel mapped in the top part of its address space.
 		 */
-		memcpy(a_tbl->at_dtbl, kernAbase, MMU_A_TBL_SIZE *
-			sizeof(mmu_long_dte_t));
+		memcpy(a_tbl->at_dtbl, kernAbase,
+		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));
 
 		/*
 		 * Finally, insert the manager into the A pool,
@@ -1125,7 +1171,7 @@ pmap_init_b_tables(void)
 		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
 
 		/* Invalidate every descriptor in the table */
-		for (j=0; j < MMU_B_TBL_SIZE; j++)
+		for (j = 0; j < MMU_B_TBL_SIZE; j++)
 			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
 
 		/* Insert the manager into the B pool */
@@ -1158,7 +1204,7 @@ pmap_init_c_tables(void)
 		/* Assign it the next available MMU C table from the pool */
 		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
 
-		for (j=0; j < MMU_C_TBL_SIZE; j++)
+		for (j = 0; j < MMU_C_TBL_SIZE; j++)
 			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
 
 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
@@ -1181,6 +1227,30 @@ pmap_init_pv(void)
 	}
 }
 
+/* is_managed			INTERNAL
+ **
+ * Determine if the given physical address is managed by the PV system.
+ * Note that this logic assumes that no one will ask for the status of
+ * addresses which lie in-between the memory banks on the 3/80.  If they
+ * do so, it will falsely report that it is managed.
+ *
+ * Note: A "managed" address is one that was reported to the VM system as
+ * a "usable page" during system startup.  As such, the VM system expects the
+ * pmap module to keep an accurate track of the useage of those pages.
+ * Any page not given to the VM system at startup does not exist (as far as
+ * the VM system is concerned) and is therefore "unmanaged."  Examples are
+ * those pages which belong to the ROM monitor and the memory allocated before
+ * the VM system was started.
+ */
+static INLINE boolean_t
+is_managed(paddr_t pa)
+{
+	if (pa >= avail_start && pa < avail_end)
+		return TRUE;
+	else
+		return FALSE;
+}
+
 /* get_a_table			INTERNAL
  **
  * Retrieve and return a level A table for use in a user map.
@@ -1337,7 +1407,7 @@ free_a_table(a_tmgr_t *a_tbl, boolean_t relink)
 	at_wired = a_tbl->at_wcnt;
 	if (a_tbl->at_ecnt) {
 		dte = a_tbl->at_dtbl;
-		for (i=0; i < MMU_TIA(KERNBASE); i++) {
+		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
 			/*
 			 * If a table entry points to a valid B table, free
 			 * it and its children.
@@ -1402,7 +1472,7 @@ free_b_table(b_tmgr_t *b_tbl, boolean_t relink)
 	bt_wired = b_tbl->bt_wcnt;
 	if (b_tbl->bt_ecnt) {
 		dte = b_tbl->bt_dtbl;
-		for (i=0; i < MMU_B_TBL_SIZE; i++) {
+		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
 			if (MMU_VALID_DT(dte[i])) {
 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
 				c_tbl = mmuC2tmgr(dtbl);
@@ -1445,7 +1515,7 @@ free_c_table(c_tmgr_t *c_tbl, boolean_t relink)
 	removed_cnt = 0;
 	ct_wired = c_tbl->ct_wcnt;
 	if (c_tbl->ct_ecnt) {
-		for (i=0; i < MMU_C_TBL_SIZE; i++) {
+		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
 			c_pte = &c_tbl->ct_dtbl[i];
 			if (MMU_VALID_DT(*c_pte)) {
 				if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
@@ -1535,7 +1605,7 @@ pmap_remove_pte(mmu_short_pte_t *pte)
 	 * which is used on the MMU ptes.
 	 */
 
-pv_not_found:
+ pv_not_found:
 		pv->pv_flags |= (u_short) pte->attr.raw;
 	}
 	pte->attr.raw = MMU_DT_INVALID;
@@ -1575,7 +1645,8 @@ pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
 		return FALSE; /* No. Return unknown. */
 	/* Yes. Extract B table from the A table. */
 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
-	/* Does the B table have a valid C table
+	/*
+	 * Does the B table have a valid C table
 	 * under the corresponding table entry?
 	 */
 	*b_idx = MMU_TIB(va);
@@ -1587,7 +1658,7 @@ pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
 	*pte_idx = MMU_TIC(va);
 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
 
-	return	TRUE;
+	return TRUE;
 }
 
 /* pmap_enter			INTERFACE
@@ -1645,8 +1716,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	 *
 	 * Extract sun3x specific flags from the physical address.
	 */
-	mapflags  = (pa & ~MMU_PAGE_MASK);
-	pa       &= MMU_PAGE_MASK;
+	mapflags = (pa & ~MMU_PAGE_MASK);
+	pa &= MMU_PAGE_MASK;
 
 	/*
 	 * Determine if the physical address being mapped is on-board RAM.
@@ -1890,7 +1961,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 			 * bits on the PTE.
 			 */
 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
-				| MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
+			    | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
 		} else {
 			/* No, remove the old entry */
 			pmap_remove_pte(c_pte);
@@ -2034,7 +2105,7 @@ pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
 	/*
 	 * Calculate the index of the PTE being modified.
 	 */
-	pte_idx = (u_long) m68k_btop(va - KERNBASE);
+	pte_idx = (u_long)m68k_btop(va - KERNBASE);
 
 	/* This array is traditionally named "Sysmap" */
 	pte = &kernCbase[pte_idx];
@@ -2143,7 +2214,44 @@ pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
 		sz -= PAGE_SIZE;
 	} while (sz > 0);
 	pmap_update(pmap_kernel());
-	return(va);
+	return va;
+}
+
+/* pmap_protect_kernel			INTERNAL
+ **
+ * Apply the given protection code to a kernel address range.
+ */
+static INLINE void
+pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
+{
+	vaddr_t va;
+	mmu_short_pte_t *pte;
+
+	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
+	for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
+		if (MMU_VALID_DT(*pte)) {
+			switch (prot) {
+			case VM_PROT_ALL:
+				break;
+			case VM_PROT_EXECUTE:
+			case VM_PROT_READ:
+			case VM_PROT_READ|VM_PROT_EXECUTE:
+				pte->attr.raw |= MMU_SHORT_PTE_WP;
+				break;
+			case VM_PROT_NONE:
+				/* this is an alias for 'pmap_remove_kernel' */
+				pmap_remove_pte(pte);
+				break;
+			default:
+				break;
+			}
+			/*
+			 * since this is the kernel, immediately flush any cached
+			 * descriptors for this address.
+			 */
+			TBIS(va);
+		}
+	}
 }
 
 /* pmap_protect			INTERFACE
@@ -2274,43 +2382,6 @@ pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
 	}
 }
 
-/* pmap_protect_kernel			INTERNAL
- **
- * Apply the given protection code to a kernel address range.
- */
-void
-pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
-{
-	vaddr_t va;
-	mmu_short_pte_t *pte;
-
-	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
-	for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
-		if (MMU_VALID_DT(*pte)) {
-			switch (prot) {
-			case VM_PROT_ALL:
-				break;
-			case VM_PROT_EXECUTE:
-			case VM_PROT_READ:
-			case VM_PROT_READ|VM_PROT_EXECUTE:
-				pte->attr.raw |= MMU_SHORT_PTE_WP;
-				break;
-			case VM_PROT_NONE:
-				/* this is an alias for 'pmap_remove_kernel' */
-				pmap_remove_pte(pte);
-				break;
-			default:
-				break;
-			}
-			/*
-			 * since this is the kernel, immediately flush any cached
-			 * descriptors for this address.
-			 */
-			TBIS(va);
-		}
-	}
-}
-
 /* pmap_unwire			INTERFACE
 **
 * Clear the wired attribute of the specified page.
@@ -2381,6 +2452,7 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 void
 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
 {
+
 	/* not implemented. */
 }
 
@@ -2411,10 +2483,10 @@ pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
 
 	/* Map pages as non-cacheable to avoid cache polution? */
 	pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
-	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
+	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
 
 	/* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
-	copypage((char *) srcva, (char *) dstva);
+	copypage((char *)srcva, (char *)dstva);
 
 	pmap_kremove(srcva, PAGE_SIZE);
 	pmap_kremove(dstva, PAGE_SIZE);
@@ -2446,10 +2518,10 @@ pmap_zero_page(paddr_t dstpa)
 #endif
 
 	/* The comments in pmap_copy_page() above apply here also. */
-	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
+	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
 
 	/* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
-	zeropage((char *) dstva);
+	zeropage((char *)dstva);
 
 	pmap_kremove(dstva, PAGE_SIZE);
 #ifdef DIAGNOSTIC
@@ -2467,9 +2539,25 @@ pmap_zero_page(paddr_t dstpa)
 void
 pmap_collect(pmap_t pmap)
 {
+
 	/* XXX - todo... */
 }
 
+/* pmap_pinit			INTERNAL
+ **
+ * Initialize a pmap structure.
+ */
+static INLINE void
+pmap_pinit(pmap_t pmap)
+{
+
+	memset(pmap, 0, sizeof(struct pmap));
+	pmap->pm_a_tmgr = NULL;
+	pmap->pm_a_phys = kernAphys;
+	pmap->pm_refcount = 1;
+	simple_lock_init(&pmap->pm_lock);
+}
+
 /* pmap_create			INTERFACE
 **
 * Create and return a pmap structure.
@@ -2484,21 +2572,7 @@ pmap_create(void)
 	return pmap;
 }
 
-/* pmap_pinit			INTERNAL
- **
- * Initialize a pmap structure.
- */
-void
-pmap_pinit(pmap_t pmap)
-{
-	memset(pmap, 0, sizeof(struct pmap));
-	pmap->pm_a_tmgr = NULL;
-	pmap->pm_a_phys = kernAphys;
-	pmap->pm_refcount = 1;
-	simple_lock_init(&pmap->pm_lock);
-}
-
-/* pmap_release			INTERFACE
+/* pmap_release			INTERNAL
 **
 * Release any resources held by the given pmap.
 *
@@ -2506,9 +2580,10 @@ pmap_pinit(pmap_t pmap)
 * necessarily mean for the pmap structure to be deallocated,
 * as in pmap_destroy.
 */
-void
+static INLINE void
 pmap_release(pmap_t pmap)
 {
+
 	/*
 	 * As long as the pmap contains no mappings,
 	 * which always should be the case whenever
@@ -2561,7 +2636,7 @@ pmap_reference(pmap_t pmap)
 * Decrease the reference count on the given pmap
 * by one and return the current count.
 */
-int
+static INLINE int
pmap_dereference(pmap_t pmap)
 {
 	int rtn;
@@ -2582,6 +2657,7 @@ pmap_dereference(pmap_t pmap)
 void
 pmap_destroy(pmap_t pmap)
 {
+
 	if (pmap_dereference(pmap) == 0) {
 		pmap_release(pmap);
 		pool_put(&pmap_pmap_pool, pmap);
@@ -2763,7 +2839,7 @@ pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
 		 * in ct_va.  We then increment this address by a page for
 		 * every slot skipped until we reach the PTE.
 		 */
-		va =  (*tbl)->ct_va;
+		va = (*tbl)->ct_va;
 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
 	} else {
 		/*
@@ -2861,6 +2937,23 @@ pmap_clear_pv(paddr_t pa, int flag)
 	}
 }
 
+/* pmap_extract_kernel		INTERNAL
+ **
+ * Extract a translation from the kernel address space.
+ */
+static INLINE boolean_t
+pmap_extract_kernel(vaddr_t va, paddr_t *pap)
+{
+	mmu_short_pte_t *pte;
+
+	pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE)];
+	if (!MMU_VALID_DT(*pte))
+		return FALSE;
+	if (pap != NULL)
+		*pap = MMU_PTE_PA(*pte);
+	return TRUE;
+}
+
 /* pmap_extract			INTERFACE
 **
 * Return the physical address mapped by the virtual address
@@ -2890,24 +2983,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
 
 	if (pap != NULL)
 		*pap = MMU_PTE_PA(*c_pte);
-	return (TRUE);
-}
-
-/* pmap_extract_kernel		INTERNAL
- **
- * Extract a translation from the kernel address space.
- */
-boolean_t
-pmap_extract_kernel(vaddr_t va, paddr_t *pap)
-{
-	mmu_short_pte_t *pte;
-
-	pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
-	if (!MMU_VALID_DT(*pte))
-		return (FALSE);
-	if (pap != NULL)
-		*pap = MMU_PTE_PA(*pte);
-	return (TRUE);
+	return TRUE;
 }
 
 /* pmap_remove_kernel		INTERNAL
@@ -2915,7 +2991,7 @@ pmap_extract_kernel(vaddr_t va, paddr_t *pap)
 **
 * Remove the mapping of a range of virtual addresses from the kernel map.
 * The arguments are already page-aligned.
 */
-void
+static INLINE void
 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
 {
 	int idx, eidx;
@@ -3352,7 +3428,7 @@ pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
 	idx = MMU_TIC(sva);
 	c_pte = &c_tbl->ct_dtbl[idx];
 
-	for (;sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
+	for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
 		if (MMU_VALID_DT(*c_pte)) {
 			if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
 				c_tbl->ct_wcnt--;
@@ -3382,30 +3458,6 @@ pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
 	return empty;
 }
 
-/* is_managed			INTERNAL
- **
- * Determine if the given physical address is managed by the PV system.
- * Note that this logic assumes that no one will ask for the status of
- * addresses which lie in-between the memory banks on the 3/80.  If they
- * do so, it will falsely report that it is managed.
- *
- * Note: A "managed" address is one that was reported to the VM system as
- * a "usable page" during system startup.  As such, the VM system expects the
- * pmap module to keep an accurate track of the useage of those pages.
- * Any page not given to the VM system at startup does not exist (as far as
- * the VM system is concerned) and is therefore "unmanaged."  Examples are
- * those pages which belong to the ROM monitor and the memory allocated before
- * the VM system was started.
- */
-boolean_t
-is_managed(paddr_t pa)
-{
-	if (pa >= avail_start && pa < avail_end)
-		return TRUE;
-	else
-		return FALSE;
-}
-
 /* pmap_bootstrap_alloc			INTERNAL
 **
 * Used internally for memory allocation at startup when malloc is not
@@ -3453,7 +3505,7 @@ pmap_bootstrap_aalign(int size)
 
 	off = virtual_avail & (size - 1);
 	if (off) {
-		(void) pmap_bootstrap_alloc(size - off);
+		(void)pmap_bootstrap_alloc(size - off);
 	}
 }
 
@@ -3470,11 +3522,11 @@ pmap_pa_exists(paddr_t pa)
 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
 		if ((pa >= avail_mem[i].pmem_start) &&
 		    (pa < avail_mem[i].pmem_end))
-			return (1);
+			return 1;
 		if (avail_mem[i].pmem_next == NULL)
 			break;
 	}
-	return (0);
+	return 0;
 }
 
 /* Called only from locore.s and pmap.c */
@@ -3521,6 +3573,7 @@ _pmap_switch(pmap_t pmap)
 void
 pmap_activate(struct lwp *l)
 {
+
 	if (l->l_proc == curproc) {
 		_pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
 	}
@@ -3534,6 +3587,7 @@ pmap_activate(struct lwp *l)
 void
 pmap_deactivate(struct lwp *l)
 {
+
 	/* Nothing to do. */
 }
 
@@ -3570,6 +3624,7 @@ pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
 void
 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
 {
+
 	*vstart = virtual_avail;
 	*vend = virtual_end;
 }
@@ -3714,6 +3769,7 @@ set_pte(vaddr_t va, u_int pte)
 void
 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
 {
+
 	(void)cachectl1(0x80000004, va, len, p);
 }
 
diff --git a/sys/arch/sun3/sun3x/pmap_pvt.h b/sys/arch/sun3/sun3x/pmap_pvt.h
index a271bfa6dcc3..a63bcdea3eda 100644
--- a/sys/arch/sun3/sun3x/pmap_pvt.h
+++ b/sys/arch/sun3/sun3x/pmap_pvt.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_pvt.h,v 1.13 2005/12/11 12:19:27 christos Exp $	*/
+/*	$NetBSD: pmap_pvt.h,v 1.14 2006/09/20 09:35:57 tsutsui Exp $	*/
 
 /*-
  * Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -166,46 +166,7 @@ struct pmap_physmem_struct {
 	struct pmap_physmem_struct *pmem_next;	/* Next block of memory */
 };
 
-/* Internal function definitions.
- */
-a_tmgr_t *get_a_table(void);
-b_tmgr_t *get_b_table(void);
-c_tmgr_t *get_c_table(void);
-int free_a_table(a_tmgr_t *, boolean_t);
-int free_b_table(b_tmgr_t *, boolean_t);
-int free_c_table(c_tmgr_t *, boolean_t);
-void pmap_bootstrap_aalign(int);
-void pmap_alloc_usermmu(void);
-void pmap_alloc_usertmgr(void);
-void pmap_alloc_pv(void);
-void pmap_init_a_tables(void);
-void pmap_init_b_tables(void);
-void pmap_init_c_tables(void);
-void pmap_init_pv(void);
-void pmap_clear_pv(paddr_t, int);
-boolean_t pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
-boolean_t pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
-boolean_t pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
-void pmap_remove_pte(mmu_short_pte_t *);
-void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
-void pmap_remove_kernel(vaddr_t, vaddr_t);
-void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
-boolean_t pmap_extract_kernel(vaddr_t, paddr_t *);
-vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
-void pmap_pinit(pmap_t);
-int pmap_dereference(pmap_t);
-boolean_t is_managed(paddr_t);
-boolean_t pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
-	mmu_short_pte_t **, int *, int *, int *);
-void pmap_bootstrap_copyprom(void);
-void pmap_takeover_mmu(void);
-void pmap_bootstrap_setprom(void);
-
-#ifdef PMAP_DEBUG
-/* Debugging function definitions */
-void pv_list(paddr_t, int);
-#endif /* PMAP_DEBUG */
-
 /* These are defined in pmap.c */
 extern struct pmap_physmem_struct avail_mem[];
 
-#endif	/* _SUN3X_MYPMAP_H */
+#endif	/* _SUN3X_PMAPPVT_H */
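
Editor's note: a minimal C sketch of the pattern this patch applies. The names
below (table_is_empty, example_pvt.h) are hypothetical and are not the pmap
routines; the point is the mechanics. A helper used by only one translation
unit is declared in the .c file itself and given internal linkage with
static inline (pmap.c spells this "static INLINE", where INLINE is presumably
a macro that expands to inline, or to nothing for debugging builds), so the
symbol no longer leaks into the kernel-wide namespace and the compiler is free
to inline the call.

/*
 * Hypothetical example, not NetBSD code.  Before this change the
 * declaration would have lived in a private header (example_pvt.h)
 * and the function would have had external linkage; after it, both
 * the declaration and the definition live in the one file that
 * actually uses them.
 */
#include <stdio.h>

/*
 * Internal functions
 */
static inline int table_is_empty(const int *, int);

static inline int
table_is_empty(const int *tbl, int n)
{
	int i;

	/* A zero entry stands in for an invalid descriptor here. */
	for (i = 0; i < n; i++)
		if (tbl[i] != 0)
			return 0;
	return 1;	/* KNF: no parentheses around return values */
}

int
main(void)
{
	int dtbl[4] = { 0, 0, 0, 0 };

	printf("empty: %d\n", table_is_empty(dtbl, 4));
	return 0;
}

A side benefit of the internal declaration: with static linkage the compiler
can warn about helpers that become unused (-Wunused-function), which a
declaration in a shared header would suppress.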