- Add pmap_page_is_cacheable() to allow the bus_dma code to query the
  cacheable attribute of a mapping (see the usage sketch after this
  list).
- Honour PMAP_NC in pmap_enter() using NOCACHE, instead of DEVICE.
- No longer need to re-fetch the ptel in pmap_pa_unmap_kva(), as
  syncing the cache no longer risks causing a TLB miss.
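
To illustrate the intended consumer, here is a minimal sketch of a bus_dma-style sync routine built around the new query. Only pmap_page_is_cacheable() is from this commit; the wrapper function and the cache_purge_range() primitive are hypothetical stand-ins for the real sh5 bus_dma sync path:

/*
 * Hypothetical bus_dmamap_sync()-style fragment: if the pmap says
 * the buffer's mapping is not cacheable, there are no cache lines
 * to write back or invalidate, so the expensive purge is skipped.
 * cache_purge_range() is an assumed cache-maintenance primitive.
 */
static void
dma_sync_sketch(pmap_t pm, vaddr_t va, vsize_t len)
{

	if (!pmap_page_is_cacheable(pm, va))
		return;

	cache_purge_range(va, len);
}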
commit 05e55efada (parent 11255a6d43)
Author: scw
Date:   2002-09-28 10:53:57 +0000

2 changed files with 39 additions and 17 deletions

pmap.h:

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.9 2002/09/22 07:53:49 chs Exp $ */
+/* $NetBSD: pmap.h,v 1.10 2002/09/28 10:53:57 scw Exp $ */
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -111,7 +111,7 @@ pmap_remove_all(struct pmap *pmap)
 extern int pmap_initialized;
 extern u_int pmap_ipt_hash(vsid_t vsid, vaddr_t va);	/* See exception.S */
 extern vaddr_t pmap_map_device(paddr_t, u_int);
-extern void pmap_unmap_device(vaddr_t, u_int);
+extern int pmap_page_is_cacheable(pmap_t, vaddr_t);
 extern void (*__cpu_tlbinv)(pteh_t, pteh_t);
 extern void (*__cpu_tlbinv_cookie)(pteh_t, u_int);

pmap.c:

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.9 2002/09/22 20:46:32 scw Exp $ */
+/* $NetBSD: pmap.c,v 1.10 2002/09/28 10:53:58 scw Exp $ */
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -1097,6 +1097,31 @@ pmap_map_device(paddr_t pa, u_int len)
 	return (rv);
 }
 
+/*
+ * Returns non-zero if the specified mapping is cacheable
+ */
+int
+pmap_page_is_cacheable(pmap_t pm, vaddr_t va)
+{
+	struct pvo_entry *pvo;
+	ptel_t ptel = 0;
+	int s;
+
+	s = splhigh();
+	pvo = pmap_pvo_find_va(pm, va, NULL);
+	if (pvo != NULL)
+		ptel = pvo->pvo_ptel;
+	else
+	if (pm == pmap_kernel()) {
+		int idx = kva_to_iptidx(va);
+
+		if (idx >= 0 && pmap_kernel_ipt[idx])
+			ptel = pmap_kernel_ipt[idx];
+	}
+	splx(s);
+
+	return ((ptel & SH5_PTEL_CB_MASK) > SH5_PTEL_CB_NOCACHE);
+}
+
 void
 pmap_init(void)
 {
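
A note on the return expression in pmap_page_is_cacheable(): comparing the whole CB field against SH5_PTEL_CB_NOCACHE only classifies a mapping as cacheable if the cacheable modes (e.g. write-back) encode to larger values than both DEVICE and NOCACHE. Assuming that ordering, which the code implies but the commit does not state, the test can be restated as:

/*
 * Equivalent restatement of the cacheability test, assuming the SH5
 * PTEL CB encodings sort DEVICE and NOCACHE below the cacheable
 * modes, so "greater than NOCACHE" means "some cacheable mode".
 */
static __inline int
ptel_is_cacheable(ptel_t ptel)
{

	return ((ptel & SH5_PTEL_CB_MASK) > SH5_PTEL_CB_NOCACHE);
}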
@@ -1439,16 +1464,12 @@ pmap_pa_unmap_kva(vaddr_t kva, ptel_t *ptel)
 		ptel = &pmap_kernel_ipt[idx];
 	}
 
+	oldptel = *ptel;
+
 	/*
 	 * Synchronise the cache before deleting the mapping.
 	 */
-	pmap_cache_sync_unmap(kva, *ptel);
-
-	/*
-	 * Re-fetch the PTEL, as the cache-sync may have caused a TLB
-	 * miss which changed the Ref/Mod bits.
-	 */
-	oldptel = *((volatile ptel_t *)ptel);
+	pmap_cache_sync_unmap(kva, oldptel);
 
 	/*
 	 * Now safe to delete the mapping.
@@ -1680,6 +1701,7 @@ int
 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 {
 	struct pvo_head *pvo_head;
+	struct mem_region *mp;
 	struct vm_page *pg;
 	ptel_t ptel;
 	int error;
@@ -1704,16 +1726,16 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	 * available memory array.
 	 */
 	ptel = SH5_PTEL_CB_DEVICE;
-	if ((flags & PMAP_NC) == 0) {
-		struct mem_region *mp;
-		for (mp = mem; mp->mr_size; mp++) {
-			if (pa >= mp->mr_start &&
-			    pa < (mp->mr_start + mp->mr_size)) {
+	for (mp = mem; mp->mr_size; mp++) {
+		if (pa >= mp->mr_start &&
+		    pa < (mp->mr_start + mp->mr_size)) {
+			if ((flags & PMAP_NC) == 0)
 				ptel = SH5_PTEL_CB_WRITEBACK;
-				break;
-			}
+			else
+				ptel = SH5_PTEL_CB_NOCACHE;
+			break;
 		}
 	}
 
 	/* Pages are always readable */
 	ptel |= SH5_PTEL_PR_R;
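
Restated outside the diff, the new cache-policy selection in pmap_enter() reduces to the following decision; this helper is a paraphrase of the hunk above for clarity, not part of the commit:

/*
 * Paraphrase of pmap_enter()'s new cache-policy selection: pages
 * backed by the managed memory array ("mem") are mapped write-back,
 * or uncached when PMAP_NC is requested; anything outside it (i.e.
 * device space) keeps the DEVICE cache behaviour.
 */
static ptel_t
cache_policy_sketch(paddr_t pa, int flags)
{
	struct mem_region *mp;

	for (mp = mem; mp->mr_size; mp++) {
		if (pa >= mp->mr_start && pa < (mp->mr_start + mp->mr_size))
			return ((flags & PMAP_NC) == 0 ?
			    SH5_PTEL_CB_WRITEBACK : SH5_PTEL_CB_NOCACHE);
	}

	return (SH5_PTEL_CB_DEVICE);
}

The practical effect is that PMAP_NC mappings of real RAM now get NOCACHE semantics, rather than falling through to the stricter DEVICE attribute as before.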