add a PMAP_NC flag for pmap_kenter_pa() to specify a non-cached mapping.
use this in mbus_dmamem_map() to fix corruption of DMA memory. note that this TLB bit is ignored on some CPUs (PA7100 and probably others of that era), so this doesn't fix the problem in general, but it does work on newer models and will make things easier later.
commit 7e6508e29e
parent 113854bb28
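For illustration, a minimal sketch of how a caller uses the new flag: each kernel virtual page backing a DMA buffer is entered with PMAP_NC or'ed into the protection bits, so pmap_kenter_pa() marks it uncacheable, just as the new mbus_dmamem_map() loop below does. The helper name, its arguments, and the exact includes are assumptions made for the example, not part of this commit.

/*
 * Hypothetical sketch (not from this commit): enter uncached kernel
 * mappings for a physically contiguous DMA buffer, mirroring what
 * mbus_dmamem_map() does after this change.  map_dma_uncached(),
 * dma_va, dma_pa and npages are made up; pmap_kenter_pa() and PMAP_NC
 * are the interfaces this commit adds/uses.
 */
#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>

static void
map_dma_uncached(vaddr_t dma_va, paddr_t dma_pa, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		/* PMAP_NC asks pmap_kenter_pa() to set TLB_UNCACHEABLE. */
		pmap_kenter_pa(dma_va + i * PAGE_SIZE,
		    dma_pa + i * PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_NC);
	}
}

On CPUs whose TLB ignores the uncacheable bit (e.g. the PA7100 mentioned above), such a mapping is still created but behaves as cached.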
@@ -1,4 +1,4 @@
-/*	$NetBSD: mainbus.c,v 1.15 2003/11/24 02:51:35 chs Exp $	*/
+/*	$NetBSD: mainbus.c,v 1.16 2004/01/05 02:25:32 chs Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.15 2003/11/24 02:51:35 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.16 2004/01/05 02:25:32 chs Exp $");
 
 #undef BTLBDEBUG
 
@@ -849,8 +849,7 @@ mbus_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
 	mapsize = size;
 	off = (bus_size_t)addr & (PAGE_SIZE - 1);
 	addr = (void *) ((caddr_t)addr - off);
-	for(; size > 0; ) {
-
+	while (size > 0) {
 		pa = kvtop(addr);
 		if (pa != pa_next) {
 			if (++seg >= map->_dm_segcnt)
@@ -871,7 +870,6 @@ mbus_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
 	/* Make the map truly valid. */
 	map->dm_nsegs = seg + 1;
 	map->dm_mapsize = mapsize;
-
 
 	return (0);
 }
@@ -1111,11 +1109,10 @@ int
 mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
 		caddr_t *kvap, int flags)
 {
-	struct vm_page *m;
+	struct vm_page *pg;
+	struct pglist *pglist;
 	vaddr_t va;
-	struct pglist *mlist;
-	paddr_t pa, pa_next;
-	int seg;
+	paddr_t pa;
 
 	size = round_page(size);
 
@@ -1137,32 +1134,15 @@ mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
 	*kvap = (caddr_t)va;
 
 	/* Map the allocated pages into the chunk. */
-	mlist = segs[0]._ds_mlist;
-	pa_next = 0;
-	seg = -1;
-
-	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
-
-		if (size == 0)
-			panic("mbus_dmamem_map: size botch");
-
-		pa = VM_PAGE_TO_PHYS(m);
-		if (pa != pa_next) {
-			if (++seg >= nsegs)
-				panic("mbus_dmamem_map: nsegs botch");
-		}
-		pa_next = pa + PAGE_SIZE;
-
-		pmap_kenter_pa(va, pa,
-		    VM_PROT_READ|VM_PROT_WRITE);
-#if notused
-		pmap_changebit(va, TLB_UNCACHEABLE, 0);	/* XXX for now */
-#endif
-
+	pglist = segs[0]._ds_mlist;
+	TAILQ_FOREACH(pg, pglist, pageq) {
+		KASSERT(size != 0);
+		pa = VM_PAGE_TO_PHYS(pg);
+		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE | PMAP_NC);
 		va += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
 	pmap_update();
 
 	return (0);
 }
@@ -1407,4 +1387,3 @@ mbsubmatch(struct device *parent, struct cfdata *cf, void *aux)
 	ca->ca_irq = saved_irq;
 	return ret;
 }
-
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.11 2003/11/28 19:02:25 chs Exp $	*/
+/*	$NetBSD: pmap.c,v 1.12 2004/01/05 02:25:32 chs Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.11 2003/11/28 19:02:25 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.12 2004/01/05 02:25:32 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1826,6 +1826,7 @@ pmap_is_referenced(struct vm_page *pg)
 void
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 {
+	u_int tlbprot;
 	int s;
 #ifdef PMAPDEBUG
 	int opmapdebug = pmapdebug;
@@ -1843,12 +1844,14 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 	PMAP_PRINTF(PDB_KENTER, ("(%p, %p, %x)\n",
 	    (caddr_t)va, (caddr_t)pa, prot));
 	va = hppa_trunc_page(va);
+	tlbprot = TLB_WIRED | TLB_UNMANAGED;
+	tlbprot |= (prot & PMAP_NC) ? TLB_UNCACHEABLE : 0;
+	tlbprot |= pmap_prot(pmap_kernel(), prot & VM_PROT_ALL);
 	s = splvm();
 	KASSERT(pmap_pv_find_va(HPPA_SID_KERNEL, va) == NULL);
-	pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, va, pa,
-	    pmap_prot(pmap_kernel(), prot) |
-	    TLB_WIRED | TLB_UNMANAGED);
+	pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, va, pa, tlbprot);
 	splx(s);
+
 #ifdef PMAPDEBUG
 	pmapdebug = opmapdebug;
 #endif /* PMAPDEBUG */
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.4 2003/08/31 01:26:37 chs Exp $	*/
+/*	$NetBSD: pmap.h,v 1.5 2004/01/05 02:25:33 chs Exp $	*/
 
 /*	$OpenBSD: pmap.h,v 1.14 2001/05/09 15:31:24 art Exp $	*/
 
@@ -96,6 +96,9 @@ struct pmap {
 extern pmap_t	kernel_pmap;		/* The kernel's map */
 
 #ifdef _KERNEL
+
+#define	PMAP_NC		0x100
+
 #define	cache_align(x)	(((x) + dcache_line_mask) & ~(dcache_line_mask))
 extern int dcache_line_mask;
 
@@ -142,9 +145,10 @@ pmap_remove_all(struct pmap *pmap)
 }
 
 static __inline int
-pmap_prot(struct pmap *pmap, int prot)
+pmap_prot(struct pmap *pmap, vm_prot_t prot)
 {
 	extern u_int kern_prot[], user_prot[];
 
 	return (pmap == kernel_pmap ? kern_prot : user_prot)[prot];
 }
+