vme4_dmamap_[un]load: ensure enough DVMA space is allocated to cover the
buffer, and store that amount in the DMA map to avoid re-computation.
Also add a cache flush, as bus_dmamap_sync() doesn't do that yet.
pk 2000-06-18 19:30:21 +00:00
parent 5f887c484e
commit d982283952
1 changed file with 35 additions and 30 deletions
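
Before the diff itself, a quick illustration of the sizing rule the commit
message describes: the DVMA allocation must cover whole pages, from the start
of the buffer's first page through the end of its last, so the size is the
transfer length plus the buffer's in-page offset, rounded up to a page
multiple. A minimal, standalone C sketch of that arithmetic; PAGESZ and the
example address are hypothetical stand-ins for the kernel's pagesz and the
caller's buffer, not the driver code itself:

/*
 * Sketch of the DVMA sizing computation; standalone arithmetic only,
 * with hypothetical example values (PAGESZ, va, buflen).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGESZ	4096UL

int
main(void)
{
	uintptr_t va = 0x10000800;	/* example buffer address, mid-page */
	size_t buflen = 0x1000;		/* one page worth of data */

	uintptr_t voff = va & (PAGESZ - 1);	/* offset within first page */
	va &= ~(PAGESZ - 1);			/* round down to page start */

	/* Integral number of pages covering [va+voff, va+voff+buflen) */
	size_t sgsize = (buflen + voff + PAGESZ - 1) & ~(PAGESZ - 1);

	printf("voff=%#lx sgsize=%#zx (%zu pages)\n",
	    (unsigned long)voff, sgsize, (size_t)(sgsize / PAGESZ));
	return 0;
}

A 0x1000-byte buffer starting at page offset 0x800 straddles a page boundary
and needs two pages (sgsize = 0x2000), which round_page(buflen) alone would
not provide.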


@@ -1,4 +1,4 @@
-/*	$NetBSD: vme_machdep.c,v 1.24 2000/06/04 19:15:03 cgd Exp $	*/
+/*	$NetBSD: vme_machdep.c,v 1.25 2000/06/18 19:30:21 pk Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@@ -828,59 +828,62 @@ sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
 	struct proc *p;
 	int flags;
 {
-	bus_addr_t dvmaddr;
+	bus_addr_t dva;
 	bus_size_t sgsize;
-	vaddr_t vaddr;
+	vaddr_t va, voff;
 	pmap_t pmap;
 	int pagesz = PAGE_SIZE;
 	int error;
 
-	error = extent_alloc(vme_dvmamap, round_page(buflen), NBPG,
+	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */
+
+	va = (vaddr_t)buf;
+	voff = va & (pagesz - 1);
+	va &= -pagesz;
+
+	/*
+	 * Allocate an integral number of pages from DVMA space
+	 * covering the passed buffer.
+	 */
+	sgsize = (buflen + voff + pagesz - 1) & -pagesz;
+	error = extent_alloc(vme_dvmamap, sgsize, pagesz,
 			     map->_dm_boundary, map->_dm_boundary,
 			     (flags & BUS_DMA_NOWAIT) == 0
 					? EX_WAITOK
 					: EX_NOWAIT,
-			     (u_long *)&dvmaddr);
+			     (u_long *)&dva);
 	if (error != 0)
 		return (error);
 
-	vaddr = (vaddr_t)buf;
 	map->dm_mapsize = buflen;
 	map->dm_nsegs = 1;
-	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
+	/* Adjust DVMA address to VME view */
+	map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
 	map->dm_segs[0].ds_len = buflen;
+	map->dm_segs[0]._ds_sgsize = sgsize;
 
 	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;
 
-	for (; buflen > 0; ) {
+	for (; sgsize != 0; ) {
 		paddr_t pa;
 		/*
 		 * Get the physical address for this page.
 		 */
-		(void) pmap_extract(pmap, vaddr, &pa);
+		(void) pmap_extract(pmap, va, &pa);
 
-		/*
-		 * Compute the segment size, and adjust counts.
-		 */
-		sgsize = pagesz - ((u_long)vaddr & (pagesz - 1));
-		if (buflen < sgsize)
-			sgsize = buflen;
-
 #ifdef notyet
 		if (have_iocache)
-			curaddr |= PG_IOC;
+			pa |= PG_IOC;
 #endif
-		pmap_enter(pmap_kernel(), dvmaddr,
-			   (pa & ~(pagesz-1)) | PMAP_NC,
+		pmap_enter(pmap_kernel(), dva,
+			   pa | PMAP_NC,
 			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
 
-		dvmaddr += pagesz;
-		vaddr += sgsize;
-		buflen -= sgsize;
+		dva += pagesz;
+		va += pagesz;
+		sgsize -= pagesz;
 	}
 
-	/* Adjust DVMA address to VME view */
-	map->dm_segs[0].ds_addr -= VME4_DVMA_BASE;
-
 	return (0);
 }
@@ -893,20 +896,22 @@ sparc_vme4_dmamap_unload(t, map)
 	int nsegs = map->dm_nsegs;
 	bus_addr_t dva;
 	bus_size_t len;
-	int i;
+	int i, s, error;
 
 	for (i = 0; i < nsegs; i++) {
 		/* Go from VME to CPU view */
 		dva = segs[i].ds_addr + VME4_DVMA_BASE;
-		dva &= ~PGOFSET;
-		len = round_page(segs[i].ds_len);
+		dva &= -PAGE_SIZE;
+		len = segs[i]._ds_sgsize;
 
 		/* Remove double-mapping in DVMA space */
 		pmap_remove(pmap_kernel(), dva, dva + len);
 
 		/* Release DVMA space */
-		if (extent_free(vme_dvmamap, dva, len, EX_NOWAIT) != 0)
+		s = splhigh();
+		error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
+		splx(s);
+
+		if (error != 0)
 			printf("warning: %ld of DVMA space lost\n", len);
 	}
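
On the unload side, the freed length used to be re-derived as
round_page(ds_len), which is not the amount the load routine allocates once
the buffer's page offset is taken into account; recording _ds_sgsize at load
time lets unload free exactly the allocated range without re-deriving it.
The extent_free() call is now also bracketed by splhigh()/splx(), presumably
to serialize against interrupt-time use of the extent map. A standalone
sketch of the length mismatch, with round_page defined locally as a stand-in
for the kernel macro and hypothetical example values:

/*
 * Sketch: round_page() of the transfer length alone understates the
 * DVMA space allocated for a buffer that starts mid-page.
 */
#include <stdio.h>

#define PAGESZ		4096UL
#define round_page(x)	(((x) + PAGESZ - 1) & ~(PAGESZ - 1))

int
main(void)
{
	unsigned long voff = 0x800;	/* buffer's offset into its first page */
	unsigned long buflen = 0x1000;	/* transfer length: one page of data */

	unsigned long naive = round_page(buflen);		/* 0x1000 */
	unsigned long sgsize = round_page(buflen + voff);	/* 0x2000, as allocated */

	printf("round_page(len)=%#lx but allocated=%#lx: mismatch %#lx\n",
	    naive, sgsize, sgsize - naive);
	return 0;
}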