Simplify the bus_dma(9) implementation:

- the `alignment' and `boundary' arguments to bus_dmamem_alloc() only
  serve as hints on how to break up a DMA transaction into chunks
  when necessary; breaking up transfers is never necessary on sparc
  DVMA hardware, so stop recording these hints in the segment descriptor.

- exclusively use the VM page size (i.e. PAGE_SIZE et al.) when computing
  resource allocations; see the arithmetic sketch below.

- remember the size of a DVMA resource allocation in the new private
  `_ds_sgsize' field of a DMA segment descriptor, so that unload can
  free exactly what was allocated.
commit 1273031c4d (parent e459e7617b)
Author: pk
Date:   2000-05-23 11:39:57 +00:00

3 changed files with 47 additions and 73 deletions
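
Below is a minimal standalone sketch (illustrative only, not part of the
commit; the function name is hypothetical) of the page-rounding arithmetic
the diff standardizes on. It relies on the page size being a power of two,
so that `-pagesz' has the same bit pattern as `~(pagesz - 1)':

#include <stdio.h>

/*
 * Hypothetical illustration: round a transfer of `len' bytes starting
 * at virtual address `va' out to whole pages of size `pagesz', the
 * way iommu_dvma_alloc() computes `sgsize' in the diff below.
 */
static unsigned long
dvma_span(unsigned long va, unsigned long len, unsigned long pagesz)
{
	unsigned long voff = va & (pagesz - 1);	/* offset within page */

	/* The driver also truncates the address itself: va &= -pagesz; */
	return ((len + voff + pagesz - 1) & -pagesz);
}

int
main(void)
{
	/* 0x1123 bytes starting 0x10 bytes into a 4K page span 0x2000. */
	printf("%#lx\n", dvma_span(0x2010, 0x1123, 0x1000));
	return (0);
}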


@@ -1,4 +1,4 @@
-/*	$NetBSD: bus.h,v 1.18 2000/05/09 22:39:35 pk Exp $	*/
+/*	$NetBSD: bus.h,v 1.19 2000/05/23 11:39:57 pk Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -933,10 +933,9 @@ typedef struct sparc_bus_dmamap *bus_dmamap_t;
 struct sparc_bus_dma_segment {
 	bus_addr_t	ds_addr;	/* DVMA address */
 	bus_size_t	ds_len;		/* length of transfer */
+	bus_size_t	_ds_sgsize;	/* size of allocated DVMA segment */
 	void		*_ds_mlist;	/* page list when dmamem_alloc'ed */
 	vaddr_t		_ds_va;		/* VA when dmamem_map'ed */
-	bus_size_t	_ds_alignment;	/* dmamem_alloc() alignment */
-	bus_size_t	_ds_boundary;	/* dmamem_alloc() boundary */
 };
 typedef struct sparc_bus_dma_segment bus_dma_segment_t;


@@ -1,4 +1,4 @@
-/*	$NetBSD: iommu.c,v 1.40 2000/05/10 11:17:50 pk Exp $	*/
+/*	$NetBSD: iommu.c,v 1.41 2000/05/23 11:39:58 pk Exp $	*/
 
 /*
  * Copyright (c) 1996
@@ -105,7 +105,7 @@ int iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
 		int nsegs, size_t size, caddr_t *kvap, int flags));
 int iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
 		int nsegs, int off, int prot, int flags));
-int iommu_dvma_alloc(bus_dmamap_t, vaddr_t, bus_size_t, bus_size_t, int,
+int iommu_dvma_alloc(bus_dmamap_t, vaddr_t, bus_size_t, int,
 		bus_addr_t *, bus_size_t *);
@@ -436,14 +436,13 @@ if ((int)sc->sc_dvmacur + len > 0)
 /*
- * Common routine to allocate space in the IOMMU map.
+ * Internal routine to allocate space in the IOMMU map.
  */
 int
-iommu_dvma_alloc(map, va, len, boundary, flags, dvap, sgsizep)
+iommu_dvma_alloc(map, va, len, flags, dvap, sgsizep)
 	bus_dmamap_t map;
 	vaddr_t va;
 	bus_size_t len;
-	bus_size_t boundary;
 	int flags;
 	bus_addr_t *dvap;
 	bus_size_t *sgsizep;
@@ -452,21 +451,20 @@ iommu_dvma_alloc(map, va, len, boundary, flags, dvap, sgsizep)
 	u_long align, voff;
 	u_long ex_start, ex_end;
 	int s, error;
+	int pagesz = PAGE_SIZE;
 
 	/*
 	 * Remember page offset, then truncate the buffer address to
 	 * a page boundary.
 	 */
-	voff = va & PGOFSET;
-	va &= ~PGOFSET;
+	voff = va & (pagesz - 1);
+	va &= -pagesz;
 
 	if (len > map->_dm_size)
 		return (EINVAL);
 
-	sgsize = (len + voff + PGOFSET) & ~PGOFSET;
-	align = dvma_cachealign ? dvma_cachealign : NBPG;
-	if (boundary == 0)
-		boundary = map->_dm_boundary;
+	sgsize = (len + voff + pagesz - 1) & -pagesz;
+	align = dvma_cachealign ? dvma_cachealign : pagesz;
 
 	s = splhigh();
@@ -480,7 +478,8 @@ iommu_dvma_alloc(map, va, len, boundary, flags, dvap, sgsizep)
 	}
 
 	error = extent_alloc_subregion1(iommu_dvmamap,
 			ex_start, ex_end,
-			sgsize, align, va & (align-1), boundary,
+			sgsize, align, va & (align-1),
+			map->_dm_boundary,
 			(flags & BUS_DMA_NOWAIT) == 0
 				? EX_WAITOK : EX_NOWAIT,
 			(u_long *)dvap);
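
The `va & (align-1)' skew passed above asks extent(9) for a region whose
start is congruent to the buffer VA modulo `align', so that the DVMA
mapping lines up with the buffer in the virtually indexed cache (the
reason dvma_cachealign exists). A hypothetical userland sketch of that
skewed placement, assuming `align' is a power of two:

#include <stdio.h>

/*
 * Hypothetical sketch: the first address at or above `base' that is
 * congruent to `skew' modulo `align' (align must be a power of two).
 */
static unsigned long
skewed_first_fit(unsigned long base, unsigned long skew, unsigned long align)
{
	unsigned long dva = (base & -align) + skew;

	return (dva < base ? dva + align : dva);
}

int
main(void)
{
	unsigned long va = 0x30a10, align = 0x10000;	/* 64K alignment */

	/* DVMA address keeps the buffer's low bits: 0xfc000a10. */
	printf("%#lx\n", skewed_first_fit(0xfc000000UL,
	    va & (align - 1), align));
	return (0);
}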
@@ -505,6 +504,7 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 	bus_size_t sgsize;
 	bus_addr_t dva;
 	vaddr_t va = (vaddr_t)buf;
+	int pagesz = PAGE_SIZE;
 	pmap_t pmap;
 	int error;
@@ -514,7 +514,7 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 	map->dm_nsegs = 0;
 
 	/* Allocate IOMMU resources */
-	if ((error = iommu_dvma_alloc(map, va, buflen, 0, flags,
+	if ((error = iommu_dvma_alloc(map, va, buflen, flags,
 			&dva, &sgsize)) != 0)
 		return (error);
@@ -525,8 +525,9 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 	 */
 	map->dm_mapsize = buflen;
 	map->dm_nsegs = 1;
-	map->dm_segs[0].ds_addr = dva + (va & PGOFSET);
+	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
 	map->dm_segs[0].ds_len = buflen;
+	map->dm_segs[0]._ds_sgsize = sgsize;
 
 	if (p != NULL)
 		pmap = p->p_vmspace->vm_map.pmap;
@@ -542,9 +543,9 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 		iommu_enter(dva, pa);
-		dva += PAGE_SIZE;
-		va += PAGE_SIZE;
-		sgsize -= PAGE_SIZE;
+		dva += pagesz;
+		va += pagesz;
+		sgsize -= pagesz;
 	}
 
 	return (0);
@@ -561,7 +562,7 @@ iommu_dmamap_load_mbuf(t, map, m, flags)
 	int flags;
 {
 
-	panic("_bus_dmamap_load: not implemented");
+	panic("_bus_dmamap_load_mbuf: not implemented");
 }
 
 /*
@@ -601,17 +602,8 @@ iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
 	map->dm_nsegs = 0;
 
-#ifdef DIAGNOSTIC
-	/* XXX - unhelpful since we can't reset these in map_unload() */
-	if (segs[0].ds_addr != 0 || segs[0].ds_len != 0)
-		panic("iommu_dmamap_load_raw: segment already loaded: "
-			"addr 0x%lx, size 0x%lx",
-			segs[0].ds_addr, segs[0].ds_len);
-#endif
-
 	/* Allocate IOMMU resources */
 	if ((error = iommu_dvma_alloc(map, segs[0]._ds_va, size,
-			segs[0]._ds_boundary,
 			flags, &dva, &sgsize)) != 0)
 		return (error);
@@ -625,6 +617,7 @@ iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
 	map->dm_segs[0].ds_addr = dva;
 	map->dm_segs[0].ds_len = size;
+	map->dm_segs[0]._ds_sgsize = sgsize;
 
 	/* Map physical pages into IOMMU */
 	mlist = segs[0]._ds_mlist;
@@ -658,10 +651,8 @@ iommu_dmamap_unload(t, map)
 	int i, s, error;
 
 	for (i = 0; i < nsegs; i++) {
-		dva = segs[i].ds_addr;
-		len = segs[i].ds_len;
-		len = ((dva & PGOFSET) + len + PGOFSET) & ~PGOFSET;
-		dva &= ~PGOFSET;
+		dva = segs[i].ds_addr & -PAGE_SIZE;
+		len = segs[i]._ds_sgsize;
 
 		iommu_remove(dva, len);
 		s = splhigh();
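
For this single-segment load path, the stored `_ds_sgsize' equals the
length the old unload code recomputed from `ds_addr' and `ds_len'. A
small self-contained check of that equivalence (illustrative only,
arbitrary example values):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define PGOFSET		(PAGE_SIZE - 1)

int
main(void)
{
	unsigned long va = 0x30a10;		/* arbitrary buffer VA */
	unsigned long buflen = 0x2345;		/* arbitrary length */
	unsigned long dva = 0xfc000000UL;	/* page-aligned DVMA base */

	/* What iommu_dmamap_load() now stores in the segment. */
	unsigned long voff = va & PGOFSET;
	unsigned long sgsize = (buflen + voff + PAGE_SIZE - 1) & -PAGE_SIZE;
	unsigned long ds_addr = dva + voff;

	/* What the old iommu_dmamap_unload() recomputed. */
	unsigned long old_len =
	    ((ds_addr & PGOFSET) + buflen + PGOFSET) & ~PGOFSET;

	assert(old_len == sgsize);
	printf("sgsize %#lx == recomputed %#lx\n", sgsize, old_len);
	return (0);
}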


@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.164 2000/05/18 10:10:55 pk Exp $	*/
+/*	$NetBSD: machdep.c,v 1.165 2000/05/23 11:39:58 pk Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -1222,8 +1222,6 @@ _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
 	segs[0].ds_addr = 0;
 	segs[0].ds_len = 0;
 	segs[0]._ds_va = 0;
-	segs[0]._ds_alignment = (alignment == 0) ? NBPG : round_page(alignment);
-	segs[0]._ds_boundary = boundary;
 	*rsegs = 1;
 	return (0);
 }
@@ -1261,7 +1259,7 @@ _bus_dmamem_unmap(t, kva, size)
 {
 
 #ifdef DIAGNOSTIC
-	if ((u_long)kva & PGOFSET)
+	if ((u_long)kva & PAGE_MASK)
 		panic("_bus_dmamem_unmap");
 #endif
@@ -1374,6 +1372,7 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 {
 	bus_size_t sgsize;
 	vaddr_t va = (vaddr_t)buf;
+	int pagesz = PAGE_SIZE;
 	bus_addr_t dva;
 	pmap_t pmap;
@@ -1401,9 +1400,9 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 		return (0);
 	}
 
-	sgsize = round_page(buflen + (va & PGOFSET));
+	sgsize = round_page(buflen + (va & (pagesz - 1)));
 
-	if (extent_alloc(dvmamap24, sgsize, NBPG, map->_dm_boundary,
+	if (extent_alloc(dvmamap24, sgsize, pagesz, map->_dm_boundary,
 			(flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
 			(u_long *)&dva) != 0) {
 		return (ENOMEM);
@@ -1413,8 +1412,9 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 	 * We always use just one segment.
 	 */
 	map->dm_mapsize = buflen;
-	map->dm_segs[0].ds_addr = dva + (va & PGOFSET);
+	map->dm_segs[0].ds_addr = dva + (va & (pagesz - 1));
 	map->dm_segs[0].ds_len = buflen;
+	map->dm_segs[0]._ds_sgsize = sgsize;
 
 	if (p != NULL)
 		pmap = p->p_vmspace->vm_map.pmap;
@@ -1431,7 +1431,7 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
-		sgsize = NBPG - (va & PGOFSET);
+		sgsize = pagesz - (va & (pagesz - 1));
 		if (buflen < sgsize)
 			sgsize = buflen;
@@ -1442,10 +1442,10 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 #endif
 #endif
 		pmap_enter(pmap_kernel(), dva,
-			(pa & ~(NBPG-1)) | PMAP_NC,
+			(pa & -pagesz) | PMAP_NC,
 			VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
 
-		dva += PAGE_SIZE;
+		dva += pagesz;
 		va += sgsize;
 		buflen -= sgsize;
 	}
@@ -1471,35 +1471,17 @@ sun4_dmamap_load_raw(t, map, segs, nsegs, size, flags)
 	paddr_t pa;
 	bus_addr_t dva;
 	bus_size_t sgsize;
-	u_long boundary;
 	struct pglist *mlist;
+	int pagesz = PAGE_SIZE;
 	int error;
 
 	map->dm_nsegs = 0;
 
-#ifdef DIAGNOSTIC
-	/* XXX - unhelpful since we can't reset these in map_unload() */
-	if (segs[0].ds_addr != 0 || segs[0].ds_len != 0)
-		panic("sun4_dmamap_load_raw: segment already loaded: "
-			"addr 0x%lx, size 0x%lx",
-			segs[0].ds_addr, segs[0].ds_len);
-#endif
-
-	sgsize = round_page(size);
-
-	/*
-	 * A boundary presented to bus_dmamem_alloc() takes precedence
-	 * over boundary in the map.
-	 */
-	if ((boundary = segs[0]._ds_boundary) == 0)
-		boundary = map->_dm_boundary;
+	sgsize = (size + pagesz - 1) & -pagesz;
 
 	/* Allocate DVMA addresses */
 	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
-		error = extent_alloc(dvmamap24, sgsize,
-				segs[0]._ds_alignment,
-				boundary,
+		error = extent_alloc(dvmamap24, sgsize, pagesz,
+				map->_dm_boundary,
 				(flags & BUS_DMA_NOWAIT) == 0
 					? EX_WAITOK : EX_NOWAIT,
 				(u_long *)&dva);
@@ -1507,14 +1489,15 @@ sun4_dmamap_load_raw(t, map, segs, nsegs, size, flags)
 			return (error);
 	} else {
 		/* Any properly aligned virtual address will do */
-		dva = _bus_dma_valloc_skewed(sgsize, boundary,
-				segs[0]._ds_alignment, 0);
+		dva = _bus_dma_valloc_skewed(sgsize, map->_dm_boundary,
+				pagesz, 0);
 		if (dva == 0)
 			return (ENOMEM);
 	}
 
 	map->dm_segs[0].ds_addr = dva;
 	map->dm_segs[0].ds_len = size;
+	map->dm_segs[0]._ds_sgsize = sgsize;
 
 	/* Map physical pages into IOMMU */
 	mlist = segs[0]._ds_mlist;
@@ -1555,7 +1538,7 @@ sun4_dmamap_unload(t, map)
 	int flags = map->_dm_flags;
 	bus_addr_t dva;
 	bus_size_t len;
-	int i;
+	int i, s, error;
 
 	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
 		/* Nothing to release */
@@ -1566,15 +1549,16 @@ sun4_dmamap_unload(t, map)
 	}
 
 	for (i = 0; i < nsegs; i++) {
-		dva = segs[i].ds_addr;
-		len = segs[i].ds_len;
-		len = ((dva & PGOFSET) + len + PGOFSET) & ~PGOFSET;
-		dva &= ~PGOFSET;
+		dva = segs[i].ds_addr & -PAGE_SIZE;
+		len = segs[i]._ds_sgsize;
 
 		pmap_remove(pmap_kernel(), dva, dva + len);
 
 		if ((flags & BUS_DMA_24BIT) != 0) {
-			if (extent_free(dvmamap24, dva, len, EX_NOWAIT) != 0)
+			s = splhigh();
+			error = extent_free(dvmamap24, dva, len, EX_NOWAIT);
+			splx(s);
+			if (error != 0)
 				printf("warning: %ld of DVMA space lost\n", len);
 		} else {
 			uvm_unmap(kernel_map, dva, dva + len);