The bus_dma(9) implementation now allocates DVMA addresses in
bus_dmamap_load_*() routines only. Note that DMA memory buffers allocated with bus_dmamem_alloc() _must_ be loaded by bus_dmamap_load_raw().
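
For driver authors, the practical sequence is: allocate with bus_dmamem_alloc(), create a map, then hand the raw segments to bus_dmamap_load_raw(), which is now the point where DVMA space is carved out. A minimal sketch using the standard bus_dma(9) entry points (single-segment case; error unwinding abbreviated, the function name is illustrative):

	int
	example_dma_setup(bus_dma_tag_t t, bus_size_t size,
	    bus_dmamap_t *mapp, caddr_t *kvap)
	{
		bus_dma_segment_t seg;
		int rseg, error;

		/* Get physical pages; no DVMA address is assigned here anymore. */
		error = bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			return (error);

		error = bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT,
		    mapp);
		if (error != 0)
			return (error);

		/*
		 * dmamem_alloc'ed memory _must_ go through load_raw(); this
		 * is where the DVMA range is allocated and the MMU programmed.
		 */
		error = bus_dmamap_load_raw(t, *mapp, &seg, rseg, size,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			return (error);

		/* Optional CPU window; on sparc this now picks a cache-safe KVA. */
		return (bus_dmamem_map(t, &seg, rseg, size, kvap, BUS_DMA_NOWAIT));
	}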
commit 2d12c9e33b
parent 6c97e2bd78
--- vme_machdep.c	1.22
+++ vme_machdep.c	1.23
@@ -1,4 +1,4 @@
-/* $NetBSD: vme_machdep.c,v 1.22 2000/01/11 12:59:47 pk Exp $ */
+/* $NetBSD: vme_machdep.c,v 1.23 2000/05/09 22:39:36 pk Exp $ */
 
 /*-
  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@@ -124,12 +124,6 @@ static int sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
 static void sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
 static void sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
 		    bus_addr_t, bus_size_t, int));
-
-static int sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
-		    bus_size_t, bus_size_t, bus_dma_segment_t *,
-		    int, int *, int));
-static void sparc_vme4_dmamem_free __P((bus_dma_tag_t,
-		    bus_dma_segment_t *, int));
 #endif
 
 #if defined(SUN4M)
@@ -141,12 +135,6 @@ static int sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
 static void sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
 static void sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
 		    bus_addr_t, bus_size_t, int));
-
-static int sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
-		    bus_size_t, bus_size_t, bus_dma_segment_t *,
-		    int, int *, int));
-static void sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
-		    bus_dma_segment_t *, int));
 #endif
 
 static int sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
@@ -223,8 +211,8 @@ struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
 	sparc_vme4_dmamap_unload,
 	sparc_vme4_dmamap_sync,
 
-	sparc_vme4_dmamem_alloc,
-	sparc_vme4_dmamem_free,
+	_bus_dmamem_alloc,
+	_bus_dmamem_free,
 	sparc_vme_dmamem_map,
 	_bus_dmamem_unmap,
 	_bus_dmamem_mmap
@@ -243,8 +231,8 @@ struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
 	sparc_vme4m_dmamap_unload,
 	sparc_vme4m_dmamap_sync,
 
-	sparc_vme4m_dmamem_alloc,
-	sparc_vme4m_dmamem_free,
+	_bus_dmamem_alloc,
+	_bus_dmamem_free,
 	sparc_vme_dmamem_map,
 	_bus_dmamem_unmap,
 	_bus_dmamem_mmap
@@ -888,103 +876,32 @@ sparc_vme4_dmamap_unload(t, map)
 	bus_dma_tag_t t;
 	bus_dmamap_t map;
 {
-	bus_addr_t addr;
+	bus_dma_segment_t *segs = map->dm_segs;
+	int nsegs = map->dm_nsegs;
+	bus_addr_t dva;
 	bus_size_t len;
+	int i;
 
-	/* Go from VME to CPU view */
-	map->dm_segs[0].ds_addr += VME4_DVMA_BASE;
+	for (i = 0; i < nsegs; i++) {
+		/* Go from VME to CPU view */
+		dva = segs[i].ds_addr + VME4_DVMA_BASE;
 
-	addr = map->dm_segs[0].ds_addr & ~PGOFSET;
-	len = round_page(map->dm_segs[0].ds_len);
+		dva &= ~PGOFSET;
+		len = round_page(segs[i].ds_len);
 
-	/* Remove double-mapping in DVMA space */
-	pmap_remove(pmap_kernel(), addr, addr + len);
+		/* Remove double-mapping in DVMA space */
+		pmap_remove(pmap_kernel(), dva, dva + len);
 
-	/* Release DVMA space */
-	if (extent_free(vme_dvmamap, addr, len, EX_NOWAIT) != 0)
-		printf("warning: %ld of DVMA space lost\n", len);
+		/* Release DVMA space */
+		if (extent_free(vme_dvmamap, dva, len, EX_NOWAIT) != 0)
+			printf("warning: %ld of DVMA space lost\n", len);
+	}
 
 	/* Mark the mappings as invalid. */
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
 }
 
-int
-sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
-	bus_dma_tag_t t;
-	bus_size_t size, alignment, boundary;
-	bus_dma_segment_t *segs;
-	int nsegs;
-	int *rsegs;
-	int flags;
-{
-	bus_addr_t dvmaddr;
-	struct pglist *mlist;
-	vm_page_t m;
-	paddr_t pa;
-	int error;
-
-	size = round_page(size);
-	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
-					 segs, nsegs, rsegs, flags);
-	if (error != 0)
-		return (error);
-
-	if (extent_alloc(vme_dvmamap, size, alignment, boundary,
-			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
-			 (u_long *)&dvmaddr) != 0)
-		return (ENOMEM);
-
-	/*
-	 * Compute the location, size, and number of segments actually
-	 * returned by the VM code.
-	 */
-	segs[0].ds_addr = dvmaddr - VME4_DVMA_BASE;
-	segs[0].ds_len = size;
-	*rsegs = 1;
-
-	/* Map memory into DVMA space */
-	mlist = segs[0]._ds_mlist;
-	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
-		pa = VM_PAGE_TO_PHYS(m);
-
-#ifdef notyet
-		if (have_iocache)
-			pa |= PG_IOC;
-#endif
-		pmap_enter(pmap_kernel(), dvmaddr, pa | PMAP_NC,
-			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
-		dvmaddr += PAGE_SIZE;
-	}
-
-	return (0);
-}
-
-void
-sparc_vme4_dmamem_free(t, segs, nsegs)
-	bus_dma_tag_t t;
-	bus_dma_segment_t *segs;
-	int nsegs;
-{
-	bus_addr_t addr;
-	bus_size_t len;
-
-	addr = segs[0].ds_addr + VME4_DVMA_BASE;
-	len = round_page(segs[0].ds_len);
-
-	/* Remove DVMA kernel map */
-	pmap_remove(pmap_kernel(), addr, addr + len);
-
-	/* Release DVMA address range */
-	if (extent_free(vme_dvmamap, addr, len, EX_NOWAIT) != 0)
-		printf("warning: %ld of DVMA space lost\n", len);
-
-	/*
-	 * Return the list of pages back to the VM system.
-	 */
-	_bus_dmamem_free_common(t, segs, nsegs);
-}
-
 void
 sparc_vme4_dmamap_sync(t, map, offset, len, ops)
 	bus_dma_tag_t t;
@@ -1043,7 +960,7 @@ sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
 	volatile u_int32_t *ioctags;
 	int error;
 
-	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
+	buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
 	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
 	if (error != 0)
 		return (error);
@@ -1082,37 +999,6 @@ sparc_vme4m_dmamap_unload(t, map)
 	bus_dmamap_unload(sc->sc_dmatag, map);
 }
 
-int
-sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
-	bus_dma_tag_t t;
-	bus_size_t size, alignmnt, boundary;
-	bus_dma_segment_t *segs;
-	int nsegs;
-	int *rsegs;
-	int flags;
-{
-	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
-	int error;
-
-	error = bus_dmamem_alloc(sc->sc_dmatag, size, alignmnt, boundary,
-				 segs, nsegs, rsegs, flags);
-	if (error != 0)
-		return (error);
-
-	return (0);
-}
-
-void
-sparc_vme4m_dmamem_free(t, segs, nsegs)
-	bus_dma_tag_t t;
-	bus_dma_segment_t *segs;
-	int nsegs;
-{
-	struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
-
-	bus_dmamem_free(sc->sc_dmatag, segs, nsegs);
-}
-
 void
 sparc_vme4m_dmamap_sync(t, map, offset, len, ops)
 	bus_dma_tag_t t;
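
One small change in the vme4m load path above is easy to misread: rounding buflen up to the I/O-cache page size switches from "& ~(VME_IOC_PAGESZ - 1)" to "& -VME_IOC_PAGESZ". For a power-of-two N the two masks are identical, since in two's complement -N == ~(N - 1). A stand-alone demonstration with hypothetical values (VME_IOC_PAGESZ stand-in chosen arbitrarily):

	#include <assert.h>

	/* Round x up to a multiple of n; n must be a power of two. */
	#define ROUND_UP(x, n)	(((x) + (n) - 1) & -(n))

	int
	main(void)
	{
		unsigned long n = 8192;	/* stand-in for VME_IOC_PAGESZ */
		unsigned long x;

		for (x = 0; x < 5 * n; x += 123) {
			/* -n == ~(n - 1), so both mask forms agree */
			assert(((x + n - 1) & -n) == ((x + n - 1) & ~(n - 1)));
			assert(ROUND_UP(x, n) % n == 0 && ROUND_UP(x, n) >= x);
		}
		return 0;
	}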
--- bus.h	1.17
+++ bus.h	1.18
@@ -1,4 +1,4 @@
-/* $NetBSD: bus.h,v 1.17 2000/01/25 22:13:24 drochner Exp $ */
+/* $NetBSD: bus.h,v 1.18 2000/05/09 22:39:35 pk Exp $ */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -906,6 +906,9 @@ bus_space_copy_region_8(t, h1, o1, h2, o2, c)
 /* For devices that have a 24-bit address space */
 #define BUS_DMA_24BIT	BUS_DMA_BUS1
 
+/* Internal flag: current DVMA address is equal to the KVA buffer address */
+#define _BUS_DMA_DIRECTMAP	BUS_DMA_BUS2
+
 /* Forwards needed by prototypes below. */
 struct mbuf;
 struct uio;
@@ -930,7 +933,10 @@ typedef struct sparc_bus_dmamap *bus_dmamap_t;
 struct sparc_bus_dma_segment {
 	bus_addr_t	ds_addr;	/* DVMA address */
 	bus_size_t	ds_len;		/* length of transfer */
-	void		*_ds_mlist;	/* XXX - dmamap_alloc'ed pages */
+	void		*_ds_mlist;	/* page list when dmamem_alloc'ed */
+	vaddr_t		_ds_va;		/* VA when dmamem_map'ed */
+	bus_size_t	_ds_alignment;	/* dmamem_alloc() alignment */
+	bus_size_t	_ds_boundary;	/* dmamem_alloc() boundary */
 };
 typedef struct sparc_bus_dma_segment	bus_dma_segment_t;
@@ -1012,7 +1018,7 @@ struct sparc_bus_dma_tag {
  */
 struct sparc_bus_dmamap {
 	/*
-	 * PRIVATE MEMBERS: not for use my machine-independent code.
+	 * PRIVATE MEMBERS: not for use by machine-independent code.
 	 */
 	bus_size_t	_dm_size;	/* largest DMA transfer mappable */
 	int		_dm_segcnt;	/* number of segs this map can map */
@@ -1044,10 +1050,10 @@ void	_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
 void	_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
 	    bus_size_t, int));
 
-int	_bus_dmamem_alloc_common __P((bus_dma_tag_t tag, bus_size_t size,
+int	_bus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
 	    bus_size_t alignment, bus_size_t boundary,
 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
-void	_bus_dmamem_free_common __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
+void	_bus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
 	    int nsegs));
 void	_bus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva,
 	    size_t size));
@@ -1058,6 +1064,8 @@ int	_bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size,
 	    bus_size_t alignment, bus_size_t boundary,
 	    bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
 	    vaddr_t low, vaddr_t high));
+
+vaddr_t	_bus_dma_valloc_skewed(size_t, u_long, u_long, u_long);
 #endif /* _SPARC_BUS_DMA_PRIVATE */
 
 #endif /* _SPARC_BUS_H_ */
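
The two private segment fields added above (_ds_alignment, _ds_boundary) exist because bus_dmamem_alloc() no longer picks addresses: the constraints the caller asked for must survive until bus_dmamap_load_raw() runs. A conceptual sketch, not kernel code, of how a load_raw implementation consumes them — the helper is hypothetical, but the precedence rule is quoted from the machdep.c diff below:

	/*
	 * "A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map."
	 */
	static bus_size_t
	effective_boundary(bus_dma_segment_t *seg, bus_dmamap_t map)
	{
		return (seg->_ds_boundary != 0 ?
		    seg->_ds_boundary : map->_dm_boundary);
	}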
--- iommu.c	1.38
+++ iommu.c	1.39
@@ -1,4 +1,4 @@
-/* $NetBSD: iommu.c,v 1.38 2000/01/11 13:01:52 pk Exp $ */
+/* $NetBSD: iommu.c,v 1.39 2000/05/09 22:39:35 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -90,26 +90,23 @@ struct cfattach iommu_ca = {
 
 /* IOMMU DMA map functions */
 int	iommu_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
-			bus_size_t, struct proc *, int));
+		    bus_size_t, struct proc *, int));
 int	iommu_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t,
-			struct mbuf *, int));
+		    struct mbuf *, int));
 int	iommu_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t,
-			struct uio *, int));
+		    struct uio *, int));
 int	iommu_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
-			bus_dma_segment_t *, int, bus_size_t, int));
+		    bus_dma_segment_t *, int, bus_size_t, int));
 void	iommu_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
 void	iommu_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
-			bus_size_t, int));
+		    bus_size_t, int));
 
-int	iommu_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
-			bus_size_t alignment, bus_size_t boundary,
-			bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags));
-void	iommu_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
-			int nsegs));
 int	iommu_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
-			int nsegs, size_t size, caddr_t *kvap, int flags));
+		    int nsegs, size_t size, caddr_t *kvap, int flags));
 int	iommu_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
-			int nsegs, int off, int prot, int flags));
+		    int nsegs, int off, int prot, int flags));
+int	iommu_dvma_alloc(bus_dmamap_t, vaddr_t, bus_size_t, bus_size_t, int,
+		    bus_addr_t *, bus_size_t *);
 
 
 struct sparc_bus_dma_tag iommu_dma_tag = {
@@ -123,8 +120,8 @@ struct sparc_bus_dma_tag iommu_dma_tag = {
 	iommu_dmamap_unload,
 	iommu_dmamap_sync,
 
-	iommu_dmamem_alloc,
-	iommu_dmamem_free,
+	_bus_dmamem_alloc,
+	_bus_dmamem_free,
 	iommu_dmamem_map,
 	_bus_dmamem_unmap,
 	iommu_dmamem_mmap
@@ -338,23 +335,25 @@ iommu_attach(parent, self, aux)
 }
 
 void
-iommu_enter(va, pa)
-	bus_addr_t va;
+iommu_enter(dva, pa)
+	bus_addr_t dva;
 	paddr_t pa;
 {
 	struct iommu_softc *sc = iommu_sc;
 	int pte;
 
-#ifdef DEBUG
-	if (va < sc->sc_dvmabase)
-		panic("iommu_enter: va 0x%lx not in DVMA space", (long)va);
+	/* This routine relies on the fact that sc->sc_pagesize == PAGE_SIZE */
+
+#ifdef DIAGNOSTIC
+	if (dva < sc->sc_dvmabase)
+		panic("iommu_enter: dva 0x%lx not in DVMA space", (long)dva);
 #endif
 
 	pte = atop(pa) << IOPTE_PPNSHFT;
 	pte &= IOPTE_PPN;
 	pte |= IOPTE_V | IOPTE_W | (has_iocache ? IOPTE_C : 0);
-	sc->sc_ptes[atop(va - sc->sc_dvmabase)] = pte;
-	IOMMU_FLUSHPAGE(sc, va);
+	sc->sc_ptes[atop(dva - sc->sc_dvmabase)] = pte;
+	IOMMU_FLUSHPAGE(sc, dva);
 }
 
 /*
@@ -437,24 +436,21 @@ if ((int)sc->sc_dvmacur + len > 0)
 
 
 /*
- * IOMMU DMA map functions.
+ * Common routine to allocate space in the IOMMU map.
  */
 int
-iommu_dmamap_load(t, map, buf, buflen, p, flags)
-	bus_dma_tag_t t;
+iommu_dvma_alloc(map, va, len, boundary, flags, dvap, sgsizep)
 	bus_dmamap_t map;
-	void *buf;
-	bus_size_t buflen;
-	struct proc *p;
+	vaddr_t va;
+	bus_size_t len;
+	bus_size_t boundary;
 	int flags;
+	bus_addr_t *dvap;
+	bus_size_t *sgsizep;
 {
 	bus_size_t sgsize;
-	bus_addr_t dva;
-	bus_addr_t boundary;
-	vaddr_t va = (vaddr_t)buf;
 	u_long align, voff;
 	u_long ex_start, ex_end;
-	pmap_t pmap;
 	int s, error;
 
 	/*
@@ -464,17 +460,13 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 	voff = va & PGOFSET;
 	va &= ~PGOFSET;
 
-	/*
-	 * Make sure that on error condition we return "no valid mappings".
-	 */
-	map->dm_nsegs = 0;
-
-	if (buflen > map->_dm_size)
+	if (len > map->_dm_size)
 		return (EINVAL);
 
-	sgsize = (buflen + voff + PGOFSET) & ~PGOFSET;
+	sgsize = (len + voff + PGOFSET) & ~PGOFSET;
 	align = dvma_cachealign ? dvma_cachealign : NBPG;
-	boundary = map->_dm_boundary;
+	if (boundary == 0)
+		boundary = map->_dm_boundary;
 
 	s = splhigh();
 
@@ -491,20 +483,49 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 				sgsize, align, va & (align-1), boundary,
 				(flags & BUS_DMA_NOWAIT) == 0
 					? EX_WAITOK : EX_NOWAIT,
-				(u_long *)&dva);
+				(u_long *)dvap);
 	splx(s);
 
-	if (error != 0)
-		return (error);
+	*sgsizep = sgsize;
+	return (error);
+}
+
+/*
+ * IOMMU DMA map functions.
+ */
+int
+iommu_dmamap_load(t, map, buf, buflen, p, flags)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	void *buf;
+	bus_size_t buflen;
+	struct proc *p;
+	int flags;
+{
+	bus_size_t sgsize;
+	bus_addr_t dva;
+	vaddr_t va = (vaddr_t)buf;
+	pmap_t pmap;
+	int error;
+
+	/*
+	 * Make sure that on error condition we return "no valid mappings".
+	 */
+	map->dm_nsegs = 0;
+
+	/* Allocate IOMMU resources */
+	if ((error = iommu_dvma_alloc(map, va, buflen, 0, flags,
+					&dva, &sgsize)) != 0)
+		return (error);
 
-	cpuinfo.cache_flush(buf, buflen);
+	cpuinfo.cache_flush(buf, buflen); /* XXX - move to bus_dma_sync? */
 
 	/*
 	 * We always use just one segment.
 	 */
 	map->dm_mapsize = buflen;
 	map->dm_nsegs = 1;
-	map->dm_segs[0].ds_addr = dva + voff;
+	map->dm_segs[0].ds_addr = dva + (va & PGOFSET);
 	map->dm_segs[0].ds_len = buflen;
 
 	if (p != NULL)
@@ -521,9 +542,9 @@ iommu_dmamap_load(t, map, buf, buflen, p, flags)
 
 		iommu_enter(dva, pa);
 
-		dva += NBPG;
-		va += NBPG;
-		sgsize -= NBPG;
+		dva += PAGE_SIZE;
+		va += PAGE_SIZE;
+		sgsize -= PAGE_SIZE;
 	}
 
 	return (0);
@@ -570,37 +591,84 @@ iommu_dmamap_load_raw(t, map, segs, nsegs, size, flags)
 	bus_size_t size;
 	int flags;
 {
+	vm_page_t m;
+	paddr_t pa;
+	bus_addr_t dva;
+	bus_size_t sgsize;
+	struct pglist *mlist;
+	int error;
 
-	panic("_bus_dmamap_load_raw: not implemented");
+	map->dm_nsegs = 0;
+
+#ifdef DIAGNOSTIC
+	/* XXX - unhelpful since we can't reset these in map_unload() */
+	if (segs[0].ds_addr != 0 || segs[0].ds_len != 0)
+		panic("iommu_dmamap_load_raw: segment already loaded: "
+			"addr 0x%lx, size 0x%lx",
+			segs[0].ds_addr, segs[0].ds_len);
+#endif
+
+	/* Allocate IOMMU resources */
+	if ((error = iommu_dvma_alloc(map, segs[0]._ds_va, size,
+					segs[0]._ds_boundary,
+					flags, &dva, &sgsize)) != 0)
+		return (error);
+
+	/*
+	 * Note DVMA address in case bus_dmamem_map() is called later.
+	 * It can then insure cache coherency by choosing a KVA that
+	 * is aligned to `ds_addr'.
+	 */
+	segs[0].ds_addr = dva;
+	segs[0].ds_len = size;
+
+	map->dm_segs[0].ds_addr = dva;
+	map->dm_segs[0].ds_len = size;
+
+	/* Map physical pages into IOMMU */
+	mlist = segs[0]._ds_mlist;
+	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
+		if (sgsize == 0)
+			panic("iommu_dmamap_load_raw: size botch");
+		pa = VM_PAGE_TO_PHYS(m);
+		iommu_enter(dva, pa);
+		dva += PAGE_SIZE;
+		sgsize -= PAGE_SIZE;
+	}
+
+	map->dm_nsegs = 1;
+	map->dm_mapsize = size;
+
+	return (0);
 }
 
 /*
- * Common function for unloading a DMA map. May be called by
- * bus-specific DMA map unload functions.
+ * Unload an IOMMU DMA map.
  */
 void
 iommu_dmamap_unload(t, map)
 	bus_dma_tag_t t;
 	bus_dmamap_t map;
 {
-	bus_addr_t addr;
+	bus_dma_segment_t *segs = map->dm_segs;
+	int nsegs = map->dm_nsegs;
+	bus_addr_t dva;
 	bus_size_t len;
-	int s, error;
+	int i, s, error;
 
-	if (map->dm_nsegs != 1)
-		panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);
+	for (i = 0; i < nsegs; i++) {
+		dva = segs[i].ds_addr;
+		len = segs[i].ds_len;
+		len = ((dva & PGOFSET) + len + PGOFSET) & ~PGOFSET;
+		dva &= ~PGOFSET;
 
-	addr = map->dm_segs[0].ds_addr;
-	len = map->dm_segs[0].ds_len;
-	len = ((addr & PGOFSET) + len + PGOFSET) & ~PGOFSET;
-	addr &= ~PGOFSET;
-
-	iommu_remove(addr, len);
-	s = splhigh();
-	error = extent_free(iommu_dvmamap, addr, len, EX_NOWAIT);
-	splx(s);
-	if (error != 0)
-		printf("warning: %ld of DVMA space lost\n", (long)len);
+		iommu_remove(dva, len);
+		s = splhigh();
+		error = extent_free(iommu_dvmamap, dva, len, EX_NOWAIT);
+		splx(s);
+		if (error != 0)
+			printf("warning: %ld of DVMA space lost\n", (long)len);
+	}
 
 	/* Mark the mappings as invalid. */
 	map->dm_mapsize = 0;
@@ -608,8 +676,7 @@ iommu_dmamap_unload(t, map)
 }
 
 /*
- * Common function for DMA map synchronization. May be called
- * by bus-specific DMA map synchronization functions.
+ * DMA map synchronization.
  */
 void
 iommu_dmamap_sync(t, map, offset, len, ops)
@@ -626,106 +693,7 @@ iommu_dmamap_sync(t, map, offset, len, ops)
 }
 
-/*
- * Common function for DMA-safe memory allocation. May be called
- * by bus-specific DMA memory allocation functions.
- */
-int
-iommu_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
-	bus_dma_tag_t t;
-	bus_size_t size, alignment, boundary;
-	bus_dma_segment_t *segs;
-	int nsegs;
-	int *rsegs;
-	int flags;
-{
-	paddr_t pa;
-	bus_addr_t dva;
-	vm_page_t m;
-	int s, error;
-	u_long ex_start, ex_end;
-	struct pglist *mlist;
-
-	size = round_page(size);
-	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
-					 segs, nsegs, rsegs, flags);
-	if (error != 0)
-		return (error);
-
-	s = splhigh();
-
-	if ((flags & BUS_DMA_24BIT) != 0) {
-		ex_start = D24_DVMA_BASE;
-		ex_end = D24_DVMA_END;
-	} else {
-		ex_start = iommu_dvmamap->ex_start;
-		ex_end = iommu_dvmamap->ex_end;
-	}
-
-	error = extent_alloc_subregion(iommu_dvmamap,
-					ex_start, ex_end,
-					size, alignment, boundary,
-					(flags & BUS_DMA_NOWAIT) == 0
-						? EX_WAITOK : EX_NOWAIT,
-					(u_long *)&dva);
-	splx(s);
-	if (error != 0)
-		return (error);
-
-	/*
-	 * Compute the location, size, and number of segments actually
-	 * returned by the VM code.
-	 */
-	segs[0].ds_addr = dva;
-	segs[0].ds_len = size;
-	*rsegs = 1;
-
-	mlist = segs[0]._ds_mlist;
-	/* Map memory into DVMA space */
-	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
-		pa = VM_PAGE_TO_PHYS(m);
-
-		iommu_enter(dva, pa);
-		dva += PAGE_SIZE;
-	}
-
-	return (0);
-}
-
-/*
- * Common function for freeing DMA-safe memory. May be called by
- * bus-specific DMA memory free functions.
- */
-void
-iommu_dmamem_free(t, segs, nsegs)
-	bus_dma_tag_t t;
-	bus_dma_segment_t *segs;
-	int nsegs;
-{
-	bus_addr_t addr;
-	bus_size_t len;
-	int s, error;
-
-	if (nsegs != 1)
-		panic("bus_dmamem_free: nsegs = %d", nsegs);
-
-	addr = segs[0].ds_addr;
-	len = segs[0].ds_len;
-
-	iommu_remove(addr, len);
-	s = splhigh();
-	error = extent_free(iommu_dvmamap, addr, len, EX_NOWAIT);
-	splx(s);
-	if (error != 0)
-		printf("warning: %ld of DVMA space lost\n", (long)len);
-	/*
-	 * Return the list of pages back to the VM system.
-	 */
-	_bus_dmamem_free_common(t, segs, nsegs);
-}
-
 /*
- * Common function for mapping DMA-safe memory. May be called by
- * bus-specific DMA memory map functions.
+ * Map DMA-safe memory.
  */
 int
 iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
@@ -737,11 +705,10 @@ iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
 	int flags;
 {
 	vm_page_t m;
-	vaddr_t va, sva;
+	vaddr_t va;
 	bus_addr_t addr;
 	struct pglist *mlist;
 	int cbit;
-	size_t oversize;
 	u_long align;
 
 	if (nsegs != 1)
@@ -753,28 +720,22 @@ iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
 	size = round_page(size);
 
 	/*
-	 * Find a region of kernel virtual addresses that can accomodate
-	 * our aligment requirements.
+	 * In case the segment has already been loaded by
+	 * iommu_dmamap_load_raw(), find a region of kernel virtual
+	 * addresses that can accomodate our aligment requirements.
 	 */
-	oversize = size + align - PAGE_SIZE;
-	sva = uvm_km_valloc(kernel_map, oversize);
-	if (sva == 0)
+	va = _bus_dma_valloc_skewed(size, 0, align, segs[0].ds_addr & -align);
+	if (va == 0)
 		return (ENOMEM);
 
-	/* Compute start of aligned region */
-	va = sva;
-	va += ((segs[0].ds_addr & (align - 1)) + align - va) & (align - 1);
-
-	/* Return excess virtual addresses */
-	if (va != sva)
-		(void)uvm_unmap(kernel_map, sva, va);
-	if (va + size != sva + oversize)
-		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
-
+	segs[0]._ds_va = va;
 	*kvap = (caddr_t)va;
-	mlist = segs[0]._ds_mlist;
 
+	/*
+	 * Map the pages allocated in _bus_dmamem_alloc() to the
+	 * kernel virtual address space.
+	 */
+	mlist = segs[0]._ds_mlist;
 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
 
 		if (size == 0)
@@ -796,8 +757,7 @@ iommu_dmamem_map(t, segs, nsegs, size, kvap, flags)
 }
 
 /*
- * Common functin for mmap(2)'ing DMA-safe memory. May be called by
- * bus-specific DMA mmap(2)'ing functions.
+ * mmap(2)'ing DMA-safe memory.
  */
 int
 iommu_dmamem_mmap(t, segs, nsegs, off, prot, flags)
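
The workhorse behind the iommu_dmamem_map() change is _bus_dma_valloc_skewed(), introduced in the machdep.c diff below. Its contract is the congruence (va - skew) == 0 (mod align): the returned KVA shares its low bits with the DVMA address, so a virtually-indexed cache sees the DMA memory at a non-aliasing color. A toy user-space demonstration of that congruence, written with the conventional (align - 1) modulo mask and entirely hypothetical numbers (align a power of two, skew < align, as the function's comment requires):

	#include <assert.h>

	int
	main(void)
	{
		unsigned long align = 0x10000;	/* cache alias span */
		unsigned long skew = 0x3000;	/* dva modulo align */
		unsigned long sva = 0xf0045000;	/* oversized region start */
		unsigned long va = sva;

		/* Advance va to the first address congruent to skew. */
		va += (skew + align - va) & (align - 1);

		assert((va - skew) % align == 0);	/* the invariant */
		assert(va >= sva && va - sva < align);	/* fits in the slack */
		return 0;
	}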
--- machdep.c	1.160
+++ machdep.c	1.161
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.160 2000/01/19 20:05:48 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.161 2000/05/09 22:39:35 pk Exp $ */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -1169,8 +1169,12 @@ _bus_dmamap_sync(t, map, offset, len, ops)
 {
 }
 
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
 int
-_bus_dmamem_alloc_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
+_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
 	bus_dma_tag_t t;
 	bus_size_t size, alignment, boundary;
 	bus_dma_segment_t *segs;
@@ -1195,8 +1199,8 @@ _bus_dmamem_alloc_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags
 	 * Allocate pages from the VM system.
 	 */
 	TAILQ_INIT(mlist);
-	error = uvm_pglistalloc(size, low, high,
-	    alignment, boundary, mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+	error = uvm_pglistalloc(size, low, high, 0, 0,
+	    mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
 	if (error)
 		return (error);
 
@@ -1208,6 +1212,19 @@ _bus_dmamem_alloc_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags
 	 * ARE IN OUR CUSTODY.
 	 */
 	segs[0]._ds_mlist = mlist;
+
+	/*
+	 * We now have physical pages, but no DVMA addresses yet. These
+	 * will be allocated in bus_dmamap_load*() routines. Hence we
+	 * save any alignment and boundary requirements in this dma
+	 * segment.
+	 */
+	segs[0].ds_addr = 0;
+	segs[0].ds_len = 0;
+	segs[0]._ds_va = 0;
+	segs[0]._ds_alignment = alignment;
+	segs[0]._ds_boundary = boundary;
 	*rsegs = 1;
 	return (0);
 }
@@ -1216,7 +1233,7 @@ _bus_dmamem_alloc_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags
  * bus-specific DMA memory free functions.
  */
 void
-_bus_dmamem_free_common(t, segs, nsegs)
+_bus_dmamem_free(t, segs, nsegs)
 	bus_dma_tag_t t;
 	bus_dma_segment_t *segs;
 	int nsegs;
@@ -1266,16 +1283,69 @@ _bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
 	panic("_bus_dmamem_mmap: not implemented");
 }
 
+/*
+ * Utility to allocate an aligned kernel virtual address range
+ */
+vaddr_t
+_bus_dma_valloc_skewed(size, boundary, align, skew)
+	size_t size;
+	u_long boundary;
+	u_long align;
+	u_long skew;
+{
+	size_t oversize;
+	vaddr_t va, sva;
+
+	/*
+	 * Find a region of kernel virtual addresses that is aligned
+	 * to the given address modulo the requested alignment, i.e.
+	 *
+	 *	(va - skew) == 0 mod align
+	 *
+	 * The following conditions apply to the arguments:
+	 *
+	 *	- `size' must be a multiple of the VM page size
+	 *	- `align' must be a power of two
+	 *	  and greater than or equal to the VM page size
+	 *	- `skew' must be smaller than `align'
+	 *	- `size' must be smaller than `boundary'
+	 */
+
+	/* XXX - Implement this! */
+	if (boundary)
+		panic("_bus_dma_valloc_skewed: not implemented");
+
+	/*
+	 * First, find a region large enough to contain any aligned chunk
+	 */
+	oversize = size + align - PAGE_SIZE;
+	sva = uvm_km_valloc(kernel_map, oversize);
+	if (sva == 0)
+		return (ENOMEM);
+
+	/*
+	 * Compute start of aligned region
+	 */
+	va = sva;
+	va += (skew + align - va) & -align;
+
+	/*
+	 * Return excess virtual addresses
+	 */
+	if (va != sva)
+		(void)uvm_unmap(kernel_map, sva, va);
+	if (va + size != sva + oversize)
+		(void)uvm_unmap(kernel_map, va + size, sva + oversize);
+
+	return (va);
+}
+
 /* sun4/sun4c dma map functions */
 int	sun4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
 	    bus_size_t, struct proc *, int));
+int	sun4_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t,
+	    bus_dma_segment_t *, int, bus_size_t, int));
 void	sun4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
-int	sun4_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size,
-	    bus_size_t alignment, bus_size_t boundary,
-	    bus_dma_segment_t *segs, int nsegs, int *rsegs,
-	    int flags));
-void	sun4_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
-	    int nsegs));
 int	sun4_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs,
 	    int nsegs, size_t size, caddr_t *kvap,
 	    int flags));
@@ -1292,12 +1362,10 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 	struct proc *p;
 	int flags;
 {
-#if notyet
 	bus_size_t sgsize;
-	caddr_t vaddr = buf;
+	vaddr_t va = (vaddr_t)buf;
 	bus_addr_t dva;
 	pmap_t pmap;
-#endif
 
 	/*
 	 * Make sure that on error condition we return "no valid mappings".
@@ -1307,24 +1375,23 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 	if (buflen > map->_dm_size)
 		return (EINVAL);
 
-	/*
-	 * XXX Need to implement "don't dma across this boundry".
-	 */
-	if (map->_dm_boundary != 0)
-		panic("bus_dmamap_load: boundaries not implemented");
-
 	cpuinfo.cache_flush(buf, buflen);
 
-	if (p == NULL && (map->_dm_flags & BUS_DMA_24BIT) == 0) {
+	if ((map->_dm_flags & BUS_DMA_24BIT) == 0) {
+		/*
+		 * XXX Need to implement "don't dma across this boundry".
+		 */
+		if (map->_dm_boundary != 0)
+			panic("bus_dmamap_load: boundaries not implemented");
 		map->dm_mapsize = buflen;
 		map->dm_nsegs = 1;
-		map->dm_segs[0].ds_addr = (bus_addr_t)buf;
+		map->dm_segs[0].ds_addr = (bus_addr_t)va;
 		map->dm_segs[0].ds_len = buflen;
+		map->_dm_flags |= _BUS_DMA_DIRECTMAP;
 		return (0);
 	}
 
 #if notyet
-	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));
+	sgsize = round_page(buflen + (va & PGOFSET));
 
 	if (extent_alloc(dvmamap24, sgsize, NBPG, map->_dm_boundary,
 	    (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
@@ -1336,10 +1403,8 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 	 * We always use just one segment.
 	 */
 	map->dm_mapsize = buflen;
 	map->dm_nsegs = 1;
-	map->dm_segs[0].ds_addr = dva + (vaddr & PGOFSET);
+	map->dm_segs[0].ds_addr = dva + (va & PGOFSET);
 	map->dm_segs[0].ds_len = buflen;
-	map->_dm_flags |= BUS_DMA_HASMAP;
 
 	if (p != NULL)
 		pmap = p->p_vmspace->vm_map.pmap;
@@ -1351,12 +1416,12 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 		/*
 		 * Get the physical address for this page.
 		 */
-		(void) pmap_extract(pmap, (vaddr_t)vaddr, &pa);
+		(void) pmap_extract(pmap, va, &pa);
 
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
-		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+		sgsize = NBPG - (va & PGOFSET);
 		if (buflen < sgsize)
 			sgsize = buflen;
 
@@ -1367,16 +1432,103 @@ sun4_dmamap_load(t, map, buf, buflen, p, flags)
 #endif
 #endif
 		pmap_enter(pmap_kernel(), dva,
-			   (pa & ~(NBPG-1))| PMAP_NC,
+			   (pa & ~(NBPG-1)) | PMAP_NC,
 			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
 
 		dva += PAGE_SIZE;
-		vaddr += sgsize;
+		va += sgsize;
 		buflen -= sgsize;
 	}
-#else
-	panic("sun4_dmamap_load: not implemented");
 #endif
+
+	map->dm_nsegs = 1;
 	return (0);
 }
+
+/*
+ * Like _bus_dmamap_load(), but for raw memory allocated with
+ * bus_dmamem_alloc().
+ */
+int
+sun4_dmamap_load_raw(t, map, segs, nsegs, size, flags)
+	bus_dma_tag_t t;
+	bus_dmamap_t map;
+	bus_dma_segment_t *segs;
+	int nsegs;
+	bus_size_t size;
+	int flags;
+{
+	vm_page_t m;
+	paddr_t pa;
+	bus_addr_t dva;
+	bus_size_t sgsize;
+	u_long boundary;
+	struct pglist *mlist;
+	int pagesz = PAGE_SIZE;
+	int error;
+
+	map->dm_nsegs = 0;
+
+#ifdef DIAGNOSTIC
+	/* XXX - unhelpful since we can't reset these in map_unload() */
+	if (segs[0].ds_addr != 0 || segs[0].ds_len != 0)
+		panic("sun4_dmamap_load_raw: segment already loaded: "
+			"addr 0x%lx, size 0x%lx",
+			segs[0].ds_addr, segs[0].ds_len);
+#endif
+
+	sgsize = round_page(size);
+
+	/*
+	 * A boundary presented to bus_dmamem_alloc() takes precedence
+	 * over boundary in the map.
+	 */
+	if ((boundary = segs[0]._ds_boundary) == 0)
+		boundary = map->_dm_boundary;
+
+	/* Allocate DVMA addresses */
+	if ((map->_dm_flags & BUS_DMA_24BIT) != 0) {
+		error = extent_alloc(dvmamap24, sgsize,
+					segs[0]._ds_alignment,
+					boundary,
+					(flags & BUS_DMA_NOWAIT) == 0
+						? EX_WAITOK : EX_NOWAIT,
+					(u_long *)&dva);
+		if (error)
+			return (error);
+	} else {
+		/* Any properly aligned virtual address will do */
+		dva = _bus_dma_valloc_skewed(sgsize, boundary,
+					     segs[0]._ds_alignment, 0);
+		if (dva == 0)
+			return (ENOMEM);
+	}
+
+	map->dm_segs[0].ds_addr = dva;
+	map->dm_segs[0].ds_len = size;
+
+	/* Map physical pages into IOMMU */
+	mlist = segs[0]._ds_mlist;
+	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
+		if (sgsize == 0)
+			panic("sun4_dmamap_load_raw: size botch");
+		pa = VM_PAGE_TO_PHYS(m);
+#ifdef notyet
+#if defined(SUN4)
+		if (have_iocache)
+			pa |= PG_IOC;
+#endif
+#endif
+		pmap_enter(pmap_kernel(), dva,
+			   (pa & -pagesz) | PMAP_NC,
+			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+
+		dva += pagesz;
+		sgsize -= pagesz;
+	}
+
+	map->dm_nsegs = 1;
+	map->dm_mapsize = size;
+
+	return (0);
+}
@@ -1388,149 +1540,40 @@ sun4_dmamap_unload(t, map)
 	bus_dma_tag_t t;
 	bus_dmamap_t map;
 {
-	if (map->dm_nsegs != 1)
-		panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);
+	bus_dma_segment_t *segs = map->dm_segs;
+	int nsegs = map->dm_nsegs;
+	int flags = map->_dm_flags;
+	bus_addr_t dva;
+	bus_size_t len;
+	int i;
 
-	if (1) {
+	if ((flags & _BUS_DMA_DIRECTMAP) != 0) {
+		/* Nothing to release */
 		map->dm_mapsize = 0;
 		map->dm_nsegs = 0;
+		map->_dm_flags &= ~_BUS_DMA_DIRECTMAP;
 		return;
 	}
-#if notyet
-	bus_addr_t addr;
-	bus_size_t len;
 
-	if ((map->_dm_flags & (BUS_DMA_BIT24 | BUS_DMA_HASMAP)) == 0) {
-		map->dm_mapsize = 0;
-		map->dm_nsegs = 0;
-		return;
-	}
+	for (i = 0; i < nsegs; i++) {
+		dva = segs[i].ds_addr;
+		len = segs[i].ds_len;
+		len = ((dva & PGOFSET) + len + PGOFSET) & ~PGOFSET;
+		dva &= ~PGOFSET;
 
-	addr = map->dm_segs[0].ds_addr & ~PGOFSET;
-	len = map->dm_segs[0].ds_len;
+		pmap_remove(pmap_kernel(), dva, dva + len);
 
-	pmap_remove(pmap_kernel(), addr, addr + len);
-
-	if (extent_free(dvmamap24, addr, len, EX_NOWAIT) != 0)
-		printf("warning: %ld of DVMA space lost\n", len);
+		if ((flags & BUS_DMA_24BIT) != 0) {
+			if (extent_free(dvmamap24, dva, len, EX_NOWAIT) != 0)
+				printf("warning: %ld of DVMA space lost\n", len);
+		} else {
+			uvm_unmap(kernel_map, dva, dva + len);
+		}
+	}
 
 	/* Mark the mappings as invalid. */
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
-#endif
 }
 
-/*
- * Common function for DMA-safe memory allocation. May be called
- * by bus-specific DMA memory allocation functions.
- */
-int
-sun4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
-	bus_dma_tag_t t;
-	bus_size_t size, alignment, boundary;
-	bus_dma_segment_t *segs;
-	int nsegs;
-	int *rsegs;
-	int flags;
-{
-	bus_addr_t dva;
-	vm_page_t m;
-	struct pglist *mlist;
-	int error;
-
-	if ((flags & BUS_DMA_24BIT) == 0) {
-		/* Any memory will do */
-		vaddr_t va;
-		va = uvm_km_kmemalloc(kernel_map, uvm.kernel_object, size,
-				      (flags & BUS_DMA_NOWAIT) != 0
-					? UVM_KMF_NOWAIT
-					: 0);
-		if (va == NULL)
-			return (ENOMEM);
-
-		kvm_uncache((caddr_t)va, btoc(size));
-		segs[0].ds_addr = (bus_addr_t)va;
-		segs[0].ds_len = size;
-		segs[0]._ds_mlist = NULL;
-		*rsegs = 1;
-		return (0);
-	}
-
-	error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
-					 segs, nsegs, rsegs, flags);
-	if (error != 0)
-		return (error);
-
-	if (extent_alloc(dvmamap24, round_page(size), alignment, boundary,
-			 (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
-			 (u_long *)&dva) != 0) {
-		_bus_dmamem_free_common(t, segs, nsegs);
-		return (ENOMEM);
-	}
-
-	/*
-	 * Compute the location, size, and number of segments actually
-	 * returned by the VM code.
-	 */
-	segs[0].ds_addr = dva;
-	segs[0].ds_len = size;
-	*rsegs = 1;
-
-	mlist = segs[0]._ds_mlist;
-
-	/* Map memory into DVMA space */
-	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
-		paddr_t pa = VM_PAGE_TO_PHYS(m);
-
-#ifdef notyet
-#if defined(SUN4)
-		if (have_iocache)
-			pa |= PG_IOC;
-#endif
-#endif
-		pmap_enter(pmap_kernel(), (vaddr_t)dva,
-			   pa | PMAP_NC,
-			   VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
-		dva += PAGE_SIZE;
-	}
-
-	return (0);
-}
-
-/*
- * Common function for freeing DMA-safe memory. May be called by
- * bus-specific DMA memory free functions.
- */
-void
-sun4_dmamem_free(t, segs, nsegs)
-	bus_dma_tag_t t;
-	bus_dma_segment_t *segs;
-	int nsegs;
-{
-	bus_addr_t addr;
-	bus_size_t len;
-
-	if (segs[0]._ds_mlist == NULL) {
-		vaddr_t kva = (vaddr_t)segs[0].ds_addr;
-		vsize_t size = round_page(segs[0].ds_len);
-		uvm_unmap(kernel_map, kva, kva + size);
-		return;
-	}
-
-	addr = segs[0].ds_addr;
-	len = round_page(segs[0].ds_len);
-
-	if (extent_free(dvmamap24, addr, len, EX_NOWAIT) != 0)
-		printf("warning: %ld of DVMA space lost\n", len);
-
-	pmap_remove(pmap_kernel(), addr, addr + len);
-
-	/*
-	 * Return the list of pages back to the VM system.
-	 */
-	_bus_dmamem_free_common(t, segs, nsegs);
-}
-
 /*
@@ -1553,20 +1596,16 @@ sun4_dmamem_map(t, segs, nsegs, size, kvap, flags)
 	if (nsegs != 1)
 		panic("sun4_dmamem_map: nsegs = %d", nsegs);
 
-	if (segs[0]._ds_mlist == NULL) {
-		*kvap = (caddr_t)segs[0].ds_addr;
-		return (0);
-	}
-
 	size = round_page(size);
 
 	va = uvm_km_valloc(kernel_map, size);
 	if (va == 0)
 		return (ENOMEM);
 
+	segs[0]._ds_va = va;
 	*kvap = (caddr_t)va;
-	mlist = segs[0]._ds_mlist;
 
+	mlist = segs[0]._ds_mlist;
 	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
 		paddr_t pa;
 
@@ -1593,12 +1632,12 @@ struct sparc_bus_dma_tag mainbus_dma_tag = {
 	sun4_dmamap_load,
 	_bus_dmamap_load_mbuf,
 	_bus_dmamap_load_uio,
-	_bus_dmamap_load_raw,
+	sun4_dmamap_load_raw,
 	sun4_dmamap_unload,
 	_bus_dmamap_sync,
 
-	sun4_dmamem_alloc,
-	sun4_dmamem_free,
+	_bus_dmamem_alloc,
+	_bus_dmamem_free,
 	sun4_dmamem_map,
 	_bus_dmamem_unmap,
 	_bus_dmamem_mmap
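
On sun4/sun4c the release path chosen at unload time now depends entirely on flags recorded when the map was created and loaded: _BUS_DMA_DIRECTMAP means there is nothing to release, BUS_DMA_24BIT means the range came from the dvmamap24 extent, and anything else is a skewed KVA handed back with uvm_unmap(). A hypothetical driver fragment requesting the 24-bit treatment (softc and field names are made up; the flag and entry points are from the diffs above):

	/*
	 * A device that drives only 24 address lines: mark the map at
	 * creation time so load_raw()/unload() use the dvmamap24 extent.
	 */
	error = bus_dmamap_create(sc->sc_dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_24BIT, &sc->sc_dmamap);
	if (error == 0)
		error = bus_dmamap_load_raw(sc->sc_dmatag, sc->sc_dmamap,
		    &sc->sc_dmaseg, 1, size, BUS_DMA_NOWAIT);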