Don't use rmaps; use extent maps.

thorpej 2002-09-25 21:58:39 +00:00
parent 1f35d185e0
commit dc5c7db69d
2 changed files with 58 additions and 64 deletions
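
Both files make the same conversion: the old rmap allocator (rminit()/rmalloc()/rmfree() from <sys/map.h>) is replaced by the extent(9) arena allocator from <sys/extent.h>. For orientation, here is a minimal sketch of the extent(9) lifecycle the new code follows. The helper names (sketch_init/sketch_alloc/sketch_free) and parameters (base/avail/len/align) are made up for illustration; the extent calls and flags are the ones the diffs below actually use.

    #include <sys/param.h>
    #include <sys/extent.h>
    #include <sys/malloc.h>
    #include <sys/systm.h>

    static struct extent *ex;	/* illustrative arena handle */

    void
    sketch_init(u_long base, u_long avail)
    {
            /*
             * Create the arena once.  The end address is inclusive,
             * hence the "- 1".  NULL/0 means no preallocated
             * descriptor storage: the extent code malloc()s region
             * descriptors itself as needed.
             */
            ex = extent_create("dvma", base, base + (avail - 1),
                M_DEVBUF, NULL, 0, EX_NOCOALESCE | EX_NOWAIT);
            if (ex == NULL)
                    panic("sketch_init: extent_create failed");
    }

    void *
    sketch_alloc(u_long len, u_long align)
    {
            u_long start;
            int error;

            /*
             * First-fit (EX_FAST) allocation of len bytes aligned
             * to align, with no boundary constraint; EX_NOWAIT
             * fails instead of sleeping.
             */
            error = extent_alloc(ex, len, align, 0,
                EX_FAST | EX_NOWAIT | EX_MALLOCOK, &start);
            if (error)
                    return (NULL);
            return ((void *)start);
    }

    void
    sketch_free(u_long start, u_long len)
    {
            /* Regions must be freed exactly as they were allocated. */
            if (extent_free(ex, start, len, EX_NOWAIT | EX_MALLOCOK))
                    panic("sketch_free: extent_free failed");
    }

EX_FAST takes the first free region that fits rather than the best fit, EX_NOCOALESCE keeps allocated regions distinct so each one must be freed exactly as it was allocated, and EX_MALLOCOK tells the extent manager it is safe to malloc() a region descriptor at that point.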

sys/arch/sun3/sun3/dvma.c

@@ -1,4 +1,4 @@
-/* $NetBSD: dvma.c,v 1.22 2001/09/05 13:21:09 tsutsui Exp $ */
+/* $NetBSD: dvma.c,v 1.23 2002/09/25 21:58:39 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
-#include <sys/map.h>
+#include <sys/extent.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
@@ -62,9 +62,8 @@
/* DVMA is the last 1MB, but the PROM owns the last page. */
#define DVMA_MAP_END (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
-/* Resource map used by dvma_mapin/dvma_mapout */
-#define NUM_DVMA_SEGS 10
-struct map dvma_segmap[NUM_DVMA_SEGS];
+/* Extent map used by dvma_mapin/dvma_mapout */
+struct extent *dvma_extent;
/* XXX: Might need to tune this... */
vsize_t dvma_segmap_size = 6 * NBSG;
@@ -83,7 +82,7 @@ dvma_init()
* remainder will be used as the DVMA page pool.
*
* Note that no INTRSAFE is needed here because the
-* dvma_segmap manages things handled in interrupt
+* dvma_extent manages things handled in interrupt
* context.
*/
phys_map = uvm_map_create(pmap_kernel(),
@@ -104,8 +103,9 @@ dvma_init()
* Create the VM pool used for mapping whole segments
* into DVMA space for the purpose of data transfer.
*/
-rminit(dvma_segmap, dvma_segmap_size, segmap_addr,
-"dvma_segmap", NUM_DVMA_SEGS);
+dvma_extent = extent_create("dvma", segmap_addr,
+segmap_addr + (dvma_segmap_size - 1), M_DEVBUF,
+NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
}
/*
@@ -186,8 +186,7 @@ dvma_mapin(kva, len, canwait)
vaddr_t seg_kva, seg_dma;
vsize_t seg_len, seg_off;
vaddr_t v, x;
-int sme;
-int s;
+int s, sme, error;
/* Get seg-aligned address and length. */
seg_kva = (vaddr_t)kva;
@@ -199,35 +198,39 @@
s = splvm();
/* Allocate the DVMA segment(s) */
-seg_dma = rmalloc(dvma_segmap, seg_len);
+error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
+EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
+if (error) {
+splx(s);
+return (NULL);
+}
#ifdef DIAGNOSTIC
if (seg_dma & SEGOFSET)
panic("dvma_mapin: seg not aligned");
#endif
-if (seg_dma != 0) {
-/* Duplicate the mappings into DMA space. */
-v = seg_kva;
-x = seg_dma;
-while (seg_len > 0) {
-sme = get_segmap(v);
+/* Duplicate the mappings into DMA space. */
+v = seg_kva;
+x = seg_dma;
+while (seg_len > 0) {
+sme = get_segmap(v);
#ifdef DIAGNOSTIC
-if (sme == SEGINV)
-panic("dvma_mapin: seg not mapped");
+if (sme == SEGINV)
+panic("dvma_mapin: seg not mapped");
#endif
#ifdef HAVECACHE
-/* flush write-back on old mappings */
-if (cache_size)
-cache_flush_segment(v);
+/* flush write-back on old mappings */
+if (cache_size)
+cache_flush_segment(v);
#endif
-set_segmap_allctx(x, sme);
-v += NBSG;
-x += NBSG;
-seg_len -= NBSG;
-}
-seg_dma += seg_off;
+set_segmap_allctx(x, sme);
+v += NBSG;
+x += NBSG;
+seg_len -= NBSG;
+}
+seg_dma += seg_off;
splx(s);
return ((caddr_t)seg_dma);
@@ -276,6 +279,9 @@ dvma_mapout(dma, len)
v += NBSG;
}
-rmfree(dvma_segmap, seg_len, seg_dma);
+if (extent_free(dvma_extent, seg_dma, seg_len,
+EX_NOWAIT | EX_MALLOCOK))
+panic("dvma_mapout: unable to free 0x%lx,0x%lx",
+seg_dma, seg_len);
splx(s);
}
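
A note on the large middle hunk above: most of the paired lines are a re-indent, not new logic. rmalloc() signalled failure in-band by returning 0, so the segment-copy loop sat inside "if (seg_dma != 0) { ... }". extent_alloc() instead returns an error code and hands the allocated address back through its last argument, so the failure case can return early and the loop loses one level of indentation. In outline (a sketch of the control flow, not the literal diff):

    /* before: in-band failure value */
    seg_dma = rmalloc(dvma_segmap, seg_len);
    if (seg_dma != 0) {
            /* ... duplicate the segment mappings ... */
    }

    /* after: early return on error */
    error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
        EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
    if (error) {
            splx(s);
            return (NULL);
    }
    /* ... duplicate the segment mappings ... */

Passing NBSG as the alignment argument also guarantees a segment-aligned result, which is what the DIAGNOSTIC check against SEGOFSET verifies.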

sys/arch/sun3/sun3x/dvma.c

@@ -1,4 +1,4 @@
-/* $NetBSD: dvma.c,v 1.23 2001/09/11 20:37:13 chs Exp $ */
+/* $NetBSD: dvma.c,v 1.24 2002/09/25 21:58:40 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -80,7 +80,7 @@
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
-#include <sys/map.h>
+#include <sys/extent.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
@@ -100,28 +100,25 @@
#include <sun3/sun3x/iommu.h>
/*
-* Use a resource map to manage DVMA scratch-memory pages.
+* Use an extent map to manage DVMA scratch-memory pages.
* Note: SunOS says last three pages are reserved (PROM?)
* Note: need a separate map (sub-map?) for last 1MB for
* use by VME slave interface.
*/
-/* Number of slots in dvmamap. */
-int dvma_max_segs = btoc(DVMA_MAP_SIZE);
-struct map *dvmamap;
+struct extent *dvma_extent;
void
dvma_init()
{
/*
-* Create the resource map for DVMA pages.
+* Create the extent map for DVMA pages.
*/
-dvmamap = malloc((sizeof(struct map) * dvma_max_segs),
-M_DEVBUF, M_WAITOK);
-rminit(dvmamap, btoc(DVMA_MAP_AVAIL), btoc(DVMA_MAP_BASE),
-"dvmamap", dvma_max_segs);
+dvma_extent = extent_create("dvma", DVMA_MAP_BASE,
+DVMA_MAP_BASE + (DVMA_MAP_AVAIL - 1), M_DEVBUF,
+NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
/*
* Enable DVMA in the System Enable register.
@@ -173,9 +170,9 @@ dvma_mapin(kmem_va, len, canwait)
{
void * dvma_addr;
vaddr_t kva, tva;
-int npf, s;
+int npf, s, error;
paddr_t pa;
-long off, pn;
+long off;
boolean_t rv;
kva = (vaddr_t)kmem_va;
@@ -196,32 +193,22 @@ dvma_mapin(kmem_va, len, canwait)
len = round_page(len + off); /* Round the buffer length to pages. */
npf = btoc(len); /* Determine the number of pages to be mapped. */
+/*
+ * Try to allocate DVMA space of the appropriate size
+ * in which to do a transfer.
+ */
s = splvm();
-for (;;) {
-/*
- * Try to allocate DVMA space of the appropriate size
- * in which to do a transfer.
- */
-pn = rmalloc(dvmamap, npf);
-if (pn != 0)
-break;
-if (canwait) {
-(void)tsleep(dvmamap, PRIBIO+1, "physio", 0);
-continue;
-}
-splx(s);
-return NULL;
-}
+error = extent_alloc(dvma_extent, len, PAGE_SIZE, 0,
+EX_FAST | EX_NOWAIT | (canwait ? EX_WAITSPACE : 0), &tva);
splx(s);
+if (error)
+return (NULL);
/*
* Tva is the starting page to which the data buffer will be double
* mapped. Dvma_addr is the starting address of the buffer within
* that page and is the return value of the function.
*/
-tva = ctob(pn);
dvma_addr = (void *) (tva + off);
for (;npf--; kva += NBPG, tva += NBPG) {
@@ -254,8 +241,8 @@ dvma_mapin(kmem_va, len, canwait)
*/
void
dvma_mapout(dvma_addr, len)
-void * dvma_addr;
-int len;
+void *dvma_addr;
+int len;
{
u_long kva;
int s, off;
@@ -270,8 +257,9 @@ dvma_mapout(dvma_addr, len)
pmap_update(pmap_kernel());
s = splvm();
-rmfree(dvmamap, btoc(len), btoc(kva));
-wakeup(dvmamap);
+if (extent_free(dvma_extent, kva, len, EX_NOWAIT | EX_MALLOCOK))
+panic("dvma_mapout: unable to free region: 0x%lx,0x%x",
+kva, len);
splx(s);
}
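
The sun3x conversion also changes how waiting for space works. The old code looped, sleeping on the map with tsleep() and relying on the wakeup(dvmamap) in dvma_mapout() to retry after a free. extent(9) implements that protocol internally: EX_WAITSPACE makes extent_alloc() sleep until a region is freed (extent_free() wakes any waiters itself), while EX_NOWAIT continues to mean "do not sleep for descriptor memory". Both the retry loop and the explicit wakeup() therefore disappear. A condensed before/after, using the declarations from the diff:

    /* before: retry loop keyed on rmalloc()'s in-band failure */
    for (;;) {
            pn = rmalloc(dvmamap, npf);
            if (pn != 0)
                    break;
            if (canwait) {
                    (void)tsleep(dvmamap, PRIBIO+1, "physio", 0);
                    continue;
            }
            splx(s);
            return NULL;
    }
    tva = ctob(pn);

    /* after: extent_alloc() sleeps for space only if canwait is set */
    error = extent_alloc(dvma_extent, len, PAGE_SIZE, 0,
        EX_FAST | EX_NOWAIT | (canwait ? EX_WAITSPACE : 0), &tva);
    if (error)
            return (NULL);

The units change as well: the rmap was managed in page frames (npf, btoc()/ctob()), while the extent is managed directly in virtual addresses and byte lengths, which is why dvma_mapout() now frees (kva, len) instead of (btoc(kva), btoc(len)).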