add gemini pseudo-nic and support code.

This commit is contained in:
cliff 2008-12-06 05:22:39 +00:00
parent 8586c9700b
commit 41bfa2c41d
10 changed files with 1357 additions and 18 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.gemini,v 1.8 2008/11/26 05:25:27 matt Exp $
# $NetBSD: files.gemini,v 1.9 2008/12/06 05:22:39 cliff Exp $
#
# Configuration info for GEMINI CPU support
# Based on omap/files.omap2
@ -47,8 +47,13 @@ device geminiipi {}
attach geminiipi at obio
file arch/arm/gemini/gemini_ipi.c geminiipi needs-flag
# Gemini inter-processor-messages
device geminiipm {}
attach geminiipm at geminiipi
file arch/arm/gemini/gemini_ipm.c geminiipm needs-flag
device gpn: ifnet, ether, arp
attach gpn at geminiipi
attach gpn at geminiipm
file arch/arm/gemini/if_gpn.c gpn needs-flag
# GEMINI GPIO controllers

View File

@ -3,11 +3,11 @@
# error IPI needs GEMINI_MASTER or GEMINI_SLAVE
#endif
#include "locators.h"
#include "gpn.h"
#include "geminiipm.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gemini_ipi.c,v 1.3 2008/11/26 05:30:50 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: gemini_ipi.c,v 1.4 2008/12/06 05:22:39 cliff Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -88,8 +88,8 @@ gemini_ipi_attach(struct device *parent, struct device *self, void *aux)
aprint_normal("\n");
aprint_naive("\n");
#if NGNP > 0
config_found(self, "gpn", NULL);
#if NGEMINIIPM > 0
config_found(self, __UNCONST("geminiipm"), NULL);
#endif
}

View File

@ -0,0 +1,340 @@
#include "opt_gemini.h"
#if !defined(GEMINI_MASTER) && !defined(GEMINI_SLAVE)
# error IPI needs GEMINI_MASTER or GEMINI_SLAVE
#endif
#include "locators.h"
#include "gpn.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gemini_ipm.c,v 1.1 2008/12/06 05:22:39 cliff Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/malloc.h>
#include <arm/cpufunc.h>
#include <arch/arm/gemini/gemini_obiovar.h>
#include <arch/arm/gemini/gemini_ipivar.h>
#include <arch/arm/gemini/gemini_ipm.h>
#include <arch/arm/gemini/gemini_ipmvar.h>
#include <evbarm/gemini/gemini.h>
// #define IPMDEBUG
#if defined IPMDEBUG
static int ipmdebug;
# define DPRINTFN(n, x) do { if ((n) <= ipmdebug) printf x ; } while (0)
#else
# define DPRINTFN(n, x)
#endif
typedef struct dispatch_entry {
unsigned int ipl;
size_t quota;
void *arg;
void (*consume)(void *, const void *);
void (*counter)(void *, size_t);
#ifdef NOTYET
void *sih; /* softint handle */
#endif
} ipm_dispatch_entry_t;
typedef struct gemini_ipm_softc {
device_t sc_dev;
void *sc_ih;
ipm_queue_t *sc_rxqueue;
ipm_queue_t *sc_txqueue;
size_t sc_txqavail; /* quota available */
unsigned long long sc_rxcount;
unsigned long long sc_txcount;
ipm_dispatch_entry_t sc_dispatch_tab[256];
} gemini_ipm_softc_t;
static int gemini_ipm_match(struct device *, struct cfdata *, void *);
static void gemini_ipm_attach(struct device *, struct device *, void *);
static int gemini_ipm_intr(void *);
static void gemini_ipm_count_txdone(gemini_ipm_softc_t *);
CFATTACH_DECL_NEW(geminiipm, sizeof(struct gemini_ipm_softc),
gemini_ipm_match, gemini_ipm_attach, NULL, NULL);
gemini_ipm_softc_t *gemini_ipm_sc = NULL;
/*
* copy from shared queue to private copy
* SW coherency would go here if desc_src were in cached mem
*/
static inline void
gemini_ipm_desc_read(ipm_desc_t *desc_dst, const ipm_desc_t *desc_src)
{
extern void gpn_print_gd(const void *); /* XXX DEBUG */
DPRINTFN(2, ("%s: %p %p\n", __FUNCTION__, desc_dst, desc_src));
#ifdef IPMDEBUG
if (ipmdebug >= 3)
gpn_print_gd(desc_src);
#endif
*desc_dst = *desc_src;
KASSERT(desc_dst->tag != IPM_TAG_NONE);
}
/*
* copy from private copy to shared queue
* SW coherency would go here if desc_dst were in cached mem
*/
static inline void
gemini_ipm_desc_write(ipm_desc_t *desc_dst, const ipm_desc_t *desc_src)
{
extern void gpn_print_gd(const void *); /* XXX DEBUG */
DPRINTFN(2, ("%s: %p %p\n", __FUNCTION__, desc_dst, desc_src));
#ifdef IPMDEBUG
if (ipmdebug >= 3)
gpn_print_gd(desc_src);
#endif
KASSERT(desc_src->tag != IPM_TAG_NONE);
*desc_dst = *desc_src;
}
static int
gemini_ipm_match(struct device *parent, struct cfdata *cf, void *aux)
{
char *name = aux;
if (strcmp(name, "geminiipm") != 0)
return 0;
return 1;
}
static void
gemini_ipm_attach(struct device *parent, struct device *self, void *aux)
{
gemini_ipm_softc_t *sc = device_private(self);
void *ih;
sc->sc_dev = self;
ih = ipi_intr_establish(gemini_ipm_intr, sc);
if (ih == NULL)
panic("%s: Cannot establish IPI interrupt\n",
device_xname(self));
sc->sc_ih = ih;
memset(&sc->sc_dispatch_tab, 0, sizeof(sc->sc_dispatch_tab));
/*
* queues are flipped tx/rx for master/slave
*/
KASSERT(GEMINI_IPMQ_SIZE == (2 * sizeof(ipm_queue_t)));
#if defined(GEMINI_MASTER)
sc->sc_rxqueue = (ipm_queue_t *)GEMINI_IPMQ_VBASE;
sc->sc_txqueue = sc->sc_rxqueue + 1;
memset(sc->sc_rxqueue, 0, sizeof(ipm_queue_t));
memset(sc->sc_txqueue, 0, sizeof(ipm_queue_t));
#elif defined(GEMINI_SLAVE)
sc->sc_txqueue = (ipm_queue_t *)GEMINI_IPMQ_VBASE;
sc->sc_rxqueue = sc->sc_txqueue + 1;
#else
# error one of GEMINI_MASTER or GEMINI_SLAVE must be defined
#endif
sc->sc_txqavail = NIPMDESC;
sc->sc_rxcount = 0LL;
sc->sc_txcount = 0LL;
gemini_ipm_sc = sc;
aprint_normal("\n");
aprint_naive("\n");
#if NGPN > 0
config_found(self, __UNCONST("gpn"), NULL);
#endif
}
void *
gemini_ipm_register(uint8_t tag, unsigned int ipl, size_t quota,
void (*consume)(void *, const void *),
void (*counter)(void *, size_t),
void *arg)
{
gemini_ipm_softc_t *sc = gemini_ipm_sc;
ipm_dispatch_entry_t *disp;
void *ipmh = NULL;
int psw;
DPRINTFN(1, ("%s:%d: %d %d %ld %p %p %p\n", __FUNCTION__, __LINE__,
tag, ipl, quota, consume, counter, arg));
if (sc == NULL)
return NULL; /* not attached yet */
if (tag == 0)
return NULL; /* tag #0 is reserved */
psw = disable_interrupts(I32_bit);
disp = &sc->sc_dispatch_tab[tag];
if (disp->consume == 0) {
if (sc->sc_txqavail >= quota) {
sc->sc_txqavail -= quota;
disp->ipl = ipl;
disp->consume = consume;
disp->counter = counter;
disp->arg = arg;
#ifdef NOTYET
if (ipl > SOFTINT_LVLMASK)
panic("%s: bad level %d",
device_xname(sc->sc_dev), ipl);
disp->sih = softint_establish(ipl, consume, arg);
#endif
ipmh = disp;
}
}
restore_interrupts(psw);
return ipmh;
}
void
gemini_ipm_deregister(void *ipmh)
{
gemini_ipm_softc_t *sc = gemini_ipm_sc;
ipm_dispatch_entry_t *disp = ipmh;
int psw;
if (sc == NULL)
return;
psw = disable_interrupts(I32_bit);
#ifdef NOTYET
softint_disestablish(disp->sih);
#endif
memset(disp, 0, sizeof(*disp));
restore_interrupts(psw);
}
static inline int
gemini_ipm_dispatch(gemini_ipm_softc_t *sc)
{
ipm_dispatch_entry_t *disp;
ipm_desc_t desc;
ipmqindex_t ix_read;
ipmqindex_t ix_write;
int rv = 0;
ix_read = sc->sc_rxqueue->ix_read;
ix_write = sc->sc_rxqueue->ix_write;
if (! ipmqisempty(ix_read, ix_write)) {
rv = 1;
do {
gemini_ipm_desc_read(&desc,
&sc->sc_rxqueue->ipm_desc[ix_read]);
ix_read = ipmqnext(ix_read);
KASSERT(desc.tag != IPM_TAG_NONE);
disp = &sc->sc_dispatch_tab[desc.tag];
#ifdef NOTYET
softint_schedule(disp->sih);
#else
(*disp->consume)(disp->arg, &desc);
#endif
ix_write = sc->sc_rxqueue->ix_write;
sc->sc_rxqueue->ix_read = ix_read;
sc->sc_rxcount++;
} while (! ipmqisempty(ix_read, ix_write));
} else {
DPRINTFN(1, ("%s: ipmqisempty %d %d\n",
__FUNCTION__, ix_read, ix_write));
}
return rv;
}
static int
gemini_ipm_intr(void *arg)
{
gemini_ipm_softc_t *sc = arg;
int rv;
rv = gemini_ipm_dispatch(sc);
gemini_ipm_count_txdone(sc);
return rv;
}
int
gemini_ipm_produce(const void *adescp, size_t ndesc)
{
const ipm_desc_t *descp = adescp;
gemini_ipm_softc_t *sc = gemini_ipm_sc;
ipmqindex_t ix_read;
ipmqindex_t ix_write;
KASSERT(ndesc == 1); /* XXX TMP */
DPRINTFN(2, ("%s:%d: %p %ld, tag %d\n",
__FUNCTION__, __LINE__, descp, ndesc, descp->tag));
ix_read = sc->sc_txqueue->ix_read;
ix_write = sc->sc_txqueue->ix_write;
if (ipmqisfull(ix_read, ix_write)) {
/* we expect this to "never" happen; check your quotas */
panic("%s: queue full\n", device_xname(sc->sc_dev));
}
gemini_ipm_desc_write(&sc->sc_txqueue->ipm_desc[ix_write], descp);
sc->sc_txqueue->ix_write = ipmqnext(ix_write);
sc->sc_txcount++;
ipi_send();
gemini_ipm_count_txdone(sc);
return 0;
}
static void *
gemini_ba_to_va(bus_addr_t ba)
{
return (void *)(GEMINI_ALLMEM_VBASE + ba);
}
void
gemini_ipm_copyin(void *dst, bus_addr_t ba, size_t len)
{
void *src;
DPRINTFN(2, ("%s:%d: %p %#lx %ld\n",
__FUNCTION__, __LINE__, dst, ba, len));
src = gemini_ba_to_va(ba);
memcpy(dst, src, len);
cpu_dcache_inv_range((vaddr_t)src, len);
}
static void
gemini_ipm_count_txdone(gemini_ipm_softc_t *sc)
{
ipmqindex_t count = 0; /* XXX must count per tag */
ipm_dispatch_entry_t *disp;
ipmqindex_t ixr = sc->sc_txqueue->ix_read;
uint8_t tag = IPM_TAG_GPN;
static ipmqindex_t oixr = 0;
while (oixr != ixr) {
oixr = ipmqnext(oixr);
count++;
}
if (count != 0) {
disp = &sc->sc_dispatch_tab[tag];
(*disp->counter)(disp->arg, count);
}
}
void gemini_ipm_stats_print(void);
void
gemini_ipm_stats_print(void)
{
gemini_ipm_softc_t *sc = gemini_ipm_sc;
printf("rxcount %lld, txcount %lld\n", sc->sc_rxcount, sc->sc_txcount);
}

View File

@ -0,0 +1,76 @@
/* $NetBSD: gemini_ipm.h,v 1.1 2008/12/06 05:22:39 cliff Exp $ */
#ifndef _GEMINI_IPM_H_
#define _GEMINI_IPM_H_
/*
* generic/non-specific Messages
*/
#define IPM_TAG_NONE 0
#define IPM_TAG_GPN 1
#define IPM_TAG_WDOG 2
#define IPM_NTAGS (IPM_TAG_WDOG + 1) /* bump when you add new ones */
typedef struct ipm_desc {
uint8_t tag;
uint8_t blob[15];
} ipm_desc_t;
/*
* void *gemini_ipm_register(uint8_t tag, unsigned int ipl, size_t quota,
* void (*consume)(void *arg, const void *desc),
* void (*counter)(void *arg, size_t n),
* void *arg);
*
* - register an IPM service, identified by 'tag'
* - callback functions may eventually be dispatched via softint at the
* indicated 'ipl' (using softint_establish()/softint_disestablish());
* for now they are called directly at IPL_NET
* - reserve 'quota' descriptors; minimum is 1.
* - 'consume' function is called for each message received
* - 'counter' function is called to update the count of
* completed produced (sent) descriptors since the last update
* - 'arg' is private to the service
* - return value is an IPM handle ('ipmh') that can be used
* e.g. to de-register
* - if the 'tag' is already in use, or if 'quota' descriptors are not available,
* then NULL is returned.
*/
void *gemini_ipm_register(uint8_t, unsigned int, size_t,
void (*)(void *, const void *),
void (*)(void *, size_t),
void *);
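/*
* Usage sketch (illustrative only; the "foo" service, IPM_TAG_FOO and
* struct foo_softc below are hypothetical, not part of this commit):
*
* static void
* foo_consume(void *arg, const void *desc)
* {
* struct foo_softc *sc = arg;
* const ipm_desc_t *d = desc;
* // decode the received 16-byte descriptor in *d
* }
*
* static void
* foo_counter(void *arg, size_t n)
* {
* struct foo_softc *sc = arg;
* sc->sc_free += n; // credit back n completed tx descriptors
* }
*
* sc->sc_ipmh = gemini_ipm_register(IPM_TAG_FOO, IPL_SOFTNET, 8,
* foo_consume, foo_counter, sc);
* if (sc->sc_ipmh == NULL)
* panic("foo: tag in use or quota of 8 descriptors unavailable");
*
* if_gpn.c registers this way with IPM_TAG_GPN and a quota of MAX_TXACTIVE*2.
*/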
/*
* void gemini_ipm_deregister(void *ipmh);
*
* - tear down a service
* - 'ipmh' is the handle returned from a prior call to gemini_ipm_register()
*/
void gemini_ipm_deregister(void *);
/*
* int gemini_ipm_produce(const void *desc, size_t ndesc);
*
* - service produces (sends) 'ndesc' messages described by the array of
* descriptors 'desc'.
* - if not all messages can be sent due to lack of descriptor queue resources,
* then the calling service has exceeded its quota and the system will panic.
* - after return the descriptors at 'desc' revert to the caller,
* which can recycle or free them as it sees fit.
*/
int gemini_ipm_produce(const void *, size_t);
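/*
* Usage sketch (illustrative only): to send one message, fill in a
* descriptor whose first byte is the service's tag and hand it to
* gemini_ipm_produce(); the caller is responsible for charging the send
* against its own quota, which is replenished via its 'counter' callback
* (this is how if_gpn.c manages sc_free).
*
* ipm_desc_t d;
*
* memset(&d, 0, sizeof(d));
* d.tag = IPM_TAG_FOO; // hypothetical tag
* // ... fill d.blob[] with tag-specific payload ...
* sc->sc_free--; // charge against our quota
* gemini_ipm_produce(&d, 1);
*/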
/*
* void gemini_ipm_copyin(void *dest, bus_addr_t ba, size_t len);
*
* - service copies in (receives) message data: 'len' bytes are copied
* from bus address 'ba' to virtual address 'dest'
* - this function is meant to be called from the service's registered
* 'consume' callback function
*/
void gemini_ipm_copyin(void *, bus_addr_t, size_t);
#endif /* _GEMINI_IPM_H_ */

View File

@ -0,0 +1,50 @@
/* $NetBSD: gemini_ipmvar.h,v 1.1 2008/12/06 05:22:39 cliff Exp $ */
#ifndef _GEMINI_IPMVAR_H_
#define _GEMINI_IPMVAR_H_
/*
* message queue
*
* - the queue gets located in memory shared between cores
* - is mapped non-cached so SW coherency is not required.
* - be sure ipm_queue_t starts on 32 bit (min) boundary to align descriptors
* - note that indices are 8 bit and NIPMDESC < (1<<8)
* be sure to adjust typedef if size is increased
* - current sizes, typedef, and padding make sizeof(ipm_queue_t) == 4096
*/
typedef uint32_t ipmqindex_t;
#define NIPMDESC 255
#define IPMQPADSZ (4096 - ((sizeof(ipm_desc_t) * NIPMDESC) + (2 * sizeof(ipmqindex_t))))
typedef struct ipm_queue {
ipm_desc_t ipm_desc[NIPMDESC];
volatile ipmqindex_t ix_write; /* writer inserts here, then increments */
volatile ipmqindex_t ix_read; /* reader extracts here and increments */
uint8_t pad[IPMQPADSZ];
} ipm_queue_t;
static inline ipmqindex_t
ipmqnext(ipmqindex_t ix)
{
if (++ix >= NIPMDESC)
ix = 0;
return ix;
}
static inline bool
ipmqisempty(ipmqindex_t ixr, ipmqindex_t ixw)
{
if (ixr == ixw)
return true;
return false;
}
static inline bool
ipmqisfull(ipmqindex_t ixr, ipmqindex_t ixw)
{
if (ipmqnext(ixw) == ixr)
return true;
return false;
}
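/*
* Usage sketch (illustrative): the producer fills ipm_desc[ix_write] first
* and only then advances ix_write; the consumer reads ipm_desc[ix_read]
* and only then advances ix_read. Since "full" is detected as
* ipmqnext(ix_write) == ix_read, at most NIPMDESC - 1 descriptors can be
* outstanding at once.
*
* if (!ipmqisfull(q->ix_read, q->ix_write)) {
* q->ipm_desc[q->ix_write] = *d; // publish the payload first
* q->ix_write = ipmqnext(q->ix_write); // then advance the index
* }
*/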
#endif /* _GEMINI_IPMVAR_H_ */

View File

@ -0,0 +1,742 @@
/* $NetBSD: if_gpn.c,v 1.1 2008/12/06 05:22:39 cliff Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <matt@3am-software.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include "opt_gemini.h"
#include "bpfilter.h"
__KERNEL_RCSID(0, "$NetBSD: if_gpn.c,v 1.1 2008/12/06 05:22:39 cliff Exp $");
#include <sys/param.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <machine/bus.h>
#include <arm/gemini/gemini_var.h>
#include <arm/gemini/gemini_ipm.h>
#define GPN_MOF 0x00 /* Middle Of Frame */
#define GPN_SOF 0x01 /* Start of Frame */
#define GPN_EOF 0x02 /* End of Frame */
#define GPN_FRAME 0x03 /* Complete Frame */
#define GPN_IFUP 0x05 /* partner is up */
#define GPN_IFDOWN 0x06 /* partner is down */
#define GPN_ACK0 0x10 /* placeholder */
#define GPN_ACK1 0x11 /* Ack 1 descriptor */
#define GPN_ACK2 0x12 /* Ack 2 descriptors */
#define GPN_ACK3 0x13 /* Ack 3 descriptors */
#define GPN_ACK4 0x14 /* Ack 4 descriptors */
#define GPN_ACK5 0x15 /* Ack 5 descriptors */
#define GPN_ACK6 0x16 /* Ack 6 descriptors */
#define GPN_ACK7 0x17 /* Ack 7 descriptors */
#define GPN_ACK8 0x18 /* Ack 8 descriptors */
#define GPN_ACK9 0x19 /* Ack 9 descriptors */
#define GPN_ACK10 0x1a /* Ack 10 descriptors */
#define GPN_ACK11 0x1b /* Ack 11 descriptors */
#define GPN_ACK12 0x1c /* Ack 12 descriptors */
#define GPN_ACK13 0x1d /* Ack 13 descriptors */
#define GPN_ACK14 0x1e /* Ack 14 descriptors */
typedef struct {
uint8_t gd_tag;
uint8_t gd_subtype;
uint8_t gd_txid;
uint8_t gd_pktlen64;
uint16_t gd_len1;
uint16_t gd_len2;
uint32_t gd_addr1;
uint32_t gd_addr2;
} ipm_gpn_desc_t;
typedef struct {
uint8_t agd_tag;
uint8_t agd_subtype;
uint8_t agd_txids[14];
} ipm_gpn_ack_desc_t;
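/*
* Both GPN descriptor layouts are intended to occupy exactly the 16 bytes
* of the generic ipm_desc_t (1 tag byte + 15 payload bytes); an
* illustrative compile-time check (not part of this commit) would be:
*
* CTASSERT(sizeof(ipm_gpn_desc_t) == sizeof(ipm_desc_t));
* CTASSERT(sizeof(ipm_gpn_ack_desc_t) == sizeof(ipm_desc_t));
*/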
#define MAX_TXACTIVE 60
struct gpn_txinfo {
struct mbuf *ti_mbuf;
bus_dmamap_t ti_map;
};
struct gpn_softc {
device_t sc_dev;
bus_dma_tag_t sc_dmat;
struct ifmedia sc_im;
struct ethercom sc_ec;
#define sc_if sc_ec.ec_if
size_t sc_free;
size_t sc_txactive;
void *sc_ih;
ipm_gpn_ack_desc_t sc_ack_desc;
struct mbuf *sc_rxmbuf;
struct gpn_txinfo sc_txinfo[MAX_TXACTIVE];
uint8_t sc_lastid;
bool sc_remoteup; /* remote side up? */
};
CTASSERT((GPN_SOF | GPN_EOF) == GPN_FRAME);
CTASSERT((GPN_SOF & GPN_EOF) == 0);
extern struct cfdriver gpn_cd;
static void gpn_ifstart(struct ifnet *);
#ifdef GPNDEBUG
static uint32_t
m_crc32_le(struct mbuf *m)
{
static const uint32_t crctab[] = {
0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
};
uint32_t crc;
size_t i;
crc = 0xffffffffU; /* initial value */
for (; m; m = m->m_next) {
for (i = 0; i < m->m_len; i++) {
crc ^= m->m_data[i];
crc = (crc >> 4) ^ crctab[crc & 0xf];
crc = (crc >> 4) ^ crctab[crc & 0xf];
}
}
return (crc);
}
#endif
static void
gpn_free_dmamaps(struct gpn_softc *sc)
{
struct gpn_txinfo *ti = sc->sc_txinfo;
struct gpn_txinfo * const end_ti = ti + __arraycount(sc->sc_txinfo);
for (; ti < end_ti; ti++) {
if (ti->ti_map == NULL)
continue;
bus_dmamap_destroy(sc->sc_dmat, ti->ti_map);
ti->ti_map = NULL;
}
}
static int
gpn_alloc_dmamaps(struct gpn_softc *sc)
{
struct gpn_txinfo *ti = sc->sc_txinfo;
struct gpn_txinfo * const end_ti = ti + __arraycount(sc->sc_txinfo);
int error;
for (error = 0; ti < end_ti; ti++) {
if (ti->ti_map != NULL)
continue;
error = bus_dmamap_create(sc->sc_dmat,
10000, 2, 8192, 0,
BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK,
&ti->ti_map);
if (error)
break;
}
if (error)
gpn_free_dmamaps(sc);
return error;
}
static bool
gpn_add_data(struct gpn_softc *sc, bus_addr_t addr, bus_size_t len)
{
struct mbuf *m, *m0;
size_t space;
m = sc->sc_rxmbuf;
KASSERT(m != NULL);
m->m_pkthdr.len += len;
while (m->m_next != NULL)
m = m->m_next;
KASSERT(len > 0);
space = M_TRAILINGSPACE(m);
for (;;) {
if (space > 0) {
if (len < space)
space = len;
gemini_ipm_copyin(mtod(m, uint8_t *) + m->m_len, addr,
space);
len -= space;
m->m_len += space;
if (len == 0)
return true;
addr += space;
}
MGET(m0, M_DONTWAIT, MT_DATA);
if (m0 == NULL)
break;
space = MLEN;
if (len > space) {
MCLGET(m0, M_DONTWAIT);
if (m0->m_flags & M_EXT)
space = MCLBYTES;
}
m0->m_len = 0;
m->m_next = m0;
m = m0;
}
return false;
}
static void
gpn_ack_txid(struct gpn_softc *sc, unsigned int txid)
{
ipm_gpn_ack_desc_t * const agd = &sc->sc_ack_desc;
agd->agd_txids[agd->agd_subtype] = txid;
if (++agd->agd_subtype == __arraycount(agd->agd_txids)) {
agd->agd_subtype += GPN_ACK0;
sc->sc_free--;
gemini_ipm_produce(agd, 1);
agd->agd_subtype = 0;
}
}
static void
gpn_process_data(struct gpn_softc *sc, const ipm_gpn_desc_t *gd)
{
struct ifnet * const ifp = &sc->sc_if;
size_t pktlen = gd->gd_pktlen64 * 64;
unsigned int subtype = gd->gd_subtype;
bool ok;
if ((subtype & GPN_SOF) == 0 && sc->sc_rxmbuf == NULL) {
ifp->if_ierrors++;
goto out;
}
if ((subtype & GPN_SOF) && sc->sc_rxmbuf != NULL) {
ifp->if_ierrors++;
m_freem(sc->sc_rxmbuf);
sc->sc_rxmbuf = NULL;
}
if (sc->sc_rxmbuf == NULL) {
struct mbuf *m;
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
ifp->if_ierrors++;
goto out;
}
if (pktlen > MHLEN - 2) {
MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
ifp->if_ierrors++;
m_free(m);
goto out;
}
}
m->m_data += 2; /* makes ethernet payload 32bit aligned */
m->m_len = 0;
m->m_pkthdr.len = 0;
sc->sc_rxmbuf = m;
}
ok = gpn_add_data(sc, gd->gd_addr1, gd->gd_len1);
if (ok && gd->gd_addr2 && gd->gd_len2)
ok = gpn_add_data(sc, gd->gd_addr2, gd->gd_len2);
if (!ok) {
ifp->if_ierrors++;
m_freem(sc->sc_rxmbuf);
sc->sc_rxmbuf = NULL;
goto out;
}
if (subtype & GPN_EOF) {
struct mbuf *m;
m = sc->sc_rxmbuf;
sc->sc_rxmbuf = NULL;
m->m_pkthdr.rcvif = ifp;
KASSERT(((m->m_pkthdr.len + 63) >> 6) == gd->gd_pktlen64);
ifp->if_ipackets++;
ifp->if_ibytes += m->m_pkthdr.len;
#if NBPFILTER > 0
if (ifp->if_bpf)
bpf_mtap(ifp->if_bpf, m);
#endif
#ifdef GPNDEBUG
printf("%s: rx len=%d crc=%#x\n", ifp->if_xname,
m->m_pkthdr.len, m_crc32_le(m));
#endif
(*ifp->if_input)(ifp, m);
}
out:
gpn_ack_txid(sc, gd->gd_txid);
}
static void
gpn_free_txid(struct gpn_softc *sc, size_t txid)
{
struct gpn_txinfo * const ti = sc->sc_txinfo + txid;
KASSERT(txid < MAX_TXACTIVE);
if (ti->ti_mbuf == NULL)
return;
bus_dmamap_sync(sc->sc_dmat, ti->ti_map,
0, ti->ti_mbuf->m_len, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, ti->ti_map);
m_freem(ti->ti_mbuf);
ti->ti_mbuf = NULL;
sc->sc_txactive--;
KASSERT(sc->sc_txactive < MAX_TXACTIVE);
if (sc->sc_if.if_flags & IFF_OACTIVE) {
sc->sc_if.if_flags &= ~IFF_OACTIVE;
gpn_ifstart(&sc->sc_if);
}
}
static void
gpn_ipm_rebate(void *arg, size_t count)
{
struct gpn_softc * const sc = arg;
int s;
s = splnet();
sc->sc_free += count;
sc->sc_if.if_flags &= ~IFF_OACTIVE;
gpn_ifstart(&sc->sc_if);
splx(s);
}
static void
gpn_ifstart(struct ifnet *ifp)
{
struct gpn_softc * const sc = ifp->if_softc;
for (;;) {
struct mbuf *m, *m0;
ipm_gpn_desc_t gd;
ipm_gpn_desc_t *last_gd;
size_t count;
if (sc->sc_free == 0) {
ifp->if_flags |= IFF_OACTIVE;
break;
}
IF_DEQUEUE(&ifp->if_snd, m);
if (!m)
break;
if ((ifp->if_flags & IFF_UP) == 0) {
m_freem(m);
continue;
}
/*
* Make sure to send any pending acks first.
*/
if (sc->sc_ack_desc.agd_subtype) {
sc->sc_free--;
sc->sc_ack_desc.agd_subtype += GPN_ACK0;
gemini_ipm_produce(&sc->sc_ack_desc, 1);
sc->sc_ack_desc.agd_subtype = 0;
}
/*
* Let's find out how many mbufs we are using.
*/
for (m0 = m, count = 0; m0; m0 = m0->m_next) {
if (m0->m_len == 0)
continue;
count++;
}
/*
* Make sure there is always enough room.
*/
if (sc->sc_free < count
|| sc->sc_txactive + count > MAX_TXACTIVE) {
IF_PREPEND(&ifp->if_snd, m);
ifp->if_flags |= IFF_OACTIVE;
return;
}
#if NBPFILTER > 0
if (ifp->if_bpf)
bpf_mtap(ifp->if_bpf, m);
#endif
#ifdef GPNDEBUG
printf("%s: tx len=%d crc=%#x\n", ifp->if_xname,
m->m_pkthdr.len, m_crc32_le(m));
#endif
last_gd = NULL;
gd.gd_tag = IPM_TAG_GPN;
gd.gd_subtype = GPN_SOF;
gd.gd_pktlen64 = (m->m_pkthdr.len + 63) >> 6;
for (; m != NULL; m = m0) {
struct gpn_txinfo *ti;
bus_dmamap_t map;
size_t id;
int error;
m0 = m->m_next;
m->m_next = NULL;
if (m->m_len == 0) {
m_free(m);
continue;
}
if (last_gd) {
sc->sc_txactive++;
sc->sc_free--;
gemini_ipm_produce(last_gd, 1);
last_gd = NULL;
gd.gd_subtype = GPN_MOF;
}
for (id = sc->sc_lastid;
sc->sc_txinfo[id].ti_mbuf != NULL;) {
if (++id == __arraycount(sc->sc_txinfo))
id = 0;
}
KASSERT(id < MAX_TXACTIVE);
ti = sc->sc_txinfo + id;
map = ti->ti_map;
error = bus_dmamap_load(sc->sc_dmat, map,
mtod(m, void *), m->m_len, NULL,
BUS_DMA_READ|BUS_DMA_NOWAIT);
if (error) {
ifp->if_oerrors++;
m_freem(m);
break;
}
bus_dmamap_sync(sc->sc_dmat, map, 0,
m->m_len, BUS_DMASYNC_PREREAD);
KASSERT(map->dm_nsegs > 0);
KASSERT(map->dm_nsegs <= 2);
KASSERT(map->dm_segs[0].ds_addr != 0);
gd.gd_len1 = map->dm_segs[0].ds_len;
gd.gd_addr1 = map->dm_segs[0].ds_addr;
if (map->dm_nsegs == 1) {
gd.gd_len2 = 0;
gd.gd_addr2 = 0;
} else {
KASSERT(map->dm_segs[1].ds_addr != 0);
gd.gd_len2 = map->dm_segs[1].ds_len;
gd.gd_addr2 = map->dm_segs[1].ds_addr;
}
gd.gd_txid = id;
ti->ti_mbuf = m;
last_gd = &gd;
ifp->if_obytes += m->m_len;
}
ifp->if_opackets++;
last_gd->gd_subtype |= GPN_EOF;
sc->sc_txactive++;
sc->sc_free--;
gemini_ipm_produce(last_gd, 1);
}
}
static void
gpn_ipm_ifup(struct gpn_softc *sc)
{
sc->sc_remoteup = true;
if (sc->sc_if.if_flags & IFF_UP)
ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX);
}
static void
gpn_ipm_ifdown(struct gpn_softc *sc)
{
struct gpn_txinfo *ti = sc->sc_txinfo;
struct gpn_txinfo * const end_ti = ti + __arraycount(sc->sc_txinfo);
if (sc->sc_rxmbuf) {
m_freem(sc->sc_rxmbuf);
sc->sc_rxmbuf = NULL;
}
IF_PURGE(&sc->sc_if.if_snd);
for (; ti < end_ti; ti++) {
if (ti->ti_mbuf == NULL)
continue;
bus_dmamap_sync(sc->sc_dmat, ti->ti_map,
0, ti->ti_mbuf->m_len, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, ti->ti_map);
m_freem(ti->ti_mbuf);
ti->ti_mbuf = NULL;
}
sc->sc_lastid = 0;
ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_NONE);
sc->sc_remoteup = false;
}
static void
gpn_ipm_handler(void *arg, const void *desc)
{
struct gpn_softc * const sc = arg;
const ipm_gpn_desc_t * const gd = desc;
const ipm_gpn_ack_desc_t * const agd = desc;
int s;
s = splnet();
switch (gd->gd_subtype) {
case GPN_ACK14: gpn_free_txid(sc, agd->agd_txids[13]); /* FALLTHROUGH */
case GPN_ACK13: gpn_free_txid(sc, agd->agd_txids[12]); /* FALLTHROUGH */
case GPN_ACK12: gpn_free_txid(sc, agd->agd_txids[11]); /* FALLTHROUGH */
case GPN_ACK11: gpn_free_txid(sc, agd->agd_txids[10]); /* FALLTHROUGH */
case GPN_ACK10: gpn_free_txid(sc, agd->agd_txids[9]); /* FALLTHROUGH */
case GPN_ACK9: gpn_free_txid(sc, agd->agd_txids[8]); /* FALLTHROUGH */
case GPN_ACK8: gpn_free_txid(sc, agd->agd_txids[7]); /* FALLTHROUGH */
case GPN_ACK7: gpn_free_txid(sc, agd->agd_txids[6]); /* FALLTHROUGH */
case GPN_ACK6: gpn_free_txid(sc, agd->agd_txids[5]); /* FALLTHROUGH */
case GPN_ACK5: gpn_free_txid(sc, agd->agd_txids[4]); /* FALLTHROUGH */
case GPN_ACK4: gpn_free_txid(sc, agd->agd_txids[3]); /* FALLTHROUGH */
case GPN_ACK3: gpn_free_txid(sc, agd->agd_txids[2]); /* FALLTHROUGH */
case GPN_ACK2: gpn_free_txid(sc, agd->agd_txids[1]); /* FALLTHROUGH */
case GPN_ACK1: gpn_free_txid(sc, agd->agd_txids[0]); break;
case GPN_MOF:
case GPN_SOF:
case GPN_FRAME:
case GPN_EOF:
gpn_process_data(sc, gd);
break;
case GPN_IFUP:
gpn_ipm_ifup(sc);
break;
case GPN_IFDOWN:
gpn_ipm_ifdown(sc);
break;
default:
KASSERT(0);
}
splx(s);
}
static int
gpn_ifinit(struct ifnet *ifp)
{
struct gpn_softc * const sc = ifp->if_softc;
ipm_gpn_desc_t gd;
int error;
error = gpn_alloc_dmamaps(sc);
if (error)
return error;
memset(&gd, 0, sizeof(gd));
gd.gd_tag = IPM_TAG_GPN;
gd.gd_subtype = GPN_IFUP;
KASSERT(sc->sc_free > 0);
sc->sc_free--;
gemini_ipm_produce(&gd, 1);
if (sc->sc_remoteup)
ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX);
ifp->if_flags |= IFF_RUNNING;
return error;
}
static void
gpn_ifstop(struct ifnet *ifp, int disable)
{
struct gpn_softc * const sc = ifp->if_softc;
ipm_gpn_desc_t gd;
memset(&gd, 0, sizeof(gd));
gd.gd_tag = IPM_TAG_GPN;
gd.gd_subtype = GPN_IFDOWN;
KASSERT(sc->sc_free > 0);
sc->sc_free--;
gemini_ipm_produce(&gd, 1);
ifp->if_flags &= ~IFF_RUNNING;
gpn_ipm_ifdown(sc);
if (disable) {
gpn_free_dmamaps(sc);
}
}
static int
gpn_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
struct gpn_softc * const sc = ifp->if_softc;
struct ifreq * const ifr = data;
struct ifaliasreq * const ifra = data;
int s, error;
s = splnet();
switch (cmd) {
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
break;
case SIOCSIFPHYADDR: {
const struct sockaddr_dl *sdl = satosdl(&ifra->ifra_addr);
if (sdl->sdl_family != AF_LINK) {
error = EINVAL;
break;
}
if_set_sadl(ifp, CLLADDR(sdl), ETHER_ADDR_LEN, false);
error = 0;
break;
}
default:
error = ether_ioctl(ifp, cmd, data);
if (error == ENETRESET)
error = 0;
break;
}
splx(s);
return error;
}
static int
gpn_mediachange(struct ifnet *ifp)
{
return 0;
}
static void
gpn_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
{
struct gpn_softc * const sc = ifp->if_softc;
imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
}
static int
gpn_match(device_t parent, cfdata_t cf, void *aux)
{
return strcmp(gpn_cd.cd_name, aux) == 0;
}
static void
gpn_attach(device_t parent, device_t self, void *aux)
{
struct gpn_softc * const sc = device_private(self);
struct ifnet * const ifp = &sc->sc_if;
char enaddr[6];
enaddr[0] = 2;
enaddr[1] = 0;
enaddr[2] = 0;
enaddr[3] = 0;
enaddr[4] = 0;
#ifdef GEMINI_MASTER
enaddr[5] = 0;
#elif defined(GEMINI_SLAVE)
enaddr[5] = 1;
#else
#error neither master nor slave defined
#endif
aprint_normal("\n");
aprint_naive("\n");
sc->sc_dev = self;
sc->sc_dmat = &gemini_bus_dma_tag;
/*
* Pretend we are full-duplex gigabit ethernet.
*/
ifmedia_init(&sc->sc_im, 0, gpn_mediachange, gpn_mediastatus);
ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_NONE, 0, NULL);
ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_NONE);
strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = gpn_ifioctl;
ifp->if_start = gpn_ifstart;
ifp->if_init = gpn_ifinit;
ifp->if_stop = gpn_ifstop;
IFQ_SET_READY(&ifp->if_snd);
sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
if_attach(ifp);
ether_ifattach(ifp, enaddr);
sc->sc_free = MAX_TXACTIVE*2;
sc->sc_ih = gemini_ipm_register(IPM_TAG_GPN, IPL_SOFTNET, sc->sc_free,
gpn_ipm_handler, gpn_ipm_rebate, sc);
KASSERT(sc->sc_ih);
sc->sc_ack_desc.agd_tag = IPM_TAG_GPN;
}
void gpn_print_gd(ipm_gpn_desc_t *);
void
gpn_print_gd(ipm_gpn_desc_t *gd)
{
printf("%s: %p\n", __FUNCTION__, gd);
printf("\ttag %d, subtype %d, id %d, pktlen64 %d\n",
gd->gd_tag, gd->gd_subtype, gd->gd_txid, gd->gd_pktlen64);
printf("\tlen1 %d, len2 %d, addr1 %#x, addr2 %#x\n",
gd->gd_len1, gd->gd_len2, gd->gd_addr1, gd->gd_addr2);
}
CFATTACH_DECL_NEW(gpn, sizeof(struct gpn_softc),
gpn_match, gpn_attach, NULL, NULL);

View File

@ -125,7 +125,7 @@ options KTRACE # system call tracing, a la ktrace(1)
options DIAGNOSTIC # internal consistency checks
#options DEBUG
#options PMAP_DEBUG # Enable pmap_debug_level code
#options VERBOSE_INIT_ARM # verbose bootstrapping messages
options VERBOSE_INIT_ARM # verbose bootstrapping messages
options DDB # in-kernel debugger
options DDB_ONPANIC=1
options DDB_HISTORY_SIZE=100 # Enable history editing in DDB
@ -198,6 +198,12 @@ options GEMINI_TIMER_CLOCK_FREQ=25000000 # 25 MHz
# Gemini Inter-processor-interrupt (IPI)
geminiipi0 at obio? intr 0
# Gemini Inter-processor-messages (IPM)
geminiipm0 at geminiipi0
# Gemini Pseudo NIC (GPN)
gpn0 at geminiipm0
# On-board GPIO controllers
geminigpio0 at obio0 addr 0x4d000000 intrbase 32 intr 22
geminigpio1 at obio0 addr 0x4e000000 intrbase 64 intr 23

View File

@ -49,7 +49,7 @@ file-system PTYFS # /dev/pts/N support
# Networking options
#options GATEWAY # packet forwarding
options GATEWAY # packet forwarding
options INET # IP + ICMP + TCP + UDP
#options INET6 # IPV6
#options IPSEC # IP security
@ -198,6 +198,12 @@ options GEMINI_TIMER_CLOCK_FREQ=25000000 # 25 MHz
# Gemini Inter-processor-interrupt (IPI)
geminiipi0 at obio? intr 0
# Gemini Inter-processor-messages (IPM)
geminiipm0 at geminiipi0
# Gemini Pseudo NIC (GPN)
gpn0 at geminiipm0
# On-board GPIO controllers

View File

@ -1,4 +1,4 @@
/* $NetBSD: gemini.h,v 1.7 2008/11/20 07:49:54 cliff Exp $ */
/* $NetBSD: gemini.h,v 1.8 2008/12/06 05:22:39 cliff Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@ -32,8 +32,6 @@
#ifndef _EVBARM_GEMINI_GEMINI_H
#define _EVBARM_GEMINI_GEMINI_H
#include <arm/gemini/gemini_reg.h>
/*
* sanity check opt_gemini.h
*/
@ -54,6 +52,10 @@
# endif
#endif
#include <machine/vmparam.h>
#include <arm/gemini/gemini_reg.h>
/*
* Kernel VM space: 192MB at KERNEL_VM_BASE
*/
@ -71,10 +73,66 @@
#define GEMINI_LPCIO_VBASE (GEMINI_LPCHC_VBASE + L1_S_SIZE)
#define GEMINI_TIMER_VBASE (GEMINI_LPCIO_VBASE + L1_S_SIZE)
#define GEMINI_DRAMC_VBASE (GEMINI_TIMER_VBASE + L1_S_SIZE)
/*
* mapping of physical RAM
*/
#define GEMINI_RAMDISK_VBASE (GEMINI_DRAMC_VBASE + L1_S_SIZE)
#define GEMINI_RAMDISK_PBASE 0x00800000
#define GEMINI_RAMDISK_SIZE 0x00300000
#define GEMINI_RAMDISK_PEND (GEMINI_RAMDISK_PBASE + GEMINI_RAMDISK_SIZE)
#define GEMINI_IPMQ_VBASE \
((GEMINI_RAMDISK_VBASE + GEMINI_RAMDISK_SIZE \
+ (L1_S_SIZE * 4) - 1) & ~((L1_S_SIZE * 4) - 1))
/* round up for l2pt alignment */
#ifdef GEMINI_SLAVE
# define GEMINI_IPMQ_PBASE (GEMINI_RAMDISK_PEND + (GEMINI_BUSBASE * 1024 * 1024))
#else
# define GEMINI_IPMQ_PBASE GEMINI_RAMDISK_PEND
#endif
#if 0
# define GEMINI_IPMQ_SIZE (2 * sizeof(ipm_queue_t))
#else
# define GEMINI_IPMQ_SIZE (2 * 4096)
#endif
#define GEMINI_IPMQ_PEND (GEMINI_IPMQ_PBASE + GEMINI_IPMQ_SIZE)
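/*
* NOTE: the hardcoded 2 * 4096 above is expected to equal
* 2 * sizeof(ipm_queue_t) from gemini_ipmvar.h (one queue per direction);
* gemini_ipm_attach() KASSERTs exactly this at run time.
*/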
/*
* reserve physical RAM, as needed
*
* NOTE: the RAM used for the IPM queues is owned by the MASTER
* so MASTER needs to reserve those pages from VM; the slave does not.
* Hence, GEMINI_RAM_RESV_PEND is adjusted for the MASTER but not the SLAVE.
*/
#define GEMINI_RAM_RESV_PBASE 0
#define GEMINI_RAM_RESV_PEND 0
#if defined(MEMORY_DISK_DYNAMIC)
# undef GEMINI_RAM_RESV_PBASE
# undef GEMINI_RAM_RESV_PEND
# define GEMINI_RAM_RESV_PBASE GEMINI_RAMDISK_PBASE
# define GEMINI_RAM_RESV_PEND GEMINI_RAMDISK_PEND
#endif
#if (NGEMINIIPM > 0) && !defined(GEMINI_SLAVE)
# if (NGEMINIIPM > 1)
# error unexpected NGEMINIIPM > 1
# endif
# if (GEMINI_RAM_RESV_PBASE == 0)
# undef GEMINI_RAM_RESV_PBASE
# define GEMINI_RAM_RESV_PBASE GEMINI_IPMQ_PBASE
# endif
# undef GEMINI_RAM_RESV_PEND
# define GEMINI_RAM_RESV_PEND GEMINI_IPMQ_PEND
#endif
/*
* to map all of memory, including RAM owned by both cores,
* we start at a nice round vbase to simplify conversion
* from VA to PA and back
*/
#define GEMINI_ALLMEM_PBASE 0
#define GEMINI_ALLMEM_VBASE 0xf0000000
#define GEMINI_ALLMEM_SIZE 128 /* units of MB */
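/*
* With this flat mapping in place, a bus/physical address in the shared
* view and its kernel VA differ by a fixed offset; gemini_ba_to_va() in
* gemini_ipm.c relies on this. The inverse (an illustrative sketch, not
* part of this commit) would be:
*
* static inline bus_addr_t
* gemini_va_to_ba(const void *va)
* {
* return (bus_addr_t)((vaddr_t)va - GEMINI_ALLMEM_VBASE);
* }
*/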
#endif /* _EVBARM_GEMINI_GEMINI_H */

View File

@ -1,4 +1,4 @@
/* $NetBSD: gemini_machdep.c,v 1.10 2008/11/20 23:27:10 cliff Exp $ */
/* $NetBSD: gemini_machdep.c,v 1.11 2008/12/06 05:22:39 cliff Exp $ */
/* adapted from:
* NetBSD: sdp24xx_machdep.c,v 1.4 2008/08/27 11:03:10 matt Exp
@ -129,7 +129,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gemini_machdep.c,v 1.10 2008/11/20 23:27:10 cliff Exp $");
__KERNEL_RCSID(0, "$NetBSD: gemini_machdep.c,v 1.11 2008/12/06 05:22:39 cliff Exp $");
#include "opt_machdep.h"
#include "opt_ddb.h"
@ -139,6 +139,7 @@ __KERNEL_RCSID(0, "$NetBSD: gemini_machdep.c,v 1.10 2008/11/20 23:27:10 cliff Ex
#include "opt_com.h"
#include "opt_gemini.h"
#include "geminiwdt.h"
#include "geminiipm.h"
#include "md.h"
#include <sys/param.h>
@ -251,6 +252,16 @@ extern char _end[];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
#if (NGEMINIIPM > 0)
pv_addr_t ipmq_pt; /* L2 Page table for mapping IPM queues */
#if defined(DEBUG) || 1
unsigned long gemini_ipmq_pbase = GEMINI_IPMQ_PBASE;
unsigned long gemini_ipmq_vbase = GEMINI_IPMQ_VBASE;
#endif /* DEBUG */
#endif /* NGEMINIIPM > 0 */
extern struct user *proc0paddr;
/*
@ -732,12 +743,12 @@ initarm(void *arg)
#endif
uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
#if defined(MEMORY_DISK_DYNAMIC)
uvm_page_physload(atop(physical_freestart), atop(GEMINI_RAMDISK_PBASE),
atop(physical_freestart), atop(GEMINI_RAMDISK_PBASE),
#if (GEMINI_RAM_RESV_PBASE != 0)
uvm_page_physload(atop(physical_freestart), atop(GEMINI_RAM_RESV_PBASE),
atop(physical_freestart), atop(GEMINI_RAM_RESV_PBASE),
VM_FREELIST_DEFAULT);
uvm_page_physload(atop(GEMINI_RAMDISK_PEND), atop(physical_freeend),
atop(GEMINI_RAMDISK_PEND), atop(physical_freeend),
uvm_page_physload(atop(GEMINI_RAM_RESV_PEND), atop(physical_freeend),
atop(GEMINI_RAM_RESV_PEND), atop(physical_freeend),
VM_FREELIST_DEFAULT);
#else
uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
@ -981,6 +992,10 @@ setup_real_page_tables(void)
}
}
#if (NGEMINIIPM > 0)
valloc_pages(ipmq_pt, L2_TABLE_SIZE / PAGE_SIZE);
#endif
#ifdef VERBOSE_INIT_ARM
pt_index=0;
printf("%s: kernel_l1pt: %#lx:%#lx\n",
@ -991,6 +1006,10 @@ setup_real_page_tables(void)
kernel_pt_table[pt_index].pv_pa);
++pt_index;
}
#if (NGEMINIIPM > 0)
printf("%s: ipmq_pt:\n", __func__);
printf("\t%#lx:%#lx\n", ipmq_pt.pv_va, ipmq_pt.pv_pa);
#endif
#endif
/* This should never be able to happen but better confirm that. */
@ -1049,6 +1068,11 @@ setup_real_page_tables(void)
pmap_curmaxkvaddr =
KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
#if (NGEMINIIPM > 0)
printf("%s:%d: pmap_link_l2pt ipmq_pt\n", __FUNCTION__, __LINE__);
pmap_link_l2pt(l1_va, GEMINI_IPMQ_VBASE, &ipmq_pt);
#endif
#ifdef VERBOSE_INIT_ARM
printf("Mapping kernel\n");
#endif
@ -1109,6 +1133,38 @@ setup_real_page_tables(void)
pmap_map_entry(l1_va, ARM_VECTORS_HIGH, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#if (NGEMINIIPM > 0)
/* Map the IPM queue l2pt */
pmap_map_chunk(l1_va, ipmq_pt.pv_va, ipmq_pt.pv_pa,
L2_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
/* Map the IPM queue pages */
pmap_map_chunk(l1_va, GEMINI_IPMQ_VBASE, GEMINI_IPMQ_PBASE,
GEMINI_IPMQ_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef GEMINI_SLAVE
/*
* Map all memory, including that owned by the other core;
* take into account the RAM remap, so the view in this region
* is consistent with the MASTER
*/
pmap_map_chunk(l1_va,
GEMINI_ALLMEM_VBASE,
GEMINI_ALLMEM_PBASE + ((GEMINI_ALLMEM_SIZE - MEMSIZE) * 1024 * 1024),
(GEMINI_ALLMEM_SIZE - MEMSIZE) * 1024 * 1024,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1_va,
GEMINI_ALLMEM_VBASE + GEMINI_BUSBASE * 1024 * 1024,
GEMINI_ALLMEM_PBASE,
(MEMSIZE * 1024 * 1024),
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
/* Map all memory, including that owned by the other core */
pmap_map_chunk(l1_va, GEMINI_ALLMEM_VBASE, GEMINI_ALLMEM_PBASE,
GEMINI_ALLMEM_SIZE * 1024 * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif /* GEMINI_SLAVE */
#endif /* NGEMINIIPM */
/*
* Map integrated peripherals at same address in first level page
* table so that we can continue to use console.