remove some vestigial FPU and cache code that's not needed on ultrasparcs.

misc tidiness.
chs 2002-09-29 04:12:02 +00:00
parent 70d1ddac15
commit 50403f5ce2
8 changed files with 79 additions and 535 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.sparc64,v 1.66 2002/09/06 13:18:43 gehenna Exp $
# $NetBSD: files.sparc64,v 1.67 2002/09/29 04:12:02 chs Exp $
# @(#)files.sparc64 8.1 (Berkeley) 7/19/93
# sparc64-specific configuration info
@ -179,10 +179,7 @@ file arch/sparc/fpu/fpu_mul.c
file arch/sparc/fpu/fpu_sqrt.c
file arch/sparc/fpu/fpu_subr.c
# N.B.: optimizer breaks pmap.c and/or cache.c somehow -- have not
# identified the exact problem yet. NOOPT_C suffices for now.
file arch/sparc64/sparc64/autoconf.c
file arch/sparc64/sparc64/cache.c
file arch/sparc64/sparc64/conf.c
file arch/sparc64/sparc64/emul.c
file arch/sparc64/sparc64/in_cksum.S
@ -200,9 +197,6 @@ file arch/sparc64/sparc64/openprom.c
file arch/sparc64/sparc64/openfirm.c
file arch/sparc64/sparc64/ofw_machdep.c
file arch/sparc64/sparc64/pmap.c
# the following overrides the generic "sys_process.c"
# commented out by deraadt
#file arch/sparc64/sparc64/sys_process.c
file arch/sparc64/sparc64/sys_machdep.c
file arch/sparc64/sparc64/trap.c
file arch/sparc64/sparc64/vm_machdep.c
@ -211,8 +205,6 @@ file arch/sparc64/sparc64/disksubr.c
file arch/sparc64/sparc64/db_interface.c ddb | kgdb
file arch/sparc64/sparc64/db_trace.c ddb
file arch/sparc64/sparc64/db_disasm.c ddb
#file ddb/db_aout.c ddb_aout
#file ddb/db_elf.c ddb
#
# Raster Console support

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.31 2002/05/14 21:21:45 eeh Exp $ */
/* $NetBSD: cpu.h,v 1.32 2002/09/29 04:12:02 chs Exp $ */
/*
* Copyright (c) 1992, 1993
@ -93,22 +93,22 @@
struct cpu_info {
/* Most important fields first */
struct proc *ci_curproc;
struct pcb *ci_cpcb; /* also initial stack */
struct pcb *ci_cpcb;
struct cpu_info *ci_next;
struct proc *ci_fpproc;
int ci_number;
int ci_upaid;
struct schedstate_percpu ci_schedstate; /* scheduler state */
struct schedstate_percpu ci_schedstate;
/* DEBUG/DIAGNOSTIC stuff */
u_long ci_spin_locks; /* # of spin locks held */
u_long ci_simple_locks;/* # of simple locks held */
u_long ci_spin_locks;
u_long ci_simple_locks;
/* Spinning up the CPU */
void (*ci_spinup) __P((void)); /* spinup routine */
void (*ci_spinup) __P((void));
void *ci_initstack;
paddr_t ci_paddr; /* Phys addr of this structure. */
paddr_t ci_paddr;
};
extern struct cpu_info *cpus;
@ -227,7 +227,6 @@ int want_resched; /* resched() was called */
* XXX this must be per-cpu (eventually)
*/
struct proc *fpproc; /* FPU owner */
int foundfpu; /* true => we have an FPU */
/*
* Interrupt handler chains. Interrupt handlers should return 0 for

View File

@ -1,4 +1,4 @@
/* $NetBSD: autoconf.c,v 1.63 2002/09/27 20:36:15 thorpej Exp $ */
/* $NetBSD: autoconf.c,v 1.64 2002/09/29 04:12:02 chs Exp $ */
/*
* Copyright (c) 1996
@ -464,9 +464,6 @@ st_crazymap(n)
void
cpu_configure()
{
#if 0
extern struct user *proc0paddr; /* XXX see below */
#endif
/* build the bootpath */
bootpath_build();
@ -487,15 +484,6 @@ cpu_configure()
/* Enable device interrupts */
setpstate(getpstate()|PSTATE_IE);
#if 0
/*
* XXX Re-zero proc0's user area, to nullify the effect of the
* XXX stack running into it during auto-configuration.
* XXX - should fix stack usage.
*/
bzero(proc0paddr, sizeof(struct user));
#endif
(void)spl0();
}
@ -643,7 +631,6 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
OF_getprop(findroot(), "name", platform_type, sizeof(platform_type));
printf(": %s\n", platform_type);
/*
* Locate and configure the ``early'' devices. These must be
* configured before we can do the rest. For instance, the
@ -651,37 +638,27 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
* If the device cannot be located or configured, panic.
*/
/*
* The rest of this routine is for OBP machines exclusively.
*/
node = findroot();
/* Establish the first component of the boot path */
bootpath_store(1, bootpath);
/* the first early device to be configured is the cpu */
{
/* XXX - what to do on multiprocessor machines? */
for (node = OF_child(node); node; node = OF_peer(node)) {
if (OF_getprop(node, "device_type",
buf, sizeof(buf)) <= 0)
continue;
if (strcmp(buf, "cpu") == 0) {
bzero(&ma, sizeof(ma));
ma.ma_bustag = &mainbus_space_tag;
ma.ma_dmatag = &mainbus_dma_tag;
ma.ma_node = node;
ma.ma_name = "cpu";
config_found(dev, (void *)&ma, mbprint);
break;
}
}
if (node == 0)
panic("None of the CPUs found");
/* first early device to be configured is the cpu */
for (node = OF_child(node); node; node = OF_peer(node)) {
if (OF_getprop(node, "device_type", buf, sizeof(buf)) <= 0)
continue;
if (strcmp(buf, "cpu") != 0)
continue;
bzero(&ma, sizeof(ma));
ma.ma_bustag = &mainbus_space_tag;
ma.ma_dmatag = &mainbus_dma_tag;
ma.ma_node = node;
ma.ma_name = "cpu";
config_found(dev, &ma, mbprint);
break;
}
if (node == 0)
panic("None of the CPUs found");
node = findroot(); /* re-init root node */
@ -701,7 +678,7 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
DPRINTF(ACDB_PROBE, ("Node: %x", node));
if ((OF_getprop(node, "device_type", buf, sizeof(buf)) > 0) &&
strcmp(buf, "cpu") == 0)
strcmp(buf, "cpu") == 0)
continue;
OF_getprop(node, "name", buf, sizeof(buf));
DPRINTF(ACDB_PROBE, (" name %s\n", buf));
@ -717,12 +694,12 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
ma.ma_name = buf;
ma.ma_node = node;
if (OF_getprop(node, "upa-portid", &portid, sizeof(portid)) !=
sizeof(portid))
sizeof(portid))
portid = -1;
ma.ma_upaid = portid;
if (PROM_getprop(node, "reg", sizeof(*ma.ma_reg),
&ma.ma_nreg, (void**)&ma.ma_reg) != 0)
&ma.ma_nreg, (void**)&ma.ma_reg) != 0)
continue;
#ifdef DEBUG
if (autoconf_debug & ACDB_PROBE) {
@ -734,7 +711,7 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
printf(" no reg\n");
}
#endif
rv = PROM_getprop(node, "interrupts", sizeof(*ma.ma_interrupts),
rv = PROM_getprop(node, "interrupts", sizeof(*ma.ma_interrupts),
&ma.ma_ninterrupts, (void**)&ma.ma_interrupts);
if (rv != 0 && rv != ENOENT) {
free(ma.ma_reg, M_DEVBUF);
@ -743,8 +720,7 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
#ifdef DEBUG
if (autoconf_debug & ACDB_PROBE) {
if (ma.ma_interrupts)
printf(" interrupts %08x\n",
*ma.ma_interrupts);
printf(" interrupts %08x\n", *ma.ma_interrupts);
else
printf(" no interrupts\n");
}
@ -760,8 +736,7 @@ extern struct sparc_bus_space_tag mainbus_space_tag;
#ifdef DEBUG
if (autoconf_debug & ACDB_PROBE) {
if (ma.ma_naddress)
printf(" address %08x\n",
*ma.ma_address);
printf(" address %08x\n", *ma.ma_address);
else
printf(" no address\n");
}

View File

@ -1,175 +0,0 @@
/* $NetBSD: cache.c,v 1.5 2000/12/06 01:47:50 mrg Exp $ */
/*
* Copyright (c) 1996
* The President and Fellows of Harvard College. All rights reserved.
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Harvard University.
* This product includes software developed by the University of
* California, Lawrence Berkeley Laboratory.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Aaron Brown and
* Harvard University.
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)cache.c 8.2 (Berkeley) 10/30/93
*
*/
/*
* Cache routines.
*
* UltraSPARC has VIPT D$ and PIPT I$.
*
* TODO:
* - rework range flush
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/ctlreg.h>
#include <machine/pte.h>
#include <sparc64/sparc64/cache.h>
enum vactype vactype;
struct cachestats cachestats;
int cachedebug = 0;
/*
* Enable the cache.
* The prom does this for us.
*/
void
cache_enable()
{
/*
* No point in implementing this unless we have a cache_disable().
* Anyway, sun4u ECC is generated in the E$, so we can't disable that
* and expect to use any RAM.
*/
cacheinfo.c_enabled = 1; /* enable cache flusing */
}
/*
* Flush the given virtual page from the cache.
* (va is the actual address, and must be aligned on a page boundary.)
* To get the E$ we read to each cache line.
*/
int
cache_flush_page(pa)
paddr_t pa;
{
register int i, j, ls;
register char *p;
register int *kp;
#ifdef DEBUG
if (cachedebug)
printf("cache_flush_page %llx\n", (unsigned long long)pa);
if (pa & PGOFSET)
panic("cache_flush_page: asked to flush misaligned pa %llx", (unsigned long long)pa);
#endif
/* Don't flush if not enabled or not probed. */
if (!cacheinfo.c_enabled) return 0;
cachestats.cs_npgflush++;
p = (char *)(u_long)pa;
ls = cacheinfo.c_linesize;
i = NBPG >> cacheinfo.dc_l2linesize;
/* Assume E$ takes care of itself*/
kp = (int *)(u_long)((pa & (cacheinfo.ec_totalsize - 1)) + KERNBASE);
j = 0; /* defeat optimizer? */
for (; --i >= 0; p += ls) {
flush(p); /* Take care of I$. */
j += kp[i]; /* Take care of E$. */
}
return j;
}
/*
* Flush a range of virtual addresses (in the current context).
* The first byte is at (base&~PGOFSET) and the last one is just
* before byte (base+len).
*
* We may need to get more complex if we need to flush E$ because
* the virtual color may not match the physical color. Assume cache
* coherence is handled by H/W.
*/
#define CACHE_FLUSH_MAGIC (cacheinfo.ec_totalsize / NBPG)
int
cache_flush(base, len)
vaddr_t base;
size_t len;
{
int i, j, ls;
vaddr_t baseoff;
char *p;
int *kp;
#ifdef DEBUG
if (cachedebug)
printf("cache_flush %p %x\n", (void *)(u_long)base, (u_int)len);
#endif
/* Don't flush if not enabled or not probed. */
if (!cacheinfo.c_enabled) return 0;
baseoff = (vaddr_t)base & PGOFSET;
i = (baseoff + len + PGOFSET) >> PGSHIFT;
cachestats.cs_nraflush++;
i = min(i,CACHE_FLUSH_MAGIC);
p = (char *)((vaddr_t)base & ~baseoff);
ls = cacheinfo.dc_linesize;
i >>= cacheinfo.dc_l2linesize;
/* Pick right physical color for E$ */
kp = (int *)(((vaddr_t)p & (cacheinfo.ec_totalsize - 1)) + KERNBASE);
j = 0; /* defeat optimizer? */
for (; --i >= 0; p += ls) {
flush(p); /* Take care of I$. */
j += kp[i]; /* Take care of E$. */
}
return j;
}
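
For context on why this file can go away entirely: the spitfire L1 caches are write-through, so a page can be invalidated directly with the locore primitives declared in cache.h, and the read-back loop that touched every E$ line is unnecessary. The following is a minimal sketch only, assuming those primitives and hardware-maintained E$ coherence; the function name is illustrative, not kernel code.

/*
 * Sketch only: flush one physical page from the L1 caches using the
 * locore routines declared in sparc64/sparc64/cache.h.  No E$ read
 * loop is needed; the E$ is physically indexed and assumed to be kept
 * coherent by the hardware.
 */
void
flush_page_sketch(paddr_t pa)
{
	dcache_flush_page(pa);	/* invalidate any D$ lines for this page */
	icache_flush_page(pa);	/* invalidate any I$ lines for this page */
}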

View File

@ -1,4 +1,4 @@
/* $NetBSD: cache.h,v 1.5 2002/09/22 07:19:46 chs Exp $ */
/* $NetBSD: cache.h,v 1.6 2002/09/29 04:12:03 chs Exp $ */
/*
* Copyright (c) 1996
@ -43,106 +43,40 @@
* @(#)cache.h 8.1 (Berkeley) 6/11/93
*/
enum vactype { VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };
extern enum vactype vactype; /* XXX move into cacheinfo struct */
/*
* Cache tags can be written in control space, and must be set to 0
* (or invalid anyway) before turning on the cache. The tags are
* addressed as an array of 32-bit structures of the form:
*
* struct cache_tag {
* u_int :7, (unused; must be zero)
* ct_cid:3, (context ID)
* ct_w:1, (write flag from PTE)
* ct_s:1, (supervisor flag from PTE)
* ct_v:1, (set => cache entry is valid)
* :3, (unused; must be zero)
* ct_tid:14, (cache tag ID)
* :2; (unused; must be zero)
* };
*
* The SPARCstation 1 cache sees virtual addresses as:
*
* struct cache_va {
* u_int :2, (unused; probably copies of va_tid<13>)
* cva_tid:14, (tag ID)
* cva_line:12, (cache line number)
* cva_byte:4; (byte in cache line)
* };
*
* (The SS2 cache is similar but has half as many lines, each twice as long.)
*
* Note that, because the 12-bit line ID is `wider' than the page offset,
* it is possible to have one page map to two different cache lines.
* This can happen whenever two different physical pages have the same bits
* in the part of the virtual address that overlaps the cache line ID, i.e.,
* bits <15:12>. In order to prevent cache duplication, we have to
* make sure that no one page has more than one virtual address where
* (va1 & 0xf000) != (va2 & 0xf000). (The cache hardware turns off ct_v
* when a cache miss occurs on a write, i.e., if va1 is in the cache and
* va2 is not, and you write to va2, va1 goes out of the cache. If va1
* is in the cache and va2 is not, reading va2 also causes va1 to become
* uncached, and the [same] data is then read from main memory into the
* cache.)
*
* The other alternative, of course, is to disable caching of aliased
* pages. (In a few cases this might be faster anyway, but we do it
* only when forced.)
*
* The Sun4, since it has an 8K pagesize instead of 4K, needs to check
* bits that are one position higher.
*/
/*
* The spitfire has a 16K two-way set associative level-1 I$ and a separate
* 16K level-1 D$. The I$ can be invalidated using the FLUSH instructions,
* so we don't really need to worry about it much. The D$ is 16K write-through
* direct mapped virtually addressed cache with two 16-byte sub-blocks per line.
* The E$ is a 512KB-4MB direct mapped physically indexed physically tagged cache.
* Since the level-1 caches are write-through, they don't need flushing and can be
* invalidated directly.
* The spitfire has a 16K two-way set-associative L1 I$ and a separate
* 16K L2 D$. The I$ can be invalidated using the FLUSH instructions,
* so we don't really need to worry about it much. The D$ is a 16K
* write-through, direct mapped virtually-addressed cache with two 16-byte
* sub-blocks per line. The E$ is a 512KB to 4MB direct mapped
* physically-indexed physically-tagged cache. Since the L1 caches
* are write-through, they don't need flushing and can be invalidated directly.
*
* The spitfire sees virtual addresses as:
*
* struct cache_va {
* u_int64_t :22, (unused; we only have 40-bit addresses)
* uint64_t :22, (unused; VAs are only 40 bits)
* cva_tag:28, (tag ID)
* cva_line:9, (cache line number)
* cva_byte:5; (byte within line)
* };
*
* Since there is one bit of overlap between the page offset and the line index,
* all we need to do is make sure that bit 14 of the va remains constant and we have
* no aliasing problems.
* all we need to do is make sure that bit 14 of the va remains constant
* and we have no aliasing problems.
*
* Let me try again. Page size is 8K, cache size is 16K so if (va1&0x3fff != va2&0x3fff)
* we have a problem. Bit 14 *must* be the same for all mappings of a page to be cacheable
* in the D$. (The I$ is 16K 2-way associative--each bank is 8K. No conflict there.)
* Let me try again...
* Page size is 8K, cache size is 16K so if (va1 & 0x3fff != va2 & 0x3fff)
* then we have a problem. Bit 14 *must* be the same for all mappings
* of a page to be cacheable in the D$. (The I$ is 16K 2-way
* set-associative -- each bank is 8K. No conflict there.)
*/
/* Some more well-known values: */
#define CACHE_ALIAS_MASK 0x7fff
#define CACHE_ALIAS_BITS 0x4000
/*
* True iff a1 and a2 are `bad' aliases (will cause cache duplication).
*/
#define BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_BITS)
/*
* Routines for dealing with the cache.
*/
void cache_enable __P((void)); /* turn it on */
int cache_flush_page __P((paddr_t)); /* flush page from E$ */
int cache_flush __P((vaddr_t, vsize_t)); /* flush region */
/* The following two are for I$ and D$ flushes and are in locore.s */
/* The following are for I$ and D$ flushes and are in locore.s */
void dcache_flush_page __P((paddr_t)); /* flush page from D$ */
void icache_flush_page __P((paddr_t)); /* flush page from I$ */
void blast_dcache __P((void)); /* Clear entire contents of D$ */
void blast_icache __P((void)); /* Clear entire contents of I$ */
void blast_dcache __P((void)); /* Clear entire D$ */
void blast_icache __P((void)); /* Clear entire I$ */
/* The following flush a range from the D$ and I$ but not E$. */
void cache_flush_virt __P((vaddr_t, vsize_t));
@ -152,14 +86,6 @@ void cache_flush_phys __P((paddr_t, psize_t, int));
* Cache control information.
*/
struct cacheinfo {
int c_totalsize; /* total size, in bytes */
/* if split, MAX(icache,dcache) */
int c_enabled; /* true => cache is enabled */
int c_hwflush; /* true => have hardware flush */
int c_linesize; /* line size, in bytes */
int c_l2linesize; /* log2(linesize) */
int c_physical; /* true => cache is physical */
int c_split; /* true => cache is split */
int ic_totalsize; /* instruction cache */
int ic_enabled;
int ic_linesize;
@ -174,14 +100,3 @@ struct cacheinfo {
int ec_l2linesize;
};
extern struct cacheinfo cacheinfo;
/*
* Cache control statistics.
*/
struct cachestats {
int cs_npgflush; /* # page flushes */
int cs_nraflush; /* # range flushes */
#ifdef notyet
int cs_ra[65]; /* pages/range */
#endif
};
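
The alias rule in the comment above lends itself to a worked example: with 8K pages and a 16K direct-mapped D$, two mappings of one physical page conflict exactly when bit 14 of their virtual addresses differs, which is what BADALIAS tests. A minimal, self-contained sketch follows; the macro mirrors the header definition and the addresses are made up for illustration.

#include <stdio.h>

#define CACHE_ALIAS_BITS	0x4000
#define BADALIAS(a1, a2)	(((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_BITS)

int
main(void)
{
	unsigned long va1 = 0x2000;	/* bit 14 clear */
	unsigned long va2 = 0x6000;	/* bit 14 set: bad alias with va1 */
	unsigned long va3 = 0xa000;	/* bit 14 clear: cacheable alongside va1 */

	printf("va1/va2 conflict: %d\n", BADALIAS(va1, va2) != 0);	/* prints 1 */
	printf("va1/va3 conflict: %d\n", BADALIAS(va1, va3) != 0);	/* prints 0 */
	return 0;
}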

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.23 2002/09/27 20:36:16 thorpej Exp $ */
/* $NetBSD: cpu.c,v 1.24 2002/09/29 04:12:03 chs Exp $ */
/*
* Copyright (c) 1996
@ -89,13 +89,8 @@ const struct cfattach cpu_ca = {
extern struct cfdriver cpu_cd;
#if defined(SUN4C) || defined(SUN4M)
static char *psrtoname __P((int, int, int, char *));
#endif
static char *fsrtoname __P((int, int, int, char *));
#define IU_IMPL(v) ((((u_int64_t)(v))&VER_IMPL) >> VER_IMPL_SHIFT)
#define IU_VERS(v) ((((u_int64_t)(v))&VER_MASK) >> VER_MASK_SHIFT)
#define IU_IMPL(v) ((((uint64_t)(v)) & VER_IMPL) >> VER_IMPL_SHIFT)
#define IU_VERS(v) ((((uint64_t)(v)) & VER_MASK) >> VER_MASK_SHIFT)
#ifdef notdef
/*
@ -130,31 +125,31 @@ static char *iu_vendor[16] = {
* Initialize the cpuinfo
* Return the TLB entry for the cpuinfo.
*/
u_int64_t
uint64_t
cpu_init(pa, cpu_num)
paddr_t pa;
int cpu_num;
{
struct cpu_info *ci;
u_int64_t pagesize;
u_int64_t pte;
struct vm_page *m;
uint64_t pagesize;
uint64_t pte;
struct vm_page *pg;
psize_t size;
vaddr_t va;
struct pglist mlist;
struct pglist pglist;
int error;
size = NBPG; /* XXXX 8K, 64K, 512K, or 4MB */
if ((error = uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
(paddr_t)size, (paddr_t)0, &mlist, 1, 0)) != 0)
if ((error = uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1,
(paddr_t)size, (paddr_t)0, &pglist, 1, 0)) != 0)
panic("cpu_start: no memory, error %d", error);
va = uvm_km_valloc(kernel_map, size);
if (va == 0)
panic("cpu_start: no memory");
m = TAILQ_FIRST(&mlist);
pa = VM_PAGE_TO_PHYS(m);
pg = TAILQ_FIRST(&pglist);
pa = VM_PAGE_TO_PHYS(pg);
pte = TSB_DATA(0 /* global */,
pagesize,
pa,
@ -166,8 +161,8 @@ cpu_init(pa, cpu_num)
0 /* IE */);
/* Map the pages */
for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
pa = VM_PAGE_TO_PHYS(m);
for (; pg != NULL; pg = TAILQ_NEXT(pg, pageq)) {
pa = VM_PAGE_TO_PHYS(pg);
pmap_zero_page(pa);
pmap_kenter_pa(va, pa | PMAP_NVC, VM_PROT_READ | VM_PROT_WRITE);
va += NBPG;
@ -203,7 +198,6 @@ cpu_init(pa, cpu_num)
return (pte | TLB_L);
}
int
cpu_match(parent, cf, aux)
struct device *parent;
@ -229,45 +223,38 @@ cpu_attach(parent, dev, aux)
int node;
long clk;
int impl, vers, fver;
char *fpuname;
struct mainbus_attach_args *ma = aux;
struct fpstate64 *fpstate;
struct fpstate64 fps[2];
char *sep;
char fpbuf[40];
register int i, l;
u_int64_t ver;
uint64_t ver;
int bigcache, cachesize;
extern u_int64_t cpu_clockrate[];
extern uint64_t cpu_clockrate[];
/* This needs to be 64-bit aligned */
fpstate = ALIGNFPSTATE(&fps[1]);
/*
* Get the FSR and clear any exceptions. If we do not unload
* the queue here and it is left over from a previous crash, we
* will panic in the first loadfpstate(), due to a sequence error,
* so we need to dump the whole state anyway.
*
* If there is no FPU, trap.c will advance over all the stores,
* so we initialize fs_fsr here.
*/
fpstate->fs_fsr = 7 << FSR_VER_SHIFT; /* 7 is reserved for "none" */
savefpstate(fpstate);
fver = (fpstate->fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);
ver = getver();
impl = IU_IMPL(ver);
vers = IU_VERS(ver);
if (fver != 7) {
foundfpu = 1;
fpuname = fsrtoname(impl, vers, fver, fpbuf);
} else
fpuname = "no";
/* tell them what we have */
node = ma->ma_node;
clk = PROM_getpropint(node, "clock-frequency", 0);
if (clk == 0) {
/*
* Try to find it in the OpenPROM root...
*/
@ -275,18 +262,17 @@ cpu_attach(parent, dev, aux)
}
if (clk) {
cpu_clockrate[0] = clk; /* Tell OS what frequency we run on */
cpu_clockrate[1] = clk/1000000;
cpu_clockrate[1] = clk / 1000000;
}
sprintf(cpu_model, "%s @ %s MHz, %s FPU",
sprintf(cpu_model, "%s @ %s MHz, version %d FPU",
PROM_getpropstring(node, "name"),
clockfreq(clk), fpuname);
clockfreq(clk), fver);
printf(": %s\n", cpu_model);
bigcache = 0;
cacheinfo.c_physical = 1; /* Dunno... */
cacheinfo.c_split = 1;
cacheinfo.ic_linesize = l = PROM_getpropint(node, "icache-line-size", 0);
cacheinfo.ic_linesize = l =
PROM_getpropint(node, "icache-line-size", 0);
for (i = 0; (1 << i) < l && l; i++)
/* void */;
if ((1 << i) != l && l)
@ -344,25 +330,8 @@ cpu_attach(parent, dev, aux)
if (cachesize > bigcache)
bigcache = cachesize;
/*
* XXX - The following will have to do until
* we have per-cpu cache handling.
*/
cacheinfo.c_l2linesize =
min(cacheinfo.ic_l2linesize,
cacheinfo.dc_l2linesize);
cacheinfo.c_linesize =
min(cacheinfo.ic_linesize,
cacheinfo.dc_linesize);
cacheinfo.c_totalsize =
cacheinfo.ic_totalsize +
cacheinfo.dc_totalsize;
if (cacheinfo.c_totalsize == 0)
return;
sep = " ";
printf("%s: physical", dev->dv_xname);
printf("%s:", dev->dv_xname);
if (cacheinfo.ic_totalsize > 0) {
printf("%s%ldK instruction (%ld b/l)", sep,
(long)cacheinfo.ic_totalsize/1024,
@ -381,7 +350,6 @@ cpu_attach(parent, dev, aux)
(long)cacheinfo.ec_linesize);
}
printf("\n");
cache_enable();
/*
* Now that we know the size of the largest cache on this CPU,
@ -389,118 +357,3 @@ cpu_attach(parent, dev, aux)
*/
uvm_page_recolor(atop(bigcache));
}
/*
* The following tables convert <IU impl, IU version, FPU version> triples
* into names for the CPU and FPU chip. In most cases we do not need to
* inspect the FPU version to name the IU chip, but there is one exception
* (for Tsunami), and this makes the tables the same.
*
* The table contents (and much of the structure here) are from Guy Harris.
*
*/
struct info {
u_char valid;
u_char iu_impl;
u_char iu_vers;
u_char fpu_vers;
char *name;
};
#define ANY 0xff /* match any FPU version (or, later, IU version) */
#if defined(SUN4C) || defined(SUN4M)
static struct info iu_types[] = {
{ 1, 0x0, 0x4, 4, "MB86904" },
{ 1, 0x0, 0x0, ANY, "MB86900/1A or L64801" },
{ 1, 0x1, 0x0, ANY, "RT601 or L64811 v1" },
{ 1, 0x1, 0x1, ANY, "RT601 or L64811 v2" },
{ 1, 0x1, 0x3, ANY, "RT611" },
{ 1, 0x1, 0xf, ANY, "RT620" },
{ 1, 0x2, 0x0, ANY, "B5010" },
{ 1, 0x4, 0x0, 0, "TMS390Z50 v0 or TMS390Z55" },
{ 1, 0x4, 0x1, 0, "TMS390Z50 v1" },
{ 1, 0x4, 0x1, 4, "TMS390S10" },
{ 1, 0x5, 0x0, ANY, "MN10501" },
{ 1, 0x9, 0x0, ANY, "W8601/8701 or MB86903" },
{ 0 }
};
static char *
psrtoname(impl, vers, fver, buf)
register int impl, vers, fver;
char *buf;
{
register struct info *p;
for (p = iu_types; p->valid; p++)
if (p->iu_impl == impl && p->iu_vers == vers &&
(p->fpu_vers == fver || p->fpu_vers == ANY))
return (p->name);
/* Not found. */
sprintf(buf, "IU impl 0x%x vers 0x%x", impl, vers);
return (buf);
}
#endif /* SUN4C || SUN4M */
/* NB: table order matters here; specific numbers must appear before ANY. */
static struct info fpu_types[] = {
/*
* Vendor 0, IU Fujitsu0.
*/
{ 1, 0x0, ANY, 0, "MB86910 or WTL1164/5" },
{ 1, 0x0, ANY, 1, "MB86911 or WTL1164/5" },
{ 1, 0x0, ANY, 2, "L64802 or ACT8847" },
{ 1, 0x0, ANY, 3, "WTL3170/2" },
{ 1, 0x0, 4, 4, "on-chip" }, /* Swift */
{ 1, 0x0, ANY, 4, "L64804" },
/*
* Vendor 1, IU ROSS0/1 or Pinnacle.
*/
{ 1, 0x1, 0xf, 0, "on-chip" }, /* Pinnacle */
{ 1, 0x1, ANY, 0, "L64812 or ACT8847" },
{ 1, 0x1, ANY, 1, "L64814" },
{ 1, 0x1, ANY, 2, "TMS390C602A" },
{ 1, 0x1, ANY, 3, "RT602 or WTL3171" },
/*
* Vendor 2, IU BIT0.
*/
{ 1, 0x2, ANY, 0, "B5010 or B5110/20 or B5210" },
/*
* Vendor 4, Texas Instruments.
*/
{ 1, 0x4, ANY, 0, "on-chip" }, /* Viking */
{ 1, 0x4, ANY, 4, "on-chip" }, /* Tsunami */
/*
* Vendor 5, IU Matsushita0.
*/
{ 1, 0x5, ANY, 0, "on-chip" },
/*
* Vendor 9, Weitek.
*/
{ 1, 0x9, ANY, 3, "on-chip" },
{ 0 }
};
static char *
fsrtoname(impl, vers, fver, buf)
register int impl, vers, fver;
char *buf;
{
register struct info *p;
for (p = fpu_types; p->valid; p++)
if (p->iu_impl == impl &&
(p->iu_vers == vers || p->iu_vers == ANY) &&
(p->fpu_vers == fver))
return (p->name);
sprintf(buf, "version %x", fver);
return (buf);
}
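
To make the FPU-reporting change above concrete: the probe earlier in this file stores 7 (the reserved "none" value) into the FSR version field before savefpstate(), reads the field back, and now prints it as a number instead of looking it up in the removed fsrtoname() table. Below is a minimal sketch of the decoding only, assuming the VER_* and FSR_* constants from the machine headers and the IU_IMPL/IU_VERS macros defined in this file; the function is illustrative, not part of the commit.

/*
 * Sketch only: decode the %ver and %fsr fields the way cpu_attach()
 * does.  fver == 7 is the reserved value meaning "no FPU present".
 */
static void
print_cpu_versions(uint64_t ver, uint64_t fsr)
{
	int impl = IU_IMPL(ver);	/* implementation field */
	int vers = IU_VERS(ver);	/* mask version field */
	int fver = (fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);

	printf("cpu: IU impl %x vers %x, FPU version %d%s\n",
	    impl, vers, fver, fver == 7 ? " (none)" : "");
}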

View File

@ -1,4 +1,4 @@
/* $NetBSD: emul.c,v 1.9 2002/04/18 16:37:26 eeh Exp $ */
/* $NetBSD: emul.c,v 1.10 2002/09/29 04:12:03 chs Exp $ */
/*-
* Copyright (c) 1997, 2001 The NetBSD Foundation, Inc.
@ -43,6 +43,7 @@
#include <machine/instr.h>
#include <machine/cpu.h>
#include <machine/psl.h>
#include <sparc64/sparc64/cache.h>
#define DEBUG_EMUL
#ifdef DEBUG_EMUL
@ -433,8 +434,7 @@ emulinstr(pc, tf)
switch (code.i_op3.i_op3) {
case IOP3_FLUSH:
printf("emulinstr: we can't execute a cache flush???");
/* cpuinfo.cache_flush((caddr_t)(rs1 + rs2), 4); XXX */
blast_icache(); /* XXX overkill */
return 0;
default:

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.83 2002/09/22 07:19:52 chs Exp $ */
/* $NetBSD: trap.c,v 1.84 2002/09/29 04:12:03 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath. All rights reserved.
@ -711,20 +711,6 @@ badtrap:
fs->fs_qsize = 0;
p->p_md.md_fpstate = fs;
}
/*
* If we have not found an FPU, we have to emulate it.
*
* Since All UltraSPARC CPUs have an FPU how can this happen?
*/
if (!foundfpu) {
#ifdef notyet
fpu_emulate(p, tf, fs);
break;
#else
trapsignal(p, SIGFPE, 0); /* XXX code?? */
break;
#endif
}
/*
* We may have more FPEs stored up and/or ops queued.
* If they exist, handle them and get out. Otherwise,
@ -733,7 +719,6 @@ badtrap:
* Ultras should never have a FPU queue.
*/
if (fs->fs_qsize) {
printf("trap: Warning fs_qsize is %d\n",fs->fs_qsize);
fpu_cleanup(p, fs);
break;
@ -744,7 +729,7 @@ badtrap:
loadfpstate(fs);
fpproc = p; /* now we do have it */
}
tf->tf_tstate |= (PSTATE_PEF<<TSTATE_PSTATE_SHIFT);
tf->tf_tstate |= (PSTATE_PEF << TSTATE_PSTATE_SHIFT);
break;
}