checkpoint of MP work from dennis and myself. includes cross-processor
interrupt framework, a sledgehammer TLB invalidation and misc MP fixes.
doesn't work at all yet.
chs 2004-03-14 18:18:54 +00:00
parent 2781c357ae
commit cec587ddf6
18 changed files with 903 additions and 199 deletions


@ -1,4 +1,4 @@
# $NetBSD: files.sparc64,v 1.89 2004/02/10 16:43:43 jdolecek Exp $
# $NetBSD: files.sparc64,v 1.90 2004/03/14 18:18:54 chs Exp $
# @(#)files.sparc64 8.1 (Berkeley) 7/19/93
# sparc64-specific configuration info
@ -187,6 +187,7 @@ file arch/sparc64/sparc64/pmap.c
file arch/sparc64/sparc64/sys_machdep.c
file arch/sparc64/sparc64/trap.c
file arch/sparc64/sparc64/vm_machdep.c
file arch/sparc64/sparc64/ipifuncs.c multiprocessor
file arch/sparc64/sparc64/db_interface.c ddb | kgdb
file arch/sparc64/sparc64/db_trace.c ddb


@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.42 2004/01/06 09:38:19 petrov Exp $ */
/* $NetBSD: cpu.h,v 1.43 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1992, 1993
@ -73,6 +73,7 @@
#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#include <sys/sched.h>
@ -93,6 +94,7 @@
*/
struct cpu_info {
/*
* SPARC cpu_info structures live at two VAs: one global
* VA (so each CPU can access any other CPU's cpu_info)
@ -170,11 +172,14 @@ extern struct cpu_bootargs *cpu_args;
extern int ncpus;
extern struct cpu_info *cpus;
#define curcpu() ((struct cpu_info *)CPUINFO_VA)
#define curcpu() (((struct cpu_info *)CPUINFO_VA)->ci_self)
#define cpu_number() (curcpu()->ci_number)
#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY)
#define CPU_INFO_ITERATOR int
#define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = cpus; ci != NULL; \
ci = ci->ci_next
#define curlwp curcpu()->ci_curlwp
#define fplwp curcpu()->ci_fplwp
#define curpcb curcpu()->ci_cpcb


@ -0,0 +1,59 @@
/* $NetBSD: cpuset.h,v 1.1 2004/03/14 18:18:54 chs Exp $ */
/*-
* Copyright (c) 2004 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SPARC64_CPUSET_H_
#define _SPARC64_CPUSET_H_
typedef uint64_t cpuset_t;
extern __volatile cpuset_t cpus_active;
#define CPUSET_SINGLE(cpu) ((cpuset_t)1 << (cpu))
#define CPUSET_ADD(set, cpu) ((set) |= CPUSET_SINGLE(cpu))
#define CPUSET_DEL(set, cpu) ((set) &= ~CPUSET_SINGLE(cpu))
#define CPUSET_SUB(set1, set2) ((set1) &= ~(set2))
#define CPUSET_ALL(set) ((set) = (cpuset_t)-1)
#define CPUSET_ALL_BUT(set, cpu) ((set) = ~CPUSET_SINGLE(cpu))
#define CPUSET_HAS(set, cpu) ((set) & CPUSET_SINGLE(cpu))
#define CPUSET_NEXT(set) (ffs(set) - 1)
#define CPUSET_EMPTY(set) ((set) == (cpuset_t)0)
#define CPUSET_EQUAL(set1, set2) ((set1) == (set2))
#define CPUSET_CLEAR(set) ((set) = (cpuset_t)0)
#define CPUSET_ASSIGN(set1, set2) ((set1) = (set2))
#endif /* _SPARC64_CPUSET_H_ */
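For orientation, the IPI code added later in this commit composes these macros in a "build a set, then multicast" pattern; a condensed sketch of sparc64_ipi_pause_cpus() from the new ipifuncs.c below:

	cpuset_t cpuset;

	CPUSET_ASSIGN(cpuset, cpus_active);	/* start from every running cpu */
	CPUSET_DEL(cpuset, cpu_number());	/* never target ourselves */
	if (!CPUSET_EMPTY(cpuset))
		sparc64_multicast_ipi(cpuset, SPARC64_IPI_PAUSE);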


@ -1,4 +1,4 @@
/* $NetBSD: ctlreg.h,v 1.30 2002/04/24 23:54:24 eeh Exp $ */
/* $NetBSD: ctlreg.h,v 1.31 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath
@ -393,8 +393,11 @@
#define IDSR_BUSY 0x01
#define ASI_INTERRUPT_DISPATCH 0x77 /* [4u] spitfire interrupt dispatch regs */
#define IDCR(x) (((x)<<14)&0x70) /* Store anything to this address to dispatch crosscall to CPU (x) */
#define IDDR_0H 0x40 /* Store data to send in these regs */
/* Interrupt delivery initiation */
#define IDCR(x) ((((uint64_t)(x)) << 14) | 0x70)
#define IDDR_0H 0x40 /* Store data to send in these regs */
#define IDDR_0L 0x48 /* unimplemented */
#define IDDR_1H 0x50
#define IDDR_1L 0x58 /* unimplemented */
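These dispatch data registers are driven by the new sparc64_send_ipi() in ipifuncs.c further down; condensed from that routine (retry loop and NACK check omitted), the sequence is roughly:

	int s = intr_disable();
	stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_number);	/* softint number (0 = fast trap) */
	stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, intr_func);	/* handler address */
	stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, intr_arg);	/* handler argument */
	stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);		/* kick the target module */
	membar_sync();
	while (ldxa(0, ASR_IDSR) & IDSR_BUSY)
		;						/* wait for dispatch to complete */
	intr_restore(s);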


@ -1,4 +1,4 @@
/* $NetBSD: intr.h,v 1.9 2003/06/16 20:01:06 thorpej Exp $ */
/* $NetBSD: intr.h,v 1.10 2004/03/14 18:18:54 chs Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -36,6 +36,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SPARC64_INTR_H_
#define _SPARC64_INTR_H_
#include <machine/cpuset.h>
/* XXX - arbitrary numbers; no interpretation is defined yet */
#define IPL_NONE 0 /* nothing */
#define IPL_SOFTINT 1 /* softint */
@ -52,6 +57,38 @@
#define IPL_SCHED PIL_SCHED /* scheduler */
#define IPL_LOCK PIL_LOCK /* locks */
#define IPL_HIGH PIL_HIGH /* everything */
#define IPL_HALT 5 /* cpu stop-self */
#define IPL_PAUSE 13 /* pause cpu */
/*
* Interprocessor interrupts. In order how we want them processed.
*/
#define SPARC64_IPI_HALT (1UL << 0)
#define SPARC64_IPI_PAUSE (1UL << 1)
#define SPARC64_IPI_FLUSH_PTE (1UL << 2)
#define SPARC64_IPI_FLUSH_CTX (1UL << 3)
#define SPARC64_IPI_FLUSH_ALL (1UL << 4)
#define SPARC64_IPI_SAVE_FP (1UL << 5)
#define SPARC64_NIPIS 6
#if defined(MULTIPROCESSOR)
void sparc64_ipi_init __P((void));
void sparc64_multicast_ipi __P((cpuset_t, u_long));
void sparc64_broadcast_ipi __P((u_long));
void sparc64_send_ipi __P((int, u_long));
void sparc64_ipi_halt_cpus __P((void));
void sparc64_ipi_pause_cpus __P((void));
void sparc64_ipi_resume_cpus __P((void));
#else
#define sparc64_ipi_init() ((void)0)
#define sparc64_multicast_ipi(set,ipi) ((void)0)
#define sparc64_broadcast_ipi(ipi) ((void)0)
#define sparc64_send_ipi(cpu,ipi) ((void)0)
#define sparc64_ipi_halt_cpus() ((void)0)
#define sparc64_ipi_pause_cpus() ((void)0)
#define sparc64_ipi_resume_cpus() ((void)0)
#endif
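Since the uniprocessor stubs expand to ((void)0), callers need no #ifdef of their own; for example, the cpu_reboot() hunk in machdep.c later in this diff simply does:

	(void) splhigh();	/* ??? */

	/* Stop all secondary cpus (compiles away on non-MULTIPROCESSOR kernels) */
	sparc64_ipi_halt_cpus();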
void *
softintr_establish __P((int level, void (*fun)(void *), void *arg));
@ -61,3 +98,5 @@ softintr_disestablish __P((void *cookie));
void
softintr_schedule __P((void *cookie));
#endif /* _SPARC64_INTR_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: openfirm.h,v 1.10 2004/01/06 09:38:19 petrov Exp $ */
/* $NetBSD: openfirm.h,v 1.11 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -65,4 +65,6 @@ int OF_searchprop (int node, char *prop, void *buf, int buflen);
int OF_mapintr(int node, int *interrupt, int validlen, int buflen);
void* OF_claim __P((void*, u_int, u_int));
int openfirmware_exit(void *);
#endif /* _SPARC64_OPENFIRM_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.30 2004/02/26 20:24:29 petrov Exp $ */
/* $NetBSD: pmap.h,v 1.31 2004/03/14 18:18:54 chs Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -172,18 +172,7 @@ int pmap_count_wired __P((struct pmap *));
#define pmap_phys_address(x) (x)
void pmap_activate_pmap(struct pmap *);
static __inline void
pmap_update(struct pmap *pmap)
{
if (pmap->pm_refs > 0) {
return;
}
pmap->pm_refs = 1;
pmap_activate_pmap(pmap);
}
void pmap_update(struct pmap *);
void pmap_bootstrap __P((u_long kernelstart, u_long kernelend, u_int numctx));
/* make sure all page mappings are modulo 16K to prevent d$ aliasing */
#define PMAP_PREFER(pa, va) (*(va)+=(((*(va))^(pa))&(1<<(PGSHIFT))))


@ -1,4 +1,4 @@
/* $NetBSD: psl.h,v 1.25 2003/11/15 05:24:51 petrov Exp $ */
/* $NetBSD: psl.h,v 1.26 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1992, 1993
@ -256,6 +256,8 @@ static __inline void setcwp __P((int));
static __inline void splx __P((int));
#endif
static __inline u_int64_t getver __P((void));
static __inline int intr_disable __P((void));
static __inline void intr_restore __P((int));
/*
* GCC pseudo-functions for manipulating privileged registers
@ -296,6 +298,21 @@ static __inline u_int64_t getver()
return (ver);
}
static __inline int
intr_disable(void)
{
int pstate = getpstate();
setpstate(pstate & ~PSTATE_IE);
return (pstate);
}
static __inline void
intr_restore(int pstate)
{
setpstate(pstate);
}
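These two are meant to bracket short critical sections by saving and restoring PSTATE_IE; the new IPI code below pairs them like this (minimal sketch):

	int s;

	s = intr_disable();	/* save %pstate and clear PSTATE_IE */
	/* ... touch the dispatch registers or per-cpu IPI state ... */
	intr_restore(s);	/* put the saved %pstate back */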
/*
* GCC pseudo-functions for manipulating PIL
*/


@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.12 2003/01/06 20:30:35 wiz Exp $ */
/* $NetBSD: pte.h,v 1.13 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1996-1999 Eduardo Horvath
@ -127,10 +127,29 @@ struct sun4u_tte {
#endif
typedef struct sun4u_tte pte_t;
/* TLB shootdown handler arguments. */
struct ipi_tlb_args {
vaddr_t ita_vaddr;
int ita_ctx;
};
/* Assembly routines to flush TLB mappings */
void tlb_flush_pte __P((vaddr_t, int));
void tlb_flush_ctx __P((int));
void tlb_flush_all __P((void));
void sp_tlb_flush_pte __P((vaddr_t, int));
void sp_tlb_flush_ctx __P((int));
void sp_tlb_flush_all __P((void));
#if defined(MULTIPROCESSOR)
void smp_tlb_flush_pte __P((vaddr_t, int));
void smp_tlb_flush_ctx __P((int));
void smp_tlb_flush_all __P((void));
#define tlb_flush_pte(va,ctx) smp_tlb_flush_pte(va, ctx)
#define tlb_flush_ctx(ctx) smp_tlb_flush_ctx(ctx)
#define tlb_flush_all() smp_tlb_flush_all()
#else
#define tlb_flush_pte(va,ctx) sp_tlb_flush_pte(va, ctx)
#define tlb_flush_ctx(ctx) sp_tlb_flush_ctx(ctx)
#define tlb_flush_all() sp_tlb_flush_all()
#endif
#endif /* _LOCORE */
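With this split, existing callers keep using the tlb_flush_*() names and transparently pick up the cross-call variants on MULTIPROCESSOR kernels; an illustrative (hypothetical) call site:

	/* e.g. in pmap.c, after removing a user mapping */
	tlb_flush_pte(va, ctx);	/* smp_tlb_flush_pte() with MULTIPROCESSOR,
				 * sp_tlb_flush_pte() otherwise */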


@ -1,4 +1,4 @@
/* $NetBSD: sparc64.h,v 1.5 2004/01/06 09:38:19 petrov Exp $ */
/* $NetBSD: sparc64.h,v 1.6 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (C) 1996 Wolfgang Solfrank.
@ -50,6 +50,7 @@ u_int64_t prom_claim_phys (paddr_t, int);
int prom_free_phys (paddr_t, int);
u_int64_t prom_get_msgbuf (int, int);
void prom_stopself(void);
void prom_startcpu(u_int, void *, u_long);
/*


@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.35 2004/02/13 11:36:18 wiz Exp $ */
/* $NetBSD: cpu.c,v 1.36 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1996
@ -52,11 +52,12 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.35 2004/02/13 11:36:18 wiz Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.36 2004/03/14 18:18:54 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <uvm/uvm_extern.h>
@ -73,16 +74,15 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.35 2004/02/13 11:36:18 wiz Exp $");
/* This is declared here so that you must include a CPU for the cache code. */
struct cacheinfo cacheinfo;
/* Our exported CPU info; we have only one for now. */
struct cpu_info cpu_info_store;
/* Linked list of all CPUs in system. */
int ncpus = 0;
struct cpu_info *cpus = NULL;
struct cpu_bootargs *cpu_args; /* allocated very earlt in pmap_bootstrap. */
__volatile cpuset_t cpus_active;/* set of active cpus */
struct cpu_bootargs *cpu_args; /* allocated very early in pmap_bootstrap. */
static struct cpu_info * alloc_cpuinfo(u_int);
static struct cpu_info *alloc_cpuinfo(u_int);
void mp_main(void);
/* The following are used externally (sysctl_hw). */
char machine[] = MACHINE; /* from <machine/param.h> */
@ -91,8 +91,8 @@ char cpu_model[100]; /* machine model (primary CPU) */
extern char machine_model[];
/* The CPU configuration driver. */
static void cpu_attach __P((struct device *, struct device *, void *));
int cpu_match __P((struct device *, struct cfdata *, void *));
void cpu_attach(struct device *, struct device *, void *);
int cpu_match(struct device *, struct cfdata *, void *);
CFATTACH_DECL(cpu, sizeof(struct device),
cpu_match, cpu_attach, NULL, NULL);
@ -102,39 +102,13 @@ extern struct cfdriver cpu_cd;
#define IU_IMPL(v) ((((uint64_t)(v)) & VER_IMPL) >> VER_IMPL_SHIFT)
#define IU_VERS(v) ((((uint64_t)(v)) & VER_MASK) >> VER_MASK_SHIFT)
#ifdef notdef
/*
* IU implementations are parceled out to vendors (with some slight
* glitches). Printing these is cute but takes too much space.
*/
static char *iu_vendor[16] = {
"Fujitsu", /* and also LSI Logic */
"ROSS", /* ROSS (ex-Cypress) */
"BIT",
"LSIL", /* LSI Logic finally got their own */
"TI", /* Texas Instruments */
"Matsushita",
"Philips",
"Harvest", /* Harvest VLSI Design Center */
"SPEC", /* Systems and Processes Engineering Corporation */
"Weitek",
"vendor#10",
"vendor#11",
"vendor#12",
"vendor#13",
"vendor#14",
"vendor#15"
};
#endif
struct cpu_info *
alloc_cpuinfo(cpu_node)
u_int cpu_node;
{
paddr_t pa0, pa;
vaddr_t va, va0;
vsize_t sz = 8*PAGE_SIZE;
vsize_t sz = 8 * PAGE_SIZE;
int portid;
struct cpu_info *cpi, *ci;
extern paddr_t cpu0paddr;
@ -150,19 +124,19 @@ alloc_cpuinfo(cpu_node)
return cpi;
/* Allocate the aligned VA and determine the size. */
va = uvm_km_valloc_align(kernel_map, 8 * PAGE_SIZE, 8 * PAGE_SIZE);
va = uvm_km_valloc_align(kernel_map, sz, sz);
if (!va)
panic("alloc_cpuinfo: no virtual space");
va0 = va;
pa0 = cpu0paddr;
cpu0paddr += 8*PAGE_SIZE;
cpu0paddr += sz;
for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
pmap_update(pmap_kernel());
cpi = (struct cpu_info*)(va0 + CPUINFO_VA - INTSTACK);
cpi = (struct cpu_info *)(va0 + CPUINFO_VA - INTSTACK);
memset((void *)va0, 0, sz);
@ -173,16 +147,16 @@ alloc_cpuinfo(cpu_node)
* way as is done for the boot CPU in locore.
*/
cpi->ci_next = NULL;
cpi->ci_curlwp = &lwp0;
cpi->ci_curlwp = NULL;
cpi->ci_number = portid;
cpi->ci_cpuid = portid;
cpi->ci_upaid = portid;
cpi->ci_fplwp = NULL;
cpi->ci_spinup = NULL; /* XXX */
cpi->ci_eintstack = (void *)(EINTSTACK); /* XXX */
cpi->ci_idle_u = (struct pcb *)(CPUINFO_VA + 2*PAGE_SIZE); /* XXX */
cpi->ci_cpcb = cpi->ci_idle_u /* (struct pcb *)va0 */; /* XXX */
cpi->ci_initstack = (void *)((vaddr_t)cpi->ci_idle_u + 2*PAGE_SIZE); /* XXX */
cpi->ci_eintstack = (void *)EINTSTACK; /* XXX */
cpi->ci_idle_u = (struct pcb *)(CPUINFO_VA + 2 * PAGE_SIZE); /* XXX */
cpi->ci_cpcb = cpi->ci_idle_u; /* XXX */
cpi->ci_initstack = (void *)((vaddr_t)cpi->ci_idle_u + 2 * PAGE_SIZE); /* XXX */
cpi->ci_paddr = pa0;
cpi->ci_self = cpi;
cpi->ci_node = cpu_node;
@ -212,7 +186,7 @@ cpu_match(parent, cf, aux)
* Discover interesting goop about the virtual address cache
* (slightly funny place to do it, but this is where it is to be found).
*/
static void
void
cpu_attach(parent, dev, aux)
struct device *parent;
struct device *dev;
@ -387,6 +361,8 @@ cpu_boot_secondary_processors()
vaddr_t mp_start;
int mp_start_size;
sparc64_ipi_init();
cpu_args->cb_ktext = ktext;
cpu_args->cb_ktextp = ktextp;
cpu_args->cb_ektext = ektext;
@ -420,10 +396,9 @@ cpu_boot_secondary_processors()
continue;
cpu_args->cb_node = ci->ci_node;
cpu_args->cb_flags = 0;
cpu_args->cb_cpuinfo = ci->ci_paddr;
cpu_args->cb_initstack = ci->ci_initstack;
membar_storeload();
membar_sync();
#ifdef DEBUG
printf("node %x. cpuinfo %lx, initstack %p\n",
@ -438,13 +413,14 @@ cpu_boot_secondary_processors()
prom_startcpu(ci->ci_node, (void *)mp_start, 0);
for (i = 0; i < 2000; i++) {
if (cpu_args->cb_flags == 1)
membar_sync();
if (CPUSET_HAS(cpus_active, ci->ci_number))
break;
delay(10000);
}
setpstate(pstate);
if (cpu_args->cb_flags == 0)
if (!CPUSET_HAS(cpus_active, ci->ci_number))
printf("cpu%d: startup failed\n", ci->ci_upaid);
else
printf(" cpu%d now spinning idle (waited %d iterations)\n", ci->ci_upaid, i);
@ -453,22 +429,12 @@ cpu_boot_secondary_processors()
printf("\n");
}
/* XXX */
void mp_main(void);
void
mp_main()
{
cpu_args->cb_flags = 1;
membar_storeload();
#if 1
printf("mp_main: started\n");
#endif
while (!cpu_go_smp)
;
printf("mp_main: ...\n");
CPUSET_ADD(cpus_active, cpu_number());
membar_sync();
spl0();
}
#endif /* MULTIPROCESSOR */


@ -1,4 +1,4 @@
/* $NetBSD: db_interface.c,v 1.74 2004/01/06 20:41:23 petrov Exp $ */
/* $NetBSD: db_interface.c,v 1.75 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath. All rights reserved.
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.74 2004/01/06 20:41:23 petrov Exp $");
__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.75 2004/03/14 18:18:54 chs Exp $");
#include "opt_ddb.h"
@ -254,9 +254,10 @@ void db_dump_buf __P((db_expr_t, int, db_expr_t, char *));
void db_dump_espcmd __P((db_expr_t, int, db_expr_t, char *));
void db_watch __P((db_expr_t, int, db_expr_t, char *));
void db_pm_extract __P((db_expr_t, int, db_expr_t, char *));
void db_cpus_cmd __P((db_expr_t, int, db_expr_t, char *));
#ifdef DDB
static void db_dump_pmap __P((struct pmap*));
static void db_dump_pmap __P((struct pmap *));
static void db_print_trace_entry __P((struct traptrace *, int));
/*
@ -350,6 +351,7 @@ kdb_trap(type, tf)
s = splhigh();
db_active++;
sparc64_ipi_pause_cpus();
cnpollc(TRUE);
/* Need to do spl stuff till cnpollc works */
tl = ddb_regs.ddb_tl = savetstate(ts);
@ -358,6 +360,7 @@ kdb_trap(type, tf)
restoretstate(tl,ts);
cnpollc(FALSE);
db_active--;
sparc64_ipi_resume_cpus();
splx(s);
if (fplwp) {
@ -1130,6 +1133,21 @@ db_watch(addr, have_addr, count, modif)
}
}
void
db_cpus_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
char *modif;
{
struct cpu_info *ci;
for (ci = cpus; ci; ci = ci->ci_next) {
db_printf("cpu%d: self 0x%08lx lwp 0x%08lx pcb 0x%08lx\n",
ci->ci_number, (u_long)ci->ci_self,
(u_long)ci->ci_curlwp, (u_long)ci->ci_cpcb);
}
}
#include <uvm/uvm.h>
@ -1181,7 +1199,8 @@ const struct db_command db_machine_command_table[] = {
{ "uvmdump", db_uvmhistdump, 0, 0 },
{ "watch", db_watch, 0, 0 },
{ "window", db_dump_window, 0, 0 },
{ (char *)0, }
{ "cpus", db_cpus_cmd, 0, 0 },
{ NULL, }
};
#endif /* DDB */


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.33 2004/01/06 21:35:18 martin Exp $
# $NetBSD: genassym.cf,v 1.34 2004/03/14 18:18:54 chs Exp $
#
# Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -165,6 +165,7 @@ define V_INTR offsetof(struct uvmexp, intrs)
define V_FAULTS offsetof(struct uvmexp, faults)
# CPU info structure
define CI_SELF offsetof(struct cpu_info, ci_self)
define CI_CURLWP offsetof(struct cpu_info, ci_curlwp)
define CI_CPCB offsetof(struct cpu_info, ci_cpcb)
define CI_NEXT offsetof(struct cpu_info, ci_next)
@ -292,3 +293,7 @@ define DBR_OUT offsetof(struct db_regs, dbr_out)
define DBR_LOCAL offsetof(struct db_regs, dbr_local)
define DBR_IN offsetof(struct db_regs, dbr_in)
endif
# TLB IPI handler arguments.
define ITA_VADDR offsetof(struct ipi_tlb_args, ita_vaddr)
define ITA_CTX offsetof(struct ipi_tlb_args, ita_ctx)


@ -0,0 +1,402 @@
/* $NetBSD: ipifuncs.c,v 1.1 2004/03/14 18:18:54 chs Exp $ */
/*-
* Copyright (c) 2004 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.1 2004/03/14 18:18:54 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/pte.h>
#include <machine/sparc64.h>
#define IPI_TLB_SHOOTDOWN 0
#define SPARC64_IPI_HALT_NO 100
#define SPARC64_IPI_RESUME_NO 101
#define SPARC64_IPI_RETRIES 100
#define sparc64_ipi_sleep() delay(1000)
extern int db_active;
typedef void (* ipifunc_t)(void *);
/* CPU sets containing halted, paused and resumed cpus */
static __volatile cpuset_t cpus_halted;
static __volatile cpuset_t cpus_paused;
static __volatile cpuset_t cpus_resumed;
__volatile struct ipi_tlb_args ipi_tlb_args;
/* IPI handlers. */
static int sparc64_ipi_halt(void *);
static int sparc64_ipi_pause(void *);
static int sparc64_ipi_wait(cpuset_t __volatile *, cpuset_t);
static void sparc64_ipi_error(const char *, cpuset_t, cpuset_t);
void sparc64_ipi_flush_pte(void *);
void sparc64_ipi_flush_ctx(void *);
void sparc64_ipi_flush_all(void *);
/* IPI handlers working at SOFTINT level. */
static struct intrhand ipi_halt_intr = {
sparc64_ipi_halt, NULL, SPARC64_IPI_HALT_NO, IPL_HALT
};
static struct intrhand ipi_pause_intr = {
sparc64_ipi_pause, NULL, SPARC64_IPI_RESUME_NO, IPL_PAUSE
};
static struct intrhand *ipihand[] = {
&ipi_halt_intr,
&ipi_pause_intr
};
/*
* Fast IPI table. If function is null, the handler is looked up
* in 'ipihand' table.
*/
static ipifunc_t ipifuncs[SPARC64_NIPIS] = {
NULL, /* ipi_halt_intr */
NULL, /* ipi_pause_intr */
sparc64_ipi_flush_pte,
sparc64_ipi_flush_ctx,
sparc64_ipi_flush_all
};
/*
* Process cpu stop-self event.
*/
static int
sparc64_ipi_halt(arg)
void *arg;
{
printf("cpu%d: shutting down\n", cpu_number());
CPUSET_ADD(cpus_halted, cpu_number());
prom_stopself();
return(1);
}
/*
* Pause cpu.
*/
static int
sparc64_ipi_pause(arg)
void *arg;
{
int s;
cpuid_t cpuid;
cpuid = cpu_number();
printf("cpu%ld paused.\n", cpuid);
s = intr_disable();
CPUSET_ADD(cpus_paused, cpuid);
do {
membar_sync();
} while(CPUSET_HAS(cpus_paused, cpuid));
membar_sync();
CPUSET_ADD(cpus_resumed, cpuid);
intr_restore(s);
printf("cpu%ld resumed.\n", cpuid);
return (1);
}
/*
* Initialize IPI machinery.
*/
void
sparc64_ipi_init()
{
/* Clear all cpu sets. */
CPUSET_CLEAR(cpus_halted);
CPUSET_CLEAR(cpus_paused);
CPUSET_CLEAR(cpus_resumed);
/* Install interrupt handlers. */
intr_establish(ipi_halt_intr.ih_pil, &ipi_halt_intr);
intr_establish(ipi_pause_intr.ih_pil, &ipi_pause_intr);
}
/*
* Send an IPI to all in the list but ourselves.
*/
void
sparc64_multicast_ipi(cpuset, ipimask)
cpuset_t cpuset;
u_long ipimask;
{
struct cpu_info *ci;
CPUSET_DEL(cpuset, cpu_number());
if (CPUSET_EMPTY(cpuset))
return;
for (ci = cpus; ci != NULL; ci = ci->ci_next) {
if (CPUSET_HAS(cpuset, ci->ci_number)) {
CPUSET_DEL(cpuset, ci->ci_number);
sparc64_send_ipi(ci->ci_upaid, ipimask);
}
}
}
/*
* Broadcast an IPI to all but ourselves.
*/
void
sparc64_broadcast_ipi(ipimask)
u_long ipimask;
{
cpuset_t cpuset;
CPUSET_ALL_BUT(cpuset, cpu_number());
sparc64_multicast_ipi(cpuset, ipimask);
}
/*
* Send an interprocessor interrupt.
*/
void
sparc64_send_ipi(upaid, ipimask)
int upaid;
u_long ipimask;
{
int i;
uint64_t intr_number, intr_func, intr_arg;
KASSERT(ipimask < (1UL << SPARC64_NIPIS));
KASSERT((ldxa(0, ASR_IDSR) & IDSR_BUSY) == 0);
/* Setup interrupt data. */
i = ffs(ipimask) - 1;
intr_func = (uint64_t)ipifuncs[i];
if (intr_func) {
/* fast trap */
intr_number = 0;
intr_arg = (uint64_t)&ipi_tlb_args;
} else {
/* softint trap */
struct intrhand *ih = ipihand[i];
intr_number = (uint64_t)ih->ih_number;
intr_func = (uint64_t)ih->ih_fun;
intr_arg = (uint64_t)ih->ih_arg;
}
/* Schedule an interrupt. */
for (i = 0; i < SPARC64_IPI_RETRIES; i++) {
int s = intr_disable();
stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_number);
stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, intr_func);
stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, intr_arg);
stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);
membar_sync();
while (ldxa(0, ASR_IDSR) & IDSR_BUSY)
;
intr_restore(s);
if ((ldxa(0, ASR_IDSR) & IDSR_NACK) == 0)
return;
}
if (db_active || panicstr != NULL)
printf("ipi_send: couldn't send ipi to module %u\n", upaid);
else
panic("ipi_send: couldn't send ipi");
}
/*
* Wait for IPI operation to complete.
*/
int
sparc64_ipi_wait(cpus_watchset, cpus_mask)
cpuset_t __volatile *cpus_watchset;
cpuset_t cpus_mask;
{
int i;
for (i = 0; i < SPARC64_IPI_RETRIES; i++) {
membar_sync();
if (CPUSET_EQUAL(*cpus_watchset, cpus_mask))
break;
sparc64_ipi_sleep();
}
return (i == SPARC64_IPI_RETRIES);
}
/*
* Halt all cpus but ourselves.
*/
void
sparc64_ipi_halt_cpus()
{
cpuset_t cpumask, cpuset;
CPUSET_ASSIGN(cpuset, cpus_active);
CPUSET_DEL(cpuset, cpu_number());
CPUSET_ASSIGN(cpumask, cpuset);
CPUSET_SUB(cpuset, cpus_halted);
if (CPUSET_EMPTY(cpuset))
return;
sparc64_multicast_ipi(cpuset, SPARC64_IPI_HALT);
if (sparc64_ipi_wait(&cpus_halted, cpumask))
sparc64_ipi_error("halt", cpumask, cpus_halted);
}
/*
* Pause all cpus but ourselves.
*/
void
sparc64_ipi_pause_cpus()
{
cpuset_t cpuset;
CPUSET_ASSIGN(cpuset, cpus_active);
CPUSET_DEL(cpuset, cpu_number());
if (CPUSET_EMPTY(cpuset))
return;
sparc64_multicast_ipi(cpuset, SPARC64_IPI_PAUSE);
if (sparc64_ipi_wait(&cpus_paused, cpuset))
sparc64_ipi_error("pause", cpus_paused, cpuset);
}
/*
* Resume all paused cpus.
*/
void
sparc64_ipi_resume_cpus()
{
cpuset_t cpuset;
CPUSET_CLEAR(cpus_resumed);
CPUSET_ASSIGN(cpuset, cpus_paused);
membar_sync();
CPUSET_CLEAR(cpus_paused);
/* CPUs awake on cpus_paused clear */
if (sparc64_ipi_wait(&cpus_resumed, cpuset))
sparc64_ipi_error("resume", cpus_resumed, cpuset);
}
/*
* Flush pte on all active processors.
*/
void
smp_tlb_flush_pte(va, ctx)
vaddr_t va;
int ctx;
{
/* Flush our own TLB */
sp_tlb_flush_pte(va, ctx);
#if defined(IPI_TLB_SHOOTDOWN)
/* Flush others */
ipi_tlb_args.ita_vaddr = va;
ipi_tlb_args.ita_ctx = ctx;
sparc64_broadcast_ipi(SPARC64_IPI_FLUSH_PTE);
#endif
}
/*
* Flush context on all active processors.
*/
void
smp_tlb_flush_ctx(ctx)
int ctx;
{
/* Flush our own TLB */
sp_tlb_flush_ctx(ctx);
#if defined(IPI_TLB_SHOOTDOWN)
/* Flush others */
ipi_tlb_args.ita_vaddr = (vaddr_t)0;
ipi_tlb_args.ita_ctx = ctx;
sparc64_broadcast_ipi(SPARC64_IPI_FLUSH_CTX);
#endif
}
/*
* Flush whole TLB on all active processors.
*/
void
smp_tlb_flush_all()
{
/* Flush our own TLB */
sp_tlb_flush_all();
#if defined(IPI_TLB_SHOOTDOWN)
/* Flush others */
sparc64_broadcast_ipi(SPARC64_IPI_FLUSH_ALL);
#endif
}
/*
* Print an error message.
*/
void
sparc64_ipi_error(s, cpus_succeeded, cpus_expected)
const char *s;
cpuset_t cpus_succeeded, cpus_expected;
{
int cpuid;
CPUSET_DEL(cpus_expected, cpus_succeeded);
printf("Failed to %s:", s);
do {
cpuid = CPUSET_NEXT(cpus_expected);
CPUSET_DEL(cpus_expected, cpuid);
printf(" cpu%d", cpuid);
} while(!CPUSET_EMPTY(cpus_expected));
printf("\n");
}


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.193 2004/02/13 11:36:18 wiz Exp $ */
/* $NetBSD: locore.s,v 1.194 2004/03/14 18:18:54 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath
@ -108,10 +108,10 @@
#undef CPCB
#undef FPLWP
#define CURLWP (CPUINFO_VA+CI_CURLWP)
#define CPCB (CPUINFO_VA+CI_CPCB)
#define FPLWP (CPUINFO_VA+CI_FPLWP)
#define IDLE_U (CPUINFO_VA+CI_IDLE_U)
#define CURLWP (CPUINFO_VA + CI_CURLWP)
#define CPCB (CPUINFO_VA + CI_CPCB)
#define FPLWP (CPUINFO_VA + CI_FPLWP)
#define IDLE_U (CPUINFO_VA + CI_IDLE_U)
/* Let us use same syntax as C code */
#define Debugger() ta 1; nop
@ -198,11 +198,7 @@
#define ICACHE_ALIGN .align 32
/* Give this real authority: reset the machine */
#if 1
#define NOTREACHED sir
#else
#define NOTREACHED
#endif
/*
* This macro will clear out a cache line before an explicit
@ -441,9 +437,12 @@ _C_LABEL(cpcb): POINTER _C_LABEL(u0)
/*
* romp is the prom entry pointer
* romtba is the prom trap table base address
*/
.globl romp
romp: POINTER 0
.globl romtba
romtba: POINTER 0
/* NB: Do we really need the following around? */
@ -4066,12 +4065,17 @@ interrupt_vector:
membar #Sync
stxa %g0, [%g0] ASI_IRSR ! Ack IRQ
membar #Sync ! Should not be needed due to retry
#if NOT_DEBUG
STACKFRAME(-CC64FSZ) ! Get a clean register window
mov %g1, %o1
mov %g2, %o2
mov %g1, %o2
mov %g2, %o3
LOAD_ASCIZ(%o0, "interrupt_vector: ASI_IRSR %lx ASI_IRDR(0x40) %lx\r\n")
ldxa [%g0] ASI_MID_REG, %o1
srax %o1, 17, %o1 ! Isolate UPAID from CPU reg
and %o1, 0x1f, %o1
LOAD_ASCIZ(%o0, "cpu%d: interrupt_vector: ASI_IRSR %lx ASI_IRDR(0x40) %lx\r\n")
GLOBTOLOC
call prom_printf
clr %g4
@ -4079,16 +4083,24 @@ interrupt_vector:
restore
nop
#endif
sethi %hi(_C_LABEL(intrlev)), %g3
btst IRSR_BUSY, %g1
or %g3, %lo(_C_LABEL(intrlev)), %g3
bz,pn %icc, 3f ! spurious interrupt
sllx %g2, PTRSHFT, %g5 ! Calculate entry number
cmp %g2, MAXINTNUM
#ifdef DEBUG
tgeu 55
#endif
brnz,pt %g2, Lsoftint_regular ! interrupt #0 is a fast cross-call
cmp %g2, MAXINTNUM
mov IRDR_1H, %g1
ldxa [%g1] ASI_IRDR, %g1 ! Get IPI handler address
mov IRDR_2H, %g2
jmpl %g1, %o7
ldxa [%g2] ASI_IRDR, %g2 ! Get IPI handler argument
Lsoftint_regular:
bgeu,pn %xcc, 3f
nop
LDPTR [%g3 + %g5], %g5 ! We have a pointer to the handler
@ -4106,10 +4118,6 @@ interrupt_vector:
restore
nop
1:
#endif
#ifdef NOT_DEBUG
tst %g5
tz 56
#endif
brz,pn %g5, 3f ! NULL means it isn't registered yet. Skip it.
@ -4233,6 +4241,87 @@ ret_from_intr_vector:
ba,a ret_from_intr_vector
nop ! XXX spitfire bug?
/*
* IPI handler to flush single pte.
* void sparc64_ipi_flush_pte(void *);
*
* On Entry:
*
* %g2 - pointer to 'ipi_tlb_args' structure
*/
ENTRY(sparc64_ipi_flush_pte)
ba,a ret_from_intr_vector
nop
/*
* IPI handler to flush single context.
* void sparc64_ipi_flush_ctx(void *);
*
* On Entry:
*
* %g2 - pointer to 'ipi_tlb_args' structure
*/
ENTRY(sparc64_ipi_flush_ctx)
ba,a ret_from_intr_vector
nop
/*
* IPI handler to flush the whole TLB.
* void sparc64_ipi_flush_all(void *);
*
* On Entry:
*
* %g2 - pointer to 'ipi_tlb_args' structure
*/
ENTRY(sparc64_ipi_flush_all)
rdpr %pstate, %g3
andn %g3, PSTATE_IE, %g2 ! disable interrupts
wrpr %g2, 0, %pstate
set (63 * 8), %g1 ! last TLB entry
membar #Sync
! %g1 = loop counter
! %g2 = TLB data value
! %g3 = saved %pstate
0:
ldxa [%g1] ASI_DMMU_TLB_DATA, %g2 ! fetch the TLB data
btst TTE_L, %g2 ! locked entry?
bnz,pt %icc, 1f ! if so, skip
nop
stxa %g0, [%g1] ASI_DMMU_TLB_DATA ! zap it
membar #Sync
1:
dec 8, %g1
brgz,pt %g1, 0b ! loop over all entries
nop
set (63 * 8), %g1 ! last TLB entry
0:
ldxa [%g1] ASI_IMMU_TLB_DATA, %g2 ! fetch the TLB data
btst TTE_L, %g2 ! locked entry?
bnz,pt %icc, 1f ! if so, skip
nop
stxa %g0, [%g1] ASI_IMMU_TLB_DATA ! zap it
membar #Sync
1:
dec 8, %g1
brgz,pt %g1, 0b ! loop over all entries
nop
sethi %hi(KERNBASE), %g4
membar #Sync
flush %g4
wrpr %g3, %pstate
ba,a ret_from_intr_vector
nop
/*
* Ultra1 and Ultra2 CPUs use soft interrupts for everything. What we do
* on a soft interrupt, is we should check which bits in ASR_SOFTINT(0x16)
@ -5455,12 +5544,16 @@ dostart:
1:
#endif
/*
* Step 1: Save rom entry pointer
* Step 1: Save rom entry pointer and prom tba
*/
set romp, %o5
STPTR %o4, [%o5] ! It's initialized data, I hope
rdpr %tba, %o4
set romtba, %o5
STPTR %o4, [%o5]
/*
* Step 2: Set up a v8-like stack if we need to
*/
@ -5919,6 +6012,22 @@ _C_LABEL(cpu_initialize):
mov %l0, %sp
flushw
#ifdef DEBUG
set _C_LABEL(pmapdebug), %o1
ld [%o1], %o1
sethi %hi(0x40000), %o2
btst %o2, %o1
bz 0f
set 1f, %o0 ! Debug printf
call _C_LABEL(prom_printf)
.data
1:
.asciz "Setting trap base...\r\n"
_ALIGN
.text
0:
#endif
/*
* Step 7: change the trap base register, and install our TSB pointers
*/
@ -5960,6 +6069,22 @@ _C_LABEL(cpu_initialize):
wrpr %g0, 0, %tstate
#endif
#ifdef DEBUG
set _C_LABEL(pmapdebug), %o1
ld [%o1], %o1
sethi %hi(0x40000), %o2
btst %o2, %o1
bz 0f
set 1f, %o0 ! Debug printf
call _C_LABEL(prom_printf)
.data
1:
.asciz "Calling startup routine...\r\n"
_ALIGN
.text
0:
#endif
/*
* Call our startup routine.
*/
@ -6086,7 +6211,6 @@ ENTRY(cpu_mp_startup)
membar #Sync ! We may need more membar #Sync in here
stxa %o2, [%g0] ASI_DMMU_DATA_IN ! Store TTE for DSEG
membar #Sync ! We may need more membar #Sync in here
! flush %o5 ! Make IMMU see this too
1:
add %o1, %l6, %o1 ! increment VA
cmp %o1, %l4 ! Next 4MB mapping....
@ -6142,16 +6266,12 @@ ENTRY(cpu_mp_startup)
* Get pointer to our cpu_info struct
*/
ldx [%g2 + CBA_CPUINFO], %l1 ! Load the interrupt stack's PA
sethi %hi(0xa0000000), %l2 ! V=1|SZ=01|NFO=0|IE=0
sllx %l2, 32, %l2 ! Shift it into place
mov -1, %l3 ! Create a nice mask
sllx %l3, 41, %l4 ! Mask off high bits
or %l4, 0xfff, %l4 ! We can just load this in 12 (of 13) bits
andn %l1, %l4, %l1 ! Mask the phys page number
or %l2, %l1, %l1 ! Now take care of the high bits
#ifdef NO_VCACHE
or %l1, TTE_L|TTE_CP|TTE_P|TTE_W, %l2 ! And low bits: L=1|CP=1|CV=0|E=0|P=1|W=0|G=0
@ -6166,9 +6286,9 @@ ENTRY(cpu_mp_startup)
set 1f, %o5
set INTSTACK, %l0
stxa %l0, [%l5] ASI_DMMU ! Make DMMU point to it
membar #Sync ! We may need more membar #Sync in here
membar #Sync
stxa %l2, [%g0] ASI_DMMU_DATA_IN ! Store it
membar #Sync ! We may need more membar #Sync in here
membar #Sync
flush %o5
flush %l0
1:
@ -6182,20 +6302,10 @@ ENTRY(cpu_mp_startup)
!!! Make sure our stack's OK.
LDPTR [%g2 + %lo(CBA_INITSTACK)], %l0
! sethi %hi(EINTSTACK), %l0
add %l0, -CC64FSZ-80-BIAS, %l0 ! via syscall(boot_me_up) or somesuch
add %l0, -CC64FSZ-80-BIAS, %l0
mov %l0, %sp
#if 0
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
rdpr %ver, %l7
and %l7, CWP, %l7
wrpr %l7, 0, %cleanwin
dec 1, %l7 ! NWINDOWS-1-1
wrpr %l7, %cansave
clr %fp ! End of stack.
#endif
set 1, %fp
clr %i7
/*
* install our TSB pointers
@ -6236,25 +6346,19 @@ ENTRY(cpu_mp_startup)
jmpl %l1, %o7
clr %g4
/* set up state required by idle */
set _C_LABEL(sched_lock_idle), %l1 ! Acquire sched_lock
jmpl %l1, %o7
nop
sethi %hi(_C_LABEL(sched_whichqs)), %l2
sethi %hi(CURLWP), %l7
set _C_LABEL(idle), %l1
jmpl %l1, %g0
nop
NOTREACHED
#if 0
/*
* XXX this function is relocated so all calls from here have to be PIC.
*/
set 1f, %o0 ! Main should never come back here
call _C_LABEL(panic)
nop
.data
1:
.asciz "mp_main() returned\r\n"
_ALIGN
.text
#endif
.globl cpu_mp_startup_end
cpu_mp_startup_end:
#endif
@ -6350,12 +6454,47 @@ ENTRY(openfirmware)
restore %o0, %g0, %o0
/*
* tlb_flush_pte(vaddr_t va, int ctx)
* void ofw_exit(cell_t args[])
*/
ENTRY(openfirmware_exit)
STACKFRAME(-CC64FSZ) ! Flush register windows
flushw
wrpr %g0, PIL_HIGH, %pil ! Disable interrupts
set romtba, %l5
wrpr %l5, 0, %tba ! restore the ofw trap table
/* Arrange locked kernel stack as PROM stack */
sethi %hi(CPUINFO_VA+CI_INITSTACK), %l5
LDPTR [%l5 + %lo(CPUINFO_VA+CI_INITSTACK)], %l5
add %l5, - CC64FSZ - 80, %l5 ! via syscall(boot_me_up) or somesuch
#ifdef _LP64
andn %l5, 0x0f, %l5 ! Needs to be 16-byte aligned
sub %l5, BIAS, %l5 ! and biased
#endif
mov %l5, %sp
flushw
set romp, %l6
LDPTR [%l6], %l6
mov CTX_PRIMARY, %l3 ! set context 0
stxa %g0, [%l3] ASI_DMMU
membar #Sync
wrpr %g0, 0, %tl ! force trap level 0
call %l6
mov %i0, %o0
NOTREACHED
/*
* sp_tlb_flush_pte(vaddr_t va, int ctx)
*
* Flush tte from both IMMU and DMMU.
*/
.align 8
ENTRY(tlb_flush_pte)
ENTRY(sp_tlb_flush_pte)
#ifdef DEBUG
set DATA_START, %o4 ! Forget any recent TLB misses
stx %g0, [%o4]
@ -6378,7 +6517,7 @@ ENTRY(tlb_flush_pte)
restore
.data
1:
.asciz "tlb_flush_pte: demap ctx=%x va=%08x res=%x\r\n"
.asciz "sp_tlb_flush_pte: demap ctx=%x va=%08x res=%x\r\n"
_ALIGN
.text
2:
@ -6435,12 +6574,12 @@ ENTRY(tlb_flush_pte)
#endif
/*
* tlb_flush_ctx(int ctx)
* sp_tlb_flush_ctx(int ctx)
*
* Flush entire context from both IMMU and DMMU.
*/
.align 8
ENTRY(tlb_flush_ctx)
ENTRY(sp_tlb_flush_ctx)
#ifdef DEBUG
set DATA_START, %o4 ! Forget any recent TLB misses
stx %g0, [%o4]
@ -6453,7 +6592,7 @@ ENTRY(tlb_flush_ctx)
restore
.data
1:
.asciz "tlb_flush_ctx: context flush of %d attempted\r\n"
.asciz "sp_tlb_flush_ctx: context flush of %d attempted\r\n"
_ALIGN
.text
#endif
@ -6465,7 +6604,7 @@ ENTRY(tlb_flush_ctx)
nop
.data
1:
.asciz "tlb_flush_ctx: attempted demap of NUCLEUS context\r\n"
.asciz "sp_tlb_flush_ctx: attempted demap of NUCLEUS context\r\n"
_ALIGN
.text
2:
@ -6512,12 +6651,12 @@ ENTRY(tlb_flush_ctx)
#endif
/*
* tlb_flush_all(void)
* sp_tlb_flush_all(void)
*
* Flush all user TLB entries from both IMMU and DMMU.
*/
.align 8
ENTRY(tlb_flush_all)
ENTRY(sp_tlb_flush_all)
#ifdef SPITFIRE
save %sp, -CC64FSZ, %sp
rdpr %pstate, %o3
@ -7672,11 +7811,6 @@ Lcopyfault:
*/
ENTRY(cpu_exit)
flushw ! We don't have anything else to run, so why not
#ifdef DEBUG
save %sp, -CC64FSZ, %sp
flushw
restore
#endif
wrpr %g0, PSTATE_KERN, %pstate ! Make sure we're on the right globals
mov %o0, %l2 ! save l arg for lwp_exit2() call
@ -7694,25 +7828,16 @@ ENTRY(cpu_exit)
.text
#endif
/*
* Change pcb to idle u. area, i.e., set %sp to top of stack
* and %psr to PSR_S|PSR_ET, and set cpcb to point to _idle_u.
* Once we have left the old stack, we can call kmem_free to
* destroy it. Call it any sooner and the register windows
* go bye-bye.
* Change pcb to idle u. area, i.e., set %sp to top of stack and
* %psr to PSR_S|PSR_ET, and set cpcb to point to curcpu()->ci_idle_u.
* Once we have left the old stack, we can free it.
* Free it any sooner and the register windows go bye-bye.
*/
#if 0
XXXPETR
set _C_LABEL(idle_u), %l1
#endif
sethi %hi(IDLE_U), %l1
LDPTR [%l1 + %lo(IDLE_U)], %l1
sethi %hi(CPCB), %l6
STPTR %l1, [%l6 + %lo(CPCB)] ! cpcb = &idle_u
#if 0
XXXPETR
set _C_LABEL(idle_u) + USPACE - CC64FSZ, %o0 ! set new %sp
#endif
STPTR %l1, [%l6 + %lo(CPCB)] ! cpcb = curcpu()->ci_idle_u
set USPACE - CC64FSZ, %o0 ! set new %sp
add %l1, %o0, %o0
#ifdef _LP64
@ -7725,11 +7850,11 @@ ENTRY(cpu_exit)
rdpr %ver, %l7
and %l7, CWP, %l7
wrpr %l7, 0, %cleanwin
dec 1, %l7 ! NWINDOWS-1-1
dec 1, %l7 ! NWINDOWS - 1 - 1
wrpr %l7, %cansave
clr %fp ! End of stack.
clr %fp ! End of stack.
#ifdef DEBUG
flushw ! DEBUG
flushw ! DEBUG
sethi %hi(IDLE_U), %l6
LDPTR [%l1 + %lo(IDLE_U)], %l6
! set _C_LABEL(idle_u), %l6
@ -7803,14 +7928,15 @@ ENTRY(cpu_exit)
* When no processes are on the runq, switch
* idles here waiting for something to come ready.
* The registers are set up as noted above.
* We are running on this CPU's idle stack.
*/
ENTRY(idle)
ENTRY_NOPROFILE(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
call _C_LABEL(sched_unlock_idle) ! Release sched_lock
#endif
STPTR %g0, [%l7 + %lo(CURLWP)] ! curlwp = NULL;
1: ! spin reading _whichqs until nonzero
wrpr %g0, PSTATE_INTR, %pstate ! Make sure interrupts are enabled
wrpr %g0, PSTATE_INTR, %pstate ! Make sure interrupts are enabled
wrpr %g0, 0, %pil ! (void) spl0();
#ifdef NOTDEF_DEBUG
save %sp, -CC64FSZ, %sp
@ -7819,9 +7945,6 @@ ENTRY(idle)
mov %g1, %o1
mov %g2, %o2
mov %g3, %o3
mov %g5, %l5
mov %g6, %l6
mov %g7, %l7
call _C_LABEL(prom_printf)
mov %g4, %o4
set idlemsg1, %o0
@ -7832,6 +7955,7 @@ ENTRY(idle)
LOCTOGLOB
restore
#endif
ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
brnz,pt %o3, notidle ! Something to run
nop
@ -8011,7 +8135,6 @@ Lsw_scan:
sll %o4, PTRSHFT+1, %o0
add %o0, %o5, %o5
LDPTR [%o5], %l3 ! p = q->ph_link;
! cpu_loadproc:
cmp %l3, %o5 ! if (p == q)
be,pn %icc, Lsw_panic_rq ! panic("switch rq");
EMPTY
@ -8062,6 +8185,7 @@ cpu_loadproc:
* p->p_cpu = curcpu();
*/
set CPUINFO_VA, %o0
LDPTR [%o0 + CI_SELF], %o0
STPTR %o0, [%l3 + L_CPU]
#endif
mov LSONPROC, %o0 ! l->l_stat = SONPROC


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.163 2004/01/19 08:42:20 martin Exp $ */
/* $NetBSD: machdep.c,v 1.164 2004/03/14 18:18:55 chs Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -78,7 +78,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.163 2004/01/19 08:42:20 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.164 2004/03/14 18:18:55 chs Exp $");
#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
@ -625,6 +625,9 @@ cpu_reboot(howto, user_boot_string)
}
(void) splhigh(); /* ??? */
/* Stop all secondary cpus */
sparc64_ipi_halt_cpus();
/* If rebooting and a dump is requested, do it. */
if (howto & RB_DUMP)
dumpsys();


@ -1,4 +1,4 @@
/* $NetBSD: ofw_machdep.c,v 1.20 2004/01/06 09:38:20 petrov Exp $ */
/* $NetBSD: ofw_machdep.c,v 1.21 2004/03/14 18:18:56 chs Exp $ */
/*
* Copyright (C) 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ofw_machdep.c,v 1.20 2004/01/06 09:38:20 petrov Exp $");
__KERNEL_RCSID(0, "$NetBSD: ofw_machdep.c,v 1.21 2004/03/14 18:18:56 chs Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@ -42,6 +42,7 @@ __KERNEL_RCSID(0, "$NetBSD: ofw_machdep.c,v 1.20 2004/01/06 09:38:20 petrov Exp
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/kprintf.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/systm.h>
@ -562,6 +563,9 @@ prom_get_msgbuf(len, align)
}
#ifdef MULTIPROCESSOR
/*
* Start secondary cpu, arrange 'func' as the entry.
*/
void
prom_startcpu(u_int cpu, void *func, u_long arg)
{
@ -583,6 +587,26 @@ prom_startcpu(u_int cpu, void *func, u_long arg)
openfirmware(&args);
}
/*
* Stop the calling cpu.
*/
void
prom_stopself(void)
{
static struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
} args;
args.name = ADR2CELL(&"SUNW,stop-self");
args.nargs = 0;
args.nreturns = 0;
openfirmware_exit(&args);
panic("sun4u_stopself: failed.");
}
#endif
/*
@ -632,15 +656,19 @@ prom_printf(fmt, va_alist)
va_dcl
#endif
{
int len;
int s, len;
static char buf[256];
va_list ap;
#ifdef MULTIPROCESSOR
extern struct simplelock kprintf_slock;
#endif
KPRINTF_MUTEX_ENTER(s);
va_start(ap, fmt);
len = vsprintf(buf, fmt, ap);
va_end(ap);
OF_write(OF_stdout(), buf, len);
KPRINTF_MUTEX_EXIT(s);
}
#ifdef DEBUG


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.154 2004/02/12 03:25:48 chs Exp $ */
/* $NetBSD: pmap.c,v 1.155 2004/03/14 18:18:56 chs Exp $ */
/*
*
* Copyright (C) 1996-1999 Eduardo Horvath.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.154 2004/02/12 03:25:48 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.155 2004/03/14 18:18:56 chs Exp $");
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
#define HWREF
@ -1051,6 +1051,7 @@ remap_data:
avail->start += PAGE_SIZE;
avail->size -= PAGE_SIZE;
}
/*
* Now we need to remove the area we valloc'ed from the available
* memory lists. (NB: we may have already alloc'ed the entire space).
@ -1297,10 +1298,10 @@ remap_data:
#ifdef DIAGNOSTIC
vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
#endif
if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */
intstk = vmmap;
cpus = (struct cpu_info *)(intstk+CPUINFO_VA-INTSTACK);
cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK);
BDPRINTF(PDB_BOOT1,
("Inserting cpu_info into pmap_kernel() at %p\r\n",
@ -1308,6 +1309,7 @@ remap_data:
/* Now map in all 8 pages of cpu_info */
pa = cpu0paddr;
prom_map_phys(pa, 64*KB, vmmap, -1);
/*
* Also map it in as the interrupt stack.
* This lets the PROM see this if needed.
@ -1316,7 +1318,7 @@ remap_data:
* before installing the locked TTE.
*/
prom_map_phys(pa, 64*KB, CPUINFO_VA, -1);
for (i=0; i<8; i++) {
for (i = 0; i < 8; i++) {
int64_t data;
data = TSB_DATA(0 /* global */,
@ -1335,7 +1337,8 @@ remap_data:
BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\r\n"));
/* Initialize our cpu_info structure */
memset((void*)intstk, 0, 8*PAGE_SIZE);
memset((void *)intstk, 0, 8 * PAGE_SIZE);
cpus->ci_self = cpus;
cpus->ci_next = NULL;
cpus->ci_curlwp = &lwp0;
cpus->ci_cpcb = (struct pcb *)u0[0]; /* Need better source */
@ -1350,7 +1353,10 @@ remap_data:
cpus->ci_eintstack = (void *)EINTSTACK;
cpus->ci_idle_u = (struct pcb *)(CPUINFO_VA + 2 * PAGE_SIZE);
cpu0paddr += 64*KB;
cpu0paddr += 64 * KB;
CPUSET_CLEAR(cpus_active);
CPUSET_ADD(cpus_active, 0);
/* The rest will be done at CPU attach time. */
BDPRINTF(PDB_BOOT1,
@ -1358,6 +1364,7 @@ remap_data:
}
vmmap = (vaddr_t)reserve_dumppages((caddr_t)(u_long)vmmap);
/*
* Set up bounds of allocatable memory for vmstat et al.
*/
@ -3654,3 +3661,18 @@ pmap_testout()
pmap_free_page(pa);
}
#endif
void
pmap_update(struct pmap *pmap)
{
#ifdef MULTIPROCESSOR
smp_tlb_flush_all();
#endif
if (pmap->pm_refs > 0) {
return;
}
pmap->pm_refs = 1;
pmap_activate_pmap(pmap);
}