From 9b4c0e34d2bef817bb574f10dc1b5ef7fedc0721 Mon Sep 17 00:00:00 2001 From: fvdl Date: Tue, 19 Jun 2001 00:19:12 +0000 Subject: [PATCH] Initial commit of the port to AMD's upcoming 64 bit architecture, the x86-64. Since there's no hardware available yet, this port is only known to run on the Simics simulator for at the moment, and as such uses the PC devices that it simulates for now. It will be developed more (and cleaned up) as the hardware becomes available. --- sys/arch/x86_64/compile/.keep_me | 3 + sys/arch/x86_64/conf/Makefile.x86_64 | 219 ++ sys/arch/x86_64/conf/SIMICS | 508 +++ sys/arch/x86_64/conf/files.x86_64 | 199 ++ sys/arch/x86_64/conf/std.x86_64 | 8 + sys/arch/x86_64/include/Makefile | 17 + sys/arch/x86_64/include/ansi.h | 100 + sys/arch/x86_64/include/aout_machdep.h | 15 + sys/arch/x86_64/include/asm.h | 101 + sys/arch/x86_64/include/bios32.h | 12 + sys/arch/x86_64/include/bootinfo.h | 16 + sys/arch/x86_64/include/bswap.h | 19 + sys/arch/x86_64/include/bus.h | 1221 +++++++ sys/arch/x86_64/include/byte_swap.h | 84 + sys/arch/x86_64/include/cdefs.h | 8 + sys/arch/x86_64/include/conf.h | 56 + sys/arch/x86_64/include/cpu.h | 238 ++ sys/arch/x86_64/include/cpufunc.h | 228 ++ sys/arch/x86_64/include/disklabel.h | 62 + sys/arch/x86_64/include/elf_machdep.h | 34 + sys/arch/x86_64/include/endian.h | 3 + sys/arch/x86_64/include/endian_machdep.h | 14 + sys/arch/x86_64/include/float.h | 12 + sys/arch/x86_64/include/fpu.h | 84 + sys/arch/x86_64/include/frame.h | 179 + sys/arch/x86_64/include/frameasm.h | 53 + sys/arch/x86_64/include/gdt.h | 46 + sys/arch/x86_64/include/ieee.h | 7 + sys/arch/x86_64/include/ieeefp.h | 12 + sys/arch/x86_64/include/int_const.h | 63 + sys/arch/x86_64/include/int_fmtio.h | 219 ++ sys/arch/x86_64/include/int_limits.h | 144 + sys/arch/x86_64/include/int_mwgwtypes.h | 72 + sys/arch/x86_64/include/int_types.h | 61 + sys/arch/x86_64/include/intr.h | 167 + sys/arch/x86_64/include/isa_machdep.h | 246 ++ sys/arch/x86_64/include/kcore.h | 49 + sys/arch/x86_64/include/limits.h | 97 + sys/arch/x86_64/include/loadfile_machdep.h | 81 + sys/arch/x86_64/include/lock.h | 98 + sys/arch/x86_64/include/math.h | 11 + sys/arch/x86_64/include/netbsd32_machdep.h | 106 + sys/arch/x86_64/include/param.h | 145 + sys/arch/x86_64/include/pcb.h | 130 + sys/arch/x86_64/include/pccons.h | 30 + sys/arch/x86_64/include/pci_machdep.h | 109 + sys/arch/x86_64/include/pio.h | 233 ++ sys/arch/x86_64/include/pmap.h | 555 ++++ sys/arch/x86_64/include/pmc.h | 43 + sys/arch/x86_64/include/proc.h | 53 + sys/arch/x86_64/include/profile.h | 65 + sys/arch/x86_64/include/psl.h | 7 + sys/arch/x86_64/include/pte.h | 135 + sys/arch/x86_64/include/ptrace.h | 40 + sys/arch/x86_64/include/reg.h | 111 + sys/arch/x86_64/include/rnd.h | 78 + sys/arch/x86_64/include/segments.h | 298 ++ sys/arch/x86_64/include/setjmp.h | 20 + sys/arch/x86_64/include/signal.h | 96 + sys/arch/x86_64/include/specialreg.h | 24 + sys/arch/x86_64/include/stdarg.h | 65 + sys/arch/x86_64/include/sysarch.h | 93 + sys/arch/x86_64/include/trap.h | 7 + sys/arch/x86_64/include/tss.h | 56 + sys/arch/x86_64/include/types.h | 70 + sys/arch/x86_64/include/userret.h | 97 + sys/arch/x86_64/include/varargs.h | 56 + sys/arch/x86_64/include/vmparam.h | 152 + sys/arch/x86_64/isa/clock.c | 845 +++++ sys/arch/x86_64/isa/isa_machdep.c | 1184 +++++++ sys/arch/x86_64/isa/pccons.c | 2695 +++++++++++++++ sys/arch/x86_64/pci/pchb.c | 273 ++ sys/arch/x86_64/pci/pchbvar.h | 60 + sys/arch/x86_64/pci/pci_machdep.c | 614 ++++ sys/arch/x86_64/pci/pcib.c | 221 ++ 
sys/arch/x86_64/pci/pciide_machdep.c | 72 + sys/arch/x86_64/x86_64/autoconf.c | 480 +++ sys/arch/x86_64/x86_64/bios32.c | 177 + sys/arch/x86_64/x86_64/bus_machdep.c | 963 ++++++ sys/arch/x86_64/x86_64/conf.c | 468 +++ sys/arch/x86_64/x86_64/consinit.c | 217 ++ sys/arch/x86_64/x86_64/copy.S | 438 +++ sys/arch/x86_64/x86_64/disksubr.c | 520 +++ sys/arch/x86_64/x86_64/fpu.c | 225 ++ sys/arch/x86_64/x86_64/gdt.c | 373 +++ sys/arch/x86_64/x86_64/locore.S | 1129 +++++++ sys/arch/x86_64/x86_64/machdep.c | 1676 ++++++++++ sys/arch/x86_64/x86_64/mainbus.c | 142 + sys/arch/x86_64/x86_64/md_root.c | 83 + sys/arch/x86_64/x86_64/mem.c | 202 ++ sys/arch/x86_64/x86_64/netbsd32_machdep.c | 374 +++ sys/arch/x86_64/x86_64/netbsd32_sigcode.S | 29 + sys/arch/x86_64/x86_64/netbsd32_syscall.c | 254 ++ sys/arch/x86_64/x86_64/pmap.c | 3460 ++++++++++++++++++++ sys/arch/x86_64/x86_64/process_machdep.c | 305 ++ sys/arch/x86_64/x86_64/sys_machdep.c | 418 +++ sys/arch/x86_64/x86_64/syscall.c | 307 ++ sys/arch/x86_64/x86_64/trap.c | 474 +++ sys/arch/x86_64/x86_64/vector.S | 676 ++++ sys/arch/x86_64/x86_64/vm_machdep.c | 397 +++ sys/lib/libkern/arch/x86_64/Makefile.inc | 12 + sys/lib/libkern/arch/x86_64/bcmp.S | 24 + sys/lib/libkern/arch/x86_64/bcopy.S | 97 + sys/lib/libkern/arch/x86_64/byte_swap_2.S | 52 + sys/lib/libkern/arch/x86_64/byte_swap_4.S | 52 + sys/lib/libkern/arch/x86_64/bzero.S | 44 + sys/lib/libkern/arch/x86_64/ffs.S | 21 + sys/lib/libkern/arch/x86_64/index.S | 29 + sys/lib/libkern/arch/x86_64/memchr.S | 25 + sys/lib/libkern/arch/x86_64/memcmp.S | 40 + sys/lib/libkern/arch/x86_64/memcpy.S | 4 + sys/lib/libkern/arch/x86_64/memmove.S | 4 + sys/lib/libkern/arch/x86_64/memset.S | 58 + sys/lib/libkern/arch/x86_64/random.S | 97 + sys/lib/libkern/arch/x86_64/rindex.S | 29 + sys/lib/libkern/arch/x86_64/scanc.S | 62 + sys/lib/libkern/arch/x86_64/skpc.S | 56 + sys/lib/libkern/arch/x86_64/strcat.S | 65 + sys/lib/libkern/arch/x86_64/strchr.S | 4 + sys/lib/libkern/arch/x86_64/strcmp.S | 88 + sys/lib/libkern/arch/x86_64/strcpy.S | 57 + sys/lib/libkern/arch/x86_64/strlen.S | 21 + sys/lib/libkern/arch/x86_64/strrchr.S | 4 + 123 files changed, 28436 insertions(+) create mode 100644 sys/arch/x86_64/compile/.keep_me create mode 100644 sys/arch/x86_64/conf/Makefile.x86_64 create mode 100644 sys/arch/x86_64/conf/SIMICS create mode 100644 sys/arch/x86_64/conf/files.x86_64 create mode 100644 sys/arch/x86_64/conf/std.x86_64 create mode 100644 sys/arch/x86_64/include/Makefile create mode 100644 sys/arch/x86_64/include/ansi.h create mode 100644 sys/arch/x86_64/include/aout_machdep.h create mode 100644 sys/arch/x86_64/include/asm.h create mode 100644 sys/arch/x86_64/include/bios32.h create mode 100644 sys/arch/x86_64/include/bootinfo.h create mode 100644 sys/arch/x86_64/include/bswap.h create mode 100644 sys/arch/x86_64/include/bus.h create mode 100644 sys/arch/x86_64/include/byte_swap.h create mode 100644 sys/arch/x86_64/include/cdefs.h create mode 100644 sys/arch/x86_64/include/conf.h create mode 100644 sys/arch/x86_64/include/cpu.h create mode 100644 sys/arch/x86_64/include/cpufunc.h create mode 100644 sys/arch/x86_64/include/disklabel.h create mode 100644 sys/arch/x86_64/include/elf_machdep.h create mode 100644 sys/arch/x86_64/include/endian.h create mode 100644 sys/arch/x86_64/include/endian_machdep.h create mode 100644 sys/arch/x86_64/include/float.h create mode 100644 sys/arch/x86_64/include/fpu.h create mode 100644 sys/arch/x86_64/include/frame.h create mode 100644 sys/arch/x86_64/include/frameasm.h create mode 100644 
sys/arch/x86_64/include/gdt.h create mode 100644 sys/arch/x86_64/include/ieee.h create mode 100644 sys/arch/x86_64/include/ieeefp.h create mode 100644 sys/arch/x86_64/include/int_const.h create mode 100644 sys/arch/x86_64/include/int_fmtio.h create mode 100644 sys/arch/x86_64/include/int_limits.h create mode 100644 sys/arch/x86_64/include/int_mwgwtypes.h create mode 100644 sys/arch/x86_64/include/int_types.h create mode 100644 sys/arch/x86_64/include/intr.h create mode 100644 sys/arch/x86_64/include/isa_machdep.h create mode 100644 sys/arch/x86_64/include/kcore.h create mode 100644 sys/arch/x86_64/include/limits.h create mode 100644 sys/arch/x86_64/include/loadfile_machdep.h create mode 100644 sys/arch/x86_64/include/lock.h create mode 100644 sys/arch/x86_64/include/math.h create mode 100644 sys/arch/x86_64/include/netbsd32_machdep.h create mode 100644 sys/arch/x86_64/include/param.h create mode 100644 sys/arch/x86_64/include/pcb.h create mode 100644 sys/arch/x86_64/include/pccons.h create mode 100644 sys/arch/x86_64/include/pci_machdep.h create mode 100644 sys/arch/x86_64/include/pio.h create mode 100644 sys/arch/x86_64/include/pmap.h create mode 100644 sys/arch/x86_64/include/pmc.h create mode 100644 sys/arch/x86_64/include/proc.h create mode 100644 sys/arch/x86_64/include/profile.h create mode 100644 sys/arch/x86_64/include/psl.h create mode 100644 sys/arch/x86_64/include/pte.h create mode 100644 sys/arch/x86_64/include/ptrace.h create mode 100644 sys/arch/x86_64/include/reg.h create mode 100644 sys/arch/x86_64/include/rnd.h create mode 100644 sys/arch/x86_64/include/segments.h create mode 100644 sys/arch/x86_64/include/setjmp.h create mode 100644 sys/arch/x86_64/include/signal.h create mode 100644 sys/arch/x86_64/include/specialreg.h create mode 100644 sys/arch/x86_64/include/stdarg.h create mode 100644 sys/arch/x86_64/include/sysarch.h create mode 100644 sys/arch/x86_64/include/trap.h create mode 100644 sys/arch/x86_64/include/tss.h create mode 100644 sys/arch/x86_64/include/types.h create mode 100644 sys/arch/x86_64/include/userret.h create mode 100644 sys/arch/x86_64/include/varargs.h create mode 100644 sys/arch/x86_64/include/vmparam.h create mode 100644 sys/arch/x86_64/isa/clock.c create mode 100644 sys/arch/x86_64/isa/isa_machdep.c create mode 100644 sys/arch/x86_64/isa/pccons.c create mode 100644 sys/arch/x86_64/pci/pchb.c create mode 100644 sys/arch/x86_64/pci/pchbvar.h create mode 100644 sys/arch/x86_64/pci/pci_machdep.c create mode 100644 sys/arch/x86_64/pci/pcib.c create mode 100644 sys/arch/x86_64/pci/pciide_machdep.c create mode 100644 sys/arch/x86_64/x86_64/autoconf.c create mode 100644 sys/arch/x86_64/x86_64/bios32.c create mode 100644 sys/arch/x86_64/x86_64/bus_machdep.c create mode 100644 sys/arch/x86_64/x86_64/conf.c create mode 100644 sys/arch/x86_64/x86_64/consinit.c create mode 100644 sys/arch/x86_64/x86_64/copy.S create mode 100644 sys/arch/x86_64/x86_64/disksubr.c create mode 100644 sys/arch/x86_64/x86_64/fpu.c create mode 100644 sys/arch/x86_64/x86_64/gdt.c create mode 100644 sys/arch/x86_64/x86_64/locore.S create mode 100644 sys/arch/x86_64/x86_64/machdep.c create mode 100644 sys/arch/x86_64/x86_64/mainbus.c create mode 100644 sys/arch/x86_64/x86_64/md_root.c create mode 100644 sys/arch/x86_64/x86_64/mem.c create mode 100644 sys/arch/x86_64/x86_64/netbsd32_machdep.c create mode 100644 sys/arch/x86_64/x86_64/netbsd32_sigcode.S create mode 100644 sys/arch/x86_64/x86_64/netbsd32_syscall.c create mode 100644 sys/arch/x86_64/x86_64/pmap.c create mode 100644 
sys/arch/x86_64/x86_64/process_machdep.c create mode 100644 sys/arch/x86_64/x86_64/sys_machdep.c create mode 100644 sys/arch/x86_64/x86_64/syscall.c create mode 100644 sys/arch/x86_64/x86_64/trap.c create mode 100644 sys/arch/x86_64/x86_64/vector.S create mode 100644 sys/arch/x86_64/x86_64/vm_machdep.c create mode 100644 sys/lib/libkern/arch/x86_64/Makefile.inc create mode 100644 sys/lib/libkern/arch/x86_64/bcmp.S create mode 100644 sys/lib/libkern/arch/x86_64/bcopy.S create mode 100644 sys/lib/libkern/arch/x86_64/byte_swap_2.S create mode 100644 sys/lib/libkern/arch/x86_64/byte_swap_4.S create mode 100644 sys/lib/libkern/arch/x86_64/bzero.S create mode 100644 sys/lib/libkern/arch/x86_64/ffs.S create mode 100644 sys/lib/libkern/arch/x86_64/index.S create mode 100644 sys/lib/libkern/arch/x86_64/memchr.S create mode 100644 sys/lib/libkern/arch/x86_64/memcmp.S create mode 100644 sys/lib/libkern/arch/x86_64/memcpy.S create mode 100644 sys/lib/libkern/arch/x86_64/memmove.S create mode 100644 sys/lib/libkern/arch/x86_64/memset.S create mode 100644 sys/lib/libkern/arch/x86_64/random.S create mode 100644 sys/lib/libkern/arch/x86_64/rindex.S create mode 100644 sys/lib/libkern/arch/x86_64/scanc.S create mode 100644 sys/lib/libkern/arch/x86_64/skpc.S create mode 100644 sys/lib/libkern/arch/x86_64/strcat.S create mode 100644 sys/lib/libkern/arch/x86_64/strchr.S create mode 100644 sys/lib/libkern/arch/x86_64/strcmp.S create mode 100644 sys/lib/libkern/arch/x86_64/strcpy.S create mode 100644 sys/lib/libkern/arch/x86_64/strlen.S create mode 100644 sys/lib/libkern/arch/x86_64/strrchr.S diff --git a/sys/arch/x86_64/compile/.keep_me b/sys/arch/x86_64/compile/.keep_me new file mode 100644 index 000000000000..bf3c9d6c1b2f --- /dev/null +++ b/sys/arch/x86_64/compile/.keep_me @@ -0,0 +1,3 @@ +$NetBSD: .keep_me,v 1.1 2001/06/19 00:19:12 fvdl Exp $ + +This normally empty directory needs to be kept in the distribution. diff --git a/sys/arch/x86_64/conf/Makefile.x86_64 b/sys/arch/x86_64/conf/Makefile.x86_64 new file mode 100644 index 000000000000..c36d3ef2a4d5 --- /dev/null +++ b/sys/arch/x86_64/conf/Makefile.x86_64 @@ -0,0 +1,219 @@ +# $NetBSD: Makefile.x86_64,v 1.1 2001/06/19 00:19:47 fvdl Exp $ + +# Makefile for NetBSD +# +# This makefile is constructed from a machine description: +# config machineid +# Most changes should be made in the machine description +# /sys/arch/x86_64/conf/``machineid'' +# after which you should do +# config machineid +# Machine generic makefile changes should be made in +# /sys/arch/x86_64/conf/Makefile.x86_64 +# after which config should be rerun for all machines of that type. + +.include + +# DEBUG is set to -g if debugging. +# PROF is set to -pg if profiling. + +AR?= ar +AS?= as +CC?= cc +CPP?= cpp +LD?= ld +LORDER?=lorder +MKDEP?= mkdep +NM?= nm +RANLIB?=ranlib +SIZE?= size +STRIP?= strip +TSORT?= tsort -q + +COPTS?= -O2 + +# source tree is located via $S relative to the compilation directory +.ifndef S +S!= cd ../../../..; pwd +.endif +X86_64= $S/arch/x86_64 + +INCLUDES= -I. 
-I$S/arch -I$S -nostdinc +CPPFLAGS= ${INCLUDES} ${IDENT} ${PARAM} -D_KERNEL -D_KERNEL_OPT -Dx86_64 +CWARNFLAGS?= -Werror -Wall -Wmissing-prototypes -Wstrict-prototypes \ + -Wpointer-arith + +CWARNFLAGS+= -Wno-uninitialized +CWARNFLAGS+= -fno-builtin -fno-strict-aliasing -fno-reorder-blocks +CWARNFLAGS+= -Wno-cast-qual +CWARNFLAGS+= -Wno-main +CWARNFLAGS+= -Wno-format +CFLAGS= ${DEBUG} ${COPTS} ${CWARNFLAGS} -mcmodel=large -mno-red-zone +AFLAGS= -x assembler-with-cpp -traditional-cpp -D_LOCORE +LINKFLAGS= -Ttext 0xffff800000100000 -e start + +STRIPFLAGS= -g + +%INCLUDES + +### find out what to use for libkern +KERN_AS= obj +.include "$S/lib/libkern/Makefile.inc" +.ifndef PROF +LIBKERN= ${KERNLIB} +.else +LIBKERN= ${KERNLIB_PROF} +.endif + +### find out what to use for libcompat +.include "$S/compat/common/Makefile.inc" +.ifndef PROF +LIBCOMPAT= ${COMPATLIB} +.else +LIBCOMPAT= ${COMPATLIB_PROF} +.endif + +# compile rules: rules are named ${TYPE}_${SUFFIX} where TYPE is NORMAL or +# HOSTED}, and SUFFIX is the file suffix, capitalized (e.g. C for a .c file). + +NORMAL_C= ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c $< +NOPROF_C= ${CC} ${CFLAGS} ${CPPFLAGS} -c $< +NORMAL_S= ${CC} ${AFLAGS} ${CPPFLAGS} -c $< + +%OBJS + +%CFILES + +%SFILES + +# load lines for config "xxx" will be emitted as: +# xxx: ${SYSTEM_DEP} swapxxx.o +# ${SYSTEM_LD_HEAD} +# ${SYSTEM_LD} swapxxx.o +# ${SYSTEM_LD_TAIL} +SYSTEM_OBJ= locore.o vector.o copy.o \ + param.o ioconf.o ${OBJS} ${LIBCOMPAT} ${LIBKERN} +SYSTEM_DEP= Makefile ${SYSTEM_OBJ} +SYSTEM_LD_HEAD= rm -f $@ +SYSTEM_LD= @echo ${LD} ${LINKFLAGS} -o $@ '$${SYSTEM_OBJ}' vers.o; \ + ${LD} ${LINKFLAGS} -o $@ ${SYSTEM_OBJ} vers.o +SYSTEM_LD_TAIL= @${SIZE} $@; chmod 755 $@ + +DEBUG?= +.if ${DEBUG} == "-g" +LINKFLAGS+= -X +SYSTEM_LD_TAIL+=; \ + echo mv -f $@ $@.gdb; mv -f $@ $@.gdb; \ + echo ${STRIP} ${STRIPFLAGS} -o $@ $@.gdb; \ + ${STRIP} ${STRIPFLAGS} -o $@ $@.gdb +.else +LINKFLAGS+= -X +.endif + +%LOAD + +assym.h: $S/kern/genassym.sh ${X86_64}/x86_64/genassym.cf + sh $S/kern/genassym.sh ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} \ + < ${X86_64}/x86_64/genassym.cf > assym.h.tmp && \ + mv -f assym.h.tmp assym.h + +param.c: $S/conf/param.c + rm -f param.c + cp $S/conf/param.c . 
+ +param.o: param.c Makefile + ${NORMAL_C} + +ioconf.o: ioconf.c + ${NORMAL_C} + +newvers: ${SYSTEM_DEP} ${SYSTEM_SWAP_DEP} + sh $S/conf/newvers.sh + ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} -c vers.c + + +__CLEANKERNEL: .USE + @echo "${.TARGET}ing the kernel objects" + rm -f eddep *netbsd netbsd.gdb tags *.[io] [a-z]*.s \ + [Ee]rrs linterrs makelinks assym.h.tmp assym.h + +__CLEANDEPEND: .USE + rm -f .depend + +clean: __CLEANKERNEL + +cleandir distclean: __CLEANKERNEL __CLEANDEPEND + +lint: + @lint -hbxncez -Dvolatile= ${CPPFLAGS} -UKGDB \ + ${X86_64}/x86_64/Locore.c ${CFILES} \ + ioconf.c param.c | \ + grep -v 'static function .* unused' + +tags: + @echo "see $S/kern/Makefile for tags" + +links: + egrep '#if' ${CFILES} | sed -f $S/conf/defines | \ + sed -e 's/:.*//' -e 's/\.c/.o/' | sort -u > dontlink + echo ${CFILES} | tr -s ' ' '\12' | sed 's/\.c/.o/' | \ + sort -u | comm -23 - dontlink | \ + sed 's,../.*/\(.*.o\),rm -f \1; ln -s ../GENERIC/\1 \1,' > makelinks + sh makelinks && rm -f dontlink + +SRCS= ${X86_64}/x86_64/locore.S ${X86_64}/x86_64/vector.S \ + ${X86_64}/x86_64/copy.S \ + param.c ioconf.c ${CFILES} ${SFILES} +depend: .depend +.depend: ${SRCS} assym.h param.c + ${MKDEP} ${AFLAGS} ${CPPFLAGS} ${X86_64}/x86_64/locore.S + ${MKDEP} ${AFLAGS} ${CPPFLAGS} ${X86_64}/x86_64/vector.S + ${MKDEP} ${AFLAGS} ${CPPFLAGS} ${X86_64}/x86_64/copy.S + ${MKDEP} -a ${CFLAGS} ${CPPFLAGS} param.c ioconf.c ${CFILES} + ${MKDEP} -a ${AFLAGS} ${CPPFLAGS} ${SFILES} + sh $S/kern/genassym.sh ${MKDEP} -f assym.dep ${CFLAGS} \ + ${CPPFLAGS} < ${X86_64}/x86_64/genassym.cf + @sed -e 's/.*\.o:.*\.c/assym.h:/' < assym.dep >> .depend + @rm -f assym.dep + +dependall: depend all + + +# depend on root or device configuration +autoconf.o conf.o: Makefile + +# depend on network or filesystem configuration +uipc_proto.o vfs_conf.o: Makefile + +# depend on maxusers +machdep.o: Makefile + +# depend on CPU configuration +locore.o machdep.o: Makefile + +locore.o: ${X86_64}/x86_64/locore.S assym.h + ${NORMAL_S} + +vector.o: ${X86_64}/x86_64/vector.S assym.h + ${NORMAL_S} + +copy.o: ${X86_64}/x86_64/vector.S assym.h + ${NORMAL_S} + +in_cksum.o: assym.h + +netbsd32_sigcode.o: assym.h + +# The install target can be redefined by putting a +# install-kernel-${MACHINE_NAME} target into /etc/mk.conf +MACHINE_NAME!= uname -n +install: install-kernel-${MACHINE_NAME} +.if !target(install-kernel-${MACHINE_NAME}}) +install-kernel-${MACHINE_NAME}: + rm -f /onetbsd + ln /netbsd /onetbsd + cp netbsd /nnetbsd + mv /nnetbsd /netbsd +.endif + +%RULES diff --git a/sys/arch/x86_64/conf/SIMICS b/sys/arch/x86_64/conf/SIMICS new file mode 100644 index 000000000000..cfffbb0d2b97 --- /dev/null +++ b/sys/arch/x86_64/conf/SIMICS @@ -0,0 +1,508 @@ +# $NetBSD: SIMICS,v 1.1 2001/06/19 00:19:48 fvdl Exp $ +# +# INSTALL_TINY - Tiny Installation kernel, suitable for 4M machines. +# +# This kernel should be derived from INSTALL (which is derived +# from GENERIC) with some features commented out. +# +# This kernel has been optimized for space. It is targeted towards +# small memory machines (4M). It has no SCSI, PCI or EISA. It does +# have pcmcia, for old laptops. +# + +include "arch/x86_64/conf/std.x86_64" + +makeoptions COPTS="-Os" + +# Enable the hooks used for initializing the root memory-disk. 
+#options MEMORY_DISK_HOOKS +#options MEMORY_DISK_IS_ROOT # force root on memory disk +#options MEMORY_DISK_SERVER=0 # no userspace memory disk support +#options MINIROOTSIZE=8192 # size of memory disk, in blocks +#options MINIROOTSIZE=2880 # 1.44M, same as a floppy + +maxusers 2 # estimated number of users + +# CPU-related options. +#options USER_LDT # user-settable LDT; used by WINE + +# Misc. x86_64-specific options +#options XSERVER # X server support in console drivers + +# This option allows you to force a serial console at the specified +# I/O address. +#options "CONSDEVNAME=\"com\"",CONADDR=0x3f8,CONSPEED=9600 + +#options UCONSOLE # users can use TIOCCONS (for xconsole) +options INSECURE # disable kernel security levels + +options RTC_OFFSET=0 # hardware clock is this many mins. west of GMT +#options NTP # NTP phase/frequency locked loop + +#options KTRACE # system call tracing via ktrace(1) + +options SYSVMSG # System V-like message queues +options SYSVSEM # System V-like semaphores +options SYSVSHM # System V-like memory sharing +#options SHMMAXPGS=1024 # 1024 pages is the default + +#options LKM # loadable kernel modules + +# Diagnostic/debugging support options +#options DIAGNOSTIC # cheap kernel consistency checks +#options DEBUG # expensive debugging checks/support +#options KMEMSTATS # kernel memory statistics (vmstat -m) +#options DDB # in-kernel debugger +#options DDB_HISTORY_SIZE=512 # enable history editing in DDB +#options KGDB # remote debugger +#options "KGDB_DEVNAME=\"com\"",KGDBADDR=0x3f8,KGDBRATE=9600 +#makeoptions DEBUG="-g" # compile full symbol table + +# Compatibility options +#options COMPAT_NOMID # compatibility with 386BSD, BSDI, NetBSD 0.8, +#options COMPAT_09 # NetBSD 0.9, +#options COMPAT_10 # NetBSD 1.0, +#options COMPAT_11 # NetBSD 1.1, +#options COMPAT_12 # NetBSD 1.2, +#options COMPAT_13 # NetBSD 1.3, +#options COMPAT_14 # NetBSD 1.4, +#options COMPAT_43 # and 4.3BSD +#options COMPAT_386BSD_MBRPART # recognize old partition ID + +#options COMPAT_SVR4 # binary compatibility with SVR4 +#options COMPAT_IBCS2 # binary compatibility with SCO and ISC +#options COMPAT_LINUX # binary compatibility with Linux +#options COMPAT_FREEBSD # binary compatibility with FreeBSD + +options COMPAT_NETBSD32 +options EXEC_ELF32 + +# File systems +file-system FFS # UFS +#file-system EXT2FS # second extended file system (linux) +#file-system LFS # log-structured file system +#file-system MFS # memory file system +#file-system NFS # Network File System client +#file-system CD9660 # ISO 9660 + Rock Ridge file system +#file-system MSDOSFS # MS-DOS file system +#file-system FDESC # /dev/fd +#file-system KERNFS # /kern +#file-system NULLFS # loopback file system +#file-system PORTAL # portal filesystem (still experimental) +#file-system PROCFS # /proc +#file-system UMAPFS # NULLFS + uid and gid remapping +#file-system UNION # union file system + +# Filesystem options +#options QUOTA # UFS quotas +#options NFSSERVER # Network File System server +#options NFS_V2_ONLY # Exclude NFS3 and NQNFS code to save space +#options VNODE_OP_NOINLINE # Save space by not inlining vnode op calls +#options EXT2FS_SYSTEM_FLAGS # makes ext2fs file flags (append and + # immutable) behave as system flags. 
+ +# Networking options +#options GATEWAY # packet forwarding +options INET # IP + ICMP + TCP + UDP +options INET6 +#options MROUTING # IP multicast routing +#options NS # XNS +#options NSIP # XNS tunneling over IP +#options ISO,TPIP # OSI +#options EON # OSI tunneling over IP +#options CCITT,LLC,HDLC # X.25 +#options NETATALK # AppleTalk networking protocols +#options PPP_BSDCOMP # BSD-Compress compression support for PPP +#options PPP_DEFLATE # Deflate compression support for PPP +#options PPP_FILTER # Active filter support for PPP (requires bpf) +#options PFIL_HOOKS # pfil(9) packet filter hooks + +# Compatibility with 4.2BSD implementation of TCP/IP. Not recommended. +#options TCP_COMPAT_42 + +# These options enable verbose messages for several subsystems. +# Warning, these may compile large string tables into the kernel! +#options EISAVERBOSE # verbose EISA device autoconfig messages +options PCIVERBOSE # verbose PCI device autoconfig messages +#options PCI_CONFIG_DUMP # verbosely dump PCI config space +#options PCMCIAVERBOSE # verbose PCMCIA configuration messages +#options SCSIVERBOSE # human readable SCSI error messages +#options I2OVERBOSE # verbose I2O driver messages + +# Squeeze... +#options NVNODE=50 +#options NBUF=20 +#options BUFPAGES=20 +#options NMBCLUSTERS=64 + +# Kernel root file system and dump configuration. +config netbsd root on ? type ? +#config netbsd root on sd0a type ffs +#config netbsd root on ? type nfs + +# +# Device configuration +# + +mainbus0 at root + +#apm0 at mainbus0 # Advanced power management + + +# Basic Bus Support + +# PCI bus support +pci* at mainbus? bus ? +pci* at pchb? bus ? +#pci* at ppb? bus ? + +# PCI bridges +pchb* at pci? dev ? function ? # PCI-Host bridges +#pceb* at pci? dev ? function ? # PCI-EISA bridges +pcib* at pci? dev ? function ? # PCI-ISA bridges +#ppb* at pci? dev ? function ? # PCI-PCI bridges +# XXX 'puc's aren't really bridges, but there's no better place for them here +#puc* at pci? dev ? function ? # PCI "universal" comm. cards + +# As well as 5 and 7, avoid irq 4 and 3 because we do not probe com* here +#options PCIC_ISA_INTR_ALLOC_MASK=0xff47 + +# PCMCIA bus support +#pcmcia* at pcic? controller ? socket ? +#pcmcia* at tcic? controller ? socket ? + +# ISA PCMCIA controllers +#pcic0 at isa? port 0x3e0 iomem 0xd0000 iosiz 0x10000 +#pcic1 at isa? port 0x3e2 iomem 0xe0000 iosiz 0x4000 + +# EISA bus support +#eisa* at mainbus? +#eisa* at pceb? + +# ISA bus support +isa* at mainbus? +#eisa* at pceb? +isa* at pcib? + +# ISA Plug-and-Play bus support +#isapnp0 at isa? + +# Coprocessor Support + +# Console Devices + +# ISA console +pc0 at isa? port 0x60 irq 1 # pccons generic PC console driver + +#options WSEMUL_VT100 # VT100 / VT220 emulation +#options WS_KERNEL_FG=WSCOL_GREEN +#options WSDISPLAY_COMPAT_USL +#options PCDISPLAY_SOFTCURSOR + +#pckbc0 at isa? # pc keyboard controller +#pckbd* at pckbc? # PC keyboard +#vga0 at isa? +#vga* at pci? dev ? function ? + +#pcdisplay0 at isa? # CGA, MDA, EGA, HGA +#wsdisplay* at vga? console ? +#wsdisplay* at pcdisplay? console ? +#wskbd* at pckbd? console ? + + +#pcppi0 at isa? +#sysbeep0 at pcppi? + + +# Serial Devices + +# PCI serial interfaces +#com* at puc? port ? # 16x50s on "universal" comm boards +#cy* at pci? dev ? function ? # Cyclades Cyclom-Y serial boards + +# ISA Plug-and-Play serial interfaces +#com* at isapnp? # Modems and serial boards + +# PCMCIA serial interfaces +#com* at pcmcia? function ? 
# Modems and serial cards + +# ISA serial interfaces +#options COM_HAYESP # adds Hayes ESP serial board support +#com0 at isa? port 0x3f8 irq 4 # Standard PC serial ports +#com1 at isa? port 0x2f8 irq 3 +#com2 at isa? port 0x3e8 irq 5 +#com3 at isa? port 0x2e8 irq 9 +#ast0 at isa? port 0x1a0 irq 5 # AST 4-port serial cards +#com* at ast? slave ? +#boca0 at isa? port 0x100 irq 5 # BOCA 8-port serial cards +#com* at boca? slave ? +#rtfps0 at isa? port 0x1230 irq 10 # RT 4-port serial cards +#com* at rtfps? slave ? +#cy0 at isa? iomem 0xd4000 irq 12 # Cyclades serial cards + + +# Parallel Printer Interfaces + +# PCI parallel printer interfaces +#lpt* at puc? port ? # || ports on "universal" comm boards + +# ISA parallel printer interfaces +#lpt0 at isa? port 0x378 irq 7 # standard PC parallel ports +#lpt1 at isa? port 0x278 +#lpt2 at isa? port 0x3bc + +# I2O devices +#iop* at pci? dev ? function ? # I/O processor +#iopsp* at iop? tid ? # SCSI/FC-AL ports +#ld* at iop? tid ? # block devices + +# SCSI Controllers and Devices + +# PCI SCSI controllers +#ahc* at pci? dev ? function ? # Adaptec [23]94x, aic78x0 SCSI +#bha* at pci? dev ? function ? # BusLogic 9xx SCSI +#dpt* at pci? dev ? function ? # DPT SmartCache/SmartRAID +#isp* at pci? dev ? function ? # Qlogic ISP [12]0x0 SCSI/FibreChannel +#ncr* at pci? dev ? function ? # NCR 53c8xx SCSI + +# EISA SCSI controllers +#ahb* at eisa? slot ? # Adaptec 174[02] SCSI +#ahc* at eisa? slot ? # Adaptec 274x, aic7770 SCSI +#bha* at eisa? slot ? # BusLogic 7xx SCSI +#dpt* at eisa? slot ? # DPT SmartCache/SmartRAID +#uha* at eisa? slot ? # UltraStor 24f SCSI + +# PCMCIA SCSI controllers +#aic* at pcmcia? function ? # Adaptec APA-1460 SCSI + +# ISA Plug-and-Play SCSI controllers +#aic* at isapnp? # Adaptec AHA-1520B + +# ISA SCSI controllers +#aha0 at isa? port 0x330 irq ? drq ? # Adaptec 154[02] SCSI +#aha1 at isa? port 0x334 irq ? drq ? +#ahc0 at isa? port ? irq ? # Adaptec 284x SCSI +#aic0 at isa? port 0x340 irq 11 # Adaptec 152[02] SCSI +#bha0 at isa? port 0x330 irq ? drq ? # BusLogic [457]4X SCSI +#bha1 at isa? port 0x334 irq ? drq ? +# The "nca" and "dpt" probes might give false hits or hang your machine. +#dpt0 at isa? port 0x170 irq ? drq ? # DPT SmartCache/SmartRAID +#nca0 at isa? port 0x360 irq 15 # Port-mapped NCR 53C80 controller +#nca1 at isa? iomem 0xd8000 irq 5 # Memory-mapped controller (T128, etc.) +#sea0 at isa? iomem 0xc8000 irq 5 # Seagate/Future Domain SCSI +#uha0 at isa? port 0x330 irq ? drq ? # UltraStor [13]4f SCSI +#uha1 at isa? port 0x340 irq ? drq ? +#wds0 at isa? port 0x350 irq 15 drq 6 # WD7000 and TMC-7000 controllers +#wds1 at isa? port 0x358 irq 11 drq 5 + +# SCSI bus support +#scsibus* at aha? +#scsibus* at ahb? +#scsibus* at ahc? +#scsibus* at aic? +#scsibus* at bha? +#scsibus* at dpt? +#scsibus* at iopsp? +#scsibus* at isp? +#scsibus* at nca? +#scsibus* at ncr? +#scsibus* at sea? +#scsibus* at uha? +#scsibus* at wds? + +# SCSI devices +#sd* at scsibus? target ? lun ? # SCSI disk drives +#st* at scsibus? target ? lun ? # SCSI tape drives +#cd* at scsibus? target ? lun ? # SCSI CD-ROM drives +#ch* at scsibus? target ? lun ? # SCSI autochangers +#ss* at scsibus? target ? lun ? # SCSI scanners +#uk* at scsibus? target ? lun ? # SCSI unknown + + +# RAID controllers and devices +#cac* at eisa? # Compaq EISA array controllers +#cac* at pci? dev ? function ? # Compaq PCI array controllers +#mlx* at pci? dev ? function ? # Mylex DAC960 & DEC SWXCR family +#twe* at pci? dev ? function ? 
# 3ware Escalade RAID controllers + +#ld* at cac? unit ? # logical disk devices +#ld* at twe? unit ? +#ld* at mlx? unit ? + + +# IDE and related devices + +# PCMCIA IDE controllers +#wdc* at pcmcia? function ? + +pciide* at pci? dev ? function ? flags 0x0000 + +# ST506, ESDI, and ISA IDE controllers +# Use flags 0x01 if you want to try to use 32bits data I/O (the driver will +# fall back to 16bits I/O if 32bits I/O are not functional). +# Some controllers pass the initial 32bit test, but will fail later. +wdc0 at isa? port 0x1f0 irq 14 flags 0x00 +wdc1 at isa? port 0x170 irq 15 flags 0x00 + +# IDE drives +wd* at wdc? drive ? # the drives themselves +wd* at pciide? channel ? drive ? flags 0x0000 + +# ATAPI bus support +atapibus* at wdc? +atapibus* at pciide? channel ? + +# ATAPI devices +cd* at atapibus? drive ? # ATAPI CD-ROM drives +sd* at atapibus? drive ? # ATAPI disk drives + + +# Miscellaneous mass storage devices + +# ISA floppy +fdc0 at isa? port 0x3f0 irq 6 drq 2 # standard PC floppy controllers +#fdc1 at isa? port 0x370 irq ? drq ? +#fd* at fdc? drive ? # the drives themselves +# some machines need you to do this instead of fd* +fd0 at fdc0 drive 0 + +# ISA CD-ROM devices +#mcd0 at isa? port 0x300 irq 10 # Mitsumi CD-ROM drives + +# ISA tape devices +# note: the wt driver conflicts unpleasantly with ed devices at the +# same I/O address. The probe reprograms their EEPROMs. Don't +# uncomment it unless you are actually using it. +#wt0 at isa? port 0x308 irq 5 drq 1 # Archive and Wangtek QIC tape drives + + +# Network Interfaces + +# PCI network interfaces +#de* at pci? dev ? function ? # DEC 21x4x-based Ethernet +#en* at pci? dev ? function ? # ENI/Adaptec ATM +#ep* at pci? dev ? function ? # 3Com 3c59x/3c90x Ethernet +#fpa* at pci? dev ? function ? # DEC DEFPA FDDI +#fxp* at pci? dev ? function ? # Intel EtherExpress PRO 10+/100B +#le* at pci? dev ? function ? # PCnet-PCI Ethernet +#ne* at pci? dev ? function ? # NE2000-compatible Ethernet +#tl* at pci? dev ? function ? # ThunderLAN-based Ethernet + +# EISA network interfaces +#ep* at eisa? slot ? # 3Com 3c579 Ethernet +#fea* at eisa? slot ? # DEC DEFEA FDDI + +# ISA Plug-and-Play network interfaces +#ep* at isapnp? # 3Com 3c509 Ethernet +#ne* at isapnp? # NE2000-compatible Ethernet + +# PCMCIA network interfaces +#ep* at pcmcia? function ? # 3Com 3c589 and 3c562 Ethernet +#mbe* at pcmcia? function ? # MB8696x based Ethernet +#ne* at pcmcia? function ? # NE2000-compatible Ethernet +#sm* at pcmcia? function ? # Megahertz Ethernet + +# ISA network interfaces +#ate0 at isa? port 0x2a0 irq ? # AT1700 +#ec0 at isa? port 0x250 iomem 0xd8000 irq 9 # 3Com 3c503 Ethernet +#eg0 at isa? port 0x280 irq 9 # 3C505 ethernet cards +#el0 at isa? port 0x300 irq 9 # 3C501 ethernet cards +#ep0 at isa? port ? irq ? # 3C509 ethernet cards +#ef0 at isa? port 0x360 iomem 0xd0000 irq 7 # 3C507 +#ai0 at isa? port 0x360 iomem 0xd0000 irq 7 # StarLAN +#fmv0 at isa? port 0x2a0 irq ? # FMV-180 series +#ix0 at isa? port 0x300 irq 10 # EtherExpress/16 +#iy0 at isa? port 0x360 irq ? # EtherExpress PRO 10 ISA +#lc0 at isa? port 0x320 iomem ? irq ? # DEC EtherWORKS III (LEMAC) +#depca0 at isa? port 0x300 iomem 0xc8000 iosiz 0x8000 irq 5 # DEPCA +#le* at depca? +nele0 at isa? port 0x300 irq 7 drq 7 # NE2100 +le* at nele? +#bicc0 at isa? port 0x320 irq 10 drq 7 # BICC IsoLan +#le* at bicc? +#ne0 at isa? port 0x280 irq 9 # NE[12]000 ethernet cards +#ne1 at isa? port 0x300 irq 10 +#sm0 at isa? port 0x300 irq 10 # SMC91C9x Ethernet +#we0 at isa? 
port 0x280 iomem 0xd0000 irq 9 # WD/SMC Ethernet +#we1 at isa? port 0x300 iomem 0xcc000 irq 10 + +# MII bus support +#mii* at tl? + +# MII PHY network interfaces +#tlphy* at mii? dev ? # ThunderLAN PHYs +#nsphy* at mii? dev ? # NS and compatible PHYs + + +# Audio Devices + +# ISA Plug-and-Play audio devices +#guspnp* at isapnp? # Gravis Ultra Sound PnP audio +#sb* at isapnp? # SoundBlaster-compatible audio + +# ISA audio devices +#gus0 at isa? port 0x220 irq 7 drq 1 drq2 6 # Gravis Ultra Sound +#pas0 at isa? port 0x220 irq 7 drq 1 # ProAudio Spectrum +#pss0 at isa? port 0x220 irq 7 drq 6 # Personal Sound System +#sp0 at pss0 port 0x530 irq 10 drq 0 # sound port driver +#sb0 at isa? port 0x220 irq 7 drq 1 drq2 5 # SoundBlaster +#wss0 at isa? port 0x530 irq 10 drq 0 drq2 1 # Windows Sound System + +# Audio support +#audio* at gus? +#audio* at guspnp? +#audio* at pas? +#audio* at sb? +#audio* at sp? +#audio* at wss? + +# The spkr driver provides a simple tone interface to the built in speaker. +#spkr0 at pcppi? # PC speaker + + +# Mice + +# ISA busmice +#olms0 at isa? port 0x23c irq 5 # Logitech bus mouse +#olms1 at isa? port 0x238 irq 5 +#omms0 at isa? port 0x23c irq 5 # Microsoft InPort mouse +#omms1 at isa? port 0x238 irq 5 +#opms0 at pc? irq 12 # PS/2 auxiliary port mouse + + +# Joysticks + +# ISA Plug-and-Play joysticks +#joy* at isapnp? # Game ports (usually on audio cards) + +# ISA joysticks. Probe is a little strange; add only if you have one. +#joy0 at isa? port 0x201 + + +# Miscellaneous Devices + +# Planetconnect Satellite receiver driver. +#satlink0 at isa? port 0x300 drq 1 + + +# Pseudo-Devices + +# disk/mass storage pseudo-devices +#pseudo-device ccd 4 # concatenated/striped disk devices +#pseudo-device md 1 # memory disk device (ramdisk) +#pseudo-device vnd 4 # disk-like interface to files + +# network pseudo-devices +#pseudo-device bpfilter 8 # Berkeley packet filter +#pseudo-device ipfilter # IP filter (firewall) and NAT +pseudo-device loop # network loopback +#pseudo-device ppp 2 # Point-to-Point Protocol +#pseudo-device sl 2 # Serial Line IP +#pseudo-device strip 2 # Starmode Radio IP (Metricom) +#pseudo-device tun 2 # network tunneling over tty + +# miscellaneous pseudo-devices +pseudo-device pty # pseudo-terminals (Sysinst needs two) +#pseudo-device tb 1 # tablet line discipline +#pseudo-device rnd # /dev/random and in-kernel generator +#options RND_COM # use "com" randomness as well (BROKEN) diff --git a/sys/arch/x86_64/conf/files.x86_64 b/sys/arch/x86_64/conf/files.x86_64 new file mode 100644 index 000000000000..c83362773b2d --- /dev/null +++ b/sys/arch/x86_64/conf/files.x86_64 @@ -0,0 +1,199 @@ +# $NetBSD: files.x86_64,v 1.1 2001/06/19 00:19:48 fvdl Exp $ +# +# new style config file for x86_64 architecture +# + +# maxpartitions must be first item in files.${ARCH}.newconf +maxpartitions 16 + +maxusers 2 16 64 + +# delay before cpu_reset() for reboot. 
+defopt CPURESET_DELAY + +# Large page size +defopt LARGEPAGES + +# +# XXX these are just here at the moment so that we can share files +# with the i386 (they include the opt_*.h for these) +# + +defopt USER_LDT +defopt VM86 + +file arch/x86_64/x86_64/autoconf.c +file arch/x86_64/x86_64/bus_machdep.c +file arch/x86_64/x86_64/conf.c +file arch/x86_64/x86_64/consinit.c +file arch/x86_64/x86_64/db_dbgreg.s ddb +file arch/x86_64/x86_64/db_disasm.c ddb +file arch/x86_64/x86_64/db_interface.c ddb +file arch/x86_64/x86_64/db_memrw.c ddb | kgdb +file arch/x86_64/x86_64/db_trace.c ddb +file arch/x86_64/x86_64/disksubr.c disk +file arch/x86_64/x86_64/gdt.c + +# +# Write the optimized versions for these. +# +file netinet/in_cksum.c inet +file netinet/in4_cksum.c inet + +#file arch/x86_64/x86_64/in_cksum.S inet + +file arch/x86_64/x86_64/ipkdb_glue.c ipkdb +file arch/x86_64/x86_64/kgdb_machdep.c kgdb +file arch/x86_64/x86_64/machdep.c +file arch/x86_64/x86_64/math_emulate.c math_emulate +file arch/x86_64/x86_64/mem.c +file arch/x86_64/x86_64/microtime.s +file netns/ns_cksum.c ns +file arch/x86_64/x86_64/pmap.c +file arch/x86_64/x86_64/process_machdep.c +#file arch/x86_64/x86_64/procfs_machdep.c procfs +file arch/x86_64/x86_64/sys_machdep.c +file arch/x86_64/x86_64/syscall.c +file arch/x86_64/x86_64/trap.c +file arch/x86_64/x86_64/vm_machdep.c +file arch/x86_64/x86_64/fpu.c +file dev/cons.c + +file arch/x86_64/x86_64/pmc.c perfctrs + +# +# Machine-independent SCSI drivers +# + +include "dev/scsipi/files.scsipi" +major {sd = 4} +major {cd = 6} + +# +# Machine-independent ATA drivers +# + +include "dev/ata/files.ata" +major {wd = 0} + +# Memory Disk for install floppy +file arch/x86_64/x86_64/md_root.c memory_disk_hooks +major {md = 17} + +# RAIDframe +major {raid = 18} + +# Logical disk +major {ld = 19} + +# +# Machine-independent I2O drivers +# + +include "dev/i2o/files.i2o" + +# BIOS32 routines +define bios32 +file arch/x86_64/x86_64/bios32.c bios32 needs-flag + +# +# System bus types +# + +define mainbus { } +# XXX BIOS32 only if something that uses it is configured! +device mainbus: isabus, pcibus, mainbus, bios32 +attach mainbus at root +file arch/x86_64/x86_64/mainbus.c mainbus + +# +# PCI-only drivers +# XXX MUST BE INCLUDED BEFORE files.isa, as long as files.isa attaches +# XXX devices to 'pci'. +# + +include "dev/pci/files.pci" +file arch/x86_64/pci/pci_machdep.c pci +defopt PCI_CONF_MODE +file arch/x86_64/pci/pciide_machdep.c pciide + +# PCI-Host bridge chipsets +device pchb: pcibus +attach pchb at pci +file arch/x86_64/pci/pchb.c pchb + +# PCI-ISA bridges +device pcib: isabus +attach pcib at pci +file arch/x86_64/pci/pcib.c pcib + +# +# ISA or ISA+PCI drivers +# + +include "dev/isa/files.isa" +major {mcd = 7} +#major {scd = 15} + +# XXX THE FOLLOWING BLOCK SHOULD GO INTO dev/pci/files.pci, BUT CANNOT +# XXX BECAUSE NOT 'lpt' IS DEFINED IN files.isa, RATHER THAN files. +# XXX (when the conf/files and files.isa bogons are fixed, this can +# XXX be fixed as well.) 
+ +attach lpt at puc with lpt_puc +file dev/pci/lpt_puc.c lpt_puc + +file arch/x86_64/isa/isa_machdep.c isa + +# PC clock +file arch/x86_64/isa/clock.c isa +file dev/clock_subr.c isa + +# attribute used to represent the "keyboard controller" +# XXX should be a real device +define pckbcport { [irq = -1], [port = -1] } + +# PC console support a la "pccons" +device pc: tty, pckbcport +attach pc at isa +device pcconskbd +attach pcconskbd at pckbc +file arch/x86_64/isa/pccons.c pc | pcconskbd needs-flag + +include "dev/wscons/files.wscons" + +include "dev/pckbc/files.pckbc" + +device sysbeep +attach sysbeep at pcppi + +# Floppy disk controller +device fdc {drive = -1}: isadma +file dev/isa/fd.c fdc needs-flag + +attach fdc at isa with fdc_isa +file dev/isa/fdc_isa.c fdc_isa + +device fd: disk +attach fd at fdc +major {fd = 2} + +# +# Compatibility modules +# + +# NetBSD/i386 32-bit binary compatibility (COMPAT_NETBSD32) +include "compat/netbsd32/files.netbsd32" +file arch/x86_64/x86_64/netbsd32_machdep.c compat_netbsd32 +file arch/x86_64/x86_64/netbsd32_sigcode.S compat_netbsd32 +file arch/x86_64/x86_64/netbsd32_syscall.c compat_netbsd32 + +# OSS audio driver compatibility +include "compat/ossaudio/files.ossaudio" + +# network devices MII bus +include "dev/mii/files.mii" + +include "dev/usb/files.usb" + +include "dev/ieee1394/files.ieee1394" diff --git a/sys/arch/x86_64/conf/std.x86_64 b/sys/arch/x86_64/conf/std.x86_64 new file mode 100644 index 000000000000..e5bc62577247 --- /dev/null +++ b/sys/arch/x86_64/conf/std.x86_64 @@ -0,0 +1,8 @@ +# $NetBSD: std.x86_64,v 1.1 2001/06/19 00:19:48 fvdl Exp $ +# +# standard, required NetBSD/x86_64 'options' + +machine x86_64 + +options EXEC_ELF64 # exec ELF binaries +options EXEC_SCRIPT # exec #! scripts diff --git a/sys/arch/x86_64/include/Makefile b/sys/arch/x86_64/include/Makefile new file mode 100644 index 000000000000..7ce4b2a854e8 --- /dev/null +++ b/sys/arch/x86_64/include/Makefile @@ -0,0 +1,17 @@ +# $NetBSD: Makefile,v 1.1 2001/06/19 00:20:09 fvdl Exp $ + +KDIR= /sys/arch/x86_64/include +INCSDIR= /usr/include/x86_64 + +INCS= ansi.h aout_machdep.h asm.h bios32.h bootinfo.h bswap.h \ + bus.h byte_swap.h cdefs.h conf.h cpu.h cpufunc.h \ + disklabel.h elf_machdep.h endian.h endian_machdep.h float.h \ + fpu.h frame.h frameasm.h gdt.h ieee.h ieeefp.h int_const.h \ + int_fmtio.h int_limits.h int_mwgwtypes.h int_types.h intr.h \ + isa_machdep.h kcore.h limits.h loadfile_machdep.h \ + lock.h math.h netbsd32_machdep.h out param.h pcb.h pccons.h \ + pci_machdep.h pio.h pmap.h pmc.h proc.h profile.h psl.h \ + pte.h ptrace.h reg.h rnd.h segments.h setjmp.h signal.h specialreg.h \ + stdarg.h sysarch.h trap.h tss.h types.h userret.h varargs.h vmparam.h \ + +.include diff --git a/sys/arch/x86_64/include/ansi.h b/sys/arch/x86_64/include/ansi.h new file mode 100644 index 000000000000..2e9c523b0a5a --- /dev/null +++ b/sys/arch/x86_64/include/ansi.h @@ -0,0 +1,100 @@ +/* $NetBSD: ansi.h,v 1.1 2001/06/19 00:20:09 fvdl Exp $ */ + +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ansi.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _ANSI_H_ +#define _ANSI_H_ + +#include + +#include + +/* + * Types which are fundamental to the implementation and may appear in + * more than one standard header are defined here. Standard headers + * then use: + * #ifdef _BSD_SIZE_T_ + * typedef _BSD_SIZE_T_ size_t; + * #undef _BSD_SIZE_T_ + * #endif + */ +#define _BSD_CLOCK_T_ unsigned int /* clock() */ +#define _BSD_PTRDIFF_T_ long /* ptr1 - ptr2 */ +#define _BSD_SIZE_T_ unsigned long /* sizeof() */ +#define _BSD_SSIZE_T_ long /* byte count or error */ +#define _BSD_TIME_T_ int /* time() */ +#if 1 +#define _BSD_VA_LIST_ __builtin_va_list /* GCC built-in type */ +#else +#define _BSD_VA_LIST_ char * /* XXXfvdl should be ok? */ +#endif +#define _BSD_CLOCKID_T_ int /* clockid_t */ +#define _BSD_TIMER_T_ int /* timer_t */ +#define _BSD_SUSECONDS_T_ int /* suseconds_t */ +#define _BSD_USECONDS_T_ unsigned int /* useconds_t */ + +/* + * NOTE: rune_t is not covered by ANSI nor other standards, and should not + * be instantiated outside of lib/libc/locale. use wchar_t. + * + * Runes (wchar_t) is declared to be an ``int'' instead of the more natural + * ``unsigned long'' or ``long''. Two things are happening here. It is not + * unsigned so that EOF (-1) can be naturally assigned to it and used. Also, + * it looks like 10646 will be a 31 bit standard. This means that if your + * ints cannot hold 32 bits, you will be in trouble. The reason an int was + * chosen over a long is that the is*() and to*() routines take ints (says + * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you + * lose a bit of ANSI conformance, but your programs will still work. + * + * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t + * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains + * defined for ctype.h. + */ +#define _BSD_WCHAR_T_ int /* wchar_t */ +#define _BSD_WINT_T_ int /* wint_t */ +#define _BSD_RUNE_T_ int /* rune_t */ + +/* + * mbstate_t is an opaque object to keep conversion state, during multibyte + * stream conversions. 
The content must not be referenced by user programs. + */ +typedef union { + char __mbstate8[128]; + __int64_t __mbstateL; /* for alignment */ +} __mbstate_t; +#define _BSD_MBSTATE_T_ __mbstate_t /* mbstate_t */ + +#endif /* _ANSI_H_ */ diff --git a/sys/arch/x86_64/include/aout_machdep.h b/sys/arch/x86_64/include/aout_machdep.h new file mode 100644 index 000000000000..2dbf87f6232f --- /dev/null +++ b/sys/arch/x86_64/include/aout_machdep.h @@ -0,0 +1,15 @@ +/* $NetBSD: aout_machdep.h,v 1.1 2001/06/19 00:20:09 fvdl Exp $ */ + +#ifndef _X86_64_AOUT_H_ +#define _X86_64_AOUT_H + +/* + * Only needed for 32 bit binaries in compatibility mode. + */ +#ifdef _KERNEL +#include +#else +#include +#endif + +#endif /* _X86_64_AOUT_H */ diff --git a/sys/arch/x86_64/include/asm.h b/sys/arch/x86_64/include/asm.h new file mode 100644 index 000000000000..f188f568fcd0 --- /dev/null +++ b/sys/arch/x86_64/include/asm.h @@ -0,0 +1,101 @@ +/* $NetBSD: asm.h,v 1.1 2001/06/19 00:20:09 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)asm.h 5.5 (Berkeley) 5/7/91 + */ + +#ifndef _X86_64_ASM_H_ +#define _X86_64_ASM_H_ + +#ifdef PIC +#define PIC_PLT(x) x@PLT +#define PIC_GOT(x) x@GOTPCREL(%rip) +#else +#define PIC_PLT(x) x +#define PIC_GOT(x) x +#endif + +# define _C_LABEL(x) x +#define _ASM_LABEL(x) x + +#ifdef __STDC__ +# define __CONCAT(x,y) x ## y +# define __STRING(x) #x +#else +# define __CONCAT(x,y) x/**/y +# define __STRING(x) "x" +#endif + +/* let kernels and others override entrypoint alignment */ +#ifndef _ALIGN_TEXT +#define _ALIGN_TEXT .align 4 +#endif + +#define _ENTRY(x) \ + .text; _ALIGN_TEXT; .globl x; .type x,@function; x: + +#ifdef GPROF +# define _PROF_PROLOGUE \ + pushq %rbp; leaq (%rsp),%rbp; call PIC_PLT(__mcount); popq %rbp +#else +# define _PROF_PROLOGUE +#endif + +#define ENTRY(y) _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE +#define NENTRY(y) _ENTRY(_C_LABEL(y)) +#define ASENTRY(y) _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE + +#define ASMSTR .asciz + +#define RCSID(x) .text; .asciz x + +#define WEAK_ALIAS(alias,sym) \ + .weak alias; \ + alias = sym + +/* XXXfvdl do not use stabs here */ +#ifdef __STDC__ +#define WARN_REFERENCES(sym,msg) \ + .stabs msg ## ,30,0,0,0 ; \ + .stabs __STRING(_C_LABEL(sym)) ## ,1,0,0,0 +#else +#define WARN_REFERENCES(sym,msg) \ + .stabs msg,30,0,0,0 ; \ + .stabs __STRING(sym),1,0,0,0 +#endif /* __STDC__ */ + +#endif /* !_X86_64_ASM_H_ */ diff --git a/sys/arch/x86_64/include/bios32.h b/sys/arch/x86_64/include/bios32.h new file mode 100644 index 000000000000..ea1fbd8bd4af --- /dev/null +++ b/sys/arch/x86_64/include/bios32.h @@ -0,0 +1,12 @@ +/* $NetBSD: bios32.h,v 1.1 2001/06/19 00:20:09 fvdl Exp $ */ + +/* + * XXXfvdl paddr_t, etc, isn't right in bios32 structures, use explicit + * sizes + */ + +#ifdef _KERNEL +#include +#else +#include +#endif diff --git a/sys/arch/x86_64/include/bootinfo.h b/sys/arch/x86_64/include/bootinfo.h new file mode 100644 index 000000000000..293f8d4422d2 --- /dev/null +++ b/sys/arch/x86_64/include/bootinfo.h @@ -0,0 +1,16 @@ +/* $NetBSD: bootinfo.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#ifndef _X86_64_BOOTINFO_H_ +#define _X86_64_BOOTINFO_H_ +/* + * Only the plain i386 info for now, could add more later, but that depends + * on the eventual architecture of the systems. + */ +#ifdef _KERNEL +#include +#else +#include +#endif + +#define VAR32_SIZE 4096 +#endif /* _X86_64_BOOTINFO_H_ */ diff --git a/sys/arch/x86_64/include/bswap.h b/sys/arch/x86_64/include/bswap.h new file mode 100644 index 000000000000..4d23b9836b0c --- /dev/null +++ b/sys/arch/x86_64/include/bswap.h @@ -0,0 +1,19 @@ +/* $NetBSD: bswap.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/* Written by Manuel Bouyer. Public domain */ + +#ifndef _MACHINE_BSWAP_H_ +#define _MACHINE_BSWAP_H_ + +#define __BSWAP_RENAME +#include + +#ifdef __GNUC__ + +#include +#define bswap16(x) __byte_swap_word(x) +#define bswap32(x) __byte_swap_long(x) + +#endif /* __GNUC__ */ + +#endif /* !_MACHINE_BSWAP_H_ */ diff --git a/sys/arch/x86_64/include/bus.h b/sys/arch/x86_64/include/bus.h new file mode 100644 index 000000000000..02f130266f5d --- /dev/null +++ b/sys/arch/x86_64/include/bus.h @@ -0,0 +1,1221 @@ +/* $NetBSD: bus.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 1996 Charles M. Hannum. All rights reserved. + * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Christopher G. Demetriou + * for the NetBSD Project. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * XXXfvdl plain copy of x86_64 stuff. The arrival of the real hardware + * may cause enough changes to this file to be seperate. + */ + +#ifndef _X86_64_BUS_H_ +#define _X86_64_BUS_H_ + +#include + +#ifdef BUS_SPACE_DEBUG +#include /* for printf() prototype */ +/* + * Macros for sanity-checking the aligned-ness of pointers passed to + * bus space ops. These are not strictly necessary on the x86, but + * could lead to performance improvements, and help catch problems + * with drivers that would creep up on other architectures. + */ +#define __BUS_SPACE_ALIGNED_ADDRESS(p, t) \ + ((((u_long)(p)) & (sizeof(t)-1)) == 0) + +#define __BUS_SPACE_ADDRESS_SANITY(p, t, d) \ +({ \ + if (__BUS_SPACE_ALIGNED_ADDRESS((p), t) == 0) { \ + printf("%s 0x%lx not aligned to %d bytes %s:%d\n", \ + d, (u_long)(p), sizeof(t), __FILE__, __LINE__); \ + } \ + (void) 0; \ +}) + +#define BUS_SPACE_ALIGNED_POINTER(p, t) __BUS_SPACE_ALIGNED_ADDRESS(p, t) +#else +#define __BUS_SPACE_ADDRESS_SANITY(p,t,d) (void) 0 +#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t) +#endif /* BUS_SPACE_DEBUG */ + +/* + * Values for the x86_64 bus space tag, not to be used directly by MI code. + */ +#define X86_64_BUS_SPACE_IO 0 /* space is i/o space */ +#define X86_64_BUS_SPACE_MEM 1 /* space is mem space */ + +#define __BUS_SPACE_HAS_STREAM_METHODS 1 + +/* + * Bus address and size types + */ +typedef u_long bus_addr_t; +typedef u_long bus_size_t; + +/* + * Access methods for bus resources and address space. + */ +typedef int bus_space_tag_t; +typedef u_long bus_space_handle_t; + +/* + * int bus_space_map __P((bus_space_tag_t t, bus_addr_t addr, + * bus_size_t size, int flags, bus_space_handle_t *bshp)); + * + * Map a region of bus space. + */ + +#define BUS_SPACE_MAP_CACHEABLE 0x01 +#define BUS_SPACE_MAP_LINEAR 0x02 +#define BUS_SPACE_MAP_PREFETCHABLE 0x04 + +int x86_64_memio_map __P((bus_space_tag_t t, bus_addr_t addr, + bus_size_t size, int flags, bus_space_handle_t *bshp)); +/* like map, but without extent map checking/allocation */ +int _x86_64_memio_map __P((bus_space_tag_t t, bus_addr_t addr, + bus_size_t size, int flags, bus_space_handle_t *bshp)); + +#define bus_space_map(t, a, s, f, hp) \ + x86_64_memio_map((t), (a), (s), (f), (hp)) + +/* + * int bus_space_unmap __P((bus_space_tag_t t, + * bus_space_handle_t bsh, bus_size_t size)); + * + * Unmap a region of bus space. + */ + +void x86_64_memio_unmap __P((bus_space_tag_t t, bus_space_handle_t bsh, + bus_size_t size)); + +#define bus_space_unmap(t, h, s) \ + x86_64_memio_unmap((t), (h), (s)) + +/* + * int bus_space_subregion __P((bus_space_tag_t t, + * bus_space_handle_t bsh, bus_size_t offset, bus_size_t size, + * bus_space_handle_t *nbshp)); + * + * Get a new handle for a subregion of an already-mapped area of bus space. + */ + +int x86_64_memio_subregion __P((bus_space_tag_t t, bus_space_handle_t bsh, + bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)); + +#define bus_space_subregion(t, h, o, s, nhp) \ + x86_64_memio_subregion((t), (h), (o), (s), (nhp)) + +/* + * int bus_space_alloc __P((bus_space_tag_t t, bus_addr_t rstart, + * bus_addr_t rend, bus_size_t size, bus_size_t align, + * bus_size_t boundary, int flags, bus_addr_t *addrp, + * bus_space_handle_t *bshp)); + * + * Allocate a region of bus space. 
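 *
 * (Editor's note, not part of the original patch: as a usage sketch only,
 *  a driver wanting any 16-byte, 16-byte-aligned chunk of I/O space below
 *  0x10000 would go through the bus_space_alloc() wrapper defined below,
 *  e.g. bus_space_alloc(t, 0, 0xffff, 16, 16, 0, 0, &addr, &ioh);
 *  the argument order follows the prototype above: range start/end, size,
 *  alignment, boundary, flags, then the returned address and handle.
 *  `addr' and `ioh' are hypothetical caller variables.)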
+ */ + +int x86_64_memio_alloc __P((bus_space_tag_t t, bus_addr_t rstart, + bus_addr_t rend, bus_size_t size, bus_size_t align, + bus_size_t boundary, int flags, bus_addr_t *addrp, + bus_space_handle_t *bshp)); + +#define bus_space_alloc(t, rs, re, s, a, b, f, ap, hp) \ + x86_64_memio_alloc((t), (rs), (re), (s), (a), (b), (f), (ap), (hp)) + +/* + * int bus_space_free __P((bus_space_tag_t t, + * bus_space_handle_t bsh, bus_size_t size)); + * + * Free a region of bus space. + */ + +void x86_64_memio_free __P((bus_space_tag_t t, bus_space_handle_t bsh, + bus_size_t size)); + +#define bus_space_free(t, h, s) \ + x86_64_memio_free((t), (h), (s)) + +/* + * void *bus_space_vaddr __P((bus_space_tag_t, bus_space_handle_t)); + * + * Get the kernel virtual address for the mapped bus space. + * Only allowed for regions mapped with BUS_SPACE_MAP_LINEAR. + * (XXX not enforced) + */ +#define bus_space_vaddr(t, h) \ + ((t) == X86_64_BUS_SPACE_MEM ? (void *)(h) : (void *)0) + +/* + * u_intN_t bus_space_read_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset)); + * + * Read a 1, 2, 4, or 8 byte quantity from bus space + * described by tag/handle/offset. + */ + +#define bus_space_read_1(t, h, o) \ + ((t) == X86_64_BUS_SPACE_IO ? (inb((h) + (o))) : \ + (*(volatile u_int8_t *)((h) + (o)))) + +#define bus_space_read_2(t, h, o) \ + (__BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"), \ + ((t) == X86_64_BUS_SPACE_IO ? (inw((h) + (o))) : \ + (*(volatile u_int16_t *)((h) + (o))))) + +#define bus_space_read_4(t, h, o) \ + (__BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"), \ + ((t) == X86_64_BUS_SPACE_IO ? (inl((h) + (o))) : \ + (*(volatile u_int32_t *)((h) + (o))))) + +#define bus_space_read_stream_1 bus_space_read_1 +#define bus_space_read_stream_2 bus_space_read_2 +#define bus_space_read_stream_4 bus_space_read_4 + +#if 0 /* Cause a link error for bus_space_read_8 */ +#define bus_space_read_8(t, h, o) !!! bus_space_read_8 unimplemented !!! +#define bus_space_read_stream_8(t, h, o) \ + !!! bus_space_read_stream_8 unimplemented !!! +#endif + +/* + * void bus_space_read_multi_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * u_intN_t *addr, size_t count)); + * + * Read `count' 1, 2, 4, or 8 byte quantities from bus space + * described by tag/handle/offset and copy into buffer provided. 
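+ *
+ * Illustrative sketch (the `iot'/`ioh' names and the 0x00 data-port
+ * offset are hypothetical): draining 64 bytes from a FIFO data
+ * register looks like
+ *
+ *	u_int8_t buf[64];
+ *
+ *	bus_space_read_multi_1(iot, ioh, 0x00, buf, sizeof(buf));
+ *
+ * The multi ops read the same bus location `count' times; the region
+ * ops further below advance the offset on every transfer.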
+ */ + +#define bus_space_read_multi_1(t, h, o, a, c) \ +do { \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + insb((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: movb (%2),%%al ; \ + stosb ; \ + loop 1b" : \ + "=D" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_multi_2(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int16_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + insw((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: movw (%2),%%ax ; \ + stosw ; \ + loop 1b" : \ + "=D" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_multi_4(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int32_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + insl((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: movl (%2),%%eax ; \ + stosl ; \ + loop 1b" : \ + "=D" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_multi_stream_1 bus_space_read_multi_1 +#define bus_space_read_multi_stream_2 bus_space_read_multi_2 +#define bus_space_read_multi_stream_4 bus_space_read_multi_4 + +#if 0 /* Cause a link error for bus_space_read_multi_8 */ +#define bus_space_read_multi_8 !!! bus_space_read_multi_8 unimplemented !!! +#define bus_space_read_multi_stream_8 \ + !!! bus_space_read_multi_stream_8 unimplemented !!! +#endif + +/* + * void bus_space_read_region_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * u_intN_t *addr, size_t count)); + * + * Read `count' 1, 2, 4, or 8 byte quantities from bus space + * described by tag/handle and starting at `offset' and copy into + * buffer provided. 
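+ *
+ * Illustrative sketch (the `memt'/`memh' names and the 0x100 offset
+ * are hypothetical): copying 16 32-bit counters out of a
+ * memory-mapped device looks like
+ *
+ *	u_int32_t stats[16];
+ *
+ *	bus_space_read_region_4(memt, memh, 0x100, stats, 16);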
+ */ + +#define bus_space_read_region_1(t, h, o, a, c) \ +do { \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: inb %w1,%%al ; \ + stosb ; \ + incl %1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=D" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsb" : \ + "=S" (dummy1), "=D" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_region_2(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int16_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: inw %w1,%%ax ; \ + stosw ; \ + addl $2,%1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=D" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsw" : \ + "=S" (dummy1), "=D" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_region_4(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int32_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: inl %w1,%%eax ; \ + stosl ; \ + addl $4,%1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=D" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsl" : \ + "=S" (dummy1), "=D" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_read_region_stream_1 bus_space_read_region_1 +#define bus_space_read_region_stream_2 bus_space_read_region_2 +#define bus_space_read_region_stream_4 bus_space_read_region_4 + +#if 0 /* Cause a link error for bus_space_read_region_8 */ +#define bus_space_read_region_8 !!! bus_space_read_region_8 unimplemented !!! +#define bus_space_read_region_stream_8 \ + !!! bus_space_read_region_stream_8 unimplemented !!! +#endif + +/* + * void bus_space_write_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * u_intN_t value)); + * + * Write the 1, 2, 4, or 8 byte value `value' to bus space + * described by tag/handle/offset. 
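+ *
+ * Illustrative sketch (the register offset and bit value below are
+ * hypothetical): kicking a device's reset bit looks like
+ *
+ *	#define FOO_CSR		0x04
+ *	#define FOO_CSR_RESET	0x01
+ *
+ *	bus_space_write_1(iot, ioh, FOO_CSR, FOO_CSR_RESET);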
+ */ + +#define bus_space_write_1(t, h, o, v) \ +do { \ + if ((t) == X86_64_BUS_SPACE_IO) \ + outb((h) + (o), (v)); \ + else \ + ((void)(*(volatile u_int8_t *)((h) + (o)) = (v))); \ +} while (0) + +#define bus_space_write_2(t, h, o, v) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) \ + outw((h) + (o), (v)); \ + else \ + ((void)(*(volatile u_int16_t *)((h) + (o)) = (v))); \ +} while (0) + +#define bus_space_write_4(t, h, o, v) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) \ + outl((h) + (o), (v)); \ + else \ + ((void)(*(volatile u_int32_t *)((h) + (o)) = (v))); \ +} while (0) + +#define bus_space_write_stream_1 bus_space_write_1 +#define bus_space_write_stream_2 bus_space_write_2 +#define bus_space_write_stream_4 bus_space_write_4 + +#if 0 /* Cause a link error for bus_space_write_8 */ +#define bus_space_write_8 !!! bus_space_write_8 not implemented !!! +#define bus_space_write_stream_8 \ + !!! bus_space_write_stream_8 not implemented !!! +#endif + +/* + * void bus_space_write_multi_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * const u_intN_t *addr, size_t count)); + * + * Write `count' 1, 2, 4, or 8 byte quantities from the buffer + * provided to bus space described by tag/handle/offset. + */ + +#define bus_space_write_multi_1(t, h, o, a, c) \ +do { \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + outsb((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsb ; \ + movb %%al,(%2) ; \ + loop 1b" : \ + "=S" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o))); \ + } \ +} while (0) + +#define bus_space_write_multi_2(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int16_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + outsw((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsw ; \ + movw %%ax,(%2) ; \ + loop 1b" : \ + "=S" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o))); \ + } \ +} while (0) + +#define bus_space_write_multi_4(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int32_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + outsl((h) + (o), (a), (c)); \ + } else { \ + void *dummy1; \ + int dummy2; \ + void *dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsl ; \ + movl %%eax,(%2) ; \ + loop 1b" : \ + "=S" (dummy1), "=c" (dummy2), "=r" (dummy3), "=&a" (__x) : \ + "0" ((a)), "1" ((c)), "2" ((h) + (o))); \ + } \ +} while (0) + +#define bus_space_write_multi_stream_1 bus_space_write_multi_1 +#define bus_space_write_multi_stream_2 bus_space_write_multi_2 +#define bus_space_write_multi_stream_4 bus_space_write_multi_4 + +#if 0 /* Cause a link error for bus_space_write_multi_8 */ +#define bus_space_write_multi_8(t, h, o, a, c) \ + !!! bus_space_write_multi_8 unimplemented !!! +#define bus_space_write_multi_stream_8(t, h, o, a, c) \ + !!! bus_space_write_multi_stream_8 unimplemented !!! 
+#endif + +/* + * void bus_space_write_region_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * const u_intN_t *addr, size_t count)); + * + * Write `count' 1, 2, 4, or 8 byte quantities from the buffer provided + * to bus space described by tag/handle starting at `offset'. + */ + +#define bus_space_write_region_1(t, h, o, a, c) \ +do { \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsb ; \ + outb %%al,%w1 ; \ + incl %1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=S" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsb" : \ + "=D" (dummy1), "=S" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_write_region_2(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int16_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsw ; \ + outw %%ax,%w1 ; \ + addl $2,%1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=S" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsw" : \ + "=D" (dummy1), "=S" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_write_region_4(t, h, o, a, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((a), u_int32_t, "buffer"); \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + if ((t) == X86_64_BUS_SPACE_IO) { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + int __x; \ + __asm __volatile(" \ + cld ; \ + 1: lodsl ; \ + outl %%eax,%w1 ; \ + addl $4,%1 ; \ + loop 1b" : \ + "=&a" (__x), "=d" (dummy1), "=S" (dummy2), \ + "=c" (dummy3) : \ + "1" ((h) + (o)), "2" ((a)), "3" ((c)) : \ + "memory"); \ + } else { \ + int dummy1; \ + void *dummy2; \ + int dummy3; \ + __asm __volatile(" \ + cld ; \ + repne ; \ + movsl" : \ + "=D" (dummy1), "=S" (dummy2), "=c" (dummy3) : \ + "0" ((h) + (o)), "1" ((a)), "2" ((c)) : \ + "memory"); \ + } \ +} while (0) + +#define bus_space_write_region_stream_1 bus_space_write_region_1 +#define bus_space_write_region_stream_2 bus_space_write_region_2 +#define bus_space_write_region_stream_4 bus_space_write_region_4 + +#if 0 /* Cause a link error for bus_space_write_region_8 */ +#define bus_space_write_region_8 \ + !!! bus_space_write_region_8 unimplemented !!! +#define bus_space_write_region_stream_8 \ + !!! bus_space_write_region_stream_8 unimplemented !!! +#endif + +/* + * void bus_space_set_multi_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val, + * size_t count)); + * + * Write the 1, 2, 4, or 8 byte value `val' to bus space described + * by tag/handle/offset `count' times. 
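+ *
+ * Illustrative sketch (the `iot'/`ioh' names and the data-port offset
+ * are hypothetical): padding a FIFO with 32 zero half-words is
+ *
+ *	bus_space_set_multi_2(iot, ioh, 0x00, 0, 32);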
+ */ + +static __inline void x86_64_memio_set_multi_1 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int8_t, size_t)); +static __inline void x86_64_memio_set_multi_2 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int16_t, size_t)); +static __inline void x86_64_memio_set_multi_4 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int32_t, size_t)); + +#define bus_space_set_multi_1(t, h, o, v, c) \ + x86_64_memio_set_multi_1((t), (h), (o), (v), (c)) + +#define bus_space_set_multi_2(t, h, o, v, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + x86_64_memio_set_multi_2((t), (h), (o), (v), (c)); \ +} while (0) + +#define bus_space_set_multi_4(t, h, o, v, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + x86_64_memio_set_multi_4((t), (h), (o), (v), (c)); \ +} while (0) + +static __inline void +x86_64_memio_set_multi_1(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int8_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + while (c--) + outb(addr, v); + else + while (c--) + *(volatile u_int8_t *)(addr) = v; +} + +static __inline void +x86_64_memio_set_multi_2(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int16_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + while (c--) + outw(addr, v); + else + while (c--) + *(volatile u_int16_t *)(addr) = v; +} + +static __inline void +x86_64_memio_set_multi_4(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int32_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + while (c--) + outl(addr, v); + else + while (c--) + *(volatile u_int32_t *)(addr) = v; +} + +#if 0 /* Cause a link error for bus_space_set_multi_8 */ +#define bus_space_set_multi_8 !!! bus_space_set_multi_8 unimplemented !!! +#endif + +/* + * void bus_space_set_region_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, u_intN_t val, + * size_t count)); + * + * Write `count' 1, 2, 4, or 8 byte value `val' to bus space described + * by tag/handle starting at `offset'. 
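+ *
+ * Illustrative sketch (the `memt'/`memh' names and the screen geometry
+ * are hypothetical): blanking an 80x25 text screen with
+ * white-on-black spaces is
+ *
+ *	bus_space_set_region_2(memt, memh, 0, 0x0720, 80 * 25);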
+ */ + +static __inline void x86_64_memio_set_region_1 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int8_t, size_t)); +static __inline void x86_64_memio_set_region_2 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int16_t, size_t)); +static __inline void x86_64_memio_set_region_4 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, u_int32_t, size_t)); + +#define bus_space_set_region_1(t, h, o, v, c) \ + x86_64_memio_set_region_1((t), (h), (o), (v), (c)) + +#define bus_space_set_region_2(t, h, o, v, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int16_t, "bus addr"); \ + x86_64_memio_set_region_2((t), (h), (o), (v), (c)); \ +} while (0) + +#define bus_space_set_region_4(t, h, o, v, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h) + (o), u_int32_t, "bus addr"); \ + x86_64_memio_set_region_4((t), (h), (o), (v), (c)); \ +} while (0) + +static __inline void +x86_64_memio_set_region_1(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int8_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + for (; c != 0; c--, addr++) + outb(addr, v); + else + for (; c != 0; c--, addr++) + *(volatile u_int8_t *)(addr) = v; +} + +static __inline void +x86_64_memio_set_region_2(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int16_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + for (; c != 0; c--, addr += 2) + outw(addr, v); + else + for (; c != 0; c--, addr += 2) + *(volatile u_int16_t *)(addr) = v; +} + +static __inline void +x86_64_memio_set_region_4(t, h, o, v, c) + bus_space_tag_t t; + bus_space_handle_t h; + bus_size_t o; + u_int32_t v; + size_t c; +{ + bus_addr_t addr = h + o; + + if (t == X86_64_BUS_SPACE_IO) + for (; c != 0; c--, addr += 4) + outl(addr, v); + else + for (; c != 0; c--, addr += 4) + *(volatile u_int32_t *)(addr) = v; +} + +#if 0 /* Cause a link error for bus_space_set_region_8 */ +#define bus_space_set_region_8 !!! bus_space_set_region_8 unimplemented !!! +#endif + +/* + * void bus_space_copy_region_N __P((bus_space_tag_t tag, + * bus_space_handle_t bsh1, bus_size_t off1, + * bus_space_handle_t bsh2, bus_size_t off2, + * size_t count)); + * + * Copy `count' 1, 2, 4, or 8 byte values from bus space starting + * at tag/bsh1/off1 to bus space starting at tag/bsh2/off2. 
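+ *
+ * Illustrative sketch (the `memt'/`memh' names and the screen geometry
+ * are hypothetical): scrolling an 80-column text screen up by one
+ * line, with the overlap handled by the implementation below, is
+ *
+ *	bus_space_copy_region_2(memt, memh, 80 * 2, memh, 0, 80 * 24);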
+ */ + +static __inline void x86_64_memio_copy_region_1 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, bus_space_handle_t, + bus_size_t, size_t)); +static __inline void x86_64_memio_copy_region_2 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, bus_space_handle_t, + bus_size_t, size_t)); +static __inline void x86_64_memio_copy_region_4 __P((bus_space_tag_t, + bus_space_handle_t, bus_size_t, bus_space_handle_t, + bus_size_t, size_t)); + +#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \ + x86_64_memio_copy_region_1((t), (h1), (o1), (h2), (o2), (c)) + +#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h1) + (o1), u_int16_t, "bus addr 1"); \ + __BUS_SPACE_ADDRESS_SANITY((h2) + (o2), u_int16_t, "bus addr 2"); \ + x86_64_memio_copy_region_2((t), (h1), (o1), (h2), (o2), (c)); \ +} while (0) + +#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \ +do { \ + __BUS_SPACE_ADDRESS_SANITY((h1) + (o1), u_int32_t, "bus addr 1"); \ + __BUS_SPACE_ADDRESS_SANITY((h2) + (o2), u_int32_t, "bus addr 2"); \ + x86_64_memio_copy_region_4((t), (h1), (o1), (h2), (o2), (c)); \ +} while (0) + +static __inline void +x86_64_memio_copy_region_1(t, h1, o1, h2, o2, c) + bus_space_tag_t t; + bus_space_handle_t h1; + bus_size_t o1; + bus_space_handle_t h2; + bus_size_t o2; + size_t c; +{ + bus_addr_t addr1 = h1 + o1; + bus_addr_t addr2 = h2 + o2; + + if (t == X86_64_BUS_SPACE_IO) { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1++, addr2++) + outb(addr2, inb(addr1)); + } else { + /* dest after src: copy backwards */ + for (addr1 += (c - 1), addr2 += (c - 1); + c != 0; c--, addr1--, addr2--) + outb(addr2, inb(addr1)); + } + } else { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1++, addr2++) + *(volatile u_int8_t *)(addr2) = + *(volatile u_int8_t *)(addr1); + } else { + /* dest after src: copy backwards */ + for (addr1 += (c - 1), addr2 += (c - 1); + c != 0; c--, addr1--, addr2--) + *(volatile u_int8_t *)(addr2) = + *(volatile u_int8_t *)(addr1); + } + } +} + +static __inline void +x86_64_memio_copy_region_2(t, h1, o1, h2, o2, c) + bus_space_tag_t t; + bus_space_handle_t h1; + bus_size_t o1; + bus_space_handle_t h2; + bus_size_t o2; + size_t c; +{ + bus_addr_t addr1 = h1 + o1; + bus_addr_t addr2 = h2 + o2; + + if (t == X86_64_BUS_SPACE_IO) { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1 += 2, addr2 += 2) + outw(addr2, inw(addr1)); + } else { + /* dest after src: copy backwards */ + for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1); + c != 0; c--, addr1 -= 2, addr2 -= 2) + outw(addr2, inw(addr1)); + } + } else { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1 += 2, addr2 += 2) + *(volatile u_int16_t *)(addr2) = + *(volatile u_int16_t *)(addr1); + } else { + /* dest after src: copy backwards */ + for (addr1 += 2 * (c - 1), addr2 += 2 * (c - 1); + c != 0; c--, addr1 -= 2, addr2 -= 2) + *(volatile u_int16_t *)(addr2) = + *(volatile u_int16_t *)(addr1); + } + } +} + +static __inline void +x86_64_memio_copy_region_4(t, h1, o1, h2, o2, c) + bus_space_tag_t t; + bus_space_handle_t h1; + bus_size_t o1; + bus_space_handle_t h2; + bus_size_t o2; + size_t c; +{ + bus_addr_t addr1 = h1 + o1; + bus_addr_t addr2 = h2 + o2; + + if (t == X86_64_BUS_SPACE_IO) { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1 += 4, addr2 += 4) + outl(addr2, inl(addr1)); + } else { + /* dest 
after src: copy backwards */ + for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1); + c != 0; c--, addr1 -= 4, addr2 -= 4) + outl(addr2, inl(addr1)); + } + } else { + if (addr1 >= addr2) { + /* src after dest: copy forward */ + for (; c != 0; c--, addr1 += 4, addr2 += 4) + *(volatile u_int32_t *)(addr2) = + *(volatile u_int32_t *)(addr1); + } else { + /* dest after src: copy backwards */ + for (addr1 += 4 * (c - 1), addr2 += 4 * (c - 1); + c != 0; c--, addr1 -= 4, addr2 -= 4) + *(volatile u_int32_t *)(addr2) = + *(volatile u_int32_t *)(addr1); + } + } +} + +#if 0 /* Cause a link error for bus_space_copy_8 */ +#define bus_space_copy_region_8 !!! bus_space_copy_region_8 unimplemented !!! +#endif + + +/* + * Bus read/write barrier methods. + * + * void bus_space_barrier __P((bus_space_tag_t tag, + * bus_space_handle_t bsh, bus_size_t offset, + * bus_size_t len, int flags)); + * + * Note: the x86_64 does not currently require barriers, but we must + * provide the flags to MI code. + */ +#define bus_space_barrier(t, h, o, l, f) \ + ((void)((void)(t), (void)(h), (void)(o), (void)(l), (void)(f))) +#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */ +#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */ + + +/* + * Flags used in various bus DMA methods. + */ +#define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */ +#define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */ +#define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */ +#define BUS_DMA_COHERENT 0x04 /* hint: map memory DMA coherent */ +#define BUS_DMA_STREAMING 0x08 /* hint: sequential, unidirectional */ +#define BUS_DMA_BUS1 0x10 /* placeholders for bus functions... */ +#define BUS_DMA_BUS2 0x20 +#define BUS_DMA_BUS3 0x40 +#define BUS_DMA_BUS4 0x80 + +/* Forwards needed by prototypes below. */ +struct mbuf; +struct uio; + +/* + * Operations performed by bus_dmamap_sync(). + */ +#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */ +#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */ +#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */ +#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */ + +typedef struct x86_64_bus_dma_tag *bus_dma_tag_t; +typedef struct x86_64_bus_dmamap *bus_dmamap_t; + +/* + * bus_dma_segment_t + * + * Describes a single contiguous DMA transaction. Values + * are suitable for programming into DMA registers. + */ +struct x86_64_bus_dma_segment { + bus_addr_t ds_addr; /* DMA address */ + bus_size_t ds_len; /* length of transfer */ +}; +typedef struct x86_64_bus_dma_segment bus_dma_segment_t; + +/* + * bus_dma_tag_t + * + * A machine-dependent opaque type describing the implementation of + * DMA for a given bus. + */ + +struct x86_64_bus_dma_tag { + /* + * The `bounce threshold' is checked while we are loading + * the DMA map. If the physical address of the segment + * exceeds the threshold, an error will be returned. The + * caller can then take whatever action is necessary to + * bounce the transfer. If this value is 0, it will be + * ignored. + */ + bus_addr_t _bounce_thresh; + + /* + * DMA mapping methods. 
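+ *
+	 * Machine-independent drivers do not call these members
+	 * directly; they go through the bus_dmamap_*() wrapper macros
+	 * defined further below, which indirect through the per-bus
+	 * tag.  Sketch (the softc members are hypothetical):
+	 *
+	 *	error = bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1,
+	 *	    MAXBSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap);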
+ */ + int (*_dmamap_create) __P((bus_dma_tag_t, bus_size_t, int, + bus_size_t, bus_size_t, int, bus_dmamap_t *)); + void (*_dmamap_destroy) __P((bus_dma_tag_t, bus_dmamap_t)); + int (*_dmamap_load) __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int)); + int (*_dmamap_load_mbuf) __P((bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, int)); + int (*_dmamap_load_uio) __P((bus_dma_tag_t, bus_dmamap_t, + struct uio *, int)); + int (*_dmamap_load_raw) __P((bus_dma_tag_t, bus_dmamap_t, + bus_dma_segment_t *, int, bus_size_t, int)); + void (*_dmamap_unload) __P((bus_dma_tag_t, bus_dmamap_t)); + void (*_dmamap_sync) __P((bus_dma_tag_t, bus_dmamap_t, + bus_addr_t, bus_size_t, int)); + + /* + * DMA memory utility functions. + */ + int (*_dmamem_alloc) __P((bus_dma_tag_t, bus_size_t, bus_size_t, + bus_size_t, bus_dma_segment_t *, int, int *, int)); + void (*_dmamem_free) __P((bus_dma_tag_t, + bus_dma_segment_t *, int)); + int (*_dmamem_map) __P((bus_dma_tag_t, bus_dma_segment_t *, + int, size_t, caddr_t *, int)); + void (*_dmamem_unmap) __P((bus_dma_tag_t, caddr_t, size_t)); + paddr_t (*_dmamem_mmap) __P((bus_dma_tag_t, bus_dma_segment_t *, + int, off_t, int, int)); +}; + +#define bus_dmamap_create(t, s, n, m, b, f, p) \ + (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p)) +#define bus_dmamap_destroy(t, p) \ + (*(t)->_dmamap_destroy)((t), (p)) +#define bus_dmamap_load(t, m, b, s, p, f) \ + (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f)) +#define bus_dmamap_load_mbuf(t, m, b, f) \ + (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f)) +#define bus_dmamap_load_uio(t, m, u, f) \ + (*(t)->_dmamap_load_uio)((t), (m), (u), (f)) +#define bus_dmamap_load_raw(t, m, sg, n, s, f) \ + (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f)) +#define bus_dmamap_unload(t, p) \ + (*(t)->_dmamap_unload)((t), (p)) +#define bus_dmamap_sync(t, p, o, l, ops) \ + (void)((t)->_dmamap_sync ? \ + (*(t)->_dmamap_sync)((t), (p), (o), (l), (ops)) : (void)0) + +#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \ + (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f)) +#define bus_dmamem_free(t, sg, n) \ + (*(t)->_dmamem_free)((t), (sg), (n)) +#define bus_dmamem_map(t, sg, n, s, k, f) \ + (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f)) +#define bus_dmamem_unmap(t, k, s) \ + (*(t)->_dmamem_unmap)((t), (k), (s)) +#define bus_dmamem_mmap(t, sg, n, o, p, f) \ + (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f)) + +/* + * bus_dmamap_t + * + * Describes a DMA mapping. + */ +struct x86_64_bus_dmamap { + /* + * PRIVATE MEMBERS: not for use my machine-independent code. + */ + bus_size_t _dm_size; /* largest DMA transfer mappable */ + int _dm_segcnt; /* number of segs this map can map */ + bus_size_t _dm_maxsegsz; /* largest possible segment */ + bus_size_t _dm_boundary; /* don't cross this */ + bus_addr_t _dm_bounce_thresh; /* bounce threshold; see tag */ + int _dm_flags; /* misc. flags */ + + void *_dm_cookie; /* cookie for bus-specific functions */ + + /* + * PUBLIC MEMBERS: these are used by machine-independent code. 
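+	 *
+	 * After a successful bus_dmamap_load(), a driver typically
+	 * walks these members to program its DMA engine.  Sketch (the
+	 * chip-specific helper is hypothetical):
+	 *
+	 *	for (i = 0; i < map->dm_nsegs; i++)
+	 *		foo_load_seg(sc, map->dm_segs[i].ds_addr,
+	 *		    map->dm_segs[i].ds_len);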
+ */ + bus_size_t dm_mapsize; /* size of the mapping */ + int dm_nsegs; /* # valid segments in mapping */ + bus_dma_segment_t dm_segs[1]; /* segments; variable length */ +}; + +#ifdef _X86_64_BUS_DMA_PRIVATE +int _bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, bus_size_t, + bus_size_t, int, bus_dmamap_t *)); +void _bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t)); +int _bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int)); +int _bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, int)); +int _bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t, + struct uio *, int)); +int _bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t, + bus_dma_segment_t *, int, bus_size_t, int)); +void _bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); +void _bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t, + bus_size_t, int)); + +int _bus_dmamem_alloc __P((bus_dma_tag_t tag, bus_size_t size, + bus_size_t alignment, bus_size_t boundary, + bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)); +void _bus_dmamem_free __P((bus_dma_tag_t tag, bus_dma_segment_t *segs, + int nsegs)); +int _bus_dmamem_map __P((bus_dma_tag_t tag, bus_dma_segment_t *segs, + int nsegs, size_t size, caddr_t *kvap, int flags)); +void _bus_dmamem_unmap __P((bus_dma_tag_t tag, caddr_t kva, + size_t size)); +paddr_t _bus_dmamem_mmap __P((bus_dma_tag_t tag, bus_dma_segment_t *segs, + int nsegs, off_t off, int prot, int flags)); + +int _bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size, + bus_size_t alignment, bus_size_t boundary, + bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags, + paddr_t low, paddr_t high)); +#endif /* _X86_64_BUS_DMA_PRIVATE */ + +#endif /* _X86_64_BUS_H_ */ diff --git a/sys/arch/x86_64/include/byte_swap.h b/sys/arch/x86_64/include/byte_swap.h new file mode 100644 index 000000000000..83b27001c5ce --- /dev/null +++ b/sys/arch/x86_64/include/byte_swap.h @@ -0,0 +1,84 @@ +/* $NetBSD: byte_swap.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copy of the i386 version. 64 bit versions may be added later. + */ + +#ifndef _X86_64_BYTE_SWAP_H_ +#define _X86_64_BYTE_SWAP_H_ + +#define __byte_swap_long_variable(x) __extension__ \ +({ register in_addr_t __x = (x); \ + __asm ("bswap %1" \ + : "=r" (__x) \ + : "0" (__x)); \ + __x; }) + +#define __byte_swap_word_variable(x) __extension__ \ +({ register in_port_t __x = (x); \ + __asm ("rorw $8, %w1" \ + : "=r" (__x) \ + : "0" (__x)); \ + __x; }) + +#ifdef __OPTIMIZE__ + +#define __byte_swap_long_constant(x) \ + ((((x) & 0xff000000) >> 24) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x000000ff) << 24)) +#define __byte_swap_word_constant(x) \ + ((((x) & 0xff00) >> 8) | \ + (((x) & 0x00ff) << 8)) +#define __byte_swap_long(x) \ + (__builtin_constant_p((x)) ? \ + __byte_swap_long_constant(x) : __byte_swap_long_variable(x)) +#define __byte_swap_word(x) \ + (__builtin_constant_p((x)) ? \ + __byte_swap_word_constant(x) : __byte_swap_word_variable(x)) + +#else /* __OPTIMIZE__ */ + +#define __byte_swap_long(x) __byte_swap_long_variable(x) +#define __byte_swap_word(x) __byte_swap_word_variable(x) + +#endif /* __OPTIMIZE__ */ + +#endif /* !_X86_64_BYTE_SWAP_H_ */ diff --git a/sys/arch/x86_64/include/cdefs.h b/sys/arch/x86_64/include/cdefs.h new file mode 100644 index 000000000000..84f96bd5d549 --- /dev/null +++ b/sys/arch/x86_64/include/cdefs.h @@ -0,0 +1,8 @@ +/* $NetBSD: cdefs.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#ifndef _MACHINE_CDEFS_H_ +#define _MACHINE_CDEFS_H_ + +/* No arch-specific cdefs. */ + +#endif /* !_MACHINE_CDEFS_H_ */ diff --git a/sys/arch/x86_64/include/conf.h b/sys/arch/x86_64/include/conf.h new file mode 100644 index 000000000000..29859d82c7be --- /dev/null +++ b/sys/arch/x86_64/include/conf.h @@ -0,0 +1,56 @@ +/* $NetBSD: conf.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1996 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Christos Zoulas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#define mmread mmrw +#define mmwrite mmrw +cdev_decl(mm); + +bdev_decl(fd); +cdev_decl(fd); + +cdev_decl(pc); + +cdev_decl(mms); + +cdev_decl(lms); + +cdev_decl(pms); + +cdev_decl(joy); diff --git a/sys/arch/x86_64/include/cpu.h b/sys/arch/x86_64/include/cpu.h new file mode 100644 index 000000000000..967664247d2e --- /dev/null +++ b/sys/arch/x86_64/include/cpu.h @@ -0,0 +1,238 @@ +/* $NetBSD: cpu.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cpu.h 5.4 (Berkeley) 5/9/91 + */ + +/* + * XXXfvdl plain copy of i386 version, but may change enough in the + * future to be seperate. 
+ */ + +#ifndef _X86_64_CPU_H_ +#define _X86_64_CPU_H_ + +#if defined(_KERNEL) && !defined(_LKM) +#include "opt_multiprocessor.h" +#include "opt_lockdebug.h" +#endif + +/* + * Definitions unique to x86-64 cpu support. + */ +#include +#include +#include + +#include +struct cpu_info { + struct schedstate_percpu ci_schedstate; /* scheduler state */ +#if defined(DIAGNOSTIC) || defined(LOCKDEBUG) + u_long ci_spin_locks; /* # of spin locks held */ + u_long ci_simple_locks; /* # of simple locks held */ +#endif +}; + +#ifdef _KERNEL +extern struct cpu_info cpu_info_store; + +#define curcpu() (&cpu_info_store) +#endif + +/* + * definitions of cpu-dependent requirements + * referenced in generic code + */ +#define cpu_swapin(p) /* nothing */ +#define cpu_number() 0 + +/* + * Arguments to hardclock, softclock and statclock + * encapsulate the previous machine state in an opaque + * clockframe; for now, use generic intrframe. + * + * XXX intrframe has a lot of gunk we don't need. + */ +#define clockframe intrframe + +#define CLKF_USERMODE(frame) USERMODE((frame)->if_cs, (frame)->if_eflags) +#define CLKF_BASEPRI(frame) ((frame)->if_ppl == 0) +#define CLKF_PC(frame) ((frame)->if_rip) +#define CLKF_INTR(frame) ((frame)->if_ppl & (1 << IPL_TAGINTR)) + +/* + * This is used during profiling to integrate system time. It can safely + * assume that the process is resident. + */ +#define PROC_PC(p) ((p)->p_md.md_regs->tf_rip) + +/* + * Preempt the current process if in interrupt from user mode, + * or after the current trap/syscall if in system mode. + */ +int want_resched; /* resched() was called */ +#define need_resched(ci) (want_resched = 1, setsoftast()) + +/* + * Give a profiling tick to the current process when the user profiling + * buffer pages are invalid. On the i386, request an ast to send us + * through trap(), marking the proc as needing a profiling tick. + */ +#define need_proftick(p) ((p)->p_flag |= P_OWEUPC, setsoftast()) + +/* + * Notify the current process (p) that it has a signal pending, + * process as soon as possible. + */ +#define signotify(p) setsoftast() + +/* + * We need a machine-independent name for this. 
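+ * DELAY(n) busy-waits for roughly n microseconds; e.g. a driver that
+ * must let a device settle after a reset could call DELAY(10)
+ * (illustrative only; the 10 microsecond figure is made up).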
+ */ +#define DELAY(x) delay(x) + +/* + * pull in #defines for kinds of processors + */ + +#ifdef _KERNEL +extern int biosbasemem; +extern int biosextmem; +extern int cpu; +extern int cpu_feature; +extern int cpu_id; +extern char cpu_vendor[]; +extern int cpuid_level; + +/* machdep.c */ +void delay __P((int)); +void dumpconf __P((void)); +void cpu_reset __P((void)); +void x86_64_proc0_tss_ldt_init __P((void)); +void x86_64_bufinit __P((void)); + +/* locore.s */ +struct region_descriptor; +void lgdt __P((struct region_descriptor *)); +void fillw __P((short, void *, size_t)); + +struct pcb; +void savectx __P((struct pcb *)); +void switch_exit __P((struct proc *)); +void proc_trampoline __P((void)); + +/* clock.c */ +void initrtclock __P((void)); +void startrtclock __P((void)); + +/* vm_machdep.c */ +int kvtop __P((caddr_t)); + +#if 0 /* XXXfvdl was USER_LDT, need to check if that can be supported */ +/* sys_machdep.h */ +int i386_get_ldt __P((struct proc *, void *, register_t *)); +int i386_set_ldt __P((struct proc *, void *, register_t *)); +#endif + +/* isa_machdep.c */ +void isa_defaultirq __P((void)); +int isa_nmi __P((void)); + +/* trap.c */ +void child_return __P((void *)); + +/* consinit.c */ +void kgdb_port_init __P((void)); + +/* bus_machdep.c */ +void x86_64_bus_space_init __P((void)); +void x86_64_bus_space_mallocok __P((void)); + +#endif /* _KERNEL */ + +/* + * CTL_MACHDEP definitions. + */ +#define CPU_CONSDEV 1 /* dev_t: console terminal device */ +#define CPU_BIOSBASEMEM 2 /* int: bios-reported base mem (K) */ +#define CPU_BIOSEXTMEM 3 /* int: bios-reported ext. mem (K) */ +#define CPU_NKPDE 4 /* int: number of kernel PDEs */ +#define CPU_BOOTED_KERNEL 5 /* string: booted kernel name */ +#define CPU_DISKINFO 6 /* disk geometry information */ +#define CPU_FPU_PRESENT 7 /* FPU is present */ +#define CPU_MAXID 8 /* number of valid machdep ids */ + +#define CTL_MACHDEP_NAMES { \ + { 0, 0 }, \ + { "console_device", CTLTYPE_STRUCT }, \ + { "biosbasemem", CTLTYPE_INT }, \ + { "biosextmem", CTLTYPE_INT }, \ + { "nkpde", CTLTYPE_INT }, \ + { "booted_kernel", CTLTYPE_STRING }, \ + { "diskinfo", CTLTYPE_STRUCT }, \ + { "fpu_present", CTLTYPE_INT }, \ +} + + +/* + * Structure for CPU_DISKINFO sysctl call. + * XXX this should be somewhere else. + */ +#define MAX_BIOSDISKS 16 + +struct disklist { + int dl_nbiosdisks; /* number of bios disks */ + struct biosdisk_info { + int bi_dev; /* BIOS device # (0x80 ..) */ + int bi_cyl; /* cylinders on disk */ + int bi_head; /* heads per track */ + int bi_sec; /* sectors per track */ + u_int64_t bi_lbasecs; /* total sec. (iff ext13) */ +#define BIFLAG_INVALID 0x01 +#define BIFLAG_EXTINT13 0x02 + int bi_flags; + } dl_biosdisks[MAX_BIOSDISKS]; + + int dl_nnativedisks; /* number of native disks */ + struct nativedisk_info { + char ni_devname[16]; /* native device name */ + int ni_nmatches; /* # of matches w/ BIOS */ + int ni_biosmatches[MAX_BIOSDISKS]; /* indices in dl_biosdisks */ + } dl_nativedisks[1]; /* actually longer */ +}; + +#endif /* !_X86_64_CPU_H_ */ diff --git a/sys/arch/x86_64/include/cpufunc.h b/sys/arch/x86_64/include/cpufunc.h new file mode 100644 index 000000000000..f92173fee1fa --- /dev/null +++ b/sys/arch/x86_64/include/cpufunc.h @@ -0,0 +1,228 @@ +/* $NetBSD: cpufunc.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _X86_64_CPUFUNC_H_ +#define _X86_64_CPUFUNC_H_ + +/* + * Functions to provide access to i386-specific instructions. + */ + +#include +#include + +#ifdef _KERNEL + +static __inline void +invlpg(u_int64_t addr) +{ + __asm __volatile("invlpg (%0)" : : "r" (addr) : "memory"); +} + +static __inline void +lidt(void *p) +{ + __asm __volatile("lidt (%0)" : : "r" (p)); +} + +static __inline void +lldt(u_short sel) +{ + __asm __volatile("lldt %0" : : "r" (sel)); +} + +static __inline void +ltr(u_short sel) +{ + __asm __volatile("ltr %0" : : "r" (sel)); +} + +/* + * Upper 32 bits are reserved anyway, so just keep this 32bits. + */ +static __inline void +lcr0(u_int val) +{ + u_int64_t val64 = val; + __asm __volatile("movq %0,%%cr0" : : "r" (val64)); +} + +static __inline u_int +rcr0(void) +{ + u_int64_t val64; + u_int val; + __asm __volatile("movq %%cr0,%0" : "=r" (val64)); + val = val64; + return val; +} + +static __inline u_int64_t +rcr2(void) +{ + u_int64_t val; + __asm __volatile("movq %%cr2,%0" : "=r" (val)); + return val; +} + +static __inline void +lcr3(u_int64_t val) +{ + __asm __volatile("movq %0,%%cr3" : : "r" (val)); +} + +static __inline u_int64_t +rcr3(void) +{ + u_int64_t val; + __asm __volatile("movq %%cr3,%0" : "=r" (val)); + return val; +} + +/* + * Same as for cr0. Don't touch upper 32 bits. 
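+ * A typical read-modify-write of a %cr4 bit would be (sketch; it is
+ * assumed here that specialreg.h provides a CR4_PGE constant as on
+ * the i386):
+ *
+ *	lcr4(rcr4() | CR4_PGE);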
+ */ +static __inline void +lcr4(u_int val) +{ + u_int64_t val64 = val; + + __asm __volatile("movq %0,%%cr4" : : "r" (val64)); +} + +static __inline u_int +rcr4(void) +{ + u_int val; + u_int64_t val64; + __asm __volatile("movq %%cr4,%0" : "=r" (val64)); + val = val64; + return val; +} + +static __inline void +tlbflush(void) +{ + u_int64_t val; + __asm __volatile("movq %%cr3,%0" : "=r" (val)); + __asm __volatile("movq %0,%%cr3" : : "r" (val)); +} + +#ifdef notyet +void setidt __P((int idx, /*XXX*/caddr_t func, int typ, int dpl)); +#endif + + +/* XXXX ought to be in psl.h with spl() functions */ + +static __inline void +disable_intr(void) +{ + __asm __volatile("cli"); +} + +static __inline void +enable_intr(void) +{ + __asm __volatile("sti"); +} + +static __inline u_long +read_eflags(void) +{ + u_long ef; + + __asm __volatile("pushfq; popq %0" : "=r" (ef)); + return (ef); +} + +static __inline void +write_eflags(u_long ef) +{ + __asm __volatile("pushq %0; popfq" : : "r" (ef)); +} + +static __inline u_int64_t +rdmsr(u_int msr) +{ + u_int64_t rv; + + __asm __volatile("rdmsr" : "=A" (rv) : "c" (msr)); + return (rv); +} + +static __inline void +wrmsr(u_int msr, u_int64_t newval) +{ + __asm __volatile("wrmsr" : : "A" (newval), "c" (msr)); +} + +static __inline void +wbinvd(void) +{ + __asm __volatile("wbinvd"); +} + +static __inline u_int64_t +rdtsc(void) +{ + u_int64_t rv; + + __asm __volatile("rdtsc" : "=A" (rv)); + return (rv); +} + +static __inline u_int64_t +rdpmc(u_int pmc) +{ + u_int64_t rv; + + __asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc)); + return (rv); +} + +/* Break into DDB/KGDB. */ +static __inline void +breakpoint(void) +{ + __asm __volatile("int $3"); +} + +#endif /* _KERNEL */ + +#endif /* !_X86_64_CPUFUNC_H_ */ diff --git a/sys/arch/x86_64/include/disklabel.h b/sys/arch/x86_64/include/disklabel.h new file mode 100644 index 000000000000..0c7c8d512681 --- /dev/null +++ b/sys/arch/x86_64/include/disklabel.h @@ -0,0 +1,62 @@ +/* $NetBSD: disklabel.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/* + * Copyright (c) 1994 Christopher G. Demetriou + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Christopher G. Demetriou. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_DISKLABEL_H_ +#define _MACHINE_DISKLABEL_H_ + +#define LABELSECTOR 1 /* sector containing label */ +#define LABELOFFSET 0 /* offset of label in sector */ +#define MAXPARTITIONS 16 /* number of partitions */ +#define RAW_PART 3 /* raw partition: XX?d (XXX) */ + +/* + * We use the highest bit of the minor number for the partition number. + * This maintains backward compatibility with device nodes created before + * MAXPARTITIONS was increased. + */ +/* Pull in MBR partition definitions. */ +#include + +#ifndef __ASSEMBLER__ +#include +struct cpu_disklabel { + struct mbr_partition dosparts[NMBRPART]; + struct dkbad bad; +}; +#endif + +#ifdef _KERNEL +struct disklabel; +int bounds_check_with_label __P((struct buf *, struct disklabel *, int)); +#endif + +#endif /* _MACHINE_DISKLABEL_H_ */ diff --git a/sys/arch/x86_64/include/elf_machdep.h b/sys/arch/x86_64/include/elf_machdep.h new file mode 100644 index 000000000000..800f56ffe3b8 --- /dev/null +++ b/sys/arch/x86_64/include/elf_machdep.h @@ -0,0 +1,34 @@ +/* $NetBSD: elf_machdep.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#define ELF32_MACHDEP_ENDIANNESS +#define ELF32_MACHDEP_ID_CASES \ + case EM_386: \ + break; + +#define ELF64_MACHDEP_ENDIANNESS ELFDATA2LSB +#define ELF64_MACHDEP_ID_CASES \ + case EM_X8664: \ + break; + +#define ARCH_ELFSIZE 64 /* MD native binary size */ + +/* x86-64 relocations */ + +#define R_X86_64_NONE 0 +#define R_X86_64_64 1 +#define R_X86_64_PC32 2 +#define R_X86_64_GOT32 3 +#define R_X86_64_PLT32 4 +#define R_X86_64_COPY 5 +#define R_X86_64_GLOB_DAT 6 +#define R_X86_64_JUMP_SLOT 7 +#define R_X86_64_RELATIVE 8 +#define R_X86_64_GOTPCREL 9 +#define R_X86_64_32 10 +#define R_X86_64_32S 11 +#define R_X86_64_16 12 +#define R_X86_64_PC16 13 +#define R_X86_64_8 14 +#define R_X86_64_PC8 15 + +#define R_TYPE(name) __CONCAT(R_X86_64_,name) diff --git a/sys/arch/x86_64/include/endian.h b/sys/arch/x86_64/include/endian.h new file mode 100644 index 000000000000..c09808bca567 --- /dev/null +++ b/sys/arch/x86_64/include/endian.h @@ -0,0 +1,3 @@ +/* $NetBSD: endian.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#include diff --git a/sys/arch/x86_64/include/endian_machdep.h b/sys/arch/x86_64/include/endian_machdep.h new file mode 100644 index 000000000000..86cc1331cb17 --- /dev/null +++ b/sys/arch/x86_64/include/endian_machdep.h @@ -0,0 +1,14 @@ +/* $NetBSD: endian_machdep.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#define _BYTE_ORDER _LITTLE_ENDIAN + +#ifdef __GNUC__ + +#include + +#define ntohl(x) ((in_addr_t)__byte_swap_long((in_addr_t)(x))) +#define ntohs(x) ((in_port_t)__byte_swap_word((in_port_t)(x))) +#define htonl(x) ((in_addr_t)__byte_swap_long((in_addr_t)(x))) +#define htons(x) ((in_port_t)__byte_swap_word((in_port_t)(x))) + +#endif diff --git a/sys/arch/x86_64/include/float.h b/sys/arch/x86_64/include/float.h new file mode 100644 index 000000000000..f6e7fa550dbc --- /dev/null +++ b/sys/arch/x86_64/include/float.h @@ -0,0 +1,12 @@ +/* $NetBSD: float.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ 
+ +#ifndef _X86_64_FLOAT_H_ +#define _X86_64_FLOAT_H_ + +#ifdef _KERNEL +#include +#else +#include +#endif + +#endif /* _X86_64_FLOAT_H_ */ diff --git a/sys/arch/x86_64/include/fpu.h b/sys/arch/x86_64/include/fpu.h new file mode 100644 index 000000000000..09b3f3c95b4c --- /dev/null +++ b/sys/arch/x86_64/include/fpu.h @@ -0,0 +1,84 @@ +/* $NetBSD: fpu.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#ifndef _X86_64_FPU_H_ +#define _X86_64_FPU_H_ + +/* + * NetBSD/x86_64 only uses the extended save/restore format used + * by fxsave/fsrestore, to always deal with the SSE registers, + * which are part of the ABI to pass floating point values. + */ + +struct fxsave64 { + u_int64_t fx_fcw:16; + u_int64_t fx_fsw:16; + u_int64_t fx_unused1:8; + u_int64_t fx_ftw:8; + u_int64_t fx_fop:16; + u_int64_t fx_rip; + u_int64_t fx_dp; + u_int64_t fx_mxcsr:32; + u_int64_t fx_unused2:32; + u_int64_t fx_st[8 * 2]; /* 8 normal FP regs */ + u_int64_t fx_xmm[16 * 2]; /* 16 SSE2 registers */ + u_int8_t fx_unused3[96]; +} __attribute__ ((aligned (16))); + +/* + * This one only used for backward compat coredumping. + */ +struct oldfsave { + u_int16_t fs_control; + u_int16_t fs_unused0; + u_int16_t fs_status; + u_int16_t fs_unused1; + u_int16_t fs_tag; + u_int16_t fs_unused2; + u_int32_t fs_ipoff; + u_int16_t fs_ipsel; + u_int16_t fs_op; + u_int32_t fs_opoff; + u_int16_t fs_opsel; +} __attribute__ ((packed)); + + +/* + * The i387 defaults to Intel extended precision mode and round to nearest, + * with all exceptions masked. + * XXXfvdl check this. This stuff is probably invalid. + */ +#define __INITIAL_NPXCW__ 0x037f +/* NetBSD uses IEEE double precision. */ +#define __NetBSD_NPXCW__ 0x127f +/* Linux just uses the default control word. */ +#define __Linux_NPXCW__ 0x037f + +/* + * The standard control word from finit is 0x37F, giving: + * round to nearest + * 64-bit precision + * all exceptions masked. + * + * Now we want: + * affine mode (if we decide to support 287's) + * round to nearest + * 53-bit precision + * all exceptions masked. + * + * 64-bit precision often gives bad results with high level languages + * because it makes the results of calculations depend on whether + * intermediate values are stored in memory or in FPU registers. + */ +/* + * XXX + */ +struct trapframe; + +extern void fpuinit(void); +extern void fpudrop(void); +extern void fpusave(void); +extern void fputrap(struct trapframe *); + +extern struct proc *fpuproc; + +#endif /* _X86_64_FPU_H_ */ diff --git a/sys/arch/x86_64/include/frame.h b/sys/arch/x86_64/include/frame.h new file mode 100644 index 000000000000..5123e4b3568e --- /dev/null +++ b/sys/arch/x86_64/include/frame.h @@ -0,0 +1,179 @@ +/* $NetBSD: frame.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)frame.h 5.2 (Berkeley) 1/18/91 + */ + +/* + * Adapted for NetBSD/x86_64 by fvdl@wasabisystems.com + */ + +#ifndef _X86_64_FRAME_H_ +#define _X86_64_FRAME_H_ + +#include +#include + +/* + * System stack frames. 
+ */ + +/* + * Exception/Trap Stack Frame + */ +struct trapframe { + int64_t tf_r15; + int64_t tf_r14; + int64_t tf_r13; + int64_t tf_r12; + int64_t tf_r11; + int64_t tf_r10; + int64_t tf_r9; + int64_t tf_r8; + int64_t tf_rdi; + int64_t tf_rsi; + int64_t tf_rbp; + int64_t tf_rbx; + int64_t tf_rdx; + int64_t tf_rcx; + int64_t tf_rax; + int64_t tf_trapno; + /* below portion defined in hardware */ + int64_t tf_err; + int64_t tf_rip; + int64_t tf_cs; + int64_t tf_eflags; + /* These are pushed unconditionally on the x86-64 */ + int64_t tf_rsp; + int64_t tf_ss; +}; + +/* + * Interrupt stack frame + */ +struct intrframe { + int64_t if_ppl; + int64_t if_r15; + int64_t if_r14; + int64_t if_r13; + int64_t if_r12; + int64_t if_r11; + int64_t if_r10; + int64_t if_r9; + int64_t if_r8; + int64_t if_rdi; + int64_t if_rsi; + int64_t if_rbp; + int64_t if_rbx; + int64_t if_rdx; + int64_t if_rcx; + int64_t if_rax; + u_int64_t __if_trapno; /* for compat with trap frame - trapno */ + u_int64_t __if_err; /* for compat with trap frame - err */ + /* below portion defined in hardware */ + int64_t if_rip; + int64_t if_cs; + int64_t if_eflags; + /* These are pushed unconditionally on the x86-64 */ + int64_t if_rsp; + int64_t if_ss; +}; + +/* + * Stack frame inside cpu_switch() + */ +struct switchframe { + int64_t sf_ppl; + int64_t sf_r15; + int64_t sf_r14; + int64_t sf_r13; + int64_t sf_r12; + int64_t sf_rbp; + int64_t sf_rbx; + int64_t sf_rip; +}; + +/* + * Signal frame + */ +struct sigframe { + int64_t sf_signum; + int64_t sf_code; + struct sigcontext *sf_scp; + sig_t sf_handler; + struct sigcontext sf_sc; + struct fxsave64 *sf_fpp; + struct fxsave64 sf_fp; +}; + +#endif /* _X86_64FRAME_H_ */ diff --git a/sys/arch/x86_64/include/frameasm.h b/sys/arch/x86_64/include/frameasm.h new file mode 100644 index 000000000000..44d896d4df2e --- /dev/null +++ b/sys/arch/x86_64/include/frameasm.h @@ -0,0 +1,53 @@ +/* $NetBSD: frameasm.h,v 1.1 2001/06/19 00:20:10 fvdl Exp $ */ + +#ifndef _X86_64_MACHINE_FRAMEASM_H +#define _X86_64_MACHINE_FRAMEASM_H + +/* + * Macros to define pushing/popping frames for interrupts, traps + * and system calls. Currently all the same; will diverge later. + */ + +/* + * These are used on interrupt or trap entry or exit. + */ +#define INTRENTRY \ + pushq %rax ; \ + pushq %rcx ; \ + pushq %rdx ; \ + pushq %rbx ; \ + pushq %rbp ; \ + pushq %rsi ; \ + pushq %rdi ; \ + pushq %r8 ; \ + pushq %r9 ; \ + pushq %r10 ; \ + pushq %r11 ; \ + pushq %r12 ; \ + pushq %r13 ; \ + pushq %r14 ; \ + pushq %r15 + +#define INTR_RESTOREARGS \ + popq %r15 ; \ + popq %r14 ; \ + popq %r13 ; \ + popq %r12 ; \ + popq %r11 ; \ + popq %r10 ; \ + popq %r9 ; \ + popq %r8 ; \ + popq %rdi ; \ + popq %rsi ; \ + popq %rbp ; \ + popq %rbx ; \ + popq %rdx ; \ + popq %rcx ; \ + popq %rax + +#define INTRFASTEXIT \ + INTR_RESTOREARGS ; \ + addq $16,%rsp ; \ + iretq + +#endif /* _X86_64_MACHINE_FRAMEASM_H */ diff --git a/sys/arch/x86_64/include/gdt.h b/sys/arch/x86_64/include/gdt.h new file mode 100644 index 000000000000..4fa469592204 --- /dev/null +++ b/sys/arch/x86_64/include/gdt.h @@ -0,0 +1,46 @@ +/* $NetBSD: gdt.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by John T. Kohl and Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +struct proc; +struct pmap; + +void gdt_init __P((void)); +void tss_alloc __P((struct proc *)); +void tss_free __P((struct proc *)); +void ldt_alloc __P((struct pmap *, char *, size_t)); +void ldt_free __P((struct pmap *)); diff --git a/sys/arch/x86_64/include/ieee.h b/sys/arch/x86_64/include/ieee.h new file mode 100644 index 000000000000..218bdddbe723 --- /dev/null +++ b/sys/arch/x86_64/include/ieee.h @@ -0,0 +1,7 @@ +/* $NetBSD: ieee.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +#ifdef _KERNEL +#include +#else +#include +#endif diff --git a/sys/arch/x86_64/include/ieeefp.h b/sys/arch/x86_64/include/ieeefp.h new file mode 100644 index 000000000000..88a31dd5eaec --- /dev/null +++ b/sys/arch/x86_64/include/ieeefp.h @@ -0,0 +1,12 @@ +/* $NetBSD: ieeefp.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +#ifndef _X86_64_IEEEFP_H_ +#define _X86_64_IEEEFP_H_ + +#ifdef _KERNEL +#include +#else +#include +#endif + +#endif /* _X86_64_IEEEFP_H_ */ diff --git a/sys/arch/x86_64/include/int_const.h b/sys/arch/x86_64/include/int_const.h new file mode 100644 index 000000000000..e3f33d84401a --- /dev/null +++ b/sys/arch/x86_64/include/int_const.h @@ -0,0 +1,63 @@ +/* $NetBSD: int_const.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Klaus Klein. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _X86_64_INT_CONST_H_ +#define _X86_64_INT_CONST_H_ + +/* + * 7.18.4 Macros for integer constants + */ + +/* 7.18.4.1 Macros for minimum-width integer constants */ + +#define INT8_C(c) c +#define INT16_C(c) c +#define INT32_C(c) c +#define INT64_C(c) c ## L + +#define UINT8_C(c) c ## U +#define UINT16_C(c) c ## U +#define UINT32_C(c) c ## U +#define UINT64_C(c) c ## UL + +/* 7.18.4.2 Macros for greatest-width integer constants */ + +#define INTMAX_C(c) c ## L +#define UINTMAX_C(c) c ## UL + +#endif /* !_X86_64_INT_CONST_H_ */ diff --git a/sys/arch/x86_64/include/int_fmtio.h b/sys/arch/x86_64/include/int_fmtio.h new file mode 100644 index 000000000000..c58fe8227646 --- /dev/null +++ b/sys/arch/x86_64/include/int_fmtio.h @@ -0,0 +1,219 @@ +/* $NetBSD: int_fmtio.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Klaus Klein. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
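A brief illustration of the int_const.h macros above: on this LP64 target they mostly reduce to the L/UL suffixes, and that is their whole point, because a 64-bit constant written without a suffix is first evaluated as plain int. A minimal standalone sketch, with hypothetical variable names, assuming the usual stdint wrappers pull this header in:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	/* 1 << 40 would be evaluated in plain int, which is too narrow. */
    	uint64_t bit = UINT64_C(1) << 40;	/* 1UL << 40 on this port */

    	printf("%lx\n", (unsigned long)bit);	/* prints 10000000000 */
    	return 0;
    }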
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _X86_64_INT_FMTIO_H_ +#define _X86_64_INT_FMTIO_H_ + +/* + * 7.8.1 Macros for format specifiers + */ + +/* fprintf macros for signed integers */ + +#define PRId8 "d" /* int8_t */ +#define PRId16 "d" /* int16_t */ +#define PRId32 "d" /* int32_t */ +#define PRId64 "ld" /* int64_t */ +#define PRIdLEAST8 "d" /* int_least8_t */ +#define PRIdLEAST16 "d" /* int_least16_t */ +#define PRIdLEAST32 "d" /* int_least32_t */ +#define PRIdLEAST64 "ld" /* int_least64_t */ +#define PRIdFAST8 "d" /* int_fast8_t */ +#define PRIdFAST16 "d" /* int_fast16_t */ +#define PRIdFAST32 "d" /* int_fast32_t */ +#define PRIdFAST64 "ld" /* int_fast64_t */ +#define PRIdMAX "ld" /* intmax_t */ +#define PRIdPTR "ld" /* intptr_t */ + +#define PRIi8 "i" /* int8_t */ +#define PRIi16 "i" /* int16_t */ +#define PRIi32 "i" /* int32_t */ +#define PRIi64 "li" /* int64_t */ +#define PRIiLEAST8 "i" /* int_least8_t */ +#define PRIiLEAST16 "i" /* int_least16_t */ +#define PRIiLEAST32 "i" /* int_least32_t */ +#define PRIiLEAST64 "li" /* int_least64_t */ +#define PRIiFAST8 "i" /* int_fast8_t */ +#define PRIiFAST16 "i" /* int_fast16_t */ +#define PRIiFAST32 "i" /* int_fast32_t */ +#define PRIiFAST64 "li" /* int_fast64_t */ +#define PRIiMAX "li" /* intmax_t */ +#define PRIiPTR "li" /* intptr_t */ + +/* fprintf macros for unsigned integers */ + +#define PRIo8 "o" /* uint8_t */ +#define PRIo16 "o" /* uint16_t */ +#define PRIo32 "o" /* uint32_t */ +#define PRIo64 "lo" /* uint64_t */ +#define PRIoLEAST8 "o" /* uint_least8_t */ +#define PRIoLEAST16 "o" /* uint_least16_t */ +#define PRIoLEAST32 "o" /* uint_least32_t */ +#define PRIoLEAST64 "lo" /* uint_least64_t */ +#define PRIoFAST8 "o" /* uint_fast8_t */ +#define PRIoFAST16 "o" /* uint_fast16_t */ +#define PRIoFAST32 "o" /* uint_fast32_t */ +#define PRIoFAST64 "lo" /* uint_fast64_t */ +#define PRIoMAX "lo" /* uintmax_t */ +#define PRIoPTR "lo" /* uintptr_t */ + +#define PRIu8 "u" /* uint8_t */ +#define PRIu16 "u" /* uint16_t */ +#define PRIu32 "u" /* uint32_t */ +#define PRIu64 "lu" /* uint64_t */ +#define PRIuLEAST8 "u" /* uint_least8_t */ +#define PRIuLEAST16 "u" /* uint_least16_t */ +#define PRIuLEAST32 "u" /* uint_least32_t */ +#define PRIuLEAST64 "lu" /* uint_least64_t */ +#define PRIuFAST8 "u" /* uint_fast8_t */ +#define PRIuFAST16 "u" /* uint_fast16_t */ +#define PRIuFAST32 "u" /* uint_fast32_t */ +#define PRIuFAST64 "lu" /* uint_fast64_t */ +#define PRIuMAX "lu" /* uintmax_t */ +#define PRIuPTR "lu" /* uintptr_t */ + +#define PRIx8 "x" /* uint8_t */ +#define PRIx16 "x" /* uint16_t */ +#define PRIx32 "x" /* uint32_t */ +#define PRIx64 "lx" /* uint64_t */ +#define PRIxLEAST8 "x" /* uint_least8_t */ +#define PRIxLEAST16 "x" /* uint_least16_t */ +#define PRIxLEAST32 "x" /* uint_least32_t */ +#define PRIxLEAST64 "lx" /* uint_least64_t */ +#define PRIxFAST8 "x" /* uint_fast8_t */ +#define PRIxFAST16 "x" /* uint_fast16_t */ +#define PRIxFAST32 "x" /* uint_fast32_t */ +#define PRIxFAST64 "lx" /* uint_fast64_t */ +#define PRIxMAX "lx" /* uintmax_t */ 
+#define PRIxPTR "lx" /* uintptr_t */ + +#define PRIX8 "X" /* uint8_t */ +#define PRIX16 "X" /* uint16_t */ +#define PRIX32 "X" /* uint32_t */ +#define PRIX64 "lX" /* uint64_t */ +#define PRIXLEAST8 "X" /* uint_least8_t */ +#define PRIXLEAST16 "X" /* uint_least16_t */ +#define PRIXLEAST32 "X" /* uint_least32_t */ +#define PRIXLEAST64 "lX" /* uint_least64_t */ +#define PRIXFAST8 "X" /* uint_fast8_t */ +#define PRIXFAST16 "X" /* uint_fast16_t */ +#define PRIXFAST32 "X" /* uint_fast32_t */ +#define PRIXFAST64 "lX" /* uint_fast64_t */ +#define PRIXMAX "lX" /* uintmax_t */ +#define PRIXPTR "lX" /* uintptr_t */ + +/* fscanf macros for signed integers */ + +#define SCNd8 "hhd" /* int8_t */ +#define SCNd16 "hd" /* int16_t */ +#define SCNd32 "d" /* int32_t */ +#define SCNd64 "ld" /* int64_t */ +#define SCNdLEAST8 "hhd" /* int_least8_t */ +#define SCNdLEAST16 "hd" /* int_least16_t */ +#define SCNdLEAST32 "d" /* int_least32_t */ +#define SCNdLEAST64 "ld" /* int_least64_t */ +#define SCNdFAST8 "hhd" /* int_fast8_t */ +#define SCNdFAST16 "hd" /* int_fast16_t */ +#define SCNdFAST32 "d" /* int_fast32_t */ +#define SCNdFAST64 "ld" /* int_fast64_t */ +#define SCNdMAX "ld" /* intmax_t */ +#define SCNdPTR "ld" /* intptr_t */ + +#define SCNi8 "hhi" /* int8_t */ +#define SCNi16 "hi" /* int16_t */ +#define SCNi32 "i" /* int32_t */ +#define SCNi64 "lli" /* int64_t */ +#define SCNiLEAST8 "hhi" /* int_least8_t */ +#define SCNiLEAST16 "hi" /* int_least16_t */ +#define SCNiLEAST32 "i" /* int_least32_t */ +#define SCNiLEAST64 "li" /* int_least64_t */ +#define SCNiFAST8 "hhi" /* int_fast8_t */ +#define SCNiFAST16 "hi" /* int_fast16_t */ +#define SCNiFAST32 "i" /* int_fast32_t */ +#define SCNiFAST64 "li" /* int_fast64_t */ +#define SCNiMAX "li" /* intmax_t */ +#define SCNiPTR "li" /* intptr_t */ + +/* fscanf macros for unsigned integers */ + +#define SCNo8 "hho" /* uint8_t */ +#define SCNo16 "ho" /* uint16_t */ +#define SCNo32 "o" /* uint32_t */ +#define SCNo64 "lo" /* uint64_t */ +#define SCNoLEAST8 "hho" /* uint_least8_t */ +#define SCNoLEAST16 "ho" /* uint_least16_t */ +#define SCNoLEAST32 "o" /* uint_least32_t */ +#define SCNoLEAST64 "lo" /* uint_least64_t */ +#define SCNoFAST8 "hho" /* uint_fast8_t */ +#define SCNoFAST16 "ho" /* uint_fast16_t */ +#define SCNoFAST32 "o" /* uint_fast32_t */ +#define SCNoFAST64 "lo" /* uint_fast64_t */ +#define SCNoMAX "lo" /* uintmax_t */ +#define SCNoPTR "lo" /* uintptr_t */ + +#define SCNu8 "hhu" /* uint8_t */ +#define SCNu16 "hu" /* uint16_t */ +#define SCNu32 "u" /* uint32_t */ +#define SCNu64 "lu" /* uint64_t */ +#define SCNuLEAST8 "hhu" /* uint_least8_t */ +#define SCNuLEAST16 "hu" /* uint_least16_t */ +#define SCNuLEAST32 "u" /* uint_least32_t */ +#define SCNuLEAST64 "lu" /* uint_least64_t */ +#define SCNuFAST8 "hhu" /* uint_fast8_t */ +#define SCNuFAST16 "hu" /* uint_fast16_t */ +#define SCNuFAST32 "u" /* uint_fast32_t */ +#define SCNuFAST64 "lu" /* uint_fast64_t */ +#define SCNuMAX "lu" /* uintmax_t */ +#define SCNuPTR "lu" /* uintptr_t */ + +#define SCNx8 "hhx" /* uint8_t */ +#define SCNx16 "hx" /* uint16_t */ +#define SCNx32 "x" /* uint32_t */ +#define SCNx64 "lx" /* uint64_t */ +#define SCNxLEAST8 "hhx" /* uint_least8_t */ +#define SCNxLEAST16 "hx" /* uint_least16_t */ +#define SCNxLEAST32 "x" /* uint_least32_t */ +#define SCNxLEAST64 "lx" /* uint_least64_t */ +#define SCNxFAST8 "x" /* uint_fast8_t */ +#define SCNxFAST16 "x" /* uint_fast16_t */ +#define SCNxFAST32 "x" /* uint_fast32_t */ +#define SCNxFAST64 "lx" /* uint_fast64_t */ +#define SCNxMAX "lx" /* uintmax_t */ 
+#define SCNxPTR "lx" /* uintptr_t */ + +#endif /* !_X86_64_INT_FMTIO_H_ */ diff --git a/sys/arch/x86_64/include/int_limits.h b/sys/arch/x86_64/include/int_limits.h new file mode 100644 index 000000000000..5c1b1a9e0024 --- /dev/null +++ b/sys/arch/x86_64/include/int_limits.h @@ -0,0 +1,144 @@ +/* $NetBSD: int_limits.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Klaus Klein. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
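Returning to the format macros in int_fmtio.h above: they encode the LP64 choice that int64_t, intmax_t and the pointer-sized types are plain long, so the 64-bit specifiers are "ld"/"lx" rather than the "lld"/"llx" an ILP32 port would use. A short usage sketch (standalone C, hypothetical values):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int
    main(void)
    {
    	int64_t off = -42;
    	uintptr_t p = (uintptr_t)&off;

    	/* The format string becomes "off %ld at %lx\n" on this port. */
    	printf("off %" PRId64 " at %" PRIxPTR "\n", off, p);
    	return 0;
    }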
+ */ + +#ifndef _X86_64_INT_LIMITS_H_ +#define _X86_64_INT_LIMITS_H_ + +/* + * 7.18.2 Limits of specified-width integer types + */ + +/* 7.18.2.1 Limits of exact-width integer types */ + +/* minimum values of exact-width signed integer types */ +#define INT8_MIN (-0x7f-1) /* int8_t */ +#define INT16_MIN (-0x7fff-1) /* int16_t */ +#define INT32_MIN (-0x7fffffff-1) /* int32_t */ +#define INT64_MIN (-0x7fffffffffffffffL-1) /* int64_t */ + +/* maximum values of exact-width signed integer types */ +#define INT8_MAX 0x7f /* int8_t */ +#define INT16_MAX 0x7fff /* int16_t */ +#define INT32_MAX 0x7fffffff /* int32_t */ +#define INT64_MAX 0x7fffffffffffffffL /* int64_t */ + +/* maximum values of exact-width unsigned integer types */ +#define UINT8_MAX 0xffU /* uint8_t */ +#define UINT16_MAX 0xffffU /* uint16_t */ +#define UINT32_MAX 0xffffffffU /* uint32_t */ +#define UINT64_MAX 0xffffffffffffffffUL /* uint64_t */ + +/* 7.18.2.2 Limits of minimum-width integer types */ + +/* minimum values of minimum-width signed integer types */ +#define INT_LEAST8_MIN (-0x7f-1) /* int_least8_t */ +#define INT_LEAST16_MIN (-0x7fff-1) /* int_least16_t */ +#define INT_LEAST32_MIN (-0x7fffffff-1) /* int_least32_t */ +#define INT_LEAST64_MIN (-0x7fffffffffffffffL-1) /* int_least64_t */ + +/* maximum values of minimum-width signed integer types */ +#define INT_LEAST8_MAX 0x7f /* int_least8_t */ +#define INT_LEAST16_MAX 0x7fff /* int_least16_t */ +#define INT_LEAST32_MAX 0x7fffffff /* int_least32_t */ +#define INT_LEAST64_MAX 0x7fffffffffffffffL /* int_least64_t */ + +/* maximum values of minimum-width unsigned integer types */ +#define UINT_LEAST8_MAX 0xffU /* uint_least8_t */ +#define UINT_LEAST16_MAX 0xffffU /* uint_least16_t */ +#define UINT_LEAST32_MAX 0xffffffffU /* uint_least32_t */ +#define UINT_LEAST64_MAX 0xffffffffffffffffUL /* uint_least64_t */ + +/* 7.18.2.3 Limits of fastest minimum-width integer types */ + +/* minimum values of fastest minimum-width signed integer types */ +#define INT_FAST8_MIN (-0x7fffffff-1) /* int_fast8_t */ +#define INT_FAST16_MIN (-0x7fffffff-1) /* int_fast16_t */ +#define INT_FAST32_MIN (-0x7fffffff-1) /* int_fast32_t */ +#define INT_FAST64_MIN (-0x7fffffffffffffffLL-1) /* int_fast64_t */ + +/* maximum values of fastest minimum-width signed integer types */ +#define INT_FAST8_MAX 0x7fffffff /* int_fast8_t */ +#define INT_FAST16_MAX 0x7fffffff /* int_fast16_t */ +#define INT_FAST32_MAX 0x7fffffff /* int_fast32_t */ +#define INT_FAST64_MAX 0x7fffffffffffffffLL /* int_fast64_t */ + +/* maximum values of fastest minimum-width unsigned integer types */ +#define UINT_FAST8_MAX 0xffffffffU /* uint_fast8_t */ +#define UINT_FAST16_MAX 0xffffffffU /* uint_fast16_t */ +#define UINT_FAST32_MAX 0xffffffffU /* uint_fast32_t */ +#define UINT_FAST64_MAX 0xffffffffffffffffULL /* uint_fast64_t */ + +/* 7.18.2.4 Limits of integer types capable of holding object pointers */ + +#define INTPTR_MIN (-0x7fffffffffffffffL-1) /* intptr_t */ +#define INTPTR_MAX 0x7fffffffffffffffL /* intptr_t */ +#define UINTPTR_MAX 0xffffffffffffffffUL /* uintptr_t */ + +/* 7.18.2.5 Limits of greatest-width integer types */ + +#define INTMAX_MIN (-0x7fffffffffffffffL-1) /* intmax_t */ +#define INTMAX_MAX 0x7fffffffffffffffL /* intmax_t */ +#define UINTMAX_MAX 0xffffffffffffffffUL /* uintmax_t */ + + +/* + * 7.18.3 Limits of other integer types + */ + +/* limits of ptrdiff_t */ +#define PTRDIFF_MIN (-0x7fffffffffffffffL-1) /* ptrdiff_t */ +#define PTRDIFF_MAX 0x7fffffffffffffffL /* ptrdiff_t */ + +/* limits of sig_atomic_t */ 
+#define SIG_ATOMIC_MIN (-0x7fffffffffffffffL-1) /* sig_atomic_t */ +#define SIG_ATOMIC_MAX 0x7fffffffffffffffL /* sig_atomic_t */ + +/* limit of size_t */ +#define SIZE_MAX 0xffffffffffffffffUL /* size_t */ + +#ifndef WCHAR_MIN /* also possibly defined in */ +/* limits of wchar_t */ +#define WCHAR_MIN (-0x7fffffff-1) /* wchar_t */ +#define WCHAR_MAX 0x7fffffff /* wchar_t */ + +/* limits of wint_t */ +#define WINT_MIN (-0x7fffffff-1) /* wint_t */ +#define WINT_MAX 0x7fffffff /* wint_t */ +#endif + +#endif /* !_X86_64_INT_LIMITS_H_ */ diff --git a/sys/arch/x86_64/include/int_mwgwtypes.h b/sys/arch/x86_64/include/int_mwgwtypes.h new file mode 100644 index 000000000000..cbf46dbb4968 --- /dev/null +++ b/sys/arch/x86_64/include/int_mwgwtypes.h @@ -0,0 +1,72 @@ +/* $NetBSD: int_mwgwtypes.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 2001 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Klaus Klein. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _X86_64_INT_MWGWTYPES_H_ +#define _X86_64_INT_MWGWTYPES_H_ + +/* + * 7.18.1 Integer types + */ + +/* 7.18.1.2 Minimum-width integer types */ + +typedef __signed char int_least8_t; +typedef unsigned char uint_least8_t; +typedef short int int_least16_t; +typedef unsigned short int uint_least16_t; +typedef int int_least32_t; +typedef unsigned int uint_least32_t; +typedef long int int_least64_t; +typedef unsigned long int uint_least64_t; + +/* 7.18.1.3 Fastest minimum-width integer types */ +typedef int int_fast8_t; +typedef unsigned int uint_fast8_t; +typedef int int_fast16_t; +typedef unsigned int uint_fast16_t; +typedef int int_fast32_t; +typedef unsigned int uint_fast32_t; +typedef long int int_fast64_t; +typedef unsigned long int uint_fast64_t; + +/* 7.18.1.5 Greatest-width integer types */ + +typedef long int intmax_t; +typedef unsigned long int uintmax_t; + +#endif /* !_X86_64_INT_MWGWTYPES_H_ */ diff --git a/sys/arch/x86_64/include/int_types.h b/sys/arch/x86_64/include/int_types.h new file mode 100644 index 000000000000..91f40323bf0c --- /dev/null +++ b/sys/arch/x86_64/include/int_types.h @@ -0,0 +1,61 @@ +/* $NetBSD: int_types.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)types.h 7.5 (Berkeley) 3/9/91 + */ + +#ifndef _X86_64_INT_TYPES_H_ +#define _X86_64_INT_TYPES_H_ + +/* + * 7.18.1 Integer types + */ + +/* 7.18.1.1 Exact-width integer types */ + +typedef __signed char __int8_t; +typedef unsigned char __uint8_t; +typedef short int __int16_t; +typedef unsigned short int __uint16_t; +typedef int __int32_t; +typedef unsigned int __uint32_t; +typedef long int __int64_t; +typedef unsigned long int __uint64_t; + +/* 7.18.1.4 Integer types capable of holding object pointers */ + +typedef long __intptr_t; +typedef unsigned long __uintptr_t; + +#endif /* !_X86_64_INT_TYPES_H_ */ diff --git a/sys/arch/x86_64/include/intr.h b/sys/arch/x86_64/include/intr.h new file mode 100644 index 000000000000..cdec76a967a2 --- /dev/null +++ b/sys/arch/x86_64/include/intr.h @@ -0,0 +1,167 @@ +/* $NetBSD: intr.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * XXXfvdl copy of i386 right now. Might change later. + */ + +#ifndef _X86_64_INTR_H_ +#define _X86_64_INTR_H_ + +/* Interrupt priority `levels'. */ +#define IPL_NONE 9 /* nothing */ +#define IPL_SOFTCLOCK 8 /* timeouts */ +#define IPL_SOFTNET 7 /* protocol stacks */ +#define IPL_BIO 6 /* block I/O */ +#define IPL_NET 5 /* network */ +#define IPL_SOFTSERIAL 4 /* serial */ +#define IPL_TTY 3 /* terminal */ +#define IPL_IMP 3 /* memory allocation */ +#define IPL_AUDIO 2 /* audio */ +#define IPL_CLOCK 1 /* clock */ +#define IPL_HIGH 1 /* everything */ +#define IPL_SERIAL 0 /* serial */ +#define NIPL 10 + +/* Interrupt sharing types. 
*/ +#define IST_NONE 0 /* none */ +#define IST_PULSE 1 /* pulsed */ +#define IST_EDGE 2 /* edge-triggered */ +#define IST_LEVEL 3 /* level-triggered */ + +/* Soft interrupt masks. */ +#define SIR_CLOCK 31 +#define SIR_NET 30 +#define SIR_SERIAL 29 + +/* Hack for CLKF_INTR(). */ +#define IPL_TAGINTR 28 + +#ifndef _LOCORE + +volatile int cpl, ipending, astpending; +int imask[NIPL]; + +extern void Xspllower __P((void)); + +static __inline int splraise __P((int)); +static __inline void spllower __P((int)); +static __inline void softintr __P((int)); + +/* + * Add a mask to cpl, and return the old value of cpl. + */ +static __inline int +splraise(ncpl) + register int ncpl; +{ + register int ocpl = cpl; + + cpl = ocpl | ncpl; + return (ocpl); +} + +/* + * Restore a value to cpl (unmasking interrupts). If any unmasked + * interrupts are pending, call Xspllower() to process them. + */ +static __inline void +spllower(ncpl) + register int ncpl; +{ + + cpl = ncpl; + if (ipending & ~ncpl) + Xspllower(); +} + +/* + * Hardware interrupt masks + */ +#define splbio() splraise(imask[IPL_BIO]) +#define splnet() splraise(imask[IPL_NET]) +#define spltty() splraise(imask[IPL_TTY]) +#define splaudio() splraise(imask[IPL_AUDIO]) +#define splclock() splraise(imask[IPL_CLOCK]) +#define splstatclock() splclock() +#define splserial() splraise(imask[IPL_SERIAL]) + +#define spllpt() spltty() + +/* + * Software interrupt masks + * + * NOTE: splsoftclock() is used by hardclock() to lower the priority from + * clock to softclock before it calls softclock(). + */ +#define spllowersoftclock() spllower(imask[IPL_SOFTCLOCK]) +#define splsoftclock() splraise(imask[IPL_SOFTCLOCK]) +#define splsoftnet() splraise(imask[IPL_SOFTNET]) +#define splsoftserial() splraise(imask[IPL_SOFTSERIAL]) + +/* + * Miscellaneous + */ +#define splimp() splraise(imask[IPL_IMP]) +#define splvm() splraise(imask[IPL_IMP]) +#define splhigh() splraise(imask[IPL_HIGH]) +#define splsched() splhigh() +#define spllock() splhigh() +#define spl0() spllower(0) +#define splx(x) spllower(x) + +/* + * Software interrupt registration + * + * We hand-code this to ensure that it's atomic. + */ +static __inline void +softintr(mask) + register int mask; +{ + __asm __volatile("orl %1, %0" : "=m"(ipending) : "ir" (1 << mask)); +} + +#define setsoftast() (astpending = 1) +#define setsoftclock() softintr(SIR_CLOCK) +#define setsoftnet() softintr(SIR_NET) +#define setsoftserial() softintr(SIR_SERIAL) + +#endif /* !_LOCORE */ + +#endif /* !_X86_64INTR_H_ */ diff --git a/sys/arch/x86_64/include/isa_machdep.h b/sys/arch/x86_64/include/isa_machdep.h new file mode 100644 index 000000000000..dc866f9b4dcf --- /dev/null +++ b/sys/arch/x86_64/include/isa_machdep.h @@ -0,0 +1,246 @@ +/* $NetBSD: isa_machdep.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)isa.h 5.7 (Berkeley) 5/9/91 + */ + +/* + * XXXfvdl this file should not contain intrhand, and perhaps + * will go away entirely. 
+ */ + +/* + * Various pieces of the x86_64 port want to include this file without + * or in spite of using isavar.h, and should be fixed. + */ + +#ifndef _X86_64_ISA_MACHDEP_H_ /* XXX */ +#define _X86_64_ISA_MACHDEP_H_ /* XXX */ + +#include +#include + +/* + * XXX THIS FILE IS A MESS. copyright: berkeley's probably. + * contents from isavar.h and isareg.h, mostly the latter. + * perhaps charles's? + * + * copyright from berkeley's isa.h which is now dev/isa/isareg.h. + */ + +/* + * Types provided to machine-independent ISA code. + */ +struct x86_64_isa_chipset { + struct isa_dma_state ic_dmastate; +}; + +typedef struct x86_64_isa_chipset *isa_chipset_tag_t; + +struct device; /* XXX */ +struct isabus_attach_args; /* XXX */ + +/* + * Functions provided to machine-independent ISA code. + */ +void isa_attach_hook(struct device *, struct device *, + struct isabus_attach_args *); +int isa_intr_alloc(isa_chipset_tag_t, int, int, int *); +const struct evcnt *isa_intr_evcnt(isa_chipset_tag_t ic, int irq); +void *isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, + int level, int (*ih_fun)(void *), void *ih_arg); +void isa_intr_disestablish(isa_chipset_tag_t ic, void *handler); +int isa_mem_alloc(bus_space_tag_t, bus_size_t, bus_size_t, + bus_addr_t, int, bus_addr_t *, bus_space_handle_t *); +void isa_mem_free(bus_space_tag_t, bus_space_handle_t, bus_size_t); + +#define isa_dmainit(ic, bst, dmat, d) \ + _isa_dmainit(&(ic)->ic_dmastate, (bst), (dmat), (d)) +#define isa_dmacascade(ic, c) \ + _isa_dmacascade(&(ic)->ic_dmastate, (c)) +#define isa_dmamaxsize(ic, c) \ + _isa_dmamaxsize(&(ic)->ic_dmastate, (c)) +#define isa_dmamap_create(ic, c, s, f) \ + _isa_dmamap_create(&(ic)->ic_dmastate, (c), (s), (f)) +#define isa_dmamap_destroy(ic, c) \ + _isa_dmamap_destroy(&(ic)->ic_dmastate, (c)) +#define isa_dmastart(ic, c, a, n, p, f, bf) \ + _isa_dmastart(&(ic)->ic_dmastate, (c), (a), (n), (p), (f), (bf)) +#define isa_dmaabort(ic, c) \ + _isa_dmaabort(&(ic)->ic_dmastate, (c)) +#define isa_dmacount(ic, c) \ + _isa_dmacount(&(ic)->ic_dmastate, (c)) +#define isa_dmafinished(ic, c) \ + _isa_dmafinished(&(ic)->ic_dmastate, (c)) +#define isa_dmadone(ic, c) \ + _isa_dmadone(&(ic)->ic_dmastate, (c)) +#define isa_dmafreeze(ic) \ + _isa_dmafreeze(&(ic)->ic_dmastate) +#define isa_dmathaw(ic) \ + _isa_dmathaw(&(ic)->ic_dmastate) +#define isa_dmamem_alloc(ic, c, s, ap, f) \ + _isa_dmamem_alloc(&(ic)->ic_dmastate, (c), (s), (ap), (f)) +#define isa_dmamem_free(ic, c, a, s) \ + _isa_dmamem_free(&(ic)->ic_dmastate, (c), (a), (s)) +#define isa_dmamem_map(ic, c, a, s, kp, f) \ + _isa_dmamem_map(&(ic)->ic_dmastate, (c), (a), (s), (kp), (f)) +#define isa_dmamem_unmap(ic, c, k, s) \ + _isa_dmamem_unmap(&(ic)->ic_dmastate, (c), (k), (s)) +#define isa_dmamem_mmap(ic, c, a, s, o, p, f) \ + _isa_dmamem_mmap(&(ic)->ic_dmastate, (c), (a), (s), (o), (p), (f)) +#define isa_drq_isfree(ic, c) \ + _isa_drq_isfree(&(ic)->ic_dmastate, (c)) +#define isa_malloc(ic, c, s, p, f) \ + _isa_malloc(&(ic)->ic_dmastate, (c), (s), (p), (f)) +#define isa_free(a, p) \ + _isa_free((a), (p)) +#define isa_mappage(m, o, p) \ + _isa_mappage((m), (o), (p)) + +/* + * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED + * BY PORTABLE CODE. + */ + +extern struct x86_64_bus_dma_tag isa_bus_dma_tag; + +/* + * XXX Various seemingly PC-specific constants, some of which may be + * unnecessary anyway. 
+ */ + +/* + * RAM Physical Address Space (ignoring the above mentioned "hole") + */ +#define RAM_BEGIN 0x0000000 /* Start of RAM Memory */ +#define RAM_END 0x1000000 /* End of RAM Memory */ +#define RAM_SIZE (RAM_END - RAM_BEGIN) + +/* + * Oddball Physical Memory Addresses + */ +#define COMPAQ_RAMRELOC 0x80c00000 /* Compaq RAM relocation/diag */ +#define COMPAQ_RAMSETUP 0x80c00002 /* Compaq RAM setup */ +#define WEITEK_FPU 0xC0000000 /* WTL 2167 */ +#define CYRIX_EMC 0xC0000000 /* Cyrix EMC */ + +/* + * stuff that used to be in pccons.c + */ +#define MONO_BASE 0x3B4 +#define MONO_BUF 0xB0000 +#define CGA_BASE 0x3D4 +#define CGA_BUF 0xB8000 + +/* + * Interrupt handler chains. isa_intr_establish() inserts a handler into + * the list. The handler is called with its (single) argument. + */ + +struct intrhand { + int (*ih_fun)(void *); + void *ih_arg; + u_long ih_count; + struct intrhand *ih_next; + int ih_level; + int ih_irq; +}; + +/* + * Variables and macros to deal with the ISA I/O hole. + * XXX These should be converted to machine- and bus-mapping-independent + * function definitions, invoked through the softc. + */ + +extern u_long atdevbase; /* kernel virtual address of "hole" */ + +/* + * Given a kernel virtual address for some location + * in the "hole" I/O space, return a physical address. + */ +#define ISA_PHYSADDR(v) ((void *) ((u_long)(v) - atdevbase + IOM_BEGIN)) + +/* + * Given a physical address in the "hole", + * return a kernel virtual address. + */ +#define ISA_HOLE_VADDR(p) ((void *) ((u_long)(p) - IOM_BEGIN + atdevbase)) + + +/* + * Miscellanous functions. + */ +void sysbeep(int, int); /* beep with the system speaker */ + +#endif /* _X86_64_ISA_MACHDEP_H_ XXX */ diff --git a/sys/arch/x86_64/include/kcore.h b/sys/arch/x86_64/include/kcore.h new file mode 100644 index 000000000000..751e7c44a0c0 --- /dev/null +++ b/sys/arch/x86_64/include/kcore.h @@ -0,0 +1,49 @@ +/* $NetBSD: kcore.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/* + * Copyright (c) 1996 Carnegie-Mellon University. + * All rights reserved. + * + * Author: Chris G. Demetriou + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Modified for NetBSD/i386 by Jason R. Thorpe, Numerical Aerospace + * Simulation Facility, NASA Ames Research Center. + * + * And once again modified for x86-64 by Frank van der Linden of + * Wasabi Systems, Inc. 
+ */ + +#ifndef _X86_64_KCORE_H_ +#define _X86_64_KCORE_H_ + +typedef struct cpu_kcore_hdr { + u_int64_t ptdpaddr; /* PA of PML4 */ + u_int64_t nmemsegs; /* Number of RAM segments */ +#if 0 + phys_ram_seg_t memsegs[]; /* RAM segments */ +#endif +} cpu_kcore_hdr_t; + +#endif /* _X86_64_KCORE_H_ */ diff --git a/sys/arch/x86_64/include/limits.h b/sys/arch/x86_64/include/limits.h new file mode 100644 index 000000000000..1d4d301caa7e --- /dev/null +++ b/sys/arch/x86_64/include/limits.h @@ -0,0 +1,97 @@ +/* $NetBSD: limits.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/* + * Copyright (c) 1988 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE.
+ * + * @(#)limits.h 7.2 (Berkeley) 6/28/90 + */ + +#ifndef _MACHINE_LIMITS_H_ +#define _MACHINE_LIMITS_H_ + +#define CHAR_BIT 8 /* number of bits in a char */ +#define MB_LEN_MAX 32 /* no multibyte characters */ + +#define SCHAR_MAX 0x7f /* max value for a signed char */ +#define SCHAR_MIN (-0x7f-1) /* min value for a signed char */ + +#define UCHAR_MAX 0xffU /* max value for an unsigned char */ +#define CHAR_MAX 0x7f /* max value for a char */ +#define CHAR_MIN (-0x7f-1) /* min value for a char */ + +#define USHRT_MAX 0xffffU /* max value for an unsigned short */ +#define SHRT_MAX 0x7fff /* max value for a short */ +#define SHRT_MIN (-0x7fff-1) /* min value for a short */ + +#define UINT_MAX 0xffffffffU /* max value for an unsigned int */ +#define INT_MAX 0x7fffffff /* max value for an int */ +#define INT_MIN (-0x7fffffff-1) /* min value for an int */ + +#define ULONG_MAX 0xffffffffUL /* max value for an unsigned long */ +#define LONG_MAX 0x7fffffffL /* max value for a long */ +#define LONG_MIN (-0x7fffffffL-1) /* min value for a long */ + +#if !defined(_ANSI_SOURCE) +#define SSIZE_MAX INT_MAX /* max value for a ssize_t */ + +#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) || \ + defined(_ISOC99_SOURCE) || (__STDC_VERSION__ - 0) >= 199901L +#define ULLONG_MAX 0xffffffffffffffffULL /* max unsigned long long */ +#define LLONG_MAX 0x7fffffffffffffffLL /* max signed long long */ +#define LLONG_MIN (-0x7fffffffffffffffLL-1) /* min signed long long */ +#endif + +#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) +#define SIZE_T_MAX UINT_MAX /* max value for a size_t */ + +#define UQUAD_MAX 0xffffffffffffffffULL /* max unsigned quad */ +#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */ +#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */ + +#endif /* !_POSIX_C_SOURCE && !_XOPEN_SOURCE */ +#endif /* !_ANSI_SOURCE */ + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_C_SOURCE) || \ + defined(_XOPEN_SOURCE) +#define LONG_BIT 32 +#define WORD_BIT 32 + +#define DBL_DIG 15 +#define DBL_MAX 1.7976931348623157E+308 +#define DBL_MIN 2.2250738585072014E-308 + +#define FLT_DIG 6 +#define FLT_MAX 3.40282347E+38F +#define FLT_MIN 1.17549435E-38F +#endif + +#endif /* _MACHINE_LIMITS_H_ */ diff --git a/sys/arch/x86_64/include/loadfile_machdep.h b/sys/arch/x86_64/include/loadfile_machdep.h new file mode 100644 index 000000000000..f11d97ad9fb8 --- /dev/null +++ b/sys/arch/x86_64/include/loadfile_machdep.h @@ -0,0 +1,81 @@ +/* $NetBSD: loadfile_machdep.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Christos Zoulas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. 
Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define BOOT_ELF +#define ELFSIZE 64 + +#define LOAD_KERNEL (LOAD_ALL & ~LOAD_TEXTA) +#define COUNT_KERNEL (COUNT_ALL & ~COUNT_TEXTA) + +#ifdef _STANDALONE + +#define LOADADDR(a) ((((u_long)(a)) & 0x07ffffff) + offset) +#define ALIGNENTRY(a) ((u_long)(a) & 0x00100000) +#define READ(f, b, c) pread((f), (void *)LOADADDR(b), (c)) +#define BCOPY(s, d, c) vpbcopy((s), (void *)LOADADDR(d), (c)) +#define BZERO(d, c) pbzero((void *)LOADADDR(d), (c)) +#define WARN(a) (void)(printf a, \ + printf((errno ? ": %s\n" : "\n"), \ + strerror(errno))) +#define PROGRESS(a) (void) printf a +#define ALLOC(a) alloc(a) +#define FREE(a, b) free(a, b) +#define OKMAGIC(a) ((a) == ZMAGIC) + +void vpbcopy __P((const void *, void *, size_t)); +void pbzero __P((void *, size_t)); +ssize_t pread __P((int, void *, size_t)); + +#else + +#define LOADADDR(a) (((u_long)(a)) + offset) +#define ALIGNENTRY(a) ((u_long)(a)) +#define READ(f, b, c) read((f), (void *)LOADADDR(b), (c)) +#define BCOPY(s, d, c) memcpy((void *)LOADADDR(d), (void *)(s), (c)) +#define BZERO(d, c) memset((void *)LOADADDR(d), 0, (c)) +#define WARN(a) warn a +#define PROGRESS(a) /* nothing */ +#define ALLOC(a) malloc(a) +#define FREE(a, b) free(a) +#define OKMAGIC(a) ((a) == OMAGIC) + +ssize_t vread __P((int, u_long, u_long *, size_t)); +void vcopy __P((u_long, u_long, u_long *, size_t)); +void vzero __P((u_long, u_long *, size_t)); + +#endif diff --git a/sys/arch/x86_64/include/lock.h b/sys/arch/x86_64/include/lock.h new file mode 100644 index 000000000000..0ad9096dd36f --- /dev/null +++ b/sys/arch/x86_64/include/lock.h @@ -0,0 +1,98 @@ +/* $NetBSD: lock.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */ + +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Machine-dependent spin lock operations. + */ + +#ifndef _X86_64_LOCK_H_ +#define _X86_64_LOCK_H_ + +typedef __volatile int __cpu_simple_lock_t; + +#define __SIMPLELOCK_LOCKED 1 +#define __SIMPLELOCK_UNLOCKED 0 + +static __inline void __cpu_simple_lock_init __P((__cpu_simple_lock_t *)) + __attribute__((__unused__)); +static __inline void __cpu_simple_lock __P((__cpu_simple_lock_t *)) + __attribute__((__unused__)); +static __inline int __cpu_simple_lock_try __P((__cpu_simple_lock_t *)) + __attribute__((__unused__)); +static __inline void __cpu_simple_unlock __P((__cpu_simple_lock_t *)) + __attribute__((__unused__)); + +static __inline void +__cpu_simple_lock_init(__cpu_simple_lock_t *alp) +{ + + *alp = __SIMPLELOCK_UNLOCKED; +} + +static __inline void +__cpu_simple_lock(__cpu_simple_lock_t *alp) +{ + int __val = __SIMPLELOCK_LOCKED; + + do { + __asm __volatile("xchgl %0, %2" + : "=r" (__val) + : "0" (__val), "m" (*alp)); + } while (__val != __SIMPLELOCK_UNLOCKED); +} + +static __inline int +__cpu_simple_lock_try(__cpu_simple_lock_t *alp) +{ + int __val = __SIMPLELOCK_LOCKED; + + __asm __volatile("xchgl %0, %2" + : "=r" (__val) + : "0" (__val), "m" (*alp)); + + return ((__val == __SIMPLELOCK_UNLOCKED) ? 
1 : 0);
+}
+
+static __inline void
+__cpu_simple_unlock(__cpu_simple_lock_t *alp)
+{
+
+ *alp = __SIMPLELOCK_UNLOCKED;
+}
+
+#endif /* _X86_64_LOCK_H_ */
diff --git a/sys/arch/x86_64/include/math.h b/sys/arch/x86_64/include/math.h
new file mode 100644
index 000000000000..bb53387dd967
--- /dev/null
+++ b/sys/arch/x86_64/include/math.h
@@ -0,0 +1,11 @@
+/* $NetBSD: math.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */
+
+/*
+ * ISO C99
+ */
+#if !defined(_ANSI_SOURCE) && \
+ (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) || \
+ defined(_ISOC99_SOURCE) || (__STDC_VERSION__ - 0) >= 199901L)
+extern __const char __nanf[];
+#define NAN (*(__const float *)(__const void *)__nanf)
+#endif
diff --git a/sys/arch/x86_64/include/netbsd32_machdep.h b/sys/arch/x86_64/include/netbsd32_machdep.h
new file mode 100644
index 000000000000..f910f1130809
--- /dev/null
+++ b/sys/arch/x86_64/include/netbsd32_machdep.h
@@ -0,0 +1,106 @@
+/* $NetBSD: netbsd32_machdep.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */
+
+#ifndef _MACHINE_NETBSD32_H_
+#define _MACHINE_NETBSD32_H_
+
+typedef u_int32_t netbsd32_sigcontextp_t;
+
+struct netbsd32_sigcontext13 {
+ int sc_gs;
+ int sc_fs;
+ int sc_es;
+ int sc_ds;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebp;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+ /* XXX */
+ int sc_eip;
+ int sc_cs;
+ int sc_eflags;
+ int sc_esp;
+ int sc_ss;
+
+ int sc_onstack; /* sigstack state to restore */
+ int sc_mask; /* signal mask to restore (old style) */
+
+ int sc_trapno; /* XXX should be above */
+ int sc_err;
+};
+
+struct netbsd32_sigcontext {
+ int sc_gs;
+ int sc_fs;
+ int sc_es;
+ int sc_ds;
+ int sc_edi;
+ int sc_esi;
+ int sc_ebp;
+ int sc_ebx;
+ int sc_edx;
+ int sc_ecx;
+ int sc_eax;
+ /* XXX */
+ int sc_eip;
+ int sc_cs;
+ int sc_eflags;
+ int sc_esp;
+ int sc_ss;
+
+ int sc_onstack; /* sigstack state to restore */
+ int __sc_mask13; /* signal mask to restore (old style) */
+
+ int sc_trapno; /* XXX should be above */
+ int sc_err;
+
+ sigset_t sc_mask; /* signal mask to restore (new style) */
+};
+
+#define sc_sp sc_esp
+#define sc_fp sc_ebp
+#define sc_pc sc_eip
+#define sc_ps sc_eflags
+
+struct netbsd32_sigframe {
+ int sf_signum;
+ int sf_code;
+ u_int32_t sf_scp; /* struct sigcontext *sf_scp */
+ u_int32_t sf_handler; /* sig_t sf_handler; */
+ struct netbsd32_sigcontext sf_sc;
+};
+
+struct reg32 {
+ int r_eax;
+ int r_ecx;
+ int r_edx;
+ int r_ebx;
+ int r_esp;
+ int r_ebp;
+ int r_esi;
+ int r_edi;
+ int r_eip;
+ int r_eflags;
+ int r_cs;
+ int r_ss;
+ int r_ds;
+ int r_es;
+ int r_fs;
+ int r_gs;
+};
+
+struct fpreg32 {
+ char __data[108];
+};
+
+
+struct exec_package;
+void netbsd32_setregs(struct proc *p, struct exec_package *pack, u_long stack);
+int netbsd32_sigreturn(struct proc *p, void *v, register_t *retval);
+void netbsd32_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
+
+extern char netbsd32_sigcode[], netbsd32_esigcode[];
+
+#endif /* _MACHINE_NETBSD32_H_ */
diff --git a/sys/arch/x86_64/include/param.h b/sys/arch/x86_64/include/param.h
new file mode 100644
index 000000000000..fb6e85143ba2
--- /dev/null
+++ b/sys/arch/x86_64/include/param.h
@@ -0,0 +1,145 @@
+/* $NetBSD: param.h,v 1.1 2001/06/19 00:20:11 fvdl Exp $ */
+
+#ifdef _KERNEL
+#ifdef _LOCORE
+#include
+#else
+#include
+#endif
+#endif
+
+#define _MACHINE x86_64
+#define MACHINE "x86_64"
+#define _MACHINE_ARCH x86_64
+#define MACHINE_ARCH "x86_64"
+#define MID_MACHINE MID_X86_64
+
+/*
+ * Round p (pointer or byte index) up to a correctly-aligned value
+ * for all data types (int, long, ...).
The result is u_int and + * must be cast to any desired pointer type. + * + * ALIGNED_POINTER is a boolean macro that checks whether an address + * is valid to fetch data elements of type t from on this architecture. + * This does not reflect the optimal alignment, just the possibility + * (within reasonable limits). + * + */ +#define ALIGNBYTES (sizeof(long) - 1) +#define ALIGN(p) (((u_long)(p) + ALIGNBYTES) &~ALIGNBYTES) +#define ALIGNED_POINTER(p,t) 1 + +#define ALIGNBYTES32 (sizeof(int) - 1) +#define ALIGN32(p) (((u_long)(p) + ALIGNBYTES32) &~ALIGNBYTES32) + +#define PGSHIFT 12 /* LOG2(NBPG) */ +#define NBPG (1 << PGSHIFT) /* bytes/page */ +#define PGOFSET (NBPG-1) /* byte offset into page */ +#define NPTEPG (NBPG/(sizeof (pt_entry_t))) + +/* + * XXXfvdl change this (after bootstrap) to take # of bits from + * config info into account. + */ +#define KERNBASE 0xffff800000000000 /* start of kernel virtual space */ +#define KERNTEXTOFF 0xffff800000100000 /* start of kernel text */ +#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT) + +#define KERNTEXTOFF_HI 0xffff8000 +#define KERNTEXTOFF_LO 0x00100000 + +#define KERNBASE_HI 0xffff8000 +#define KERNBASE_LO 0x00000000 + +#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ +#define DEV_BSIZE (1 << DEV_BSHIFT) +#define BLKDEV_IOSIZE 2048 +#ifndef MAXPHYS +#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */ +#endif + +#define SSIZE 1 /* initial stack size/NBPG */ +#define SINCR 1 /* increment of stack/NBPG */ +#define UPAGES 4 /* pages of u-area */ +#define USPACE (UPAGES * NBPG) /* total size of u-area */ + +#ifndef MSGBUFSIZE +#define MSGBUFSIZE 4*NBPG /* default message buffer size */ +#endif + +/* + * Constants related to network buffer management. + * MCLBYTES must be no larger than NBPG (the software page size), and, + * on machines that exchange pages of input or output buffers with mbuf + * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple + * of the hardware page size. + */ +#define MSIZE 256 /* size of an mbuf */ + +#ifndef MCLSHIFT +# define MCLSHIFT 11 /* convert bytes to m_buf clusters */ +#endif /* MCLSHIFT */ + +#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */ +#define MCLOFSET (MCLBYTES - 1) /* offset within a m_buf cluster */ + +#ifndef NMBCLUSTERS + +#if defined(_KERNEL) && !defined(_LKM) +#include "opt_gateway.h" +#endif /* _KERNEL && ! _LKM */ + +#ifdef GATEWAY +#define NMBCLUSTERS 4096 /* map size, max cluster allocation */ +#else +#define NMBCLUSTERS 2048 /* map size, max cluster allocation */ +#endif +#endif + +/* + * Minimum and maximum sizes of the kernel malloc arena in PAGE_SIZE-sized + * logical pages. + */ +#define NKMEMPAGES_MIN_DEFAULT ((8 * 1024 * 1024) >> PAGE_SHIFT) +#define NKMEMPAGES_MAX_DEFAULT ((128 * 1024 * 1024) >> PAGE_SHIFT) + +/* pages ("clicks") to disk blocks */ +#define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT)) +#define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT)) + +/* bytes to pages */ +#define ctob(x) ((x) << PGSHIFT) +#define btoc(x) (((x) + PGOFSET) >> PGSHIFT) + +/* bytes to disk blocks */ +#define dbtob(x) ((x) << DEV_BSHIFT) +#define btodb(x) ((x) >> DEV_BSHIFT) + +/* + * Map a ``block device block'' to a file system block. + * This should be device dependent, and should use the bsize + * field from the disk label. + * For now though just use DEV_BSIZE. + */ +#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE / DEV_BSIZE)) + +/* + * XXXfvdl the PD* stuff is different from i386. 
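(An illustrative aside, not part of the commit: a worked sketch of the Mach-derived conversion macros defined just below. The input values are made up; NBPD_L2 == 0x200000 and L2_SHIFT == 21 come from pte.h elsewhere in this patch.)

	/* Illustrative expected results only; not compiled as part of the port. */
	x86_64_round_pdr(0x345678);  /* -> 0x400000: rounded up to a 2MB boundary  */
	x86_64_trunc_pdr(0x345678);  /* -> 0x200000: truncated to a 2MB boundary   */
	x86_64_btod(0x400000);       /* -> 2:        bytes to 2MB directory units  */
	x86_64_btop(0x200000);       /* -> 0x200:    bytes to 4K pages (PGSHIFT)   */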
+ */ +/* + * Mach derived conversion macros + */ +#define x86_64_round_pdr(x) \ + ((((unsigned long)(x)) + (NBPD_L2 - 1)) & ~(NBPD_L2 - 1)) +#define x86_64_trunc_pdr(x) ((unsigned long)(x) & ~(NBPD_L2 - 1)) +#define x86_64_btod(x) ((unsigned long)(x) >> L2_SHIFT) +#define x86_64_dtob(x) ((unsigned long)(x) << L2_SHIFT) +#define x86_64_round_page(x) ((((unsigned long)(x)) + PGOFSET) & ~PGOFSET) +#define x86_64_trunc_page(x) ((unsigned long)(x) & ~PGOFSET) +#define x86_64_btop(x) ((unsigned long)(x) >> PGSHIFT) +#define x86_64_ptob(x) ((unsigned long)(x) << PGSHIFT) + +#define btop(x) x86_64_btop(x) +#define ptob(x) x86_64_ptob(x) +#define x86_trunc_page(x) x86_64_trunc_page(x) +#define round_pdr(x) x86_64_round_pdr(x) diff --git a/sys/arch/x86_64/include/pcb.h b/sys/arch/x86_64/include/pcb.h new file mode 100644 index 000000000000..efda96e983cc --- /dev/null +++ b/sys/arch/x86_64/include/pcb.h @@ -0,0 +1,130 @@ +/* $NetBSD: pcb.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pcb.h 5.10 (Berkeley) 5/12/91 + */ + +/* + * XXXfvdl these copyrights don't really match anymore + */ + +#ifndef _X86_64_PCB_H_ +#define _X86_64_PCB_H_ + +#include + +#include +#include +#include +#include + +#define NIOPORTS 1024 /* # of ports we allow to be mapped */ + +struct pcb { + /* + * XXXfvdl + * It's overkill to have a TSS here, as it's only needed + * for compatibility processes who use an I/O permission map. + * The pcb fields below are not in the TSS anymore (and there's + * not enough room in the TSS to store them all) + * Should just make this a pointer and allocate. + */ + struct x86_64_tss pcb_tss; + u_int64_t pcb_cr3; + u_int64_t pcb_rsp; + u_int64_t pcb_rbp; + u_int64_t pcb_fs; + u_int64_t pcb_gs; + u_int64_t pcb_ldt_sel; + int pcb_cr0; /* saved image of CR0 */ + struct fxsave64 pcb_savefpu; /* floating point state */ + int pcb_flags; +#define PCB_USER_LDT 0x01 /* has user-set LDT */ + caddr_t pcb_onfault; /* copyin/out fault recovery */ + unsigned pcb_iomap[NIOPORTS/32]; /* I/O bitmap */ + struct pmap *pcb_pmap; /* back pointer to our pmap */ +}; + +/* + * The pcb is augmented with machine-dependent additional data for + * core dumps. For the i386, there is nothing to add. + */ +struct md_coredump { + long md_pad[8]; +}; + +#ifdef _KERNEL +struct pcb *curpcb; /* our current running pcb */ +#endif + +#endif /* _X86_64_PCB_H_ */ diff --git a/sys/arch/x86_64/include/pccons.h b/sys/arch/x86_64/include/pccons.h new file mode 100644 index 000000000000..f87b00bd48be --- /dev/null +++ b/sys/arch/x86_64/include/pccons.h @@ -0,0 +1,30 @@ +/* $NetBSD: pccons.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * XXXfvdl should be shared with i386. 
+ */ + +/* + * pccons.h -- pccons ioctl definitions + */ + +#ifndef _PCCONS_H_ +#define _PCCONS_H_ + +#include + +#define CONSOLE_X_MODE_ON _IO('t',121) +#define CONSOLE_X_MODE_OFF _IO('t',122) +#define CONSOLE_X_BELL _IOW('t',123,int[2]) +#define CONSOLE_SET_TYPEMATIC_RATE _IOW('t',124,u_char) + +#ifdef _KERNEL +int pccnattach __P((void)); + +#if (NPCCONSKBD > 0) +int pcconskbd_cnattach __P((pckbc_tag_t, pckbc_slot_t)); +#endif + +#endif /* _KERNEL */ + +#endif /* _PCCONS_H_ */ diff --git a/sys/arch/x86_64/include/pci_machdep.h b/sys/arch/x86_64/include/pci_machdep.h new file mode 100644 index 000000000000..5026a3e1fa8d --- /dev/null +++ b/sys/arch/x86_64/include/pci_machdep.h @@ -0,0 +1,109 @@ +/* $NetBSD: pci_machdep.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. + * Copyright (c) 1994 Charles M. Hannum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles M. Hannum. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Machine-specific definitions for PCI autoconfiguration. + */ +#define __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH + +/* + * x86_64-specific PCI structure and type definitions. + * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE. + * + * Configuration tag; created from a {bus,device,function} triplet by + * pci_make_tag(), and passed to pci_conf_read() and pci_conf_write(). + * We could instead always pass the {bus,device,function} triplet to + * the read and write routines, but this would cause extra overhead. + * + * Mode 2 is historical and deprecated by the Revision 2.0 specification. + */ +union x86_64_pci_tag_u { + u_int32_t mode1; + struct { + u_int16_t port; + u_int8_t enable; + u_int8_t forward; + } mode2; +}; + +extern struct x86_64_bus_dma_tag pci_bus_dma_tag; + +/* + * Types provided to machine-independent PCI code + */ +typedef void *pci_chipset_tag_t; +typedef union x86_64_pci_tag_u pcitag_t; +typedef int pci_intr_handle_t; + +/* + * x86_64-specific PCI variables and functions. 
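(A hedged sketch, not part of the commit: how the configuration-space accessors prototyped further down in this header end up being used. PCI_ID_REG comes from dev/pci/pcireg.h, and the chipset tag, bus, device, and function values are invented for illustration.)

	pci_chipset_tag_t pc = NULL;              /* chipset tag handed in by the MI bus code */
	pcitag_t tag;
	pcireg_t id;

	tag = pci_make_tag(pc, 0, 3, 0);          /* bus 0, device 3, function 0 */
	id = pci_conf_read(pc, tag, PCI_ID_REG);  /* vendor ID in the low 16 bits */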
+ * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE. + */ +extern int pci_mode; +int pci_mode_detect(void); +int pci_bus_flags(void); +struct pci_attach_args; + +/* + * Functions provided to machine-independent PCI code. + */ +void pci_attach_hook(struct device *, struct device *, + struct pcibus_attach_args *); +int pci_bus_maxdevs(pci_chipset_tag_t, int); +pcitag_t pci_make_tag(pci_chipset_tag_t, int, int, int); +void pci_decompose_tag(pci_chipset_tag_t, pcitag_t, + int *, int *, int *); +pcireg_t pci_conf_read(pci_chipset_tag_t, pcitag_t, int); +void pci_conf_write(pci_chipset_tag_t, pcitag_t, int, + pcireg_t); +int pci_intr_map(struct pci_attach_args *, pci_intr_handle_t *); +const char *pci_intr_string(pci_chipset_tag_t, pci_intr_handle_t); +const struct evcnt *pci_intr_evcnt(pci_chipset_tag_t, pci_intr_handle_t); +void *pci_intr_establish(pci_chipset_tag_t, pci_intr_handle_t, + int, int (*)(void *), void *); +void pci_intr_disestablish(pci_chipset_tag_t, void *); + +/* + * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED + * BY PORTABLE CODE. + */ + +/* + * XXXfvdl will need to check if this is still valid once the + * real hardware is there. + */ +/* + * Section 6.2.4, `Miscellaneous Functions' of the PCI Specification, + * says that 255 means `unknown' or `no connection' to the interrupt + * controller on a PC. + */ +#define X86_64_PCI_INTERRUPT_LINE_NO_CONNECTION 0xff diff --git a/sys/arch/x86_64/include/pio.h b/sys/arch/x86_64/include/pio.h new file mode 100644 index 000000000000..05bcaacc8c95 --- /dev/null +++ b/sys/arch/x86_64/include/pio.h @@ -0,0 +1,233 @@ +/* $NetBSD: pio.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * XXXfvdl plain copy of i386. Since pio didn't change, this should + * probably be shared. + */ + +#ifndef _X86_64_PIO_H_ +#define _X86_64_PIO_H_ + +/* + * Functions to provide access to i386 programmed I/O instructions. + * + * The in[bwl]() and out[bwl]() functions are split into two varieties: one to + * use a small, constant, 8-bit port number, and another to use a large or + * variable port number. The former can be compiled as a smaller instruction. + */ + + +#ifdef __OPTIMIZE__ + +#define __use_immediate_port(port) \ + (__builtin_constant_p((port)) && (port) < 0x100) + +#else + +#define __use_immediate_port(port) 0 + +#endif + + +#define inb(port) \ + (__use_immediate_port(port) ? __inbc(port) : __inb(port)) + +static __inline u_int8_t +__inbc(int port) +{ + u_int8_t data; + __asm __volatile("inb %1,%0" : "=a" (data) : "id" (port)); + return data; +} + +static __inline u_int8_t +__inb(int port) +{ + u_int8_t data; + __asm __volatile("inb %w1,%0" : "=a" (data) : "d" (port)); + return data; +} + +static __inline void +insb(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\tinsb" : + "=D" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt) : + "memory"); +} + +#define inw(port) \ + (__use_immediate_port(port) ? __inwc(port) : __inw(port)) + +static __inline u_int16_t +__inwc(int port) +{ + u_int16_t data; + __asm __volatile("inw %1,%0" : "=a" (data) : "id" (port)); + return data; +} + +static __inline u_int16_t +__inw(int port) +{ + u_int16_t data; + __asm __volatile("inw %w1,%0" : "=a" (data) : "d" (port)); + return data; +} + +static __inline void +insw(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\tinsw" : + "=D" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt) : + "memory"); +} + +#define inl(port) \ + (__use_immediate_port(port) ? __inlc(port) : __inl(port)) + +static __inline u_int32_t +__inlc(int port) +{ + u_int32_t data; + __asm __volatile("inl %w1,%0" : "=a" (data) : "id" (port)); + return data; +} + +static __inline u_int32_t +__inl(int port) +{ + u_int32_t data; + __asm __volatile("inl %w1,%0" : "=a" (data) : "d" (port)); + return data; +} + +static __inline void +insl(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\tinsl" : + "=D" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt) : + "memory"); +} + +#define outb(port, data) \ + (__use_immediate_port(port) ? 
__outbc(port, data) : __outb(port, data)) + +static __inline void +__outbc(int port, u_int8_t data) +{ + __asm __volatile("outb %0,%w1" : : "a" (data), "id" (port)); +} + +static __inline void +__outb(int port, u_int8_t data) +{ + __asm __volatile("outb %0,%w1" : : "a" (data), "d" (port)); +} + +static __inline void +outsb(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\toutsb" : + "=S" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt)); +} + +#define outw(port, data) \ + (__use_immediate_port(port) ? __outwc(port, data) : __outw(port, data)) + +static __inline void +__outwc(int port, u_int16_t data) +{ + __asm __volatile("outw %0,%1" : : "a" (data), "id" (port)); +} + +static __inline void +__outw(int port, u_int16_t data) +{ + __asm __volatile("outw %0,%w1" : : "a" (data), "d" (port)); +} + +static __inline void +outsw(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\toutsw" : + "=S" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt)); +} + +#define outl(port, data) \ + (__use_immediate_port(port) ? __outlc(port, data) : __outl(port, data)) + +static __inline void +__outlc(int port, u_int32_t data) +{ + __asm __volatile("outl %0,%1" : : "a" (data), "id" (port)); +} + +static __inline void +__outl(int port, u_int32_t data) +{ + __asm __volatile("outl %0,%w1" : : "a" (data), "d" (port)); +} + +static __inline void +outsl(int port, void *addr, int cnt) +{ + void *dummy1; + int dummy2; + __asm __volatile("cld\n\trepne\n\toutsl" : + "=S" (dummy1), "=c" (dummy2) : + "d" (port), "0" (addr), "1" (cnt)); +} + +#endif /* _X86_64_PIO_H_ */ diff --git a/sys/arch/x86_64/include/pmap.h b/sys/arch/x86_64/include/pmap.h new file mode 100644 index 000000000000..e5ee01702353 --- /dev/null +++ b/sys/arch/x86_64/include/pmap.h @@ -0,0 +1,555 @@ +/* $NetBSD: pmap.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * + * Copyright (c) 1997 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgment: + * This product includes software developed by Charles D. Cranor and + * Washington University. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * pmap.h: see pmap.c for the history of this pmap module. + */ + +#ifndef _X86_64_PMAP_H_ +#define _X86_64_PMAP_H_ + +#ifndef _LOCORE +#if defined(_KERNEL_OPT) +#include "opt_largepages.h" +#endif + +#include +#include +#include +#include +#endif + +/* + * The x86_64 pmap module closely resembles the i386 one. It uses + * the same recursive entry scheme, and the same alternate area + * trick for accessing non-current pmaps. See the i386 pmap.h + * for a description. The obvious difference is that 3 extra + * levels of page table need to be dealt with. The level 1 page + * table pages are at: + * + * l1: 0x00007f8000000000 - 0x00007fffffffffff (39 bits, needs PML4 entry) + * + * The alternate space is at: + * + * l1: 0xffffff8000000000 - 0xffffffffffffffff (39 bits, needs PML4 entry) + * + * The rest is kept as physical pages in 3 UVM objects, and is + * temporarily mapped for virtual access when needed. 
+ * + * Note that address space is signed, so the layout for 48 bits is: + * + * +---------------------------------+ 0xffffffffffffffff + * | | + * | alt.L1 table (PTE pages) | + * | | + * +---------------------------------+ 0xffffff8000000000 + * ~ ~ + * | | + * | Kernel Space | + * | | + * | | + * +---------------------------------+ 0xffff800000000000 = 0x0000008000000000 + * | | + * | alt.L1 table (PTE pages) | + * | | + * +---------------------------------+ 0x00007f8000000000 + * ~ ~ + * | | + * | User Space | + * | | + * | | + * +---------------------------------+ 0x0000000000000000 + * + * In other words, there is a 'VA hole' at 0x0000008000000000 - + * 0xffff800000000000 which will trap, just as on, for example, + * sparcv9. + * + * The unused space can be used if needed, but it adds a little more + * complexity to the calculations. + */ + +/* + * The first generation of Hammer processors can use 48 bits of + * virtual memory, and 40 bits of physical memory. This will be + * more for later generations. These defines can be changed to + * variable names containing the # of bits, extracted from an + * extended cpuid instruction (variables are harder to use during + * bootstrap, though) + */ +#define VIRT_BITS 48 +#define PHYS_BITS 40 + +/* + * Mask to get rid of the sign-extended part of addresses. + */ +#define VA_SIGN_MASK 0xffff000000000000 +#define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK) +/* + * XXXfvdl this one's not right. + */ +#define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK) + +#define L4_SLOT_PTE 255 +#define L4_SLOT_KERN 256 +#define L4_SLOT_APTE 511 + +#define PDIR_SLOT_KERN L4_SLOT_KERN +#define PDIR_SLOT_PTE L4_SLOT_PTE +#define PDIR_SLOT_APTE L4_SLOT_APTE + +/* + * the following defines give the virtual addresses of various MMU + * data structures: + * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings + * PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD + * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP + * + */ + +#define PTE_BASE ((pt_entry_t *) (L4_SLOT_PTE * NBPD_L4)) +#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4)))) + +#define L1_BASE PTE_BASE +#define AL1_BASE APTE_BASE + +#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3)) +#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2)) +#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1)) + +#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3)) +#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2)) +#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1)) + +#define PDP_PDE (L4_BASE + PDIR_SLOT_PTE) +#define APDP_PDE (L4_BASE + PDIR_SLOT_APTE) + +#define PDP_BASE L4_BASE +#define APDP_BASE AL4_BASE + +#define NKL4_MAX_ENTRIES (unsigned long)254 +#define NKL3_MAX_ENTRIES (unsigned long)(NKL4_MAX_ENTRIES * 512) +#define NKL2_MAX_ENTRIES (unsigned long)(NKL3_MAX_ENTRIES * 512) +#define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * 512) + +/* + * 32M of KVA to start with. 
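(An illustrative cross-check, not part of the commit: the slot constants above reproduce the addresses quoted in the layout comment, using NBPD_L4 == 1ULL << 39 from pte.h.)

	/*
	 * L4_SLOT_PTE  * NBPD_L4 = 255 << 39 = 0x00007f8000000000         (PTE_BASE)
	 * L4_SLOT_KERN * NBPD_L4 = 256 << 39 = 0x0000800000000000, which
	 *     VA_SIGN_NEG() sign-extends to   0xffff800000000000          (KERNBASE)
	 * L4_SLOT_APTE * NBPD_L4 = 511 << 39 = 0x0000ff8000000000, which
	 *     sign-extends to                 0xffffff8000000000          (APTE_BASE)
	 */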
+ */
+#define NKL4_START_ENTRIES 1
+#define NKL3_START_ENTRIES 1
+#define NKL2_START_ENTRIES 16
+#define NKL1_START_ENTRIES 0 /* XXX */
+
+#define NTOPLEVEL_PDES (NBPG / (sizeof (pd_entry_t)))
+
+#define KERNSPACE (NKL4_ENTRIES * NBPD_L4)
+
+#define NPDPG (NBPG / sizeof (pd_entry_t))
+
+#define ptei(VA) (((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
+
+/*
+ * pl*_i: generate index into pde/pte arrays in virtual space
+ */
+#define pl1_i(VA) (((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
+#define pl2_i(VA) (((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
+#define pl3_i(VA) (((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
+#define pl4_i(VA) (((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
+#define pl_i(va, lvl) \
+ (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
+
+#define PTP_MASK_INITIALIZER { L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
+#define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
+#define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
+ NKL3_START_ENTRIES, NKL4_START_ENTRIES }
+#define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
+ NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
+#define NBPD_INITIALIZER { NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
+#define PDES_INITIALIZER { L2_BASE, L3_BASE, L4_BASE }
+#define APDES_INITIALIZER { AL2_BASE, AL3_BASE, AL4_BASE }
+
+/*
+ * PTP macros:
+ * a PTP's index is the PD index of the PDE that points to it
+ * a PTP's offset is the byte-offset in the PTE space that this PTP is at
+ * a PTP's VA is the first VA mapped by that PTP
+ *
+ * note that NBPG == number of bytes in a PTP (4096 bytes == 512 entries)
+ * NBPD_L2 == number of bytes a PTP can map (2MB)
+ */
+
+#define ptp_va2o(va, lvl) (pl_i(va, (lvl)+1) * NBPG)
+
+#define PTP_LEVELS 4
+
+/*
+ * PG_AVAIL usage: we make use of the ignored bits of the PTE
+ */
+
+#define PG_W PG_AVAIL1 /* "wired" mapping */
+#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
+/* PG_AVAIL3 not used */
+
+#if defined(_KERNEL) && !defined(_LOCORE)
+/*
+ * pmap data structures: see pmap.c for details of locking.
+ */
+
+struct pmap;
+typedef struct pmap *pmap_t;
+
+/*
+ * we maintain a list of all non-kernel pmaps
+ */
+
+LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
+
+/*
+ * the pmap structure
+ *
+ * note that the pm_obj contains the simple_lock, the reference count,
+ * page list, and number of PTPs within the pmap.
+ *
+ * pm_lock is the same as the spinlock for vm object 0. Changes to
+ * the other objects may only be made if that lock has been taken
+ * (the other object locks are only used when uvm_pagealloc is called)
+ */
+
+struct pmap {
+ struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
+#define pm_lock pm_obj[0].vmobjlock
+#define pm_obj_l1 pm_obj[0]
+#define pm_obj_l2 pm_obj[1]
+#define pm_obj_l3 pm_obj[2]
+ LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
+ pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
+ paddr_t pm_pdirpa; /* PA of PD (read-only after create) */
+ struct vm_page *pm_ptphint[PTP_LEVELS-1];
+ /* pointer to a PTP in our pmap */
+ struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
+
+ int pm_flags; /* see below */
+
+ union descriptor *pm_ldt; /* user-set LDT */
+ int pm_ldt_len; /* number of LDT entries */
+ int pm_ldt_sel; /* LDT selector */
+};
+
+/* pm_flags */
+#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
+
+/*
+ * for each managed physical page we maintain a list of <pv_entry>'s
+ * which it is mapped at. the list is headed by a pv_head structure.
+ * there is one pv_head per managed phys page (allocated at boot time). + * the pv_head structure points to a list of pv_entry structures (each + * describes one mapping). + */ + +struct pv_entry; + +struct pv_head { + struct simplelock pvh_lock; /* locks every pv on this list */ + struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */ +}; + +struct pv_entry { /* locked by its list's pvh_lock */ + struct pv_entry *pv_next; /* next entry */ + struct pmap *pv_pmap; /* the pmap */ + vaddr_t pv_va; /* the virtual address */ + struct vm_page *pv_ptp; /* the vm_page of the PTP */ +}; + +/* + * pv_entrys are dynamically allocated in chunks from a single page. + * we keep track of how many pv_entrys are in use for each page and + * we can free pv_entry pages if needed. there is one lock for the + * entire allocation system. + */ + +struct pv_page_info { + TAILQ_ENTRY(pv_page) pvpi_list; + struct pv_entry *pvpi_pvfree; + int pvpi_nfree; +}; + +/* + * number of pv_entry's in a pv_page + * (note: won't work on systems where NPBG isn't a constant) + */ + +#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \ + sizeof(struct pv_entry)) + +/* + * a pv_page: where pv_entrys are allocated from + */ + +struct pv_page { + struct pv_page_info pvinfo; + struct pv_entry pvents[PVE_PER_PVPAGE]; +}; + +/* + * pmap_remove_record: a record of VAs that have been unmapped, used to + * flush TLB. if we have more than PMAP_RR_MAX then we stop recording. + */ + +#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */ + +struct pmap_remove_record { + int prr_npages; + vaddr_t prr_vas[PMAP_RR_MAX]; +}; + +/* + * global kernel variables + */ + +/* PTDpaddr: is the physical address of the kernel's PDP */ +extern u_long PTDpaddr; + +extern struct pmap kernel_pmap_store; /* kernel pmap */ +extern int pmap_pg_g; /* do we support PG_G? */ + +extern paddr_t ptp_masks[]; +extern int ptp_shifts[]; +extern unsigned long nkptp[], nbpd[], nkptpmax[]; +extern pd_entry_t *pdes[]; + +/* + * macros + */ + +#define pmap_kernel() (&kernel_pmap_store) +#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) +#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) +#define pmap_update() /* nothing (yet) */ + +#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M) +#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U) +#define pmap_copy(DP,SP,D,L,S) +#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M) +#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U) +#define pmap_move(DP,SP,D,L,S) +#define pmap_phys_address(ppn) ptob(ppn) +#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? 
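(A minimal sketch, not part of the commit: how the pv_head/pv_entry lists defined above are meant to be walked. It assumes kernel context, that the caller already located the pv_head for a managed page, and that simple_lock/simple_unlock come from sys/lock.h.)

	struct pv_head *pvh = /* looked up from the page's pv tracking */ NULL;
	struct pv_entry *pve;

	simple_lock(&pvh->pvh_lock);
	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next) {
		/* pve->pv_pmap maps this page at VA pve->pv_va;
		 * pve->pv_ptp is the page-table page holding that PTE. */
	}
	simple_unlock(&pvh->pvh_lock);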
*/ + + +/* + * prototypes + */ + +void pmap_activate __P((struct proc *)); +void pmap_bootstrap __P((vaddr_t)); +boolean_t pmap_change_attrs __P((struct vm_page *, int, int)); +void pmap_deactivate __P((struct proc *)); +static void pmap_page_protect __P((struct vm_page *, vm_prot_t)); +void pmap_page_remove __P((struct vm_page *)); +static void pmap_protect __P((struct pmap *, vaddr_t, + vaddr_t, vm_prot_t)); +void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t)); +boolean_t pmap_test_attrs __P((struct vm_page *, int)); +static void pmap_update_pg __P((vaddr_t)); +static void pmap_update_2pg __P((vaddr_t,vaddr_t)); +void pmap_write_protect __P((struct pmap *, vaddr_t, + vaddr_t, vm_prot_t)); + +vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */ + +#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */ + +/* + * Do idle page zero'ing uncached to avoid polluting the cache. + */ +boolean_t pmap_pageidlezero __P((paddr_t)); +#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa)) + +/* + * inline functions + */ + +/* + * pmap_update_pg: flush one page from the TLB (or flush the whole thing + * if hardware doesn't support one-page flushing) + */ + +__inline static void +pmap_update_pg(va) + vaddr_t va; +{ + invlpg(va); +} + +/* + * pmap_update_2pg: flush two pages from the TLB + */ + +__inline static void +pmap_update_2pg(va, vb) + vaddr_t va, vb; +{ + invlpg(va); + invlpg(vb); +} + +/* + * pmap_page_protect: change the protection of all recorded mappings + * of a managed page + * + * => this function is a frontend for pmap_page_remove/pmap_change_attrs + * => we only have to worry about making the page more protected. + * unprotecting a page is done on-demand at fault time. + */ + +__inline static void +pmap_page_protect(pg, prot) + struct vm_page *pg; + vm_prot_t prot; +{ + if ((prot & VM_PROT_WRITE) == 0) { + if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { + (void) pmap_change_attrs(pg, PG_RO, PG_RW); + } else { + pmap_page_remove(pg); + } + } +} + +/* + * pmap_protect: change the protection of pages in a pmap + * + * => this function is a frontend for pmap_remove/pmap_write_protect + * => we only have to worry about making the page more protected. + * unprotecting a page is done on-demand at fault time. 
+ */
+
+__inline static void
+pmap_protect(pmap, sva, eva, prot)
+ struct pmap *pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
+{
+ if ((prot & VM_PROT_WRITE) == 0) {
+ if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ pmap_write_protect(pmap, sva, eva, prot);
+ } else {
+ pmap_remove(pmap, sva, eva);
+ }
+ }
+}
+
+/*
+ * various address inlines
+ *
+ * vtopte: return a pointer to the PTE mapping a VA, works only for
+ * user and PT addresses
+ *
+ * kvtopte: return a pointer to the PTE mapping a kernel VA
+ */
+
+#include
+
+static __inline pt_entry_t *
+vtopte(vaddr_t va)
+{
+
+ KASSERT(va < (L4_SLOT_KERN * NBPD_L4));
+
+ return (PTE_BASE + pl1_i(va));
+}
+
+static __inline pt_entry_t *
+kvtopte(vaddr_t va)
+{
+
+ KASSERT(va >= (L4_SLOT_KERN * NBPD_L4));
+
+#ifdef LARGEPAGES
+ {
+ pd_entry_t *pde;
+
+ pde = L2_BASE + pl2_i(va);
+ if (*pde & PG_PS)
+ return ((pt_entry_t *)pde);
+ }
+#endif
+
+ return (PTE_BASE + pl1_i(va));
+}
+
+paddr_t vtophys __P((vaddr_t));
+vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
+
+#if 0 /* XXXfvdl was USER_LDT, need to check if that can be supported */
+void pmap_ldt_cleanup __P((struct proc *));
+#define PMAP_FORK
+#endif /* USER_LDT */
+
+#endif /* _KERNEL && !_LOCORE */
+#endif /* _X86_64_PMAP_H_ */
diff --git a/sys/arch/x86_64/include/pmc.h b/sys/arch/x86_64/include/pmc.h
new file mode 100644
index 000000000000..9ed74cacea08
--- /dev/null
+++ b/sys/arch/x86_64/include/pmc.h
@@ -0,0 +1,43 @@
+/* $NetBSD: pmc.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 2000 Zembu Labs, Inc.
+ * All rights reserved.
+ *
+ * Author: Jason R. Thorpe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Zembu Labs, Inc.
+ * 4. Neither the name of Zembu Labs nor the names of its employees may
+ * be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ZEMBU LABS, INC. ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WAR-
+ * RANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DIS-
+ * CLAIMED. IN NO EVENT SHALL ZEMBU LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifdef _KERNEL +int pmc_info(struct proc *, struct x86_64_pmc_info_args *, + register_t *); +int pmc_startstop(struct proc *, struct x86_64_pmc_startstop_args *, + register_t *); +int pmc_read(struct proc *, struct x86_64_pmc_read_args *, + register_t *); +#endif diff --git a/sys/arch/x86_64/include/proc.h b/sys/arch/x86_64/include/proc.h new file mode 100644 index 000000000000..dcdaeb40e406 --- /dev/null +++ b/sys/arch/x86_64/include/proc.h @@ -0,0 +1,53 @@ +/* $NetBSD: proc.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)proc.h 7.1 (Berkeley) 5/15/91 + */ + +#include + +/* + * Machine-dependent part of the proc structure for x86-64. + */ +struct mdproc { + struct trapframe *md_regs; /* registers on current frame */ + int md_flags; /* machine-dependent flags */ + int md_tss_sel; /* TSS selector */ + /* Syscall handling function */ + void (*md_syscall) __P((struct trapframe)); +}; + +/* md_flags */ +#define MDP_USEDFPU 0x0001 /* has used the FPU */ +#define MDP_COMPAT 0x0002 /* x86 compatibility process */ diff --git a/sys/arch/x86_64/include/profile.h b/sys/arch/x86_64/include/profile.h new file mode 100644 index 000000000000..3ffbdeafbf83 --- /dev/null +++ b/sys/arch/x86_64/include/profile.h @@ -0,0 +1,65 @@ +/* $NetBSD: profile.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)profile.h 8.1 (Berkeley) 6/11/93 + */ + +#define _MCOUNT_DECL static __inline void _mcount + +#define MCOUNT_ENTRY "__mcount" +#define MCOUNT_COMPAT __weak_alias(mcount, __mcount) + +/* + * XXXfvdl this is screwed by -fomit-frame-pointer being included in + * -O. + */ +#define MCOUNT \ +MCOUNT_COMPAT \ +extern void mcount __P((void)) __asm__(MCOUNT_ENTRY); \ +void \ +mcount() \ +{ \ + _mcount((u_long)__builtin_return_address(1), \ + (u_long)__builtin_return_address(0)); \ +} + + +#ifdef _KERNEL +/* + * Note that we assume splhigh() and splx() cannot call mcount() + * recursively. + */ +#define MCOUNT_ENTER s = splhigh() +#define MCOUNT_EXIT splx(s) +#endif /* _KERNEL */ diff --git a/sys/arch/x86_64/include/psl.h b/sys/arch/x86_64/include/psl.h new file mode 100644 index 000000000000..9cef8f534f46 --- /dev/null +++ b/sys/arch/x86_64/include/psl.h @@ -0,0 +1,7 @@ +/* $NetBSD: psl.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +#ifdef _KERNEL +#include +#else +#include +#endif diff --git a/sys/arch/x86_64/include/pte.h b/sys/arch/x86_64/include/pte.h new file mode 100644 index 000000000000..40438aedaaa4 --- /dev/null +++ b/sys/arch/x86_64/include/pte.h @@ -0,0 +1,135 @@ +/* $NetBSD: pte.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _X86_64_PTE_H_
+#define _X86_64_PTE_H_
+
+/*
+ * x86-64 MMU hardware structure:
+ *
+ * the (first generation) x86-64 MMU is a 4-level MMU which maps 2^48 bytes
+ * of virtual memory. The pagesize we use is 4K (4096 [0x1000] bytes),
+ * although 2M large pages can be used as well. The indexes in the levels
+ * are 9 bits wide (512 64bit entries per level), dividing the bits
+ * 9-9-9-9-12.
+ *
+ * The top level table, called PML4, contains 512 64bit entries pointing
+ * to 3rd level tables. The 3rd level table is called the 'page directory
+ * pointers directory' and has 512 entries pointing to page directories.
+ * The 2nd level is the page directory, containing 512 pointers to
+ * page table pages. Lastly, level 1 consists of pages containing 512
+ * PTEs.
+ *
+ * Simply put, levels 4-2 all consist of pages containing 512
+ * entries pointing to the next level, and level 1 pages contain the
+ * actual PTEs themselves.
+ *
+ * For a description of the other bits, which are i386 compatible,
+ * see the i386 pte.h
+ */
+
+#if !defined(_LOCORE)
+
+/*
+ * here we define the data types for PDEs and PTEs
+ */
+
+typedef u_int64_t pd_entry_t; /* PDE */
+typedef u_int64_t pt_entry_t; /* PTE */
+
+#endif
+
+/*
+ * now we define various constants for playing with virtual addresses
+ */
+
+#define L1_SHIFT 12
+#define L2_SHIFT 21
+#define L3_SHIFT 30
+#define L4_SHIFT 39
+#define NBPD_L1 (1ULL << L1_SHIFT) /* # bytes mapped by L1 ent (4K) */
+#define NBPD_L2 (1ULL << L2_SHIFT) /* # bytes mapped by L2 ent (2MB) */
+#define NBPD_L3 (1ULL << L3_SHIFT) /* # bytes mapped by L3 ent (1G) */
+#define NBPD_L4 (1ULL << L4_SHIFT) /* # bytes mapped by L4 ent (512G) */
+
+#define L4_MASK 0x0000ff8000000000
+#define L3_MASK 0x0000007fc0000000
+#define L2_MASK 0x000000003fe00000
+#define L1_MASK 0x00000000001ff000
+
+#define L4_FRAME L4_MASK
+#define L3_FRAME (L4_FRAME|L3_MASK)
+#define L2_FRAME (L3_FRAME|L2_MASK)
+#define L1_FRAME (L2_FRAME|L1_MASK)
+
+/*
+ * PDE/PTE bits. These are no different from their i386 counterparts.
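(A worked illustration, not part of the commit: one address decomposed under the 9-9-9-9-12 split described above. The example address is KERNTEXTOFF from param.h in this same patch.)

	/*
	 * va = 0xffff800000100000:
	 *   (va & L4_MASK) >> L4_SHIFT = 256     (the kernel's PML4 slot)
	 *   (va & L3_MASK) >> L3_SHIFT = 0
	 *   (va & L2_MASK) >> L2_SHIFT = 0       (0x100000 lies below the first 2MB boundary)
	 *   (va & L1_MASK) >> L1_SHIFT = 0x100   (PTE index within that page table)
	 *   va & (NBPD_L1 - 1)         = 0       (byte offset within the 4K page)
	 */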
+ */ + +#define PG_V 0x0000000000000001 /* valid */ +#define PG_RO 0x0000000000000000 /* read-only */ +#define PG_RW 0x0000000000000002 /* read-write */ +#define PG_u 0x0000000000000004 /* user accessible */ +#define PG_PROT 0x0000000000000006 +#define PG_N 0x0000000000000018 /* non-cacheable */ +#define PG_U 0x0000000000000020 /* used */ +#define PG_M 0x0000000000000040 /* modified */ +#define PG_PS 0x0000000000000080 /* 2MB page size */ +#define PG_G 0x0000000000000100 /* not flushed */ +#define PG_AVAIL1 0x0000000000000200 +#define PG_AVAIL2 0x0000000000000400 +#define PG_AVAIL3 0x0000000000000800 +#define PG_FRAME 0xfffffffffffff000 + +#define PG_LGFRAME 0xffffffffffc00000 /* large (2M) page frame mask */ + +/* + * short forms of protection codes + */ + +#define PG_KR 0x0000000000000000 /* kernel read-only */ +#define PG_KW 0x0000000000000002 /* kernel read-write */ + +/* + * page protection exception bits + */ + +#define PGEX_P 0x01 /* protection violation (vs. no mapping) */ +#define PGEX_W 0x02 /* exception during a write cycle */ +#define PGEX_U 0x04 /* exception while in user mode (upl) */ + +#endif /* _X86_64_PTE_H_ */ diff --git a/sys/arch/x86_64/include/ptrace.h b/sys/arch/x86_64/include/ptrace.h new file mode 100644 index 000000000000..54e254638d91 --- /dev/null +++ b/sys/arch/x86_64/include/ptrace.h @@ -0,0 +1,40 @@ +/* $NetBSD: ptrace.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * Copyright (c) 1993 Christopher G. Demetriou + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Christopher G. Demetriou. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
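/*
 * Illustrative sketch (not part of the original submission): decoding a
 * PTE with the PG_* bits defined above.  The PTE value is made up for
 * the example (a valid, writable, user-accessible, referenced page);
 * the bit definitions are mirrored so the program stands alone.
 */
#include <stdio.h>
#include <stdint.h>

#define PG_V		0x0000000000000001ULL
#define PG_RW		0x0000000000000002ULL
#define PG_u		0x0000000000000004ULL
#define PG_U		0x0000000000000020ULL
#define PG_M		0x0000000000000040ULL
#define PG_PS		0x0000000000000080ULL
#define PG_FRAME	0xfffffffffffff000ULL

int
main(void)
{
    uint64_t pte = 0x0000000012345000ULL | PG_V | PG_RW | PG_u | PG_U;

    if ((pte & PG_V) == 0) {
        printf("not mapped\n");
        return 0;
    }
    printf("pa %#llx%s%s%s%s%s\n",
        (unsigned long long)(pte & PG_FRAME),
        (pte & PG_RW) ? ", writable" : ", read-only",
        (pte & PG_u)  ? ", user"     : ", kernel-only",
        (pte & PG_U)  ? ", used"     : "",
        (pte & PG_M)  ? ", modified" : "",
        (pte & PG_PS) ? ", 2M page"  : "");
    return 0;
}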
+ */ + +/* + * i386-dependent ptrace definitions + */ +#define PT_STEP (PT_FIRSTMACH + 0) +#define PT_GETREGS (PT_FIRSTMACH + 1) +#define PT_SETREGS (PT_FIRSTMACH + 2) +#define PT_GETFPREGS (PT_FIRSTMACH + 3) +#define PT_SETFPREGS (PT_FIRSTMACH + 4) diff --git a/sys/arch/x86_64/include/reg.h b/sys/arch/x86_64/include/reg.h new file mode 100644 index 000000000000..a62e9cf2daf2 --- /dev/null +++ b/sys/arch/x86_64/include/reg.h @@ -0,0 +1,111 @@ +/* $NetBSD: reg.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)reg.h 5.5 (Berkeley) 1/18/91 + */ + +#ifndef _X86_64_REG_H_ +#define _X86_64_REG_H_ + +/* + * XXX + * The #defines aren't used in the kernel, but some user-level code still + * expects them. + */ + +/* When referenced during a trap/exception, registers are at these offsets */ + +#define tR15 0 +#define tR14 1 +#define tR13 2 +#define tR12 3 +#define tR11 4 +#define tR10 5 +#define tR9 6 +#define tR8 7 +#define tRDI 8 +#define tRSI 9 +#define tRBP 10 +#define tRBX 11 +#define tRDX 12 +#define tRCX 13 +#define tRAX 14 + +#define tRIP 17 +#define tCS 18 +#define tEFLAGS 19 +#define tRSP 20 +#define tSS 21 + +/* + * Registers accessible to ptrace(2) syscall for debugger + * The machine-dependent code for PT_{SET,GET}REGS needs to + * use whichver order, defined above, is correct, so that it + * is all invisible to the user. 
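/*
 * Illustrative sketch (not part of the original submission): the
 * intended consumer of the PT_* requests above is a debugger using
 * ptrace(2) together with 'struct reg' defined just below.  This
 * assumes the usual NetBSD ptrace interface (PT_TRACE_ME, PT_GETREGS,
 * PT_CONTINUE); error handling is kept minimal.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <machine/reg.h>
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct reg r;
    int status;
    pid_t pid;

    if ((pid = fork()) == 0) {
        ptrace(PT_TRACE_ME, 0, NULL, 0);
        raise(SIGSTOP);		/* stop so the parent can inspect us */
        _exit(0);
    }

    waitpid(pid, &status, 0);	/* wait for the child to stop */
    if (ptrace(PT_GETREGS, pid, &r, 0) == -1)
        err(1, "PT_GETREGS");
    printf("child rip=%#llx rsp=%#llx rax=%#llx\n",
        (unsigned long long)r.r_rip,
        (unsigned long long)r.r_rsp,
        (unsigned long long)r.r_rax);

    ptrace(PT_CONTINUE, pid, (void *)1, 0);	/* resume where it stopped */
    waitpid(pid, &status, 0);
    return 0;
}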
+ */ +struct reg { + u_int64_t r_r15; + u_int64_t r_r14; + u_int64_t r_r13; + u_int64_t r_r12; + u_int64_t r_r11; + u_int64_t r_r10; + u_int64_t r_r9; + u_int64_t r_r8; + u_int64_t r_rdi; + u_int64_t r_rsi; + u_int64_t r_rbp; + u_int64_t r_rbx; + u_int64_t r_rdx; + u_int64_t r_rcx; + u_int64_t r_rax; + u_int64_t r_rsp; + u_int64_t r_rip; + u_int64_t r_eflags; + u_int64_t r_cs; + u_int64_t r_ss; + u_int64_t r_ds; + u_int64_t r_es; + u_int64_t r_fs; + u_int64_t r_gs; +}; + +struct fpreg { + struct fxsave64 fxstate; +}; + +#endif /* !_X86_64_REG_H_ */ diff --git a/sys/arch/x86_64/include/rnd.h b/sys/arch/x86_64/include/rnd.h new file mode 100644 index 000000000000..73db599dec83 --- /dev/null +++ b/sys/arch/x86_64/include/rnd.h @@ -0,0 +1,78 @@ +/* $NetBSD: rnd.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Bill Sommerfeld. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _I386_RND_H_ +#define _I386_RND_H_ + +/* + * Machine-specific support for rnd(4) + */ + +#include +#include + +#include "opt_cputype.h" + +#ifdef _KERNEL + +static __inline int +cpu_hascounter(void) +{ +#if I586_CPU || I686_CPU + /* + * Note that: + * 1) Intel documentation is very specific that code *must* test + * the CPU feature flag, even if you "know" that a particular + * rev of the hardware supports it. + * 2) We know that the TSC is busted on some Cyrix CPU's.. 
+ */ + return (cpu_feature & CPUID_TSC) != 0; +#else + return 0; +#endif +} + +static __inline u_int64_t +cpu_counter(void) +{ + return rdtsc(); +} + +#endif /* _KERNEL */ + +#endif /* !_I386_RND_H_ */ diff --git a/sys/arch/x86_64/include/segments.h b/sys/arch/x86_64/include/segments.h new file mode 100644 index 000000000000..46424c415c19 --- /dev/null +++ b/sys/arch/x86_64/include/segments.h @@ -0,0 +1,298 @@ +/* $NetBSD: segments.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/*- + * Copyright (c) 1995, 1997 + * Charles M. Hannum. All rights reserved. + * Copyright (c) 1989, 1990 William F. Jolitz + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)segments.h 7.1 (Berkeley) 5/9/91 + */ + +/* + * Adapted for NetBSD/x86_64 by fvdl@wasabisystems.com. + */ + +/* + * 386 Segmentation Data Structures and definitions + * William F. Jolitz (william@ernie.berkeley.edu) 6/20/1989 + */ + +#ifndef _X86_64_SEGMENTS_H_ +#define _X86_64_SEGMENTS_H_ + +/* + * Selectors + */ + +#define ISPL(s) ((s) & SEL_RPL) /* what is the priority level of a selector */ +#define SEL_KPL 0 /* kernel privilege level */ +#define SEL_UPL 3 /* user privilege level */ +#define SEL_RPL 3 /* requester's privilege level mask */ +#define ISLDT(s) ((s) & SEL_LDT) /* is it local or global */ +#define SEL_LDT 4 /* local descriptor table */ + +/* Dynamically allocated TSSs and LDTs start (byte offset) */ +#define SYSSEL_START (NGDT_MEM << 3) +#define DYNSEL_START (SYSSEL_START + (NGDT_SYS << 4)) + +/* + * These define the index not from the start of the GDT, but from + * the part of the GDT that they're allocated from. + * First NGDT_MEM entries are 8-byte descriptors for CS and DS. 
+ * Next NGDT_SYS entries are 16-byte descriptors defining LDTs. + * + * The rest is 16-byte descriptors for TSS and LDT. + */ + +#define IDXSEL(s) (((s) >> 3) & 0x1fff) +#define IDXDYNSEL(s) ((((s) & ~SEL_RPL) - DYNSEL_START) >> 4) + +#define GSEL(s,r) (((s) << 3) | r) +#define GSYSSEL(s,r) ((((s) << 4) + SYSSEL_START) | r) +#define GDYNSEL(s,r) ((((s) << 4) + DYNSEL_START) | r | SEL_KPL) + +#define LSEL(s,r) ((s) | r | SEL_LDT) + +#define USERMODE(c, f) (ISPL(c) == SEL_UPL) +#define KERNELMODE(c, f) (ISPL(c) == SEL_KPL) + +#ifndef _LOCORE + +/* + * Memory and System segment descriptors + */ + +/* + * Below is used for TSS and LDT. + */ +struct sys_segment_descriptor { + u_int64_t sd_lolimit:16; /* segment extent (lsb) */ + u_int64_t sd_lobase:24; /* segment base address (lsb) */ + u_int64_t sd_type:5; /* segment type */ + u_int64_t sd_dpl:2; /* segment descriptor priority level */ + u_int64_t sd_p:1; /* segment descriptor present */ + u_int64_t sd_hilimit:4; /* segment extent (msb) */ + u_int64_t sd_xx1:3; /* avl, long and def32 (not used) */ + u_int64_t sd_gran:1; /* limit granularity (byte/page) */ + u_int64_t sd_hibase:40; /* segment base address (msb) */ + u_int64_t sd_xx2:8; /* reserved */ + u_int64_t sd_zero:5; /* must be zero */ + u_int64_t sd_xx3:19; /* reserved */ +} __attribute__((packed)); + +/* + * Below is used for cs, ds, etc. + */ +struct mem_segment_descriptor { + unsigned sd_lolimit:16; /* segment extent (lsb) */ + unsigned sd_lobase:24; /* segment base address (lsb) */ + unsigned sd_type:5; /* segment type */ + unsigned sd_dpl:2; /* segment descriptor priority level */ + unsigned sd_p:1; /* segment descriptor present */ + unsigned sd_hilimit:4; /* segment extent (msb) */ + unsigned sd_avl:1; /* available */ + unsigned sd_long:1; /* long mode */ + unsigned sd_def32:1; /* default 32 vs 16 bit size */ + unsigned sd_gran:1; /* limit granularity (byte/page) */ + unsigned sd_hibase:8; /* segment base address (msb) */ +} __attribute__((packed)); + +/* + * Gate descriptors (e.g. indirect descriptors) + */ +struct gate_descriptor { + u_int64_t gd_looffset:16; /* gate offset (lsb) */ + u_int64_t gd_selector:16; /* gate segment selector */ + u_int64_t gd_ist:3; /* IST select */ + u_int64_t gd_xx1:5; /* reserved */ + u_int64_t gd_type:5; /* segment type */ + u_int64_t gd_dpl:2; /* segment descriptor priority level */ + u_int64_t gd_p:1; /* segment descriptor present */ + u_int64_t gd_hioffset:48; /* gate offset (msb) */ + u_int64_t gd_xx2:8; /* reserved */ + u_int64_t gd_zero:5; /* must be zero */ + u_int64_t gd_xx3:19; /* reserved */ +} __attribute__((packed)); + +/* + * region descriptors, used to load gdt/idt tables before segments yet exist. 
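/*
 * Illustrative sketch (not part of the original submission): the
 * selector macros above are plain integer arithmetic -- a selector is
 * (index << 3) | TI | RPL.  The slot numbers come from the GDT layout
 * further down in this header (GCODE_SEL = 1, GUCODE_SEL = 3); the
 * macros are mirrored locally so this compiles on its own.
 */
#include <stdio.h>

#define SEL_KPL		0
#define SEL_UPL		3
#define SEL_RPL		3
#define SEL_LDT		4
#define ISPL(s)		((s) & SEL_RPL)
#define ISLDT(s)	((s) & SEL_LDT)
#define IDXSEL(s)	(((s) >> 3) & 0x1fff)
#define GSEL(s, r)	(((s) << 3) | (r))

#define GCODE_SEL	1	/* kernel code, from the GDT layout below */
#define GUCODE_SEL	3	/* user code */

int
main(void)
{
    int kcs = GSEL(GCODE_SEL, SEL_KPL);		/* 0x08 */
    int ucs = GSEL(GUCODE_SEL, SEL_UPL);	/* 0x1b */

    printf("kernel %%cs = %#04x (index %d, rpl %d, %s)\n",
        kcs, IDXSEL(kcs), ISPL(kcs), ISLDT(kcs) ? "LDT" : "GDT");
    printf("user   %%cs = %#04x (index %d, rpl %d, %s)\n",
        ucs, IDXSEL(ucs), ISPL(ucs), ISLDT(ucs) ? "LDT" : "GDT");
    return 0;
}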
+ */ +struct region_descriptor { + u_int16_t rd_limit; /* segment extent */ + u_int64_t rd_base; /* base address */ +} __attribute__((packed)); + +#ifdef _KERNEL +extern struct sys_segment_descriptor *ldt; +extern struct gate_descriptor *idt; +extern char *gdtstore; + +void setgate __P((struct gate_descriptor *, void *, int, int, int)); +void setregion __P((struct region_descriptor *, void *, u_int16_t)); +void set_sys_segment __P((struct sys_segment_descriptor *, void *, size_t, + int, int, int)); +void set_mem_segment __P((struct mem_segment_descriptor *, void *, size_t, + int, int, int, int, int)); +#endif /* _KERNEL */ + +#endif /* !_LOCORE */ + +/* system segments and gate types */ +#define SDT_SYSNULL 0 /* system null */ +#define SDT_SYS286TSS 1 /* system 286 TSS available */ +#define SDT_SYSLDT 2 /* system local descriptor table */ +#define SDT_SYS286BSY 3 /* system 286 TSS busy */ +#define SDT_SYS286CGT 4 /* system 286 call gate */ +#define SDT_SYSTASKGT 5 /* system task gate */ +#define SDT_SYS286IGT 6 /* system 286 interrupt gate */ +#define SDT_SYS286TGT 7 /* system 286 trap gate */ +#define SDT_SYSNULL2 8 /* system null again */ +#define SDT_SYS386TSS 9 /* system 386 TSS available */ +#define SDT_SYSNULL3 10 /* system null again */ +#define SDT_SYS386BSY 11 /* system 386 TSS busy */ +#define SDT_SYS386CGT 12 /* system 386 call gate */ +#define SDT_SYSNULL4 13 /* system null again */ +#define SDT_SYS386IGT 14 /* system 386 interrupt gate */ +#define SDT_SYS386TGT 15 /* system 386 trap gate */ + +/* memory segment types */ +#define SDT_MEMRO 16 /* memory read only */ +#define SDT_MEMROA 17 /* memory read only accessed */ +#define SDT_MEMRW 18 /* memory read write */ +#define SDT_MEMRWA 19 /* memory read write accessed */ +#define SDT_MEMROD 20 /* memory read only expand dwn limit */ +#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */ +#define SDT_MEMRWD 22 /* memory read write expand dwn limit */ +#define SDT_MEMRWDA 23 /* memory read write expand dwn limit acessed */ +#define SDT_MEME 24 /* memory execute only */ +#define SDT_MEMEA 25 /* memory execute only accessed */ +#define SDT_MEMER 26 /* memory execute read */ +#define SDT_MEMERA 27 /* memory execute read accessed */ +#define SDT_MEMEC 28 /* memory execute only conforming */ +#define SDT_MEMEAC 29 /* memory execute only accessed conforming */ +#define SDT_MEMERC 30 /* memory execute read conforming */ +#define SDT_MEMERAC 31 /* memory execute read accessed conforming */ + +/* is memory segment descriptor pointer ? */ +#define ISMEMSDP(s) ((s->d_type) >= SDT_MEMRO && \ + (s->d_type) <= SDT_MEMERAC) + +/* is 286 gate descriptor pointer ? */ +#define IS286GDP(s) ((s->d_type) >= SDT_SYS286CGT && \ + (s->d_type) < SDT_SYS286TGT) + +/* is 386 gate descriptor pointer ? */ +#define IS386GDP(s) ((s->d_type) >= SDT_SYS386CGT && \ + (s->d_type) < SDT_SYS386TGT) + +/* is gate descriptor pointer ? */ +#define ISGDP(s) (IS286GDP(s) || IS386GDP(s)) + +/* is segment descriptor pointer ? */ +#define ISSDP(s) (ISMEMSDP(s) || !ISGDP(s)) + +/* is system segment descriptor pointer ? 
*/ +#define ISSYSSDP(s) (!ISMEMSDP(s) && !ISGDP(s)) + +/* + * Segment Protection Exception code bits + */ +#define SEGEX_EXT 0x01 /* recursive or externally induced */ +#define SEGEX_IDT 0x02 /* interrupt descriptor table */ +#define SEGEX_TI 0x04 /* local descriptor table */ + +/* + * Entries in the Interrupt Descriptor Table (IDT) + */ +#define NIDT 256 +#define NRSVIDT 32 /* reserved entries for cpu exceptions */ + +/* + * Entries in the Global Descriptor Table (GDT) + * The code and data descriptors must come first. There + * are NGDT_MEM of them. + * + * Then come the predefined LDT (and possibly TSS) descriptors. + * There are NGDT_SYS of them. + */ +#define GNULL_SEL 0 /* Null descriptor */ +#define GCODE_SEL 1 /* Kernel code descriptor */ +#define GDATA_SEL 2 /* Kernel data descriptor */ +#define GUCODE_SEL 3 /* User code descriptor */ +#define GUDATA_SEL 4 /* User data descriptor */ +#define GAPM32CODE_SEL 5 +#define GAPM16CODE_SEL 6 +#define GAPMDATA_SEL 7 +#define GBIOSCODE_SEL 8 +#define GBIOSDATA_SEL 9 +#define GPNPBIOSCODE_SEL 10 +#define GPNPBIOSDATA_SEL 11 +#define GPNPBIOSSCRATCH_SEL 12 +#define GPNPBIOSTRAMP_SEL 13 +#define GUCODE32_SEL 14 +#define GUDATA32_SEL 15 +#define NGDT_MEM 16 + +#define GLDT_SEL 0 /* Default LDT descriptor */ +#define NGDT_SYS 1 + +#define GDT_SYS_OFFSET (NGDT_MEM << 3) + +#define GDT_ADDR_MEM(i) \ + ((struct mem_segment_descriptor *)(gdtstore + ((i) << 3))) +#define GDT_ADDR_SYS(i) \ + ((struct sys_segment_descriptor *)(gdtstore + (((i) << 4) + SYSSEL_START))) + +/* + * Byte offsets in the Local Descriptor Table (LDT) + */ +#define LSYS5CALLS_SEL 0 /* iBCS system call gate */ + +#if 0 +/* Sorry, no room. Luckily, it's not needed. */ +#define LSYS5SIGR_SEL 1 /* iBCS sigreturn gate */ +#endif + +#define LUCODE_SEL 16 /* User code descriptor */ +#define LUDATA_SEL 24 /* User data descriptor */ +#define LSOL26CALLS_SEL 32 /* Solaris 2.6 system call gate */ +#define LUCODE32_SEL 48 /* 32 bit user code descriptor */ +#define LUDATA32_SEL 56 /* 32 bit user data descriptor */ +#define LBSDICALLS_SEL 128 /* BSDI system call gate */ + +#define LDT_SIZE 144 + +#endif /* _X86_64_SEGMENTS_H_ */ diff --git a/sys/arch/x86_64/include/setjmp.h b/sys/arch/x86_64/include/setjmp.h new file mode 100644 index 000000000000..543eae15bce8 --- /dev/null +++ b/sys/arch/x86_64/include/setjmp.h @@ -0,0 +1,20 @@ +/* $NetBSD: setjmp.h,v 1.1 2001/06/19 00:20:12 fvdl Exp $ */ + +/* + * machine/setjmp.h: machine dependent setjmp-related information. + * These are only the callee-saved registers, code calling setjmp + * will expect the rest to be clobbered anyway. + */ + +#define _JB_RBX 0 +#define _JB_RBP 1 +#define _JB_R12 2 +#define _JB_R13 3 +#define _JB_R14 4 +#define _JB_R15 5 +#define _JB_RSP 6 +#define _JB_PC 7 +#define _JB_SIGFLAG 8 +#define _JB_SIGMASK 9 + +#define _JBLEN 11 /* size, in longs, of a jmp_buf */ diff --git a/sys/arch/x86_64/include/signal.h b/sys/arch/x86_64/include/signal.h new file mode 100644 index 000000000000..f8e2dbef13c5 --- /dev/null +++ b/sys/arch/x86_64/include/signal.h @@ -0,0 +1,96 @@ +/* $NetBSD: signal.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/* + * Copyright (c) 1982, 1986, 1989, 1991 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)signal.h 7.16 (Berkeley) 3/17/91 + */ + +#ifndef _X86_64_SIGNAL_H_ +#define _X86_64_SIGNAL_H_ + +typedef int sig_atomic_t; + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_C_SOURCE) && \ + !defined(_XOPEN_SOURCE) +/* + * Get the "code" values + */ +#include +#include + +/* + * Information pushed on stack when a signal is delivered. + * This is used by the kernel to restore state following + * execution of the signal handler. It is also made available + * to the handler to allow it to restore state properly if + * a non-standard exit is performed. + */ +struct sigcontext { + int sc_gs; + int sc_fs; /* unused */ + int sc_es; /* unused */ + int sc_ds; /* unused */ + u_int64_t sc_r8; + u_int64_t sc_r9; + u_int64_t sc_r10; + u_int64_t sc_r11; + u_int64_t sc_r12; + u_int64_t sc_r13; + u_int64_t sc_r14; + u_int64_t sc_r15; + u_int64_t sc_rdi; + u_int64_t sc_rsi; + u_int64_t sc_rbp; + u_int64_t sc_rbx; + u_int64_t sc_rdx; + u_int64_t sc_rcx; + u_int64_t sc_rax; + u_int64_t sc_trapno; + u_int64_t sc_err; + u_int64_t sc_rip; + int sc_cs; + int sc_pad0; + u_int64_t sc_eflags; + u_int64_t sc_rsp_onsig; + struct fxsave64 *sc_fpstate; /* XXXfvdl compat with Linux, but.. 
*/ + int sc_ss; + int sc_pad1; + + sigset_t sc_mask; /* signal mask to restore (new style) */ + u_int64_t sc_onstack; /* sigstack state to restore */ + + u_int64_t sc_rsp; +}; + +#endif /* !_ANSI_SOURCE && !_POSIX_C_SOURCE && !_XOPEN_SOURCE */ +#endif /* !_X86_64_SIGNAL_H_ */ diff --git a/sys/arch/x86_64/include/specialreg.h b/sys/arch/x86_64/include/specialreg.h new file mode 100644 index 000000000000..15ac4355b538 --- /dev/null +++ b/sys/arch/x86_64/include/specialreg.h @@ -0,0 +1,24 @@ +/* $NetBSD: specialreg.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +#ifdef _KERNEL +#include +#else +#include +#endif + +/* + * Extended Feature Enable Register of the x86-64 + */ + +#define MSR_EFER 0xc0000080 + +#define EFER_SCE 0x00000001 /* SYSCALL extension */ +#define EFER_LME 0x00000100 /* Long Mode Active */ +#define EFER_LMA 0x00000400 /* Long Mode Enabled */ + +#define MSR_STAR 0xc0000081 /* 32 bit syscall gate addr */ +#define MSR_LSTAR 0xc0000082 /* 64 bit syscall gate addr */ +#define MSR_CSTAR 0xc0000083 /* compat syscall gate addr */ + +#define MSR_FSBASE 0xc0000100 /* 64bit offset for fs: */ +#define MSR_GSBASE 0xc0000101 /* 64bit offset for gs: */ diff --git a/sys/arch/x86_64/include/stdarg.h b/sys/arch/x86_64/include/stdarg.h new file mode 100644 index 000000000000..21a189e55432 --- /dev/null +++ b/sys/arch/x86_64/include/stdarg.h @@ -0,0 +1,65 @@ +/* $NetBSD: stdarg.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stdarg.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _X86_64_STDARG_H_ +#define _X86_64_STDARG_H_ + +#include +#include + +typedef _BSD_VA_LIST_ va_list; + +#ifdef __lint__ +#define __builtin_next_arg(t) ((t) ? 0 : 0) +#define __builtin_stdarg_start(a, l) ((a) = ((l) ? 
0 : 0)) +#define __builtin_va_arg(a, t) ((a) ? 0 : 0) +#define __builtin_va_end /* nothing */ +#define __builtin_va_copy(d, s) ((d) = (s)) +#endif + +#define va_start(ap, last) __builtin_stdarg_start((ap), (last)) +#define va_arg __builtin_va_arg +#define va_end __builtin_va_end +#define __va_copy(dest, src) __builtin_va_copy((dest), (src)) + +#if !defined(_ANSI_SOURCE) && \ + (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) || \ + defined(_ISOC99_SOURCE) || (__STDC_VERSION__ - 0) >= 199901L) +#define va_copy(dest, src) __va_copy((dest), (src)) +#endif + +#endif /* !_X86_64_STDARG_H_ */ diff --git a/sys/arch/x86_64/include/sysarch.h b/sys/arch/x86_64/include/sysarch.h new file mode 100644 index 000000000000..84ac40c6e4a9 --- /dev/null +++ b/sys/arch/x86_64/include/sysarch.h @@ -0,0 +1,93 @@ +/* $NetBSD: sysarch.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +#if 0 +/* + * XXXfvdl todo. + */ +#ifndef _X86_64_SYSARCH_H_ +#define _X86_64_SYSARCH_H_ + +/* + * Architecture specific syscalls (x86_64) + */ +#define X86_64_GET_LDT 0 +#define X86_64_SET_LDT 1 +#define X86_64_IOPL 2 +#define X86_64_GET_IOPERM 3 +#define X86_64_SET_IOPERM 4 +#define X86_64_VM86 5 +#define X86_64_PMC_INFO 8 +#define X86_64_PMC_STARTSTOP 9 +#define X86_64_PMC_READ 10 + +struct x86_64_get_ldt_args { + int start; + union descriptor *desc; + int num; +}; + +struct x86_64_set_ldt_args { + int start; + union descriptor *desc; + int num; +}; + +struct x86_64_iopl_args { + int iopl; +}; + +struct x86_64_get_ioperm_args { + u_long *iomap; +}; + +struct x86_64_set_ioperm_args { + u_long *iomap; +}; + +struct x86_64_pmc_info_args { + int type; + int flags; +}; + +#define PMC_TYPE_NONE 0 +#define PMC_TYPE_I586 1 +#define PMC_TYPE_I686 2 + +#define PMC_INFO_HASTSC 0x01 + +#define PMC_NCOUNTERS 2 + +struct x86_64_pmc_startstop_args { + int counter; + u_int64_t val; + u_int8_t event; + u_int8_t unit; + u_int8_t compare; + u_int8_t flags; +}; + +#define PMC_SETUP_KERNEL 0x01 +#define PMC_SETUP_USER 0x02 +#define PMC_SETUP_EDGE 0x04 +#define PMC_SETUP_INV 0x08 + +struct x86_64_pmc_read_args { + int counter; + u_int64_t val; + u_int64_t time; +}; + +#ifndef _KERNEL +int x86_64_get_ldt __P((int, union descriptor *, int)); +int x86_64_set_ldt __P((int, union descriptor *, int)); +int x86_64_iopl __P((int)); +int x86_64_get_ioperm __P((u_long *)); +int x86_64_set_ioperm __P((u_long *)); +int x86_64_pmc_info __P((struct x86_64_pmc_info_args *)); +int x86_64_pmc_startstop __P((struct x86_64_pmc_startstop_args *)); +int x86_64_pmc_read __P((struct x86_64_pmc_read_args *)); +int sysarch __P((int, void *)); +#endif + +#endif /* !_X86_64_SYSARCH_H_ */ +#endif /* 0 */ diff --git a/sys/arch/x86_64/include/trap.h b/sys/arch/x86_64/include/trap.h new file mode 100644 index 000000000000..ed3798faa15a --- /dev/null +++ b/sys/arch/x86_64/include/trap.h @@ -0,0 +1,7 @@ +/* $NetBSD: trap.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +#ifdef _KERNEL +#include +#else +#include +#endif diff --git a/sys/arch/x86_64/include/tss.h b/sys/arch/x86_64/include/tss.h new file mode 100644 index 000000000000..621baf66be9d --- /dev/null +++ b/sys/arch/x86_64/include/tss.h @@ -0,0 +1,56 @@ +/* $NetBSD: tss.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * TSS structure. Since TSS hw switching is not supported in long + * mode, this is mainly there for the I/O permission map in + * normal processes. + */ + +struct x86_64_tss { + u_int32_t tss_reserved1; + u_int64_t tss_rsp0; + u_int64_t tss_rsp1; + u_int64_t tss_rsp3; + u_int32_t tss_reserved2; + u_int32_t tss_reserved3; + u_int64_t tss_ist[7]; + u_int32_t tss_reserved4; + u_int32_t tss_reserved5; + u_int16_t tss_reserved6; + u_int16_t tss_iobase; +} __attribute__((packed)); diff --git a/sys/arch/x86_64/include/types.h b/sys/arch/x86_64/include/types.h new file mode 100644 index 000000000000..a9f3c2c61c35 --- /dev/null +++ b/sys/arch/x86_64/include/types.h @@ -0,0 +1,70 @@ +/* $NetBSD: types.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
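/*
 * Illustrative sketch (not part of the original submission): a quick
 * consistency check of the TSS layout above.  The long-mode TSS is 104
 * bytes with the I/O permission bitmap base at offset 102 (the hardware
 * manuals name the three ring stacks RSP0-RSP2), which is exactly what
 * the packed structure should yield.  The struct is mirrored locally so
 * the check builds standalone.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct x86_64_tss {
    uint32_t tss_reserved1;
    uint64_t tss_rsp0;
    uint64_t tss_rsp1;
    uint64_t tss_rsp3;
    uint32_t tss_reserved2;
    uint32_t tss_reserved3;
    uint64_t tss_ist[7];
    uint32_t tss_reserved4;
    uint32_t tss_reserved5;
    uint16_t tss_reserved6;
    uint16_t tss_iobase;
} __attribute__((packed));

int
main(void)
{
    printf("sizeof(struct x86_64_tss) = %zu (expect 104)\n",
        sizeof(struct x86_64_tss));
    printf("offsetof(tss_iobase)      = %zu (expect 102)\n",
        offsetof(struct x86_64_tss, tss_iobase));
    return 0;
}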
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)types.h 7.5 (Berkeley) 3/9/91 + */ + +#ifndef _MACHTYPES_H_ +#define _MACHTYPES_H_ + +#include +#include + +#if defined(_KERNEL) +typedef struct label_t { + long val[8]; +} label_t; +#endif + +/* NB: This should probably be if defined(_KERNEL) */ +#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE) +typedef unsigned long paddr_t; +typedef unsigned long psize_t; +typedef unsigned long vaddr_t; +typedef unsigned long vsize_t; +#endif + +typedef long int register_t; +typedef int register32_t; + +/* The x86-64 does not have strict alignment requirements. */ +#define __NO_STRICT_ALIGNMENT + +#define __HAVE_DEVICE_REGISTER +#define __HAVE_NWSCONS +#define __HAVE_CPU_COUNTER +#define __HAVE_SYSCALL_INTERN +#define __HAVE_MINIMAL_EMUL + +#endif /* _MACHTYPES_H_ */ diff --git a/sys/arch/x86_64/include/userret.h b/sys/arch/x86_64/include/userret.h new file mode 100644 index 000000000000..3617c05e4074 --- /dev/null +++ b/sys/arch/x86_64/include/userret.h @@ -0,0 +1,97 @@ +/* $NetBSD: userret.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/* + * XXXfvdl same as i386 counterpart, but should probably be independent. + */ + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
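/*
 * Illustrative sketch (not part of the original submission): types.h
 * above relies on the LP64 model, where plain and unsigned long are 64
 * bits wide, so paddr_t/vaddr_t/register_t span the whole 64-bit
 * address space while register32_t remains 32 bits wide.  A trivial
 * standalone check:
 */
#include <stdio.h>

typedef unsigned long	paddr_t;	/* mirrored from types.h above */
typedef unsigned long	vaddr_t;
typedef long int	register_t;
typedef int		register32_t;

int
main(void)
{
    printf("paddr_t %zu, vaddr_t %zu, register_t %zu, register32_t %zu bytes\n",
        sizeof(paddr_t), sizeof(vaddr_t),
        sizeof(register_t), sizeof(register32_t));
    return 0;
}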
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +static __inline void userret __P((register struct proc *)); + +/* + * Define the code needed before returning to user mode, for + * trap and syscall. + */ +static __inline void +userret(p) + register struct proc *p; +{ + int sig; + + /* Take pending signals. */ + while ((sig = CURSIG(p)) != 0) + postsig(sig); + + curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri; +} diff --git a/sys/arch/x86_64/include/varargs.h b/sys/arch/x86_64/include/varargs.h new file mode 100644 index 000000000000..4260db6b4ca7 --- /dev/null +++ b/sys/arch/x86_64/include/varargs.h @@ -0,0 +1,56 @@ +/* $NetBSD: varargs.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)varargs.h 8.2 (Berkeley) 3/22/94 + */ + +#ifndef _X86_64_VARARGS_H_ +#define _X86_64_VARARGS_H_ + +#include + +#define __va_ellipsis ... +#define __va_alist_t __builtin_va_alist_t +#define va_alist __builtin_va_alist +#define va_dcl __va_alist_t __builtin_va_alist; __va_ellipsis + +#undef va_start +#define va_start(ap) __builtin_varargs_start((ap)) + +#endif /* !_X86_64_VARARGS_H_ */ diff --git a/sys/arch/x86_64/include/vmparam.h b/sys/arch/x86_64/include/vmparam.h new file mode 100644 index 000000000000..6dc273183ab4 --- /dev/null +++ b/sys/arch/x86_64/include/vmparam.h @@ -0,0 +1,152 @@ +/* $NetBSD: vmparam.h,v 1.1 2001/06/19 00:20:13 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
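/*
 * Illustrative sketch (not part of the original submission): the
 * stdarg.h/varargs.h headers above only map the standard macros onto
 * the gcc builtins; ordinary code uses the portable <stdarg.h>
 * interface, e.g. a sum() taking a count followed by that many ints:
 */
#include <stdarg.h>
#include <stdio.h>

static int
sum(int count, ...)
{
    va_list ap;
    int i, total = 0;

    va_start(ap, count);
    for (i = 0; i < count; i++)
        total += va_arg(ap, int);
    va_end(ap);
    return total;
}

int
main(void)
{
    printf("%d\n", sum(4, 1, 2, 3, 4));		/* prints 10 */
    return 0;
}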
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vmparam.h 5.9 (Berkeley) 5/12/91 + */ + +#ifndef _VMPARAM_H_ +#define _VMPARAM_H_ + +/* + * Machine dependent constants for 386. + */ + +/* + * Page size on the x86-64 is not variable in the traditional sense. + * We override the PAGE_* definitions to compile-time constants. + */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (PAGE_SIZE - 1) + +/* + * USRTEXT is the start of the user text/data space, while USRSTACK + * is the top (end) of the user stack. Immediately above the user stack + * resides the user structure, which is UPAGES long and contains the + * kernel stack. + * + * Immediately after the user structure is the page table map, and then + * kernal address space. + */ +#define USRTEXT NBPG +#define USRSTACK VM_MAXUSER_ADDRESS + +#define USRSTACK32 VM_MAXUSER_ADDRESS32 + +/* + * Virtual memory related constants, all in bytes + */ +#define MAXTSIZ (64*1024*1024) /* max text size */ +#ifndef DFLDSIZ +#define DFLDSIZ (128*1024*1024) /* initial data size limit */ +#endif +#ifndef MAXDSIZ +#define MAXDSIZ (1*1024*1024*1024) /* max data size */ +#endif +#ifndef DFLSSIZ +#define DFLSSIZ (2*1024*1024) /* initial stack size limit */ +#endif +#ifndef MAXSSIZ +#define MAXSSIZ (32*1024*1024) /* max stack size */ +#endif + +/* + * Size of shared memory map + */ +#ifndef SHMMAXPGS +#define SHMMAXPGS 2048 +#endif + +/* + * Size of User Raw I/O map + */ +#define USRIOSIZE 300 + +/* + * The time for a process to be blocked before being very swappable. + * This is a number of seconds which the system takes as being a non-trivial + * amount of real time. You probably shouldn't change this; + * it is used in subtle ways (fractions and multiples of it are, that is, like + * half of a ``long time'', almost a long time, etc.) + * It is related to human patience and other factors which don't really + * change over time. + */ +#define MAXSLP 20 + +/* + * Mach derived constants + */ + +/* user/kernel map constants */ +#define VM_MIN_ADDRESS 0 +#define VM_MAXUSER_ADDRESS 0x00007f7fffffc000 +#define VM_MAX_ADDRESS 0x00007fbfdfeff000 +#define VM_MIN_KERNEL_ADDRESS 0xffff800000000000 +#define VM_MAX_KERNEL_ADDRESS 0xffff800080000000 + +#define VM_MAXUSER_ADDRESS32 0xffffc000 + +/* + * XXXfvdl we have plenty of KVM now, remove this. 
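/*
 * Illustrative sketch (not part of the original submission): what the
 * address-space constants above amount to.  The user map runs from 0 to
 * VM_MAXUSER_ADDRESS (a bit under 128 TB of the 48-bit space) and the
 * initial kernel map is a 2 GB window starting at 0xffff800000000000.
 * Constants mirrored locally.
 */
#include <stdio.h>
#include <stdint.h>

#define VM_MAXUSER_ADDRESS	0x00007f7fffffc000ULL
#define VM_MIN_KERNEL_ADDRESS	0xffff800000000000ULL
#define VM_MAX_KERNEL_ADDRESS	0xffff800080000000ULL

int
main(void)
{
    uint64_t user = VM_MAXUSER_ADDRESS;
    uint64_t kern = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    printf("user VA range:   %llu bytes (~%llu GB)\n",
        (unsigned long long)user, (unsigned long long)(user >> 30));
    printf("kernel VA range: %llu bytes (%llu MB)\n",
        (unsigned long long)kern, (unsigned long long)(kern >> 20));
    return 0;
}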
+ */ +#ifndef VM_MAX_KERNEL_BUF +#define VM_MAX_KERNEL_BUF \ + ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 1024 * 7 / 10 * 1024) +#endif + +/* virtual sizes (bytes) for various kernel submaps */ +#define VM_PHYS_SIZE (USRIOSIZE*NBPG) + +#define VM_PHYSSEG_MAX 5 /* 1 "hole" + 4 free lists */ +#define VM_PHYSSEG_STRAT VM_PSTRAT_BIGFIRST +#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */ + +#define VM_NFREELIST 2 +#define VM_FREELIST_DEFAULT 0 +#define VM_FREELIST_FIRST16 1 + +#define __HAVE_PMAP_PHYSSEG + +/* + * pmap specific data stored in the vm_physmem[] array + */ +struct pmap_physseg { + struct pv_head *pvhead; /* pv_head array */ + char *attrs; /* attrs array */ +}; + +#endif /* _VMPARAM_H_ */ diff --git a/sys/arch/x86_64/isa/clock.c b/sys/arch/x86_64/isa/clock.c new file mode 100644 index 000000000000..3bab30f3f7af --- /dev/null +++ b/sys/arch/x86_64/isa/clock.c @@ -0,0 +1,845 @@ +/* $NetBSD: clock.c,v 1.1 2001/06/19 00:20:32 fvdl Exp $ */ + +/*- + * Copyright (c) 1993, 1994 Charles M. Hannum. + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz and Don Ahn. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clock.c 7.2 (Berkeley) 5/12/91 + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appears in all +copies and that both the copyright notice and this permission notice +appear in supporting documentation, and that the name of Intel +not be used in advertising or publicity pertaining to distribution +of the software without specific, written prior permission. + +INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, +IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, +NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +/* #define CLOCKDEBUG */ +/* #define CLOCK_PARANOIA */ + +/* + * XXXfvdl this file can be shared with the i386 port. + * but, maybe the actual hardware will not have this chip. + */ + +/* + * Primitive clock interrupt routines. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "pcppi.h" +#if (NPCPPI > 0) +#include + +#include "mca.h" +#if NMCA > 0 +#include /* for MCA_system */ +#endif + +#ifdef CLOCKDEBUG +int clock_debug = 0; +#define DPRINTF(arg) if (clock_debug) printf arg +#else +#define DPRINTF(arg) +#endif + +int sysbeepmatch __P((struct device *, struct cfdata *, void *)); +void sysbeepattach __P((struct device *, struct device *, void *)); + +struct cfattach sysbeep_ca = { + sizeof(struct device), sysbeepmatch, sysbeepattach +}; + +static int ppi_attached; +static pcppi_tag_t ppicookie; +#endif /* PCPPI */ + +void spinwait __P((int)); +int clockintr __P((void *)); +int gettick __P((void)); +void sysbeep __P((int, int)); +void rtcinit __P((void)); +int rtcget __P((mc_todregs *)); +void rtcput __P((mc_todregs *)); +int bcdtobin __P((int)); +int bintobcd __P((int)); + +static void check_clock_bug __P((void)); +static inline int gettick_broken_latch __P((void)); + + +__inline u_int mc146818_read __P((void *, u_int)); +__inline void mc146818_write __P((void *, u_int, u_int)); + +__inline u_int +mc146818_read(sc, reg) + void *sc; /* XXX use it? */ + u_int reg; +{ + + outb(IO_RTC, reg); + return (inb(IO_RTC+1)); +} + +__inline void +mc146818_write(sc, reg, datum) + void *sc; /* XXX use it? 
*/ + u_int reg, datum; +{ + + outb(IO_RTC, reg); + outb(IO_RTC+1, datum); +} + +static u_long rtclock_tval; +static int clock_broken_latch = 0; + +#ifdef CLOCK_PARANOIA +static int ticks[6]; +#endif + +/* + * i8254 latch check routine: + * National Geode (formerly Cyrix MediaGX) has a serious bug in + * its built-in i8254-compatible clock module. + * Set the variable 'clock_broken_latch' to indicate it. + * XXX check only cpu_id + */ +static void +check_clock_bug() +{ + extern int cpu_id; + + switch (cpu_id) { + case 0x440: /* Cyrix MediaGX */ + case 0x540: /* GXm */ + clock_broken_latch = 1; + break; + default: + clock_broken_latch = 0; + break; + } +} + +int +gettick_broken_latch() +{ + u_long ef; + int v1, v2, v3; + int w1, w2, w3; + + /* Don't want someone screwing with the counter + while we're here. */ + ef = read_eflags(); + disable_intr(); + + v1 = inb(TIMER_CNTR0); + v1 |= inb(TIMER_CNTR0) << 8; + v2 = inb(TIMER_CNTR0); + v2 |= inb(TIMER_CNTR0) << 8; + v3 = inb(TIMER_CNTR0); + v3 |= inb(TIMER_CNTR0) << 8; + + write_eflags(ef); + +#ifdef CLOCK_PARANOIA + if (clock_debug) { + ticks[0] = ticks[3]; + ticks[1] = ticks[4]; + ticks[2] = ticks[5]; + ticks[3] = v1; + ticks[4] = v2; + ticks[5] = v3; + } +#endif + + if (v1 >= v2 && v2 >= v3 && v1 - v3 < 0x200) + return (v2); + +#define _swap_val(a, b) do { \ + int c = a; \ + a = b; \ + b = c; \ +} while (0) + + /* + * sort v1 v2 v3 + */ + if (v1 < v2) + _swap_val(v1, v2); + if (v2 < v3) + _swap_val(v2, v3); + if (v1 < v2) + _swap_val(v1, v2); + + /* + * compute the middle value + */ + + if (v1 - v3 < 0x200) + return (v2); + + w1 = v2 - v3; + w2 = v3 - v1 + rtclock_tval; + w3 = v1 - v2; + if (w1 >= w2) { + if (w1 >= w3) + return (v1); + } else { + if (w2 >= w3) + return (v2); + } + return (v3); +} + +/* minimal initialization, enough for delay() */ +void +initrtclock() +{ + u_long tval; + + /* + * Compute timer_count, the count-down count the timer will be + * set to. Also, correctly round + * this by carrying an extra bit through the division. + */ + tval = (TIMER_FREQ * 2) / (u_long) hz; + tval = (tval / 2) + (tval & 0x1); + + /* initialize 8253 clock */ + outb(TIMER_MODE, TIMER_SEL0|TIMER_RATEGEN|TIMER_16BIT); + + /* Correct rounding will buy us a better precision in timekeeping */ + outb(IO_TIMER1, tval % 256); + outb(IO_TIMER1, tval / 256); + + rtclock_tval = tval; + + check_clock_bug(); +} + +/* + * microtime() makes use of the following globals. Note that isa_timer_tick + * may be redundant to the `tick' variable, but is kept here for stability. + * isa_timer_count is the countdown count for the timer. timer_msb_table[] + * and timer_lsb_table[] are used to compute the microsecond increment + * for time.tv_usec in the follow fashion: + * + * time.tv_usec += isa_timer_msb_table[cnt_msb] - isa_timer_lsb_table[cnt_lsb]; + */ +#define ISA_TIMER_MSB_TABLE_SIZE 128 + +u_long isa_timer_tick; /* the number of microseconds in a tick */ +u_short isa_timer_count; /* the countdown count for the timer */ +u_short isa_timer_msb_table[ISA_TIMER_MSB_TABLE_SIZE]; /* timer->usec MSB */ +u_short isa_timer_lsb_table[256]; /* timer->usec conversion for LSB */ + +void +startrtclock() +{ + int s; + u_long tval; + u_long t, msb, lsb, quotient, remainder; + + if (!rtclock_tval) + initrtclock(); + + /* + * Compute timer_tick from hz. We truncate this value (i.e. + * round down) to minimize the possibility of a backward clock + * step if hz is not a nice number. 
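/*
 * Illustrative sketch (not part of the original submission): the count
 * initrtclock() above programs into counter 0 is TIMER_FREQ/hz rounded
 * to nearest, using the double-then-halve trick.  With the usual i8254
 * input clock of 1193182 Hz (TIMER_FREQ lives in the ISA timer headers,
 * not shown in this hunk; the value here is an assumption) and hz = 100
 * this gives 11932 instead of the truncated 11931:
 */
#include <stdio.h>

#define TIMER_FREQ	1193182		/* assumed i8254 input clock, Hz */

int
main(void)
{
    unsigned long hz = 100;
    unsigned long tval = (TIMER_FREQ * 2) / hz;	/* 23863 */

    tval = (tval / 2) + (tval & 0x1);		/* 11931 + 1 = 11932 */
    printf("hz=%lu -> count %lu (%.3f Hz actual)\n",
        hz, tval, (double)TIMER_FREQ / tval);
    return 0;
}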
+ */ + isa_timer_tick = 1000000 / (u_long) hz; + + /* + * We can't stand any number with an MSB larger than + * TIMER_MSB_TABLE_SIZE will accomodate. + */ + tval = rtclock_tval; + if ((tval / 256) >= ISA_TIMER_MSB_TABLE_SIZE + || TIMER_FREQ > (8*1024*1024)) { + panic("startrtclock: TIMER_FREQ/HZ unsupportable"); + } + isa_timer_count = (u_short) tval; + + /* + * Now compute the translation tables from timer ticks to + * microseconds. We go to some length to ensure all values + * are rounded-to-nearest (i.e. +-0.5 of the exact values) + * as this will ensure the computation + * + * isa_timer_msb_table[msb] - isa_timer_lsb_table[lsb] + * + * will produce a result which is +-1 usec away from the + * correctly rounded conversion (in fact, it'll be exact about + * 75% of the time, 1 too large 12.5% of the time, and 1 too + * small 12.5% of the time). + */ + for (s = 0; s < 64; s++) { + /* LSB table is easy, just divide and round */ + t = ((u_long) s * 1000000 * 2) / TIMER_FREQ; + isa_timer_lsb_table[s] = (u_short) ((t / 2) + (t & 0x1)); + + /* MSB table is zero unless the MSB is <= isa_timer_count */ + if (s < ISA_TIMER_MSB_TABLE_SIZE) { + msb = ((u_long) s) * 256; + if (msb > tval) { + isa_timer_msb_table[s] = 0; + } else { + /* + * Harder computation here, since multiplying + * the value by 1000000 can overflow a long. + * To avoid 64-bit computations we divide + * the high order byte and the low order + * byte of the numerator separately, adding + * the remainder of the first computation + * into the second. The constraint on + * TIMER_FREQ above should prevent overflow + * here. + * + * XXXfvdl fix this if this is the clock.c + * that we'll be using. + */ + msb = tval - msb; + lsb = msb % 256; + msb = (msb / 256) * 1000000; + quotient = msb / TIMER_FREQ; + remainder = msb % TIMER_FREQ; + t = ((remainder * 256 * 2) + + (lsb * 1000000 * 2)) / TIMER_FREQ; + isa_timer_msb_table[s] = (u_short)((t / 2) + + (t & 0x1) + (quotient * 256)); + } + } + } + + /* Check diagnostic status */ + if ((s = mc146818_read(NULL, NVRAM_DIAG)) != 0) { /* XXX softc */ + char bits[128]; + printf("RTC BIOS diagnostic error %s\n", + bitmask_snprintf(s, NVRAM_DIAG_BITS, bits, sizeof(bits))); + } +} + +int +clockintr(arg) + void *arg; +{ + struct clockframe *frame = arg; /* not strictly necessary */ + + hardclock(frame); + + return -1; +} + +int +gettick() +{ + u_long ef; + u_char lo, hi; + + if (clock_broken_latch) + return (gettick_broken_latch()); + + /* Don't want someone screwing with the counter while we're here. */ + ef = read_eflags(); + disable_intr(); + /* Select counter 0 and latch it. */ + outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); + lo = inb(TIMER_CNTR0); + hi = inb(TIMER_CNTR0); + write_eflags(ef); + return ((hi << 8) | lo); +} + +/* + * Wait approximately `n' microseconds. + * Relies on timer 1 counting down from (TIMER_FREQ / hz) at TIMER_FREQ Hz. + * Note: timer had better have been programmed before this is first used! + * (Note that we use `rate generator' mode, which counts at 1:1; `square + * wave' mode counts at 2:1). + * Don't rely on this being particularly accurate. + */ +void +delay(n) + int n; +{ + int tick, otick; + static const int delaytab[26] = { + 0, 2, 3, 4, 5, 6, 7, 9, 10, 11, + 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, + 24, 25, 27, 28, 29, 30, + }; + + /* allow DELAY() to be used before startrtclock() */ + if (!rtclock_tval) + initrtclock(); + + /* + * Read the counter first, so that the rest of the setup overhead is + * counted. 
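+ * The requested delay is then converted from microseconds to 8254 counts
+ * below; for instance, assuming the usual TIMER_FREQ of 1193182 Hz, a
+ * delay(1000) call spins for roughly 1193 counter ticks before returning.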
+ */ + otick = gettick(); + + if (n <= 25) + n = delaytab[n]; + else { +#ifdef __GNUC__ + /* + * Calculate ((n * TIMER_FREQ) / 1e6) using explicit assembler + * code so we can take advantage of the intermediate 64-bit + * quantity to prevent loss of significance. + */ + int m; + __asm __volatile("mul %3" + : "=a" (n), "=d" (m) + : "0" (n), "r" (TIMER_FREQ)); + __asm __volatile("div %4" + : "=a" (n), "=d" (m) + : "0" (n), "1" (m), "r" (1000000)); +#else + /* + * Calculate ((n * TIMER_FREQ) / 1e6) without using floating + * point and without any avoidable overflows. + */ + int sec = n / 1000000, + usec = n % 1000000; + n = sec * TIMER_FREQ + + usec * (TIMER_FREQ / 1000000) + + usec * ((TIMER_FREQ % 1000000) / 1000) / 1000 + + usec * (TIMER_FREQ % 1000) / 1000000; +#endif + } + + while (n > 0) { +#ifdef CLOCK_PARANOIA + int delta; + tick = gettick(); + if (tick > otick) + delta = rtclock_tval - (tick - otick); + else + delta = otick - tick; + if (delta < 0 || delta >= rtclock_tval / 2) { + DPRINTF(("delay: ignore ticks %.4x-%.4x", + otick, tick)); + if (clock_broken_latch) { + DPRINTF((" (%.4x %.4x %.4x %.4x %.4x %.4x)\n", + ticks[0], ticks[1], ticks[2], + ticks[3], ticks[4], ticks[5])); + } else { + DPRINTF(("\n")); + } + } else + n -= delta; +#else + tick = gettick(); + if (tick > otick) + n -= rtclock_tval - (tick - otick); + else + n -= otick - tick; +#endif + otick = tick; + } +} + +#if (NPCPPI > 0) +int +sysbeepmatch(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + return (!ppi_attached); +} + +void +sysbeepattach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ + printf("\n"); + + ppicookie = ((struct pcppi_attach_args *)aux)->pa_cookie; + ppi_attached = 1; +} +#endif + +void +sysbeep(pitch, period) + int pitch, period; +{ +#if (NPCPPI > 0) + if (ppi_attached) + pcppi_bell(ppicookie, pitch, period, 0); +#endif +} + +void +cpu_initclocks() +{ + + /* + * XXX If you're doing strange things with multiple clocks, you might + * want to keep track of clock handlers. 
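+ * As programmed above, the i8254 fires hz interrupts per second (every
+ * 10 ms with the usual hz = 100); clockintr() then simply hands each tick
+ * to hardclock().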
+ */ + (void)isa_intr_establish(NULL, 0, IST_PULSE, IPL_CLOCK, clockintr, 0); +} + +void +rtcinit() +{ + static int first_rtcopen_ever = 1; + + if (!first_rtcopen_ever) + return; + first_rtcopen_ever = 0; + + mc146818_write(NULL, MC_REGA, /* XXX softc */ + MC_BASE_32_KHz | MC_RATE_1024_Hz); + mc146818_write(NULL, MC_REGB, MC_REGB_24HR); /* XXX softc */ +} + +int +rtcget(regs) + mc_todregs *regs; +{ + + rtcinit(); + if ((mc146818_read(NULL, MC_REGD) & MC_REGD_VRT) == 0) /* XXX softc */ + return (-1); + MC146818_GETTOD(NULL, regs); /* XXX softc */ + return (0); +} + +void +rtcput(regs) + mc_todregs *regs; +{ + + rtcinit(); + MC146818_PUTTOD(NULL, regs); /* XXX softc */ +} + +int +bcdtobin(n) + int n; +{ + + return (((n >> 4) & 0x0f) * 10 + (n & 0x0f)); +} + +int +bintobcd(n) + int n; +{ + + return ((u_char)(((n / 10) << 4) & 0xf0) | ((n % 10) & 0x0f)); +} + +static int timeset; + +/* + * check whether the CMOS layout is "standard"-like (ie, not PS/2-like), + * to be called at splclock() + */ +static int cmoscheck __P((void)); +static int +cmoscheck() +{ + int i; + unsigned short cksum = 0; + + for (i = 0x10; i <= 0x2d; i++) + cksum += mc146818_read(NULL, i); /* XXX softc */ + + return (cksum == (mc146818_read(NULL, 0x2e) << 8) + + mc146818_read(NULL, 0x2f)); +} + +/* + * patchable to control century byte handling: + * 1: always update + * -1: never touch + * 0: try to figure out itself + */ +int rtc_update_century = 0; + +/* + * Expand a two-digit year as read from the clock chip + * into full width. + * Being here, deal with the CMOS century byte. + */ +static int clock_expandyear __P((int)); +static int +clock_expandyear(clockyear) + int clockyear; +{ + int s, clockcentury, cmoscentury; + + clockcentury = (clockyear < 70) ? 20 : 19; + clockyear += 100 * clockcentury; + + if (rtc_update_century < 0) + return (clockyear); + + s = splclock(); + if (cmoscheck()) + cmoscentury = mc146818_read(NULL, NVRAM_CENTURY); + else + cmoscentury = 0; + splx(s); + if (!cmoscentury) { +#ifdef DIAGNOSTIC + printf("clock: unknown CMOS layout\n"); +#endif + return (clockyear); + } + cmoscentury = bcdtobin(cmoscentury); + + if (cmoscentury != clockcentury) { + /* XXX note: saying "century is 20" might confuse the naive. */ + printf("WARNING: NVRAM century is %d but RTC year is %d\n", + cmoscentury, clockyear); + + /* Kludge to roll over century. */ + if ((rtc_update_century > 0) || + ((cmoscentury == 19) && (clockcentury == 20) && + (clockyear == 2000))) { + printf("WARNING: Setting NVRAM century to %d\n", + clockcentury); + s = splclock(); + mc146818_write(NULL, NVRAM_CENTURY, + bintobcd(clockcentury)); + splx(s); + } + } else if (cmoscentury == 19 && rtc_update_century == 0) + rtc_update_century = 1; /* will update later in resettodr() */ + + return (clockyear); +} + +/* + * Initialize the time of day register, based on the time base which is, e.g. + * from a filesystem. + */ +void +inittodr(base) + time_t base; +{ + mc_todregs rtclk; + struct clock_ymdhms dt; + int s; + + /* + * We mostly ignore the suggested time and go for the RTC clock time + * stored in the CMOS RAM. If the time can't be obtained from the + * CMOS, or if the time obtained from the CMOS is 5 or more years + * less than the suggested time, we used the suggested time. (In + * the latter case, it's likely that the CMOS battery has died.) + */ + + if (base < 25*SECYR) { /* if before 1995, something's odd... 
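+ * (time_t counts seconds from 1970, so 25*SECYR is roughly the start of
+ * 1995, and the fallback base set below, 27*SECYR + 186*SECDAY + SECDAY/2,
+ * lands around the middle of 1997.)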
*/ + printf("WARNING: preposterous time in file system\n"); + /* read the system clock anyway */ + base = 27*SECYR + 186*SECDAY + SECDAY/2; + } + + s = splclock(); + if (rtcget(&rtclk)) { + splx(s); + printf("WARNING: invalid time in clock chip\n"); + goto fstime; + } + splx(s); +#ifdef DEBUG_CLOCK + printf("readclock: %x/%x/%x %x:%x:%x\n", rtclk[MC_YEAR], + rtclk[MC_MONTH], rtclk[MC_DOM], rtclk[MC_HOUR], rtclk[MC_MIN], + rtclk[MC_SEC]); +#endif + + dt.dt_sec = bcdtobin(rtclk[MC_SEC]); + dt.dt_min = bcdtobin(rtclk[MC_MIN]); + dt.dt_hour = bcdtobin(rtclk[MC_HOUR]); + dt.dt_day = bcdtobin(rtclk[MC_DOM]); + dt.dt_mon = bcdtobin(rtclk[MC_MONTH]); + dt.dt_year = clock_expandyear(bcdtobin(rtclk[MC_YEAR])); + + /* + * If time_t is 32 bits, then the "End of Time" is + * Mon Jan 18 22:14:07 2038 (US/Eastern) + * This code copes with RTC's past the end of time if time_t + * is an int32 or less. Needed because sometimes RTCs screw + * up or are badly set, and that would cause the time to go + * negative in the calculation below, which causes Very Bad + * Mojo. This at least lets the user boot and fix the problem. + * Note the code is self eliminating once time_t goes to 64 bits. + */ + if (sizeof(time_t) <= sizeof(int32_t)) { + if (dt.dt_year >= 2038) { + printf("WARNING: RTC time at or beyond 2038.\n"); + dt.dt_year = 2037; + printf("WARNING: year set back to 2037.\n"); + printf("WARNING: CHECK AND RESET THE DATE!\n"); + } + } + + time.tv_sec = clock_ymdhms_to_secs(&dt) + rtc_offset * 60; +#ifdef DEBUG_CLOCK + printf("readclock: %ld (%ld)\n", time.tv_sec, base); +#endif + + if (base < time.tv_sec - 5*SECYR) + printf("WARNING: file system time much less than clock time\n"); + else if (base > time.tv_sec + 5*SECYR) { + printf("WARNING: clock time much less than file system time\n"); + printf("WARNING: using file system time\n"); + goto fstime; + } + + timeset = 1; + return; + +fstime: + timeset = 1; + time.tv_sec = base; + printf("WARNING: CHECK AND RESET THE DATE!\n"); +} + +/* + * Reset the clock. + */ +void +resettodr() +{ + mc_todregs rtclk; + struct clock_ymdhms dt; + int century; + int s; + + /* + * We might have been called by boot() due to a crash early + * on. Don't reset the clock chip in this case. + */ + if (!timeset) + return; + + s = splclock(); + if (rtcget(&rtclk)) + memset(&rtclk, 0, sizeof(rtclk)); + splx(s); + + clock_secs_to_ymdhms(time.tv_sec - rtc_offset * 60, &dt); + + rtclk[MC_SEC] = bintobcd(dt.dt_sec); + rtclk[MC_MIN] = bintobcd(dt.dt_min); + rtclk[MC_HOUR] = bintobcd(dt.dt_hour); + rtclk[MC_DOW] = dt.dt_wday; + rtclk[MC_YEAR] = bintobcd(dt.dt_year % 100); + rtclk[MC_MONTH] = bintobcd(dt.dt_mon); + rtclk[MC_DOM] = bintobcd(dt.dt_day); + +#ifdef DEBUG_CLOCK + printf("setclock: %x/%x/%x %x:%x:%x\n", rtclk[MC_YEAR], rtclk[MC_MONTH], + rtclk[MC_DOM], rtclk[MC_HOUR], rtclk[MC_MIN], rtclk[MC_SEC]); +#endif + s = splclock(); + rtcput(&rtclk); + if (rtc_update_century > 0) { + century = bintobcd(dt.dt_year / 100); + mc146818_write(NULL, NVRAM_CENTURY, century); /* XXX softc */ + } + splx(s); +} + +void +setstatclockrate(arg) + int arg; +{ +} diff --git a/sys/arch/x86_64/isa/isa_machdep.c b/sys/arch/x86_64/isa/isa_machdep.c new file mode 100644 index 000000000000..360648e6bb8c --- /dev/null +++ b/sys/arch/x86_64/isa/isa_machdep.c @@ -0,0 +1,1184 @@ +/* $NetBSD: isa_machdep.c,v 1.1 2001/06/19 00:20:32 fvdl Exp $ */ + +#define ISA_DMA_STATS + +/*- + * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. 
+ * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace + * Simulation Facility, NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)isa.c 7.2 (Berkeley) 5/13/91 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define _X86_64_BUS_DMA_PRIVATE +#include + +#include +#include + +#include +#include +#include + +#include + +/* + * ISA can only DMA to 0-16M. + */ +#define ISA_DMA_BOUNCE_THRESHOLD (16 * 1024 * 1024) + +extern paddr_t avail_end; + +#define IDTVEC(name) __CONCAT(X,name) +typedef void (vector) __P((void)); +extern vector *IDTVEC(intr)[]; +void isa_strayintr __P((int)); +void intr_calculatemasks __P((void)); +static int fakeintr __P((void *)); + +/* + * Cookie used by ISA dma. A pointer to one of these it stashed in + * the DMA map. + */ +struct x86_64_isa_dma_cookie { + int id_flags; /* flags; see below */ + + /* + * Information about the original buffer used during + * DMA map syncs. Note that origibuflen is only used + * for ID_BUFTYPE_LINEAR. + */ + void *id_origbuf; /* pointer to orig buffer if + bouncing */ + bus_size_t id_origbuflen; /* ...and size */ + int id_buftype; /* type of buffer */ + + void *id_bouncebuf; /* pointer to the bounce buffer */ + bus_size_t id_bouncebuflen; /* ...and size */ + int id_nbouncesegs; /* number of valid bounce segs */ + bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer + physical memory segments */ +}; + +/* id_flags */ +#define ID_MIGHT_NEED_BOUNCE 0x01 /* map could need bounce buffers */ +#define ID_HAS_BOUNCE 0x02 /* map currently has bounce buffers */ +#define ID_IS_BOUNCING 0x04 /* map is bouncing current xfer */ + +/* id_buftype */ +#define ID_BUFTYPE_INVALID 0 +#define ID_BUFTYPE_LINEAR 1 +#define ID_BUFTYPE_MBUF 2 +#define ID_BUFTYPE_UIO 3 +#define ID_BUFTYPE_RAW 4 + +int _isa_bus_dmamap_create __P((bus_dma_tag_t, bus_size_t, int, + bus_size_t, bus_size_t, int, bus_dmamap_t *)); +void _isa_bus_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t)); +int _isa_bus_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int)); +int _isa_bus_dmamap_load_mbuf __P((bus_dma_tag_t, bus_dmamap_t, + struct mbuf *, int)); +int _isa_bus_dmamap_load_uio __P((bus_dma_tag_t, bus_dmamap_t, + struct uio *, int)); +int _isa_bus_dmamap_load_raw __P((bus_dma_tag_t, bus_dmamap_t, + bus_dma_segment_t *, int, bus_size_t, int)); +void _isa_bus_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t)); +void _isa_bus_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, + bus_addr_t, bus_size_t, int)); + +int _isa_bus_dmamem_alloc __P((bus_dma_tag_t, bus_size_t, bus_size_t, + bus_size_t, bus_dma_segment_t *, int, int *, int)); + +int _isa_dma_alloc_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t, + bus_size_t, int)); +void _isa_dma_free_bouncebuf __P((bus_dma_tag_t, bus_dmamap_t)); + +/* + * Entry points for ISA DMA. These are mostly wrappers around + * the generic functions that understand how to deal with bounce + * buffers, if necessary. 
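+ *
+ * A driver is expected to use them in the usual bus_dma(9) order, roughly:
+ * bus_dmamap_create(), bus_dmamap_load(), bus_dmamap_sync() with the PRE
+ * ops before starting a transfer and with the POST ops once it completes,
+ * then bus_dmamap_unload() and bus_dmamap_destroy(). The _isa_ wrappers
+ * below transparently substitute a bounce buffer under 16M whenever the
+ * caller's pages sit above the ISA DMA limit.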
+ */ +struct x86_64_bus_dma_tag isa_bus_dma_tag = { + ISA_DMA_BOUNCE_THRESHOLD, + _isa_bus_dmamap_create, + _isa_bus_dmamap_destroy, + _isa_bus_dmamap_load, + _isa_bus_dmamap_load_mbuf, + _isa_bus_dmamap_load_uio, + _isa_bus_dmamap_load_raw, + _isa_bus_dmamap_unload, + _isa_bus_dmamap_sync, + _isa_bus_dmamem_alloc, + _bus_dmamem_free, + _bus_dmamem_map, + _bus_dmamem_unmap, + _bus_dmamem_mmap, +}; + +/* + * Fill in default interrupt table (in case of spuruious interrupt + * during configuration of kernel, setup interrupt control unit + */ +void +isa_defaultirq() +{ + int i; + + /* icu vectors */ + for (i = 0; i < ICU_LEN; i++) + setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0, + SDT_SYS386IGT, SEL_KPL); + + /* initialize 8259's */ + outb(IO_ICU1, 0x11); /* reset; program device, four bytes */ + + outb(IO_ICU1+1, ICU_OFFSET); /* starting at this vector index */ + outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */ +#ifdef AUTO_EOI_1 + outb(IO_ICU1+1, 2 | 1); /* auto EOI, 8086 mode */ +#else + outb(IO_ICU1+1, 1); /* 8086 mode */ +#endif + outb(IO_ICU1+1, 0xff); /* leave interrupts masked */ + outb(IO_ICU1, 0x68); /* special mask mode (if available) */ + outb(IO_ICU1, 0x0a); /* Read IRR by default. */ +#ifdef REORDER_IRQ + outb(IO_ICU1, 0xc0 | (3 - 1)); /* pri order 3-7, 0-2 (com2 first) */ +#endif + + outb(IO_ICU2, 0x11); /* reset; program device, four bytes */ + + outb(IO_ICU2+1, ICU_OFFSET+8); /* staring at this vector index */ + outb(IO_ICU2+1, IRQ_SLAVE); +#ifdef AUTO_EOI_2 + outb(IO_ICU2+1, 2 | 1); /* auto EOI, 8086 mode */ +#else + outb(IO_ICU2+1, 1); /* 8086 mode */ +#endif + outb(IO_ICU2+1, 0xff); /* leave interrupts masked */ + outb(IO_ICU2, 0x68); /* special mask mode (if available) */ + outb(IO_ICU2, 0x0a); /* Read IRR by default. */ +} + +/* + * Handle a NMI, possibly a machine check. + * return true to panic system, false to ignore. + */ +int +isa_nmi() +{ + log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); + return(0); +} + +/* + * Caught a stray interrupt, notify + */ +void +isa_strayintr(irq) + int irq; +{ + static u_long strays; + + /* + * Stray interrupts on irq 7 occur when an interrupt line is raised + * and then lowered before the CPU acknowledges it. This generally + * means either the device is screwed or something is cli'ing too + * long and it's timing out. + */ + if (++strays <= 5) + log(LOG_ERR, "stray interrupt %d%s\n", irq, + strays >= 5 ? "; stopped logging" : ""); +} + +int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN]; +struct intrhand *intrhand[ICU_LEN]; + +/* + * Recalculate the interrupt masks from scratch. + * We could code special registry and deregistry versions of this function that + * would be faster, but the code would be nastier, and we don't expect this to + * happen very much anyway. + */ +void +intr_calculatemasks() +{ + int irq, level, unusedirqs; + struct intrhand *q; + + /* First, figure out which levels each IRQ uses. */ + unusedirqs = 0xffff; + for (irq = 0; irq < ICU_LEN; irq++) { + int levels = 0; + for (q = intrhand[irq]; q; q = q->ih_next) + levels |= 1 << q->ih_level; + intrlevel[irq] = levels; + if (levels) + unusedirqs &= ~(1 << irq); + } + + /* Then figure out which IRQs use each level. */ + for (level = 0; level < NIPL; level++) { + int irqs = 0; + for (irq = 0; irq < ICU_LEN; irq++) + if (intrlevel[irq] & (1 << level)) + irqs |= 1 << irq; + imask[level] = irqs | unusedirqs; + } + + /* + * Initialize soft interrupt masks to block themselves. 
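+ * (After the cascade of ORs below, each imask[] entry is a superset of the
+ * levels beneath it: imask[IPL_NET] contains every bit in imask[IPL_BIO],
+ * and imask[IPL_SERIAL] ends up containing them all, so raising the IPL to
+ * a given level also blocks every handler registered at a lower level.)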
+ */ + imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK; + imask[IPL_SOFTNET] = 1 << SIR_NET; + imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL; + + /* + * IPL_NONE is used for hardware interrupts that are never blocked, + * and do not block anything else. + */ + imask[IPL_NONE] = 0; + + /* + * Enforce a hierarchy that gives slow devices a better chance at not + * dropping data. + */ + imask[IPL_SOFTCLOCK] |= imask[IPL_NONE]; + imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK]; + imask[IPL_BIO] |= imask[IPL_SOFTNET]; + imask[IPL_NET] |= imask[IPL_BIO]; + imask[IPL_SOFTSERIAL] |= imask[IPL_NET]; + imask[IPL_TTY] |= imask[IPL_SOFTSERIAL]; + + /* + * There are tty, network and disk drivers that use free() at interrupt + * time, so imp > (tty | net | bio). + */ + imask[IPL_IMP] |= imask[IPL_TTY]; + + imask[IPL_AUDIO] |= imask[IPL_IMP]; + + /* + * Since run queues may be manipulated by both the statclock and tty, + * network, and disk drivers, clock > imp. + */ + imask[IPL_CLOCK] |= imask[IPL_AUDIO]; + + /* + * IPL_HIGH must block everything that can manipulate a run queue. + */ + imask[IPL_HIGH] |= imask[IPL_CLOCK]; + + /* + * We need serial drivers to run at the absolute highest priority to + * avoid overruns, so serial > high. + */ + imask[IPL_SERIAL] |= imask[IPL_HIGH]; + + /* And eventually calculate the complete masks. */ + for (irq = 0; irq < ICU_LEN; irq++) { + int irqs = 1 << irq; + for (q = intrhand[irq]; q; q = q->ih_next) + irqs |= imask[q->ih_level]; + intrmask[irq] = irqs | (1 << IPL_TAGINTR); + } + + /* Lastly, determine which IRQs are actually in use. */ + { + int irqs = 0; + for (irq = 0; irq < ICU_LEN; irq++) + if (intrhand[irq]) + irqs |= 1 << irq; + if (irqs >= 0x100) /* any IRQs >= 8 in use */ + irqs |= 1 << IRQ_SLAVE; + imen = ~irqs; + } +} + +static int +fakeintr(arg) + void *arg; +{ + + return 0; +} + +#define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN && (x) != 2) + +int +isa_intr_alloc(ic, mask, type, irq) + isa_chipset_tag_t ic; + int mask; + int type; + int *irq; +{ + int i, tmp, bestirq, count; + struct intrhand **p, *q; + + if (type == IST_NONE) + panic("intr_alloc: bogus type"); + + bestirq = -1; + count = -1; + + /* some interrupts should never be dynamically allocated */ + mask &= 0xdef8; + + /* + * XXX some interrupts will be used later (6 for fdc, 12 for pms). + * the right answer is to do "breadth-first" searching of devices. + */ + mask &= 0xefbf; + + for (i = 0; i < ICU_LEN; i++) { + if (LEGAL_IRQ(i) == 0 || (mask & (1<ih_next, tmp++) + ; + if ((bestirq == -1) || (count > tmp)) { + bestirq = i; + count = tmp; + } + break; + + case IST_PULSE: + /* this just isn't shareable */ + continue; + } + } + + if (bestirq == -1) + return (1); + + *irq = bestirq; + + return (0); +} + +const struct evcnt * +isa_intr_evcnt(isa_chipset_tag_t ic, int irq) +{ + + /* XXX for now, no evcnt parent reported */ + return NULL; +} + +/* + * Set up an interrupt handler to start being called. + * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM. + */ +void * +isa_intr_establish(ic, irq, type, level, ih_fun, ih_arg) + isa_chipset_tag_t ic; + int irq; + int type; + int level; + int (*ih_fun) __P((void *)); + void *ih_arg; +{ + struct intrhand **p, *q, *ih; + static struct intrhand fakehand = {fakeintr}; + + /* no point in sleeping unless someone can free memory. */ + ih = malloc(sizeof *ih, M_DEVBUF, cold ? 
M_NOWAIT : M_WAITOK); + if (ih == NULL) + panic("isa_intr_establish: can't malloc handler info"); + + if (!LEGAL_IRQ(irq) || type == IST_NONE) + panic("intr_establish: bogus irq or type"); + + switch (intrtype[irq]) { + case IST_NONE: + intrtype[irq] = type; + break; + case IST_EDGE: + case IST_LEVEL: + if (type == intrtype[irq]) + break; + case IST_PULSE: + if (type != IST_NONE) { + /* + * We can't share interrupts in this case. + */ +#ifdef DEBUG + printf("intr_establish: irq %d can't share %s " + "with %s\n", irq, + isa_intr_typename(intrtype[irq]), + isa_intr_typename(type)); +#endif + return (NULL); + } + break; + } + + /* + * Figure out where to put the handler. + * This is O(N^2), but we want to preserve the order, and N is + * generally small. + */ + for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next) + ; + + /* + * Actually install a fake handler momentarily, since we might be doing + * this with interrupts enabled and don't want the real routine called + * until masking is set up. + */ + fakehand.ih_level = level; + *p = &fakehand; + + intr_calculatemasks(); + + /* + * Poke the real handler in now. + */ + ih->ih_fun = ih_fun; + ih->ih_arg = ih_arg; + ih->ih_count = 0; + ih->ih_next = NULL; + ih->ih_level = level; + ih->ih_irq = irq; + *p = ih; + + SET_ICUS(); + return (ih); +} + +/* + * Deregister an interrupt handler. + */ +void +isa_intr_disestablish(ic, arg) + isa_chipset_tag_t ic; + void *arg; +{ + struct intrhand *ih = arg; + int irq = ih->ih_irq; + struct intrhand **p, *q; + + if (!LEGAL_IRQ(irq)) + panic("intr_disestablish: bogus irq"); + + /* + * Remove the handler from the chain. + * This is O(n^2), too. + */ + for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next) + ; + if (q) + *p = q->ih_next; + else + panic("intr_disestablish: handler not registered"); + free(ih, M_DEVBUF); + + intr_calculatemasks(); + SET_ICUS(); + + if (intrhand[irq] == NULL) + intrtype[irq] = IST_NONE; +} + +void +isa_attach_hook(parent, self, iba) + struct device *parent, *self; + struct isabus_attach_args *iba; +{ + extern struct x86_64_isa_chipset x86_64_isa_chipset; + extern int isa_has_been_seen; + + /* + * Notify others that might need to know that the ISA bus + * has now been attached. + */ + if (isa_has_been_seen) + panic("isaattach: ISA bus already seen!"); + isa_has_been_seen = 1; + + /* + * Since we can only have one ISA bus, we just use a single + * statically allocated ISA chipset structure. Pass it up + * now. + */ + iba->iba_ic = &x86_64_isa_chipset; +} + +int +isa_mem_alloc(t, size, align, boundary, flags, addrp, bshp) + bus_space_tag_t t; + bus_size_t size, align; + bus_addr_t boundary; + int flags; + bus_addr_t *addrp; + bus_space_handle_t *bshp; +{ + + /* + * Allocate physical address space in the ISA hole. 
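+ * (IOM_BEGIN and IOM_END conventionally delimit the 640K-1M region that
+ * PC-style machines reserve for ISA expansion ROM and frame buffer space.)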
+ */ + return (bus_space_alloc(t, IOM_BEGIN, IOM_END - 1, size, align, + boundary, flags, addrp, bshp)); +} + +void +isa_mem_free(t, bsh, size) + bus_space_tag_t t; + bus_space_handle_t bsh; + bus_size_t size; +{ + + bus_space_free(t, bsh, size); +} + +/********************************************************************** + * bus.h dma interface entry points + **********************************************************************/ + +#ifdef ISA_DMA_STATS +#define STAT_INCR(v) (v)++ +#define STAT_DECR(v) do { \ + if ((v) == 0) \ + printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \ + else \ + (v)--; \ + } while (0) +u_long isa_dma_stats_loads; +u_long isa_dma_stats_bounces; +u_long isa_dma_stats_nbouncebufs; +#else +#define STAT_INCR(v) +#define STAT_DECR(v) +#endif + +/* + * Create an ISA DMA map. + */ +int +_isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp) + bus_dma_tag_t t; + bus_size_t size; + int nsegments; + bus_size_t maxsegsz; + bus_size_t boundary; + int flags; + bus_dmamap_t *dmamp; +{ + struct x86_64_isa_dma_cookie *cookie; + bus_dmamap_t map; + int error, cookieflags; + void *cookiestore; + size_t cookiesize; + + /* Call common function to create the basic map. */ + error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, + flags, dmamp); + if (error) + return (error); + + map = *dmamp; + map->_dm_cookie = NULL; + + cookiesize = sizeof(struct x86_64_isa_dma_cookie); + + /* + * ISA only has 24-bits of address space. This means + * we can't DMA to pages over 16M. In order to DMA to + * arbitrary buffers, we use "bounce buffers" - pages + * in memory below the 16M boundary. On DMA reads, + * DMA happens to the bounce buffers, and is copied into + * the caller's buffer. On writes, data is copied into + * but bounce buffer, and the DMA happens from those + * pages. To software using the DMA mapping interface, + * this looks simply like a data cache. + * + * If we have more than 16M of RAM in the system, we may + * need bounce buffers. We check and remember that here. + * + * There are exceptions, however. VLB devices can do + * 32-bit DMA, and indicate that here. + * + * ...or, there is an opposite case. The most segments + * a transfer will require is (maxxfer / PAGE_SIZE) + 1. If + * the caller can't handle that many segments (e.g. the + * ISA DMA controller), we may have to bounce it as well. + */ + if (avail_end <= t->_bounce_thresh || + (flags & ISABUS_DMA_32BIT) != 0) { + /* Bouncing not necessary due to memory size. */ + map->_dm_bounce_thresh = 0; + } + cookieflags = 0; + if (map->_dm_bounce_thresh != 0 || + ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) { + cookieflags |= ID_MIGHT_NEED_BOUNCE; + cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt); + } + + /* + * Allocate our cookie. + */ + if ((cookiestore = malloc(cookiesize, M_DMAMAP, + (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) { + error = ENOMEM; + goto out; + } + memset(cookiestore, 0, cookiesize); + cookie = (struct x86_64_isa_dma_cookie *)cookiestore; + cookie->id_flags = cookieflags; + map->_dm_cookie = cookie; + + if (cookieflags & ID_MIGHT_NEED_BOUNCE) { + /* + * Allocate the bounce pages now if the caller + * wishes us to do so. + */ + if ((flags & BUS_DMA_ALLOCNOW) == 0) + goto out; + + error = _isa_dma_alloc_bouncebuf(t, map, size, flags); + } + + out: + if (error) { + if (map->_dm_cookie != NULL) + free(map->_dm_cookie, M_DMAMAP); + _bus_dmamap_destroy(t, map); + } + return (error); +} + +/* + * Destroy an ISA DMA map. 
+ */ +void +_isa_bus_dmamap_destroy(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + + /* + * Free any bounce pages this map might hold. + */ + if (cookie->id_flags & ID_HAS_BOUNCE) + _isa_dma_free_bouncebuf(t, map); + + free(cookie, M_DMAMAP); + _bus_dmamap_destroy(t, map); +} + +/* + * Load an ISA DMA map with a linear buffer. + */ +int +_isa_bus_dmamap_load(t, map, buf, buflen, p, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + int error; + + STAT_INCR(isa_dma_stats_loads); + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + /* + * Try to load the map the normal way. If this errors out, + * and we can bounce, we will. + */ + error = _bus_dmamap_load(t, map, buf, buflen, p, flags); + if (error == 0 || + (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)) + return (error); + + /* + * First attempt failed; bounce it. + */ + + STAT_INCR(isa_dma_stats_bounces); + + /* + * Allocate bounce pages, if necessary. + */ + if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) { + error = _isa_dma_alloc_bouncebuf(t, map, buflen, flags); + if (error) + return (error); + } + + /* + * Cache a pointer to the caller's buffer and load the DMA map + * with the bounce buffer. + */ + cookie->id_origbuf = buf; + cookie->id_origbuflen = buflen; + cookie->id_buftype = ID_BUFTYPE_LINEAR; + error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, buflen, + p, flags); + if (error) { + /* + * Free the bounce pages, unless our resources + * are reserved for our exclusive use. + */ + if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) + _isa_dma_free_bouncebuf(t, map); + return (error); + } + + /* ...so _isa_bus_dmamap_sync() knows we're bouncing */ + cookie->id_flags |= ID_IS_BOUNCING; + return (0); +} + +/* + * Like _isa_bus_dmamap_load(), but for mbufs. + */ +int +_isa_bus_dmamap_load_mbuf(t, map, m0, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct mbuf *m0; + int flags; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + int error; + + /* + * Make sure on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + +#ifdef DIAGNOSTIC + if ((m0->m_flags & M_PKTHDR) == 0) + panic("_isa_bus_dmamap_load_mbuf: no packet header"); +#endif + + if (m0->m_pkthdr.len > map->_dm_size) + return (EINVAL); + + /* + * Try to load the map the normal way. If this errors out, + * and we can bounce, we will. + */ + error = _bus_dmamap_load_mbuf(t, map, m0, flags); + if (error == 0 || + (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0)) + return (error); + + /* + * First attempt failed; bounce it. + */ + + STAT_INCR(isa_dma_stats_bounces); + + /* + * Allocate bounce pages, if necessary. + */ + if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) { + error = _isa_dma_alloc_bouncebuf(t, map, m0->m_pkthdr.len, + flags); + if (error) + return (error); + } + + /* + * Cache a pointer to the caller's buffer and load the DMA map + * with the bounce buffer. + */ + cookie->id_origbuf = m0; + cookie->id_origbuflen = m0->m_pkthdr.len; /* not really used */ + cookie->id_buftype = ID_BUFTYPE_MBUF; + error = _bus_dmamap_load(t, map, cookie->id_bouncebuf, + m0->m_pkthdr.len, NULL, flags); + if (error) { + /* + * Free the bounce pages, unless our resources + * are reserved for our exclusive use. 
+ */ + if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) + _isa_dma_free_bouncebuf(t, map); + return (error); + } + + /* ...so _isa_bus_dmamap_sync() knows we're bouncing */ + cookie->id_flags |= ID_IS_BOUNCING; + return (0); +} + +/* + * Like _isa_bus_dmamap_load(), but for uios. + */ +int +_isa_bus_dmamap_load_uio(t, map, uio, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct uio *uio; + int flags; +{ + + panic("_isa_bus_dmamap_load_uio: not implemented"); +} + +/* + * Like _isa_bus_dmamap_load(), but for raw memory allocated with + * bus_dmamem_alloc(). + */ +int +_isa_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_dma_segment_t *segs; + int nsegs; + bus_size_t size; + int flags; +{ + + panic("_isa_bus_dmamap_load_raw: not implemented"); +} + +/* + * Unload an ISA DMA map. + */ +void +_isa_bus_dmamap_unload(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + + /* + * If we have bounce pages, free them, unless they're + * reserved for our exclusive use. + */ + if ((cookie->id_flags & ID_HAS_BOUNCE) && + (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) + _isa_dma_free_bouncebuf(t, map); + + cookie->id_flags &= ~ID_IS_BOUNCING; + cookie->id_buftype = ID_BUFTYPE_INVALID; + + /* + * Do the generic bits of the unload. + */ + _bus_dmamap_unload(t, map); +} + +/* + * Synchronize an ISA DMA map. + */ +void +_isa_bus_dmamap_sync(t, map, offset, len, ops) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_addr_t offset; + bus_size_t len; + int ops; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + + /* + * Mixing PRE and POST operations is not allowed. + */ + if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 && + (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0) + panic("_isa_bus_dmamap_sync: mix PRE and POST"); + +#ifdef DIAGNOSTIC + if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) { + if (offset >= map->dm_mapsize) + panic("_isa_bus_dmamap_sync: bad offset"); + if (len == 0 || (offset + len) > map->dm_mapsize) + panic("_isa_bus_dmamap_sync: bad length"); + } +#endif + + /* + * If we're not bouncing, just return; nothing to do. + */ + if ((cookie->id_flags & ID_IS_BOUNCING) == 0) + return; + + switch (cookie->id_buftype) { + case ID_BUFTYPE_LINEAR: + /* + * Nothing to do for pre-read. + */ + + if (ops & BUS_DMASYNC_PREWRITE) { + /* + * Copy the caller's buffer to the bounce buffer. + */ + memcpy((char *)cookie->id_bouncebuf + offset, + (char *)cookie->id_origbuf + offset, len); + } + + if (ops & BUS_DMASYNC_POSTREAD) { + /* + * Copy the bounce buffer to the caller's buffer. + */ + memcpy((char *)cookie->id_origbuf + offset, + (char *)cookie->id_bouncebuf + offset, len); + } + + /* + * Nothing to do for post-write. + */ + break; + + case ID_BUFTYPE_MBUF: + { + struct mbuf *m, *m0 = cookie->id_origbuf; + bus_size_t minlen, moff; + + /* + * Nothing to do for pre-read. + */ + + if (ops & BUS_DMASYNC_PREWRITE) { + /* + * Copy the caller's buffer to the bounce buffer. + */ + m_copydata(m0, offset, len, + (char *)cookie->id_bouncebuf + offset); + } + + if (ops & BUS_DMASYNC_POSTREAD) { + /* + * Copy the bounce buffer to the caller's buffer. + */ + for (moff = offset, m = m0; m != NULL && len != 0; + m = m->m_next) { + /* Find the beginning mbuf. */ + if (moff >= m->m_len) { + moff -= m->m_len; + continue; + } + + /* + * Now at the first mbuf to sync; nail + * each one until we have exhausted the + * length. + */ + minlen = len < m->m_len - moff ? 
+ len : m->m_len - moff; + + memcpy(mtod(m, caddr_t) + moff, + (char *)cookie->id_bouncebuf + offset, + minlen); + + moff = 0; + len -= minlen; + offset += minlen; + } + } + + /* + * Nothing to do for post-write. + */ + break; + } + + case ID_BUFTYPE_UIO: + panic("_isa_bus_dmamap_sync: ID_BUFTYPE_UIO"); + break; + + case ID_BUFTYPE_RAW: + panic("_isa_bus_dmamap_sync: ID_BUFTYPE_RAW"); + break; + + case ID_BUFTYPE_INVALID: + panic("_isa_bus_dmamap_sync: ID_BUFTYPE_INVALID"); + break; + + default: + printf("unknown buffer type %d\n", cookie->id_buftype); + panic("_isa_bus_dmamap_sync"); + } +} + +/* + * Allocate memory safe for ISA DMA. + */ +int +_isa_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; +{ + paddr_t high; + + if (avail_end > ISA_DMA_BOUNCE_THRESHOLD) + high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD); + else + high = trunc_page(avail_end); + + return (_bus_dmamem_alloc_range(t, size, alignment, boundary, + segs, nsegs, rsegs, flags, 0, high)); +} + +/********************************************************************** + * ISA DMA utility functions + **********************************************************************/ + +int +_isa_dma_alloc_bouncebuf(t, map, size, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_size_t size; + int flags; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + int error = 0; + + cookie->id_bouncebuflen = round_page(size); + error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen, + PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs, + map->_dm_segcnt, &cookie->id_nbouncesegs, flags); + if (error) + goto out; + error = _bus_dmamem_map(t, cookie->id_bouncesegs, + cookie->id_nbouncesegs, cookie->id_bouncebuflen, + (caddr_t *)&cookie->id_bouncebuf, flags); + + out: + if (error) { + _bus_dmamem_free(t, cookie->id_bouncesegs, + cookie->id_nbouncesegs); + cookie->id_bouncebuflen = 0; + cookie->id_nbouncesegs = 0; + } else { + cookie->id_flags |= ID_HAS_BOUNCE; + STAT_INCR(isa_dma_stats_nbouncebufs); + } + + return (error); +} + +void +_isa_dma_free_bouncebuf(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + struct x86_64_isa_dma_cookie *cookie = map->_dm_cookie; + + STAT_DECR(isa_dma_stats_nbouncebufs); + + _bus_dmamem_unmap(t, cookie->id_bouncebuf, + cookie->id_bouncebuflen); + _bus_dmamem_free(t, cookie->id_bouncesegs, + cookie->id_nbouncesegs); + cookie->id_bouncebuflen = 0; + cookie->id_nbouncesegs = 0; + cookie->id_flags &= ~ID_HAS_BOUNCE; +} diff --git a/sys/arch/x86_64/isa/pccons.c b/sys/arch/x86_64/isa/pccons.c new file mode 100644 index 000000000000..e4a38f4ce267 --- /dev/null +++ b/sys/arch/x86_64/isa/pccons.c @@ -0,0 +1,2695 @@ +/* $NetBSD: pccons.c,v 1.1 2001/06/19 00:20:33 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz and Don Ahn. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pccons.c 5.11 (Berkeley) 5/21/91 + */ + +/* + * code to work keyboard & display for PC-style console + * + * "NPCCONSKBD > 0" means that we access the keyboard through the MI keyboard + * controller driver, ==0 that we access it directly. + * XXX Only one of these attachments can be used in one kernel configuration. 
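+ * In other words, the keyboard is reached either through the MI pckbc
+ * driver or by poking the 8042 registers directly; the #if (NPCCONSKBD > 0)
+ * blocks below select between the two at compile time.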
+ */ + +#include "opt_ddb.h" +#include "opt_compat_netbsd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pc.h" +#if (NPCCONSKBD > 0) +#include +#include +#else +/* consistency check: plain pccons can't coexist with pckbc */ +#include "pckbc.h" +#if (NPCKBC > 0) +#error "(pc without pcconskbd) and pckbc can't coexist" +#endif +#endif /* NPCCONSKBD */ + +/* consistency check: pccons can't coexist with vga or pcdisplay */ +#include "vga.h" +#include "pcdisplay.h" +#if (NVGA > 0) || (NPCDISPLAY > 0) +#error "pc and (vga or pcdisplay) can't coexist" +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define XFREE86_BUG_COMPAT + +#ifndef BEEP_FREQ +#define BEEP_FREQ 1500 +#endif +#ifndef BEEP_TIME +#define BEEP_TIME (hz/5) +#endif + +#define PCBURST 128 + +/* + * Non-US keyboards definition + */ +#if defined(FRENCH_KBD) || defined(GERMAN_KBD) || defined(NORWEGIAN_KBD) || defined(FINNISH_KBD) +# define NONUS_KBD +# define DISPLAY_ISO8859 +# define NUMERIC_SLASH_FIX +#endif + +static u_short *Crtat; /* pointer to backing store */ +static u_short *crtat; /* pointer to current char */ +#if (NPCCONSKBD == 0) +static volatile u_char ack, nak; /* Don't ask. */ +#endif +static u_char async, kernel, polling; /* Really, you don't want to know. */ +static u_char lock_state = 0x00; /* all off */ +#if (NPCCONSKBD == 0) +static u_char old_lock_state = 0xff, + typematic_rate = 0xff, /* don't update until set by user */ + old_typematic_rate = 0xff; +#endif +static u_short cursor_shape = 0xffff, /* don't update until set by user */ + old_cursor_shape = 0xffff; +#ifdef XSERVER +int pc_xmode = 0; +#endif +int pccons_is_console = 0; +#if (NPCCONSKBD > 0) +static pckbc_tag_t kbctag; +static pckbc_slot_t kbcslot; +static int kbc_attached; +#endif + +#define PCUNIT(x) (minor(x)) + +static struct video_state { + int cx, cy; /* escape parameters */ + int row, col; /* current cursor position */ + int nrow, ncol, nchr; /* current screen geometry */ + u_char state; /* parser state */ +#define VSS_ESCAPE 1 +#define VSS_EBRACE 2 +#define VSS_EPARAM 3 + char so; /* in standout mode? */ + char color; /* color or mono display */ + char at; /* normal attributes */ + char so_at; /* standout attributes */ +} vs; + +struct pc_softc { + struct device sc_dev; + void *sc_ih; + struct tty *sc_tty; +}; + +static struct callout async_update_ch = CALLOUT_INITIALIZER; + +int pcprobe __P((struct device *, struct cfdata *, void *)); +void pcattach __P((struct device *, struct device *, void *)); +int pcintr __P((void *)); +void pcinit __P((void)); + +struct cfattach pc_ca = { + sizeof(struct pc_softc), pcprobe, pcattach +}; + +extern struct cfdriver pc_cd; + +#if (NPCCONSKBD > 0) +struct pcconskbd_softc { + struct device sc_dev; +}; + +int pcconskbdprobe __P((struct device *, struct cfdata *, void *)); +void pcconskbdattach __P((struct device *, struct device *, void *)); +void pcinput __P((void *, int)); + +struct cfattach pcconskbd_ca = { + sizeof(struct pcconskbd_softc), pcconskbdprobe, pcconskbdattach +}; + +extern struct cfdriver pcconskbd_cd; +#endif + +#define COL 80 +#define ROW 25 +#define CHR 2 + +/* + * DANGER WIL ROBINSON -- the values of SCROLL, NUM, CAPS, and ALT are + * important. + */ +#define SCROLL 0x0001 /* stop output */ +#define NUM 0x0002 /* numeric shift cursors vs. 
numeric */ +#define CAPS 0x0004 /* caps shift -- swaps case of letter */ +#define SHIFT 0x0008 /* keyboard shift */ +#define CTL 0x0010 /* control shift -- allows ctl function */ +#define ASCII 0x0020 /* ascii code for this key */ +#define ALT 0x0080 /* alternate shift -- alternate chars */ +#define FUNC 0x0100 /* function key */ +#define KP 0x0200 /* Keypad keys */ +#define NONE 0x0400 /* no function */ +#ifdef NONUS_KBD +#define ALTGR 0x0040 /* Alt graphic */ +#endif + +static unsigned int addr_6845 = MONO_BASE; + +#if (NPCCONSKBD == 0) +char *sget __P((void)); +#endif +char *strans __P((u_char)); +void sput __P((u_char *, int)); +#ifdef XSERVER +void pc_xmode_on __P((void)); +void pc_xmode_off __P((void)); +#endif + +void pcstart __P((struct tty *)); +int pcparam __P((struct tty *, struct termios *)); + +#if (NPCCONSKBD == 0) +int kbd_cmd __P((u_char, u_char)); +#endif +void set_cursor_shape __P((void)); +#ifdef XSERVER +#ifdef XFREE86_BUG_COMPAT +void get_cursor_shape __P((void)); +#endif +#endif +void do_async_update __P((void *)); +void async_update __P((void)); +#if (NPCCONSKBD > 0) +void update_leds __P((void)); +#else +#define update_leds async_update +#endif + +#if (NPCCONSKBD == 0) +static __inline int kbd_wait_output __P((void)); +static __inline int kbd_wait_input __P((void)); +static __inline void kbd_flush_input __P((void)); +static u_char kbc_get8042cmd __P((void)); +static int kbc_put8042cmd __P((u_char)); +#endif + +void pccnprobe __P((struct consdev *)); +void pccninit __P((struct consdev *)); +void pccnputc __P((dev_t, int)); +int pccngetc __P((dev_t)); +void pccnpollc __P((dev_t, int)); + +#if (NPCCONSKBD == 0) + +#define KBD_DELAY \ + { u_char x = inb(0x84); (void) x; } \ + { u_char x = inb(0x84); (void) x; } \ + { u_char x = inb(0x84); (void) x; } \ + { u_char x = inb(0x84); (void) x; } \ + { u_char x = inb(0x84); (void) x; } \ + { u_char x = inb(0x84); (void) x; } + +static __inline int +kbd_wait_output() +{ + u_int i; + + for (i = 100000; i; i--) + if ((inb(IO_KBD + KBSTATP) & KBS_IBF) == 0) { + KBD_DELAY; + return (1); + } + return (0); +} + +static __inline int +kbd_wait_input() +{ + u_int i; + + for (i = 100000; i; i--) + if ((inb(IO_KBD + KBSTATP) & KBS_DIB) != 0) { + KBD_DELAY; + return (1); + } + return (0); +} + +static __inline void +kbd_flush_input() +{ + u_int i; + + for (i = 10; i; i--) { + if ((inb(IO_KBD + KBSTATP) & KBS_DIB) == 0) + return; + KBD_DELAY; + (void) inb(IO_KBD + KBDATAP); + } +} + +#if 1 +/* + * Get the current command byte. + */ +static u_char +kbc_get8042cmd() +{ + + if (!kbd_wait_output()) + return (-1); + outb(IO_KBD + KBCMDP, K_RDCMDBYTE); + if (!kbd_wait_input()) + return (-1); + return (inb(IO_KBD + KBDATAP)); +} +#endif + +/* + * Pass command byte to keyboard controller (8042). 
+ */ +static int +kbc_put8042cmd(val) + u_char val; +{ + + if (!kbd_wait_output()) + return (0); + outb(IO_KBD + KBCMDP, K_LDCMDBYTE); + if (!kbd_wait_output()) + return (0); + outb(IO_KBD + KBOUTP, val); + return (1); +} + +/* + * Pass command to keyboard itself + */ +int +kbd_cmd(val, polling) + u_char val; + u_char polling; +{ + u_int retries = 3; + register u_int i; + + do { + if (!kbd_wait_output()) + return (0); + ack = nak = 0; + outb(IO_KBD + KBOUTP, val); + if (polling) + for (i = 100000; i; i--) { + if (inb(IO_KBD + KBSTATP) & KBS_DIB) { + register u_char c; + + KBD_DELAY; + c = inb(IO_KBD + KBDATAP); + if (c == KBR_ACK || c == KBR_ECHO) { + ack = 1; + return (1); + } + if (c == KBR_RESEND) { + nak = 1; + break; + } +#ifdef DIAGNOSTIC + printf("kbd_cmd: input char %x lost\n", c); +#endif + } + } + else + for (i = 100000; i; i--) { + (void) inb(IO_KBD + KBSTATP); + if (ack) + return (1); + if (nak) + break; + } + if (!nak) + return (0); + } while (--retries); + return (0); +} + +#endif /* NPCCONSKBD == 0 */ + +void +set_cursor_shape() +{ + register int iobase = addr_6845; + + outb(iobase, 10); + outb(iobase+1, cursor_shape >> 8); + outb(iobase, 11); + outb(iobase+1, cursor_shape); + old_cursor_shape = cursor_shape; +} + +#ifdef XSERVER +#ifdef XFREE86_BUG_COMPAT +void +get_cursor_shape() +{ + register int iobase = addr_6845; + + outb(iobase, 10); + cursor_shape = inb(iobase+1) << 8; + outb(iobase, 11); + cursor_shape |= inb(iobase+1); + + /* + * real 6845's, as found on, MDA, Hercules or CGA cards, do + * not support reading the cursor shape registers. the 6845 + * tri-states it's data bus. This is _normally_ read by the + * cpu as either 0x00 or 0xff.. in which case we just use + * a line cursor. + */ + if (cursor_shape == 0x0000 || cursor_shape == 0xffff) + cursor_shape = 0x0b10; + else + cursor_shape &= 0x1f1f; +} +#endif /* XFREE86_BUG_COMPAT */ +#endif /* XSERVER */ + +void +do_async_update(v) + void *v; +{ +#if (NPCCONSKBD == 0) + u_char poll = v ? 1 : 0; +#endif + int pos; + static int old_pos = -1; + + async = 0; + +#if (NPCCONSKBD == 0) + if (lock_state != old_lock_state) { + old_lock_state = lock_state; + if (!kbd_cmd(KBC_MODEIND, poll) || + !kbd_cmd(lock_state, poll)) { + printf("pc: timeout updating leds\n"); + (void) kbd_cmd(KBC_ENABLE, poll); + } + } + if (typematic_rate != old_typematic_rate) { + old_typematic_rate = typematic_rate; + if (!kbd_cmd(KBC_TYPEMATIC, poll) || + !kbd_cmd(typematic_rate, poll)) { + printf("pc: timeout updating typematic rate\n"); + (void) kbd_cmd(KBC_ENABLE, poll); + } + } +#else + /* + * If the mi pckbc driver is used, keyboard commands are handled + * there. The commands are issued synchronously (in update_leds() + * and pcioctl()). 
+ */ +#endif + +#ifdef XSERVER + if (pc_xmode > 0) + return; +#endif + + pos = crtat - Crtat; + if (pos != old_pos) { + register int iobase = addr_6845; + outb(iobase, 14); + outb(iobase+1, pos >> 8); + outb(iobase, 15); + outb(iobase+1, pos); + old_pos = pos; + } + if (cursor_shape != old_cursor_shape) + set_cursor_shape(); +} + +void +async_update() +{ + + if (kernel || polling) { + if (async) + callout_stop(&async_update_ch); + do_async_update((void *)1); + } else { + if (async) + return; + async = 1; + callout_reset(&async_update_ch, 1, do_async_update, NULL); + } +} + +#if (NPCCONSKBD > 0) +void update_leds() +{ + u_char cmd[2]; + + cmd[0] = KBC_MODEIND; + cmd[1] = lock_state & 7; + + pckbc_enqueue_cmd(kbctag, kbcslot, cmd, 2, 0, 0, 0); +} +#endif + +/* + * these are both bad jokes + */ +int +pcprobe(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + struct isa_attach_args *ia = aux; +#if (NPCCONSKBD == 0) + u_int i; +#else + u_char cmd[2], resp[1]; + int res; +#endif + +#if (NPCCONSKBD == 0) + /* Enable interrupts and keyboard, etc. */ + if (!kbc_put8042cmd(CMDBYTE)) { + printf("pcprobe: command error\n"); + return (0); + } +#else + if (!kbc_attached) { + printf("pcprobe: no keyboard\n"); + return (0); + } +#endif + +#if 1 + /* Flush any garbage. */ +#if (NPCCONSKBD == 0) + kbd_flush_input(); +#else + pckbc_flush(kbctag, kbcslot); +#endif + /* Reset the keyboard. */ +#if (NPCCONSKBD == 0) + if (!kbd_cmd(KBC_RESET, 1)) { + printf("pcprobe: reset error %d\n", 1); + goto lose; + } + for (i = 600000; i; i--) + if ((inb(IO_KBD + KBSTATP) & KBS_DIB) != 0) { + KBD_DELAY; + break; + } + if (i == 0 || inb(IO_KBD + KBDATAP) != KBR_RSTDONE) { + printf("pcprobe: reset error %d\n", 2); + goto lose; + } +#else + cmd[0] = KBC_RESET; + res = pckbc_poll_cmd(kbctag, kbcslot, cmd, 1, 1, resp, 1); + if (res) { + printf("pcprobe: reset error %d\n", 1); + /* + * XXX The keyboard is not present. Try to set the + * controller to "translating" anyway in case it is + * connected later. This should be done in attach(). + */ + (void) pckbc_xt_translation(kbctag, kbcslot, 1); + goto lose; + } + if (resp[0] != KBR_RSTDONE) { + printf("pcprobe: reset error %d\n", 2); + goto lose; + } +#endif + /* + * Some keyboards seem to leave a second ack byte after the reset. + * This is kind of stupid, but we account for them anyway by just + * flushing the buffer. + */ +#if (NPCCONSKBD == 0) + kbd_flush_input(); +#else + pckbc_flush(kbctag, kbcslot); +#endif + /* Just to be sure. */ +#if (NPCCONSKBD == 0) + if (!kbd_cmd(KBC_ENABLE, 1)) { + printf("pcprobe: reset error %d\n", 3); + goto lose; + } +#else + cmd[0] = KBC_ENABLE; + res = pckbc_poll_cmd(kbctag, kbcslot, cmd, 1, 0, 0, 0); + if (res) { + printf("pcprobe: reset error %d\n", 3); + goto lose; + } +#endif + + /* + * Some keyboard/8042 combinations do not seem to work if the keyboard + * is set to table 1; in fact, it would appear that some keyboards just + * ignore the command altogether. So by default, we use the AT scan + * codes and have the 8042 translate them. Unfortunately, this is + * known to not work on some PS/2 machines. We try desparately to deal + * with this by checking the (lack of a) translate bit in the 8042 and + * attempting to set the keyboard to XT mode. If this all fails, well, + * tough luck. + * + * XXX It would perhaps be a better choice to just use AT scan codes + * and not bother with this. + */ +#if (NPCCONSKBD == 0) + if (kbc_get8042cmd() & KC8_TRANS) { + /* The 8042 is translating for us; use AT codes. 
*/ + if (!kbd_cmd(KBC_SETTABLE, 1) || !kbd_cmd(2, 1)) { + printf("pcprobe: reset error %d\n", 4); + goto lose; + } + } else { + /* Stupid 8042; set keyboard to XT codes. */ + if (!kbd_cmd(KBC_SETTABLE, 1) || !kbd_cmd(1, 1)) { + printf("pcprobe: reset error %d\n", 5); + goto lose; + } + } +#else + if (pckbc_xt_translation(kbctag, kbcslot, 1)) { + /* The 8042 is translating for us; use AT codes. */ + cmd[0] = KBC_SETTABLE; + cmd[1] = 2; + res = pckbc_poll_cmd(kbctag, kbcslot, cmd, 2, 0, 0, 0); + if (res) { + printf("pcprobe: reset error %d\n", 4); + goto lose; + } + } else { + /* Stupid 8042; set keyboard to XT codes. */ + cmd[0] = KBC_SETTABLE; + cmd[1] = 1; + res = pckbc_poll_cmd(kbctag, kbcslot, cmd, 2, 0, 0, 0); + if (res) { + printf("pcprobe: reset error %d\n", 5); + goto lose; + } + } +#endif + +lose: + /* + * Technically, we should probably fail the probe. But we'll be nice + * and allow keyboard-less machines to boot with the console. + */ +#endif /* 1 */ + +#if (NPCCONSKBD > 0) + ia->ia_iosize = 0; +#else + ia->ia_iosize = 16; +#endif + ia->ia_msize = 0; + return (1); +} + +void +pcattach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ + struct pc_softc *sc = (void *)self; +#if (NPCCONSKBD == 0) + struct isa_attach_args *ia = aux; +#endif + + if (crtat == 0) + pcinit(); + + printf(": %s\n", vs.color ? "color" : "mono"); + do_async_update((void *)1); + +#if (NPCCONSKBD > 0) + pckbc_set_inputhandler(kbctag, kbcslot, pcinput, sc, sc->sc_dev.dv_xname); +#else + sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE, + IPL_TTY, pcintr, sc); + + /* + * Look for children of the keyboard controller. + * XXX Really should decouple keyboard controller + * from the console code. + */ + while (config_found(self, ia->ia_ic, NULL) != NULL) { /* XXX */ + /* Will break when no more children. */ + ; + } +#endif + + if (pccons_is_console) { + int maj; + + /* Locate the major number. */ + for (maj = 0; maj < nchrdev; maj++) + if (cdevsw[maj].d_open == pcopen) + break; + + /* There can be only one, but it can have any unit number. 
*/ + cn_tab->cn_dev = makedev(maj, sc->sc_dev.dv_unit); + + printf("%s: console\n", sc->sc_dev.dv_xname); + } +} + +#if (NPCCONSKBD > 0) +int +pcconskbdprobe(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + struct pckbc_attach_args *pka = aux; + + if (pka->pa_slot != PCKBC_KBD_SLOT) + return (0); + return (1); +} + +void +pcconskbdattach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ + struct pckbc_attach_args *pka = aux; + + printf("\n"); + + kbctag = pka->pa_tag; + kbcslot = pka->pa_slot; + kbc_attached = 1; +} + +int +pcconskbd_cnattach(tag, slot) + pckbc_tag_t tag; + pckbc_slot_t slot; +{ + kbctag = tag; + kbcslot = slot; + kbc_attached = 1; + return (0); +} +#endif + +int +pcopen(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + struct pc_softc *sc; + int unit = PCUNIT(dev); + struct tty *tp; + + if (unit >= pc_cd.cd_ndevs) + return (ENXIO); + sc = pc_cd.cd_devs[unit]; + if (sc == 0) + return (ENXIO); + + if (!sc->sc_tty) { + tp = sc->sc_tty = ttymalloc(); + tty_attach(tp); + } else + tp = sc->sc_tty; + + tp->t_oproc = pcstart; + tp->t_param = pcparam; + tp->t_dev = dev; + if ((tp->t_state & TS_ISOPEN) == 0) { + ttychars(tp); + tp->t_iflag = TTYDEF_IFLAG; + tp->t_oflag = TTYDEF_OFLAG; + tp->t_cflag = TTYDEF_CFLAG; + tp->t_lflag = TTYDEF_LFLAG; + tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; + pcparam(tp, &tp->t_termios); + ttsetwater(tp); + } else if (tp->t_state&TS_XCLUDE && p->p_ucred->cr_uid != 0) + return (EBUSY); + tp->t_state |= TS_CARR_ON; + + return ((*tp->t_linesw->l_open)(dev, tp)); +} + +int +pcclose(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + + if (tp == NULL) + return (0); + (*tp->t_linesw->l_close)(tp, flag); + ttyclose(tp); +#ifdef notyet /* XXX */ + ttyfree(tp); +#endif + return (0); +} + +int +pcread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + + return ((*tp->t_linesw->l_read)(tp, uio, flag)); +} + +int +pcwrite(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + + return ((*tp->t_linesw->l_write)(tp, uio, flag)); +} + +int +pcpoll(dev, events, p) + dev_t dev; + int events; + struct proc *p; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + + return ((*tp->t_linesw->l_poll)(tp, events, p)); +} + +struct tty * +pctty(dev) + dev_t dev; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + + return (tp); +} + +/* + * Got a console receive interrupt - + * the console processor wants to give us a character. + * Catch the character, and see who it goes to. 
+ */ +#if (NPCCONSKBD > 0) +void +pcinput(arg, data) + void *arg; + int data; +{ + struct pc_softc *sc = arg; + register struct tty *tp = sc->sc_tty; + u_char *cp; + + if (!tp || (tp->t_state & TS_ISOPEN) == 0) + return; + + cp = strans(data); + if (cp) + do + (*tp->t_linesw->l_rint)(*cp++, tp); + while (*cp); +} +#else +int +pcintr(arg) + void *arg; +{ + struct pc_softc *sc = arg; + register struct tty *tp = sc->sc_tty; + u_char *cp; + + if ((inb(IO_KBD + KBSTATP) & KBS_DIB) == 0) + return (0); + if (polling) + return (1); + do { + cp = sget(); + if (!tp || (tp->t_state & TS_ISOPEN) == 0) + return (1); + if (cp) + do + (*tp->t_linesw->l_rint)(*cp++, tp); + while (*cp); + } while (inb(IO_KBD + KBSTATP) & KBS_DIB); + return (1); +} +#endif + +int +pcioctl(dev, cmd, data, flag, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +{ + struct pc_softc *sc = pc_cd.cd_devs[PCUNIT(dev)]; + struct tty *tp = sc->sc_tty; + int error; + + error = (*tp->t_linesw->l_ioctl)(tp, cmd, data, flag, p); + if (error >= 0) + return (error); + error = ttioctl(tp, cmd, data, flag, p); + if (error >= 0) + return (error); + + switch (cmd) { +#ifdef XSERVER + case CONSOLE_X_MODE_ON: + pc_xmode_on(); + ttyflush(tp, FREAD); + return (0); + case CONSOLE_X_MODE_OFF: + pc_xmode_off(); + ttyflush(tp, FREAD); + return (0); + case CONSOLE_X_BELL: + /* + * If set, data is a pointer to a length 2 array of + * integers. data[0] is the pitch in Hz and data[1] + * is the duration in msec. + */ + if (data) + sysbeep(((int*)data)[0], + (((int*)data)[1] * hz) / 1000); + else + sysbeep(BEEP_FREQ, BEEP_TIME); + return (0); +#endif /* XSERVER */ + case CONSOLE_SET_TYPEMATIC_RATE: { + u_char rate; + + if (!data) + return (EINVAL); + rate = *((u_char *)data); + /* + * Check that it isn't too big (which would cause it to be + * confused with a command). + */ + if (rate & 0x80) + return (EINVAL); +#if (NPCCONSKBD > 0) + { + u_char cmd[2]; + + cmd[0] = KBC_TYPEMATIC; + cmd[1] = rate; + + return (pckbc_enqueue_cmd(kbctag, kbcslot, cmd, 2, 0, + 1, 0)); + } +#else + typematic_rate = rate; + async_update(); + return (0); +#endif + } + default: + return (ENOTTY); + } + +#ifdef DIAGNOSTIC + panic("pcioctl: impossible"); +#endif +} + +void +pcstart(tp) + struct tty *tp; +{ + struct clist *cl; + int s, len; + u_char buf[PCBURST]; + + s = spltty(); + if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) + goto out; + tp->t_state |= TS_BUSY; + splx(s); + + lock_state &= ~SCROLL; + + /* + * We need to do this outside spl since it could be fairly + * expensive and we don't want our serial ports to overflow. 
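 *
 * [Editor's note -- illustrative sketch, not part of the committed patch.
 *  The CONSOLE_X_BELL and CONSOLE_SET_TYPEMATIC_RATE cases of pcioctl()
 *  above form a small userland-visible API: X_BELL takes two ints (pitch
 *  in Hz, duration in msec) and SET_TYPEMATIC_RATE takes one u_char with
 *  bit 7 clear.  A minimal, hypothetical user program could look like the
 *  commented listing below.  The device path /dev/console, the idea that
 *  the ioctl numbers come from <machine/pccons.h>, and the particular
 *  pitch/duration/rate values are assumptions for illustration, not
 *  something this diff states.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <machine/pccons.h>
 *
 *	int
 *	main(void)
 *	{
 *		int bell[2] = { 1500, 100 };
 *		unsigned char rate = 0;
 *		int fd;
 *
 *		fd = open("/dev/console", O_RDWR);
 *		if (fd == -1)
 *			return (1);
 *		if (ioctl(fd, CONSOLE_X_BELL, bell) == -1)
 *			perror("CONSOLE_X_BELL");
 *		if (ioctl(fd, CONSOLE_SET_TYPEMATIC_RATE, &rate) == -1)
 *			perror("CONSOLE_SET_TYPEMATIC_RATE");
 *		close(fd);
 *		return (0);
 *	}
 *
 *  A rate byte of 0 requests the fastest typematic setting; pcioctl()
 *  rejects any value with the high bit set.  End of editor's note.]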
+ */ + cl = &tp->t_outq; + len = q_to_b(cl, buf, PCBURST); + sput(buf, len); + + s = spltty(); + tp->t_state &= ~TS_BUSY; + if (cl->c_cc) { + tp->t_state |= TS_TIMEOUT; + callout_reset(&tp->t_rstrt_ch, 1, ttrstrt, tp); + } + if (cl->c_cc <= tp->t_lowat) { + if (tp->t_state & TS_ASLEEP) { + tp->t_state &= ~TS_ASLEEP; + wakeup(cl); + } + selwakeup(&tp->t_wsel); + } +out: + splx(s); +} + +void +pcstop(tp, flag) + struct tty *tp; + int flag; +{ + + lock_state |= SCROLL; + async_update(); +} + +int +pccnattach() +{ + static struct consdev pccons = { + NULL, NULL, pccngetc, pccnputc, pccnpollc, + NULL, NODEV, CN_NORMAL + }; + + cn_tab = &pccons; + + pccons_is_console = 1; + + return (0); +} + +/* ARGSUSED */ +void +pccnputc(dev, c) + dev_t dev; + int c; +{ + u_char oldkernel = kernel; + char help = c; + + kernel = 1; + if (help == '\n') + sput("\r\n", 2); + else + sput(&help, 1); + kernel = oldkernel; +} + +/* ARGSUSED */ +int +pccngetc(dev) + dev_t dev; +{ + register char *cp; + +#ifdef XSERVER + if (pc_xmode > 0) + return (0); +#endif + + do { + /* wait for byte */ +#if (NPCCONSKBD == 0) + while ((inb(IO_KBD + KBSTATP) & KBS_DIB) == 0); + /* see if it's worthwhile */ + cp = sget(); +#else + int data; + do { + data = pckbc_poll_data(kbctag, kbcslot); + } while (data == -1); + cp = strans(data); +#endif + } while (!cp); + if (*cp == '\r') + return ('\n'); + return (*cp); +} + +void +pccnpollc(dev, on) + dev_t dev; + int on; +{ + + polling = on; +#if (NPCCONSKBD > 0) + pckbc_set_poll(kbctag, kbcslot, on); +#else + if (!on) { + int unit; + struct pc_softc *sc; + int s; + + /* + * If disabling polling on a device that's been configured, + * make sure there are no bytes left in the FIFO, holding up + * the interrupt line. Otherwise we won't get any further + * interrupts. + */ + unit = PCUNIT(dev); + if (pc_cd.cd_ndevs > unit) { + sc = pc_cd.cd_devs[unit]; + if (sc != 0) { + s = spltty(); + pcintr(sc); + splx(s); + } + } + } +#endif +} + +/* + * Set line parameters. 
+ */ +int +pcparam(tp, t) + struct tty *tp; + struct termios *t; +{ + + tp->t_ispeed = t->c_ispeed; + tp->t_ospeed = t->c_ospeed; + tp->t_cflag = t->c_cflag; + return (0); +} + +void +pcinit() +{ + u_short volatile *cp; + u_short was; + unsigned cursorat; + + cp = ISA_HOLE_VADDR(CGA_BUF); + was = *cp; + *cp = (u_short) 0xA55A; + if (*cp != 0xA55A) { + cp = ISA_HOLE_VADDR(MONO_BUF); + addr_6845 = MONO_BASE; + vs.color = 0; + } else { + *cp = was; + addr_6845 = CGA_BASE; + vs.color = 1; + } + + /* Extract cursor location */ + outb(addr_6845, 14); + cursorat = inb(addr_6845+1) << 8; + outb(addr_6845, 15); + cursorat |= inb(addr_6845+1); + + if (cursorat > COL * ROW) + cursorat = 0; + +#ifdef FAT_CURSOR + cursor_shape = 0x0012; +#endif + + Crtat = (u_short *)cp; + crtat = (u_short *)(cp + cursorat); + + vs.ncol = COL; + vs.nrow = ROW; + vs.nchr = COL * ROW; + vs.at = FG_LIGHTGREY | BG_BLACK; + + if (vs.color == 0) + vs.so_at = FG_BLACK | BG_LIGHTGREY; + else + vs.so_at = FG_YELLOW | BG_BLACK; + + fillw((vs.at << 8) | ' ', crtat, vs.nchr - cursorat); +} + +#define wrtchar(c, at) do {\ + char *cp = (char *)crtat; *cp++ = (c); *cp = (at); crtat++; vs.col++; \ +} while (0) + +/* translate ANSI color codes to standard pc ones */ +static char fgansitopc[] = { + FG_BLACK, FG_RED, FG_GREEN, FG_BROWN, FG_BLUE, + FG_MAGENTA, FG_CYAN, FG_LIGHTGREY +}; + +static char bgansitopc[] = { + BG_BLACK, BG_RED, BG_GREEN, BG_BROWN, BG_BLUE, + BG_MAGENTA, BG_CYAN, BG_LIGHTGREY +}; + +#ifdef DISPLAY_ISO8859 +static u_char iso2ibm437[] = +{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0xff, 0xad, 0x9b, 0x9c, 0, 0x9d, 0, 0x40, + 0x6f, 0x63, 0x61, 0xae, 0, 0, 0, 0, + 0xf8, 0xf1, 0xfd, 0x33, 0, 0xe6, 0, 0xfa, + 0, 0x31, 0x6f, 0xaf, 0xac, 0xab, 0, 0xa8, + 0x41, 0x41, 0x41, 0x41, 0x8e, 0x8f, 0x92, 0x80, + 0x45, 0x90, 0x45, 0x45, 0x49, 0x49, 0x49, 0x49, + 0x81, 0xa5, 0x4f, 0x4f, 0x4f, 0x4f, 0x99, 0x4f, + 0x4f, 0x55, 0x55, 0x55, 0x9a, 0x59, 0, 0xe1, + 0x85, 0xa0, 0x83, 0x61, 0x84, 0x86, 0x91, 0x87, + 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, + 0, 0xa4, 0x95, 0xa2, 0x93, 0x6f, 0x94, 0x6f, + 0x6f, 0x97, 0xa3, 0x96, 0x81, 0x98, 0, 0 +}; +#endif + +/* + * `pc3' termcap emulation. + */ +void +sput(cp, n) + u_char *cp; + int n; +{ + u_char c, scroll = 0; + +#ifdef XSERVER + if (pc_xmode > 0) + return; +#endif + + if (crtat == 0) + pcinit(); + + while (n--) { + if (!(c = *cp++)) + continue; + + switch (c) { + case 0x1B: + if (vs.state >= VSS_ESCAPE) { + wrtchar(c, vs.so_at); + vs.state = 0; + goto maybe_scroll; + } else + vs.state = VSS_ESCAPE; + break; + + case '\t': { + int inccol = 8 - (vs.col & 7); + crtat += inccol; + vs.col += inccol; + } + maybe_scroll: + if (vs.col >= COL) { + vs.col -= COL; + scroll = 1; + } + break; + + case '\010': + if (crtat <= Crtat) + break; + --crtat; + if (--vs.col < 0) + vs.col += COL; /* non-destructive backspace */ + break; + + case '\r': + crtat -= vs.col; + vs.col = 0; + break; + + case '\n': + crtat += vs.ncol; + scroll = 1; + break; + + default: + switch (vs.state) { + case 0: + if (c == '\a') + sysbeep(BEEP_FREQ, BEEP_TIME); + else { + /* + * If we're outputting multiple printed + * characters, just blast them to the + * screen until we reach the end of the + * buffer or a control character. This + * saves time by short-circuiting the + * switch. + * If we reach the end of the line, we + * break to do a scroll check. 
+ */ + for (;;) { +#ifdef DISPLAY_ISO8859 + if (c & 0x80) + c = iso2ibm437[c&0x7f]; +#endif + if (vs.so) + wrtchar(c, vs.so_at); + else + wrtchar(c, vs.at); + if (vs.col >= vs.ncol) { + vs.col = 0; + scroll = 1; + break; + } + if (!n || (c = *cp) < ' ') + break; + n--, cp++; + } + } + break; + case VSS_ESCAPE: + if (c == '[') { /* Start ESC [ sequence */ + vs.cx = vs.cy = 0; + vs.state = VSS_EBRACE; + } else if (c == 'c') { /* Clear screen & home */ + fillw((vs.at << 8) | ' ', + Crtat, vs.nchr); + crtat = Crtat; + vs.col = 0; + vs.state = 0; + } else { /* Invalid, clear state */ + wrtchar(c, vs.so_at); + vs.state = 0; + goto maybe_scroll; + } + break; + default: /* VSS_EBRACE or VSS_EPARAM */ + switch (c) { + int pos; + case 'm': + if (!vs.cx) + vs.so = 0; + else + vs.so = 1; + vs.state = 0; + break; + case 'A': { /* back cx rows */ + int cx = vs.cx; + if (cx <= 0) + cx = 1; + else + cx %= vs.nrow; + pos = crtat - Crtat; + pos -= vs.ncol * cx; + if (pos < 0) + pos += vs.nchr; + crtat = Crtat + pos; + vs.state = 0; + break; + } + case 'B': { /* down cx rows */ + int cx = vs.cx; + if (cx <= 0) + cx = 1; + else + cx %= vs.nrow; + pos = crtat - Crtat; + pos += vs.ncol * cx; + if (pos >= vs.nchr) + pos -= vs.nchr; + crtat = Crtat + pos; + vs.state = 0; + break; + } + case 'C': { /* right cursor */ + int cx = vs.cx, + col = vs.col; + if (cx <= 0) + cx = 1; + else + cx %= vs.ncol; + pos = crtat - Crtat; + pos += cx; + col += cx; + if (col >= vs.ncol) { + pos -= vs.ncol; + col -= vs.ncol; + } + vs.col = col; + crtat = Crtat + pos; + vs.state = 0; + break; + } + case 'D': { /* left cursor */ + int cx = vs.cx, + col = vs.col; + if (cx <= 0) + cx = 1; + else + cx %= vs.ncol; + pos = crtat - Crtat; + pos -= cx; + col -= cx; + if (col < 0) { + pos += vs.ncol; + col += vs.ncol; + } + vs.col = col; + crtat = Crtat + pos; + vs.state = 0; + break; + } + case 'J': /* Clear ... */ + switch (vs.cx) { + case 0: + /* ... to end of display */ + fillw((vs.at << 8) | ' ', + crtat, + Crtat + vs.nchr - crtat); + break; + case 1: + /* ... to next location */ + fillw((vs.at << 8) | ' ', + Crtat, crtat - Crtat + 1); + break; + case 2: + /* ... whole display */ + fillw((vs.at << 8) | ' ', + Crtat, vs.nchr); + break; + } + vs.state = 0; + break; + case 'K': /* Clear line ... */ + switch (vs.cx) { + case 0: + /* ... current to EOL */ + fillw((vs.at << 8) | ' ', + crtat, vs.ncol - vs.col); + break; + case 1: + /* ... beginning to next */ + fillw((vs.at << 8) | ' ', + crtat - vs.col, vs.col + 1); + break; + case 2: + /* ... 
entire line */ + fillw((vs.at << 8) | ' ', + crtat - vs.col, vs.ncol); + break; + } + vs.state = 0; + break; + case 'f': /* in system V consoles */ + case 'H': { /* Cursor move */ + int cx = vs.cx, + cy = vs.cy; + if (!cx || !cy) { + crtat = Crtat; + vs.col = 0; + } else { + if (cx > vs.nrow) + cx = vs.nrow; + if (cy > vs.ncol) + cy = vs.ncol; + crtat = Crtat + + (cx - 1) * vs.ncol + cy - 1; + vs.col = cy - 1; + } + vs.state = 0; + break; + } + case 'M': { /* delete cx rows */ + u_short *crtAt = crtat - vs.col; + int cx = vs.cx, + row = (crtAt - Crtat) / vs.ncol, + nrow = vs.nrow - row; + if (cx <= 0) + cx = 1; + else if (cx > nrow) + cx = nrow; + if (cx < nrow) + memcpy(crtAt, + crtAt + vs.ncol * cx, + vs.ncol * + (nrow - cx) * CHR); + fillw((vs.at << 8) | ' ', + crtAt + vs.ncol * (nrow - cx), + vs.ncol * cx); + vs.state = 0; + break; + } + case 'S': { /* scroll up cx lines */ + int cx = vs.cx; + if (cx <= 0) + cx = 1; + else if (cx > vs.nrow) + cx = vs.nrow; + if (cx < vs.nrow) + memcpy(Crtat, + Crtat + vs.ncol * cx, + vs.ncol * + (vs.nrow - cx) * CHR); + fillw((vs.at << 8) | ' ', + Crtat + vs.ncol * (vs.nrow - cx), + vs.ncol * cx); +#if 0 + crtat -= vs.ncol * cx; /* XXX */ +#endif + vs.state = 0; + break; + } + case 'L': { /* insert cx rows */ + u_short *crtAt = crtat - vs.col; + int cx = vs.cx, + row = (crtAt - Crtat) / vs.ncol, + nrow = vs.nrow - row; + if (cx <= 0) + cx = 1; + else if (cx > nrow) + cx = nrow; + if (cx < nrow) + memcpy(crtAt + vs.ncol * cx, + crtAt, + vs.ncol * (nrow - cx) * + CHR); + fillw((vs.at << 8) | ' ', + crtAt, vs.ncol * cx); + vs.state = 0; + break; + } + case 'T': { /* scroll down cx lines */ + int cx = vs.cx; + if (cx <= 0) + cx = 1; + else if (cx > vs.nrow) + cx = vs.nrow; + if (cx < vs.nrow) + memcpy(Crtat + vs.ncol * cx, + Crtat, + vs.ncol * (vs.nrow - cx) * + CHR); + fillw((vs.at << 8) | ' ', + Crtat, vs.ncol * cx); +#if 0 + crtat += vs.ncol * cx; /* XXX */ +#endif + vs.state = 0; + break; + } + case ';': /* Switch params in cursor def */ + vs.state = VSS_EPARAM; + break; + case 'r': + vs.so_at = (vs.cx & FG_MASK) | + ((vs.cy << 4) & BG_MASK); + vs.state = 0; + break; + case 'x': /* set attributes */ + switch (vs.cx) { + case 0: + vs.at = FG_LIGHTGREY | BG_BLACK; + break; + case 1: + /* ansi background */ + if (!vs.color) + break; + vs.at &= FG_MASK; + vs.at |= bgansitopc[vs.cy & 7]; + break; + case 2: + /* ansi foreground */ + if (!vs.color) + break; + vs.at &= BG_MASK; + vs.at |= fgansitopc[vs.cy & 7]; + break; + case 3: + /* pc text attribute */ + if (vs.state >= VSS_EPARAM) + vs.at = vs.cy; + break; + } + vs.state = 0; + break; + + default: /* Only numbers valid here */ + if ((c >= '0') && (c <= '9')) { + if (vs.state >= VSS_EPARAM) { + vs.cy *= 10; + vs.cy += c - '0'; + } else { + vs.cx *= 10; + vs.cx += c - '0'; + } + } else + vs.state = 0; + break; + } + break; + } + } + if (scroll) { + scroll = 0; + /* scroll check */ + if (crtat >= Crtat + vs.nchr) { + memcpy(Crtat, Crtat + vs.ncol, + (vs.nchr - vs.ncol) * CHR); + fillw((vs.at << 8) | ' ', + Crtat + vs.nchr - vs.ncol, + vs.ncol); + crtat -= vs.ncol; + } + } + } + async_update(); +} + +#define CODE_SIZE 4 /* Use a max of 4 for now... */ +#ifndef NONUS_KBD +typedef struct { + u_short type; + char unshift[CODE_SIZE]; + char shift[CODE_SIZE]; + char ctl[CODE_SIZE]; +} Scan_def; + +static Scan_def scan_codes[] = { + { NONE, "", "", "" }, /* 0 unused */ + { ASCII,"\033", "\033", "\033" }, /* 1 ESCape */ + { ASCII,"1", "!", "!" 
}, /* 2 1 */ + { ASCII,"2", "@", "\000" }, /* 3 2 */ + { ASCII,"3", "#", "#" }, /* 4 3 */ + { ASCII,"4", "$", "$" }, /* 5 4 */ + { ASCII,"5", "%", "%" }, /* 6 5 */ + { ASCII,"6", "^", "\036" }, /* 7 6 */ + { ASCII,"7", "&", "&" }, /* 8 7 */ + { ASCII,"8", "*", "\010" }, /* 9 8 */ + { ASCII,"9", "(", "(" }, /* 10 9 */ + { ASCII,"0", ")", ")" }, /* 11 0 */ + { ASCII,"-", "_", "\037" }, /* 12 - */ + { ASCII,"=", "+", "+" }, /* 13 = */ +#ifndef PCCONS_REAL_BS + { ASCII,"\177", "\177", "\010" }, /* 14 backspace */ +#else + { ASCII,"\010", "\010", "\177" }, /* 14 backspace */ +#endif + { ASCII,"\t", "\177\t", "\t" }, /* 15 tab */ + { ASCII,"q", "Q", "\021" }, /* 16 q */ + { ASCII,"w", "W", "\027" }, /* 17 w */ + { ASCII,"e", "E", "\005" }, /* 18 e */ + { ASCII,"r", "R", "\022" }, /* 19 r */ + { ASCII,"t", "T", "\024" }, /* 20 t */ + { ASCII,"y", "Y", "\031" }, /* 21 y */ + { ASCII,"u", "U", "\025" }, /* 22 u */ + { ASCII,"i", "I", "\011" }, /* 23 i */ + { ASCII,"o", "O", "\017" }, /* 24 o */ + { ASCII,"p", "P", "\020" }, /* 25 p */ + { ASCII,"[", "{", "\033" }, /* 26 [ */ + { ASCII,"]", "}", "\035" }, /* 27 ] */ + { ASCII,"\r", "\r", "\n" }, /* 28 return */ +#ifdef CAPS_IS_CONTROL + { CAPS, "", "", "" }, /* 29 caps */ +#else + { CTL, "", "", "" }, /* 29 control */ +#endif + { ASCII,"a", "A", "\001" }, /* 30 a */ + { ASCII,"s", "S", "\023" }, /* 31 s */ + { ASCII,"d", "D", "\004" }, /* 32 d */ + { ASCII,"f", "F", "\006" }, /* 33 f */ + { ASCII,"g", "G", "\007" }, /* 34 g */ + { ASCII,"h", "H", "\010" }, /* 35 h */ + { ASCII,"j", "J", "\n" }, /* 36 j */ + { ASCII,"k", "K", "\013" }, /* 37 k */ + { ASCII,"l", "L", "\014" }, /* 38 l */ + { ASCII,";", ":", ";" }, /* 39 ; */ + { ASCII,"'", "\"", "'" }, /* 40 ' */ + { ASCII,"`", "~", "`" }, /* 41 ` */ + { SHIFT,"", "", "" }, /* 42 shift */ + { ASCII,"\\", "|", "\034" }, /* 43 \ */ + { ASCII,"z", "Z", "\032" }, /* 44 z */ + { ASCII,"x", "X", "\030" }, /* 45 x */ + { ASCII,"c", "C", "\003" }, /* 46 c */ + { ASCII,"v", "V", "\026" }, /* 47 v */ + { ASCII,"b", "B", "\002" }, /* 48 b */ + { ASCII,"n", "N", "\016" }, /* 49 n */ + { ASCII,"m", "M", "\r" }, /* 50 m */ + { ASCII,",", "<", "<" }, /* 51 , */ + { ASCII,".", ">", ">" }, /* 52 . 
*/ + { ASCII,"/", "?", "\037" }, /* 53 / */ + { SHIFT,"", "", "" }, /* 54 shift */ + { KP, "*", "*", "*" }, /* 55 kp * */ + { ALT, "", "", "" }, /* 56 alt */ + { ASCII," ", " ", "\000" }, /* 57 space */ +#ifdef CAPS_IS_CONTROL + { CTL, "", "", "" }, /* 58 control */ +#else + { CAPS, "", "", "" }, /* 58 caps */ +#endif + { FUNC, "\033[M", "\033[Y", "\033[k" }, /* 59 f1 */ + { FUNC, "\033[N", "\033[Z", "\033[l" }, /* 60 f2 */ + { FUNC, "\033[O", "\033[a", "\033[m" }, /* 61 f3 */ + { FUNC, "\033[P", "\033[b", "\033[n" }, /* 62 f4 */ + { FUNC, "\033[Q", "\033[c", "\033[o" }, /* 63 f5 */ + { FUNC, "\033[R", "\033[d", "\033[p" }, /* 64 f6 */ + { FUNC, "\033[S", "\033[e", "\033[q" }, /* 65 f7 */ + { FUNC, "\033[T", "\033[f", "\033[r" }, /* 66 f8 */ + { FUNC, "\033[U", "\033[g", "\033[s" }, /* 67 f9 */ + { FUNC, "\033[V", "\033[h", "\033[t" }, /* 68 f10 */ + { NUM, "", "", "" }, /* 69 num lock */ + { SCROLL,"", "", "" }, /* 70 scroll lock */ + { KP, "7", "\033[H", "7" }, /* 71 kp 7 */ + { KP, "8", "\033[A", "8" }, /* 72 kp 8 */ + { KP, "9", "\033[I", "9" }, /* 73 kp 9 */ + { KP, "-", "-", "-" }, /* 74 kp - */ + { KP, "4", "\033[D", "4" }, /* 75 kp 4 */ + { KP, "5", "\033[E", "5" }, /* 76 kp 5 */ + { KP, "6", "\033[C", "6" }, /* 77 kp 6 */ + { KP, "+", "+", "+" }, /* 78 kp + */ + { KP, "1", "\033[F", "1" }, /* 79 kp 1 */ + { KP, "2", "\033[B", "2" }, /* 80 kp 2 */ + { KP, "3", "\033[G", "3" }, /* 81 kp 3 */ + { KP, "0", "\033[L", "0" }, /* 82 kp 0 */ + { KP, ".", "\177", "." }, /* 83 kp . */ + { NONE, "", "", "" }, /* 84 0 */ + { NONE, "100", "", "" }, /* 85 0 */ + { NONE, "101", "", "" }, /* 86 0 */ + { FUNC, "\033[W", "\033[i", "\033[u" }, /* 87 f11 */ + { FUNC, "\033[X", "\033[j", "\033[v" }, /* 88 f12 */ + { NONE, "102", "", "" }, /* 89 0 */ + { NONE, "103", "", "" }, /* 90 0 */ + { NONE, "", "", "" }, /* 91 0 */ + { NONE, "", "", "" }, /* 92 0 */ + { NONE, "", "", "" }, /* 93 0 */ + { NONE, "", "", "" }, /* 94 0 */ + { NONE, "", "", "" }, /* 95 0 */ + { NONE, "", "", "" }, /* 96 0 */ + { NONE, "", "", "" }, /* 97 0 */ + { NONE, "", "", "" }, /* 98 0 */ + { NONE, "", "", "" }, /* 99 0 */ + { NONE, "", "", "" }, /* 100 */ + { NONE, "", "", "" }, /* 101 */ + { NONE, "", "", "" }, /* 102 */ + { NONE, "", "", "" }, /* 103 */ + { NONE, "", "", "" }, /* 104 */ + { NONE, "", "", "" }, /* 105 */ + { NONE, "", "", "" }, /* 106 */ + { NONE, "", "", "" }, /* 107 */ + { NONE, "", "", "" }, /* 108 */ + { NONE, "", "", "" }, /* 109 */ + { NONE, "", "", "" }, /* 110 */ + { NONE, "", "", "" }, /* 111 */ + { NONE, "", "", "" }, /* 112 */ + { NONE, "", "", "" }, /* 113 */ + { NONE, "", "", "" }, /* 114 */ + { NONE, "", "", "" }, /* 115 */ + { NONE, "", "", "" }, /* 116 */ + { NONE, "", "", "" }, /* 117 */ + { NONE, "", "", "" }, /* 118 */ + { NONE, "", "", "" }, /* 119 */ + { NONE, "", "", "" }, /* 120 */ + { NONE, "", "", "" }, /* 121 */ + { NONE, "", "", "" }, /* 122 */ + { NONE, "", "", "" }, /* 123 */ + { NONE, "", "", "" }, /* 124 */ + { NONE, "", "", "" }, /* 125 */ + { NONE, "", "", "" }, /* 126 */ + { NONE, "", "", "" }, /* 127 */ +}; + +#else /* NONUS_KBD */ + +typedef struct { + u_short type; + char unshift[CODE_SIZE]; + char shift[CODE_SIZE]; + char ctl[CODE_SIZE]; + char altgr[CODE_SIZE]; +} Scan_def; + +#ifdef FRENCH_KBD + +static Scan_def scan_codes[] = { + { NONE, "", "", "", "" }, /* 0 unused */ + { ASCII, "\033", "\033", "\033", "\033" }, /* 1 ESCape */ + { ASCII, "&", "1", "&", "" }, /* 2 1 */ + { ASCII, "\351", "2", "\211", "~" }, /* 3 2 */ + { ASCII, "\"", "3", "\"", "#" }, /* 4 3 */ + { 
ASCII, "'", "4", "'", "{" }, /* 5 4 */ + { ASCII, "(", "5", "(", "[" }, /* 6 5 */ + { ASCII, "-", "6", "-", "|" }, /* 7 6 */ + { ASCII, "\350", "7", "\210", "`" }, /* 8 7 */ + { ASCII, "_", "8", "\037", "\\" }, /* 9 8 */ + { ASCII, "\347", "9", "\207", "^" }, /* 10 9 */ + { ASCII, "\340", "0", "\340", "@" }, /* 11 0 */ + { ASCII, ")", "\260", ")", "]" }, /* 12 - */ + { ASCII, "=", "+", "+", "}" }, /* 13 = */ + { ASCII, "\177", "\177", "\010", "\177" }, /* 14 backspace */ + { ASCII, "\t", "\177\t", "\t", "\t" }, /* 15 tab */ + { ASCII, "a", "A", "\001", "a" }, /* 16 q */ + { ASCII, "z", "Z", "\032", "z" }, /* 17 w */ + { ASCII, "e", "E", "\005", "e" }, /* 18 e */ + { ASCII, "r", "R", "\022", "r" }, /* 19 r */ + { ASCII, "t", "T", "\024", "t" }, /* 20 t */ + { ASCII, "y", "Y", "\031", "y" }, /* 21 y */ + { ASCII, "u", "U", "\025", "u" }, /* 22 u */ + { ASCII, "i", "I", "\011", "i" }, /* 23 i */ + { ASCII, "o", "O", "\017", "o" }, /* 24 o */ + { ASCII, "p", "P", "\020", "p" }, /* 25 p */ + { NONE, "", "", "", "" }, /* 26 [ */ + { ASCII, "$", "\243", "$", "$" }, /* 27 ] */ + { ASCII, "\r", "\r", "\n", "\r" }, /* 28 return */ + { CTL, "", "", "", "" }, /* 29 control */ + { ASCII, "q", "Q", "\021", "q" }, /* 30 a */ + { ASCII, "s", "S", "\023", "s" }, /* 31 s */ + { ASCII, "d", "D", "\004", "d" }, /* 32 d */ + { ASCII, "f", "F", "\006", "f" }, /* 33 f */ + { ASCII, "g", "G", "\007", "g" }, /* 34 g */ + { ASCII, "h", "H", "\010", "h" }, /* 35 h */ + { ASCII, "j", "J", "\n", "j" }, /* 36 j */ + { ASCII, "k", "K", "\013", "k" }, /* 37 k */ + { ASCII, "l", "L", "\014", "l" }, /* 38 l */ + { ASCII, "m", "M", "\r", "m" }, /* 39 ; */ + { ASCII, "\371", "%", "\231", "\371" }, /* 40 ' */ + { ASCII, "\262", "", "\262", "\262" }, /* 41 ` */ + { SHIFT, "", "", "", "" }, /* 42 shift */ + { ASCII, "*", "\265", "*", "*" }, /* 43 \ */ + { ASCII, "w", "W", "\027", "w" }, /* 44 z */ + { ASCII, "x", "X", "\030", "x" }, /* 45 x */ + { ASCII, "c", "C", "\003", "c" }, /* 46 c */ + { ASCII, "v", "V", "\026", "v" }, /* 47 v */ + { ASCII, "b", "B", "\002", "b" }, /* 48 b */ + { ASCII, "n", "N", "\016", "n" }, /* 49 n */ + { ASCII, ",", "?", ",", "," }, /* 50 m */ + { ASCII, ";", ".", ";", ";" }, /* 51 , */ + { ASCII, ":", "/", "\037", ":" }, /* 52 . */ + { ASCII, "!", "\266", "!", "!" 
}, /* 53 / */ + { SHIFT, "", "", "", "" }, /* 54 shift */ + { KP, "*", "*", "*", "*" }, /* 55 kp * */ + { ALT, "", "", "", "" }, /* 56 alt */ + { ASCII, " ", " ", "\000", " " }, /* 57 space */ + { CAPS, "", "", "", "" }, /* 58 caps */ + { FUNC, "\033[M", "\033[Y", "\033[k", "" }, /* 59 f1 */ + { FUNC, "\033[N", "\033[Z", "\033[l", "" }, /* 60 f2 */ + { FUNC, "\033[O", "\033[a", "\033[m", "" }, /* 61 f3 */ + { FUNC, "\033[P", "\033[b", "\033[n", "" }, /* 62 f4 */ + { FUNC, "\033[Q", "\033[c", "\033[o", "" }, /* 63 f5 */ + { FUNC, "\033[R", "\033[d", "\033[p", "" }, /* 64 f6 */ + { FUNC, "\033[S", "\033[e", "\033[q", "" }, /* 65 f7 */ + { FUNC, "\033[T", "\033[f", "\033[r", "" }, /* 66 f8 */ + { FUNC, "\033[U", "\033[g", "\033[s", "" }, /* 67 f9 */ + { FUNC, "\033[V", "\033[h", "\033[t", "" }, /* 68 f10 */ + { NUM, "", "", "", "" }, /* 69 num lock */ + { SCROLL, "", "", "", "" }, /* 70 scroll lock */ + { KP, "7", "\033[H", "7", "" }, /* 71 kp 7 */ + { KP, "8", "\033[A", "8", "" }, /* 72 kp 8 */ + { KP, "9", "\033[I", "9", "" }, /* 73 kp 9 */ + { KP, "-", "-", "-", "" }, /* 74 kp - */ + { KP, "4", "\033[D", "4", "" }, /* 75 kp 4 */ + { KP, "5", "\033[E", "5", "" }, /* 76 kp 5 */ + { KP, "6", "\033[C", "6", "" }, /* 77 kp 6 */ + { KP, "+", "+", "+", "" }, /* 78 kp + */ + { KP, "1", "\033[F", "1", "" }, /* 79 kp 1 */ + { KP, "2", "\033[B", "2", "" }, /* 80 kp 2 */ + { KP, "3", "\033[G", "3", "" }, /* 81 kp 3 */ + { KP, "0", "\033[L", "0", "" }, /* 82 kp 0 */ + { KP, ".", "\177", ".", "" }, /* 83 kp . */ + { NONE, "", "", "", "" }, /* 84 0 */ + { NONE, "100", "", "", "" }, /* 85 0 */ + { ASCII, "<", ">", "<", "<" }, /* 86 < > */ + { FUNC, "\033[W", "\033[i", "\033[u","" }, /* 87 f11 */ + { FUNC, "\033[X", "\033[j", "\033[v","" }, /* 88 f12 */ + { NONE, "102", "", "", "" }, /* 89 0 */ + { NONE, "103", "", "", "" }, /* 90 0 */ + { NONE, "", "", "", "" }, /* 91 0 */ + { NONE, "", "", "", "" }, /* 92 0 */ + { NONE, "", "", "", "" }, /* 93 0 */ + { NONE, "", "", "", "" }, /* 94 0 */ + { NONE, "", "", "", "" }, /* 95 0 */ + { NONE, "", "", "", "" }, /* 96 0 */ + { NONE, "", "", "", "" }, /* 97 0 */ + { NONE, "", "", "", "" }, /* 98 0 */ + { NONE, "", "", "", "" }, /* 99 0 */ + { NONE, "", "", "", "" }, /* 100 */ + { NONE, "", "", "", "" }, /* 101 */ + { NONE, "", "", "", "" }, /* 102 */ + { NONE, "", "", "", "" }, /* 103 */ + { NONE, "", "", "", "" }, /* 104 */ + { NONE, "", "", "", "" }, /* 105 */ + { NONE, "", "", "", "" }, /* 106 */ + { NONE, "", "", "", "" }, /* 107 */ + { NONE, "", "", "", "" }, /* 108 */ + { NONE, "", "", "", "" }, /* 109 */ + { NONE, "", "", "", "" }, /* 110 */ + { NONE, "", "", "", "" }, /* 111 */ + { NONE, "", "", "", "" }, /* 112 */ + { NONE, "", "", "", "" }, /* 113 */ + { NONE, "", "", "", "" }, /* 114 */ + { NONE, "", "", "", "" }, /* 115 */ + { NONE, "", "", "", "" }, /* 116 */ + { NONE, "", "", "", "" }, /* 117 */ + { NONE, "", "", "", "" }, /* 118 */ + { NONE, "", "", "", "" }, /* 119 */ + { NONE, "", "", "", "" }, /* 120 */ + { NONE, "", "", "", "" }, /* 121 */ + { NONE, "", "", "", "" }, /* 122 */ + { NONE, "", "", "", "" }, /* 123 */ + { NONE, "", "", "", "" }, /* 124 */ + { NONE, "", "", "", "" }, /* 125 */ + { NONE, "", "", "", "" }, /* 126 */ + { NONE, "", "", "", "" } /* 127 */ +}; + +#endif /* FRENCH_KBD */ + +#ifdef GERMAN_KBD + +static Scan_def scan_codes[] = { + { NONE, "", "", "", "" }, /* 0 unused */ + { ASCII, "\033", "\033", "\033", "\033"}, /* 1 ESCape */ + { ASCII, "1", "!", "!", "" }, /* 2 1 */ + { ASCII, "2", "\"", "\"", "\xb2" }, /* 3 2 */ + { 
ASCII, "3", "\xa7", "\xa7", "\xb3" }, /* 4 3 */ + { ASCII, "4", "$", "$", "" }, /* 5 4 */ + { ASCII, "5", "%", "%", "" }, /* 6 5 */ + { ASCII, "6", "&", "&", "" }, /* 7 6 */ + { ASCII, "7", "/", "/", "{" }, /* 8 7 */ + { ASCII, "8", "(", "(", "[" }, /* 9 8 */ + { ASCII, "9", ")", ")", "]" }, /* 10 9 */ + { ASCII, "0", "=", "=", "}" }, /* 11 0 */ + { ASCII, "\xdf","?", "?", "\\" }, /* 12 - */ + { ASCII, "'", "`", "`", "" }, /* 13 = */ + { ASCII, "\177", "\177", "\010", "\177" }, /* 14 backspace */ + { ASCII, "\t", "\177\t", "\t", "\t" }, /* 15 tab */ + { ASCII, "q", "Q", "\021", "@" }, /* 16 q */ + { ASCII, "w", "W", "\027", "w" }, /* 17 w */ + { ASCII, "e", "E", "\005", "e" }, /* 18 e */ + { ASCII, "r", "R", "\022", "r" }, /* 19 r */ + { ASCII, "t", "T", "\024", "t" }, /* 20 t */ + { ASCII, "z", "Z", "\032", "z" }, /* 21 y */ + { ASCII, "u", "U", "\025", "u" }, /* 22 u */ + { ASCII, "i", "I", "\011", "i" }, /* 23 i */ + { ASCII, "o", "O", "\017", "o" }, /* 24 o */ + { ASCII, "p", "P", "\020", "p" }, /* 25 p */ + { ASCII, "\xfc", "\xdc", "\xfc", "\xdc" }, /* 26 [ */ + { ASCII, "+", "*", "+", "~" }, /* 27 ] */ + { ASCII, "\r", "\r", "\n", "\r" }, /* 28 return */ + { CTL, "", "", "", "" }, /* 29 control */ + { ASCII, "a", "A", "\001", "a" }, /* 30 a */ + { ASCII, "s", "S", "\023", "s" }, /* 31 s */ + { ASCII, "d", "D", "\004", "d" }, /* 32 d */ + { ASCII, "f", "F", "\006", "f" }, /* 33 f */ + { ASCII, "g", "G", "\007", "g" }, /* 34 g */ + { ASCII, "h", "H", "\010", "h" }, /* 35 h */ + { ASCII, "j", "J", "\n", "j" }, /* 36 j */ + { ASCII, "k", "K", "\013", "k" }, /* 37 k */ + { ASCII, "l", "L", "\014", "l" }, /* 38 l */ + { ASCII, "\xf6", "\xd6", "\xf6", "\xd6" }, /* 39 ; */ + { ASCII, "\xe4", "\xc4", "\xe4", "\xc4" }, /* 40 ' */ + { ASCII, "\136", "\370", "\136", "\370" }, /* 41 ` */ + { SHIFT, "", "", "", "" }, /* 42 shift */ + { ASCII, "#", "'", "#", "'" }, /* 43 \ */ + { ASCII, "y", "Y", "\x19", "y" }, /* 44 z */ + { ASCII, "x", "X", "\030", "x" }, /* 45 x */ + { ASCII, "c", "C", "\003", "c" }, /* 46 c */ + { ASCII, "v", "V", "\026", "v" }, /* 47 v */ + { ASCII, "b", "B", "\002", "b" }, /* 48 b */ + { ASCII, "n", "N", "\016", "n" }, /* 49 n */ + { ASCII, "m", "M", "\r", "m" }, /* 50 m */ + { ASCII, ",", ";", ",", ";" }, /* 51 , */ + { ASCII, ".", ":", ".", ":" }, /* 52 . 
*/ + { ASCII, "-", "_", "-", "_" }, /* 53 / */ + { SHIFT, "", "", "", "" }, /* 54 shift */ + { KP, "*", "*", "*", "*" }, /* 55 kp * */ + { ALT, "", "", "", "" }, /* 56 alt */ + { ASCII, " ", " ", "\000", " " }, /* 57 space */ + { CAPS, "", "", "", "" }, /* 58 caps */ + { FUNC, "\033[M", "\033[Y", "\033[k", "" }, /* 59 f1 */ + { FUNC, "\033[N", "\033[Z", "\033[l", "" }, /* 60 f2 */ + { FUNC, "\033[O", "\033[a", "\033[m", "" }, /* 61 f3 */ + { FUNC, "\033[P", "\033[b", "\033[n", "" }, /* 62 f4 */ + { FUNC, "\033[Q", "\033[c", "\033[o", "" }, /* 63 f5 */ + { FUNC, "\033[R", "\033[d", "\033[p", "" }, /* 64 f6 */ + { FUNC, "\033[S", "\033[e", "\033[q", "" }, /* 65 f7 */ + { FUNC, "\033[T", "\033[f", "\033[r", "" }, /* 66 f8 */ + { FUNC, "\033[U", "\033[g", "\033[s", "" }, /* 67 f9 */ + { FUNC, "\033[V", "\033[h", "\033[t", "" }, /* 68 f10 */ + { NUM, "", "", "", "" }, /* 69 num lock */ + { SCROLL, "", "", "", "" }, /* 70 scroll lock */ + { KP, "7", "\033[H", "7", "" }, /* 71 kp 7 */ + { KP, "8", "\033[A", "8", "" }, /* 72 kp 8 */ + { KP, "9", "\033[I", "9", "" }, /* 73 kp 9 */ + { KP, "-", "-", "-", "" }, /* 74 kp - */ + { KP, "4", "\033[D", "4", "" }, /* 75 kp 4 */ + { KP, "5", "\033[E", "5", "" }, /* 76 kp 5 */ + { KP, "6", "\033[C", "6", "" }, /* 77 kp 6 */ + { KP, "+", "+", "+", "" }, /* 78 kp + */ + { KP, "1", "\033[F", "1", "" }, /* 79 kp 1 */ + { KP, "2", "\033[B", "2", "" }, /* 80 kp 2 */ + { KP, "3", "\033[G", "3", "" }, /* 81 kp 3 */ + { KP, "0", "\033[L", "0", "" }, /* 82 kp 0 */ + { KP, ",", "\177", ",", "" }, /* 83 kp . */ + { NONE, "", "", "", "" }, /* 84 0 */ + { NONE, "100", "", "", "" }, /* 85 0 */ + { ASCII, "<", ">", "<", "|" }, /* 86 < > */ + { FUNC, "\033[W", "\033[i", "\033[u","" }, /* 87 f11 */ + { FUNC, "\033[X", "\033[j", "\033[v","" }, /* 88 f12 */ + { NONE, "102", "", "", "" }, /* 89 0 */ + { NONE, "103", "", "", "" }, /* 90 0 */ + { NONE, "", "", "", "" }, /* 91 0 */ + { NONE, "", "", "", "" }, /* 92 0 */ + { NONE, "", "", "", "" }, /* 93 0 */ + { NONE, "", "", "", "" }, /* 94 0 */ + { NONE, "", "", "", "" }, /* 95 0 */ + { NONE, "", "", "", "" }, /* 96 0 */ + { NONE, "", "", "", "" }, /* 97 0 */ + { NONE, "", "", "", "" }, /* 98 0 */ + { NONE, "", "", "", "" }, /* 99 0 */ + { NONE, "", "", "", "" }, /* 100 */ + { NONE, "", "", "", "" }, /* 101 */ + { NONE, "", "", "", "" }, /* 102 */ + { NONE, "", "", "", "" }, /* 103 */ + { NONE, "", "", "", "" }, /* 104 */ + { NONE, "", "", "", "" }, /* 105 */ + { NONE, "", "", "", "" }, /* 106 */ + { NONE, "", "", "", "" }, /* 107 */ + { NONE, "", "", "", "" }, /* 108 */ + { NONE, "", "", "", "" }, /* 109 */ + { NONE, "", "", "", "" }, /* 110 */ + { NONE, "", "", "", "" }, /* 111 */ + { NONE, "", "", "", "" }, /* 112 */ + { NONE, "", "", "", "" }, /* 113 */ + { NONE, "", "", "", "" }, /* 114 */ + { NONE, "", "", "", "" }, /* 115 */ + { NONE, "", "", "", "" }, /* 116 */ + { NONE, "", "", "", "" }, /* 117 */ + { NONE, "", "", "", "" }, /* 118 */ + { NONE, "", "", "", "" }, /* 119 */ + { NONE, "", "", "", "" }, /* 120 */ + { NONE, "", "", "", "" }, /* 121 */ + { NONE, "", "", "", "" }, /* 122 */ + { NONE, "", "", "", "" }, /* 123 */ + { NONE, "", "", "", "" }, /* 124 */ + { NONE, "", "", "", "" }, /* 125 */ + { NONE, "", "", "", "" }, /* 126 */ + { NONE, "", "", "", "" } /* 127 */ +}; + +#endif /* GERMAN_KBD */ + +#ifdef NORWEGIAN_KBD +static Scan_def scan_codes[] = { + { NONE, "", "", "", "" }, /* 0 unused */ + { ASCII, "\033", "\033", "\033", "\033" }, /* 1 ESCape */ + { ASCII, "1", "!", "", "\241" }, /* 2 1 */ + { ASCII, "2", 
"\"", "\000", "@" }, /* 3 2 */ + { ASCII, "3", "#", "", "\243" }, /* 4 3 */ + { ASCII, "4", "$", "", "$" }, /* 5 4 */ + { ASCII, "5", "%", "\034", "\\" }, /* 6 5 */ + { ASCII, "6", "&", "\034", "|" }, /* 7 6 */ + { ASCII, "7", "/", "\033", "{" }, /* 8 7 */ + { ASCII, "8", "(", "\033", "[" }, /* 9 8 */ + { ASCII, "9", ")", "\035", "]" }, /* 10 9 */ + { ASCII, "0", "=", "\035", "}" }, /* 11 0 */ + { ASCII, "+", "?", "\037", "\277" }, /* 12 - */ + { ASCII, "\\", "`", "\034", "'" }, /* 13 = */ + { ASCII, "\177", "\177", "\010", "\177" }, /* 14 backspace */ + { ASCII, "\t", "\177\t", "\t", "\t" }, /* 15 tab */ + { ASCII, "q", "Q", "\021", "q" }, /* 16 q */ + { ASCII, "w", "W", "\027", "w" }, /* 17 w */ + { ASCII, "e", "E", "\005", "\353" }, /* 18 e */ + { ASCII, "r", "R", "\022", "r" }, /* 19 r */ + { ASCII, "t", "T", "\024", "t" }, /* 20 t */ + { ASCII, "y", "Y", "\031", "y" }, /* 21 y */ + { ASCII, "u", "U", "\025", "\374" }, /* 22 u */ + { ASCII, "i", "I", "\011", "i" }, /* 23 i */ + { ASCII, "o", "O", "\017", "\366" }, /* 24 o */ + { ASCII, "p", "P", "\020", "p" }, /* 25 p */ + { ASCII, "\345", "\305", "\334", "\374" }, /* 26 [ */ + { ASCII, "~", "^", "\036", "" }, /* 27 ] */ + { ASCII, "\r", "\r", "\n", "\r" }, /* 28 return */ + { CTL, "", "", "", "" }, /* 29 control */ + { ASCII, "a", "A", "\001", "\344" }, /* 30 a */ + { ASCII, "s", "S", "\023", "\337" }, /* 31 s */ + { ASCII, "d", "D", "\004", "d" }, /* 32 d */ + { ASCII, "f", "F", "\006", "f" }, /* 33 f */ + { ASCII, "g", "G", "\007", "g" }, /* 34 g */ + { ASCII, "h", "H", "\010", "h" }, /* 35 h */ + { ASCII, "j", "J", "\n", "j" }, /* 36 j */ + { ASCII, "k", "K", "\013", "k" }, /* 37 k */ + { ASCII, "l", "L", "\014", "l" }, /* 38 l */ + { ASCII, "\370", "\330", "\326", "\366" }, /* 39 ; */ + { ASCII, "\346", "\306", "\304", "\344" }, /* 40 ' */ + { ASCII, "|", "@", "\034", "\247" }, /* 41 ` */ + { SHIFT, "", "", "", "" }, /* 42 shift */ + { ASCII, "'", "*", "'", "'" }, /* 43 \ */ + { ASCII, "z", "Z", "\032", "z" }, /* 44 z */ + { ASCII, "x", "X", "\030", "x" }, /* 45 x */ + { ASCII, "c", "C", "\003", "c" }, /* 46 c */ + { ASCII, "v", "V", "\026", "v" }, /* 47 v */ + { ASCII, "b", "B", "\002", "b" }, /* 48 b */ + { ASCII, "n", "N", "\016", "n" }, /* 49 n */ + { ASCII, "m", "M", "\015", "m" }, /* 50 m */ + { ASCII, ",", ";", ",", "," }, /* 51 , */ + { ASCII, ".", ":", ".", "." }, /* 52 . 
*/ + { ASCII, "-", "_", "\037", "-" }, /* 53 / */ + { SHIFT, "", "", "", "" }, /* 54 shift */ + { KP, "*", "*", "*", "*" }, /* 55 kp * */ + { ALT, "", "", "", "" }, /* 56 alt */ + { ASCII, " ", " ", "\000", " " }, /* 57 space */ + { CAPS, "", "", "", "" }, /* 58 caps */ + { FUNC, "\033[M", "\033[Y", "\033[k", "" }, /* 59 f1 */ + { FUNC, "\033[N", "\033[Z", "\033[l", "" }, /* 60 f2 */ + { FUNC, "\033[O", "\033[a", "\033[m", "" }, /* 61 f3 */ + { FUNC, "\033[P", "\033[b", "\033[n", "" }, /* 62 f4 */ + { FUNC, "\033[Q", "\033[c", "\033[o", "" }, /* 63 f5 */ + { FUNC, "\033[R", "\033[d", "\033[p", "" }, /* 64 f6 */ + { FUNC, "\033[S", "\033[e", "\033[q", "" }, /* 65 f7 */ + { FUNC, "\033[T", "\033[f", "\033[r", "" }, /* 66 f8 */ + { FUNC, "\033[U", "\033[g", "\033[s", "" }, /* 67 f9 */ + { FUNC, "\033[V", "\033[h", "\033[t", "" }, /* 68 f10 */ + { NUM, "", "", "", "" }, /* 69 num lock */ + { SCROLL, "", "", "", "" }, /* 70 scroll lock */ + { KP, "7", "\033[H", "7", "" }, /* 71 kp 7 */ + { KP, "8", "\033[A", "8", "" }, /* 72 kp 8 */ + { KP, "9", "\033[I", "9", "" }, /* 73 kp 9 */ + { KP, "-", "-", "-", "" }, /* 74 kp - */ + { KP, "4", "\033[D", "4", "" }, /* 75 kp 4 */ + { KP, "5", "\033[E", "5", "" }, /* 76 kp 5 */ + { KP, "6", "\033[C", "6", "" }, /* 77 kp 6 */ + { KP, "+", "+", "+", "" }, /* 78 kp + */ + { KP, "1", "\033[F", "1", "" }, /* 79 kp 1 */ + { KP, "2", "\033[B", "2", "" }, /* 80 kp 2 */ + { KP, "3", "\033[G", "3", "" }, /* 81 kp 3 */ + { KP, "0", "\033[L", "0", "" }, /* 82 kp 0 */ + { KP, ".", "\177", ".", "" }, /* 83 kp . */ + { NONE, "", "", "", "" }, /* 84 0 */ + { NONE, "100", "", "", "" }, /* 85 0 */ + { ASCII, "<", ">", "\273", "\253" }, /* 86 < > */ + { FUNC, "\033[W", "\033[i", "\033[u","" }, /* 87 f11 */ + { FUNC, "\033[X", "\033[j", "\033[v","" }, /* 88 f12 */ + { NONE, "102", "", "", "" }, /* 89 0 */ + { NONE, "103", "", "", "" }, /* 90 0 */ + { NONE, "", "", "", "" }, /* 91 0 */ + { NONE, "", "", "", "" }, /* 92 0 */ + { NONE, "", "", "", "" }, /* 93 0 */ + { NONE, "", "", "", "" }, /* 94 0 */ + { NONE, "", "", "", "" }, /* 95 0 */ + { NONE, "", "", "", "" }, /* 96 0 */ + { NONE, "", "", "", "" }, /* 97 0 */ + { NONE, "", "", "", "" }, /* 98 0 */ + { NONE, "", "", "", "" }, /* 99 0 */ + { NONE, "", "", "", "" }, /* 100 */ + { NONE, "", "", "", "" }, /* 101 */ + { NONE, "", "", "", "" }, /* 102 */ + { NONE, "", "", "", "" }, /* 103 */ + { NONE, "", "", "", "" }, /* 104 */ + { NONE, "", "", "", "" }, /* 105 */ + { NONE, "", "", "", "" }, /* 106 */ + { NONE, "", "", "", "" }, /* 107 */ + { NONE, "", "", "", "" }, /* 108 */ + { NONE, "", "", "", "" }, /* 109 */ + { NONE, "", "", "", "" }, /* 110 */ + { NONE, "", "", "", "" }, /* 111 */ + { NONE, "", "", "", "" }, /* 112 */ + { NONE, "", "", "", "" }, /* 113 */ + { NONE, "", "", "", "" }, /* 114 */ + { NONE, "", "", "", "" }, /* 115 */ + { NONE, "", "", "", "" }, /* 116 */ + { NONE, "", "", "", "" }, /* 117 */ + { NONE, "", "", "", "" }, /* 118 */ + { NONE, "", "", "", "" }, /* 119 */ + { NONE, "", "", "", "" }, /* 120 */ + { NONE, "", "", "", "" }, /* 121 */ + { NONE, "", "", "", "" }, /* 122 */ + { NONE, "", "", "", "" }, /* 123 */ + { NONE, "", "", "", "" }, /* 124 */ + { NONE, "", "", "", "" }, /* 125 */ + { NONE, "", "", "", "" }, /* 126 */ + { NONE, "", "", "", "" } /* 127 */ +}; +#endif + +#ifdef FINNISH_KBD +static Scan_def scan_codes[] = { + { NONE, "", "", "", "" }, /* 0 unused */ + { ASCII, "\033", "\033", "\033", "\033" }, /* 1 ESCape */ + { ASCII, "1", "!", "", "\241" }, /* 2 1 */ + { ASCII, "2", "\"", "\000", 
"@" }, /* 3 2 */ + { ASCII, "3", "#", "", "\243" }, /* 4 3 */ + { ASCII, "4", "$", "", "$" }, /* 5 4 */ + { ASCII, "5", "%", "\034", "%" }, /* 6 5 */ + { ASCII, "6", "&", "\034", "&" }, /* 7 6 */ + { ASCII, "7", "/", "\033", "{" }, /* 8 7 */ + { ASCII, "8", "(", "\033", "[" }, /* 9 8 */ + { ASCII, "9", ")", "\035", "]" }, /* 10 9 */ + { ASCII, "0", "=", "\035", "}" }, /* 11 0 */ + { ASCII, "+", "?", "\037", "\\" }, /* 12 - */ + { ASCII, "'", "`", "\034", "'" }, /* 13 = */ + { ASCII, "\177", "\177", "\010", "\177" }, /* 14 backspace */ + { ASCII, "\t", "\177\t", "\t", "\t" }, /* 15 tab */ + { ASCII, "q", "Q", "\021", "q" }, /* 16 q */ + { ASCII, "w", "W", "\027", "w" }, /* 17 w */ + { ASCII, "e", "E", "\005", "\353" }, /* 18 e */ + { ASCII, "r", "R", "\022", "r" }, /* 19 r */ + { ASCII, "t", "T", "\024", "t" }, /* 20 t */ + { ASCII, "y", "Y", "\031", "y" }, /* 21 y */ + { ASCII, "u", "U", "\025", "\374" }, /* 22 u */ + { ASCII, "i", "I", "\011", "i" }, /* 23 i */ + { ASCII, "o", "O", "\017", "\366" }, /* 24 o */ + { ASCII, "p", "P", "\020", "p" }, /* 25 p */ + { ASCII, "\345", "\305", "\035", "}" }, /* 26 [ */ + { ASCII, "~", "^", "\036", "~" }, /* 27 ] */ + { ASCII, "\r", "\r", "\n", "\r" }, /* 28 return */ + { CTL, "", "", "", "" }, /* 29 control */ + { ASCII, "a", "A", "\001", "\344" }, /* 30 a */ + { ASCII, "s", "S", "\023", "\337" }, /* 31 s */ + { ASCII, "d", "D", "\004", "d" }, /* 32 d */ + { ASCII, "f", "F", "\006", "f" }, /* 33 f */ + { ASCII, "g", "G", "\007", "g" }, /* 34 g */ + { ASCII, "h", "H", "\010", "h" }, /* 35 h */ + { ASCII, "j", "J", "\n", "j" }, /* 36 j */ + { ASCII, "k", "K", "\013", "k" }, /* 37 k */ + { ASCII, "l", "L", "\014", "l" }, /* 38 l */ + { ASCII, "\366", "\326", "\034", "|" }, /* 39 ; */ + { ASCII, "\344", "\304", "\033", "{" }, /* 40 ' */ + { ASCII, "\247", "\275", "\000", "@" }, /* 41 ` */ + { SHIFT, "", "", "", "" }, /* 42 shift */ + { ASCII, "'", "*", "'", "'" }, /* 43 \ */ + { ASCII, "z", "Z", "\032", "z" }, /* 44 z */ + { ASCII, "x", "X", "\030", "x" }, /* 45 x */ + { ASCII, "c", "C", "\003", "c" }, /* 46 c */ + { ASCII, "v", "V", "\026", "v" }, /* 47 v */ + { ASCII, "b", "B", "\002", "b" }, /* 48 b */ + { ASCII, "n", "N", "\016", "n" }, /* 49 n */ + { ASCII, "m", "M", "\015", "m" }, /* 50 m */ + { ASCII, ",", ";", ",", "," }, /* 51 , */ + { ASCII, ".", ":", ".", "." }, /* 52 . 
*/ + { ASCII, "-", "_", "\037", "-" }, /* 53 / */ + { SHIFT, "", "", "", "" }, /* 54 shift */ + { KP, "*", "*", "*", "*" }, /* 55 kp * */ + { ALT, "", "", "", "" }, /* 56 alt */ + { ASCII, " ", " ", "\000", " " }, /* 57 space */ + { CAPS, "", "", "", "" }, /* 58 caps */ + { FUNC, "\033[M", "\033[Y", "\033[k", "" }, /* 59 f1 */ + { FUNC, "\033[N", "\033[Z", "\033[l", "" }, /* 60 f2 */ + { FUNC, "\033[O", "\033[a", "\033[m", "" }, /* 61 f3 */ + { FUNC, "\033[P", "\033[b", "\033[n", "" }, /* 62 f4 */ + { FUNC, "\033[Q", "\033[c", "\033[o", "" }, /* 63 f5 */ + { FUNC, "\033[R", "\033[d", "\033[p", "" }, /* 64 f6 */ + { FUNC, "\033[S", "\033[e", "\033[q", "" }, /* 65 f7 */ + { FUNC, "\033[T", "\033[f", "\033[r", "" }, /* 66 f8 */ + { FUNC, "\033[U", "\033[g", "\033[s", "" }, /* 67 f9 */ + { FUNC, "\033[V", "\033[h", "\033[t", "" }, /* 68 f10 */ + { NUM, "", "", "", "" }, /* 69 num lock */ + { SCROLL, "", "", "", "" }, /* 70 scroll lock */ + { KP, "7", "\033[H", "7", "" }, /* 71 kp 7 */ + { KP, "8", "\033[A", "8", "" }, /* 72 kp 8 */ + { KP, "9", "\033[I", "9", "" }, /* 73 kp 9 */ + { KP, "-", "-", "-", "" }, /* 74 kp - */ + { KP, "4", "\033[D", "4", "" }, /* 75 kp 4 */ + { KP, "5", "\033[E", "5", "" }, /* 76 kp 5 */ + { KP, "6", "\033[C", "6", "" }, /* 77 kp 6 */ + { KP, "+", "+", "+", "" }, /* 78 kp + */ + { KP, "1", "\033[F", "1", "" }, /* 79 kp 1 */ + { KP, "2", "\033[B", "2", "" }, /* 80 kp 2 */ + { KP, "3", "\033[G", "3", "" }, /* 81 kp 3 */ + { KP, "0", "\033[L", "0", "" }, /* 82 kp 0 */ + { KP, ".", "\177", ".", "" }, /* 83 kp . */ + { NONE, "", "", "", "" }, /* 84 0 */ + { NONE, "100", "", "", "" }, /* 85 0 */ + { ASCII, "<", ">", "<", "|" }, /* 86 < > */ + { FUNC, "\033[W", "\033[i", "\033[u","" }, /* 87 f11 */ + { FUNC, "\033[X", "\033[j", "\033[v","" }, /* 88 f12 */ + { NONE, "102", "", "", "" }, /* 89 0 */ + { NONE, "103", "", "", "" }, /* 90 0 */ + { NONE, "", "", "", "" }, /* 91 0 */ + { NONE, "", "", "", "" }, /* 92 0 */ + { NONE, "", "", "", "" }, /* 93 0 */ + { NONE, "", "", "", "" }, /* 94 0 */ + { NONE, "", "", "", "" }, /* 95 0 */ + { NONE, "", "", "", "" }, /* 96 0 */ + { NONE, "", "", "", "" }, /* 97 0 */ + { NONE, "", "", "", "" }, /* 98 0 */ + { NONE, "", "", "", "" }, /* 99 0 */ + { NONE, "", "", "", "" }, /* 100 */ + { NONE, "", "", "", "" }, /* 101 */ + { NONE, "", "", "", "" }, /* 102 */ + { NONE, "", "", "", "" }, /* 103 */ + { NONE, "", "", "", "" }, /* 104 */ + { NONE, "", "", "", "" }, /* 105 */ + { NONE, "", "", "", "" }, /* 106 */ + { NONE, "", "", "", "" }, /* 107 */ + { NONE, "", "", "", "" }, /* 108 */ + { NONE, "", "", "", "" }, /* 109 */ + { NONE, "", "", "", "" }, /* 110 */ + { NONE, "", "", "", "" }, /* 111 */ + { NONE, "", "", "", "" }, /* 112 */ + { NONE, "", "", "", "" }, /* 113 */ + { NONE, "", "", "", "" }, /* 114 */ + { NONE, "", "", "", "" }, /* 115 */ + { NONE, "", "", "", "" }, /* 116 */ + { NONE, "", "", "", "" }, /* 117 */ + { NONE, "", "", "", "" }, /* 118 */ + { NONE, "", "", "", "" }, /* 119 */ + { NONE, "", "", "", "" }, /* 120 */ + { NONE, "", "", "", "" }, /* 121 */ + { NONE, "", "", "", "" }, /* 122 */ + { NONE, "", "", "", "" }, /* 123 */ + { NONE, "", "", "", "" }, /* 124 */ + { NONE, "", "", "", "" }, /* 125 */ + { NONE, "", "", "", "" }, /* 126 */ + { NONE, "", "", "", "" }, /* 127 */ +}; +#endif + +/* + * XXXX Add tables for other keyboards here + */ + +#endif + +#if (NPCCONSKBD == 0) +char * +sget() +{ + u_char dt, *capchar; + +top: + KBD_DELAY; + dt = inb(IO_KBD + KBDATAP); + + switch (dt) { + case KBR_ACK: + ack = 1; + goto 
loop; + case KBR_RESEND: + nak = 1; + goto loop; + } + + capchar = strans(dt); + if (capchar) + return (capchar); + +loop: + if ((inb(IO_KBD + KBSTATP) & KBS_DIB) == 0) + return (0); + goto top; +} +#endif + +/* + * Get characters from the keyboard. If none are present, return NULL. + */ +char * +strans(dt) + u_char dt; +{ + static u_char extended = 0, shift_state = 0; + static u_char capchar[2]; + +#ifdef XSERVER + if (pc_xmode > 0) { +#if defined(DDB) && defined(XSERVER_DDB) + /* F12 enters the debugger while in X mode */ + if ((dt == 88) && !pccons_is_console) + Debugger(); +#endif + capchar[0] = dt; + capchar[1] = 0; + /* + * Check for locking keys. + * + * XXX Setting the LEDs this way is a bit bogus. What if the + * keyboard has been remapped in X? + */ + switch (scan_codes[dt & 0x7f].type) { + case NUM: + if (dt & 0x80) { + shift_state &= ~NUM; + break; + } + if (shift_state & NUM) + break; + shift_state |= NUM; + lock_state ^= NUM; + update_leds(); + break; + case CAPS: + if (dt & 0x80) { + shift_state &= ~CAPS; + break; + } + if (shift_state & CAPS) + break; + shift_state |= CAPS; + lock_state ^= CAPS; + update_leds(); + break; + case SCROLL: + if (dt & 0x80) { + shift_state &= ~SCROLL; + break; + } + if (shift_state & SCROLL) + break; + shift_state |= SCROLL; + lock_state ^= SCROLL; + update_leds(); + break; + } + return (capchar); + } +#endif /* XSERVER */ + + switch (dt) { + case KBR_EXTENDED0: + extended = 1; + return (0); + } + +#ifdef DDB + /* + * Check for cntl-alt-esc. + */ + if ((dt == 1) && ((shift_state & (CTL | ALT)) == (CTL | ALT)) + && pccons_is_console) { + Debugger(); + dt |= 0x80; /* discard esc (ddb discarded ctl-alt) */ + } +#endif + + /* + * Check for make/break. + */ + if (dt & 0x80) { + /* + * break + */ + dt &= 0x7f; + switch (scan_codes[dt].type) { + case NUM: + shift_state &= ~NUM; + break; + case CAPS: + shift_state &= ~CAPS; + break; + case SCROLL: + shift_state &= ~SCROLL; + break; + case SHIFT: + shift_state &= ~SHIFT; + break; + case ALT: +#ifdef NONUS_KBD + if (extended) + shift_state &= ~ALTGR; + else +#endif + shift_state &= ~ALT; + break; + case CTL: + shift_state &= ~CTL; + break; + } + } else { + /* + * make + */ +#ifdef NUMERIC_SLASH_FIX + /* fix numeric / on non US keyboard */ + if (extended && dt == 53) { + capchar[0] = '/'; + extended = 0; + return (capchar); + } +#endif + switch (scan_codes[dt].type) { + /* + * locking keys + */ + case NUM: + if (shift_state & NUM) + break; + shift_state |= NUM; + lock_state ^= NUM; + update_leds(); + break; + case CAPS: + if (shift_state & CAPS) + break; + shift_state |= CAPS; + lock_state ^= CAPS; + update_leds(); + break; + case SCROLL: + if (shift_state & SCROLL) + break; + shift_state |= SCROLL; + if ((lock_state & SCROLL) == 0) + capchar[0] = 'S' - '@'; + else + capchar[0] = 'Q' - '@'; + extended = 0; + return (capchar); + /* + * non-locking keys + */ + case SHIFT: + shift_state |= SHIFT; + break; + case ALT: +#ifdef NONUS_KBD + if (extended) + shift_state |= ALTGR; + else +#endif + shift_state |= ALT; + break; + case CTL: + shift_state |= CTL; + break; + case ASCII: +#ifdef NONUS_KBD + if (shift_state & ALTGR) { + capchar[0] = scan_codes[dt].altgr[0]; + if (shift_state & CTL) + capchar[0] &= 0x1f; + } else +#endif + /* control has highest priority */ + if (shift_state & CTL) + capchar[0] = scan_codes[dt].ctl[0]; + else if (shift_state & SHIFT) + capchar[0] = scan_codes[dt].shift[0]; + else + capchar[0] = scan_codes[dt].unshift[0]; + if ((lock_state & CAPS) && capchar[0] >= 'a' && + capchar[0] <= 'z') 
{ + capchar[0] -= ('a' - 'A'); + } + capchar[0] |= (shift_state & ALT); + extended = 0; + return (capchar); + case NONE: + break; + case FUNC: { + char *more_chars; + if (shift_state & SHIFT) + more_chars = scan_codes[dt].shift; + else if (shift_state & CTL) + more_chars = scan_codes[dt].ctl; + else + more_chars = scan_codes[dt].unshift; + extended = 0; + return (more_chars); + } + case KP: { + char *more_chars; + if (shift_state & (SHIFT | CTL) || + (lock_state & NUM) == 0 || extended) + more_chars = scan_codes[dt].shift; + else + more_chars = scan_codes[dt].unshift; + extended = 0; + return (more_chars); + } + } + } + + extended = 0; + return (0); +} + +paddr_t +pcmmap(dev, offset, nprot) + dev_t dev; + off_t offset; + int nprot; +{ + + if (offset > 0x20000) + return (-1); + return (x86_64_btop(0xa0000 + offset)); +} + +#ifdef XSERVER +void +pc_xmode_on() +{ +#ifdef COMPAT_10 + struct trapframe *fp; +#endif + + if (pc_xmode) + return; + pc_xmode = 1; + +#ifdef XFREE86_BUG_COMPAT + /* If still unchanged, get current shape. */ + if (cursor_shape == 0xffff) + get_cursor_shape(); +#endif + +#ifdef COMPAT_10 + /* This is done by i386_iopl(3) now. */ + fp = curproc->p_md.md_regs; + fp->tf_eflags |= PSL_IOPL; +#endif +} + +void +pc_xmode_off() +{ + struct trapframe *fp; + + if (pc_xmode == 0) + return; + pc_xmode = 0; + +#ifdef XFREE86_BUG_COMPAT + /* XXX It would be hard to justify why the X server doesn't do this. */ + set_cursor_shape(); +#endif + async_update(); + + fp = curproc->p_md.md_regs; + fp->tf_eflags &= ~PSL_IOPL; +} +#endif /* XSERVER */ diff --git a/sys/arch/x86_64/pci/pchb.c b/sys/arch/x86_64/pci/pchb.c new file mode 100644 index 000000000000..4be238c33ab9 --- /dev/null +++ b/sys/arch/x86_64/pci/pchb.c @@ -0,0 +1,273 @@ +/* $NetBSD: pchb.c,v 1.1 2001/06/19 00:20:45 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include + +#include "rnd.h" + +#define PCISET_BRIDGETYPE_MASK 0x3 +#define PCISET_TYPE_COMPAT 0x1 +#define PCISET_TYPE_AUX 0x2 + +#define PCISET_BUSCONFIG_REG 0x48 +#define PCISET_BRIDGE_NUMBER(reg) (((reg) >> 8) & 0xff) +#define PCISET_PCI_BUS_NUMBER(reg) (((reg) >> 16) & 0xff) + +/* XXX should be in dev/ic/i82443reg.h */ +#define I82443BX_SDRAMC_REG 0x76 + +/* XXX should be in dev/ic/i82424{reg.var}.h */ +#define I82424_CPU_BCTL_REG 0x53 +#define I82424_PCI_BCTL_REG 0x54 + +#define I82424_BCTL_CPUMEM_POSTEN 0x01 +#define I82424_BCTL_CPUPCI_POSTEN 0x02 +#define I82424_BCTL_PCIMEM_BURSTEN 0x01 +#define I82424_BCTL_PCI_BURSTEN 0x02 + +int pchbmatch __P((struct device *, struct cfdata *, void *)); +void pchbattach __P((struct device *, struct device *, void *)); + +int pchb_print __P((void *, const char *)); + +struct cfattach pchb_ca = { + sizeof(struct pchb_softc), pchbmatch, pchbattach +}; + +int +pchbmatch(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + struct pci_attach_args *pa = aux; + + if (PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE && + PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST) { + return (1); + } + + return (0); +} + +void +pchbattach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ +#if NRND > 0 + struct pchb_softc *sc = (void *) self; +#endif + struct pci_attach_args *pa = aux; + char devinfo[256]; + struct pcibus_attach_args pba; + pcireg_t bcreg; + u_char bdnum, pbnum; + pcitag_t tag; + int doattach; + + printf("\n"); + doattach = 0; + + /* + * Print out a description, and configure certain chipsets which + * have auxiliary PCI buses. + */ + + pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); + printf("%s: %s (rev. 0x%02x)\n", self->dv_xname, devinfo, + PCI_REVISION(pa->pa_class)); + switch (PCI_VENDOR(pa->pa_id)) { + case PCI_VENDOR_SERVERWORKS: + pbnum = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x44) & 0xff; + + if (pbnum == 0) + break; + + /* + * This host bridge has a second PCI bus. + * Configure it. + */ + doattach = 1; + break; + + case PCI_VENDOR_INTEL: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_INTEL_82443BX_AGP: + case PCI_PRODUCT_INTEL_82443BX_NOAGP: + /* + * BIOS BUG WORKAROUND! The 82443BX + * datasheet indicates that the only + * legal setting for the "Idle/Pipeline + * DRAM Leadoff Timing (IPLDT)" parameter + * (bits 9:8) is 01. Unfortunately, some + * BIOSs do not set these bits properly. 
+ */ + bcreg = pci_conf_read(pa->pa_pc, pa->pa_tag, + I82443BX_SDRAMC_REG); + if ((bcreg & 0x0300) != 0x0100) { + printf("%s: fixing Idle/Pipeline DRAM " + "Leadoff Timing\n", self->dv_xname); + bcreg &= ~0x0300; + bcreg |= 0x0100; + pci_conf_write(pa->pa_pc, pa->pa_tag, + I82443BX_SDRAMC_REG, bcreg); + } + break; + + case PCI_PRODUCT_INTEL_PCI450_PB: + bcreg = pci_conf_read(pa->pa_pc, pa->pa_tag, + PCISET_BUSCONFIG_REG); + bdnum = PCISET_BRIDGE_NUMBER(bcreg); + pbnum = PCISET_PCI_BUS_NUMBER(bcreg); + switch (bdnum & PCISET_BRIDGETYPE_MASK) { + default: + printf("%s: bdnum=%x (reserved)\n", + self->dv_xname, bdnum); + break; + case PCISET_TYPE_COMPAT: + printf("%s: Compatibility PB (bus %d)\n", + self->dv_xname, pbnum); + break; + case PCISET_TYPE_AUX: + printf("%s: Auxiliary PB (bus %d)\n", + self->dv_xname, pbnum); + /* + * This host bridge has a second PCI bus. + * Configure it. + */ + doattach = 1; + break; + } + break; + case PCI_PRODUCT_INTEL_CDC: + bcreg = pci_conf_read(pa->pa_pc, pa->pa_tag, + I82424_CPU_BCTL_REG); + if (bcreg & I82424_BCTL_CPUPCI_POSTEN) { + bcreg &= ~I82424_BCTL_CPUPCI_POSTEN; + pci_conf_write(pa->pa_pc, pa->pa_tag, + I82424_CPU_BCTL_REG, bcreg); + printf("%s: disabled CPU-PCI write posting\n", + self->dv_xname); + } + break; + case PCI_PRODUCT_INTEL_82451NX_PXB: + /* + * The NX chipset supports up to 2 "PXB" chips + * which can drive 2 PCI buses each. Each bus + * shows up as logical PCI device, with fixed + * device numbers between 18 and 21. + * See the datasheet at + ftp://download.intel.com/design/chipsets/datashts/24377102.pdf + * for details. + * (It would be easier to attach all the buses + * at the MIOC, but less aesthetical imho.) + */ + pbnum = 0; + switch (pa->pa_device) { + case 18: /* PXB 0 bus A - primary bus */ + break; + case 19: /* PXB 0 bus B */ + /* read SUBA0 from MIOC */ + tag = pci_make_tag(pa->pa_pc, 0, 16, 0); + bcreg = pci_conf_read(pa->pa_pc, tag, 0xd0); + pbnum = ((bcreg & 0x0000ff00) >> 8) + 1; + break; + case 20: /* PXB 1 bus A */ + /* read BUSNO1 from MIOC */ + tag = pci_make_tag(pa->pa_pc, 0, 16, 0); + bcreg = pci_conf_read(pa->pa_pc, tag, 0xd0); + pbnum = (bcreg & 0xff000000) >> 24; + break; + case 21: /* PXB 1 bus B */ + /* read SUBA1 from MIOC */ + tag = pci_make_tag(pa->pa_pc, 0, 16, 0); + bcreg = pci_conf_read(pa->pa_pc, tag, 0xd4); + pbnum = (bcreg & 0x000000ff) + 1; + break; + } + if (pbnum != 0) + doattach = 1; + break; + } + break; + } + + if (doattach) { + pba.pba_busname = "pci"; + pba.pba_iot = pa->pa_iot; + pba.pba_memt = pa->pa_memt; + pba.pba_dmat = pa->pa_dmat; + pba.pba_bus = pbnum; + pba.pba_flags = pa->pa_flags; + pba.pba_pc = pa->pa_pc; + config_found(self, &pba, pchb_print); + } + +#if NRND > 0 + /* + * Attach a random number generator, if there is one. + */ + pchb_attach_rnd(sc, pa); +#endif +} + +int +pchb_print(aux, pnp) + void *aux; + const char *pnp; +{ + struct pcibus_attach_args *pba = aux; + + if (pnp) + printf("%s at %s", pba->pba_busname, pnp); + printf(" bus %d", pba->pba_bus); + return (UNCONF); +} diff --git a/sys/arch/x86_64/pci/pchbvar.h b/sys/arch/x86_64/pci/pchbvar.h new file mode 100644 index 000000000000..d9a52502b00f --- /dev/null +++ b/sys/arch/x86_64/pci/pchbvar.h @@ -0,0 +1,60 @@ +/* $NetBSD: pchbvar.h,v 1.1 2001/06/19 00:21:35 fvdl Exp $ */ + +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _I386_PCI_PCHBVAR_H_ +#define _I386_PCI_PCHBVAR_H_ + +#include +#include + +struct pchb_softc { + struct device sc_dev; + + bus_space_tag_t sc_st; + bus_space_handle_t sc_sh; + + struct callout sc_rnd_ch; + rndsource_element_t sc_rnd_source; + + int sc_rnd_i; + u_int32_t sc_rnd_ax; +}; + +void pchb_attach_rnd(struct pchb_softc *, struct pci_attach_args *); + +#endif /* _I386_PCI_PCHBVAR_H_ */ diff --git a/sys/arch/x86_64/pci/pci_machdep.c b/sys/arch/x86_64/pci/pci_machdep.c new file mode 100644 index 000000000000..2efb671abdb8 --- /dev/null +++ b/sys/arch/x86_64/pci/pci_machdep.c @@ -0,0 +1,614 @@ +/* $NetBSD: pci_machdep.c,v 1.1 2001/06/19 00:20:45 fvdl Exp $ */ + +/*- + * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. 
Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. + * Copyright (c) 1994 Charles M. Hannum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles M. Hannum. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Machine-specific functions for PCI autoconfiguration. + * + * On PCs, there are two methods of generating PCI configuration cycles. + * We try to detect the appropriate mechanism for this machine and set + * up a few function pointers to access the correct method directly. + * + * The configuration method can be hard-coded in the config file by + * using `options PCI_CONF_MODE=N', where `N' is the configuration mode + * as defined section 3.6.4.1, `Generating Configuration Cycles'. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define _X86_64_BUS_DMA_PRIVATE +#include + +#include + +#include +#include +#include +#include +#include + +#include "opt_pci_conf_mode.h" + +int pci_mode = -1; + +struct simplelock pci_conf_slock; + +#define PCI_CONF_LOCK(s) \ +do { \ + (s) = splhigh(); \ + simple_lock(&pci_conf_slock); \ +} while (0) + +#define PCI_CONF_UNLOCK(s) \ +do { \ + simple_unlock(&pci_conf_slock); \ + splx((s)); \ +} while (0) + +#define PCI_MODE1_ENABLE 0x80000000UL +#define PCI_MODE1_ADDRESS_REG 0x0cf8 +#define PCI_MODE1_DATA_REG 0x0cfc + +#define PCI_MODE2_ENABLE_REG 0x0cf8 +#define PCI_MODE2_FORWARD_REG 0x0cfa + +#define _m1tag(b, d, f) \ + (PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)) +#define _id(v, p) \ + (((v) << PCI_VENDOR_SHIFT) | ((p) << PCI_PRODUCT_SHIFT)) +#define _qe(bus, dev, fcn, vend, prod) \ + {_m1tag(bus, dev, fcn), _id(vend, prod)} +struct { + u_int32_t tag; + pcireg_t id; +} pcim1_quirk_tbl[] = { + _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1), + /* XXX Triflex2 not tested */ + _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2), + _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4), + /* Triton needed for Connectix Virtual PC */ + _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX), + {0, 0xffffffff} /* patchable */ +}; +#undef _m1tag +#undef _id +#undef _qe + +/* + * PCI doesn't have any special needs; just use the generic versions + * of these functions. + */ +struct x86_64_bus_dma_tag pci_bus_dma_tag = { + 0, /* _bounce_thresh */ + _bus_dmamap_create, + _bus_dmamap_destroy, + _bus_dmamap_load, + _bus_dmamap_load_mbuf, + _bus_dmamap_load_uio, + _bus_dmamap_load_raw, + _bus_dmamap_unload, + NULL, /* _dmamap_sync */ + _bus_dmamem_alloc, + _bus_dmamem_free, + _bus_dmamem_map, + _bus_dmamem_unmap, + _bus_dmamem_mmap, +}; + +void +pci_attach_hook(parent, self, pba) + struct device *parent, *self; + struct pcibus_attach_args *pba; +{ + + if (pba->pba_bus == 0) + printf(": configuration mode %d", pci_mode); +} + +int +pci_bus_maxdevs(pc, busno) + pci_chipset_tag_t pc; + int busno; +{ + + /* + * Bus number is irrelevant. If Configuration Mechanism 2 is in + * use, can only have devices 0-15 on any bus. If Configuration + * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal' + * range). 
+ */ + if (pci_mode == 2) + return (16); + else + return (32); +} + +pcitag_t +pci_make_tag(pc, bus, device, function) + pci_chipset_tag_t pc; + int bus, device, function; +{ + pcitag_t tag; + +#ifndef PCI_CONF_MODE + switch (pci_mode) { + case 1: + goto mode1; + case 2: + goto mode2; + default: + panic("pci_make_tag: mode not configured"); + } +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 1) +#ifndef PCI_CONF_MODE +mode1: +#endif + if (bus >= 256 || device >= 32 || function >= 8) + panic("pci_make_tag: bad request"); + + tag.mode1 = PCI_MODE1_ENABLE | + (bus << 16) | (device << 11) | (function << 8); + return tag; +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 2) +#ifndef PCI_CONF_MODE +mode2: +#endif + if (bus >= 256 || device >= 16 || function >= 8) + panic("pci_make_tag: bad request"); + + tag.mode2.port = 0xc000 | (device << 8); + tag.mode2.enable = 0xf0 | (function << 1); + tag.mode2.forward = bus; + return tag; +#endif +} + +void +pci_decompose_tag(pc, tag, bp, dp, fp) + pci_chipset_tag_t pc; + pcitag_t tag; + int *bp, *dp, *fp; +{ + +#ifndef PCI_CONF_MODE + switch (pci_mode) { + case 1: + goto mode1; + case 2: + goto mode2; + default: + panic("pci_decompose_tag: mode not configured"); + } +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 1) +#ifndef PCI_CONF_MODE +mode1: +#endif + if (bp != NULL) + *bp = (tag.mode1 >> 16) & 0xff; + if (dp != NULL) + *dp = (tag.mode1 >> 11) & 0x1f; + if (fp != NULL) + *fp = (tag.mode1 >> 8) & 0x7; + return; +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 2) +#ifndef PCI_CONF_MODE +mode2: +#endif + if (bp != NULL) + *bp = tag.mode2.forward & 0xff; + if (dp != NULL) + *dp = (tag.mode2.port >> 8) & 0xf; + if (fp != NULL) + *fp = (tag.mode2.enable >> 1) & 0x7; +#endif +} + +pcireg_t +pci_conf_read(pc, tag, reg) + pci_chipset_tag_t pc; + pcitag_t tag; + int reg; +{ + pcireg_t data; + int s; + +#ifndef PCI_CONF_MODE + switch (pci_mode) { + case 1: + goto mode1; + case 2: + goto mode2; + default: + panic("pci_conf_read: mode not configured"); + } +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 1) +#ifndef PCI_CONF_MODE +mode1: +#endif + PCI_CONF_LOCK(s); + outl(PCI_MODE1_ADDRESS_REG, tag.mode1 | reg); + data = inl(PCI_MODE1_DATA_REG); + outl(PCI_MODE1_ADDRESS_REG, 0); + PCI_CONF_UNLOCK(s); + return data; +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 2) +#ifndef PCI_CONF_MODE +mode2: +#endif + PCI_CONF_LOCK(s); + outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable); + outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward); + data = inl(tag.mode2.port | reg); + outb(PCI_MODE2_ENABLE_REG, 0); + PCI_CONF_UNLOCK(s); + return data; +#endif +} + +void +pci_conf_write(pc, tag, reg, data) + pci_chipset_tag_t pc; + pcitag_t tag; + int reg; + pcireg_t data; +{ + int s; + +#ifndef PCI_CONF_MODE + switch (pci_mode) { + case 1: + goto mode1; + case 2: + goto mode2; + default: + panic("pci_conf_write: mode not configured"); + } +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 1) +#ifndef PCI_CONF_MODE +mode1: +#endif + PCI_CONF_LOCK(s); + outl(PCI_MODE1_ADDRESS_REG, tag.mode1 | reg); + outl(PCI_MODE1_DATA_REG, data); + outl(PCI_MODE1_ADDRESS_REG, 0); + PCI_CONF_UNLOCK(s); + return; +#endif + +#if !defined(PCI_CONF_MODE) || (PCI_CONF_MODE == 2) +#ifndef PCI_CONF_MODE +mode2: +#endif + PCI_CONF_LOCK(s); + outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable); + outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward); + outl(tag.mode2.port | reg, data); + outb(PCI_MODE2_ENABLE_REG, 0); + PCI_CONF_UNLOCK(s); +#endif +} + 
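The pci_conf_read()/pci_conf_write() pair above hides both PC configuration mechanisms behind a single interface. As a minimal illustrative sketch (not part of the patch; locking and mode dispatch omitted), this is how a Mechanism 1 read is composed. It assumes only the outl()/inl() port-I/O primitives and the PCI_MODE1_* constants already used above; the example_mode1_read() name is purely hypothetical.

static u_int32_t
example_mode1_read(bus, dev, func, reg)
	int bus, dev, func, reg;
{
	u_int32_t addr;

	/*
	 * Mechanism 1 packs the target into one 32-bit value written to
	 * port 0xcf8: bit 31 = enable, bits 23:16 = bus, 15:11 = device,
	 * 10:8 = function, 7:2 = dword-aligned register offset.
	 */
	addr = PCI_MODE1_ENABLE | (bus << 16) | (dev << 11) |
	    (func << 8) | (reg & 0xfc);
	outl(PCI_MODE1_ADDRESS_REG, addr);	/* select the target */
	return (inl(PCI_MODE1_DATA_REG));	/* fetch the register */
}

Mechanism 2, by contrast, exposes each device's configuration registers as a 256-byte I/O window at 0xc000 | (device << 8) once the enable (0xcf8) and forward (0xcfa) registers have been programmed, which is why pci_bus_maxdevs() above reports only 16 devices per bus for it.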
+int +pci_mode_detect() +{ + +#ifdef PCI_CONF_MODE +#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2) + return (pci_mode = PCI_CONF_MODE); +#else +#error Invalid PCI configuration mode. +#endif +#else + u_int32_t sav, val; + int i; + pcireg_t idreg; + + simple_lock_init(&pci_conf_slock); + + if (pci_mode != -1) + return pci_mode; + + /* + * We try to divine which configuration mode the host bridge wants. + */ + + sav = inl(PCI_MODE1_ADDRESS_REG); + + pci_mode = 1; /* assume this for now */ + /* + * catch some known buggy implementations of mode 1 + */ + for (i = 0; i < sizeof(pcim1_quirk_tbl) / sizeof(pcim1_quirk_tbl[0]); + i++) { + pcitag_t t; + + if (!pcim1_quirk_tbl[i].tag) + break; + t.mode1 = pcim1_quirk_tbl[i].tag; + idreg = pci_conf_read(0, t, PCI_ID_REG); /* needs "pci_mode" */ + if (idreg == pcim1_quirk_tbl[i].id) { +#ifdef DEBUG + printf("known mode 1 PCI chipset (%08x)\n", + idreg); +#endif + return (pci_mode); + } + } + + /* + * Strong check for standard compliant mode 1: + * 1. bit 31 ("enable") can be set + * 2. byte/word access does not affect register + */ + outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE); + outb(PCI_MODE1_ADDRESS_REG + 3, 0); + outw(PCI_MODE1_ADDRESS_REG + 2, 0); + val = inl(PCI_MODE1_ADDRESS_REG); + if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) { +#ifdef DEBUG + printf("pci_mode_detect: mode 1 enable failed (%x)\n", + val); +#endif + goto not1; + } + outl(PCI_MODE1_ADDRESS_REG, 0); + val = inl(PCI_MODE1_ADDRESS_REG); + if ((val & 0x80fffffc) != 0) + goto not1; + return (pci_mode); +not1: + outl(PCI_MODE1_ADDRESS_REG, sav); + + /* + * This mode 2 check is quite weak (and known to give false + * positives on some Compaq machines). + * However, this doesn't matter, because this is the + * last test, and simply no PCI devices will be found if + * this happens. + */ + outb(PCI_MODE2_ENABLE_REG, 0); + outb(PCI_MODE2_FORWARD_REG, 0); + if (inb(PCI_MODE2_ENABLE_REG) != 0 || + inb(PCI_MODE2_FORWARD_REG) != 0) + goto not2; + return (pci_mode = 2); +not2: + + return (pci_mode = 0); +#endif +} + +int +pci_intr_map(pa, ihp) + struct pci_attach_args *pa; + pci_intr_handle_t *ihp; +{ + int pin = pa->pa_intrpin; + int line = pa->pa_intrline; + + if (pin == 0) { + /* No IRQ used. */ + goto bad; + } + + if (pin > PCI_INTERRUPT_PIN_MAX) { + printf("pci_intr_map: bad interrupt pin %d\n", pin); + goto bad; + } + + /* + * Section 6.2.4, `Miscellaneous Functions', says that 255 means + * `unknown' or `no connection' on a PC. We assume that a device with + * `no connection' either doesn't have an interrupt (in which case the + * pin number should be 0, and would have been noticed above), or + * wasn't configured by the BIOS (in which case we punt, since there's + * no real way we can know how the interrupt lines are mapped in the + * hardware). + * + * XXX + * Since IRQ 0 is only used by the clock, and we can't actually be sure + * that the BIOS did its job, we also recognize that as meaning that + * the BIOS has not configured the device. 
+ */ + if (line == 0 || line == X86_64_PCI_INTERRUPT_LINE_NO_CONNECTION) { + printf("pci_intr_map: no mapping for pin %c\n", '@' + pin); + goto bad; + } else { + if (line >= ICU_LEN) { + printf("pci_intr_map: bad interrupt line %d\n", line); + goto bad; + } + if (line == 2) { + printf("pci_intr_map: changed line 2 to line 9\n"); + line = 9; + } + } + + *ihp = line; + return 0; + +bad: + *ihp = -1; + return 1; +} + +const char * +pci_intr_string(pc, ih) + pci_chipset_tag_t pc; + pci_intr_handle_t ih; +{ + static char irqstr[8]; /* 4 + 2 + NULL + sanity */ + + if (ih == 0 || ih >= ICU_LEN || ih == 2) + panic("pci_intr_string: bogus handle 0x%x\n", ih); + + sprintf(irqstr, "irq %d", ih); + return (irqstr); + +} + +const struct evcnt * +pci_intr_evcnt(pc, ih) + pci_chipset_tag_t pc; + pci_intr_handle_t ih; +{ + + /* XXX for now, no evcnt parent reported */ + return NULL; +} + +void * +pci_intr_establish(pc, ih, level, func, arg) + pci_chipset_tag_t pc; + pci_intr_handle_t ih; + int level, (*func) __P((void *)); + void *arg; +{ + + if (ih == 0 || ih >= ICU_LEN || ih == 2) + panic("pci_intr_establish: bogus handle 0x%x\n", ih); + + return isa_intr_establish(NULL, ih, IST_LEVEL, level, func, arg); +} + +void +pci_intr_disestablish(pc, cookie) + pci_chipset_tag_t pc; + void *cookie; +{ + + return isa_intr_disestablish(NULL, cookie); +} + +/* + * Determine which flags should be passed to the primary PCI bus's + * autoconfiguration node. We use this to detect broken chipsets + * which cannot safely use memory-mapped device access. + */ +int +pci_bus_flags() +{ + int rval = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED; + int device, maxndevs; + pcitag_t tag; + pcireg_t id; + + maxndevs = pci_bus_maxdevs(NULL, 0); + + for (device = 0; device < maxndevs; device++) { + tag = pci_make_tag(NULL, 0, device, 0); + id = pci_conf_read(NULL, tag, PCI_ID_REG); + + /* Invalid vendor ID value? */ + if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) + continue; + /* XXX Not invalid, but we've done this ~forever. */ + if (PCI_VENDOR(id) == 0) + continue; + + switch (PCI_VENDOR(id)) { + case PCI_VENDOR_SIS: + switch (PCI_PRODUCT(id)) { + case PCI_PRODUCT_SIS_85C496: + goto disable_mem; + break; + } + break; + } + } + + return (rval); + + disable_mem: + printf("Warning: broken PCI-Host bridge detected; " + "disabling memory-mapped access\n"); + rval &= ~PCI_FLAGS_MEM_ENABLED; + return (rval); +} diff --git a/sys/arch/x86_64/pci/pcib.c b/sys/arch/x86_64/pci/pcib.c new file mode 100644 index 000000000000..b3a1d25507df --- /dev/null +++ b/sys/arch/x86_64/pci/pcib.c @@ -0,0 +1,221 @@ +/* $NetBSD: pcib.c,v 1.1 2001/06/19 00:20:45 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. Thorpe. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include + +#include "isa.h" + +int pcibmatch __P((struct device *, struct cfdata *, void *)); +void pcibattach __P((struct device *, struct device *, void *)); + +struct cfattach pcib_ca = { + sizeof(struct device), pcibmatch, pcibattach +}; + +void pcib_callback __P((struct device *)); +int pcib_print __P((void *, const char *)); + +int +pcibmatch(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + struct pci_attach_args *pa = aux; + +#if 0 + /* + * PCI-ISA bridges are matched on class/subclass. + * This list contains only the bridges where correct + * (or incorrect) behaviour is not yet confirmed. + */ + switch (PCI_VENDOR(pa->pa_id)) { + case PCI_VENDOR_INTEL: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_INTEL_82426EX: + case PCI_PRODUCT_INTEL_82380AB: + return (1); + } + break; + + case PCI_VENDOR_UMC: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_UMC_UM8886F: + case PCI_PRODUCT_UMC_UM82C886: + return (1); + } + break; + case PCI_VENDOR_ALI: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_ALI_M1449: + case PCI_PRODUCT_ALI_M1543: + return (1); + } + break; + case PCI_VENDOR_COMPAQ: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_COMPAQ_PCI_ISA_BRIDGE: + return (1); + } + break; + case PCI_VENDOR_VIATECH: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_VIATECH_VT82C570MV: + case PCI_PRODUCT_VIATECH_VT82C586_ISA: + return (1); + } + break; + } +#endif + + /* + * some special cases: + */ + switch (PCI_VENDOR(pa->pa_id)) { + case PCI_VENDOR_INTEL: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_INTEL_SIO: + /* + * The Intel SIO identifies itself as a + * miscellaneous prehistoric. + */ + case PCI_PRODUCT_INTEL_82371MX: + /* + * The Intel 82371MX identifies itself erroneously as a + * miscellaneous bridge. + */ + case PCI_PRODUCT_INTEL_82371AB_ISA: + /* + * Some Intel 82371AB PCI-ISA bridge identifies + * itself as miscellaneous bridge. + */ + return (1); + } + break; + case PCI_VENDOR_SIS: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_SIS_85C503: + /* + * The SIS 85C503 identifies itself as a + * miscellaneous prehistoric. 
+ */ + return (1); + } + break; + case PCI_VENDOR_VIATECH: + switch (PCI_PRODUCT(pa->pa_id)) { + case PCI_PRODUCT_VIATECH_VT82C686A_SMB: + /* + * The VIA VT82C686A SMBus Controller identifies itself as + * an ISA bridge, but that is wrong. + */ + return (0); + } + } + + if (PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE && + PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_ISA) { + return (1); + } + + return (0); +} + +void +pcibattach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ + struct pci_attach_args *pa = aux; + char devinfo[256]; + + printf("\n"); + + /* + * Just print out a description and defer configuration + * until all PCI devices have been attached. + */ + pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo); + printf("%s: %s (rev. 0x%02x)\n", self->dv_xname, devinfo, + PCI_REVISION(pa->pa_class)); + + config_defer(self, pcib_callback); +} + +void +pcib_callback(self) + struct device *self; +{ + struct isabus_attach_args iba; + + /* + * Attach the ISA bus behind this bridge. + */ + memset(&iba, 0, sizeof(iba)); + iba.iba_busname = "isa"; + iba.iba_iot = X86_64_BUS_SPACE_IO; + iba.iba_memt = X86_64_BUS_SPACE_MEM; +#if NISA > 0 + iba.iba_dmat = &isa_bus_dma_tag; +#endif + config_found(self, &iba, pcib_print); +} + +int +pcib_print(aux, pnp) + void *aux; + const char *pnp; +{ + + /* Only ISAs can attach to pcib's; easy. */ + if (pnp) + printf("isa at %s", pnp); + return (UNCONF); +} diff --git a/sys/arch/x86_64/pci/pciide_machdep.c b/sys/arch/x86_64/pci/pciide_machdep.c new file mode 100644 index 000000000000..0e67c0cf3b69 --- /dev/null +++ b/sys/arch/x86_64/pci/pciide_machdep.c @@ -0,0 +1,72 @@ +/* $NetBSD: pciide_machdep.c,v 1.1 2001/06/19 00:20:45 fvdl Exp $ */ + +/* + * Copyright (c) 1998 Christopher G. Demetriou. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Christopher G. Demetriou + * for the NetBSD Project. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * PCI IDE controller driver (i386 machine-dependent portion). + * + * Author: Christopher G.
Demetriou, March 2, 1998 (derived from NetBSD + * sys/dev/pci/ppb.c, revision 1.16). + * + * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" from the + * PCI SIG. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include + +void * +pciide_machdep_compat_intr_establish(dev, pa, chan, func, arg) + struct device *dev; + struct pci_attach_args *pa; + int chan; + int (*func) __P((void *)); + void *arg; +{ + int irq; + void *cookie; + + irq = PCIIDE_COMPAT_IRQ(chan); + cookie = isa_intr_establish(NULL, irq, IST_EDGE, IPL_BIO, func, arg); + if (cookie == NULL) + return (NULL); + printf("%s: %s channel interrupting at irq %d\n", dev->dv_xname, + PCIIDE_CHANNEL_NAME(chan), irq); + return (cookie); +} diff --git a/sys/arch/x86_64/x86_64/autoconf.c b/sys/arch/x86_64/x86_64/autoconf.c new file mode 100644 index 000000000000..f2ebb988d72b --- /dev/null +++ b/sys/arch/x86_64/x86_64/autoconf.c @@ -0,0 +1,480 @@ +/* $NetBSD: autoconf.c,v 1.1 2001/06/19 00:21:15 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)autoconf.c 7.1 (Berkeley) 5/9/91 + */ + +/* + * Setup the system to run on the current machine. + * + * Configure() is called at boot time and initializes the vba + * device tables and the memory controller monitoring. Available + * devices are determined (from possibilities mentioned in ioconf.c), + * and the drivers are initialized. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int match_harddisk __P((struct device *, struct btinfo_bootdisk *)); +static void matchbiosdisks __P((void)); +static void findroot __P((void)); + +extern struct disklist *x86_64_alldisks; +extern int x86_64_ndisks; + +#include "bios32.h" +#if NBIOS32 > 0 +#include +#endif + +struct device *booted_device; +int booted_partition; + +/* + * Determine i/o configuration for a machine. + */ +void +cpu_configure() +{ + + startrtclock(); + +#if NBIOS32 > 0 + bios32_init(); +#endif + + if (config_rootfound("mainbus", NULL) == NULL) + panic("configure: mainbus not configured"); + + printf("biomask %x netmask %x ttymask %x\n", + (u_short)imask[IPL_BIO], (u_short)imask[IPL_NET], + (u_short)imask[IPL_TTY]); + + spl0(); + + /* Set up proc0's TSS and LDT (after the FPU is configured). */ + x86_64_proc0_tss_ldt_init(); + + /* XXX Finish deferred buffer cache allocation. */ + x86_64_bufinit(); +} + +void +cpu_rootconf() +{ + findroot(); + matchbiosdisks(); + + printf("boot device: %s\n", + booted_device ? booted_device->dv_xname : ""); + + setroot(booted_device, booted_partition); +} + +/* + * XXX ugly bit of code. But, this is the only safe time that the + * match between BIOS disks and native disks can be done. + */ +static void +matchbiosdisks() +{ + struct btinfo_biosgeom *big; + struct bi_biosgeom_entry *be; + struct device *dv; + struct devnametobdevmaj *d; + int i, ck, error, m, n; + struct vnode *tv; + char mbr[DEV_BSIZE]; + + big = lookup_bootinfo(BTINFO_BIOSGEOM); + + if (big == NULL) + return; + + /* + * First, count all native disks + */ + for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) + if (dv->dv_class == DV_DISK && + (!strcmp(dv->dv_cfdata->cf_driver->cd_name, "sd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "wd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "ld"))) + x86_64_ndisks++; + + if (x86_64_ndisks == 0) + return; + + /* XXX M_TEMP is wrong */ + x86_64_alldisks = malloc(sizeof (struct disklist) + (x86_64_ndisks - 1) * + sizeof (struct nativedisk_info), + M_TEMP, M_NOWAIT); + if (x86_64_alldisks == NULL) + return; + + x86_64_alldisks->dl_nnativedisks = x86_64_ndisks; + x86_64_alldisks->dl_nbiosdisks = big->num; + for (i = 0; i < big->num; i++) { + x86_64_alldisks->dl_biosdisks[i].bi_dev = big->disk[i].dev; + x86_64_alldisks->dl_biosdisks[i].bi_sec = big->disk[i].sec; + x86_64_alldisks->dl_biosdisks[i].bi_head = big->disk[i].head; + x86_64_alldisks->dl_biosdisks[i].bi_cyl = big->disk[i].cyl; + x86_64_alldisks->dl_biosdisks[i].bi_lbasecs = big->disk[i].totsec; + x86_64_alldisks->dl_biosdisks[i].bi_flags = big->disk[i].flags; + } + + /* + * XXX code duplication from findroot() + */ + n = -1; + for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) { + if (dv->dv_class != DV_DISK) + continue; +#ifdef GEOM_DEBUG + printf("matchbiosdisks: trying to match (%s) %s\n", + dv->dv_xname, dv->dv_cfdata->cf_driver->cd_name); +#endif + if (!strcmp(dv->dv_cfdata->cf_driver->cd_name, "sd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "wd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "ld")) { + n++; + sprintf(x86_64_alldisks->dl_nativedisks[n].ni_devname, + "%s%d", dv->dv_cfdata->cf_driver->cd_name, + dv->dv_unit); + + for (d = dev_name2blk; d->d_name && + strcmp(d->d_name, dv->dv_cfdata->cf_driver->cd_name); + d++); + if (d->d_name == NULL) + return; + + if (bdevvp(MAKEDISKDEV(d->d_maj, 
dv->dv_unit, RAW_PART), + &tv)) + panic("matchbiosdisks: can't alloc vnode"); + + error = VOP_OPEN(tv, FREAD, NOCRED, 0); + if (error) { + vput(tv); + continue; + } + error = vn_rdwr(UIO_READ, tv, mbr, DEV_BSIZE, 0, + UIO_SYSSPACE, 0, NOCRED, NULL, 0); + VOP_CLOSE(tv, FREAD, NOCRED, 0); + if (error) { +#ifdef GEOM_DEBUG + printf("matchbiosdisks: %s: MBR read failure\n", + dv->dv_xname); +#endif + continue; + } + + for (ck = i = 0; i < DEV_BSIZE; i++) + ck += mbr[i]; + for (m = i = 0; i < big->num; i++) { + be = &big->disk[i]; +#ifdef GEOM_DEBUG + printf("match %s with %d\n", dv->dv_xname, i); + printf("dev ck %x bios ck %x\n", ck, be->cksum); +#endif + if (be->flags & BI_GEOM_INVALID) + continue; + if (be->cksum == ck && + !memcmp(&mbr[MBR_PARTOFF], be->dosparts, + NMBRPART * + sizeof (struct mbr_partition))) { +#ifdef GEOM_DEBUG + printf("matched bios disk %x with %s\n", + be->dev, be->devname); +#endif + x86_64_alldisks->dl_nativedisks[n]. + ni_biosmatches[m++] = i; + } + } + x86_64_alldisks->dl_nativedisks[n].ni_nmatches = m; + vput(tv); + } + } +} + +/* + * helper function for "findroot()": + * return nonzero if disk device matches bootinfo + */ +static int +match_harddisk(dv, bid) + struct device *dv; + struct btinfo_bootdisk *bid; +{ + struct devnametobdevmaj *i; + struct vnode *tmpvn; + int error; + struct disklabel label; + int found = 0; + + /* + * A disklabel is required here. The + * bootblocks don't refuse to boot from + * a disk without a label, but this is + * normally not wanted. + */ + if (bid->labelsector == -1) + return(0); + + /* + * lookup major number for disk block device + */ + i = dev_name2blk; + while (i->d_name && + strcmp(i->d_name, dv->dv_cfdata->cf_driver->cd_name)) + i++; + if (i->d_name == NULL) + return(0); /* XXX panic() ??? */ + + /* + * Fake a temporary vnode for the disk, open + * it, and read the disklabel for comparison. + */ + if (bdevvp(MAKEDISKDEV(i->d_maj, dv->dv_unit, bid->partition), &tmpvn)) + panic("findroot can't alloc vnode"); + error = VOP_OPEN(tmpvn, FREAD, NOCRED, 0); + if (error) { +#ifndef DEBUG + /* + * Ignore errors caused by missing + * device, partition or medium. + */ + if (error != ENXIO && error != ENODEV) +#endif + printf("findroot: can't open dev %s%c (%d)\n", + dv->dv_xname, 'a' + bid->partition, error); + vput(tmpvn); + return(0); + } + error = VOP_IOCTL(tmpvn, DIOCGDINFO, (caddr_t)&label, FREAD, NOCRED, 0); + if (error) { + /* + * XXX can't happen - open() would + * have errored out (or faked up one) + */ + printf("can't get label for dev %s%c (%d)\n", + dv->dv_xname, 'a' + bid->partition, error); + goto closeout; + } + + /* compare with our data */ + if (label.d_type == bid->label.type && + label.d_checksum == bid->label.checksum && + !strncmp(label.d_packname, bid->label.packname, 16)) + found = 1; + +closeout: + VOP_CLOSE(tmpvn, FREAD, NOCRED, 0); + vput(tmpvn); + return(found); +} + +/* + * Attempt to find the device from which we were booted. + * If we can do so, and not instructed not to do so, + * change rootdev to correspond to the load device. + */ +void +findroot(void) +{ + struct btinfo_bootdisk *bid; + struct device *dv; + + if (booted_device) + return; + + if (lookup_bootinfo(BTINFO_NETIF)) { + /* + * We got netboot interface information, but + * "device_register()" couldn't match it to a configured + * device. Bootdisk information cannot be present at the + * same time, so give up. 
+ */ + printf("findroot: netboot interface not found\n"); + return; + } + + bid = lookup_bootinfo(BTINFO_BOOTDISK); + if (bid) { + /* + * Scan all disk devices for ones that match the passed data. + * Don't break if one is found, to get possible multiple + * matches - for problem tracking. Use the first match anyway + * because lower device numbers are more likely to be the + * boot device. + */ + for (dv = alldevs.tqh_first; dv != NULL; + dv = dv->dv_list.tqe_next) { + if (dv->dv_class != DV_DISK) + continue; + + if (!strcmp(dv->dv_cfdata->cf_driver->cd_name, "fd")) { + /* + * Assume the configured unit number matches + * the BIOS device number. (This is the old + * behaviour.) Needs some ideas how to handle + * BIOS's "swap floppy drive" options. + */ + if ((bid->biosdev & 0x80) || + dv->dv_unit != bid->biosdev) + continue; + + goto found; + } + + if (!strcmp(dv->dv_cfdata->cf_driver->cd_name, "sd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "wd") || + !strcmp(dv->dv_cfdata->cf_driver->cd_name, "ld")) { + /* + * Don't trust BIOS device numbers, try + * to match the information passed by the + * bootloader instead. + */ + if ((bid->biosdev & 0x80) == 0 || + !match_harddisk(dv, bid)) + continue; + + goto found; + } + + /* no "fd", "wd", "sd", "ld" */ + continue; + +found: + if (booted_device) { + printf("warning: double match for boot " + "device (%s, %s)\n", booted_device->dv_xname, + dv->dv_xname); + continue; + } + booted_device = dv; + booted_partition = bid->partition; + } + + if (booted_device) + return; + } + +} + +#include "pci.h" + +#include +#if NPCI > 0 +#include +#endif + +void +device_register(dev, aux) + struct device *dev; + void *aux; +{ + /* + * Handle network interfaces here, the attachment information is + * not available driver independantly later. + * For disks, there is nothing useful available at attach time. + */ + if (dev->dv_class == DV_IFNET) { + struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF); + if (bin == NULL) + return; + + /* + * We don't check the driver name against the device name + * passed by the boot ROM. The ROM should stay usable + * if the driver gets obsoleted. + * The physical attachment information (checked below) + * must be sufficient to identify the device. + */ + + if (bin->bus == BI_BUS_ISA && + !strcmp(dev->dv_parent->dv_cfdata->cf_driver->cd_name, + "isa")) { + struct isa_attach_args *iaa = aux; + + /* compare IO base address */ + if (bin->addr.iobase == iaa->ia_iobase) + goto found; + } +#if NPCI > 0 + if (bin->bus == BI_BUS_PCI && + !strcmp(dev->dv_parent->dv_cfdata->cf_driver->cd_name, + "pci")) { + struct pci_attach_args *paa = aux; + int b, d, f; + + /* + * Calculate BIOS representation of: + * + * + * + * and compare. + */ + pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f); + if (bin->addr.tag == ((b << 8) | (d << 3) | f)) + goto found; + } +#endif + } + return; + +found: + if (booted_device) { + /* XXX should be a "panic()" */ + printf("warning: double match for boot device (%s, %s)\n", + booted_device->dv_xname, dev->dv_xname); + return; + } + booted_device = dev; +} diff --git a/sys/arch/x86_64/x86_64/bios32.c b/sys/arch/x86_64/x86_64/bios32.c new file mode 100644 index 000000000000..8063c3bd845d --- /dev/null +++ b/sys/arch/x86_64/x86_64/bios32.c @@ -0,0 +1,177 @@ +/* $NetBSD: bios32.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/*- + * Copyright (c) 1999 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Jason R. 
Thorpe of the Numerical Aerospace Simulation Facility, + * NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999, by UCHIYAMA Yasushi + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. The name of the developer may NOT be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Basic interface to BIOS32 services. + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define BIOS32_START 0xe0000 +#define BIOS32_SIZE 0x20000 +#define BIOS32_END (BIOS32_START + BIOS32_SIZE - 0x10) + +struct bios32_entry bios32_entry; + +/* + * Initialize the BIOS32 interface. 
+ */ +void +bios32_init() +{ +#if 0 /* XXXfvdl need to set up compatibility segment for this */ + paddr_t entry = 0; + caddr_t p; + unsigned char cksum; + int i; + + for (p = (caddr_t)ISA_HOLE_VADDR(BIOS32_START); + p < (caddr_t)ISA_HOLE_VADDR(BIOS32_END); + p += 16) { + if (*(int *)p != BIOS32_MAKESIG('_', '3', '2', '_')) + continue; + + cksum = 0; + for (i = 0; i < 16; i++) + cksum += *(unsigned char *)(p + i); + if (cksum != 0) + continue; + + if (*(p + 9) != 1) + continue; + + entry = *(u_int32_t *)(p + 4); + + printf("BIOS32 rev. %d found at 0x%lx\n", + *(p + 8), entry); + + if (entry < BIOS32_START || + entry >= BIOS32_END) { + printf("BIOS32 entry point outside " + "allowable range\n"); + entry = 0; + } + break; + } + + if (entry != 0) { + bios32_entry.offset = (caddr_t)ISA_HOLE_VADDR(entry); + bios32_entry.segment = GSEL(GCODE_SEL, SEL_KPL); + } +#endif +} + +/* + * Call BIOS32 to locate the specified BIOS32 service, and fill + * in the entry point information. + */ +int +bios32_service(service, e, ei) + u_int32_t service; + bios32_entry_t e; + bios32_entry_info_t ei; +{ + u_int32_t eax, ebx, ecx, edx; + paddr_t entry; + + if (bios32_entry.offset == 0) + return (0); /* BIOS32 not present */ + + __asm __volatile("lcall *(%%rdi)" + : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) + : "0" (service), "1" (0), "D" (&bios32_entry)); + + if ((eax & 0xff) != 0) + return (0); /* service not found */ + + entry = ebx + edx; + + if (entry < BIOS32_START || entry >= BIOS32_END) { + printf("bios32: entry point for service %c%c%c%c is outside " + "allowable range\n", + service & 0xff, + (service >> 8) & 0xff, + (service >> 16) & 0xff, + (service >> 24) & 0xff); + return (0); + } + + e->offset = (caddr_t)ISA_HOLE_VADDR(entry); + e->segment = GSEL(GCODE_SEL, SEL_KPL); + + ei->bei_base = ebx; + ei->bei_size = ecx; + ei->bei_entry = entry; + + return (1); +} diff --git a/sys/arch/x86_64/x86_64/bus_machdep.c b/sys/arch/x86_64/x86_64/bus_machdep.c new file mode 100644 index 000000000000..ba083802224e --- /dev/null +++ b/sys/arch/x86_64/x86_64/bus_machdep.c @@ -0,0 +1,963 @@ +/* $NetBSD: bus_machdep.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace + * Simulation Facility, NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_largepages.h" + +#include +#include +#include +#include +#include + +#include + +#define _X86_64_BUS_DMA_PRIVATE +#include + +#include +#include + +/* + * Extent maps to manage I/O and memory space. Allocate + * storage for 8 regions in each, initially. Later, ioport_malloc_safe + * will indicate that it's safe to use malloc() to dynamically allocate + * region descriptors. + * + * N.B. At least two regions are _always_ allocated from the iomem + * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM). + * + * The extent maps are not static! Machine-dependent ISA and EISA + * routines need access to them for bus address space allocation. + */ +static long ioport_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)]; +static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)]; +struct extent *ioport_ex; +struct extent *iomem_ex; +static int ioport_malloc_safe; + +int x86_64_mem_add_mapping __P((bus_addr_t, bus_size_t, + int, bus_space_handle_t *)); + +int _bus_dmamap_load_buffer __P((bus_dma_tag_t, bus_dmamap_t, void *, + bus_size_t, struct proc *, int, paddr_t *, int *, int)); + +void +x86_64_bus_space_init() +{ + /* + * Initialize the I/O port and I/O mem extent maps. + * Note: we don't have to check the return value since + * creation of a fixed extent map will never fail (since + * descriptor storage has already been allocated). + * + * N.B. The iomem extent manages _all_ physical addresses + * on the machine. When the amount of RAM is found, the two + * extents of RAM are allocated from the map (0 -> ISA hole + * and end of ISA hole -> end of RAM). + */ + ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF, + (caddr_t)ioport_ex_storage, sizeof(ioport_ex_storage), + EX_NOCOALESCE|EX_NOWAIT); + iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF, + (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage), + EX_NOCOALESCE|EX_NOWAIT); +} + +void +x86_64_bus_space_mallocok() +{ + + ioport_malloc_safe = 1; +} + +int +x86_64_memio_map(t, bpa, size, flags, bshp) + bus_space_tag_t t; + bus_addr_t bpa; + bus_size_t size; + int flags; + bus_space_handle_t *bshp; +{ + int error; + struct extent *ex; + + /* + * Pick the appropriate extent map. + */ + if (t == X86_64_BUS_SPACE_IO) { + if (flags & BUS_SPACE_MAP_LINEAR) + return (EOPNOTSUPP); + ex = ioport_ex; + } else if (t == X86_64_BUS_SPACE_MEM) + ex = iomem_ex; + else + panic("x86_64_memio_map: bad bus space tag"); + + /* + * Before we go any further, let's make sure that this + * region is available. + */ + error = extent_alloc_region(ex, bpa, size, + EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0)); + if (error) + return (error); + + /* + * For I/O space, that's all she wrote. 
+ */ + if (t == X86_64_BUS_SPACE_IO) { + *bshp = bpa; + return (0); + } + + if (bpa >= IOM_BEGIN && (bpa + size) <= IOM_END) { + *bshp = (bus_space_handle_t)ISA_HOLE_VADDR(bpa); + return(0); + } + + /* + * For memory space, map the bus physical address to + * a kernel virtual address. + */ + error = x86_64_mem_add_mapping(bpa, size, + (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp); + if (error) { + if (extent_free(ex, bpa, size, EX_NOWAIT | + (ioport_malloc_safe ? EX_MALLOCOK : 0))) { + printf("x86_64_memio_map: pa 0x%lx, size 0x%lx\n", + bpa, size); + printf("x86_64_memio_map: can't free region\n"); + } + } + + return (error); +} + +int +_x86_64_memio_map(t, bpa, size, flags, bshp) + bus_space_tag_t t; + bus_addr_t bpa; + bus_size_t size; + int flags; + bus_space_handle_t *bshp; +{ + + /* + * For I/O space, just fill in the handle. + */ + if (t == X86_64_BUS_SPACE_IO) { + if (flags & BUS_SPACE_MAP_LINEAR) + return (EOPNOTSUPP); + *bshp = bpa; + return (0); + } + + /* + * For memory space, map the bus physical address to + * a kernel virtual address. + */ + return (x86_64_mem_add_mapping(bpa, size, + (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp)); +} + +int +x86_64_memio_alloc(t, rstart, rend, size, alignment, boundary, flags, + bpap, bshp) + bus_space_tag_t t; + bus_addr_t rstart, rend; + bus_size_t size, alignment, boundary; + int flags; + bus_addr_t *bpap; + bus_space_handle_t *bshp; +{ + struct extent *ex; + u_long bpa; + int error; + + /* + * Pick the appropriate extent map. + */ + if (t == X86_64_BUS_SPACE_IO) { + if (flags & BUS_SPACE_MAP_LINEAR) + return (EOPNOTSUPP); + ex = ioport_ex; + } else if (t == X86_64_BUS_SPACE_MEM) + ex = iomem_ex; + else + panic("x86_64_memio_alloc: bad bus space tag"); + + /* + * Sanity check the allocation against the extent's boundaries. + */ + if (rstart < ex->ex_start || rend > ex->ex_end) + panic("x86_64_memio_alloc: bad region start/end"); + + /* + * Do the requested allocation. + */ + error = extent_alloc_subregion(ex, rstart, rend, size, alignment, + boundary, + EX_FAST | EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0), + &bpa); + + if (error) + return (error); + + /* + * For I/O space, that's all she wrote. + */ + if (t == X86_64_BUS_SPACE_IO) { + *bshp = *bpap = bpa; + return (0); + } + + /* + * For memory space, map the bus physical address to + * a kernel virtual address. + */ + error = x86_64_mem_add_mapping(bpa, size, + (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp); + if (error) { + if (extent_free(iomem_ex, bpa, size, EX_NOWAIT | + (ioport_malloc_safe ? 
EX_MALLOCOK : 0))) { + printf("x86_64_memio_alloc: pa 0x%lx, size 0x%lx\n", + bpa, size); + printf("x86_64_memio_alloc: can't free region\n"); + } + } + + *bpap = bpa; + + return (error); +} + +int +x86_64_mem_add_mapping(bpa, size, cacheable, bshp) + bus_addr_t bpa; + bus_size_t size; + int cacheable; + bus_space_handle_t *bshp; +{ + u_long pa, endpa; + vaddr_t va; + pt_entry_t *pte; + + pa = x86_64_trunc_page(bpa); + endpa = x86_64_round_page(bpa + size); + +#ifdef DIAGNOSTIC + if (endpa <= pa) + panic("x86_64_mem_add_mapping: overflow"); +#endif + + va = uvm_km_valloc(kernel_map, endpa - pa); + if (va == 0) + return (ENOMEM); + + *bshp = (bus_space_handle_t)(va + (bpa & PGOFSET)); + + for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) { + pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); + + pte = kvtopte(va); + if (cacheable) + *pte &= ~PG_N; + else + *pte |= PG_N; +#ifdef LARGEPAGES + if (*pte & PG_PS) + pmap_update_pg(va & PG_LGFRAME); + else +#endif + pmap_update_pg(va); + } + + return 0; +} + +void +x86_64_memio_unmap(t, bsh, size) + bus_space_tag_t t; + bus_space_handle_t bsh; + bus_size_t size; +{ + struct extent *ex; + u_long va, endva; + bus_addr_t bpa; + + /* + * Find the correct extent and bus physical address. + */ + if (t == X86_64_BUS_SPACE_IO) { + ex = ioport_ex; + bpa = bsh; + } else if (t == X86_64_BUS_SPACE_MEM) { + ex = iomem_ex; + + if (bsh >= atdevbase && + (bsh + size) <= (atdevbase + IOM_SIZE)) { + bpa = (bus_addr_t)ISA_PHYSADDR(bsh); + goto ok; + } + + va = x86_64_trunc_page(bsh); + endva = x86_64_round_page(bsh + size); + +#ifdef DIAGNOSTIC + if (endva <= va) + panic("x86_64_memio_unmap: overflow"); +#endif + + (void) pmap_extract(pmap_kernel(), va, &bpa); + bpa += (bsh & PGOFSET); + + /* + * Free the kernel virtual mapping. + */ + uvm_km_free(kernel_map, va, endva - va); + } else + panic("x86_64_memio_unmap: bad bus space tag"); + +ok: + if (extent_free(ex, bpa, size, + EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) { + printf("x86_64_memio_unmap: %s 0x%lx, size 0x%lx\n", + (t == X86_64_BUS_SPACE_IO) ? "port" : "pa", bpa, size); + printf("x86_64_memio_unmap: can't free region\n"); + } +} + +void +x86_64_memio_free(t, bsh, size) + bus_space_tag_t t; + bus_space_handle_t bsh; + bus_size_t size; +{ + + /* x86_64_memio_unmap() does all that we need to do. */ + x86_64_memio_unmap(t, bsh, size); +} + +int +x86_64_memio_subregion(t, bsh, offset, size, nbshp) + bus_space_tag_t t; + bus_space_handle_t bsh; + bus_size_t offset, size; + bus_space_handle_t *nbshp; +{ + + *nbshp = bsh + offset; + return (0); +} + +/* + * Common function for DMA map creation. May be called by bus-specific + * DMA map creation functions. + */ +int +_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp) + bus_dma_tag_t t; + bus_size_t size; + int nsegments; + bus_size_t maxsegsz; + bus_size_t boundary; + int flags; + bus_dmamap_t *dmamp; +{ + struct x86_64_bus_dmamap *map; + void *mapstore; + size_t mapsize; + + /* + * Allocate and initialize the DMA map. The end of the map + * is a variable-sized array of segments, so we allocate enough + * room for them in one shot. + * + * Note we don't preserve the WAITOK or NOWAIT flags. Preservation + * of ALLOCNOW notifies others that we've reserved these resources, + * and they are not to be freed. + * + * The bus_dmamap_t includes one bus_dma_segment_t, hence + * the (nsegments - 1). 
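+ * (For example, with nsegments == 4 the allocation below reserves room
+ * for four segments in all: one already embedded in the map structure
+ * and three more appended after it.)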
+ */ + mapsize = sizeof(struct x86_64_bus_dmamap) + + (sizeof(bus_dma_segment_t) * (nsegments - 1)); + if ((mapstore = malloc(mapsize, M_DMAMAP, + (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) + return (ENOMEM); + + memset(mapstore, 0, mapsize); + map = (struct x86_64_bus_dmamap *)mapstore; + map->_dm_size = size; + map->_dm_segcnt = nsegments; + map->_dm_maxsegsz = maxsegsz; + map->_dm_boundary = boundary; + map->_dm_bounce_thresh = t->_bounce_thresh; + map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT); + map->dm_mapsize = 0; /* no valid mappings */ + map->dm_nsegs = 0; + + *dmamp = map; + return (0); +} + +/* + * Common function for DMA map destruction. May be called by bus-specific + * DMA map destruction functions. + */ +void +_bus_dmamap_destroy(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + + free(map, M_DMAMAP); +} + +/* + * Common function for loading a DMA map with a linear buffer. May + * be called by bus-specific DMA map load functions. + */ +int +_bus_dmamap_load(t, map, buf, buflen, p, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; +{ + paddr_t lastaddr; + int seg, error; + + /* + * Make sure that on error condition we return "no valid mappings". + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + if (buflen > map->_dm_size) + return (EINVAL); + + seg = 0; + error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, + &lastaddr, &seg, 1); + if (error == 0) { + map->dm_mapsize = buflen; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for mbufs. + */ +int +_bus_dmamap_load_mbuf(t, map, m0, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct mbuf *m0; + int flags; +{ + paddr_t lastaddr; + int seg, error, first; + struct mbuf *m; + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + +#ifdef DIAGNOSTIC + if ((m0->m_flags & M_PKTHDR) == 0) + panic("_bus_dmamap_load_mbuf: no packet header"); +#endif + + if (m0->m_pkthdr.len > map->_dm_size) + return (EINVAL); + + first = 1; + seg = 0; + error = 0; + for (m = m0; m != NULL && error == 0; m = m->m_next) { + error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len, + NULL, flags, &lastaddr, &seg, first); + first = 0; + } + if (error == 0) { + map->dm_mapsize = m0->m_pkthdr.len; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for uios. + */ +int +_bus_dmamap_load_uio(t, map, uio, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + struct uio *uio; + int flags; +{ + paddr_t lastaddr; + int seg, i, error, first; + bus_size_t minlen, resid; + struct proc *p = NULL; + struct iovec *iov; + caddr_t addr; + + /* + * Make sure that on error condition we return "no valid mappings." + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; + + resid = uio->uio_resid; + iov = uio->uio_iov; + + if (uio->uio_segflg == UIO_USERSPACE) { + p = uio->uio_procp; +#ifdef DIAGNOSTIC + if (p == NULL) + panic("_bus_dmamap_load_uio: USERSPACE but no proc"); +#endif + } + + first = 1; + seg = 0; + error = 0; + for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) { + /* + * Now at the first iovec to load. Load each iovec + * until we have exhausted the residual count. + */ + minlen = resid < iov[i].iov_len ? 
resid : iov[i].iov_len; + addr = (caddr_t)iov[i].iov_base; + + error = _bus_dmamap_load_buffer(t, map, addr, minlen, + p, flags, &lastaddr, &seg, first); + first = 0; + + resid -= minlen; + } + if (error == 0) { + map->dm_mapsize = uio->uio_resid; + map->dm_nsegs = seg + 1; + } + return (error); +} + +/* + * Like _bus_dmamap_load(), but for raw memory allocated with + * bus_dmamem_alloc(). + */ +int +_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_dma_segment_t *segs; + int nsegs; + bus_size_t size; + int flags; +{ + + panic("_bus_dmamap_load_raw: not implemented"); +} + +/* + * Common function for unloading a DMA map. May be called by + * bus-specific DMA map unload functions. + */ +void +_bus_dmamap_unload(t, map) + bus_dma_tag_t t; + bus_dmamap_t map; +{ + + /* + * No resources to free; just mark the mappings as + * invalid. + */ + map->dm_mapsize = 0; + map->dm_nsegs = 0; +} + +/* + * Common function for DMA map synchronization. May be called + * by bus-specific DMA map synchronization functions. + */ +void +_bus_dmamap_sync(t, map, offset, len, ops) + bus_dma_tag_t t; + bus_dmamap_t map; + bus_addr_t offset; + bus_size_t len; + int ops; +{ + + /* Nothing to do here. */ +} + +/* + * Common function for DMA-safe memory allocation. May be called + * by bus-specific DMA memory allocation functions. + */ +int +_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; +{ + extern paddr_t avail_end; + paddr_t high, tend; + + /* XXXfvdl restrict all dmamem allocs to < 4G for now */ + tend = trunc_page(avail_end); + high = tend > 0xffffffff ? 0xffffffff : tend; + return (_bus_dmamem_alloc_range(t, size, alignment, boundary, + segs, nsegs, rsegs, flags, 0, high)); +} + +/* + * Common function for freeing DMA-safe memory. May be called by + * bus-specific DMA memory free functions. + */ +void +_bus_dmamem_free(t, segs, nsegs) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; +{ + struct vm_page *m; + bus_addr_t addr; + struct pglist mlist; + int curseg; + + /* + * Build a list of pages to free back to the VM system. + */ + TAILQ_INIT(&mlist); + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += PAGE_SIZE) { + m = PHYS_TO_VM_PAGE(addr); + TAILQ_INSERT_TAIL(&mlist, m, pageq); + } + } + + uvm_pglistfree(&mlist); +} + +/* + * Common function for mapping DMA-safe memory. May be called by + * bus-specific DMA memory map functions. + */ +int +_bus_dmamem_map(t, segs, nsegs, size, kvap, flags) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; + size_t size; + caddr_t *kvap; + int flags; +{ + vaddr_t va; + bus_addr_t addr; + int curseg; + + size = round_page(size); + + va = uvm_km_valloc(kernel_map, size); + + if (va == 0) + return (ENOMEM); + + *kvap = (caddr_t)va; + + for (curseg = 0; curseg < nsegs; curseg++) { + for (addr = segs[curseg].ds_addr; + addr < (segs[curseg].ds_addr + segs[curseg].ds_len); + addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) { + if (size == 0) + panic("_bus_dmamem_map: size botch"); + pmap_enter(pmap_kernel(), va, addr, + VM_PROT_READ | VM_PROT_WRITE, + PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE); + } + } + + return (0); +} + +/* + * Common function for unmapping DMA-safe memory. May be called by + * bus-specific DMA memory unmapping functions. 
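Stepping back to _bus_dmamap_load_uio() above: it walks the iovec array, loading at most min(resid, iov_len) bytes from each iovec until the residual count runs out. A rough userland sketch of that walk over the standard struct iovec, with the actual load step reduced to a stub:

#include <stdio.h>
#include <sys/uio.h>

/* Stand-in for _bus_dmamap_load_buffer(): just report each chunk. */
static int
load_chunk(void *addr, size_t len, int first)
{
	printf("load %zu bytes at %p%s\n", len, addr, first ? " (first)" : "");
	return 0;
}

/* Walk an iovec array the way the uio loader above does. */
static int
load_iovecs(const struct iovec *iov, int iovcnt, size_t resid)
{
	size_t minlen;
	int i, first = 1, error = 0;

	for (i = 0; i < iovcnt && resid != 0 && error == 0; i++) {
		/* Take the smaller of what is left and this iovec's length. */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		error = load_chunk(iov[i].iov_base, minlen, first);
		first = 0;
		resid -= minlen;
	}
	return error;
}

int
main(void)
{
	char a[100], b[50];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	/* 120 bytes outstanding: all of a, then the first 20 bytes of b. */
	return load_iovecs(iov, 2, 120);
}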
+ */ +void +_bus_dmamem_unmap(t, kva, size) + bus_dma_tag_t t; + caddr_t kva; + size_t size; +{ + +#ifdef DIAGNOSTIC + if ((u_long)kva & PGOFSET) + panic("_bus_dmamem_unmap"); +#endif + + size = round_page(size); + + uvm_km_free(kernel_map, (vaddr_t)kva, size); +} + +/* + * Common function for mmap(2)'ing DMA-safe memory. May be called by + * bus-specific DMA mmap(2)'ing functions. + */ +paddr_t +_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags) + bus_dma_tag_t t; + bus_dma_segment_t *segs; + int nsegs; + off_t off; + int prot, flags; +{ + int i; + + for (i = 0; i < nsegs; i++) { +#ifdef DIAGNOSTIC + if (off & PGOFSET) + panic("_bus_dmamem_mmap: offset unaligned"); + if (segs[i].ds_addr & PGOFSET) + panic("_bus_dmamem_mmap: segment unaligned"); + if (segs[i].ds_len & PGOFSET) + panic("_bus_dmamem_mmap: segment size not multiple" + " of page size"); +#endif + if (off >= segs[i].ds_len) { + off -= segs[i].ds_len; + continue; + } + + return (x86_64_btop((caddr_t)segs[i].ds_addr + off)); + } + + /* Page not found. */ + return (-1); +} + +/********************************************************************** + * DMA utility functions + **********************************************************************/ + +/* + * Utility function to load a linear buffer. lastaddrp holds state + * between invocations (for multiple-buffer loads). segp contains + * the starting segment on entrance, and the ending segment on exit. + * first indicates if this is the first invocation of this function. + */ +int +_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first) + bus_dma_tag_t t; + bus_dmamap_t map; + void *buf; + bus_size_t buflen; + struct proc *p; + int flags; + paddr_t *lastaddrp; + int *segp; + int first; +{ + bus_size_t sgsize; + bus_addr_t curaddr, lastaddr, baddr, bmask; + vaddr_t vaddr = (vaddr_t)buf; + int seg; + pmap_t pmap; + + if (p != NULL) + pmap = p->p_vmspace->vm_map.pmap; + else + pmap = pmap_kernel(); + + lastaddr = *lastaddrp; + bmask = ~(map->_dm_boundary - 1); + + for (seg = *segp; buflen > 0 ; ) { + /* + * Get the physical address for this segment. + */ + (void) pmap_extract(pmap, vaddr, &curaddr); + + /* + * If we're beyond the bounce threshold, notify + * the caller. + */ + if (map->_dm_bounce_thresh != 0 && + curaddr >= map->_dm_bounce_thresh) + return (EINVAL); + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); + if (buflen < sgsize) + sgsize = buflen; + + /* + * Make sure we don't cross any boundaries. + */ + if (map->_dm_boundary > 0) { + baddr = (curaddr + map->_dm_boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + if (first) { + map->dm_segs[seg].ds_addr = curaddr; + map->dm_segs[seg].ds_len = sgsize; + first = 0; + } else { + if (curaddr == lastaddr && + (map->dm_segs[seg].ds_len + sgsize) <= + map->_dm_maxsegsz && + (map->_dm_boundary == 0 || + (map->dm_segs[seg].ds_addr & bmask) == + (curaddr & bmask))) + map->dm_segs[seg].ds_len += sgsize; + else { + if (++seg >= map->_dm_segcnt) + break; + map->dm_segs[seg].ds_addr = curaddr; + map->dm_segs[seg].ds_len = sgsize; + } + } + + lastaddr = curaddr + sgsize; + vaddr += sgsize; + buflen -= sgsize; + } + + *segp = seg; + *lastaddrp = lastaddr; + + /* + * Did we fit? + */ + if (buflen != 0) + return (EFBIG); /* XXX better return value here? 
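The loop above is the core of the DMA mapping code: it slices a virtual buffer into chunks that never cross a page, translates each chunk to a physical address, and merges a chunk into the previous segment when the physical addresses turn out to be contiguous and the size and boundary limits still hold. A simplified standalone sketch of the chunking and coalescing, with a fake virtual-to-physical translation standing in for pmap_extract() and the bounce and boundary checks left out:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PGOFSET		(PAGE_SIZE - 1)

struct seg {
	uint64_t addr;
	uint64_t len;
};

/* Fake translation: even virtual pages map 1:1, odd pages jump elsewhere. */
static uint64_t
virt_to_phys(uint64_t va)
{
	uint64_t vpn = va / PAGE_SIZE;
	uint64_t ppn = (vpn & 1) ? vpn + 1000 : vpn;

	return ppn * PAGE_SIZE + (va & PGOFSET);
}

/* Build segments for [va, va+len), coalescing physically contiguous chunks. */
static int
build_segs(uint64_t va, uint64_t len, struct seg *segs, int maxsegs)
{
	uint64_t lastaddr = 0;
	int seg = -1;

	while (len > 0) {
		uint64_t curaddr = virt_to_phys(va);
		uint64_t sgsize = PAGE_SIZE - (va & PGOFSET);	/* stay inside the page */

		if (sgsize > len)
			sgsize = len;

		if (seg >= 0 && curaddr == lastaddr) {
			segs[seg].len += sgsize;	/* coalesce */
		} else {
			if (++seg >= maxsegs)
				return -1;		/* out of segments */
			segs[seg].addr = curaddr;
			segs[seg].len = sgsize;
		}
		lastaddr = curaddr + sgsize;
		va += sgsize;
		len -= sgsize;
	}
	return seg + 1;
}

int
main(void)
{
	struct seg segs[8];
	int i, n = build_segs(0x10000 + 100, 3 * PAGE_SIZE, segs, 8);

	for (i = 0; i < n; i++)
		printf("seg %d: pa 0x%llx len %llu\n", i,
		    (unsigned long long)segs[i].addr,
		    (unsigned long long)segs[i].len);
	return 0;
}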
*/ + return (0); +} + +/* + * Allocate physical memory from the given physical address range. + * Called by DMA-safe memory allocation methods. + */ +int +_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs, + flags, low, high) + bus_dma_tag_t t; + bus_size_t size, alignment, boundary; + bus_dma_segment_t *segs; + int nsegs; + int *rsegs; + int flags; + paddr_t low; + paddr_t high; +{ + paddr_t curaddr, lastaddr; + struct vm_page *m; + struct pglist mlist; + int curseg, error; + + /* Always round the size. */ + size = round_page(size); + + /* + * Allocate pages from the VM system. + */ + TAILQ_INIT(&mlist); + error = uvm_pglistalloc(size, low, high, alignment, boundary, + &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0); + if (error) + return (error); + + /* + * Compute the location, size, and number of segments actually + * returned by the VM code. + */ + m = mlist.tqh_first; + curseg = 0; + lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m); + segs[curseg].ds_len = PAGE_SIZE; + m = m->pageq.tqe_next; + + for (; m != NULL; m = m->pageq.tqe_next) { + curaddr = VM_PAGE_TO_PHYS(m); +#ifdef DIAGNOSTIC + if (curaddr < low || curaddr >= high) { + printf("vm_page_alloc_memory returned non-sensical" + " address 0x%lx\n", curaddr); + panic("_bus_dmamem_alloc_range"); + } +#endif + if (curaddr == (lastaddr + PAGE_SIZE)) + segs[curseg].ds_len += PAGE_SIZE; + else { + curseg++; + segs[curseg].ds_addr = curaddr; + segs[curseg].ds_len = PAGE_SIZE; + } + lastaddr = curaddr; + } + + *rsegs = curseg + 1; + + return (0); +} diff --git a/sys/arch/x86_64/x86_64/conf.c b/sys/arch/x86_64/x86_64/conf.c new file mode 100644 index 000000000000..c05f43927482 --- /dev/null +++ b/sys/arch/x86_64/x86_64/conf.c @@ -0,0 +1,468 @@ +/* $NetBSD: conf.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_compat_svr4.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "wd.h" +bdev_decl(wd); +bdev_decl(sw); +#include "fdc.h" +bdev_decl(fd); +#include "wt.h" +bdev_decl(wt); +#include "sd.h" +bdev_decl(sd); +#include "st.h" +bdev_decl(st); +#include "cd.h" +bdev_decl(cd); +#include "mcd.h" +bdev_decl(mcd); +#include "vnd.h" +bdev_decl(vnd); +#include "scd.h" +bdev_decl(scd); +#include "ccd.h" +bdev_decl(ccd); +#include "raid.h" +bdev_decl(raid); +#include "md.h" +bdev_decl(md); +#include "ld.h" +bdev_decl(ld); + +struct bdevsw bdevsw[] = +{ + bdev_disk_init(NWD,wd), /* 0: ST506/ESDI/IDE disk */ + bdev_swap_init(1,sw), /* 1: swap pseudo-device */ + bdev_disk_init(NFDC,fd), /* 2: floppy diskette */ + bdev_tape_init(NWT,wt), /* 3: QIC-02/QIC-36 tape */ + bdev_disk_init(NSD,sd), /* 4: SCSI disk */ + bdev_tape_init(NST,st), /* 5: SCSI tape */ + bdev_disk_init(NCD,cd), /* 6: SCSI CD-ROM */ + bdev_disk_init(NMCD,mcd), /* 7: Mitsumi CD-ROM */ + bdev_lkm_dummy(), /* 8 */ + bdev_lkm_dummy(), /* 9 */ + bdev_lkm_dummy(), /* 10 */ + bdev_lkm_dummy(), /* 11 */ + bdev_lkm_dummy(), /* 12 */ + bdev_lkm_dummy(), /* 13 */ + bdev_disk_init(NVND,vnd), /* 14: vnode disk driver */ + bdev_disk_init(NSCD,scd), /* 15: Sony CD-ROM */ + bdev_disk_init(NCCD,ccd), /* 16: concatenated disk driver */ + bdev_disk_init(NMD,md), /* 17: memory disk driver */ + bdev_disk_init(NRAID,raid), /* 18: RAIDframe disk driver */ + bdev_disk_init(NLD,ld), /* 19: logical disk */ +}; +int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]); + +#include +cdev_decl(sysmon); + +cdev_decl(cn); +cdev_decl(ctty); +#define mmread mmrw +#define mmwrite mmrw +cdev_decl(mm); +cdev_decl(wd); +cdev_decl(sw); +#include "pty.h" +#define ptstty ptytty +#define ptsioctl ptyioctl +cdev_decl(pts); +#define ptctty ptytty +#define ptcioctl ptyioctl +cdev_decl(ptc); +cdev_decl(log); +#include "com.h" +cdev_decl(com); +cdev_decl(fd); +cdev_decl(wt); +cdev_decl(scd); +#include "pc.h" +cdev_decl(pc); +cdev_decl(sd); +cdev_decl(st); +#include "ses.h" +cdev_decl(ses); +#include "ss.h" +cdev_decl(ss); +#include "uk.h" +cdev_decl(uk); +cdev_decl(cd); +#include "lpt.h" +cdev_decl(lpt); +#include "ch.h" +cdev_decl(ch); +dev_decl(filedesc,open); +#include "bpfilter.h" +cdev_decl(bpf); +cdev_decl(md); +#include "spkr.h" +cdev_decl(spkr); +#include "cy.h" +cdev_decl(cy); +#include "cz.h" +cdev_decl(cztty); +cdev_decl(mcd); +#include "tun.h" +cdev_decl(tun); +cdev_decl(vnd); +#include "audio.h" +cdev_decl(audio); +#include "midi.h" +cdev_decl(midi); +#include "sequencer.h" +cdev_decl(music); +cdev_decl(svr4_net); +cdev_decl(ccd); +cdev_decl(raid); +cdev_decl(ld); +#include "usb.h" +cdev_decl(usb); +#include "uhid.h" +cdev_decl(uhid); +#include "ugen.h" +cdev_decl(ugen); +#include "ulpt.h" +cdev_decl(ulpt); +#include "ucom.h" +cdev_decl(ucom); +#include "urio.h" +cdev_decl(urio); +#include "uscanner.h" +cdev_decl(uscanner); +#include "vcoda.h" +cdev_decl(vc_nb_); + +#include "ipfilter.h" 
+#include "satlink.h" +cdev_decl(satlink); + +#include "rnd.h" + +#include "wsdisplay.h" +cdev_decl(wsdisplay); +#include "wskbd.h" +cdev_decl(wskbd); +#include "wsmouse.h" +cdev_decl(wsmouse); +#include "wsmux.h" +cdev_decl(wsmux); +#include "esh.h" +cdev_decl(esh_fp); +#include "scsibus.h" +cdev_decl(scsibus); +#include "bktr.h" + +#include "i4b.h" +#include "i4bctl.h" +#include "i4btrc.h" +#include "i4brbch.h" +#include "i4btel.h" +cdev_decl(i4b); +cdev_decl(i4bctl); +cdev_decl(i4btrc); +cdev_decl(i4brbch); +cdev_decl(i4btel); + +/* open, close, read, write, ioctl, mmap */ +#define cdev_vmegen_init(c,n) { \ + dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \ + dev_init(c,n,write), dev_init(c,n,ioctl), (dev_type_stop((*))) enodev, \ + 0, (dev_type_poll((*))) enodev, dev_init(c,n,mmap) } + +#if 0 +#include "vmegeneric.h" +cdev_decl(vmegeneric); +#endif +#include "iop.h" +cdev_decl(iop); +#include "mlx.h" +cdev_decl(mlx); + +#include + +struct cdevsw cdevsw[] = +{ + cdev_cn_init(1,cn), /* 0: virtual console */ + cdev_ctty_init(1,ctty), /* 1: controlling terminal */ + cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */ + cdev_disk_init(NWD,wd), /* 3: ST506/ESDI/IDE disk */ + cdev_swap_init(1,sw), /* 4: /dev/drum (swap pseudo-device) */ + cdev_tty_init(NPTY,pts), /* 5: pseudo-tty slave */ + cdev_ptc_init(NPTY,ptc), /* 6: pseudo-tty master */ + cdev_log_init(1,log), /* 7: /dev/klog */ + cdev_tty_init(NCOM,com), /* 8: serial port */ + cdev_disk_init(NFDC,fd), /* 9: floppy disk */ + cdev_tape_init(NWT,wt), /* 10: QIC-02/QIC-36 tape */ + cdev_disk_init(NSCD,scd), /* 11: Sony CD-ROM */ + cdev_pc_init(NPC,pc), /* 12: PC console */ + cdev_disk_init(NSD,sd), /* 13: SCSI disk */ + cdev_tape_init(NST,st), /* 14: SCSI tape */ + cdev_disk_init(NCD,cd), /* 15: SCSI CD-ROM */ + cdev_lpt_init(NLPT,lpt), /* 16: parallel printer */ + cdev_ch_init(NCH,ch), /* 17: SCSI autochanger */ + cdev_disk_init(NCCD,ccd), /* 18: concatenated disk driver */ + cdev_scanner_init(NSS,ss), /* 19: SCSI scanner */ + cdev_uk_init(NUK,uk), /* 20: SCSI unknown */ +#ifdef notdef + cdev_apm_init(NAPM,apm), /* 21: Advancded Power Management */ +#else + cdev_notdef(), +#endif + cdev_fd_init(1,filedesc), /* 22: file descriptor pseudo-device */ + cdev_bpftun_init(NBPFILTER,bpf),/* 23: Berkeley packet filter */ + cdev_disk_init(NMD,md), /* 24: memory disk driver */ + cdev_notdef(), /* 25 */ +#ifdef notdef + cdev_joy_init(NJOY,joy), /* 26: joystick */ +#else + cdev_notdef(), +#endif + cdev_spkr_init(NSPKR,spkr), /* 27: PC speaker */ + cdev_lkm_init(NLKM,lkm), /* 28: loadable module driver */ + cdev_lkm_dummy(), /* 29 */ + cdev_lkm_dummy(), /* 30 */ + cdev_lkm_dummy(), /* 31 */ + cdev_lkm_dummy(), /* 32 */ + cdev_lkm_dummy(), /* 33 */ + cdev_lkm_dummy(), /* 34 */ + cdev_notdef(), /* 35 */ + cdev_notdef(), /* 36 */ + cdev_notdef(), /* 37 */ + cdev_tty_init(NCY,cy), /* 38: Cyclom serial port */ + cdev_disk_init(NMCD,mcd), /* 39: Mitsumi CD-ROM */ + cdev_bpftun_init(NTUN,tun), /* 40: network tunnel */ + cdev_disk_init(NVND,vnd), /* 41: vnode disk driver */ + cdev_audio_init(NAUDIO,audio), /* 42: generic audio I/O */ +#ifdef COMPAT_SVR4 + cdev_svr4_net_init(1,svr4_net), /* 43: svr4 net pseudo-device */ +#else + cdev_notdef(), /* 43 */ +#endif + cdev_ipf_init(NIPFILTER,ipl), /* 44: ip-filter device */ + cdev_satlink_init(NSATLINK,satlink), /* 45: planetconnect satlink */ + cdev_rnd_init(NRND,rnd), /* 46: random source pseudo-device */ + cdev_wsdisplay_init(NWSDISPLAY, + wsdisplay), /* 47: frame buffers, etc. 
*/ + + cdev_mouse_init(NWSKBD, wskbd), /* 48: keyboards */ + cdev_mouse_init(NWSMOUSE, + wsmouse), /* 49: mice */ + cdev_i4b_init(NI4B, i4b), /* 50: i4b main device */ + cdev_i4bctl_init(NI4BCTL, i4bctl), /* 51: i4b control device */ + cdev_i4brbch_init(NI4BRBCH, i4brbch), /* 52: i4b raw b-channel access */ + cdev_i4btrc_init(NI4BTRC, i4btrc), /* 53: i4b trace device */ + cdev_i4btel_init(NI4BTEL, i4btel), /* 54: i4b phone device */ + cdev_usb_init(NUSB,usb), /* 55: USB controller */ + cdev_usbdev_init(NUHID,uhid), /* 56: USB generic HID */ + cdev_lpt_init(NULPT,ulpt), /* 57: USB printer */ + cdev_midi_init(NMIDI,midi), /* 58: MIDI I/O */ + cdev_midi_init(NSEQUENCER,sequencer), /* 59: sequencer I/O */ + cdev_vc_nb_init(NVCODA,vc_nb_), /* 60: coda file system psdev */ + cdev_scsibus_init(NSCSIBUS,scsibus), /* 61: SCSI bus */ + cdev_disk_init(NRAID,raid), /* 62: RAIDframe disk driver */ + cdev_esh_init(NESH, esh_fp), /* 63: HIPPI (esh) raw device */ + cdev_ugen_init(NUGEN,ugen), /* 64: USB generic driver */ + cdev_mouse_init(NWSMUX, wsmux), /* 65: ws multiplexor */ + cdev_tty_init(NUCOM, ucom), /* 66: USB tty */ + cdev_sysmon_init(NSYSMON, sysmon),/* 67: System Monitor */ +#if 0 + cdev_vmegen_init(NVMEGENERIC, vmegeneric), /* 68: generic VME access */ +#else + cdev_notdef(), +#endif + cdev_disk_init(NLD, ld), /* 69: logical disk */ + cdev_usbdev_init(NURIO,urio), /* 70: Diamond Rio 500 */ + cdev_bktr_init(NBKTR, bktr), /* 71: Bt848 video capture device */ + cdev_notdef(), /* 72 */ + cdev_tty_init(NCZ,cztty), /* 73: Cyclades-Z serial port */ + cdev_ses_init(NSES,ses), /* 74: SCSI SES/SAF-TE */ + cdev_ugen_init(NUSCANNER,uscanner),/* 75: USB scanner */ + cdev__oci_init(NIOP,iop), /* 76: I2O IOP control interface */ + cdev_altq_init(NALTQ,altq), /* 77: ALTQ control interface */ + cdev__oci_init(NMLX,mlx), /* 78: Mylex DAC960 control interface */ +}; +int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]); + +int mem_no = 2; /* major device number of memory special file */ + +/* + * Swapdev is a fake device implemented + * in sw.c used only internally to get to swstrategy. + * It cannot be provided to the users, because the + * swstrategy routine munches the b_dev and b_blkno entries + * before calling the appropriate driver. This would horribly + * confuse, e.g. the hashing routines. Instead, /dev/drum is + * provided as a character (raw) device. + */ +dev_t swapdev = makedev(1, 0); + +/* + * Returns true if dev is /dev/mem or /dev/kmem. + */ +int +iskmemdev(dev) + dev_t dev; +{ + + return (major(dev) == mem_no && (minor(dev) < 2 || minor(dev) == 14)); +} + +/* + * Returns true if dev is /dev/zero. + */ +int +iszerodev(dev) + dev_t dev; +{ + + return (major(dev) == mem_no && minor(dev) == 12); +} + +static int chrtoblktbl[] = { + /* XXXX This needs to be dynamic for LKMs. 
*/ + /*VCHR*/ /*VBLK*/ + /* 0 */ NODEV, + /* 1 */ NODEV, + /* 2 */ NODEV, + /* 3 */ 0, + /* 4 */ NODEV, + /* 5 */ NODEV, + /* 6 */ NODEV, + /* 7 */ NODEV, + /* 8 */ NODEV, + /* 9 */ 2, + /* 10 */ 3, + /* 11 */ 15, + /* 12 */ NODEV, + /* 13 */ 4, + /* 14 */ 5, + /* 15 */ 6, + /* 16 */ NODEV, + /* 17 */ NODEV, + /* 18 */ 16, + /* 19 */ NODEV, + /* 20 */ NODEV, + /* 21 */ NODEV, + /* 22 */ NODEV, + /* 23 */ NODEV, + /* 24 */ 17, + /* 25 */ NODEV, + /* 26 */ NODEV, + /* 27 */ NODEV, + /* 28 */ NODEV, + /* 29 */ NODEV, + /* 30 */ NODEV, + /* 31 */ NODEV, + /* 32 */ NODEV, + /* 33 */ NODEV, + /* 34 */ NODEV, + /* 35 */ NODEV, + /* 36 */ NODEV, + /* 37 */ NODEV, + /* 38 */ NODEV, + /* 39 */ 7, + /* 40 */ NODEV, + /* 41 */ 14, + /* 42 */ NODEV, + /* 43 */ NODEV, + /* 44 */ NODEV, + /* 45 */ NODEV, + /* 46 */ NODEV, + /* 47 */ NODEV, + /* 48 */ NODEV, + /* 49 */ NODEV, + /* 50 */ NODEV, + /* 51 */ NODEV, + /* 52 */ NODEV, + /* 53 */ NODEV, + /* 54 */ NODEV, + /* 55 */ NODEV, + /* 56 */ NODEV, + /* 57 */ NODEV, + /* 58 */ NODEV, + /* 59 */ NODEV, + /* 60 */ NODEV, + /* 61 */ NODEV, + /* 62 */ 18, + /* 63 */ NODEV, + /* 64 */ NODEV, + /* 65 */ NODEV, + /* 66 */ NODEV, + /* 67 */ NODEV, + /* 68 */ NODEV, + /* 69 */ 19, + /* 70 */ NODEV, + /* 71 */ NODEV, + /* 72 */ NODEV, + /* 73 */ NODEV, + /* 74 */ NODEV, + /* 75 */ NODEV, + /* 76 */ NODEV, + /* 77 */ NODEV, + /* 78 */ NODEV, +}; + +/* + * Convert a character device number to a block device number. + */ +dev_t +chrtoblk(dev) + dev_t dev; +{ + int blkmaj; + + if (major(dev) >= nchrdev) + return (NODEV); + blkmaj = chrtoblktbl[major(dev)]; + if (blkmaj == NODEV) + return (NODEV); + return (makedev(blkmaj, minor(dev))); +} diff --git a/sys/arch/x86_64/x86_64/consinit.c b/sys/arch/x86_64/x86_64/consinit.c new file mode 100644 index 000000000000..11ed962d5e34 --- /dev/null +++ b/sys/arch/x86_64/x86_64/consinit.c @@ -0,0 +1,217 @@ +/* $NetBSD: consinit.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/* + * Copyright (c) 1998 + * Matthias Drochner. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
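Back in conf.c above, chrtoblk() maps a character-device major to its block-device counterpart through the fixed table, and iskmemdev()/iszerodev() pick out particular minors of the memory device. A small sketch of the same pattern with a hypothetical, much shorter table and a toy device-number packing; the real makedev()/major()/minor() macros and the LKM concern noted in the table's comment are out of scope here:

#include <stdio.h>

#define NODEV	(-1)

/* Toy 8-bit major / 24-bit minor packing, for illustration only. */
#define MKDEV(maj, min)	(((maj) << 24) | ((min) & 0xffffff))
#define MAJOR(dev)	(((dev) >> 24) & 0xff)
#define MINOR(dev)	((dev) & 0xffffff)

/* Character major -> block major; NODEV means "no block counterpart". */
static const int chr_to_blk[] = {
	NODEV,	/* 0: console */
	NODEV,	/* 1: controlling tty */
	NODEV,	/* 2: memory devices */
	0,	/* 3: disk -> block major 0 */
	NODEV,	/* 4: swap */
};

static int
chrtoblk_sketch(int chrdev)
{
	int maj = MAJOR(chrdev);

	if (maj >= (int)(sizeof(chr_to_blk) / sizeof(chr_to_blk[0])))
		return NODEV;
	if (chr_to_blk[maj] == NODEV)
		return NODEV;
	/* Same minor (unit and partition), different major. */
	return MKDEV(chr_to_blk[maj], MINOR(chrdev));
}

int
main(void)
{
	int rdisk = MKDEV(3, 0);	/* raw disk, hypothetical numbering */
	int bdisk = chrtoblk_sketch(rdisk);

	if (bdisk == NODEV)
		printf("no block device\n");
	else
		printf("block dev %d,%d\n", MAJOR(bdisk), MINOR(bdisk));
	return 0;
}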
+ * + */ + +#include +#include +#include +#include +#include + +#include "vga.h" +#include "ega.h" +#include "pcdisplay.h" +#if (NVGA > 0) || (NEGA > 0) || (NPCDISPLAY > 0) +#include +#include +#if (NVGA > 0) +#include +#include +#endif +#if (NEGA > 0) +#include +#endif +#if (NPCDISPLAY > 0) +#include +#endif +#endif + +#include "pckbc.h" +#if (NPCKBC > 0) +#include +#include +#include +#endif +#include "pckbd.h" /* for pckbc_machdep_cnattach */ + +#include "pc.h" +#if (NPC > 0) +#include +#endif + +#include "com.h" +#if (NCOM > 0) +#include +#include +#include +#endif + +#ifndef CONSDEVNAME +#define CONSDEVNAME "pc" +#endif + +#if (NCOM > 0) +#ifndef CONADDR +#define CONADDR 0x3f8 +#endif +#ifndef CONSPEED +#define CONSPEED TTYDEF_SPEED +#endif +#ifndef CONMODE +#define CONMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */ +#endif +int comcnmode = CONMODE; +#endif /* NCOM */ + +const struct btinfo_console default_consinfo = { + {0, 0}, + CONSDEVNAME, +#if (NCOM > 0) + CONADDR, CONSPEED +#else + 0, 0 +#endif +}; + +#ifdef KGDB +#ifndef KGDB_DEVNAME +#define KGDB_DEVNAME "com" +#endif +const char kgdb_devname[] = KGDB_DEVNAME; + +#if (NCOM > 0) +#ifndef KGDBADDR +#define KGDBADDR 0x3f8 +#endif +int comkgdbaddr = KGDBADDR; +#ifndef KGDBRATE +#define KGDBRATE TTYDEF_SPEED +#endif +int comkgdbrate = KGDBRATE; +#ifndef KGDBMODE +#define KGDBMODE ((TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8) /* 8N1 */ +#endif +int comkgdbmode = KGDBMODE; +#endif /* NCOM */ + +#endif /* KGDB */ + +/* + * consinit: + * initialize the system console. + * XXX - shouldn't deal with this initted thing, but then, + * it shouldn't be called from init_x86_64 either. + */ +void +consinit() +{ + const struct btinfo_console *consinfo; + static int initted; + + if (initted) + return; + initted = 1; + +#ifndef CONS_OVERRIDE + consinfo = lookup_bootinfo(BTINFO_CONSOLE); + if (!consinfo) +#endif + consinfo = &default_consinfo; + +#if (NPC > 0) || (NVGA > 0) || (NEGA > 0) || (NPCDISPLAY > 0) + if (!strcmp(consinfo->devname, "pc")) { +#if (NVGA > 0) + if (!vga_cnattach(X86_64_BUS_SPACE_IO, X86_64_BUS_SPACE_MEM, + -1, 1)) + goto dokbd; +#endif +#if (NEGA > 0) + if (!ega_cnattach(X86_64_BUS_SPACE_IO, X86_64_BUS_SPACE_MEM)) + goto dokbd; +#endif +#if (NPCDISPLAY > 0) + if (!pcdisplay_cnattach(X86_64_BUS_SPACE_IO, X86_64_BUS_SPACE_MEM)) + goto dokbd; +#endif +#if (NPC > 0) + pccnattach(); +#endif + if (0) goto dokbd; /* XXX stupid gcc */ +dokbd: +#if (NPCKBC > 0) + pckbc_cnattach(X86_64_BUS_SPACE_IO, IO_KBD, KBCMDP, + PCKBC_KBD_SLOT); +#endif + return; + } +#endif /* PC | VT | VGA | PCDISPLAY */ +#if (NCOM > 0) + if (!strcmp(consinfo->devname, "com")) { + bus_space_tag_t tag = X86_64_BUS_SPACE_IO; + + if (comcnattach(tag, consinfo->addr, consinfo->speed, + COM_FREQ, comcnmode)) + panic("can't init serial console @%x", consinfo->addr); + + return; + } +#endif + panic("invalid console device %s", consinfo->devname); +} + +#if (NPCKBC > 0) && (NPCKBD == 0) +/* + * glue code to support old console code with the + * mi keyboard controller driver + */ +int +pckbc_machdep_cnattach(kbctag, kbcslot) + pckbc_tag_t kbctag; + pckbc_slot_t kbcslot; +{ +#if (NPC > 0) && (NPCCONSKBD > 0) + return (pcconskbd_cnattach(kbctag, kbcslot)); +#else + return (ENXIO); +#endif +} +#endif + +#ifdef KGDB +void +kgdb_port_init() +{ +#if (NCOM > 0) + if(!strcmp(kgdb_devname, "com")) { + bus_space_tag_t tag = X86_64_BUS_SPACE_IO; + + com_kgdb_attach(tag, comkgdbaddr, comkgdbrate, COM_FREQ, + comkgdbmode); + } +#endif +} +#endif diff --git 
a/sys/arch/x86_64/x86_64/copy.S b/sys/arch/x86_64/x86_64/copy.S new file mode 100644 index 000000000000..35d923e41961 --- /dev/null +++ b/sys/arch/x86_64/x86_64/copy.S @@ -0,0 +1,438 @@ +/* $NetBSD: copy.S,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "assym.h" + +#include +#include + +#include +#include + +/* + * Copy routines from and to userland, plus a few more. See the + * section 9 manpages for info. Some cases can be optimized more. + * + * I wonder if it's worthwhile to make these use SSE2 registers. + */ + +/* + * XXXfvdl appears only to be used by pccons. + * + * fillw(short pattern, caddr_t addr, size_t len); + * Write len copies of pattern at addr. + * appears to be used by pccons. + */ +ENTRY(fillw) + movl %edi,%eax + movq %rsi,%rdi + movw %ax,%cx + rorl $16,%eax + movw %cx,%ax + cld + movq %rdx,%rcx + shrq %rcx + rep + stosl + movq %rdx,%rcx + andq $1,%rcx + rep + stosw + ret + +ENTRY(kcopy) + movq _C_LABEL(curpcb)(%rip),%rax + pushq PCB_ONFAULT(%rax) + leaq _C_LABEL(copy_fault)(%rip),%r11 + movq %r11, PCB_ONFAULT(%rax) + + xchgq %rdi,%rsi + movq %rdx,%rcx + + movq %rdi,%rax + subq %rsi,%rax + cmpq %rcx,%rax # overlapping? + jb 1f + cld # nope, copy forward + shrq $3,%rcx # copy by 64-bit words + rep + movsq + + movq %rdx,%rcx + andl $7,%ecx # any bytes left? + rep + movsb + + movq _C_LABEL(curpcb)(%rip),%rdx + popq PCB_ONFAULT(%rdx) + xorq %rax,%rax + ret + +1: addq %rcx,%rdi # copy backward + addq %rcx,%rsi + std + andq $7,%rcx # any fractional bytes? 
+ decq %rdi + decq %rsi + rep + movsb + movq %rdx,%rcx # copy remainder by 64-bit words + shrq $3,%rcx + subq $7,%rsi + subq $7,%rdi + rep + movsq + cld + + movq _C_LABEL(curpcb)(%rip),%rdx + popq PCB_ONFAULT(%rdx) + xorq %rax,%rax + ret + +ENTRY(copyout) + pushq $0 + + xchgq %rdi,%rsi + movq %rdx,%rax + + movq %rdi,%rdx + addq %rax,%rdx + jc _C_LABEL(copy_efault) + movq $VM_MAXUSER_ADDRESS,%r8 + cmpq %r8,%rdx + ja _C_LABEL(copy_efault) + + movq _C_LABEL(curpcb)(%rip),%rdx + leaq _C_LABEL(copy_fault)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rdx) + + cld + movq %rax,%rcx + shrq $3,%rcx + rep + movsq + movb %al,%cl + andb $7,%cl + rep + movsb + + popq PCB_ONFAULT(%rdx) + xorl %eax,%eax + ret + +ENTRY(copyin) + movq _C_LABEL(curpcb)(%rip),%rax + pushq $0 + leaq _C_LABEL(copy_fault)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rax) + + xchgq %rdi,%rsi + movq %rdx,%rax + + movq %rsi,%rdx + addq %rax,%rdx + jc _C_LABEL(copy_efault) + movq $VM_MAXUSER_ADDRESS,%r8 + cmpq %r8,%rdx + ja _C_LABEL(copy_efault) + +3: /* bcopy(%rsi, %rdi, %rax); */ + cld + movq %rax,%rcx + shrq $3,%rcx + rep + movsq + movb %al,%cl + andb $7,%cl + rep + movsb + + movq _C_LABEL(curpcb)(%rip),%rdx + popq PCB_ONFAULT(%rdx) + xorl %eax,%eax + ret + +ENTRY(copy_efault) + movq $EFAULT,%rax + +ENTRY(copy_fault) + movq _C_LABEL(curpcb)(%rip),%rdx + popq PCB_ONFAULT(%rdx) + ret + +ENTRY(copyoutstr) + xchgq %rdi,%rsi + movq %rdx,%r8 + movq %rcx,%r9 + +5: movq _C_LABEL(curpcb)(%rip),%rax + leaq _C_LABEL(copystr_fault)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rax) + /* + * Get min(%rdx, VM_MAXUSER_ADDRESS-%rdi). + */ + movq $VM_MAXUSER_ADDRESS,%rax + subq %rdi,%rax + cmpq %rdx,%rax + jae 1f + movq %rax,%rdx + movq %rax,%r8 + +1: incq %rdx + cld + +1: decq %rdx + jz 2f + lodsb + stosb + testb %al,%al + jnz 1b + + /* Success -- 0 byte reached. */ + decq %rdx + xorq %rax,%rax + jmp copystr_return + +2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */ + movq $VM_MAXUSER_ADDRESS,%r11 + cmpq %r11,%rdi + jae _C_LABEL(copystr_efault) + movq $ENAMETOOLONG,%rax + jmp copystr_return + +ENTRY(copyinstr) + xchgq %rdi,%rsi + movq %rdx,%r8 + movq %rcx,%r9 + + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(copystr_fault)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + + /* + * Get min(%rdx, VM_MAXUSER_ADDRESS-%rsi). + */ + movq $VM_MAXUSER_ADDRESS,%rax + subq %rsi,%rax + cmpq %rdx,%rax + jae 1f + movq %rax,%rdx + movq %rax,%r8 + +1: incq %rdx + cld + +1: decq %rdx + jz 2f + lodsb + stosb + testb %al,%al + jnz 1b + + /* Success -- 0 byte reached. */ + decq %rdx + xorq %rax,%rax + jmp copystr_return + +2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */ + movq $VM_MAXUSER_ADDRESS,%r11 + cmpq %r11,%rsi + jae _C_LABEL(copystr_efault) + movq $ENAMETOOLONG,%rax + jmp copystr_return + +ENTRY(copystr_efault) + movl $EFAULT,%eax + +ENTRY(copystr_fault) +copystr_return: + /* Set *lencopied and return %eax. */ + movq _C_LABEL(curpcb)(%rip),%rcx + movq $0,PCB_ONFAULT(%rcx) + testq %r9,%r9 + jz 8f + subq %rdx,%r8 + movq %r8,(%r9) + +8: ret + +ENTRY(copystr) + xchgq %rdi,%rsi + movq %rdx,%r8 + + incq %rdx + cld + +1: decq %rdx + jz 4f + lodsb + stosb + testb %al,%al + jnz 1b + + /* Success -- 0 byte reached. */ + decq %rdx + xorl %eax,%eax + jmp 6f + +4: /* edx is zero -- return ENAMETOOLONG. */ + movl $ENAMETOOLONG,%eax + +6: /* Set *lencopied and return %eax. 
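copyinstr() and copyoutstr() above first clamp the copy length to the room left below VM_MAXUSER_ADDRESS, then distinguish two failure modes: hitting that address limit yields EFAULT, while exhausting the caller's buffer first yields ENAMETOOLONG. A rough C model of that decision logic, with user addresses simulated by array indices, since the real routines do this in assembly under a fault handler:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define ADDR_LIMIT 64		/* stand-in for VM_MAXUSER_ADDRESS */

/*
 * Copy a NUL-terminated string out of a bounded "user space" into dst,
 * copying at most maxlen bytes including the NUL. Returns 0 on success,
 * EFAULT if the copy ran into the address limit, ENAMETOOLONG otherwise.
 */
static int
copystr_model(const char *uspace, size_t uaddr, char *dst, size_t maxlen,
    size_t *lencopied)
{
	size_t space = ADDR_LIMIT > uaddr ? ADDR_LIMIT - uaddr : 0;
	size_t len = maxlen < space ? maxlen : space;	/* the clamp */
	size_t i;

	for (i = 0; i < len; i++) {
		dst[i] = uspace[uaddr + i];
		if (dst[i] == '\0') {
			if (lencopied != NULL)
				*lencopied = i + 1;
			return 0;
		}
	}
	if (lencopied != NULL)
		*lencopied = i;
	/* Ran out of room: which limit did we actually hit? */
	return (uaddr + i >= ADDR_LIMIT) ? EFAULT : ENAMETOOLONG;
}

int
main(void)
{
	char user[ADDR_LIMIT];
	char buf[16];
	size_t done;

	memset(user, 'x', sizeof(user));	/* no terminating NUL anywhere */
	printf("short buffer:  %d\n",
	    copystr_model(user, 0, buf, sizeof(buf), &done));
	printf("address limit: %d\n",
	    copystr_model(user, 60, buf, sizeof(buf), &done));
	return 0;
}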
*/ + testq %rcx,%rcx + jz 7f + subq %rdx,%r8 + movq %r8,(%rcx) + +7: ret + +ENTRY(fuword) + movq $VM_MAXUSER_ADDRESS-4,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusufailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + movl (%rdi),%eax + movq $0,PCB_ONFAULT(%rcx) + ret + +ENTRY(fusword) + movq $VM_MAXUSER_ADDRESS-2,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusufailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + movzwl (%rdi),%eax + movq $0,PCB_ONFAULT(%rcx) + ret + +ENTRY(fuswintr) + movq $VM_MAXUSER_ADDRESS-2,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusuintrfailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + movzwl (%rdi),%eax + movq $0,PCB_ONFAULT(%rcx) + ret + +ENTRY(fubyte) + movq $VM_MAXUSER_ADDRESS-1,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusuintrfailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + movzbl (%rdi),%eax + movq $0,PCB_ONFAULT(%rcx) + ret + +/* + * These are the same, but must reside at different addresses, + * because trap.c checks for them. + */ +ENTRY(fusuintrfailure) + movq $0,PCB_ONFAULT(%rcx) + movl $-1,%eax + ret + +ENTRY(fusufailure) + movq $0,PCB_ONFAULT(%rcx) + movl $-1,%eax + ret + +ENTRY(fusuaddrfault) + movl $-1,%eax + ret + +ENTRY(suword) + movq $VM_MAXUSER_ADDRESS-4,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusufailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + + movq %rsi,(%rdi) + xorq %rax,%rax + movq %rax,PCB_ONFAULT(%rcx) + ret + +ENTRY(susword) + movq $VM_MAXUSER_ADDRESS-2,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusufailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + + movw %si,(%rdi) + xorq %rax,%rax + movq %rax,PCB_ONFAULT(%rcx) + ret + +ENTRY(suswintr) + movq $VM_MAXUSER_ADDRESS-2,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusuintrfailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + + movw %si,(%rdi) + xorq %rax,%rax + movq %rax,PCB_ONFAULT(%rcx) + ret + +ENTRY(subyte) + movq $VM_MAXUSER_ADDRESS-1,%r11 + cmpq %r11,%rdi + ja _C_LABEL(fusuaddrfault) + + movq _C_LABEL(curpcb)(%rip),%rcx + leaq _C_LABEL(fusufailure)(%rip),%r11 + movq %r11,PCB_ONFAULT(%rcx) + + movb %sil,(%rdi) + xorq %rax,%rax + movq %rax,PCB_ONFAULT(%rcx) + ret diff --git a/sys/arch/x86_64/x86_64/disksubr.c b/sys/arch/x86_64/x86_64/disksubr.c new file mode 100644 index 000000000000..34161cd809e2 --- /dev/null +++ b/sys/arch/x86_64/x86_64/disksubr.c @@ -0,0 +1,520 @@ +/* $NetBSD: disksubr.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/* + * Copyright (c) 1982, 1986, 1988 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91 + */ + +/* + * XXXfvdl copy of i386 code. May change later if a different partitioning + * scheme is used. If not, it can be shared. + */ + +#include +#include +#include +#include +#include +#include + +#include "opt_mbr.h" + +int fat_types[] = { MBR_PTYPE_FAT12, MBR_PTYPE_FAT16S, + MBR_PTYPE_FAT16B, MBR_PTYPE_FAT32, + MBR_PTYPE_FAT32L, MBR_PTYPE_FAT16L, + -1 }; + +#define NO_MBR_SIGNATURE ((struct mbr_partition *) -1) + +static struct mbr_partition * +mbr_findslice __P((struct mbr_partition* dp, struct buf *bp)); + +/* + * Scan MBR for NetBSD partittion. Return NO_MBR_SIGNATURE if no MBR found + * Otherwise, copy valid MBR partition-table into dp, and if a NetBSD + * partition is found, return a pointer to it; else return NULL. + */ +static +struct mbr_partition * +mbr_findslice(dp, bp) + struct mbr_partition *dp; + struct buf *bp; +{ + struct mbr_partition *ourdp = NULL; + u_int16_t *mbrmagicp; + int i; + + /* Note: Magic number is little-endian. */ + mbrmagicp = (u_int16_t *)(bp->b_data + MBR_MAGICOFF); + if (*mbrmagicp != MBR_MAGIC) + return (NO_MBR_SIGNATURE); + + /* XXX how do we check veracity/bounds of this? */ + memcpy(dp, bp->b_data + MBR_PARTOFF, NMBRPART * sizeof(*dp)); + + /* look for NetBSD partition */ + for (i = 0; i < NMBRPART; i++) { + if (dp[i].mbrp_typ == MBR_PTYPE_NETBSD) { + ourdp = &dp[i]; + break; + } + } + +#ifdef COMPAT_386BSD_MBRPART + /* didn't find it -- look for 386BSD partition */ + if (!ourdp) { + for (i = 0; i < NMBRPART; i++) { + if (dp[i].mbrp_typ == MBR_PTYPE_386BSD) { + printf("WARNING: old BSD partition ID!\n"); + ourdp = &dp[i]; + /* + * If more than one matches, take last, + * as NetBSD install tool does. + */ +#if 0 + break; +#endif + } + } + } +#endif /* COMPAT_386BSD_MBRPART */ + + return (ourdp); +} + + +/* + * Attempt to read a disk label from a device + * using the indicated stategy routine. + * The label must be partly set up before this: + * secpercyl, secsize and anything required for a block i/o read + * operation in the driver's strategy/start routines + * must be filled in before calling us. + * + * If dos partition table requested, attempt to load it and + * find disklabel inside a DOS partition. Also, if bad block + * table needed, attempt to extract it as well. 
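mbr_findslice() above only trusts the sector once the 0xAA55 signature checks out, then walks the four primary partition entries looking for the NetBSD type. A standalone sketch of the same scan over a raw 512-byte sector image; the offsets follow the classic MBR layout and 0xa9 matches MBR_PTYPE_NETBSD, but the entries are decoded by hand here rather than through the kernel's struct mbr_partition:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SECSIZE		512
#define MBR_PARTOFF	446	/* start of the 4-entry partition table */
#define MBR_MAGICOFF	510	/* 0x55 0xaa signature */
#define PTYPE_NETBSD	0xa9

/* Return the entry index of the first NetBSD slice, -1 if no MBR, -2 if none. */
static int
find_netbsd_slice(const uint8_t *sector, uint32_t *start, uint32_t *size)
{
	int i;

	if (sector[MBR_MAGICOFF] != 0x55 || sector[MBR_MAGICOFF + 1] != 0xaa)
		return -1;	/* no MBR signature at all */

	for (i = 0; i < 4; i++) {
		const uint8_t *p = sector + MBR_PARTOFF + 16 * i;

		if (p[4] != PTYPE_NETBSD)	/* partition type byte */
			continue;
		/* Start LBA and size are little-endian 32-bit fields. */
		*start = p[8] | p[9] << 8 |
		    (uint32_t)p[10] << 16 | (uint32_t)p[11] << 24;
		*size = p[12] | p[13] << 8 |
		    (uint32_t)p[14] << 16 | (uint32_t)p[15] << 24;
		return i;
	}
	return -2;
}

int
main(void)
{
	uint8_t mbr[SECSIZE];
	uint8_t *p = mbr + MBR_PARTOFF + 16;	/* fabricate entry #1 */
	uint32_t start = 0, size = 0;
	int idx;

	memset(mbr, 0, sizeof(mbr));
	mbr[MBR_MAGICOFF] = 0x55;
	mbr[MBR_MAGICOFF + 1] = 0xaa;
	p[4] = PTYPE_NETBSD;
	p[8] = 63;			/* starts at sector 63 */
	p[13] = 0x40;			/* 0x4000 sectors long */

	idx = find_netbsd_slice(mbr, &start, &size);
	printf("slice %d, start %u, size %u\n", idx, start, size);
	return 0;
}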
Return buffer + * for use in signalling errors if requested. + * + * Returns null on success and an error string on failure. + */ +char * +readdisklabel(dev, strat, lp, osdep) + dev_t dev; + void (*strat) __P((struct buf *)); + struct disklabel *lp; + struct cpu_disklabel *osdep; +{ + struct mbr_partition *dp; + struct partition *pp; + struct dkbad *bdp; + struct buf *bp; + struct disklabel *dlp; + char *msg = NULL; + int dospartoff, cyl, i, *ip; + + /* minimal requirements for archtypal disk label */ + if (lp->d_secsize == 0) + lp->d_secsize = DEV_BSIZE; + if (lp->d_secperunit == 0) + lp->d_secperunit = 0x1fffffff; +#if 0 + if (lp->d_ncylinders == 16383) { + printf("disklabel: Disk > 8G ... readjusting chs %d/%d/%d to ", + lp->d_ncylinders, lp->d_ntracks, lp->d_nsectors); + lp->d_ncylinders = lp->d_secperunit / lp->d_ntracks / lp->d_nsectors; + printf("%d/%d/%d\n", + lp->d_ncylinders, lp->d_ntracks, lp->d_nsectors); + } +#endif + lp->d_npartitions = RAW_PART + 1; + for (i = 0; i < RAW_PART; i++) { + lp->d_partitions[i].p_size = 0; + lp->d_partitions[i].p_offset = 0; + } + if (lp->d_partitions[i].p_size == 0) + lp->d_partitions[i].p_size = 0x1fffffff; + lp->d_partitions[i].p_offset = 0; + + /* get a buffer and initialize it */ + bp = geteblk((int)lp->d_secsize); + bp->b_dev = dev; + + /* do dos partitions in the process of getting disklabel? */ + dospartoff = 0; + cyl = LABELSECTOR / lp->d_secpercyl; + if (!osdep) + goto nombrpart; + dp = osdep->dosparts; + + /* read master boot record */ + bp->b_blkno = MBR_BBSECTOR; + bp->b_bcount = lp->d_secsize; + bp->b_flags |= B_READ; + bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl; + (*strat)(bp); + + /* if successful, wander through dos partition table */ + if (biowait(bp)) { + msg = "dos partition I/O error"; + goto done; + } else { + struct mbr_partition *ourdp = NULL; + + ourdp = mbr_findslice(dp, bp); + if (ourdp == NO_MBR_SIGNATURE) + goto nombrpart; + + for (i = 0; i < NMBRPART; i++, dp++) { + /* Install in partition e, f, g, or h. */ + pp = &lp->d_partitions[RAW_PART + 1 + i]; + pp->p_offset = dp->mbrp_start; + pp->p_size = dp->mbrp_size; + for (ip = fat_types; *ip != -1; ip++) { + if (dp->mbrp_typ == *ip) + pp->p_fstype = FS_MSDOS; + } + if (dp->mbrp_typ == MBR_PTYPE_LNXEXT2) + pp->p_fstype = FS_EX2FS; + + if (dp->mbrp_typ == MBR_PTYPE_NTFS) + pp->p_fstype = FS_NTFS; + + /* is this ours? */ + if (dp == ourdp) { + /* need sector address for SCSI/IDE, + cylinder for ESDI/ST506/RLL */ + dospartoff = dp->mbrp_start; + cyl = MBR_PCYL(dp->mbrp_scyl, dp->mbrp_ssect); + + /* update disklabel with details */ + lp->d_partitions[2].p_size = + dp->mbrp_size; + lp->d_partitions[2].p_offset = + dp->mbrp_start; +#if 0 + if (lp->d_ntracks != dp->mbrp_ehd + 1 || + lp->d_nsectors != DPSECT(dp->mbrp_esect)) { + printf("disklabel: BIOS sees chs %d/%d/%d as ", + lp->d_ncylinders, lp->d_ntracks, + lp->d_nsectors); + lp->d_ntracks = dp->mbrp_ehd + 1; + lp->d_nsectors = DPSECT(dp->mbrp_esect); + lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors; + lp->d_ncylinders = lp->d_secperunit / lp->d_secpercyl; + if (! 
lp->d_ncylinders) + lp->d_ncylinders = 1; + printf("%d/%d/%d\n", + lp->d_ncylinders, lp->d_ntracks, + lp->d_nsectors); + } +#endif + } + } + lp->d_npartitions = RAW_PART + 1 + i; + } + +nombrpart: + /* next, dig out disk label */ + bp->b_blkno = dospartoff + LABELSECTOR; + bp->b_cylinder = cyl; + bp->b_bcount = lp->d_secsize; + bp->b_flags &= ~(B_DONE); + bp->b_flags |= B_READ; + (*strat)(bp); + + /* if successful, locate disk label within block and validate */ + if (biowait(bp)) { + msg = "disk label I/O error"; + goto done; + } + for (dlp = (struct disklabel *)bp->b_data; + dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp)); + dlp = (struct disklabel *)((char *)dlp + sizeof(long))) { + if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC) { + if (msg == NULL) + msg = "no disk label"; + } else if (dlp->d_npartitions > MAXPARTITIONS || + dkcksum(dlp) != 0) + msg = "disk label corrupted"; + else { + *lp = *dlp; + msg = NULL; + break; + } + } + + if (msg) + goto done; + + /* obtain bad sector table if requested and present */ + if (osdep && (lp->d_flags & D_BADSECT)) { + struct dkbad *db; + + bdp = &osdep->bad; + i = 0; + do { + /* read a bad sector table */ + bp->b_flags &= ~(B_DONE); + bp->b_flags |= B_READ; + bp->b_blkno = lp->d_secperunit - lp->d_nsectors + i; + if (lp->d_secsize > DEV_BSIZE) + bp->b_blkno *= lp->d_secsize / DEV_BSIZE; + else + bp->b_blkno /= DEV_BSIZE / lp->d_secsize; + bp->b_bcount = lp->d_secsize; + bp->b_cylinder = lp->d_ncylinders - 1; + (*strat)(bp); + + /* if successful, validate, otherwise try another */ + if (biowait(bp)) { + msg = "bad sector table I/O error"; + } else { + db = (struct dkbad *)(bp->b_data); +#define DKBAD_MAGIC 0x4321 + if (db->bt_mbz == 0 + && db->bt_flag == DKBAD_MAGIC) { + msg = NULL; + *bdp = *db; + break; + } else + msg = "bad sector table corrupted"; + } + } while ((bp->b_flags & B_ERROR) && (i += 2) < 10 && + i < lp->d_nsectors); + } + +done: + brelse(bp); + return (msg); +} + +/* + * Check new disk label for sensibility + * before setting it. + */ +int +setdisklabel(olp, nlp, openmask, osdep) + struct disklabel *olp, *nlp; + u_long openmask; + struct cpu_disklabel *osdep; +{ + int i; + struct partition *opp, *npp; + + /* sanity clause */ + if (nlp->d_secpercyl == 0 || nlp->d_secsize == 0 + || (nlp->d_secsize % DEV_BSIZE) != 0) + return(EINVAL); + + /* special case to allow disklabel to be invalidated */ + if (nlp->d_magic == 0xffffffff) { + *olp = *nlp; + return (0); + } + + if (nlp->d_magic != DISKMAGIC || nlp->d_magic2 != DISKMAGIC || + dkcksum(nlp) != 0) + return (EINVAL); + + /* XXX missing check if other dos partitions will be overwritten */ + + while (openmask != 0) { + i = ffs(openmask) - 1; + openmask &= ~(1 << i); + if (nlp->d_npartitions <= i) + return (EBUSY); + opp = &olp->d_partitions[i]; + npp = &nlp->d_partitions[i]; + if (npp->p_offset != opp->p_offset || npp->p_size < opp->p_size) + return (EBUSY); + /* + * Copy internally-set partition information + * if new label doesn't include it. XXX + */ + if (npp->p_fstype == FS_UNUSED && opp->p_fstype != FS_UNUSED) { + npp->p_fstype = opp->p_fstype; + npp->p_fsize = opp->p_fsize; + npp->p_frag = opp->p_frag; + npp->p_cpg = opp->p_cpg; + } + } + nlp->d_checksum = 0; + nlp->d_checksum = dkcksum(nlp); + *olp = *nlp; + return (0); +} + + +/* + * Write disk label back to device after modification. 
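setdisklabel() above, like the label scan in readdisklabel() before it, accepts a label only when both magic numbers are right and dkcksum() over it comes out zero; when a label is written, d_checksum is first cleared and then set to the XOR of the label's 16-bit words, so a later XOR over the finished label cancels to zero. A minimal sketch of that self-cancelling checksum scheme over a made-up, much smaller structure (the real dkcksum() only sums up to the last used partition entry, which is skipped here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* A toy "label"; the real disklabel has many more fields. */
struct toylabel {
	uint32_t magic;
	uint16_t npartitions;
	uint16_t checksum;
	uint32_t part_size[4];
};

/* XOR all 16-bit words of the structure, dkcksum-style. */
static uint16_t
cksum(const struct toylabel *lp)
{
	const uint16_t *w = (const uint16_t *)lp;
	const uint16_t *end = w + sizeof(*lp) / sizeof(uint16_t);
	uint16_t sum = 0;

	while (w < end)
		sum ^= *w++;
	return sum;
}

int
main(void)
{
	struct toylabel lab;

	memset(&lab, 0, sizeof(lab));
	lab.magic = 0x82564557;		/* the DISKMAGIC value, for flavour */
	lab.npartitions = 4;
	lab.part_size[0] = 1024;

	/* Writing: clear the field, then store the checksum into it. */
	lab.checksum = 0;
	lab.checksum = cksum(&lab);

	/* Validating: the XOR over the whole label must now cancel to zero. */
	printf("valid: %s\n", cksum(&lab) == 0 ? "yes" : "no");

	/* Any corruption breaks the invariant. */
	lab.part_size[0] = 2048;
	printf("after corruption: %s\n", cksum(&lab) == 0 ? "yes" : "no");
	return 0;
}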
+ */ +int +writedisklabel(dev, strat, lp, osdep) + dev_t dev; + void (*strat) __P((struct buf *)); + struct disklabel *lp; + struct cpu_disklabel *osdep; +{ + struct mbr_partition *dp; + struct buf *bp; + struct disklabel *dlp; + int error, dospartoff, cyl; + + /* get a buffer and initialize it */ + bp = geteblk((int)lp->d_secsize); + bp->b_dev = dev; + + /* do dos partitions in the process of getting disklabel? */ + dospartoff = 0; + cyl = LABELSECTOR / lp->d_secpercyl; + if (!osdep) + goto nombrpart; + dp = osdep->dosparts; + + /* read master boot record */ + bp->b_blkno = MBR_BBSECTOR; + bp->b_bcount = lp->d_secsize; + bp->b_flags |= B_READ; + bp->b_cylinder = MBR_BBSECTOR / lp->d_secpercyl; + (*strat)(bp); + + if ((error = biowait(bp)) == 0) { + struct mbr_partition *ourdp = NULL; + + ourdp = mbr_findslice(dp, bp); + if (ourdp == NO_MBR_SIGNATURE) + goto nombrpart; + + if (ourdp) { + /* need sector address for SCSI/IDE, + cylinder for ESDI/ST506/RLL */ + dospartoff = ourdp->mbrp_start; + cyl = MBR_PCYL(ourdp->mbrp_scyl, ourdp->mbrp_ssect); + } + } + +nombrpart: +#ifdef maybe + /* disklabel in appropriate location? */ + if (lp->d_partitions[2].p_offset != 0 + && lp->d_partitions[2].p_offset != dospartoff) { + error = EXDEV; + goto done; + } +#endif + + /* next, dig out disk label */ + bp->b_blkno = dospartoff + LABELSECTOR; + bp->b_cylinder = cyl; + bp->b_bcount = lp->d_secsize; + bp->b_flags &= ~(B_DONE); + bp->b_flags |= B_READ; + (*strat)(bp); + + /* if successful, locate disk label within block and validate */ + if ((error = biowait(bp)) != 0) + goto done; + for (dlp = (struct disklabel *)bp->b_data; + dlp <= (struct disklabel *)(bp->b_data + lp->d_secsize - sizeof(*dlp)); + dlp = (struct disklabel *)((char *)dlp + sizeof(long))) { + if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC && + dkcksum(dlp) == 0) { + *dlp = *lp; + bp->b_flags &= ~(B_READ|B_DONE); + bp->b_flags |= B_WRITE; + (*strat)(bp); + error = biowait(bp); + goto done; + } + } + error = ESRCH; + +done: + brelse(bp); + return (error); +} + +/* + * Determine the size of the transfer, and make sure it is + * within the boundaries of the partition. Adjust transfer + * if needed, and signal errors or early completion. + */ +int +bounds_check_with_label(bp, lp, wlabel) + struct buf *bp; + struct disklabel *lp; + int wlabel; +{ + struct partition *p = lp->d_partitions + DISKPART(bp->b_dev); + int labelsector = lp->d_partitions[2].p_offset + LABELSECTOR; + int sz; + + sz = howmany(bp->b_bcount, lp->d_secsize); + + if (bp->b_blkno + sz > p->p_size) { + sz = p->p_size - bp->b_blkno; + if (sz == 0) { + /* If exactly at end of disk, return EOF. */ + bp->b_resid = bp->b_bcount; + goto done; + } + if (sz < 0) { + /* If past end of disk, return EINVAL. */ + bp->b_error = EINVAL; + goto bad; + } + /* Otherwise, truncate request. */ + bp->b_bcount = sz << DEV_BSHIFT; + } + + /* Overwriting disk label? 
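The arithmetic at the top of bounds_check_with_label() above converts the byte count to sectors with howmany() and then either signals EOF (the request starts exactly at the end of the partition), rejects a request that starts past the end, or truncates one that straddles it. A small sketch of just that clamping, with a fixed 512-byte sector and the label write-protection check left out:

#include <stdio.h>
#include <errno.h>

#define SECSIZE		512
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

/*
 * For a transfer of bcount bytes starting at sector blkno inside a
 * partition of psize sectors: return -1 on error (and set *errp),
 * 0 for "exactly at end of partition" (EOF), or the byte count to
 * actually transfer, possibly truncated.
 */
static long
clamp_transfer(long blkno, long bcount, long psize, int *errp)
{
	long sz = howmany(bcount, SECSIZE);

	if (blkno + sz > psize) {
		sz = psize - blkno;
		if (sz == 0)
			return 0;		/* exactly at the end: EOF */
		if (sz < 0) {
			*errp = EINVAL;		/* starts past the end */
			return -1;
		}
		bcount = sz * SECSIZE;		/* truncate the request */
	}
	return bcount;
}

int
main(void)
{
	int err = 0;
	long r;

	printf("inside:    %ld\n", clamp_transfer(0, 8192, 100, &err));
	printf("straddles: %ld\n", clamp_transfer(98, 8192, 100, &err));
	printf("at end:    %ld\n", clamp_transfer(100, 8192, 100, &err));
	r = clamp_transfer(101, 8192, 100, &err);
	printf("past end:  %ld (err %d)\n", r, err);
	return 0;
}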
*/ + if (bp->b_blkno + p->p_offset <= labelsector && +#if LABELSECTOR != 0 + bp->b_blkno + p->p_offset + sz > labelsector && +#endif + (bp->b_flags & B_READ) == 0 && !wlabel) { + bp->b_error = EROFS; + goto bad; + } + + /* calculate cylinder for disksort to order transfers with */ + bp->b_cylinder = (bp->b_blkno + p->p_offset) / + (lp->d_secsize / DEV_BSIZE) / lp->d_secpercyl; + return (1); + +bad: + bp->b_flags |= B_ERROR; +done: + return (0); +} diff --git a/sys/arch/x86_64/x86_64/fpu.c b/sys/arch/x86_64/x86_64/fpu.c new file mode 100644 index 000000000000..6243adaba23b --- /dev/null +++ b/sys/arch/x86_64/x86_64/fpu.c @@ -0,0 +1,225 @@ +/* $NetBSD: fpu.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/*- + * Copyright (c) 1994, 1995, 1998 Charles M. Hannum. All rights reserved. + * Copyright (c) 1990 William Jolitz. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)npx.c 7.2 (Berkeley) 5/12/91 + */ + +/* + * XXXfvdl update copyright notice. this started out as a stripped isa/npx.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* + * We do lazy initialization and switching using the TS bit in cr0 and the + * MDP_USEDFPU bit in mdproc. + * + * DNA exceptions are handled like this: + * + * 1) If there is no FPU, return and go to the emulator. + * 2) If someone else has used the FPU, save its state into that process's PCB. + * 3a) If MDP_USEDFPU is not set, set it and initialize the FPU. + * 3b) Otherwise, reload the process's previous FPU state. 
+ * + * When a process is created or exec()s, its saved cr0 image has the TS bit + * set and the MDP_USEDFPU bit clear. The MDP_USEDFPU bit is set when the + * process first gets a DNA and the FPU is initialized. The TS bit is turned + * off when the FPU is used, and turned on again later when the process's FPU + * state is saved. + */ + +#define fninit() __asm("fninit") +#define fwait() __asm("fwait") +#define fxsave(addr) __asm("fxsave %0" : "=m" (*addr)) +#define fxrstor(addr) __asm("fxrstor %0" : : "m" (*addr)) +#define clts() __asm("clts") +#define stts() lcr0(rcr0() | CR0_TS) + +void fpudna(struct proc *); +void fpuexit(void); +static void fpusave1(void); + +struct proc *fpuproc; + +/* + * Init the FPU. + */ +void +fpuinit(void) +{ + lcr0(rcr0() & ~(CR0_EM|CR0_TS)); + fninit(); + lcr0(rcr0() | (CR0_TS)); +} + +/* + * Record the FPU state and reinitialize it all except for the control word. + * Then generate a SIGFPE. + * + * Reinitializing the state allows naive SIGFPE handlers to longjmp without + * doing any fixups. + */ +void +fputrap(frame) + struct trapframe *frame; +{ + register struct proc *p = fpuproc; + +#ifdef DIAGNOSTIC + /* + * At this point, fpuproc should be curproc. If it wasn't, the TS bit + * should be set, and we should have gotten a DNA exception. + */ + if (p != curproc) + panic("fputrap: wrong process"); +#endif + + fxsave(&p->p_addr->u_pcb.pcb_savefpu); + fninit(); + fwait(); + trapsignal(p, SIGFPE, frame->tf_err); +} + +/* + * Wrapper for the fnsave instruction. We set the TS bit in the saved CR0 for + * this process, so that it will get a DNA exception on the FPU instruction and + * force a reload. + */ +static inline void +fpusave1(void) +{ + struct proc *p = fpuproc; + + fxsave(&p->p_addr->u_pcb.pcb_savefpu); + p->p_addr->u_pcb.pcb_cr0 |= CR0_TS; +} + +/* + * Implement device not available (DNA) exception + * + * If we were the last process to use the FPU, we can simply return. + * Otherwise, we save the previous state, if necessary, and restore our last + * saved state. + */ +void +fpudna(struct proc *p) +{ + +#ifdef DIAGNOSTIC + if (cpl != 0) + panic("fpudna: masked"); +#endif + + p->p_addr->u_pcb.pcb_cr0 &= ~CR0_TS; + clts(); + + /* + * Initialize the FPU state to clear any exceptions. If someone else + * was using the FPU, save their state. + */ + if (fpuproc != 0 && fpuproc != p) + fpusave1(); + + fninit(); + fwait(); + + fpuproc = p; + + if ((p->p_md.md_flags & MDP_USEDFPU) == 0) + p->p_md.md_flags |= MDP_USEDFPU; + else + fxrstor(&p->p_addr->u_pcb.pcb_savefpu); +} + +/* + * Drop the current FPU state on the floor. + */ +void +fpudrop(void) +{ + struct proc *p = fpuproc; + + fpuproc = 0; + stts(); + p->p_addr->u_pcb.pcb_cr0 |= CR0_TS; +} + +/* + * Save fpuproc's FPU state. + * + * The FNSAVE instruction clears the FPU state. Rather than reloading the FPU + * immediately, we clear fpuproc and turn on CR0_TS to force a DNA and a reload + * of the FPU state the next time we try to use it. This routine is only + * called when forking or core dumping, so the lazy reload at worst forces us + * to trap once per fork(), and at best saves us a reload once per fork(). 
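The routines above implement the lazy policy described at the top of the file: nothing is saved or restored on an ordinary context switch; instead CR0_TS makes the next FPU instruction trap (DNA), and only then is the previous owner's state saved and the new owner's state reloaded or freshly initialized. A small simulation of that ownership scheme using plain structs, with a global double standing in for the FPU registers and a function call standing in for the trap:

#include <stdio.h>

struct proc {
	const char *name;
	int used_fpu;		/* like MDP_USEDFPU */
	double saved_state;	/* stand-in for the saved FPU image */
};

static struct proc *fpu_owner;	/* like fpuproc */
static double fpu_state;	/* stand-in for the live FPU registers */

/* "Device not available": runs the first time a process touches the FPU. */
static void
dna(struct proc *p)
{
	if (fpu_owner != NULL && fpu_owner != p)
		fpu_owner->saved_state = fpu_state;	/* lazy save */
	if (!p->used_fpu) {
		fpu_state = 0.0;		/* like fninit(): fresh state */
		p->used_fpu = 1;
	} else {
		fpu_state = p->saved_state;	/* like fxrstor(): reload */
	}
	fpu_owner = p;
}

/* A process using the FPU: trap only if it is not the current owner. */
static void
use_fpu(struct proc *p, double add)
{
	if (fpu_owner != p)
		dna(p);
	fpu_state += add;
}

int
main(void)
{
	struct proc a = { "a", 0, 0.0 };
	struct proc b = { "b", 0, 0.0 };

	use_fpu(&a, 1.0);	/* a traps, gets a freshly initialized FPU */
	use_fpu(&a, 1.0);	/* no trap: a still owns the FPU */
	use_fpu(&b, 5.0);	/* b traps, a's state is saved only now */
	use_fpu(&a, 1.0);	/* a traps again and its 2.0 is reloaded */
	printf("a sees %.1f, b's saved state is %.1f\n",
	    fpu_state, b.saved_state);
	return 0;
}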
+ */ +void +fpusave(void) +{ + +#ifdef DIAGNOSTIC + if (cpl != 0) + panic("fpusave: masked"); +#endif + clts(); + fpusave1(); + fpuproc = 0; + stts(); +} diff --git a/sys/arch/x86_64/x86_64/gdt.c b/sys/arch/x86_64/x86_64/gdt.c new file mode 100644 index 000000000000..87b2274a2f3d --- /dev/null +++ b/sys/arch/x86_64/x86_64/gdt.c @@ -0,0 +1,373 @@ +/* $NetBSD: gdt.c,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by John T. Kohl and Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Modified to deal with variable-length entries for NetBSD/x86_64 by + * fvdl@wasabisystems.com, may 2001 + * XXX this file should be shared with the i386 code, the difference + * can be hidden in macros. + */ + +#include +#include +#include +#include +#include + +#include + +#include + +#define MINGDTSIZ 2048 +#define MAXGDTSIZ 65536 + +int gdt_size; /* size of GDT in bytes */ +int gdt_dyncount; /* number of dyn. allocated GDT entries in use */ +int gdt_dynavail; +int gdt_next; /* next available slot for sweeping */ +int gdt_free; /* next free slot; terminated with GNULL_SEL */ + +struct lock gdt_lock_store; + +static __inline void gdt_lock __P((void)); +static __inline void gdt_unlock __P((void)); +void gdt_compact __P((void)); +void gdt_init __P((void)); +void gdt_grow __P((void)); +void gdt_shrink __P((void)); +int gdt_get_slot __P((void)); +void gdt_put_slot __P((int)); + +/* + * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep + * waiting for memory. + * + * Note that the locking done here is not sufficient for multiprocessor + * systems. 
A freshly allocated slot will still be of type SDT_SYSNULL for + * some time after the GDT is unlocked, so gdt_compact() could attempt to + * reclaim it. + */ +static __inline void +gdt_lock() +{ + + (void) lockmgr(&gdt_lock_store, LK_EXCLUSIVE, NULL); +} + +static __inline void +gdt_unlock() +{ + + (void) lockmgr(&gdt_lock_store, LK_RELEASE, NULL); +} + +/* + * Compact the GDT as follows: + * 0) We partition the GDT into two areas, one of the slots before gdt_dyncount, + * and one of the slots after. After compaction, the former part should be + * completely filled, and the latter part should be completely empty. + * 1) Step through the process list, looking for TSS and LDT descriptors in + * the second section, and swap them with empty slots in the first section. + * 2) Arrange for new allocations to sweep through the empty section. Since + * we're sweeping through all of the empty entries, and we'll create a free + * list as things are deallocated, we do not need to create a new free list + * here. + */ +void +gdt_compact() +{ + struct proc *p; + pmap_t pmap; + int slot = 0, oslot; + struct sys_segment_descriptor *gdt; + + gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START]; + proclist_lock_read(); + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + pmap = p->p_vmspace->vm_map.pmap; + oslot = IDXDYNSEL(p->p_md.md_tss_sel); + if (oslot >= gdt_dyncount) { + while (gdt[slot].sd_type != SDT_SYSNULL) { + if (++slot >= gdt_dyncount) + panic("gdt_compact botch 1"); + } + gdt[slot] = gdt[oslot]; + gdt[oslot].sd_type = SDT_SYSNULL; + p->p_md.md_tss_sel = GDYNSEL(slot, SEL_KPL); + } + simple_lock(&pmap->pm_lock); + oslot = IDXDYNSEL(pmap->pm_ldt_sel); + if (oslot >= gdt_dyncount) { + while (gdt[slot].sd_type != SDT_SYSNULL) { + if (++slot >= gdt_dyncount) + panic("gdt_compact botch 2"); + } + gdt[slot] = gdt[oslot]; + gdt[oslot].sd_type = SDT_SYSNULL; + pmap->pm_ldt_sel = GDYNSEL(slot, SEL_KPL); + /* + * XXXSMP: if the pmap is in use on other + * processors, they need to reload thier + * LDT! + */ + } + simple_unlock(&pmap->pm_lock); + } +#ifdef DIAGNOSTIC + for (; slot < gdt_dyncount; slot++) + if (gdt[slot].sd_type == SDT_SYSNULL) + panic("gdt_compact botch 3"); + + for (slot = gdt_dyncount; slot < gdt_dynavail; slot++) + if (gdt[slot].sd_type != SDT_SYSNULL) + panic("gdt_compact botch 4"); + +#endif + gdt_next = gdt_dyncount; + gdt_free = GNULL_SEL; + proclist_unlock_read(); +} + +/* + * Initialize the GDT. + */ +void +gdt_init() +{ + struct region_descriptor region; + char *old_gdt; + + lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0); + + gdt_size = MINGDTSIZ; + gdt_dyncount = 0; + gdt_next = 0; + gdt_free = GNULL_SEL; + gdt_dynavail = + (gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor); + + old_gdt = gdtstore; + gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ); + uvm_map_pageable(kernel_map, (vaddr_t)gdtstore, + (vaddr_t)gdtstore + MINGDTSIZ, FALSE, FALSE); + memcpy(gdtstore, old_gdt, DYNSEL_START); + + setregion(®ion, gdtstore, (u_int16_t)(MAXGDTSIZ - 1)); + lgdt(®ion); +} + +/* + * Grow or shrink the GDT. 
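The two-partition sweep performed by gdt_compact() above can be illustrated with a plain array of slot types. Everything below is an invented stand-alone sketch: the kernel additionally rewrites the TSS and LDT selectors it moves, and panics instead of assuming enough holes exist.

#include <stdio.h>

#define NSLOTS  16
#define T_NULL  0       /* plays the role of SDT_SYSNULL */

static int slot_type[NSLOTS];   /* toy "descriptor type" per slot */

/* Move every live slot at or beyond dyncount into a hole below it. */
static void
compact(int dyncount)
{
        int free_slot = 0, i;

        for (i = dyncount; i < NSLOTS; i++) {
                if (slot_type[i] == T_NULL)
                        continue;
                while (slot_type[free_slot] != T_NULL)
                        free_slot++;    /* find a hole in the low part */
                slot_type[free_slot] = slot_type[i];
                slot_type[i] = T_NULL;
                /* the kernel also rewrites md_tss_sel/pm_ldt_sel here */
        }
}

int
main(void)
{
        int i;

        /* five live descriptors, two of them above dyncount == 5 */
        slot_type[1] = slot_type[3] = slot_type[4] = 9;
        slot_type[7] = slot_type[12] = 9;
        compact(5);
        for (i = 0; i < NSLOTS; i++)
                printf("%d", slot_type[i] != T_NULL);
        printf("\n");   /* 1111100000000000: slots 0-4 full, rest empty */
        return 0;
}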
+ */ +void +gdt_grow() +{ + size_t old_len, new_len; + + old_len = gdt_size; + gdt_size <<= 1; + new_len = old_len << 1; + gdt_dynavail = + (gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor); + + uvm_map_pageable(kernel_map, (vaddr_t)gdtstore + old_len, + (vaddr_t)gdtstore + new_len, FALSE, FALSE); +} + +void +gdt_shrink() +{ + size_t old_len, new_len; + + old_len = gdt_size; + gdt_size >>= 1; + new_len = old_len >> 1; + gdt_dynavail = + (gdt_size - DYNSEL_START) / sizeof (struct sys_segment_descriptor); + + uvm_map_pageable(kernel_map, (vaddr_t)gdtstore + new_len, + (vaddr_t)gdtstore + old_len, TRUE, FALSE); +} + +/* + * Allocate a GDT slot as follows: + * 1) If there are entries on the free list, use those. + * 2) If there are fewer than gdt_dynavail entries in use, there are free slots + * near the end that we can sweep through. + * 3) As a last resort, we increase the size of the GDT, and sweep through + * the new slots. + */ +int +gdt_get_slot() +{ + int slot; + struct sys_segment_descriptor *gdt; + + gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START]; + + gdt_lock(); + + if (gdt_free != GNULL_SEL) { + slot = gdt_free; + gdt_free = gdt[slot].sd_xx3; /* XXXfvdl res. field abuse */ + } else { +#ifdef DIAGNOSTIC + if (gdt_next != gdt_dyncount) + panic("gdt_get_slot botch 1"); +#endif + if (gdt_next >= gdt_dynavail) { +#ifdef DIAGNOSTIC + if (gdt_size >= MAXGDTSIZ) + panic("gdt_get_slot botch 2"); +#endif + gdt_grow(); + } + slot = gdt_next++; + } + + gdt_dyncount++; + gdt_unlock(); + return (slot); +} + +/* + * Deallocate a GDT slot, putting it on the free list. + */ +void +gdt_put_slot(slot) + int slot; +{ + struct sys_segment_descriptor *gdt; + + gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START]; + + gdt_lock(); + gdt_dyncount--; + + gdt[slot].sd_type = SDT_SYSNULL; + /* + * shrink the GDT if we're using less than 1/4 of it. + * Shrinking at that point means we'll still have room for + * almost 2x as many processes as are now running without + * having to grow the GDT. 
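The allocation policy gdt_get_slot() implements above (free list first, then sweep forward, then grow) can be shown in isolation. The names below are invented; the free list is threaded through the slots themselves, much as the kernel abuses the sd_xx3 reserved field.

#include <stdio.h>

#define SLOT_END        (-1)    /* like GNULL_SEL terminating the list */
#define MAXSLOTS        1024

static int next_free[MAXSLOTS];         /* per-slot free-list link */
static int capacity = 4;                /* like gdt_dynavail */
static int sweep = 0;                   /* like gdt_next */
static int free_head = SLOT_END;        /* like gdt_free */

static int
get_slot(void)
{
        int slot;

        if (free_head != SLOT_END) {            /* 1) reuse a freed slot */
                slot = free_head;
                free_head = next_free[slot];
        } else {
                if (sweep >= capacity)          /* 3) out of room: grow */
                        capacity *= 2;          /* kernel: gdt_grow() maps pages */
                slot = sweep++;                 /* 2) sweep into fresh slots */
        }
        return slot;
}

static void
put_slot(int slot)
{
        next_free[slot] = free_head;            /* push onto the free list */
        free_head = slot;
}

int
main(void)
{
        int a, b, c;

        a = get_slot(); b = get_slot(); c = get_slot();
        printf("%d %d %d\n", a, b, c);          /* 0 1 2 */
        put_slot(b);
        a = get_slot(); b = get_slot(); c = get_slot();
        printf("%d %d %d\n", a, b, c);          /* 1 3 4 */
        return 0;
}

Shrinking (the 1/4-full heuristic in gdt_put_slot() above) would halve the capacity and run the compaction first, precisely so that no live slot is left beyond the new limit.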
+ */ + if (gdt_size > MINGDTSIZ && gdt_dyncount <= gdt_dynavail / 4) { + gdt_compact(); + gdt_shrink(); + } else { + gdt[slot].sd_xx3 = gdt_free; + gdt_free = slot; + } + + gdt_unlock(); +} + +void +tss_alloc(p) + struct proc *p; +{ + struct pcb *pcb = &p->p_addr->u_pcb; + int slot; + struct sys_segment_descriptor *gdt; + + gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START]; + + slot = gdt_get_slot(); +#if 0 + printf("tss_alloc: slot %d addr %p\n", slot, &gdt[slot]); +#endif + set_sys_segment(&gdt[slot], &pcb->pcb_tss, sizeof (struct x86_64_tss)-1, + SDT_SYS386TSS, SEL_KPL, 0); + p->p_md.md_tss_sel = GDYNSEL(slot, SEL_KPL); +#if 0 + printf("sel %x\n", p->p_md.md_tss_sel); + printf("lolimit %lx lobase %lx type %lx dpl %lx p %lx hilimit %lx\n" + "xx1 %lx gran %lx hibase %lx xx2 %lx zero %lx xx3 %lx pad %lx\n", + (unsigned long)gdt[slot].sd_lolimit, + (unsigned long)gdt[slot].sd_lobase, + (unsigned long)gdt[slot].sd_type, + (unsigned long)gdt[slot].sd_dpl, + (unsigned long)gdt[slot].sd_p, + (unsigned long)gdt[slot].sd_hilimit, + (unsigned long)gdt[slot].sd_xx1, + (unsigned long)gdt[slot].sd_gran, + (unsigned long)gdt[slot].sd_hibase, + (unsigned long)gdt[slot].sd_xx2, + (unsigned long)gdt[slot].sd_zero, + (unsigned long)gdt[slot].sd_xx3); +#endif +} + +void +tss_free(p) + struct proc *p; +{ + + gdt_put_slot(IDXDYNSEL(p->p_md.md_tss_sel)); +} + +void +ldt_alloc(pmap, ldt, len) + struct pmap *pmap; + char *ldt; + size_t len; +{ + int slot; + struct sys_segment_descriptor *gdt; + + gdt = (struct sys_segment_descriptor *)&gdtstore[DYNSEL_START]; + + slot = gdt_get_slot(); + set_sys_segment(&gdt[slot], ldt, len - 1, SDT_SYSLDT, SEL_KPL, 0); + simple_lock(&pmap->pm_lock); + pmap->pm_ldt_sel = GSEL(slot, SEL_KPL); + simple_unlock(&pmap->pm_lock); +} + +void +ldt_free(pmap) + struct pmap *pmap; +{ + int slot; + + simple_lock(&pmap->pm_lock); + slot = IDXDYNSEL(pmap->pm_ldt_sel); + simple_unlock(&pmap->pm_lock); + + gdt_put_slot(slot); +} diff --git a/sys/arch/x86_64/x86_64/locore.S b/sys/arch/x86_64/x86_64/locore.S new file mode 100644 index 000000000000..fbf123187c9e --- /dev/null +++ b/sys/arch/x86_64/x86_64/locore.S @@ -0,0 +1,1129 @@ +/* $NetBSD: locore.S,v 1.1 2001/06/19 00:21:16 fvdl Exp $ */ + +/* + * Copyright-o-rama! + */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)locore.s 7.3 (Berkeley) 5/13/91 + */ + +#include "opt_ddb.h" +#include "opt_user_ldt.h" +#include "opt_multiprocessor.h" +#include "opt_lockdebug.h" + +#include "assym.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * override user-land alignment before including asm.h + */ +#define ALIGN_DATA .align 4 +#define ALIGN_TEXT .align 4,0x90 /* 4-byte boundaries, NOP-filled */ +#define SUPERALIGN_TEXT .align 16,0x90 /* 16-byte boundaries better for 486 */ +#define _ALIGN_TEXT ALIGN_TEXT + +#include + + +/* XXX temporary kluge; these should not be here */ +/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ +#include + + +/* + * Initialization + */ + .data + + .globl _C_LABEL(cpu_id),_C_LABEL(cpu_vendor), _C_LABEL(cpu_brand_id) + .globl _C_LABEL(cpuid_level),_C_LABEL(cpu_feature) + .globl _C_LABEL(esym),_C_LABEL(boothowto) + .globl _C_LABEL(bootinfo),_C_LABEL(atdevbase) + .globl _C_LABEL(proc0paddr),_C_LABEL(curpcb),_C_LABEL(PTDpaddr) + .globl _C_LABEL(biosbasemem),_C_LABEL(biosextmem) + .globl _C_LABEL(gdtstore) +_C_LABEL(cpu): .long 0 # are we 386, 386sx, or 486, + # or Pentium, or.. +_C_LABEL(cpu_id): .long 0 # saved from `cpuid' instruction +_C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid' + # instruction +_C_LABEL(cpuid_level): .long -1 # max. 
level accepted by 'cpuid' + # instruction +_C_LABEL(cpu_vendor): .space 16 # vendor string returned by `cpuid' + # instruction +_C_LABEL(cpu_brand_id): .long 0 # brand ID from 'cpuid' instruction +_C_LABEL(esym): .quad 0 # ptr to end of syms +_C_LABEL(atdevbase): .quad 0 # location of start of iomem in virtual +_C_LABEL(proc0paddr): .quad 0 +_C_LABEL(PTDpaddr): .quad 0 # paddr of PTD, for libkvm +#ifndef REALBASEMEM +_C_LABEL(biosbasemem): .long 0 # base memory reported by BIOS +#else +_C_LABEL(biosbasemem): .long REALBASEMEM +#endif +#ifndef REALEXTMEM +_C_LABEL(biosextmem): .long 0 # extended memory reported by BIOS +#else +_C_LABEL(biosextmem): .long REALEXTMEM +#endif + +#define _RELOC(x) ((x) - KERNBASE) +#define RELOC(x) _RELOC(_C_LABEL(x)) + +gdt64: + .word gdt64_end-gdt64_start + .quad _RELOC(gdt64_start) +.align 64 + +gdt64_start: + .quad 0x0000000000000000 /* always empty */ + .quad 0x00af9a000000ffff /* kernel CS */ + .quad 0x00cf92000000ffff /* kernel DS */ +gdt64_end: + +farjmp64: + .long longmode-KERNBASE + .word GSEL(GCODE_SEL, SEL_KPL) + + .space 512 +tmpstk: + +/* + * Some hackage to deal with 64bit symbols in 32 bit mode. + * This may not be needed it things are cleaned up a little. + */ + + + .text + .globl _C_LABEL(kernel_text) + .set _C_LABEL(kernel_text),KERNTEXTOFF+VAR32_SIZE + .globl _C_LABEL(var32_frob) +_C_LABEL(var32_frob): +_reloc_boothowto: .long _RELOC(boothowto) +_reloc_bootinfo: .long _RELOC(bootinfo) +_reloc_biosextmem: .long _RELOC(biosextmem) +_reloc_biosbasemem: .long _RELOC(biosbasemem) +_reloc_cpuid_level: .long _RELOC(cpuid_level) +_reloc_cpu_vendor: .long _RELOC(cpu_vendor) +_reloc_cpu_id: .long _RELOC(cpu_id) +_reloc_cpu_feature: .long _RELOC(cpu_feature) +_reloc_cpu_brand_id: .long _RELOC(cpu_brand_id) +_reloc_tmpstk: .long _RELOC(tmpstk) +_reloc_end: .long _RELOC(end) +_reloc_etext: .long _RELOC(etext) +_reloc_esym: .long _RELOC(esym) +_reloc_PTDpaddr: .long _RELOC(PTDpaddr) +_reloc_gdt64: .long _RELOC(gdt64) +_reloc_farjmp64: .long _RELOC(farjmp64) + +#define RELOC_boothowto 0 +#define RELOC_bootinfo 4 +#define RELOC_biosextmem 8 +#define RELOC_biosbasemem 12 +#define RELOC_cpuid_level 16 +#define RELOC_cpu_vendor 20 +#define RELOC_cpu_id 24 +#define RELOC_cpu_feature 28 +#define RELOC_cpu_brand_id 32 +#define RELOC_tmpstk 36 +#define RELOC_end 40 +#define RELOC_etext 44 +#define RELOC_esym 48 +#define RELOC_PTDpaddr 52 +#define RELOC_gdt64 56 +#define RELOC_farjmp64 60 + + .space VAR32_SIZE - (. - _C_LABEL(var32_frob)) + +#define GET_RELOC_ADDR(var) \ + movl $(KERNTEXTOFF-KERNBASE),%ebp ; movl RELOC_/**/var(%ebp),%ebp + +.code32 + + .globl start +start: movw $0x1234,0x472 # warm boot + + /* + * Load parameters from stack + * (howto, [bootdev], bootinfo, esym, basemem, extmem). 
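The VAR32/RELOC_* table above exists because the 32-bit startup code cannot name 64-bit symbol addresses directly, so it indexes a table of pre-relocated 32-bit addresses by fixed byte offsets; that is all GET_RELOC_ADDR() does. A rough user-space model, with invented addresses and names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EX_KERNBASE     0xffffffff80000000ULL   /* assumed kernel VA base */

/* Where two symbols will live in the 64-bit kernel (made-up values). */
static uint64_t boothowto_va = 0xffffffff80234000ULL;
static uint64_t bootinfo_va  = 0xffffffff80234100ULL;

/* Fixed byte offsets, mirroring RELOC_boothowto, RELOC_bootinfo, ... */
enum { EX_RELOC_BOOTHOWTO = 0, EX_RELOC_BOOTINFO = 4 };

int
main(void)
{
        uint32_t table[2];
        uint8_t *base = (uint8_t *)table;
        uint32_t pa;

        /* Built in locore.S as the _reloc_* words: each entry is _RELOC(sym). */
        table[0] = (uint32_t)(boothowto_va - EX_KERNBASE);
        table[1] = (uint32_t)(bootinfo_va - EX_KERNBASE);

        /* GET_RELOC_ADDR(bootinfo) amounts to: table base + fixed offset. */
        memcpy(&pa, base + EX_RELOC_BOOTINFO, sizeof(pa));
        printf("bootinfo is loaded at %#x\n", (unsigned)pa);    /* 0x234100 */
        return 0;
}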
+ */ + movl 4(%esp),%eax + GET_RELOC_ADDR(boothowto) + movl %eax, (%ebp) + movl 12(%esp),%eax + + testl %eax, %eax + jz 1f + movl (%eax), %ebx /* number of entries */ + GET_RELOC_ADDR(bootinfo) + movl %ebp, %edx + addl $BOOTINFO_MAXSIZE,%ebp + movl %ebx, (%edx) + addl $4, %edx +2: + testl %ebx, %ebx + jz 1f + addl $4, %eax + movl (%eax), %ecx /* address of entry */ + pushl %edi + pushl %esi + pushl %eax + + movl (%ecx),%eax /* len */ + movl %edx,%edi + addl (%ecx), %edx /* update dest pointer */ + cmpl %ebp, %edx + jg 2f + movl %ecx,%esi + movl %eax,%ecx + rep + movsb + popl %eax + popl %esi + popl %edi + subl $1, %ebx + jmp 2b +2: /* cleanup for overflow case */ + popl %eax + popl %esi + popl %edi + GET_RELOC_ADDR(bootinfo) + movl %ebp, %edx + subl %ebx, (%edx) /* correct number of entries */ +1: + + movl 16(%esp),%eax + testl %eax,%eax + jz 1f + addl $KERNBASE_LO,%eax +1: GET_RELOC_ADDR(esym) + movl %eax,(%ebp) + movl $KERNBASE_HI,4(%ebp) + + GET_RELOC_ADDR(biosextmem) + movl (%ebp),%eax + testl %eax,%eax + jnz 1f + movl 20(%esp),%eax + movl %eax,(%ebp) +1: + GET_RELOC_ADDR(biosbasemem) + movl (%ebp),%eax + testl %eax,%eax + jnz 1f + movl 24(%esp),%eax + movl %eax,(%ebp) +1: + + /* First, reset the PSL. */ + pushl $PSL_MBO + popfl + + xorl %eax,%eax + cpuid + GET_RELOC_ADDR(cpuid_level) + movl %eax,(%ebp) + GET_RELOC_ADDR(cpu_vendor) + movl %ebx,(%ebp) + movl %edx,4(%ebp) + movl %ecx,8(%ebp) + movl $0, 12(%ebp) + + movl $1,%eax + cpuid + GET_RELOC_ADDR(cpu_id) + movl %eax,(%ebp) # store cpu_id and features + GET_RELOC_ADDR(cpu_feature) + movl %edx,(%ebp) + + /* Brand ID is bits 0-7 of %ebx */ + andl $255,%ebx + GET_RELOC_ADDR(cpu_brand_id) + movl %ebx,(%ebp) + + /* + * Finished with old stack; load new %esp now instead of later so we + * can trace this code without having to worry about the trace trap + * clobbering the memory test or the zeroing of the bss+bootstrap page + * tables. + * + * The boot program should check: + * text+data <= &stack_variable - more_space_for_stack + * text+data+bss+pad+space_for_page_tables <= end_of_memory + * Oops, the gdt is in the carcass of the boot program so clearing + * the rest of memory is still not possible. + */ + GET_RELOC_ADDR(tmpstk) + movl %ebp,%esp # bootstrap stack end location + +/* + * Virtual address space of kernel: + * + * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp | L2 ptp | L3 + * 0 1 2 3 + */ + +#define PROC0_PML4_OFF 0 +#define PROC0_STK_OFF (PROC0_PML4_OFF + NBPG) +#define PROC0_PTP3_OFF (PROC0_STK_OFF + UPAGES * NBPG) +#define PROC0_PTP2_OFF (PROC0_PTP3_OFF + NKL4_START_ENTRIES * NBPG) +#define PROC0_PTP1_OFF (PROC0_PTP2_OFF + NKL3_START_ENTRIES * NBPG) +#define TABLESIZE \ + ((NKL4_START_ENTRIES + NKL3_START_ENTRIES + NKL2_START_ENTRIES + 1 + UPAGES) \ + * NBPG) + + /* Find end of kernel image. */ + GET_RELOC_ADDR(end) + movl %ebp,%edi +#if defined(DDB) && !defined(SYMTAB_SPACE) + /* Save the symbols (if loaded). 
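The PROC0_*_OFF and TABLESIZE definitions above simply carve one contiguous region, placed after the kernel image, into the PML4 page, proc0's kernel stack and the initial page-table pages. The same arithmetic, using assumed example values for the page size, UPAGES and the NKL*_START_ENTRIES counts (not necessarily what this port uses):

#include <stdio.h>

#define EX_NBPG         4096    /* assumed page size */
#define EX_UPAGES       2       /* assumed kernel-stack pages for proc0 */
#define EX_NKL4         1       /* assumed NKL4_START_ENTRIES */
#define EX_NKL3         1       /* assumed NKL3_START_ENTRIES */
#define EX_NKL2         4       /* assumed NKL2_START_ENTRIES */

int
main(void)
{
        unsigned pml4_off = 0;
        unsigned stk_off  = pml4_off + EX_NBPG;
        unsigned ptp3_off = stk_off  + EX_UPAGES * EX_NBPG;
        unsigned ptp2_off = ptp3_off + EX_NKL4 * EX_NBPG;
        unsigned ptp1_off = ptp2_off + EX_NKL3 * EX_NBPG;
        unsigned tablesize =
            (EX_NKL4 + EX_NKL3 + EX_NKL2 + 1 + EX_UPAGES) * EX_NBPG;

        printf("PML4 %#x  stack %#x  L3 %#x  L2 %#x  L1 %#x\n",
            pml4_off, stk_off, ptp3_off, ptp2_off, ptp1_off);
        printf("TABLESIZE %#x (%u pages)\n", tablesize, tablesize / EX_NBPG);
        return 0;
}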
*/ + GET_RELOC_ADDR(esym) + movl (%ebp),%eax + testl %eax,%eax + jz 1f + subl $KERNBASE_LO,%eax /* XXX */ + movl %eax,%edi +1: +#endif + +#define fillkpt \ +1: movl %eax,(%ebx) ; /* store phys addr */ \ + movl $0,4(%ebx) ; /* upper 32 bits 0 */ \ + addl $8,%ebx ; /* next pte/pde */ \ + addl $NBPG,%eax ; /* next phys page */ \ + loop 1b ; \ + + /* Clear tables */ + GET_RELOC_ADDR(end) + movl %ebp,%esi + addl $PGOFSET,%esi + andl $~PGOFSET,%esi + + movl %esi,%edi + xorl %eax,%eax + cld + movl $TABLESIZE,%ecx + shrl $2,%ecx + rep + stosl + + leal (PROC0_PTP1_OFF)(%esi), %ebx + + /* + * Compute etext - KERNBASE. This can't be > 4G, or we can't deal + * with it anyway, since we can't load it in 32 bit mode. So use + * the bottom 32 bits. + */ + GET_RELOC_ADDR(etext) + movl %ebp,%edx /* should be $RELOC */ + subl $KERNBASE_LO,%edx + addl $PGOFSET,%edx + andl $~PGOFSET,%edx + + /* + * Skip the first MB. + */ + movl $KERNTEXTOFF_LO,%eax + movl %eax,%ecx + shrl $(PGSHIFT-3),%ecx /* ((n >> PGSHIFT) << 3) for # pdes */ + addl %ecx,%ebx + + /* Map kernel text read-only */ + movl %edx,%ecx + subl %eax,%ecx + shrl $PGSHIFT,%ecx + orl $(PG_V|PG_KR),%eax + fillkpt + + /* Map the data, BSS, and bootstrap tables read-write. */ + leal (PG_V|PG_KW)(%edx),%eax + movl $TABLESIZE,%ecx + addl %esi,%ecx /* %ecx = &end[TABLESIZE] */ + subl %edx,%ecx /* %ecx = %ecx - etext */ + shrl $PGSHIFT,%ecx + fillkpt + + /* Map ISA I/O mem (later atdevbase) */ + movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax + movl $(IOM_SIZE>>PGSHIFT),%ecx + fillkpt + + /* Set up level 2 pages */ + leal (PROC0_PTP2_OFF)(%esi),%ebx + leal (PROC0_PTP1_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL2_START_ENTRIES,%ecx + fillkpt + + /* Set up level 3 pages */ + leal (PROC0_PTP3_OFF)(%esi),%ebx + leal (PROC0_PTP2_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL3_START_ENTRIES,%ecx + fillkpt + + /* Set up top level entries for identity mapping */ + leal (PROC0_PML4_OFF)(%esi),%ebx + leal (PROC0_PTP3_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL3_START_ENTRIES,%ecx + fillkpt + + /* Set up top level entries for actual kernel mapping */ + leal (PROC0_PML4_OFF + PDIR_SLOT_KERN*8)(%esi),%ebx + leal (PROC0_PTP3_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl $NKL3_START_ENTRIES,%ecx + fillkpt + + /* Install recursive top level PDE */ + leal (PROC0_PML4_OFF + PDIR_SLOT_PTE*8)(%esi),%ebx + leal (PROC0_PML4_OFF)(%esi),%eax + orl $(PG_V|PG_KW),%eax + movl %eax,(%ebx) + movl $0, 4(%ebx) + + + /* Save phys. addr of PTD, for libkvm. */ + GET_RELOC_ADDR(PTDpaddr) + movl %esi,(%ebp) + movl $0,4(%ebp) + + /* + * Startup checklist: + * 1. Enable PAE (and SSE while here). + */ + movl %cr4,%eax + orl $(CR4_PAE|CR4_OSFXSR),%eax + movl %eax,%cr4 + + /* + * 2. Set Long Mode Enable in EFER. Also enable the + * syscall extensions. + */ + movl $MSR_EFER,%ecx + rdmsr + xorl %eax,%eax /* XXX */ + orl $(EFER_LME|EFER_SCE),%eax + wrmsr + + /* + * 3. Load %cr3 with pointer to PML4. + */ + movl %esi,%eax + movl %eax,%cr3 + + /* + * 4. Enable paging and the rest of it. + */ + movl %cr0,%eax + orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP),%eax + movl %eax,%cr0 + jmp compat +compat: + + /* + * 5. + * Not quite done yet, we're now in a compatibility segment, + * in legacy mode. We must jump to a long mode segment. + * Need to set up a temporary GDT with a long mode segment + * in it to do that. + */ + + GET_RELOC_ADDR(gdt64) + movl %ebp,%eax + lgdt (%eax) + GET_RELOC_ADDR(farjmp64) + movl %ebp,%eax + ljmp *(%eax) + +.code64 +longmode: + /* + * 6. 
+ * Finally, we're in long mode. However, we're still + * in the identity mapped area (could not jump out + * of that earlier because it would have been a > 32bit + * jump). We can do that now, so here we go. + */ + movabsq $longmode_hi,%rax + jmp *%rax +longmode_hi: + /* + * We have arrived. + * There's no need anymore for the identity mapping in low + * memory, remove it. + */ + movq $NKL4_START_ENTRIES,%rcx + leaq (PROC0_PML4_OFF)(%rsi),%rbx # old, phys address of PML4 + movq $KERNBASE,%r8 + addq %r8, %rbx # new, virtual adress of PML4 +1: movq $0,(%rbx) + addq $8,%rbx + loop 1b + + /* Relocate atdevbase. */ + movq $(TABLESIZE+KERNBASE),%rdx + addq %rsi,%rdx + movq %rdx,_C_LABEL(atdevbase)(%rip) + + /* Set up bootstrap stack. */ + leaq (PROC0_STK_OFF)(%rsi),%rax + addq %r8,%rax + movq %rax,_C_LABEL(proc0paddr)(%rip) + leaq (USPACE-FRAMESIZE)(%rax),%rsp + movq %rsi,PCB_CR3(%rax) # pcb->pcb_cr3 + xorq %rbp,%rbp # mark end of frames + + leaq (TABLESIZE)(%rsi),%rdi # skip past stack and page tables + call _C_LABEL(init_x86_64) + + /* Clear segment registers; always null in proc0. */ + xorq %rax,%rax + movq $MSR_FSBASE,%rcx + wrmsr + movq $MSR_GSBASE,%rcx + wrmsr + + call _C_LABEL(main) + +NENTRY(proc_trampoline) + movq %r13,%rdi + call *%r12 + INTRFASTEXIT + /* NOTREACHED */ + +/*****************************************************************************/ + +/* + * Signal trampoline; copied to top of user stack. + * XXXfvdl might as well do away with the frame here. + */ +NENTRY(sigcode) + movq SIGF_SIGNUM(%rsp),%rdi # handler(sig, code, scp) + movq SIGF_CODE(%rsp),%rsi + movq SIGF_SCP(%rsp),%rdx + call *SIGF_HANDLER(%rsp) + leaq SIGF_SC(%rsp),%rax # scp (the call may have clobbered the + # copy at SIGF_SCP(%esp)) + movl SC_FS(%rax),%ecx # XXXfvdl these are wrong + movl SC_GS(%rax),%edx + movl %ecx,%fs + movl %edx,%gs + movq %rax,%rdi + pushq %rax + movq $SYS___sigreturn14,%rax + int $0x80 + movq $SYS_exit,%rax + int $0x80 + .globl _C_LABEL(esigcode) +_C_LABEL(esigcode): + +/* + * void lgdt(struct region_descriptor *rdp); + * Change the global descriptor table. + */ +NENTRY(lgdt) + /* Reload the descriptor table. */ + movq %rdi,%rax + lgdt (%rax) + /* Flush the prefetch q. */ + jmp 1f + nop +1: /* Reload "stale" selectors. */ + movl $GSEL(GDATA_SEL, SEL_KPL),%eax + movl %eax,%ds + movl %eax,%es + movl %eax,%ss + /* Reload code selector by doing intersegment return. */ + popq %rax + pushq $GSEL(GCODE_SEL, SEL_KPL) + pushq %rax + lretq + +ENTRY(setjmp) + /* + * Only save registers that must be preserved across function + * calls according to the ABI (%rbx, %rsp, %rbp, %r12-%r15) + * and %rip. + */ + movq %rdi,%rax + movq %rbx,(%rax) + movq %rsp,8(%rax) + movq %rbp,16(%rax) + movq %r12,24(%rax) + movq %r13,32(%rax) + movq %r14,40(%rax) + movq %r15,48(%rax) + movq (%rsp),%rdx + movq %rdx,56(%rax) + xorl %eax,%eax + ret + +ENTRY(longjmp) + movq %rdi,%rax + movq (%rax),%rbx + movq 8(%rax),%rsp + movq 16(%rax),%rbp + movq 24(%rax),%r12 + movq 32(%rax),%r13 + movq 40(%rax),%r14 + movq 48(%rax),%r15 + movq 56(%rax),%rdx + movq %rdx,(%rsp) + xorl %eax,%eax + incl %eax + ret + +/*****************************************************************************/ + +/* + * The following primitives manipulate the run queues. + * _whichqs tells which of the 32 queues _qs + * have processes in them. Setrq puts processes into queues, Remrq + * removes them from queues. 
The running process is on no queue, + * other processes are on a queue related to p->p_pri, divided by 4 + * actually to shrink the 0-127 range of priorities into the 32 available + * queues. + */ + .globl _C_LABEL(sched_whichqs),_C_LABEL(sched_qs) + .globl _C_LABEL(uvmexp),_C_LABEL(panic) + +/* + * setrunqueue(struct proc *p); + * Insert a process on the appropriate queue. Should be called at splclock(). + */ +NENTRY(setrunqueue) +#ifdef DIAGNOSTIC + cmpq $0,P_BACK(%rdi) # should not be on q already + jne 1f + cmpq $0,P_WCHAN(%rdi) + jne 1f + cmpb $SRUN,P_STAT(%rdi) + jne 1f +#endif /* DIAGNOSTIC */ + movzbl P_PRIORITY(%rdi),%edx + shrl $2,%edx + btsl %edx,_C_LABEL(sched_whichqs)(%rip) # set q full bit + + shlq $4,%rdx + leaq _C_LABEL(sched_qs)(%rip),%rax # sched_qs[0] + addq %rax,%rdx # sched_qs[queue] + + movq P_BACK(%rdx),%rcx + movq %rdx,P_FORW(%rdi) # link process on tail of q + movq %rdi,P_BACK(%rdx) + movq %rdi,P_FORW(%rcx) + movq %rcx,P_BACK(%rdi) + ret +#ifdef DIAGNOSTIC +1: movabsq $2f,%rdi + call _C_LABEL(panic) + /* NOTREACHED */ +2: .asciz "setrunqueue" +#endif /* DIAGNOSTIC */ + +/* + * remrunqueue(struct proc *p); + * Remove a process from its queue. Should be called at splclock(). + */ +NENTRY(remrunqueue) + movzbl P_PRIORITY(%rdi),%eax +#ifdef DIAGNOSTIC + shrl $2,%eax + btl %eax,_C_LABEL(sched_whichqs)(%rip) + jnc 1f +#endif /* DIAGNOSTIC */ + movq P_BACK(%rdi),%rdx # unlink process + movq $0,P_BACK(%rdi) # zap reverse link to indicate off list + movq P_FORW(%rdi),%rdi + movq %rdi,P_FORW(%rdx) + movq %rdx,P_BACK(%rdi) + cmpq %rdi,%rdx # q still has something? + jne 2f +#ifndef DIAGNOSTIC + shrl $2,%eax +#endif + btrl %eax,_C_LABEL(sched_whichqs)(%rip) # no; clear bit +2: ret +#ifdef DIAGNOSTIC +1: movabsq $3f,%rdi + call _C_LABEL(panic) + /* NOTREACHED */ +3: .asciz "remrunqueue" +#endif /* DIAGNOSTIC */ + +#if NAPM > 0 + .globl _C_LABEL(apm_cpu_idle),_C_LABEL(apm_cpu_busy) +#endif +/* + * When no processes are on the runq, cpu_switch() branches to here to wait for + * something to come ready. + */ +ENTRY(idle) + /* + * When we get here, interrupts are off (via cli) and + * sched_lock is held. + */ + movl _C_LABEL(sched_whichqs)(%rip),%ecx + testl %ecx,%ecx + jnz sw1 +#if defined(LOCKDEBUG) + call _C_LABEL(sched_unlock_idle) +#endif + sti + +#if 0 + /* Try to zero some pages. */ + leaq _C_LABEL(uvm)(%rip),%rax + movl UVM_PAGE_IDLE_ZERO(%rax),%ecx + testl %ecx,%ecx + jz 1f + call _C_LABEL(uvm_pageidlezero) +1: +#endif + hlt + cli +#if defined(LOCKDEBUG) + call _C_LABEL(sched_lock_idle) +#endif + jmp _C_LABEL(idle) + +#ifdef DIAGNOSTIC +NENTRY(switch_error1) + movabsq $1f,%rdi + call _C_LABEL(panic) + /* NOTREACHED */ +1: .asciz "cpu_switch 1" +NENTRY(switch_error2) + movabsq $1f,%rdi + call _C_LABEL(panic) + /* NOTREACHED */ +1: .asciz "cpu_switch 2" +NENTRY(switch_error3) + movabsq $1f,%rdi + call _C_LABEL(panic) + /* NOTREACHED */ +1: .asciz "cpu_switch 3" +#endif /* DIAGNOSTIC */ + +/* + * void cpu_switch(struct proc *) + * Find a runnable process and switch to it. Wait if necessary. If the new + * process is the same as the old one, we short-circuit the context save and + * restore. + */ +ENTRY(cpu_switch) + pushq %rbx + pushq %rbp + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + movl _C_LABEL(cpl)(%rip),%r11d + pushq %r11 + + movq _C_LABEL(curproc)(%rip),%r13 + + /* + * Clear curproc so that we don't accumulate system time while idle. 
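The run-queue bookkeeping used by setrunqueue() and the queue scan in cpu_switch() above boils down to a 32-bit occupancy mask plus priority >> 2 as the queue index. A simplified user-space model (names invented, the queues reduced to counters):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned whichqs;        /* plays the role of sched_whichqs */
static int qlen[32];            /* stand-in for the sched_qs[] lists */

static void
enqueue(int priority)           /* cf. setrunqueue() */
{
        int q = priority >> 2;  /* fold priorities 0..127 into 32 queues */

        whichqs |= 1u << q;     /* the btsl: mark queue non-empty */
        qlen[q]++;
}

static int
pick(void)                      /* cf. the bsfl/unlink in cpu_switch() */
{
        int q;

        if (whichqs == 0)
                return -1;      /* nothing runnable: idle() */
        q = ffs(whichqs) - 1;   /* lowest set bit = best priority */
        if (--qlen[q] == 0)
                whichqs &= ~(1u << q);  /* queue drained: clear its bit */
        return q;
}

int
main(void)
{
        int i;

        enqueue(100);           /* queue 25 */
        enqueue(17);            /* queue 4 */
        enqueue(18);            /* queue 4 again */
        for (i = 0; i < 4; i++)
                printf("%d ", pick());
        printf("\n");           /* 4 4 25 -1 */
        return 0;
}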
+ * This also insures that schedcpu() will move the old process to + * the correct queue if it happens to get called from the spllower() + * below and changes the priority. (See corresponding comment in + * userret()). + */ + movq $0,_C_LABEL(curproc)(%rip) + + +#if defined(LOCKDEBUG) + /* Release the sched_lock before processing interrupts. */ + call _C_LABEL(sched_unlock_idle) +#endif + + movl $0,_C_LABEL(cpl)(%rip) # spl0() + call _C_LABEL(Xspllower) # process pending interrupts + +switch_search: + /* + * First phase: find new process. + * + * Registers: + * %rax - queue head, scratch, then zero + * %r8 - queue number + * %ecx - cached value of whichqs + * %rdx - next process in queue + * %r13 - old process + * %r12 - new process + */ + + /* Lock the scheduler. */ + cli # splhigh doesn't do a cli +#if defined(LOCKDEBUG) + call _C_LABEL(sched_lock_idle) +#endif + + /* Wait for new process. */ + movl _C_LABEL(sched_whichqs)(%rip),%ecx + +sw1: bsfl %ecx,%r8d # find a full q + jz _C_LABEL(idle) # if none, idle + movq %r8,%r9 + + shlq $4, %r9 + leaq _C_LABEL(sched_qs)(%rip),%rax + addq %r9,%rax + /* movq (%rax),%rax */ + + movq P_FORW(%rax),%r12 # unlink from front of process q +#ifdef DIAGNOSTIC + cmpq %r12,%rax # linked to self (i.e. nothing queued)? + je _C_LABEL(switch_error1) # not possible +#endif /* DIAGNOSTIC */ + movq P_FORW(%r12),%rdx + movq %rdx,P_FORW(%rax) + movq %rax,P_BACK(%rdx) + + cmpq %rdx,%rax # q empty? + jne 3f + + btrl %r8d,%ecx # yes, clear to indicate empty + movl %ecx,_C_LABEL(sched_whichqs)(%rip) # update q status + +3: /* We just did it. */ + xorq %rax,%rax + movl %eax,_C_LABEL(want_resched)(%rip) + +#ifdef DIAGNOSTIC + cmpq %rax,P_WCHAN(%r12) # Waiting for something? + je 1f + xchgw %bx,%bx +#if 0 + jne _C_LABEL(switch_error2) # Yes; shouldn't be queued. +#endif +1: cmpb $SRUN,P_STAT(%r12) # In run state? + jne _C_LABEL(switch_error3) # No; shouldn't be queued. +#endif /* DIAGNOSTIC */ + + /* Isolate process. XXX Is this necessary? */ + movq %rax,P_BACK(%r12) + +#if defined(LOCKDEBUG) + /* + * Unlock the sched_lock, but leave interrupts off, for now. + */ + call _C_LABEL(sched_unlock_idle) +#endif + +#if defined(MULTIPROCESSOR) + /* + * p->p_cpu = curcpu() + * XXXSMP + */ +#endif + + /* Record new process. */ + movb $SONPROC,P_STAT(%r12) # p->p_stat = SONPROC + movq %r12,_C_LABEL(curproc)(%rip) + + /* It's okay to take interrupts here. */ + sti + + /* Skip context switch if same process. */ + cmpq %r12,%r13 + je switch_return + + /* If old process exited, don't bother. */ + testq %r13,%r13 + jz switch_exited + + /* + * Second phase: save old context. + * + * Registers: + * %rax, %rcx - scratch + * %r13 - old process, then old pcb + * %r12 - new process + */ + + movq P_ADDR(%r13),%r13 + + /* Save segment registers. */ + movl %fs,%eax + movl %gs,%ecx + movl %eax,PCB_FS(%r13) + movl %ecx,PCB_GS(%r13) + + /* Save stack pointers. */ + movq %rsp,PCB_RSP(%r13) + movq %rbp,PCB_RBP(%r13) + +switch_exited: + /* + * Third phase: restore saved context. + * + * Registers: + * %rax, %rcx, %rdx - scratch + * %r13 - new pcb + * %r12 - new process + */ + + /* No interrupts while loading new state. */ + cli + movq P_ADDR(%r12),%r13 + + /* Restore stack pointers. */ + movq PCB_RSP(%r13),%rsp + movq PCB_RBP(%r13),%rbp + +#if 0 + /* Don't bother with the rest if switching to a system process. */ + testl $P_SYSTEM,P_FLAG(%r12) + jnz switch_restored +#endif + + /* + * Activate the address space. 
We're curproc, so %cr3 will + * be reloaded, but we're not yet curpcb, so the LDT won't + * be reloaded, although the PCB copy of the selector will + * be refreshed from the pmap. + */ + movq %r12,%rdi + call _C_LABEL(pmap_activate) + + /* Load TSS info. */ + movq _C_LABEL(gdtstore)(%rip),%rax + movl P_MD_TSS_SEL(%r12),%edx + + /* Switch TSS. Reset "task busy" flag before */ + andl $~0x0200,4(%rax,%rdx, 1) + ltr %dx + + /* Restore segment registers. */ + movl PCB_FS(%r13),%eax + movl PCB_GS(%r13),%ecx + movl %eax,%fs + movl %ecx,%gs + +switch_restored: + /* Restore cr0 (including FPU state). */ + movl PCB_CR0(%r13),%ecx + movq %rcx,%cr0 + + /* Record new pcb. */ + movq %r13,_C_LABEL(curpcb)(%rip) + + /* Interrupts are okay again. */ + sti + +switch_return: + /* + * Restore old cpl from stack. Note that this is always an increase, + * due to the spl0() on entry. + */ + popq %r11 + movl %r11d,_C_LABEL(cpl)(%rip) + + movq %r12,%rax # return (p); + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbp + popq %rbx + ret + +/* + * switch_exit(struct proc *p); + * Switch to proc0's saved context and deallocate the address space and kernel + * stack for p. Then jump into cpu_switch(), as if we were in proc0 all along. + */ + .globl _C_LABEL(proc0),_C_LABEL(uvmspace_free),_C_LABEL(kernel_map) + .globl _C_LABEL(uvm_km_free),_C_LABEL(tss_free) +ENTRY(switch_exit) + leaq _C_LABEL(proc0)(%rip),%rbx + + /* In case we fault... */ + movq $0,_C_LABEL(curproc)(%rip) + + /* Restore proc0's context. */ + cli + movq P_ADDR(%rbx),%rsi + + /* Restore stack pointers. */ + movq PCB_RSP(%rsi),%rsp + movq PCB_RBP(%rsi),%rbp + + /* Load TSS info. */ + movq _C_LABEL(gdtstore)(%rip),%rax + movl P_MD_TSS_SEL(%rbx),%edx + + /* Switch address space. */ + movq PCB_CR3(%rsi),%rcx + movq %rcx,%cr3 + + /* Switch TSS. */ + andl $~0x0200,4-SEL_KPL(%rax,%rdx,1) + ltr %dx + + /* We're always in the kernel, so we don't need the LDT. */ + + /* Clear segment registers; always null in proc0. */ + xorl %ecx,%ecx + movl %ecx,%fs + movl %ecx,%gs + + /* Restore cr0 (including FPU state). */ + movl PCB_CR0(%rsi),%ecx + movq %rcx,%cr0 + + /* Record new pcb. */ + movq %rsi,_C_LABEL(curpcb)(%rip) + + /* Interrupts are okay again. */ + sti + + /* + * Schedule the dead process's vmspace and stack to be freed. + */ + call _C_LABEL(exit2) + + /* Jump into cpu_switch() with the right state. */ + movq %rbx,%r13 + movq $0,_C_LABEL(curproc)(%rip) + jmp switch_search + +/* + * savectx(struct pcb *pcb); + * Update pcb, saving current processor state. + */ +ENTRY(savectx) + /* Save segment registers. */ + movl %fs,%eax + movl %gs,%ecx + movl %eax,PCB_FS(%rdi) + movl %ecx,PCB_GS(%rdi) + + /* Save stack pointers. */ + movq %rsp,PCB_RSP(%rdi) + movq %rbp,PCB_RBP(%rdi) + + ret diff --git a/sys/arch/x86_64/x86_64/machdep.c b/sys/arch/x86_64/x86_64/machdep.c new file mode 100644 index 000000000000..2f6afeae3844 --- /dev/null +++ b/sys/arch/x86_64/x86_64/machdep.c @@ -0,0 +1,1676 @@ +/* $NetBSD: machdep.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/*- + * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace + * Simulation Facility, NASA Ames Research Center. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)machdep.c 7.4 (Berkeley) 6/3/91 + */ + +#include "opt_ddb.h" +#include "opt_compat_netbsd.h" +#include "opt_cpureset_delay.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef KGDB +#include +#endif + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef DDB +#include +#include +#endif + +#include "isa.h" +#include "isadma.h" + +/* the following is used externally (sysctl_hw) */ +char machine[] = "x86_64"; /* cpu "architecture" */ +char machine_arch[] = "x86_64"; /* machine == machine_arch */ + +u_int cpu_serial[3]; +char cpu_model[] = "VirtuHammer x86-64"; + +char bootinfo[BOOTINFO_MAXSIZE]; + +/* Our exported CPU info; we have only one right now. */ +struct cpu_info cpu_info_store; + +struct bi_devmatch *x86_64_alldisks = NULL; +int x86_64_ndisks = 0; + +#ifdef CPURESET_DELAY +int cpureset_delay = CPURESET_DELAY; +#else +int cpureset_delay = 2000; /* default to 2s */ +#endif + + +int physmem; +u_int64_t dumpmem_low; +u_int64_t dumpmem_high; +int boothowto; +int cpu_class; + +#define CPUID2MODEL(cpuid) (((cpuid) >> 4) & 15) + +vaddr_t msgbuf_vaddr; +paddr_t msgbuf_paddr; + +vaddr_t idt_vaddr; +paddr_t idt_paddr; + +struct vm_map *exec_map = NULL; +struct vm_map *mb_map = NULL; +struct vm_map *phys_map = NULL; + +extern paddr_t avail_start, avail_end; + +/* + * Size of memory segments, before any memory is stolen. + */ +phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; +int mem_cluster_cnt; + +/* + * The number of CPU cycles in one second. + */ +u_int64_t cpu_tsc_freq; + +int cpu_dump __P((void)); +int cpu_dumpsize __P((void)); +u_long cpu_dump_mempagecnt __P((void)); +void dumpsys __P((void)); +void init_x86_64 __P((paddr_t)); + +/* + * Machine-dependent startup code + */ +void +cpu_startup() +{ + caddr_t v, v2; + unsigned long sz; + int x; + vaddr_t minaddr, maxaddr; + vsize_t size; + char buf[160]; /* about 2 line */ + char pbuf[9]; + + /* + * Initialize error message buffer (et end of core). 
+ */ + msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_64_round_page(MSGBUFSIZE)); + if (msgbuf_vaddr == 0) + panic("failed to valloc msgbuf_vaddr"); + + /* msgbuf_paddr was init'd in pmap */ + for (x = 0; x < btoc(MSGBUFSIZE); x++) + pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE, + msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE); + + initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE)); + + printf("%s", version); + + printf("cpu0: %s", cpu_model); + if (cpu_tsc_freq != 0) + printf(", %ld.%02ld MHz", (cpu_tsc_freq + 4999) / 1000000, + ((cpu_tsc_freq + 4999) / 10000) % 100); + printf("\n"); + + if ((cpu_feature & CPUID_MASK1) != 0) { + bitmask_snprintf(cpu_feature, CPUID_FLAGS1, + buf, sizeof(buf)); + printf("cpu0: features %s\n", buf); + } + if ((cpu_feature & CPUID_MASK2) != 0) { + bitmask_snprintf(cpu_feature, CPUID_FLAGS2, + buf, sizeof(buf)); + printf("cpu0: features %s\n", buf); + } + + if (cpuid_level >= 3 && ((cpu_feature & CPUID_PN) != 0)) { + printf("cpu0: serial number %04X-%04X-%04X-%04X-%04X-%04X\n", + cpu_serial[0] / 65536, cpu_serial[0] % 65536, + cpu_serial[1] / 65536, cpu_serial[1] % 65536, + cpu_serial[2] / 65536, cpu_serial[2] % 65536); + } + + format_bytes(pbuf, sizeof(pbuf), ptoa(physmem)); + printf("total memory = %s\n", pbuf); + + /* + * Find out how much space we need, allocate it, + * and then give everything true virtual addresses. + */ + sz = (unsigned long)allocsys(NULL, NULL); + if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0) + panic("startup: no room for tables"); + v2 = allocsys(v, NULL); + if ((v2 - v) != sz) + panic("startup: table size inconsistency"); + + /* + * Allocate virtual address space for the buffers. The area + * is not managed by the VM system. + */ + size = MAXBSIZE * nbuf; + if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size), + NULL, UVM_UNKNOWN_OFFSET, 0, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, + UVM_ADV_NORMAL, 0)) != 0) + panic("cpu_startup: cannot allocate VM for buffers"); + minaddr = (vaddr_t)buffers; + if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { + /* don't want to alloc more physical mem than needed */ + bufpages = btoc(MAXBSIZE) * nbuf; + } + + /* + * XXX We defer allocation of physical pages for buffers until + * XXX after autoconfiguration has run. We must do this because + * XXX on system with large amounts of memory or with large + * XXX user-configured buffer caches, the buffer cache will eat + * XXX up all of the lower 16M of RAM. This prevents ISA DMA + * XXX maps from allocating bounce pages. + * + * XXX Note that nothing can use buffer cache buffers until after + * XXX autoconfiguration completes!! + * + * XXX This is a hack, and needs to be replaced with a better + * XXX solution! --thorpej@netbsd.org, December 6, 1997 + */ + + /* + * Allocate a submap for exec arguments. This map effectively + * limits the number of processes exec'ing at any time. + */ + exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); + + /* + * Allocate a submap for physio + */ + phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + VM_PHYS_SIZE, 0, FALSE, NULL); + + /* + * Finally, allocate mbuf cluster submap. + */ + mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL); + + /* + * XXX Buffer cache pages haven't yet been allocated, so + * XXX we need to account for those pages when printing + * XXX the amount of free memory. 
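The MHz line printed earlier in cpu_startup() rounds the TSC frequency to the nearest 10 kHz and formats it as MHz with two decimals; a quick stand-alone check of that arithmetic, with a made-up frequency:

#include <stdio.h>

int
main(void)
{
        unsigned long long hz = 731234567ULL;   /* hypothetical ~731 MHz part */

        printf("%llu.%02llu MHz\n",
            (hz + 4999) / 1000000,
            ((hz + 4999) / 10000) % 100);       /* prints 731.23 MHz */
        return 0;
}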
+ */ + format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages)); + printf("avail memory = %s\n", pbuf); + format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE); + printf("using %d buffers containing %s of memory\n", nbuf, pbuf); + + /* Safe for i/o port / memory space allocation to use malloc now. */ + x86_64_bus_space_mallocok(); +} + +/* + * Set up proc0's TSS and LDT. + */ +void +x86_64_proc0_tss_ldt_init() +{ + struct pcb *pcb; + int x; + + gdt_init(); + curpcb = pcb = &proc0.p_addr->u_pcb; + pcb->pcb_flags = 0; + pcb->pcb_tss.tss_iobase = + (u_int16_t)((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss); + for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++) + pcb->pcb_iomap[x] = 0xffffffff; + + pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = + GSYSSEL(GLDT_SEL, SEL_KPL); + pcb->pcb_cr0 = rcr0(); + pcb->pcb_tss.tss_rsp0 = (u_int64_t)proc0.p_addr + USPACE - 16; + tss_alloc(&proc0); + + ltr(proc0.p_md.md_tss_sel); + lldt(pcb->pcb_ldt_sel); + + proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_rsp0 - 1; +} + +/* + * XXX Finish up the deferred buffer cache allocation and initialization. + */ +void +x86_64_bufinit() +{ + int i, base, residual; + + base = bufpages / nbuf; + residual = bufpages % nbuf; + for (i = 0; i < nbuf; i++) { + vsize_t curbufsize; + vaddr_t curbuf; + struct vm_page *pg; + + /* + * Each buffer has MAXBSIZE bytes of VM space allocated. Of + * that MAXBSIZE space, we allocate and map (base+1) pages + * for the first "residual" buffers, and then we allocate + * "base" pages for the rest. + */ + curbuf = (vaddr_t) buffers + (i * MAXBSIZE); + curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base); + + while (curbufsize) { + /* + * Attempt to allocate buffers from the first + * 16M of RAM to avoid bouncing file system + * transfers. + */ + pg = uvm_pagealloc_strat(NULL, 0, NULL, 0, + UVM_PGA_STRAT_FALLBACK, VM_FREELIST_FIRST16); + if (pg == NULL) + panic("cpu_startup: not enough memory for " + "buffer cache"); + pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg), + VM_PROT_READ|VM_PROT_WRITE); + curbuf += PAGE_SIZE; + curbufsize -= PAGE_SIZE; + } + } + + /* + * Set up buffers, so they can be used to read disk labels. + */ + bufinit(); +} + + +/* + * machine dependent system variables. + */ +int +cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + dev_t consdev; + struct btinfo_bootpath *bibp; + + /* all sysctl names at this level are terminal */ + if (namelen != 1) + return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case CPU_CONSDEV: + if (cn_tab != NULL) + consdev = cn_tab->cn_dev; + else + consdev = NODEV; + return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev, + sizeof consdev)); + + case CPU_BOOTED_KERNEL: + bibp = lookup_bootinfo(BTINFO_BOOTPATH); + if(!bibp) + return(ENOENT); /* ??? */ + return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath)); + case CPU_DISKINFO: + if (x86_64_alldisks == NULL) + return (ENOENT); + return (sysctl_rdstruct(oldp, oldlenp, newp, x86_64_alldisks, + sizeof (struct disklist) + + (x86_64_ndisks - 1) * sizeof (struct nativedisk_info))); + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ +} + +/* + * Send an interrupt to process. + * + * Stack is set up to allow sigcode stored + * in u. to call routine, followed by kcall + * to sigreturn routine below. After sigreturn + * resets the signal mask, the stack, and the + * frame pointer, it returns to the user + * specified pc, psl. 
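A sketch of how sendsig() below chooses where the signal frame goes: start from either the alternate signal stack or the interrupted user %rsp, round down to a 16-byte boundary (needed for fxsave and by the ABI), and place the frame just below that. The frame type here is an invented stand-in, not the real struct sigframe.

#include <stdio.h>
#include <stdint.h>

struct toy_sigframe {           /* invented; not the real struct sigframe */
        uint64_t signum, code;
        uint64_t fpstate[64];   /* stands in for struct fxsave64 (512 bytes) */
};

static struct toy_sigframe *
place_frame(uintptr_t user_rsp, uintptr_t altstack_top, int onstack)
{
        uintptr_t sp = onstack ? altstack_top : user_rsp;

        sp &= ~(uintptr_t)15;   /* round down for fxsave and the ABI */
        /* sizeof(struct toy_sigframe) is a multiple of 16, so this stays aligned */
        return (struct toy_sigframe *)sp - 1;
}

int
main(void)
{
        struct toy_sigframe *fp = place_frame(0x7fffe345UL, 0, 0);

        printf("frame at %#lx, aligned: %d\n",
            (unsigned long)(uintptr_t)fp, (int)(((uintptr_t)fp & 15) == 0));
        return 0;
}

The real routine additionally copies the fxsave area into the frame only when MDP_USEDFPU is set, and kills the process with SIGILL if the copyout of the frame fails.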
+ */ +void +sendsig(catcher, sig, mask, code) + sig_t catcher; + int sig; + sigset_t *mask; + u_long code; +{ + struct proc *p = curproc; + struct trapframe *tf; + char *sp; + struct sigframe *fp, frame; + int onstack; + size_t tocopy; + + tf = p->p_md.md_regs; + + /* Do we need to jump onto the signal stack? */ + onstack = + (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && + (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0; + + /* Allocate space for the signal handler context. */ + if (onstack) + sp = ((caddr_t)p->p_sigctx.ps_sigstk.ss_sp + + p->p_sigctx.ps_sigstk.ss_size); + else + sp = (caddr_t)tf->tf_rsp; + /* + * Round down the stackpointer to a multiple of 16 for + * fxsave and the ABI. + */ + sp = (char *)((unsigned long)sp & ~15); + if (p->p_md.md_flags & MDP_USEDFPU) { + frame.sf_fpp = &frame.sf_fp; + memcpy(frame.sf_fpp, &p->p_addr->u_pcb.pcb_savefpu, + sizeof (struct fxsave64)); + tocopy = sizeof (struct sigframe); + } else { + frame.sf_fpp = NULL; + tocopy = sizeof (struct sigframe) - sizeof (struct fxsave64); + } + fp = (struct sigframe *)sp - 1; + + /* Build stack frame for signal trampoline. */ + frame.sf_signum = sig; + frame.sf_code = code; + frame.sf_scp = &fp->sf_sc; + frame.sf_handler = catcher; + + /* Save register context. */ + __asm("movl %%gs,%0" : "=r" (frame.sf_sc.sc_gs)); + __asm("movl %%fs,%0" : "=r" (frame.sf_sc.sc_fs)); +#if 0 + frame.sf_sc.sc_es = tf->tf_es; + frame.sf_sc.sc_ds = tf->tf_ds; +#endif + frame.sf_sc.sc_eflags = tf->tf_eflags; + frame.sf_sc.sc_r15 = tf->tf_r15; + frame.sf_sc.sc_r14 = tf->tf_r14; + frame.sf_sc.sc_r13 = tf->tf_r13; + frame.sf_sc.sc_r12 = tf->tf_r12; + frame.sf_sc.sc_r11 = tf->tf_r11; + frame.sf_sc.sc_r10 = tf->tf_r10; + frame.sf_sc.sc_r9 = tf->tf_r9; + frame.sf_sc.sc_r8 = tf->tf_r8; + frame.sf_sc.sc_rdi = tf->tf_rdi; + frame.sf_sc.sc_rsi = tf->tf_rsi; + frame.sf_sc.sc_rbp = tf->tf_rbp; + frame.sf_sc.sc_rbx = tf->tf_rbx; + frame.sf_sc.sc_rdx = tf->tf_rdx; + frame.sf_sc.sc_rcx = tf->tf_rcx; + frame.sf_sc.sc_rax = tf->tf_rax; + frame.sf_sc.sc_rip = tf->tf_rip; + frame.sf_sc.sc_cs = tf->tf_cs; + frame.sf_sc.sc_rsp = tf->tf_rsp; + frame.sf_sc.sc_ss = tf->tf_ss; + frame.sf_sc.sc_trapno = tf->tf_trapno; + frame.sf_sc.sc_err = tf->tf_err; + + /* Save signal stack. */ + frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK; + + /* Save signal mask. */ + frame.sf_sc.sc_mask = *mask; + + if (copyout(&frame, fp, tocopy) != 0) { + /* + * Process has trashed its stack; give it an illegal + * instruction to halt it in its tracks. + */ + sigexit(p, SIGILL); + /* NOTREACHED */ + } + + /* + * Build context to run handler in. + */ + __asm("movl %0,%%gs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL))); + __asm("movl %0,%%fs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL))); +#if 0 + tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); + tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); +#endif + tf->tf_rip = (u_int64_t)p->p_sigctx.ps_sigcode; + tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL); + tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC); + tf->tf_rsp = (u_int64_t)fp; + tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL); + + /* Remember that we're now on the signal stack. */ + if (onstack) + p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK; +} + +/* + * System call to cleanup state after a signal + * has been taken. Reset signal mask and + * stack state from context left by sendsig (above). + * Return to previous pc and psl as specified by + * context left by sendsig. 
Check carefully to + * make sure that the user has not modified the + * psl to gain improper privileges or to cause + * a machine fault. + */ +int +sys___sigreturn14(p, v, retval) + struct proc *p; + void *v; + register_t *retval; +{ + struct sys___sigreturn14_args /* { + syscallarg(struct sigcontext *) sigcntxp; + } */ *uap = v; + struct sigcontext *scp, context; + struct trapframe *tf; + + /* + * The trampoline code hands us the context. + * It is unsafe to keep track of it ourselves, in the event that a + * program jumps out of a signal handler. + */ + scp = SCARG(uap, sigcntxp); + if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0) + return (EFAULT); + + /* Restore register context. */ + tf = p->p_md.md_regs; + /* + * Check for security violations. If we're returning to + * protected mode, the CPU will validate the segment registers + * automatically and generate a trap on violations. We handle + * the trap, rather than doing all of the checking here. + */ + if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 || + !USERMODE(context.sc_cs, context.sc_eflags)) + return (EINVAL); + + /* %fs and %gs were restored by the trampoline. */ +#if 0 + tf->tf_es = context.sc_es; + tf->tf_ds = context.sc_ds; +#endif + tf->tf_eflags = context.sc_eflags; + tf->tf_rdi = context.sc_rdi; + tf->tf_rsi = context.sc_rsi; + tf->tf_rbp = context.sc_rbp; + tf->tf_rbx = context.sc_rbx; + tf->tf_rdx = context.sc_rdx; + tf->tf_rcx = context.sc_rcx; + tf->tf_rax = context.sc_rax; + tf->tf_rip = context.sc_rip; + tf->tf_cs = context.sc_cs; + tf->tf_rsp = context.sc_rsp; + tf->tf_ss = context.sc_ss; + + /* Restore signal stack. */ + if (context.sc_onstack & SS_ONSTACK) + p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK; + else + p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK; + + /* Restore signal mask. */ + (void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0); + + return (EJUSTRETURN); +} + +int waittime = -1; +struct pcb dumppcb; + +void +cpu_reboot(howto, bootstr) + int howto; + char *bootstr; +{ + + if (cold) { + howto |= RB_HALT; + goto haltsys; + } + + boothowto = howto; + if ((howto & RB_NOSYNC) == 0 && waittime < 0) { + waittime = 0; + vfs_shutdown(); + /* + * If we've been adjusting the clock, the todr + * will be out of synch; adjust it now. + */ + resettodr(); + } + + /* Disable interrupts. */ + splhigh(); + + /* Do a dump if requested. */ + if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) + dumpsys(); + +haltsys: + doshutdownhooks(); + + if (howto & RB_HALT) { + printf("\n"); + printf("The operating system has halted.\n"); + printf("Please press any key to reboot.\n\n"); + cnpollc(1); /* for proper keyboard command handling */ + cngetc(); + cnpollc(0); + } + + printf("rebooting...\n"); + if (cpureset_delay > 0) + delay(cpureset_delay * 1000); + cpu_reset(); + for(;;) ; + /*NOTREACHED*/ +} + +/* + * These variables are needed by /sbin/savecore + */ +u_int32_t dumpmag = 0x8fca0101; /* magic number */ +int dumpsize = 0; /* pages */ +long dumplo = 0; /* blocks */ + +/* + * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers. + */ +int +cpu_dumpsize() +{ + int size; + + size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + + ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); + if (roundup(size, dbtob(1)) != dbtob(1)) + return (-1); + + return (1); +} + +/* + * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped. 
+ */ +u_long +cpu_dump_mempagecnt() +{ + u_long i, n; + + n = 0; + for (i = 0; i < mem_cluster_cnt; i++) + n += atop(mem_clusters[i].size); + return (n); +} + +/* + * cpu_dump: dump the machine-dependent kernel core dump headers. + */ +int +cpu_dump() +{ + int (*dump) __P((dev_t, daddr_t, caddr_t, size_t)); + char buf[dbtob(1)]; + kcore_seg_t *segp; + cpu_kcore_hdr_t *cpuhdrp; + phys_ram_seg_t *memsegp; + int i; + + dump = bdevsw[major(dumpdev)].d_dump; + + memset(buf, 0, sizeof buf); + segp = (kcore_seg_t *)buf; + cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))]; + memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) + + ALIGN(sizeof(*cpuhdrp))]; + + /* + * Generate a segment header. + */ + CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU); + segp->c_size = dbtob(1) - ALIGN(sizeof(*segp)); + + /* + * Add the machine-dependent header info. + */ + cpuhdrp->ptdpaddr = PTDpaddr; + cpuhdrp->nmemsegs = mem_cluster_cnt; + + /* + * Fill in the memory segment descriptors. + */ + for (i = 0; i < mem_cluster_cnt; i++) { + memsegp[i].start = mem_clusters[i].start; + memsegp[i].size = mem_clusters[i].size; + } + + return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1))); +} + +/* + * This is called by main to set dumplo and dumpsize. + * Dumps always skip the first PAGE_SIZE of disk space + * in case there might be a disk label stored there. + * If there is extra space, put dump at the end to + * reduce the chance that swapping trashes it. + */ +void +cpu_dumpconf() +{ + int nblks, dumpblks; /* size of dump area */ + int maj; + + if (dumpdev == NODEV) + goto bad; + maj = major(dumpdev); + if (maj < 0 || maj >= nblkdev) + panic("dumpconf: bad dumpdev=0x%x", dumpdev); + if (bdevsw[maj].d_psize == NULL) + goto bad; + nblks = (*bdevsw[maj].d_psize)(dumpdev); + if (nblks <= ctod(1)) + goto bad; + + dumpblks = cpu_dumpsize(); + if (dumpblks < 0) + goto bad; + dumpblks += ctod(cpu_dump_mempagecnt()); + + /* If dump won't fit (incl. room for possible label), punt. */ + if (dumpblks > (nblks - ctod(1))) + goto bad; + + /* Put dump at end of partition */ + dumplo = nblks - dumpblks; + + /* dumpsize is in page units, and doesn't include headers. */ + dumpsize = cpu_dump_mempagecnt(); + return; + + bad: + dumpsize = 0; +} + +/* + * Doadump comes here after turning off memory management and + * getting on the dump stack, either when called above, or by + * the auto-restart code. + */ +#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */ +static vaddr_t dumpspace; + +vaddr_t +reserve_dumppages(p) + vaddr_t p; +{ + + dumpspace = p; + return (p + BYTES_PER_DUMP); +} + +void +dumpsys() +{ + u_long totalbytesleft, bytes, i, n, memseg; + u_long maddr; + int psize; + daddr_t blkno; + int (*dump) __P((dev_t, daddr_t, caddr_t, size_t)); + int error; + + /* Save registers. */ + savectx(&dumppcb); + + if (dumpdev == NODEV) + return; + + /* + * For dumps during autoconfiguration, + * if dump device has already configured... + */ + if (dumpsize == 0) + cpu_dumpconf(); + if (dumplo <= 0 || dumpsize == 0) { + printf("\ndump to dev %u,%u not possible\n", major(dumpdev), + minor(dumpdev)); + return; + } + printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev), + minor(dumpdev), dumplo); + + psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev); + printf("dump "); + if (psize == -1) { + printf("area unavailable\n"); + return; + } + +#if 0 /* XXX this doesn't work. grr. 
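To make the on-disk layout chosen by cpu_dumpsize() and cpu_dumpconf() above concrete: the machine-dependent headers are packed into a single disk block, the image is placed at the tail of the dump partition, and the first page worth of blocks is always left free for a possible disk label. A rough standalone model of that arithmetic, with invented block and page counts standing in for the driver's d_psize result:

#include <stdio.h>

#define BLOCKS_PER_PAGE 8       /* assumed: ctod(1) with 4K pages, 512-byte blocks */

int
main(void)
{
        long nblks = 204800;            /* pretend size of the dump partition */
        long hdrblks = 1;               /* headers fit in one block, as in cpu_dumpsize() */
        long mempages = 16384;          /* pretend cpu_dump_mempagecnt() result */
        long dumpblks = hdrblks + mempages * BLOCKS_PER_PAGE;

        /* Refuse if the image plus the reserved label page does not fit. */
        if (dumpblks > nblks - BLOCKS_PER_PAGE) {
                printf("dump does not fit\n");
                return 1;
        }

        /* Put the dump at the end of the partition, as cpu_dumpconf() does. */
        long dumplo = nblks - dumpblks;
        printf("dump occupies blocks %ld..%ld of %ld\n", dumplo, nblks - 1, nblks);
        return 0;
}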
*/ + /* toss any characters present prior to dump */ + while (sget() != NULL); /*syscons and pccons differ */ +#endif + + if ((error = cpu_dump()) != 0) + goto err; + + totalbytesleft = ptoa(cpu_dump_mempagecnt()); + blkno = dumplo + cpu_dumpsize(); + dump = bdevsw[major(dumpdev)].d_dump; + error = 0; + + for (memseg = 0; memseg < mem_cluster_cnt; memseg++) { + maddr = mem_clusters[memseg].start; + bytes = mem_clusters[memseg].size; + + for (i = 0; i < bytes; i += n, totalbytesleft -= n) { + /* Print out how many MBs we have left to go. */ + if ((totalbytesleft % (1024*1024)) == 0) + printf("%ld ", totalbytesleft / (1024 * 1024)); + + /* Limit size for next transfer. */ + n = bytes - i; + if (n > BYTES_PER_DUMP) + n = BYTES_PER_DUMP; + + (void) pmap_map(dumpspace, maddr, maddr + n, + VM_PROT_READ); + + error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n); + if (error) + goto err; + maddr += n; + blkno += btodb(n); /* XXX? */ + +#if 0 /* XXX this doesn't work. grr. */ + /* operator aborting dump? */ + if (sget() != NULL) { + error = EINTR; + break; + } +#endif + } + } + + err: + switch (error) { + + case ENXIO: + printf("device bad\n"); + break; + + case EFAULT: + printf("device not ready\n"); + break; + + case EINVAL: + printf("area improper\n"); + break; + + case EIO: + printf("i/o error\n"); + break; + + case EINTR: + printf("aborted from console\n"); + break; + + case 0: + printf("succeeded\n"); + break; + + default: + printf("error %d\n", error); + break; + } + printf("\n\n"); + delay(5000000); /* 5 seconds */ +} + +/* + * Clear registers on exec + */ +void +setregs(p, pack, stack) + struct proc *p; + struct exec_package *pack; + u_long stack; +{ + struct pcb *pcb = &p->p_addr->u_pcb; + struct trapframe *tf; + + /* If we were using the FPU, forget about it. */ + if (fpuproc == p) + fpudrop(); + +#ifdef USER_LDT + pmap_ldt_cleanup(p); +#endif + + p->p_md.md_flags &= ~MDP_USEDFPU; + pcb->pcb_flags = 0; + pcb->pcb_savefpu.fx_fcw = __NetBSD_NPXCW__; + + tf = p->p_md.md_regs; + __asm("movl %0,%%gs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL))); + __asm("movl %0,%%fs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL))); +#if 0 + tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL); + tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL); +#endif + tf->tf_rdi = 0; + tf->tf_rsi = 0; + tf->tf_rbp = 0; + tf->tf_rbx = (u_int64_t)PS_STRINGS; + tf->tf_rdx = 0; + tf->tf_rcx = 0; + tf->tf_rax = 0; + tf->tf_rip = pack->ep_entry; + tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL); + tf->tf_eflags = PSL_USERSET; + tf->tf_rsp = stack; + tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL); +} + +/* + * Initialize segments and descriptor tables + */ + +struct gate_descriptor *idt; +char *ldtstore; +char *gdtstore; +extern struct user *proc0paddr; + +void +setgate(gd, func, ist, type, dpl) + struct gate_descriptor *gd; + void *func; + int ist, type, dpl; +{ + gd->gd_looffset = (u_int64_t)func & 0xffff; + gd->gd_selector = GSEL(GCODE_SEL, SEL_KPL); + gd->gd_ist = ist; + gd->gd_type = type; + gd->gd_dpl = dpl; + gd->gd_p = 1; + gd->gd_hioffset = (u_int64_t)func >> 16; + gd->gd_zero = 0; + gd->gd_xx1 = 0; + gd->gd_xx2 = 0; + gd->gd_xx3 = 0; +} + +void +setregion(rd, base, limit) + struct region_descriptor *rd; + void *base; + u_int16_t limit; +{ + rd->rd_limit = limit; + rd->rd_base = (u_int64_t)base; +} + +/* + * Note that the base and limit fields are ignored in long mode. 
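set_mem_segment() below scatters the base and limit across low and high descriptor fields, following the classic i386 layout (even though, as noted, long mode ignores them for code and data segments). A standalone sketch of the same bit packing, with the field widths assumed from that layout:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t base = 0x00ABCDEF;
        uint32_t limit = 0xFFFFF;               /* 20 bits, in granularity units */

        uint32_t lobase = base & 0xFFFFFF;      /* bits 0..23 */
        uint32_t hibase = base >> 24;           /* bits 24..31 */
        uint32_t lolimit = limit & 0xFFFF;      /* bits 0..15 */
        uint32_t hilimit = (limit >> 16) & 0xF; /* bits 16..19 */

        /* Reassemble to show no information is lost. */
        uint32_t rebase = (hibase << 24) | lobase;
        uint32_t relimit = (hilimit << 16) | lolimit;
        printf("base 0x%08x -> 0x%08x, limit 0x%05x -> 0x%05x\n",
            base, rebase, limit, relimit);
        return 0;
}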
+ */ +void +set_mem_segment(sd, base, limit, type, dpl, gran, def32, is64) + struct mem_segment_descriptor *sd; + void *base; + size_t limit; + int type, dpl, gran, is64; +{ + sd->sd_lolimit = (unsigned)limit; + sd->sd_lobase = (unsigned long)base; + sd->sd_type = type; + sd->sd_dpl = dpl; + sd->sd_p = 1; + sd->sd_hilimit = (unsigned)limit >> 16; + sd->sd_avl = 0; + sd->sd_long = is64; + sd->sd_def32 = def32; + sd->sd_gran = gran; + sd->sd_hibase = (unsigned long)base >> 24; +} + +void +set_sys_segment(sd, base, limit, type, dpl, gran) + struct sys_segment_descriptor *sd; + void *base; + size_t limit; + int type, dpl, gran; +{ + memset(sd, 0, sizeof *sd); + sd->sd_lolimit = (unsigned)limit; + sd->sd_lobase = (u_int64_t)base; + sd->sd_type = type; + sd->sd_dpl = dpl; + sd->sd_p = 1; + sd->sd_hilimit = (unsigned)limit >> 16; + sd->sd_gran = gran; + sd->sd_hibase = (u_int64_t)base >> 24; +} + +#define IDTVEC(name) __CONCAT(X, name) +typedef void (vector) __P((void)); +extern vector IDTVEC(syscall); +extern vector IDTVEC(osyscall); +extern vector *IDTVEC(exceptions)[]; + +#define KBTOB(x) ((size_t)(x) * 1024UL) + +void +init_x86_64(first_avail) + vaddr_t first_avail; +{ + extern void consinit __P((void)); + extern struct extent *iomem_ex; + struct btinfo_memmap *bim; + struct region_descriptor region; + struct mem_segment_descriptor *ldt_segp; + int x, first16q; + u_int64_t seg_start, seg_end; + u_int64_t seg_start1, seg_end1; + + proc0.p_addr = proc0paddr; + curpcb = &proc0.p_addr->u_pcb; + + x86_64_bus_space_init(); + + consinit(); /* XXX SHOULD NOT BE DONE HERE */ + + /* + * Initailize PAGE_SIZE-dependent variables. + */ + uvm_setpagesize(); + + /* + * A quick sanity check. + */ + if (PAGE_SIZE != NBPG) + panic("init386: PAGE_SIZE != NBPG"); + + avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */ + /* and VM system doesn't work with phys 0 */ + + /* + * Call pmap initialization to make new kernel address space. + * We must do this before loading pages into the VM system. + */ + pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE); + + /* + * Check to see if we have a memory map from the BIOS (passed + * to us by the boot program. + */ + bim = lookup_bootinfo(BTINFO_MEMMAP); + if (0 && bim != NULL && bim->num > 0) { +#if DEBUG_MEMLOAD + printf("BIOS MEMORY MAP (%d ENTRIES):\n", bim->num); +#endif + for (x = 0; x < bim->num; x++) { +#if DEBUG_MEMLOAD + printf(" addr 0x%qx size 0x%qx type 0x%x\n", + bim->entry[x].addr, + bim->entry[x].size, + bim->entry[x].type); +#endif + + /* + * If the segment is not memory, skip it. + */ + switch (bim->entry[x].type) { + case BIM_Memory: + case BIM_ACPI: + case BIM_NVS: + break; + default: + continue; + } + + seg_start = bim->entry[x].addr; + seg_end = bim->entry[x].addr + bim->entry[x].size; + + if (seg_end > 0x10000000000ULL) { + printf("WARNING: skipping large " + "memory map entry: " + "0x%lx/0x%lx/0x%x\n", + bim->entry[x].addr, + bim->entry[x].size, + bim->entry[x].type); + continue; + } + + /* + * XXX Chop the last page off the size so that + * XXX it can fit in avail_end. + */ + if (seg_end == 0x100000000ULL) { + seg_end -= PAGE_SIZE; + if (seg_end <= seg_start) + continue; + } + + /* + * Allocate the physical addresses used by RAM + * from the iomem extent map. This is done before + * the addresses are page rounded just to make + * sure we get them all. + */ + if (extent_alloc_region(iomem_ex, seg_start, + seg_end - seg_start, EX_NOWAIT)) { + /* XXX What should we do? 
*/ + printf("WARNING: CAN'T ALLOCATE " + "MEMORY SEGMENT %d " + "(0x%lx/0x%lx/0l%x) FROM " + "IOMEM EXTENT MAP!\n", + x, seg_start, seg_end - seg_start, + bim->entry[x].type); + } + + /* + * If it's not free memory, skip it. + */ + if (bim->entry[x].type != BIM_Memory) + continue; + + /* XXX XXX XXX */ + if (mem_cluster_cnt >= VM_PHYSSEG_MAX) + panic("init386: too many memory segments"); + + seg_start = round_page(seg_start); + seg_end = trunc_page(seg_end); + + if (seg_start == seg_end) + continue; + + mem_clusters[mem_cluster_cnt].start = seg_start; + mem_clusters[mem_cluster_cnt].size = + seg_end - seg_start; + + if (avail_end < seg_end) + avail_end = seg_end; + physmem += atop(mem_clusters[mem_cluster_cnt].size); + mem_cluster_cnt++; + } + } + + /* + * If the loop above didn't find any valid segment, fall back to + * former code. + */ + if (mem_cluster_cnt == 0) { + /* + * Allocate the physical addresses used by RAM from the iomem + * extent map. This is done before the addresses are + * page rounded just to make sure we get them all. + */ + if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem), + EX_NOWAIT)) { + /* XXX What should we do? */ + printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM " + "IOMEM EXTENT MAP!\n"); + } + mem_clusters[0].start = 0; + mem_clusters[0].size = trunc_page(KBTOB(biosbasemem)); + physmem += atop(mem_clusters[0].size); + if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem), + EX_NOWAIT)) { + /* XXX What should we do? */ + printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM " + "IOMEM EXTENT MAP!\n"); + } +#if NISADMA > 0 + /* + * Some motherboards/BIOSes remap the 384K of RAM that would + * normally be covered by the ISA hole to the end of memory + * so that it can be used. However, on a 16M system, this + * would cause bounce buffers to be allocated and used. + * This is not desirable behaviour, as more than 384K of + * bounce buffers might be allocated. As a work-around, + * we round memory down to the nearest 1M boundary if + * we're using any isadma devices and the remapped memory + * is what puts us over 16M. + */ + if (biosextmem > (15*1024) && biosextmem < (16*1024)) { + char pbuf[9]; + + format_bytes(pbuf, sizeof(pbuf), + biosextmem - (15*1024)); + printf("Warning: ignoring %s of remapped memory\n", + pbuf); + biosextmem = (15*1024); + } +#endif + mem_clusters[1].start = IOM_END; + mem_clusters[1].size = trunc_page(KBTOB(biosextmem)); + physmem += atop(mem_clusters[1].size); + + mem_cluster_cnt = 2; + + avail_end = IOM_END + trunc_page(KBTOB(biosextmem)); + } + + /* + * If we have 16M of RAM or less, just put it all on + * the default free list. Otherwise, put the first + * 16M of RAM on a lower priority free list (so that + * all of the ISA DMA'able memory won't be eaten up + * first-off). + */ + if (avail_end <= (16 * 1024 * 1024)) + first16q = VM_FREELIST_DEFAULT; + else + first16q = VM_FREELIST_FIRST16; + + /* Make sure the end of the space used by the kernel is rounded. */ + first_avail = round_page(first_avail); + + /* + * Now, load the memory clusters (which have already been + * rounded and truncated) into the VM system. + * + * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL + * IS LOADED AT IOM_END (1M). + */ + for (x = 0; x < mem_cluster_cnt; x++) { + seg_start = mem_clusters[x].start; + seg_end = mem_clusters[x].start + mem_clusters[x].size; + seg_start1 = 0; + seg_end1 = 0; + + /* + * Skip memory before our available starting point. 
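The cluster-loading loop here splits each physical segment at the 16 MB line so that ISA-DMA'able memory lands on a lower-priority free list. A standalone sketch of that split for one hypothetical segment; load_range() and the freelist names merely stand in for uvm_page_physload() and the real constants:

#include <stdint.h>
#include <stdio.h>

#define ISA_DMA_LIMIT   (16UL * 1024 * 1024)

/* Hypothetical stand-in for uvm_page_physload(): just report the range. */
static void
load_range(uint64_t start, uint64_t end, const char *freelist)
{
        printf("load 0x%09llx-0x%09llx onto %s\n",
            (unsigned long long)start, (unsigned long long)end, freelist);
}

int
main(void)
{
        /* Pretend cluster: 1 MB .. 64 MB. */
        uint64_t seg_start = 1UL * 1024 * 1024;
        uint64_t seg_end = 64UL * 1024 * 1024;

        /* Memory below 16 MB goes on the lower-priority ISA DMA list. */
        if (seg_start < ISA_DMA_LIMIT) {
                uint64_t cut = seg_end < ISA_DMA_LIMIT ? seg_end : ISA_DMA_LIMIT;
                load_range(seg_start, cut, "VM_FREELIST_FIRST16");
                seg_start = cut;
        }
        if (seg_start < seg_end)
                load_range(seg_start, seg_end, "VM_FREELIST_DEFAULT");
        return 0;
}

Running it prints one load for the sub-16 MB portion and one for the remainder, mirroring the first16q handling in the loop.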
+ */ + if (seg_end <= avail_start) + continue; + + if (avail_start >= seg_start && avail_start < seg_end) { + if (seg_start != 0) + panic("init)x86_64: memory doesn't start at 0"); + seg_start = avail_start; + if (seg_start == seg_end) + continue; + } + + /* + * If this segment contains the kernel, split it + * in two, around the kernel. + */ + if (seg_start <= IOM_END && first_avail <= seg_end) { + seg_start1 = first_avail; + seg_end1 = seg_end; + seg_end = IOM_END; + } + + /* First hunk */ + if (seg_start != seg_end) { + if (seg_start <= (16 * 1024 * 1024) && + first16q != VM_FREELIST_DEFAULT) { + u_int64_t tmp; + + if (seg_end > (16 * 1024 * 1024)) + tmp = (16 * 1024 * 1024); + else + tmp = seg_end; +#if DEBUG_MEMLOAD + printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n", + seg_start, tmp, + atop(seg_start), atop(tmp)); +#endif + uvm_page_physload(atop(seg_start), + atop(tmp), atop(seg_start), + atop(tmp), first16q); + seg_start = tmp; + } + + if (seg_start != seg_end) { +#if DEBUG_MEMLOAD + printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n", + seg_start, seg_end, + atop(seg_start), atop(seg_end)); +#endif + uvm_page_physload(atop(seg_start), + atop(seg_end), atop(seg_start), + atop(seg_end), VM_FREELIST_DEFAULT); + } + } + + /* Second hunk */ + if (seg_start1 != seg_end1) { + if (seg_start1 <= (16 * 1024 * 1024) && + first16q != VM_FREELIST_DEFAULT) { + u_int64_t tmp; + + if (seg_end1 > (16 * 1024 * 1024)) + tmp = (16 * 1024 * 1024); + else + tmp = seg_end1; +#if DEBUG_MEMLOAD + printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n", + seg_start1, tmp, + atop(seg_start1), atop(tmp)); +#endif + uvm_page_physload(atop(seg_start1), + atop(tmp), atop(seg_start1), + atop(tmp), first16q); + seg_start1 = tmp; + } + + if (seg_start1 != seg_end1) { +#if DEBUG_MEMLOAD + printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n", + seg_start1, seg_end1, + atop(seg_start1), atop(seg_end1)); +#endif + uvm_page_physload(atop(seg_start1), + atop(seg_end1), atop(seg_start1), + atop(seg_end1), VM_FREELIST_DEFAULT); + } + } + } + + /* + * Steal memory for the message buffer (at end of core). + */ + { + struct vm_physseg *vps = NULL; + psize_t sz = round_page(MSGBUFSIZE); + psize_t reqsz = sz; + + for (x = 0; x < vm_nphysseg; x++) { + vps = &vm_physmem[x]; + if (ptoa(vps->avail_end) == avail_end) + break; + } + if (x == vm_nphysseg) + panic("init_x86_64: can't find end of memory"); + + /* Shrink so it'll fit in the last segment. */ + if ((vps->avail_end - vps->avail_start) < atop(sz)) + sz = ptoa(vps->avail_end - vps->avail_start); + + vps->avail_end -= atop(sz); + vps->end -= atop(sz); + msgbuf_paddr = ptoa(vps->avail_end); + + /* Remove the last segment if it now has no pages. */ + if (vps->start == vps->end) { + for (vm_nphysseg--; x < vm_nphysseg; x++) + vm_physmem[x] = vm_physmem[x + 1]; + } + + /* Now find where the new avail_end is. */ + for (avail_end = 0, x = 0; x < vm_nphysseg; x++) + if (vm_physmem[x].avail_end > avail_end) + avail_end = vm_physmem[x].avail_end; + avail_end = ptoa(avail_end); + + /* Warn if the message buffer had to be shrunk. 
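The message-buffer theft above shrinks the last physical segment from its tail. A simplified standalone model of that adjustment, with a toy segment expressed in page frame numbers:

#include <stdio.h>

/* Simplified stand-in for the tail of a vm_physseg, in pages. */
struct seg {
        unsigned long avail_start, avail_end;
};

int
main(void)
{
        struct seg last = { 1024, 1040 };       /* 16 free pages left */
        unsigned long want = 8;                 /* pages wanted for the msgbuf */

        /* Shrink the request if the last segment is smaller than asked. */
        if (last.avail_end - last.avail_start < want)
                want = last.avail_end - last.avail_start;

        /* Steal the pages from the end of the segment. */
        last.avail_end -= want;
        unsigned long msgbuf_pfn = last.avail_end;

        printf("msgbuf gets %lu pages starting at pfn %lu\n", want, msgbuf_pfn);
        return 0;
}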
*/ + if (sz != reqsz) + printf("WARNING: %ld bytes not available for msgbuf " + "in last cluster (%ld used)\n", reqsz, sz); + } + + pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr, + VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE); + pmap_enter(pmap_kernel(), idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE, + VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE); + + idt = (struct gate_descriptor *)idt_vaddr; + gdtstore = (char *)(idt + NIDT); + ldtstore = gdtstore + DYNSEL_START; + + + /* make gdt gates and memory segments */ + set_mem_segment(GDT_ADDR_MEM(GCODE_SEL), 0, 0xfffff, SDT_MEMERA, + SEL_KPL, 1, 0, 1); + + set_mem_segment(GDT_ADDR_MEM(GDATA_SEL), 0, 0xfffff, SDT_MEMRWA, + SEL_KPL, 1, 0, 1); + + set_sys_segment(GDT_ADDR_SYS(GLDT_SEL), ldtstore, LDT_SIZE - 1, + SDT_SYSLDT, SEL_KPL, 0); + + set_mem_segment(GDT_ADDR_MEM(GUCODE_SEL), 0, + x86_64_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1); + + set_mem_segment(GDT_ADDR_MEM(GUDATA_SEL), 0, + x86_64_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1); + + /* make ldt gates and memory segments */ + setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL), + &IDTVEC(osyscall), 0, SDT_SYS386CGT, SEL_UPL); + + *(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) = + *GDT_ADDR_MEM(GUCODE_SEL); + *(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) = + *GDT_ADDR_MEM(GUDATA_SEL); + + /* + * 32 bit GDT entries. + */ + + set_mem_segment(GDT_ADDR_MEM(GUCODE32_SEL), 0, + x86_64_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0); + + set_mem_segment(GDT_ADDR_MEM(GUDATA32_SEL), 0, + x86_64_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0); + + /* + * 32 bit LDT entries. + */ + ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL); + set_mem_segment(ldt_segp, 0, x86_64_btop(VM_MAXUSER_ADDRESS32) - 1, + SDT_MEMERA, SEL_UPL, 1, 1, 0); + ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL); + set_mem_segment(ldt_segp, 0, x86_64_btop(VM_MAXUSER_ADDRESS32) - 1, + SDT_MEMRWA, SEL_UPL, 1, 1, 0); + + /* + * Other entries. + */ + memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL), + (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL), + sizeof (struct gate_descriptor)); + memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL), + (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL), + sizeof (struct gate_descriptor)); + + /* exceptions */ + for (x = 0; x < 32; x++) + setgate(&idt[x], IDTVEC(exceptions)[x], 0, SDT_SYS386TGT, + (x == 3 || x == 4) ? 
SEL_UPL : SEL_KPL); + + /* new-style interrupt gate for syscalls */ + setgate(&idt[128], &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL); + + setregion(&region, gdtstore, DYNSEL_START - 1); + lgdt(&region); + setregion(&region, idt, NIDT * sizeof(idt[0]) - 1); + lidt(&region); + + +#ifdef DDB + { + extern int end; + extern int *esym; + struct btinfo_symtab *symtab; + + symtab = lookup_bootinfo(BTINFO_SYMTAB); + if (symtab) { + symtab->ssym += KERNBASE; + symtab->esym += KERNBASE; + ddb_init(symtab->nsym, (int *)symtab->ssym, + (int *)symtab->esym); + } + else + ddb_init(*(int *)&end, ((int *)&end) + 1, esym); + } + if (boothowto & RB_KDB) + Debugger(); +#endif +#ifdef KGDB + kgdb_port_init(); + if (boothowto & RB_KDB) { + kgdb_debug_init = 1; + kgdb_connect(1); + } +#endif + +#if NISA > 0 + isa_defaultirq(); +#endif + + fpuinit(); + + splraise(-1); + enable_intr(); +} + +struct queue { + struct queue *q_next, *q_prev; +}; + +/* + * insert an element into a queue + */ +void +_insque(v1, v2) + void *v1; + void *v2; +{ + struct queue *elem = v1, *head = v2; + struct queue *next; + + next = head->q_next; + elem->q_next = next; + head->q_next = elem; + elem->q_prev = head; + next->q_prev = elem; +} + +/* + * remove an element from a queue + */ +void +_remque(v) + void *v; +{ + struct queue *elem = v; + struct queue *next, *prev; + + next = elem->q_next; + prev = elem->q_prev; + next->q_prev = prev; + prev->q_next = next; + elem->q_prev = 0; +} + +void * +lookup_bootinfo(type) +int type; +{ + struct btinfo_common *help; + int n = *(int*)bootinfo; + help = (struct btinfo_common *)(bootinfo + sizeof(int)); + while(n--) { + if(help->type == type) + return(help); + help = (struct btinfo_common *)((char*)help + help->len); + } + return(0); +} + +void +cpu_reset() +{ + + disable_intr(); + + /* + * The keyboard controller has 4 random output pins, one of which is + * connected to the RESET pin on the CPU in many PCs. We tell the + * keyboard controller to pulse this line a couple of times. + */ + outb(IO_KBD + KBCMDP, KBC_PULSE0); + delay(100000); + outb(IO_KBD + KBCMDP, KBC_PULSE0); + delay(100000); + + /* + * Try to cause a triple fault and watchdog reset by making the IDT + * invalid and causing a fault. + */ + memset((caddr_t)idt, 0, NIDT * sizeof(idt[0])); + __asm __volatile("divl %0,%1" : : "q" (0), "a" (0)); + +#if 0 + /* + * Try to cause a triple fault and watchdog reset by unmapping the + * entire address space and doing a TLB flush. + */ + memset((caddr_t)PTD, 0, PAGE_SIZE); + tlbflush(); +#endif + + for (;;); +} + +extern void i8254_microtime(struct timeval *tv); + +/* + * XXXXXXX + * the simulator's 8254 seems to travel backward in time sometimes? + * work around this with this hideous code. Unacceptable for + * real hardware, but this is just a patch to stop the weird + * effects. SMP unsafe, etc. + */ +void +microtime(struct timeval *tv) +{ + static struct timeval mtv; + + i8254_microtime(tv); + if (tv->tv_sec <= mtv.tv_sec && tv->tv_usec < mtv.tv_usec) { + mtv.tv_usec++; + if (mtv.tv_usec > 1000000) { + mtv.tv_sec++; + mtv.tv_usec = 0; + } + *tv = mtv; + } else + mtv = *tv; +} diff --git a/sys/arch/x86_64/x86_64/mainbus.c b/sys/arch/x86_64/x86_64/mainbus.c new file mode 100644 index 000000000000..a869e1792199 --- /dev/null +++ b/sys/arch/x86_64/x86_64/mainbus.c @@ -0,0 +1,142 @@ +/* $NetBSD: mainbus.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/* + * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Christopher G. Demetriou + * for the NetBSD Project. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include +#include + +#include /* for ISA_HOLE_VADDR */ + +#include "pci.h" +#include "isa.h" + +int mainbus_match __P((struct device *, struct cfdata *, void *)); +void mainbus_attach __P((struct device *, struct device *, void *)); + +struct cfattach mainbus_ca = { + sizeof(struct device), mainbus_match, mainbus_attach +}; + +int mainbus_print __P((void *, const char *)); + +union mainbus_attach_args { + const char *mba_busname; /* first elem of all */ + struct pcibus_attach_args mba_pba; + struct isabus_attach_args mba_iba; +}; + +/* + * This is set when the ISA bus is attached. If it's not set by the + * time it's checked below, then mainbus attempts to attach an ISA. + */ +int isa_has_been_seen; +struct x86_64_isa_chipset x86_64_isa_chipset; +#if NISA > 0 +struct isabus_attach_args mba_iba = { + "isa", + X86_64_BUS_SPACE_IO, X86_64_BUS_SPACE_MEM, + &isa_bus_dma_tag, + &x86_64_isa_chipset +}; +#endif + +/* + * Probe for the mainbus; always succeeds. + */ +int +mainbus_match(parent, match, aux) + struct device *parent; + struct cfdata *match; + void *aux; +{ + + return 1; +} + +/* + * Attach the mainbus. + */ +void +mainbus_attach(parent, self, aux) + struct device *parent, *self; + void *aux; +{ +#if NPCI > 0 + union mainbus_attach_args mba; +#endif + + printf("\n"); + + /* + * XXX Note also that the presence of a PCI bus should + * XXX _always_ be checked, and if present the bus should be + * XXX 'found'. However, because of the structure of the code, + * XXX that's not currently possible. 
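The union mainbus_attach_args above relies on every member starting with a busname pointer, which is what lets mainbus_print() below inspect the aux pointer generically. A standalone sketch of the same first-member idiom with invented bus argument types:

#include <stdio.h>

struct fake_pci_args { const char *busname; int bus; };
struct fake_isa_args { const char *busname; int irq; };

union any_args {
        const char *busname;            /* first element of all members */
        struct fake_pci_args pci;
        struct fake_isa_args isa;
};

static void
print_args(void *aux)
{
        union any_args *a = aux;

        /* Safe here: every member begins with the busname string. */
        printf("found %s bus\n", a->busname);
}

int
main(void)
{
        union any_args a;

        a.pci = (struct fake_pci_args){ "pci", 0 };
        print_args(&a);
        a.isa = (struct fake_isa_args){ "isa", 7 };
        print_args(&a);
        return 0;
}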
+ */ +#if NPCI > 0 + if (pci_mode_detect() != 0) { + mba.mba_pba.pba_busname = "pci"; + mba.mba_pba.pba_iot = X86_64_BUS_SPACE_IO; + mba.mba_pba.pba_memt = X86_64_BUS_SPACE_MEM; + mba.mba_pba.pba_dmat = &pci_bus_dma_tag; + mba.mba_pba.pba_pc = NULL; + mba.mba_pba.pba_flags = pci_bus_flags(); + mba.mba_pba.pba_bus = 0; + config_found(self, &mba.mba_pba, mainbus_print); + } +#endif + +#if NISA > 0 + if (isa_has_been_seen == 0) + config_found(self, &mba_iba, mainbus_print); +#endif + +} + +int +mainbus_print(aux, pnp) + void *aux; + const char *pnp; +{ + union mainbus_attach_args *mba = aux; + + if (pnp) + printf("%s at %s", mba->mba_busname, pnp); + if (strcmp(mba->mba_busname, "pci") == 0) + printf(" bus %d", mba->mba_pba.pba_bus); + return (UNCONF); +} diff --git a/sys/arch/x86_64/x86_64/md_root.c b/sys/arch/x86_64/x86_64/md_root.c new file mode 100644 index 000000000000..91cec612d090 --- /dev/null +++ b/sys/arch/x86_64/x86_64/md_root.c @@ -0,0 +1,83 @@ +/* $NetBSD: md_root.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/* + * Copyright (c) 1995 Gordon W. Ross + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include "opt_mdsize.h" + +extern int boothowto; + +#ifndef MINIROOTSIZE +#define MINIROOTSIZE 512 +#endif + +#define ROOTBYTES (MINIROOTSIZE << DEV_BSHIFT) + +/* + * This array will be patched to contain a file-system image. + * See the program mdsetimage(8) for details. + */ +u_int32_t md_root_size = ROOTBYTES; +char md_root_image[ROOTBYTES] = "|This is the root ramdisk!\n"; + +/* + * This is called during autoconfig. + */ +void +md_attach_hook(unit, md) + int unit; + struct md_conf *md; +{ + if (unit == 0) { + /* Setup root ramdisk */ + md->md_addr = (caddr_t)md_root_image; + md->md_size = (size_t)md_root_size; + md->md_type = MD_KMEM_FIXED; + printf("md%d: internal %dK image area\n", unit, + ROOTBYTES / 1024); + } +} + +/* + * This is called during open (i.e. mountroot) + */ +void +md_open_hook(unit, md) + int unit; + struct md_conf *md; +{ + if (unit == 0) { + /* The root ramdisk only works single-user. 
*/ + boothowto |= RB_SINGLE; + } +} diff --git a/sys/arch/x86_64/x86_64/mem.c b/sys/arch/x86_64/x86_64/mem.c new file mode 100644 index 000000000000..9a87f7dee864 --- /dev/null +++ b/sys/arch/x86_64/x86_64/mem.c @@ -0,0 +1,202 @@ +/* $NetBSD: mem.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mem.c 8.3 (Berkeley) 1/12/94 + */ + +#include "opt_compat_netbsd.h" + +/* + * Memory special file + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +extern char *vmmap; /* poor name! 
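In the mmrw() routine further below, reads and writes of /dev/mem go through a single-page window, so each transfer is clamped to whatever is left of the current page. A standalone sketch of that clamping loop, with made-up offset and residual values:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PGOFSET (PAGE_SIZE - 1)

int
main(void)
{
        unsigned long offset = 0x12345; /* pretend uio_offset into /dev/mem */
        unsigned long resid = 10000;    /* bytes the caller still wants */

        while (resid > 0) {
                unsigned long o = offset & PGOFSET;
                /* Clamp the transfer to what is left of the current page. */
                unsigned long c = resid < PAGE_SIZE - o ? resid : PAGE_SIZE - o;

                printf("copy %5lu bytes at offset 0x%lx (page 0x%lx)\n",
                    c, offset, offset & ~PGOFSET);
                offset += c;
                resid -= c;
        }
        return 0;
}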
*/ +caddr_t zeropage; + +/*ARGSUSED*/ +int +mmopen(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + + return (0); +} + +/*ARGSUSED*/ +int +mmclose(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + + return (0); +} + +/*ARGSUSED*/ +int +mmrw(dev, uio, flags) + dev_t dev; + struct uio *uio; + int flags; +{ + register vaddr_t o, v; + register int c; + register struct iovec *iov; + int error = 0; + static int physlock; + vm_prot_t prot; + + if (minor(dev) == 0) { + /* lock against other uses of shared vmmap */ + while (physlock > 0) { + physlock++; + error = tsleep((caddr_t)&physlock, PZERO | PCATCH, + "mmrw", 0); + if (error) + return (error); + } + physlock = 1; + } + while (uio->uio_resid > 0 && !error) { + iov = uio->uio_iov; + if (iov->iov_len == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + if (uio->uio_iovcnt < 0) + panic("mmrw"); + continue; + } + switch (minor(dev)) { + +/* minor device 0 is physical memory */ + case 0: + v = uio->uio_offset; + prot = uio->uio_rw == UIO_READ ? VM_PROT_READ : + VM_PROT_WRITE; + pmap_enter(pmap_kernel(), (vaddr_t)vmmap, + trunc_page(v), prot, PMAP_WIRED|prot); + o = uio->uio_offset & PGOFSET; + c = min(uio->uio_resid, (int)(PAGE_SIZE - o)); + error = uiomove((caddr_t)vmmap + o, c, uio); + pmap_remove(pmap_kernel(), (vaddr_t)vmmap, + (vaddr_t)vmmap + PAGE_SIZE); + break; + +/* minor device 1 is kernel memory */ + case 1: + v = uio->uio_offset; + c = min(iov->iov_len, MAXPHYS); + if (!uvm_kernacc((caddr_t)v, c, + uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) + return (EFAULT); + error = uiomove((caddr_t)v, c, uio); + break; + +/* minor device 2 is EOF/rathole */ + case 2: + if (uio->uio_rw == UIO_WRITE) + uio->uio_resid = 0; + return (0); + +/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */ + case 12: + if (uio->uio_rw == UIO_WRITE) { + uio->uio_resid = 0; + return (0); + } + if (zeropage == NULL) { + zeropage = (caddr_t) + malloc(PAGE_SIZE, M_TEMP, M_WAITOK); + memset(zeropage, 0, PAGE_SIZE); + } + c = min(iov->iov_len, PAGE_SIZE); + error = uiomove(zeropage, c, uio); + break; + + default: + return (ENXIO); + } + } + if (minor(dev) == 0) { + if (physlock > 1) + wakeup((caddr_t)&physlock); + physlock = 0; + } + return (error); +} + +paddr_t +mmmmap(dev, off, prot) + dev_t dev; + off_t off; + int prot; +{ + struct proc *p = curproc; /* XXX */ + + /* + * /dev/mem is the only one that makes sense through this + * interface. For /dev/kmem any physaddr we return here + * could be transient and hence incorrect or invalid at + * a later time. /dev/null just doesn't make any sense + * and /dev/zero is a hack that is handled via the default + * pager in mmap(). + */ + if (minor(dev) != 0) + return (-1); + + if (off > ctob(physmem) && suser(p->p_ucred, &p->p_acflag) != 0) + return (-1); + return (x86_64_btop(off)); +} diff --git a/sys/arch/x86_64/x86_64/netbsd32_machdep.c b/sys/arch/x86_64/x86_64/netbsd32_machdep.c new file mode 100644 index 000000000000..12b106d85ddd --- /dev/null +++ b/sys/arch/x86_64/x86_64/netbsd32_machdep.c @@ -0,0 +1,374 @@ +/* $NetBSD: netbsd32_machdep.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/* + * Copyright (c) 2001 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_compat_netbsd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +int process_read_fpregs32(struct proc *, struct fpreg32 *); +int process_read_regs32(struct proc *, struct reg32 *); + +void +netbsd32_setregs(struct proc *p, struct exec_package *pack, u_long stack) +{ + struct pcb *pcb = &p->p_addr->u_pcb; + struct trapframe *tf; + + /* If we were using the FPU, forget about it. */ + if (fpuproc == p) + fpudrop(); + +#if defined(USER_LDT) && 0 + pmap_ldt_cleanup(p); +#endif + + p->p_md.md_flags &= ~MDP_USEDFPU; + pcb->pcb_flags = 0; + pcb->pcb_savefpu.fx_fcw = __NetBSD_NPXCW__; + + p->p_flag |= P_32; + + tf = p->p_md.md_regs; + __asm("movl %0,%%gs" : : "r" (LSEL(LUDATA32_SEL, SEL_UPL))); + __asm("movl %0,%%fs" : : "r" (LSEL(LUDATA32_SEL, SEL_UPL))); + + /* + * XXXfvdl needs to be revisited + * if USER_LDT is going to be supported, these need + * to be saved/restored. + */ +#if 1 + __asm("movl %0,%%ds" : : "r" (LSEL(LUDATA32_SEL, SEL_UPL))); + __asm("movl %0,%%es" : : "r" (LSEL(LUDATA32_SEL, SEL_UPL))); +#else + tf->tf_es = LSEL(LUDATA32_SEL, SEL_UPL); + tf->tf_ds = LSEL(LUDATA32_SEL, SEL_UPL); +#endif + tf->tf_rdi = 0; + tf->tf_rsi = 0; + tf->tf_rbp = 0; + tf->tf_rbx = (u_int64_t)p->p_psstr; + tf->tf_rdx = 0; + tf->tf_rcx = 0; + tf->tf_rax = 0; + tf->tf_rip = pack->ep_entry; + tf->tf_cs = LSEL(LUCODE32_SEL, SEL_UPL); + tf->tf_eflags = PSL_USERSET; + tf->tf_rsp = stack; + tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL); +} + +void +netbsd32_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) +{ + struct proc *p = curproc; + struct trapframe *tf; + struct netbsd32_sigframe *fp, frame; + int onstack; + + tf = p->p_md.md_regs; + + /* Do we need to jump onto the signal stack? 
*/ + onstack = + (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && + (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0; + + /* Allocate space for the signal handler context. */ + if (onstack) + fp = (struct netbsd32_sigframe *) + ((caddr_t)p->p_sigctx.ps_sigstk.ss_sp + + p->p_sigctx.ps_sigstk.ss_size); + else + fp = (struct netbsd32_sigframe *)tf->tf_rsp; + fp--; + + /* Build stack frame for signal trampoline. */ + frame.sf_signum = sig; + frame.sf_code = code; + frame.sf_scp = (u_int32_t)(u_long)&fp->sf_sc; + frame.sf_handler = (u_int32_t)(u_long)catcher; + + /* + * XXXfvdl these need to be saved and restored for USER_LDT. + */ + + /* Save register context. */ + __asm("movl %%gs,%0" : "=r" (frame.sf_sc.sc_gs)); + __asm("movl %%fs,%0" : "=r" (frame.sf_sc.sc_fs)); +#if 1 + frame.sf_sc.sc_es = LSEL(LUDATA32_SEL, SEL_UPL); + frame.sf_sc.sc_ds = LSEL(LUDATA32_SEL, SEL_UPL); +#else + frame.sf_sc.sc_es = tf->tf_es; + frame.sf_sc.sc_ds = tf->tf_ds; +#endif + frame.sf_sc.sc_eflags = tf->tf_eflags; + frame.sf_sc.sc_edi = tf->tf_rdi; + frame.sf_sc.sc_esi = tf->tf_rsi; + frame.sf_sc.sc_ebp = tf->tf_rbp; + frame.sf_sc.sc_ebx = tf->tf_rbx; + frame.sf_sc.sc_edx = tf->tf_rdx; + frame.sf_sc.sc_ecx = tf->tf_rcx; + frame.sf_sc.sc_eax = tf->tf_rax; + frame.sf_sc.sc_eip = tf->tf_rip; + frame.sf_sc.sc_cs = tf->tf_cs; + frame.sf_sc.sc_esp = tf->tf_rsp; + frame.sf_sc.sc_ss = tf->tf_ss; + frame.sf_sc.sc_trapno = tf->tf_trapno; + frame.sf_sc.sc_err = tf->tf_err; + + /* Save signal stack. */ + frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK; + + /* Save signal mask. */ + frame.sf_sc.sc_mask = *mask; + + if (copyout(&frame, fp, sizeof frame) != 0) { + /* + * Process has trashed its stack; give it an illegal + * instruction to halt it in its tracks. + */ + sigexit(p, SIGILL); + /* NOTREACHED */ + } + + /* + * Build context to run handler in. + */ + __asm("movl %0,%%gs" : : "r" (GSEL(GUDATA32_SEL, SEL_UPL))); + __asm("movl %0,%%fs" : : "r" (GSEL(GUDATA32_SEL, SEL_UPL))); +#if 1 + /* XXXX */ + __asm("movl %0,%%es" : : "r" (GSEL(GUDATA32_SEL, SEL_UPL))); + __asm("movl %0,%%ds" : : "r" (GSEL(GUDATA32_SEL, SEL_UPL))); +#else + tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL); + tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL); +#endif + tf->tf_rip = (u_int64_t)p->p_sigctx.ps_sigcode; + tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL); + tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC); + tf->tf_rsp = (u_int64_t)fp; + tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL); + + /* Remember that we're now on the signal stack. */ + if (onstack) + p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK; +} + + +int +netbsd32___sigreturn14(struct proc *p, void *v, register_t *retval) +{ + struct netbsd32___sigreturn14_args /* { + syscallarg(struct netbsd32_sigcontext *) sigcntxp; + } */ *uap = v; + struct netbsd32_sigcontext *scp, context; + struct trapframe *tf; + + /* + * The trampoline code hands us the context. + * It is unsafe to keep track of it ourselves, in the event that a + * program jumps out of a signal handler. + */ + scp = (struct netbsd32_sigcontext *)(unsigned long)SCARG(uap, sigcntxp); + if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0) + return (EFAULT); + + /* Restore register context. */ + tf = p->p_md.md_regs; + /* + * Check for security violations. If we're returning to + * protected mode, the CPU will validate the segment registers + * automatically and generate a trap on violations. We handle + * the trap, rather than doing all of the checking here. 
+ */ + if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 || + !USERMODE(context.sc_cs, context.sc_eflags)) + return (EINVAL); + + /* %fs and %gs were restored by the trampoline. */ +#if 1 + __asm("movl %0,%%ds" : : "r" (context.sc_ds)); + __asm("movl %0,%%es" : : "r" (context.sc_es)); +#else + tf->tf_es = context.sc_es; + tf->tf_ds = context.sc_ds; +#endif + tf->tf_eflags = context.sc_eflags; + tf->tf_rdi = context.sc_edi; + tf->tf_rsi = context.sc_esi; + tf->tf_rbp = context.sc_ebp; + tf->tf_rbx = context.sc_ebx; + tf->tf_rdx = context.sc_edx; + tf->tf_rcx = context.sc_ecx; + tf->tf_rax = context.sc_eax; + tf->tf_rip = context.sc_eip; + tf->tf_cs = context.sc_cs; + tf->tf_rsp = context.sc_esp; + tf->tf_ss = context.sc_ss; + + /* Restore signal stack. */ + if (context.sc_onstack & SS_ONSTACK) + p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK; + else + p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK; + + /* Restore signal mask. */ + (void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0); + + return (EJUSTRETURN); +} + + +/* + * Dump the machine specific segment at the start of a core dump. + */ +struct md_core32 { + struct reg32 intreg; + struct fpreg32 freg; +}; + +int +cpu_coredump32(struct proc *p, struct vnode *vp, struct ucred *cred, + struct core32 *chdr) +{ + struct md_core32 md_core; + struct coreseg cseg; + int error; + + CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0); + chdr->c_hdrsize = ALIGN32(sizeof(*chdr)); + chdr->c_seghdrsize = ALIGN32(sizeof(cseg)); + chdr->c_cpusize = sizeof(md_core); + + /* Save integer registers. */ + error = process_read_regs32(p, &md_core.intreg); + if (error) + return error; + + /* Save floating point registers. */ + error = process_read_fpregs32(p, &md_core.freg); + if (error) + return error; + + CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_I386, CORE_CPU); + cseg.c_addr = 0; + cseg.c_size = chdr->c_cpusize; + + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize, + (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, + NULL, p); + if (error) + return error; + + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core), + (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE, + IO_NODELOCKED|IO_UNIT, cred, NULL, p); + if (error) + return error; + + chdr->c_nseg++; + return 0; +} + + +int +process_read_regs32(struct proc *p, struct reg32 *regs) +{ + struct trapframe *tf = p->p_md.md_regs; + struct pcb *pcb = &p->p_addr->u_pcb; + + regs->r_gs = pcb->pcb_gs; + regs->r_fs = pcb->pcb_fs; + regs->r_es = LSEL(LUCODE32_SEL, SEL_UPL); + regs->r_ds = LSEL(LUCODE32_SEL, SEL_UPL); + regs->r_eflags = tf->tf_eflags; + /* XXX avoid sign extension problems with unknown upper bits? 
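The register copies that follow truncate 64-bit trapframe values to their low 32 bits with an explicit mask, which sidesteps any sign extension of the unknown upper half. A tiny standalone illustration of that narrowing:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* A 64-bit register image whose upper half happens to be garbage. */
        uint64_t rdi = 0xdeadbeefcafe1234ULL;

        /* Masking before the narrowing conversion keeps only the
           architectural 32-bit value, making the truncation explicit. */
        uint32_t edi = (uint32_t)(rdi & 0xffffffff);

        printf("rdi 0x%016llx -> edi 0x%08x\n", (unsigned long long)rdi, edi);
        return 0;
}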
*/ + regs->r_edi = tf->tf_rdi & 0xffffffff; + regs->r_esi = tf->tf_rsi & 0xffffffff; + regs->r_ebp = tf->tf_rbp & 0xffffffff; + regs->r_ebx = tf->tf_rbx & 0xffffffff; + regs->r_edx = tf->tf_rdx & 0xffffffff; + regs->r_ecx = tf->tf_rcx & 0xffffffff; + regs->r_eax = tf->tf_rax & 0xffffffff; + regs->r_eip = tf->tf_rip & 0xffffffff; + regs->r_cs = tf->tf_cs; + regs->r_esp = tf->tf_rsp & 0xffffffff; + regs->r_ss = tf->tf_ss; + + return (0); +} + +int +process_read_fpregs32(struct proc *p, struct fpreg32 *regs) +{ + struct oldfsave frame; + + if (p->p_md.md_flags & MDP_USEDFPU) { + if (fpuproc == p) + __asm__("fnsave %0" : "=m" (frame)); + } else { + memset(&frame, 0, sizeof(*regs)); + frame.fs_control = __NetBSD_NPXCW__; + frame.fs_tag = 0xffff; + p->p_md.md_flags |= MDP_USEDFPU; + } + + memcpy(regs, &frame, sizeof(*regs)); + return (0); +} diff --git a/sys/arch/x86_64/x86_64/netbsd32_sigcode.S b/sys/arch/x86_64/x86_64/netbsd32_sigcode.S new file mode 100644 index 000000000000..21ebef83e549 --- /dev/null +++ b/sys/arch/x86_64/x86_64/netbsd32_sigcode.S @@ -0,0 +1,29 @@ +#include "assym.h" +#include +#include + + .globl _C_LABEL(netbsd32_esigcode) + +/* + * Signal trampoline; copied to top of user stack. + */ + +NENTRY(netbsd32_sigcode) + + .code32 + + call *SIGF_HANDLER32(%esp) + leal SIGF_SC32(%esp),%eax # scp (the call may have clobbered the + # copy at SIGF_SCP(%esp)) + movl SC_FS32(%eax),%ecx + movl SC_GS32(%eax),%edx + movl %ecx,%fs + movl %edx,%gs + pushl %eax + pushl %eax # junk to fake return address + movl $SYS___sigreturn14,%eax + int $0x80 # enter kernel with args on stack + movl $SYS_exit,%eax + int $0x80 # exit if sigreturn fails + .globl _C_LABEL(esigcode) +_C_LABEL(netbsd32_esigcode): diff --git a/sys/arch/x86_64/x86_64/netbsd32_syscall.c b/sys/arch/x86_64/x86_64/netbsd32_syscall.c new file mode 100644 index 000000000000..fee7b94d3d18 --- /dev/null +++ b/sys/arch/x86_64/x86_64/netbsd32_syscall.c @@ -0,0 +1,254 @@ +/* $NetBSD: netbsd32_syscall.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_syscall_debug.h" +#include "opt_ktrace.h" + +#include +#include +#include +#include +#include +#ifdef KTRACE +#include +#endif +#include + +#include + +#include +#include +#include + +void netbsd32_syscall_intern(struct proc *); +void netbsd32_syscall_plain(struct trapframe); +void netbsd32_syscall_fancy(struct trapframe); + +void +netbsd32_syscall_intern(p) + struct proc *p; +{ +#ifdef KTRACE + if (p->p_traceflag & (KTRFAC_SYSCALL | KTRFAC_SYSRET)) + p->p_md.md_syscall = netbsd32_syscall_fancy; + else +#endif + p->p_md.md_syscall = netbsd32_syscall_plain; +} + +void +netbsd32_syscall_plain(frame) + struct trapframe frame; +{ + register caddr_t params; + register const struct sysent *callp; + register struct proc *p; + int error; + size_t argsize; + register32_t code, args[8]; + register_t rval[2]; + + uvmexp.syscalls++; + p = curproc; + + code = frame.tf_rax; + callp = p->p_emul->e_sysent; + params = (caddr_t)frame.tf_rsp + sizeof(int); + + switch (code) { + case SYS_syscall: + /* + * Code is first argument, followed by actual args. + */ + code = fuword(params); + params += sizeof(int); + break; + case SYS___syscall: + /* + * Like syscall, but code is a quad, so as to maintain + * quad alignment for the rest of the arguments. + */ + code = fuword(params + _QUAD_LOWWORD * sizeof(int)); + params += sizeof(quad_t); + break; + default: + break; + } + + code &= (SYS_NSYSENT - 1); + callp += code; + argsize = callp->sy_argsize; + if (argsize) { + error = copyin(params, (caddr_t)args, argsize); + if (error) + goto bad; + } + +#ifdef SYSCALL_DEBUG + scdebug_call(p, code, args); +#endif /* SYSCALL_DEBUG */ + + rval[0] = 0; + rval[1] = 0; +#if 0 + printf("netbsd32: syscall %d (%x %x %x %x %x %x, %x)\n", code, + args[0], args[1], args[2], args[3], args[4], args[5], args[6]); +#endif + error = (*callp->sy_call)(p, args, rval); + switch (error) { + case 0: + frame.tf_rax = rval[0]; + frame.tf_rdx = rval[1]; + frame.tf_eflags &= ~PSL_C; /* carry bit */ + break; + case ERESTART: + /* + * The offset to adjust the PC by depends on whether we entered + * the kernel through the trap or call gate. We pushed the + * size of the instruction into tf_err on entry. + */ + frame.tf_rip -= frame.tf_err; + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + bad: + frame.tf_rax = error; + frame.tf_eflags |= PSL_C; /* carry bit */ + break; + } + +#ifdef SYSCALL_DEBUG + scdebug_ret(p, code, error, rval); +#endif /* SYSCALL_DEBUG */ + userret(p); +} + +void +netbsd32_syscall_fancy(frame) + struct trapframe frame; +{ + register caddr_t params; + register const struct sysent *callp; + register struct proc *p; + int error; + size_t argsize; + register32_t code, args[8]; + register_t rval[2]; + + uvmexp.syscalls++; + p = curproc; + + code = frame.tf_rax; + callp = p->p_emul->e_sysent; + params = (caddr_t)frame.tf_rsp + sizeof(int); + + switch (code) { + case SYS_syscall: + /* + * Code is first argument, followed by actual args. 
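For the indirect SYS_syscall form handled above, the real syscall code sits on the 32-bit user stack as the first argument, and the remaining arguments begin one word later. A standalone sketch of that pointer adjustment over a pretend stack image:

#include <stdio.h>
#include <string.h>

int
main(void)
{
        /* Hypothetical 32-bit user stack: return address, then arguments. */
        unsigned int stack[6] = { 0x1000, 198, 4, 5, 6, 7 };
        unsigned char *params = (unsigned char *)stack + sizeof(int);
        unsigned int code;

        /* The syscall number normally comes from a register; for the
           indirect SYS_syscall form, the real code is the first argument
           and the remaining arguments start one 32-bit word later. */
        memcpy(&code, params, sizeof(code));
        params += sizeof(int);

        printf("indirect syscall: code %u, first real arg %u\n",
            code, *(unsigned int *)params);
        return 0;
}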
+ */ + code = fuword(params); + params += sizeof(int); + break; + case SYS___syscall: + /* + * Like syscall, but code is a quad, so as to maintain + * quad alignment for the rest of the arguments. + */ + code = fuword(params + _QUAD_LOWWORD * sizeof(int)); + params += sizeof(quad_t); + break; + default: + break; + } + + code &= (SYS_NSYSENT - 1); + callp += code; + argsize = callp->sy_argsize; + if (argsize) { + error = copyin(params, (caddr_t)args, argsize); + if (error) + goto bad; + } + +#ifdef SYSCALL_DEBUG + scdebug_call(p, code, args); +#endif /* SYSCALL_DEBUG */ +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSCALL)) + ktrsyscall(p, code, argsize, args); +#endif /* KTRACE */ + + rval[0] = 0; + rval[1] = 0; + error = (*callp->sy_call)(p, args, rval); + switch (error) { + case 0: + frame.tf_rax = rval[0]; + frame.tf_rdx = rval[1]; + frame.tf_eflags &= ~PSL_C; /* carry bit */ + break; + case ERESTART: + /* + * The offset to adjust the PC by depends on whether we entered + * the kernel through the trap or call gate. We pushed the + * size of the instruction into tf_err on entry. + */ + frame.tf_rip -= frame.tf_err; + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + bad: + frame.tf_rax = error; + frame.tf_eflags |= PSL_C; /* carry bit */ + break; + } + +#ifdef SYSCALL_DEBUG + scdebug_ret(p, code, error, rval); +#endif /* SYSCALL_DEBUG */ + userret(p); +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSRET)) + ktrsysret(p, code, error, rval[0]); +#endif /* KTRACE */ +} diff --git a/sys/arch/x86_64/x86_64/pmap.c b/sys/arch/x86_64/x86_64/pmap.c new file mode 100644 index 000000000000..52266c23d4d3 --- /dev/null +++ b/sys/arch/x86_64/x86_64/pmap.c @@ -0,0 +1,3460 @@ +/* $NetBSD: pmap.c,v 1.1 2001/06/19 00:21:17 fvdl Exp $ */ + +/* + * + * Copyright (c) 1997 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles D. Cranor and + * Washington University. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright 2001 (c) Wasabi Systems, Inc. 
+ * All rights reserved. + * + * Written by Frank van der Linden for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This is the i386 pmap modified and generalized to support x86-64 + * as well. The idea is to hide the upper N levels of the page tables + * inside pmap_get_ptp, pmap_free_ptp and pmap_growkernel. The rest + * is mostly untouched, except that it uses some more generalized + * macros and interfaces. + * + * This pmap has been tested on the i386 as well, and it can be easily + * adapted to PAE. + * + * fvdl@wasabisystems.com 18-Jun-2001 + */ + +/* + * pmap.c: i386 pmap module rewrite + * Chuck Cranor + * 11-Aug-97 + * + * history of this pmap module: in addition to my own input, i used + * the following references for this rewrite of the i386 pmap: + * + * [1] the NetBSD i386 pmap. this pmap appears to be based on the + * BSD hp300 pmap done by Mike Hibler at University of Utah. + * it was then ported to the i386 by William Jolitz of UUNET + * Technologies, Inc. Then Charles M. Hannum of the NetBSD + * project fixed some bugs and provided some speed ups. + * + * [2] the FreeBSD i386 pmap. this pmap seems to be the + * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson + * and David Greenman. + * + * [3] the Mach pmap. this pmap, from CMU, seems to have migrated + * between several processors. the VAX version was done by + * Avadis Tevanian, Jr., and Michael Wayne Young. the i386 + * version was done by Lance Berc, Mike Kupfer, Bob Baron, + * David Golub, and Richard Draves. the alpha version was + * done by Alessandro Forin (CMU/Mach) and Chris Demetriou + * (NetBSD/alpha). 
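+ *
+ * as a quick sketch of the per-level indexing this generalization
+ * relies on (assuming the pl_i(), ptp_shifts[] and ptp_masks[]
+ * definitions in the x86-64 pmap.h header: 4 levels of 512 entries
+ * each, 9 bits of virtual address per level starting at bit 12):
+ *
+ *	level 1 index = (va >> 12) & 511	(PTE slot within a PTP)
+ *	level 2 index = (va >> 21) & 511
+ *	level 3 index = (va >> 30) & 511
+ *	level 4 index = (va >> 39) & 511	(slot in the top-level PDP)
+ *
+ * pl_i(va, level) computes these via ptp_masks[level - 1] and
+ * ptp_shifts[level - 1], and pmap_get_ptp, pmap_free_ptp and
+ * pmap_growkernel walk them, so the rest of the code never needs to
+ * know how many levels there actually are.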
+ */ + +#ifndef __x86_64__ +#include "opt_cputype.h" +#endif +#include "opt_user_ldt.h" +#include "opt_largepages.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include + +/* + * general info: + * + * - for an explanation of how the i386 MMU hardware works see + * the comments in . + * + * - for an explanation of the general memory structure used by + * this pmap (including the recursive mapping), see the comments + * in . + * + * this file contains the code for the "pmap module." the module's + * job is to manage the hardware's virtual to physical address mappings. + * note that there are two levels of mapping in the VM system: + * + * [1] the upper layer of the VM system uses vm_map's and vm_map_entry's + * to map ranges of virtual address space to objects/files. for + * example, the vm_map may say: "map VA 0x1000 to 0x22000 read-only + * to the file /bin/ls starting at offset zero." note that + * the upper layer mapping is not concerned with how individual + * vm_pages are mapped. + * + * [2] the lower layer of the VM system (the pmap) maintains the mappings + * from virtual addresses. it is concerned with which vm_page is + * mapped where. for example, when you run /bin/ls and start + * at page 0x1000 the fault routine may lookup the correct page + * of the /bin/ls file and then ask the pmap layer to establish + * a mapping for it. + * + * note that information in the lower layer of the VM system can be + * thrown away since it can easily be reconstructed from the info + * in the upper layer. + * + * data structures we use include: + * + * - struct pmap: describes the address space of one thread + * - struct pv_entry: describes one mapping of a PA + * - struct pv_head: there is one pv_head per managed page of + * physical memory. the pv_head points to a list of pv_entry + * structures which describe all the pairs that this + * page is mapped in. this is critical for page based operations + * such as pmap_page_protect() [change protection on _all_ mappings + * of a page] + * - pv_page/pv_page_info: pv_entry's are allocated out of pv_page's. + * if we run out of pv_entry's we allocate a new pv_page and free + * its pv_entrys. + * - pmap_remove_record: a list of virtual addresses whose mappings + * have been changed. used for TLB flushing. + */ + +/* + * memory allocation + * + * - there are three data structures that we must dynamically allocate: + * + * [A] new process' page directory page (PDP) + * - plan 1: done at pmap_create() we use + * uvm_km_alloc(kernel_map, PAGE_SIZE) [fka kmem_alloc] to do this + * allocation. + * + * if we are low in free physical memory then we sleep in + * uvm_km_alloc -- in this case this is ok since we are creating + * a new pmap and should not be holding any locks. + * + * if the kernel is totally out of virtual space + * (i.e. uvm_km_alloc returns NULL), then we panic. + * + * XXX: the fork code currently has no way to return an "out of + * memory, try again" error code since uvm_fork [fka vm_fork] + * is a void function. + * + * [B] new page tables pages (PTP) + * call uvm_pagealloc() + * => success: zero page, add to pm_pdir + * => failure: we are out of free vm_pages, let pmap_enter() + * tell UVM about it. + * + * note: for kernel PTPs, we start with NKPTP of them. as we map + * kernel memory (at uvm_map time) we check to see if we've grown + * the kernel pmap. if so, we call the optional function + * pmap_growkernel() to grow the kernel PTPs in advance. 
+ * + * [C] pv_entry structures + * - plan 1: try to allocate one off the free list + * => success: done! + * => failure: no more free pv_entrys on the list + * - plan 2: try to allocate a new pv_page to add a chunk of + * pv_entrys to the free list + * [a] obtain a free, unmapped, VA in kmem_map. either + * we have one saved from a previous call, or we allocate + * one now using a "vm_map_lock_try" in uvm_map + * => success: we have an unmapped VA, continue to [b] + * => failure: unable to lock kmem_map or out of VA in it. + * move on to plan 3. + * [b] allocate a page in kmem_object for the VA + * => success: map it in, free the pv_entry's, DONE! + * => failure: kmem_object locked, no free vm_pages, etc. + * save VA for later call to [a], go to plan 3. + * If we fail, we simply let pmap_enter() tell UVM about it. + */ + +/* + * locking + * + * we have the following locks that we must contend with: + * + * "normal" locks: + * + * - pmap_main_lock + * this lock is used to prevent deadlock and/or provide mutex + * access to the pmap system. most operations lock the pmap + * structure first, then they lock the pv_lists (if needed). + * however, some operations such as pmap_page_protect lock + * the pv_lists and then lock pmaps. in order to prevent a + * cycle, we require a mutex lock when locking the pv_lists + * first. thus, the "pmap = >pv_list" lockers must gain a + * read-lock on pmap_main_lock before locking the pmap. and + * the "pv_list => pmap" lockers must gain a write-lock on + * pmap_main_lock before locking. since only one thread + * can write-lock a lock at a time, this provides mutex. + * + * "simple" locks: + * + * - pmap lock (per pmap, part of uvm_object) + * this lock protects the fields in the pmap structure including + * the non-kernel PDEs in the PDP, and the PTEs. it also locks + * in the alternate PTE space (since that is determined by the + * entry in the PDP). + * + * - pvh_lock (per pv_head) + * this lock protects the pv_entry list which is chained off the + * pv_head structure for a specific managed PA. it is locked + * when traversing the list (e.g. adding/removing mappings, + * syncing R/M bits, etc.) + * + * - pvalloc_lock + * this lock protects the data structures which are used to manage + * the free list of pv_entry structures. + * + * - pmaps_lock + * this lock protects the list of active pmaps (headed by "pmaps"). + * we lock it when adding or removing pmaps from this list. 
+ * + * - pmap_copy_page_lock + * locks the tmp kernel PTE mappings we used to copy data + * + * - pmap_zero_page_lock + * locks the tmp kernel PTE mapping we use to zero a page + * + * - pmap_tmpptp_lock + * locks the tmp kernel PTE mapping we use to look at a PTP + * in another process + * + * XXX: would be nice to have per-CPU VAs for the above 4 + */ + +/* + * locking data structures + */ + +vaddr_t ptp_masks[] = PTP_MASK_INITIALIZER; +int ptp_shifts[] = PTP_SHIFT_INITIALIZER; +unsigned long nkptp[] = NKPTP_INITIALIZER; +unsigned long nkptpmax[] = NKPTPMAX_INITIALIZER; +unsigned long nbpd[] = NBPD_INITIALIZER; +pd_entry_t *normal_pdes[] = PDES_INITIALIZER; +pd_entry_t *alternate_pdes[] = APDES_INITIALIZER; + +/* int nkpde = NKPTP; */ + +static struct lock pmap_main_lock; +static struct simplelock pvalloc_lock; +static struct simplelock pmaps_lock; +static struct simplelock pmap_copy_page_lock; +static struct simplelock pmap_zero_page_lock; +static struct simplelock pmap_tmpptp_lock; + +#define PMAP_MAP_TO_HEAD_LOCK() \ + (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL) +#define PMAP_MAP_TO_HEAD_UNLOCK() \ + (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) + +#define PMAP_HEAD_TO_MAP_LOCK() \ + (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL) +#define PMAP_HEAD_TO_MAP_UNLOCK() \ + (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL) + +/* + * global data structures + */ + +struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */ + +/* + * nkpde is the number of kernel PTPs allocated for the kernel at + * boot time (NKPTP is a compile time override). this number can + * grow dynamically as needed (but once allocated, we never free + * kernel PTPs). + */ + +/* + * pmap_pg_g: if our processor supports PG_G in the PTE then we + * set pmap_pg_g to PG_G (otherwise it is zero). + */ + +int pmap_pg_g = 0; + +#ifdef LARGEPAGES +/* + * pmap_largepages: if our processor supports PG_PS and we are + * using it, this is set to TRUE. + */ + +int pmap_largepages; +#endif + +/* + * i386 physical memory comes in a big contig chunk with a small + * hole toward the front of it... the following 4 paddr_t's + * (shared with machdep.c) describe the physical address space + * of this machine. + */ +paddr_t avail_start; /* PA of first available physical page */ +paddr_t avail_end; /* PA of last available physical page */ + +/* + * other data structures + */ + +static pt_entry_t protection_codes[8]; /* maps MI prot to i386 prot code */ +static boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */ + +/* + * the following two vaddr_t's are used during system startup + * to keep track of how much of the kernel's VM space we have used. + * once the system is started, the management of the remaining kernel + * VM space is turned over to the kernel_map vm_map. 
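+ *
+ * a minimal sketch of the pattern (bootstrap_valloc is a hypothetical
+ * helper; pmap_bootstrap below simply open-codes this for each VA it
+ * hands out):
+ *
+ *	static vaddr_t
+ *	bootstrap_valloc(vsize_t npages)
+ *	{
+ *		vaddr_t va = virtual_avail;
+ *
+ *		virtual_avail += npages * PAGE_SIZE;
+ *		return (va);
+ *	}
+ *
+ * the caller then maps the result itself, typically by locating the
+ * PTEs through the linear mapping (PTE_BASE + pl1_i(va)); once
+ * kernel_map is up, kernel VA comes from uvm_km_alloc() and friends
+ * instead.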
+ */ + +static vaddr_t virtual_avail; /* VA of first free KVA */ +static vaddr_t virtual_end; /* VA of last free KVA */ + + +/* + * pv_page management structures: locked by pvalloc_lock + */ + +TAILQ_HEAD(pv_pagelist, pv_page); +static struct pv_pagelist pv_freepages; /* list of pv_pages with free entrys */ +static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */ +static int pv_nfpvents; /* # of free pv entries */ +static struct pv_page *pv_initpage; /* bootstrap page from kernel_map */ +static vaddr_t pv_cachedva; /* cached VA for later use */ + +#define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */ +#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2)) + /* high water mark */ + +/* + * linked list of all non-kernel pmaps + */ + +static struct pmap_head pmaps; + +/* + * pool that pmap structures are allocated from + */ + +struct pool pmap_pmap_pool; + +/* + * pool and cache that PDPs are allocated from + */ + +struct pool pmap_pdp_pool; +struct pool_cache pmap_pdp_cache; + +int pmap_pdp_ctor(void *, void *, int); + +/* + * special VAs and the PTEs that map them + */ + +static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte; +static caddr_t csrcp, cdstp, zerop, ptpp; +caddr_t vmmap; /* XXX: used by mem.c... it should really uvm_map_reserve it */ + +extern vaddr_t msgbuf_vaddr; +extern paddr_t msgbuf_paddr; + +extern vaddr_t idt_vaddr; /* we allocate IDT early */ +extern paddr_t idt_paddr; + +#if defined(I586_CPU) +/* stuff to fix the pentium f00f bug */ +extern vaddr_t pentium_idt_vaddr; +#endif + + +/* + * local prototypes + */ + +static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t)); +static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */ +#define ALLOCPV_NEED 0 /* need PV now */ +#define ALLOCPV_TRY 1 /* just try to allocate, don't steal */ +#define ALLOCPV_NONEED 2 /* don't need PV, just growing cache */ +static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int)); +static void pmap_enter_pv __P((struct pv_head *, + struct pv_entry *, struct pmap *, + vaddr_t, struct vm_page *)); +static void pmap_free_pv __P((struct pmap *, struct pv_entry *)); +static void pmap_free_pvs __P((struct pmap *, struct pv_entry *)); +static void pmap_free_pv_doit __P((struct pv_entry *)); +static void pmap_free_pvpage __P((void)); +static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t, + pd_entry_t **)); +static struct vm_page *pmap_find_ptp __P((struct pmap *, vaddr_t, paddr_t, + int)); +static void pmap_freepage __P((struct pmap *, struct vm_page *, + int)); +static void pmap_free_ptp __P((struct pmap *, struct vm_page *, + vaddr_t, pt_entry_t *, + pd_entry_t **)); +static boolean_t pmap_is_curpmap __P((struct pmap *)); +static void pmap_map_ptes __P((struct pmap *, pt_entry_t **, + pd_entry_t ***)); +static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *, + vaddr_t)); +static void pmap_do_remove __P((struct pmap *, vaddr_t, + vaddr_t, int)); +static boolean_t pmap_remove_pte __P((struct pmap *, struct vm_page *, + pt_entry_t *, vaddr_t, int)); +static void pmap_remove_ptes __P((struct pmap *, + struct pmap_remove_record *, + struct vm_page *, vaddr_t, + vaddr_t, vaddr_t, int)); +#define PMAP_REMOVE_ALL 0 /* remove all mappings */ +#define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */ +static vaddr_t pmap_tmpmap_pa __P((paddr_t)); +static pt_entry_t *pmap_tmpmap_pvepte __P((struct pv_entry *)); +static void pmap_tmpunmap_pa __P((void)); +static void pmap_tmpunmap_pvepte 
__P((struct pv_entry *)); +static void pmap_unmap_ptes __P((struct pmap *)); +static boolean_t pmap_get_physpage __P((vaddr_t, int, paddr_t *)); +static boolean_t pmap_pdes_valid __P((vaddr_t, pd_entry_t **, + pd_entry_t *)); +static void pmap_alloc_level __P((pd_entry_t **, vaddr_t, int, + unsigned long *)); + +#ifdef DIAGNOSTIC +static void pmap_dump_obj(struct pmap *, int); +#endif +#if 0 +static void pmap_check_ptp(struct vm_page *, int, vaddr_t); +#endif + + +/* + * p m a p i n l i n e h e l p e r f u n c t i o n s + */ + +/* + * pmap_is_curpmap: is this pmap the one currently loaded [in %cr3]? + * of course the kernel is always loaded + */ + +__inline static boolean_t +pmap_is_curpmap(pmap) + struct pmap *pmap; +{ + return((pmap == pmap_kernel()) || + (pmap->pm_pdirpa == (paddr_t) rcr3())); +} + +/* + * pmap_tmpmap_pa: map a page in for tmp usage + * + * => returns with pmap_tmpptp_lock held + */ + +__inline static vaddr_t +pmap_tmpmap_pa(pa) + paddr_t pa; +{ + simple_lock(&pmap_tmpptp_lock); +#if defined(DIAGNOSTIC) + if (*ptp_pte) + panic("pmap_tmpmap_pa: ptp_pte in use?"); +#endif + *ptp_pte = PG_V | PG_RW | pa; /* always a new mapping */ + return((vaddr_t)ptpp); +} + +/* + * pmap_tmpunmap_pa: unmap a tmp use page (undoes pmap_tmpmap_pa) + * + * => we release pmap_tmpptp_lock + */ + +__inline static void +pmap_tmpunmap_pa() +{ +#if defined(DIAGNOSTIC) + if (!pmap_valid_entry(*ptp_pte)) + panic("pmap_tmpunmap_pa: our pte invalid?"); +#endif + *ptp_pte = 0; /* zap! */ + pmap_update_pg((vaddr_t)ptpp); + simple_unlock(&pmap_tmpptp_lock); +} + +#if 0 +static void +pmap_check_ptp(struct vm_page *ptp, int level, vaddr_t va) +{ + int i, count; + u_int64_t *p; + + p = (u_int64_t *)pmap_tmpmap_pa(VM_PAGE_TO_PHYS(ptp)); + for (count = i = 0; i < 512; i++) + if (p[i] != 0) + count++; + pmap_tmpunmap_pa(); + if (count != ptp->wire_count - 1) { + printf("va %lx level %d: ptp bad: %d should be %d\n", + va, level, ptp->wire_count - 1, count); + __asm__("xchgw %bx,%bx\n"); + } +} +#endif + +/* + * pmap_tmpmap_pvepte: get a quick mapping of a PTE for a pv_entry + * + * => do NOT use this on kernel mappings [why? because pv_ptp may be NULL] + * => we may grab pmap_tmpptp_lock and return with it held + */ + +__inline static pt_entry_t * +pmap_tmpmap_pvepte(pve) + struct pv_entry *pve; +{ +#ifdef DIAGNOSTIC + if (pve->pv_pmap == pmap_kernel()) + panic("pmap_tmpmap_pvepte: attempt to map kernel"); +#endif + + /* is it current pmap? use direct mapping... */ + if (pmap_is_curpmap(pve->pv_pmap)) + return(vtopte(pve->pv_va)); + + return(((pt_entry_t *)pmap_tmpmap_pa(VM_PAGE_TO_PHYS(pve->pv_ptp))) + + ptei((unsigned long)pve->pv_va)); +} + +/* + * pmap_tmpunmap_pvepte: release a mapping obtained with pmap_tmpmap_pvepte + * + * => we will release pmap_tmpptp_lock if we hold it + */ + +__inline static void +pmap_tmpunmap_pvepte(pve) + struct pv_entry *pve; +{ + /* was it current pmap? 
if so, return */ + if (pmap_is_curpmap(pve->pv_pmap)) + return; + + pmap_tmpunmap_pa(); +} + +/* + * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in + * + * => we lock enough pmaps to keep things locked in + * => must be undone with pmap_unmap_ptes before returning + */ + +__inline static void +pmap_map_ptes(pmap, ptepp, pdeppp) + struct pmap *pmap; + pt_entry_t **ptepp; + pd_entry_t ***pdeppp; +{ + pd_entry_t opde, npde; + int off = 0, mcase = 0; +#if 0 + int i; +#endif + + /* the kernel's pmap is always accessible */ + if (pmap == pmap_kernel()) { + *ptepp = PTE_BASE; + *pdeppp = normal_pdes; + off = PDIR_SLOT_KERN; +#if 0 + normal_pdes[PTP_LEVELS - 2] = pmap->pm_pdir; +#endif + mcase = 1; + return; + } + + /* if curpmap then we are always mapped */ + if (pmap_is_curpmap(pmap)) { + simple_lock(&pmap->pm_lock); + *ptepp = PTE_BASE; + *pdeppp = normal_pdes; +#if 0 + normal_pdes[PTP_LEVELS - 2] = pmap->pm_pdir; +#endif + mcase = 2; + return; + } + + /* need to lock both curpmap and pmap: use ordered locking */ + if ((unsigned long) pmap < (unsigned long) curpcb->pcb_pmap) { + simple_lock(&pmap->pm_lock); + simple_lock(&curpcb->pcb_pmap->pm_lock); + } else { + simple_lock(&curpcb->pcb_pmap->pm_lock); + simple_lock(&pmap->pm_lock); + } + + mcase = 3; + + /* need to load a new alternate pt space into curpmap? */ + opde = *APDP_PDE; + if (!pmap_valid_entry(opde) || (opde & PG_FRAME) != pmap->pm_pdirpa) { + npde = (pd_entry_t) (pmap->pm_pdirpa | PG_RW | PG_V); +#if 0 + pmap->pm_pdir[PDIR_SLOT_APTE] = npde; +#endif + *APDP_PDE = npde; + if (pmap_valid_entry(opde)) + tlbflush(); + } + *ptepp = APTE_BASE; + *pdeppp = alternate_pdes; +#if 0 + alternate_pdes[PTP_LEVELS - 2] = pmap->pm_pdir; + for (i = off; i < NTOPLEVEL_PDES - off; i++) { + if ((*pdeppp)[0][i] != pmap->pm_pdir[i]) { + printf("%x vs. %x at off %d\n", + (unsigned)(*pdeppp)[0][i], pmap->pm_pdir[i], i); + printf("pdirpa %lx PDP_BASE %p APDP_BASE %p " + "pdeppp[0] %p\n", + (unsigned long)pmap->pm_pdirpa, PDP_BASE, APDP_BASE, + (*pdeppp)[0]); + panic("pdir mismatch case %d off", mcase); + } + } +#endif +} + +/* + * pmap_unmap_ptes: unlock the PTE mapping of "pmap" + */ + +__inline static void +pmap_unmap_ptes(pmap) + struct pmap *pmap; +{ + if (pmap == pmap_kernel()) { + return; + } + if (pmap_is_curpmap(pmap)) { + simple_unlock(&pmap->pm_lock); + } else { + simple_unlock(&pmap->pm_lock); + simple_unlock(&curpcb->pcb_pmap->pm_lock); + } +} + +/* + * p m a p k e n t e r f u n c t i o n s + * + * functions to quickly enter/remove pages from the kernel address + * space. pmap_kremove is exported to MI kernel. we make use of + * the recursive PTE mappings. + */ + +/* + * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking + * + * => no need to lock anything, assume va is already allocated + * => should be faster than normal pmap enter function + */ + +void +pmap_kenter_pa(va, pa, prot) + vaddr_t va; + paddr_t pa; + vm_prot_t prot; +{ + pt_entry_t *pte, opte; + + if (va < VM_MIN_KERNEL_ADDRESS) + pte = vtopte(va); + else + pte = kvtopte(va); + opte = *pte; +#ifdef LARGEPAGES + /* XXX For now... */ + if (opte & PG_PS) + panic("pmap_kenter_pa: PG_PS"); +#endif + *pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) | + PG_V | pmap_pg_g; /* zap! 
*/ + if (pmap_valid_entry(opte)) + pmap_update_pg(va); +} + +/* + * pmap_kremove: remove a kernel mapping(s) without R/M (pv_entry) tracking + * + * => no need to lock anything + * => caller must dispose of any vm_page mapped in the va range + * => note: not an inline function + * => we assume the va is page aligned and the len is a multiple of PAGE_SIZE + * => we assume kernel only unmaps valid addresses and thus don't bother + * checking the valid bit before doing TLB flushing + */ + +void +pmap_kremove(va, len) + vaddr_t va; + vsize_t len; +{ + pt_entry_t *pte; + + len >>= PAGE_SHIFT; + for ( /* null */ ; len ; len--, va += PAGE_SIZE) { + if (va < VM_MIN_KERNEL_ADDRESS) + pte = vtopte(va); + else + pte = kvtopte(va); +#ifdef LARGEPAGES + /* XXX For now... */ + if (*pte & PG_PS) + panic("pmap_kremove: PG_PS"); +#endif +#ifdef DIAGNOSTIC + if (*pte & PG_PVLIST) + panic("pmap_kremove: PG_PVLIST mapping for 0x%lx\n", + va); +#endif + *pte = 0; /* zap! */ +#if defined(I386_CPU) + if (cpu_class != CPUCLASS_386) +#endif + pmap_update_pg(va); + } +#if defined(I386_CPU) + if (cpu_class == CPUCLASS_386) + tlbflush(); +#endif +} + +/* + * p m a p i n i t f u n c t i o n s + * + * pmap_bootstrap and pmap_init are called during system startup + * to init the pmap module. pmap_bootstrap() does a low level + * init just to get things rolling. pmap_init() finishes the job. + */ + +/* + * pmap_bootstrap: get the system in a state where it can run with VM + * properly enabled (called before main()). the VM system is + * fully init'd later... + * + * => on i386, locore.s has already enabled the MMU by allocating + * a PDP for the kernel, and nkpde PTP's for the kernel. + * => kva_start is the first free virtual address in kernel space + */ + +void +pmap_bootstrap(kva_start) + vaddr_t kva_start; +{ + struct pmap *kpm; + vaddr_t kva; + pt_entry_t *pte; + int i; + unsigned long p1i; + + /* + * set up our local static global vars that keep track of the + * usage of KVM before kernel_map is set up + */ + + virtual_avail = kva_start; /* first free KVA */ + virtual_end = VM_MAX_KERNEL_ADDRESS; /* last KVA */ + + /* + * set up protection_codes: we need to be able to convert from + * a MI protection code (some combo of VM_PROT...) to something + * we can jam into a i386 PTE. + */ + + protection_codes[VM_PROT_NONE] = 0; /* --- */ + protection_codes[VM_PROT_EXECUTE] = PG_RO; /* --x */ + protection_codes[VM_PROT_READ] = PG_RO; /* -r- */ + protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO; /* -rx */ + protection_codes[VM_PROT_WRITE] = PG_RW; /* w-- */ + protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;/* w-x */ + protection_codes[VM_PROT_WRITE|VM_PROT_READ] = PG_RW; /* wr- */ + protection_codes[VM_PROT_ALL] = PG_RW; /* wrx */ + + /* + * now we init the kernel's pmap + * + * the kernel pmap's pm_obj is not used for much. however, in + * user pmaps the pm_obj contains the list of active PTPs. + * the pm_obj currently does not have a pager. it might be possible + * to add a pager that would allow a process to read-only mmap its + * own page tables (fast user level vtophys?). this may or may not + * be useful. 
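+ *
+ * (before moving on, a small worked example of the protection_codes[]
+ *  table filled in above, assuming it is indexed directly with the MI
+ *  protection bits as on the i386:
+ *
+ *	protection_codes[VM_PROT_READ]			== PG_RO
+ *	protection_codes[VM_PROT_READ|VM_PROT_WRITE]	== PG_RW
+ *	protection_codes[VM_PROT_EXECUTE]		== PG_RO
+ *
+ *  in other words only the write bit varies; execute permission is
+ *  not represented separately in the PTE as set up here.)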
+ */ + + kpm = pmap_kernel(); + for (i = 0; i < PTP_LEVELS - 1; i++) { + simple_lock_init(&kpm->pm_obj[i].vmobjlock); + kpm->pm_obj[i].pgops = NULL; + TAILQ_INIT(&kpm->pm_obj[i].memq); + kpm->pm_obj[i].uo_npages = 0; + kpm->pm_obj[i].uo_refs = 1; + kpm->pm_ptphint[i] = NULL; + } + memset(&kpm->pm_list, 0, sizeof(kpm->pm_list)); /* pm_list not used */ + kpm->pm_pdir = (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE); + kpm->pm_pdirpa = (u_int32_t) proc0.p_addr->u_pcb.pcb_cr3; + kpm->pm_stats.wired_count = kpm->pm_stats.resident_count = + btop(kva_start - VM_MIN_KERNEL_ADDRESS); + + /* + * the above is just a rough estimate and not critical to the proper + * operation of the system. + */ + + curpcb->pcb_pmap = kpm; /* proc0's pcb */ + + /* + * enable global TLB entries if they are supported + */ + + if (cpu_feature & CPUID_PGE) { + lcr4(rcr4() | CR4_PGE); /* enable hardware (via %cr4) */ + pmap_pg_g = PG_G; /* enable software */ + + /* add PG_G attribute to already mapped kernel pages */ + for (kva = VM_MIN_KERNEL_ADDRESS ; kva < virtual_avail ; + kva += PAGE_SIZE) { + p1i = pl1_i(kva); + if (pmap_valid_entry(PTE_BASE[p1i])) + PTE_BASE[p1i] |= PG_G; + } + } + +#ifdef LARGEPAGES + /* + * enable large pages of they are supported. + */ + + if (cpu_feature & CPUID_PSE) { + paddr_t pa; + vaddr_t kva_end; + pd_entry_t *pde; + extern char _etext; + + lcr4(rcr4() | CR4_PSE); /* enable hardware (via %cr4) */ + pmap_largepages = 1; /* enable software */ + + /* + * the TLB must be flushed after enabling large pages + * on Pentium CPUs, according to section 3.6.2.2 of + * "Intel Architecture Software Developer's Manual, + * Volume 3: System Programming". + */ + tlbflush(); + + /* + * now, remap the kernel text using large pages. we + * assume that the linker has properly aligned the + * .data segment to a 4MB boundary. + */ + kva_end = roundup((vaddr_t)&_etext, NBPD); + for (pa = 0, kva = KERNBASE; kva < kva_end; + kva += NBPD, pa += NBPD) { + pde = &kpm->pm_pdir[pdei(kva)]; + *pde = pa | pmap_pg_g | PG_PS | + PG_KR | PG_V; /* zap! */ + tlbflush(); + } + } +#endif /* LARGEPAGES */ + + /* + * now we allocate the "special" VAs which are used for tmp mappings + * by the pmap (and other modules). we allocate the VAs by advancing + * virtual_avail (note that there are no pages mapped at these VAs). + * we find the PTE that maps the allocated VA via the linear PTE + * mapping. + */ + + pte = PTE_BASE + pl1_i(virtual_avail); + + csrcp = (caddr_t) virtual_avail; csrc_pte = pte; /* allocate */ + virtual_avail += PAGE_SIZE; pte++; /* advance */ + + cdstp = (caddr_t) virtual_avail; cdst_pte = pte; + virtual_avail += PAGE_SIZE; pte++; + + zerop = (caddr_t) virtual_avail; zero_pte = pte; + virtual_avail += PAGE_SIZE; pte++; + + ptpp = (caddr_t) virtual_avail; ptp_pte = pte; + virtual_avail += PAGE_SIZE; pte++; + + /* XXX: vmmap used by mem.c... 
should be uvm_map_reserve */ + vmmap = (char *)virtual_avail; /* don't need pte */ + virtual_avail += PAGE_SIZE; pte++; + + msgbuf_vaddr = virtual_avail; /* don't need pte */ + virtual_avail += round_page(MSGBUFSIZE); pte++; + + idt_vaddr = virtual_avail; /* don't need pte */ + virtual_avail += 2 * PAGE_SIZE; pte += 2; + idt_paddr = avail_start; /* steal a page */ + avail_start += 2 * PAGE_SIZE; + +#if defined(I586_CPU) + /* pentium f00f bug stuff */ + pentium_idt_vaddr = virtual_avail; /* don't need pte */ + virtual_avail += PAGE_SIZE; pte++; +#endif + + /* + * now we reserve some VM for mapping pages when doing a crash dump + */ + + virtual_avail = reserve_dumppages(virtual_avail); + + /* + * init the static-global locks and global lists. + */ + + spinlockinit(&pmap_main_lock, "pmaplk", 0); + simple_lock_init(&pvalloc_lock); + simple_lock_init(&pmaps_lock); + simple_lock_init(&pmap_copy_page_lock); + simple_lock_init(&pmap_zero_page_lock); + simple_lock_init(&pmap_tmpptp_lock); + LIST_INIT(&pmaps); + TAILQ_INIT(&pv_freepages); + TAILQ_INIT(&pv_unusedpgs); + + /* + * initialize the pmap pool. + */ + + pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", + 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP); + + /* + * initialize the PDE pool and cache. + */ + + pool_init(&pmap_pdp_pool, PAGE_SIZE, 0, 0, 0, "pdppl", + 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP); + pool_cache_init(&pmap_pdp_cache, &pmap_pdp_pool, + pmap_pdp_ctor, NULL, NULL); + + /* + * ensure the TLB is sync'd with reality by flushing it... + */ + + tlbflush(); +} + +/* + * pmap_init: called from uvm_init, our job is to get the pmap + * system ready to manage mappings... this mainly means initing + * the pv_entry stuff. + */ + +void +pmap_init() +{ + int npages, lcv, i; + vaddr_t addr; + vsize_t s; + + /* + * compute the number of pages we have and then allocate RAM + * for each pages' pv_head and saved attributes. + */ + + npages = 0; + for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) + npages += (vm_physmem[lcv].end - vm_physmem[lcv].start); + s = (vsize_t) (sizeof(struct pv_head) * npages + + sizeof(char) * npages); + s = round_page(s); /* round up */ + addr = (vaddr_t) uvm_km_zalloc(kernel_map, s); + if (addr == 0) + panic("pmap_init: unable to allocate pv_heads"); + + /* + * init all pv_head's and attrs in one memset + */ + + /* allocate pv_head stuff first */ + for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { + vm_physmem[lcv].pmseg.pvhead = (struct pv_head *) addr; + addr = (vaddr_t)(vm_physmem[lcv].pmseg.pvhead + + (vm_physmem[lcv].end - vm_physmem[lcv].start)); + for (i = 0; + i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) { + simple_lock_init( + &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock); + } + } + + /* now allocate attrs */ + for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) { + vm_physmem[lcv].pmseg.attrs = (char *) addr; + addr = (vaddr_t)(vm_physmem[lcv].pmseg.attrs + + (vm_physmem[lcv].end - vm_physmem[lcv].start)); + } + + /* + * now we need to free enough pv_entry structures to allow us to get + * the kmem_map/kmem_object allocated and inited (done after this + * function is finished). to do this we allocate one bootstrap page out + * of kernel_map and use it to provide an initial pool of pv_entry + * structures. we never free this page. 
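+ *
+ * (for a rough feel for the numbers -- a sketch, assuming the usual
+ *  pv_page layout of a small pv_page_info header followed by an array
+ *  of pv_entry structures in a single PAGE_SIZE page:
+ *
+ *	PVE_PER_PVPAGE ~ (PAGE_SIZE - sizeof(struct pv_page_info)) /
+ *			  sizeof(struct pv_entry)
+ *	PVE_LOWAT      = PVE_PER_PVPAGE / 2
+ *	PVE_HIWAT      = PVE_LOWAT + 2 * PVE_PER_PVPAGE
+ *
+ *  pmap_alloc_pv tops the pool up once pv_nfpvents drops below
+ *  PVE_LOWAT, and pmap_free_pv/pmap_free_pvs hand whole unused
+ *  pv_pages back once it climbs past PVE_HIWAT.)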
+ */ + + pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE); + if (pv_initpage == NULL) + panic("pmap_init: pv_initpage"); + pv_cachedva = 0; /* a VA we have allocated but not used yet */ + pv_nfpvents = 0; + (void) pmap_add_pvpage(pv_initpage, FALSE); + + /* + * done: pmap module is up (and ready for business) + */ + + pmap_initialized = TRUE; +} + +/* + * p v _ e n t r y f u n c t i o n s + */ + +/* + * pv_entry allocation functions: + * the main pv_entry allocation functions are: + * pmap_alloc_pv: allocate a pv_entry structure + * pmap_free_pv: free one pv_entry + * pmap_free_pvs: free a list of pv_entrys + * + * the rest are helper functions + */ + +/* + * pmap_alloc_pv: inline function to allocate a pv_entry structure + * => we lock pvalloc_lock + * => if we fail, we call out to pmap_alloc_pvpage + * => 3 modes: + * ALLOCPV_NEED = we really need a pv_entry, even if we have to steal it + * ALLOCPV_TRY = we want a pv_entry, but not enough to steal + * ALLOCPV_NONEED = we are trying to grow our free list, don't really need + * one now + * + * "try" is for optional functions like pmap_copy(). + */ + +__inline static struct pv_entry * +pmap_alloc_pv(pmap, mode) + struct pmap *pmap; + int mode; +{ + struct pv_page *pvpage; + struct pv_entry *pv; + + simple_lock(&pvalloc_lock); + + if (pv_freepages.tqh_first != NULL) { + pvpage = pv_freepages.tqh_first; + pvpage->pvinfo.pvpi_nfree--; + if (pvpage->pvinfo.pvpi_nfree == 0) { + /* nothing left in this one? */ + TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list); + } + pv = pvpage->pvinfo.pvpi_pvfree; +#ifdef DIAGNOSTIC + if (pv == NULL) + panic("pmap_alloc_pv: pvpi_nfree off"); +#endif + pvpage->pvinfo.pvpi_pvfree = pv->pv_next; + pv_nfpvents--; /* took one from pool */ + } else { + pv = NULL; /* need more of them */ + } + + /* + * if below low water mark or we didn't get a pv_entry we try and + * create more pv_entrys ... + */ + + if (pv_nfpvents < PVE_LOWAT || pv == NULL) { + if (pv == NULL) + pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ? + mode : ALLOCPV_NEED); + else + (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED); + } + + simple_unlock(&pvalloc_lock); + return(pv); +} + +/* + * pmap_alloc_pvpage: maybe allocate a new pvpage + * + * if need_entry is false: try and allocate a new pv_page + * if need_entry is true: try and allocate a new pv_page and return a + * new pv_entry from it. if we are unable to allocate a pv_page + * we make a last ditch effort to steal a pv_page from some other + * mapping. if that fails, we panic... + * + * => we assume that the caller holds pvalloc_lock + */ + +static struct pv_entry * +pmap_alloc_pvpage(pmap, mode) + struct pmap *pmap; + int mode; +{ + struct vm_page *pg; + struct pv_page *pvpage; + struct pv_entry *pv; + int s; + + /* + * if we need_entry and we've got unused pv_pages, allocate from there + */ + + if (mode != ALLOCPV_NONEED && pv_unusedpgs.tqh_first != NULL) { + + /* move it to pv_freepages list */ + pvpage = pv_unusedpgs.tqh_first; + TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list); + TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list); + + /* allocate a pv_entry */ + pvpage->pvinfo.pvpi_nfree--; /* can't go to zero */ + pv = pvpage->pvinfo.pvpi_pvfree; +#ifdef DIAGNOSTIC + if (pv == NULL) + panic("pmap_alloc_pvpage: pvpi_nfree off"); +#endif + pvpage->pvinfo.pvpi_pvfree = pv->pv_next; + + pv_nfpvents--; /* took one from pool */ + return(pv); + } + + /* + * see if we've got a cached unmapped VA that we can map a page in. + * if not, try to allocate one. 
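+ *
+ * (footnote on the allocation modes that end up here -- illustrative
+ *  call patterns only:
+ *
+ *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);    caller must have one
+ *	pve = pmap_alloc_pv(pmap, ALLOCPV_TRY);     optional work such as
+ *						    pmap_copy, NULL is fine
+ *	(void) pmap_alloc_pv(pmap, ALLOCPV_NONEED); just grow the free list
+ *
+ *  whatever the mode, everything below is "try" only: the VA is
+ *  reserved with UVM_KMF_TRYLOCK|UVM_KMF_VALLOC and on any failure we
+ *  return NULL rather than sleep with pvalloc_lock (and possibly a
+ *  pmap) held.)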
+ */ + + s = splvm(); /* must protect kmem_map/kmem_object with splvm! */ + if (pv_cachedva == 0) { + pv_cachedva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, + PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC); + if (pv_cachedva == 0) { + splx(s); + return (NULL); + } + } + + /* + * we have a VA, now let's try and allocate a page in the object + * note: we are still holding splvm to protect kmem_object + */ + + if (!simple_lock_try(&uvmexp.kmem_object->vmobjlock)) { + splx(s); + return (NULL); + } + + pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva - + vm_map_min(kernel_map), + NULL, UVM_PGA_USERESERVE); + if (pg) + pg->flags &= ~PG_BUSY; /* never busy */ + + simple_unlock(&uvmexp.kmem_object->vmobjlock); + splx(s); + /* splvm now dropped */ + + if (pg == NULL) + return (NULL); + + /* + * add a mapping for our new pv_page and free its entrys (save one!) + * + * NOTE: If we are allocating a PV page for the kernel pmap, the + * pmap is already locked! (...but entering the mapping is safe...) + */ + + pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL); + pmap_update(); + pvpage = (struct pv_page *) pv_cachedva; + pv_cachedva = 0; + return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED)); +} + +/* + * pmap_add_pvpage: add a pv_page's pv_entrys to the free list + * + * => caller must hold pvalloc_lock + * => if need_entry is true, we allocate and return one pv_entry + */ + +static struct pv_entry * +pmap_add_pvpage(pvp, need_entry) + struct pv_page *pvp; + boolean_t need_entry; +{ + int tofree, lcv; + + /* do we need to return one? */ + tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE; + + pvp->pvinfo.pvpi_pvfree = NULL; + pvp->pvinfo.pvpi_nfree = tofree; + for (lcv = 0 ; lcv < tofree ; lcv++) { + pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree; + pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv]; + } + if (need_entry) + TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list); + else + TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list); + pv_nfpvents += tofree; + return((need_entry) ? &pvp->pvents[lcv] : NULL); +} + +/* + * pmap_free_pv_doit: actually free a pv_entry + * + * => do not call this directly! instead use either + * 1. pmap_free_pv ==> free a single pv_entry + * 2. pmap_free_pvs => free a list of pv_entrys + * => we must be holding pvalloc_lock + */ + +__inline static void +pmap_free_pv_doit(pv) + struct pv_entry *pv; +{ + struct pv_page *pvp; + + pvp = (struct pv_page *) x86_trunc_page(pv); + pv_nfpvents++; + pvp->pvinfo.pvpi_nfree++; + + /* nfree == 1 => fully allocated page just became partly allocated */ + if (pvp->pvinfo.pvpi_nfree == 1) { + TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list); + } + + /* free it */ + pv->pv_next = pvp->pvinfo.pvpi_pvfree; + pvp->pvinfo.pvpi_pvfree = pv; + + /* + * are all pv_page's pv_entry's free? move it to unused queue. + */ + + if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) { + TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list); + TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list); + } +} + +/* + * pmap_free_pv: free a single pv_entry + * + * => we gain the pvalloc_lock + */ + +__inline static void +pmap_free_pv(pmap, pv) + struct pmap *pmap; + struct pv_entry *pv; +{ + simple_lock(&pvalloc_lock); + pmap_free_pv_doit(pv); + + /* + * Can't free the PV page if the PV entries were associated with + * the kernel pmap; the pmap is already locked. 
+ */ + if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL && + pmap != pmap_kernel()) + pmap_free_pvpage(); + + simple_unlock(&pvalloc_lock); +} + +/* + * pmap_free_pvs: free a list of pv_entrys + * + * => we gain the pvalloc_lock + */ + +__inline static void +pmap_free_pvs(pmap, pvs) + struct pmap *pmap; + struct pv_entry *pvs; +{ + struct pv_entry *nextpv; + + simple_lock(&pvalloc_lock); + + for ( /* null */ ; pvs != NULL ; pvs = nextpv) { + nextpv = pvs->pv_next; + pmap_free_pv_doit(pvs); + } + + /* + * Can't free the PV page if the PV entries were associated with + * the kernel pmap; the pmap is already locked. + */ + if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL && + pmap != pmap_kernel()) + pmap_free_pvpage(); + + simple_unlock(&pvalloc_lock); +} + + +/* + * pmap_free_pvpage: try and free an unused pv_page structure + * + * => assume caller is holding the pvalloc_lock and that + * there is a page on the pv_unusedpgs list + * => if we can't get a lock on the kmem_map we try again later + * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows + * that if we can lock the kmem_map then we are not already + * holding kmem_object's lock. + */ + +static void +pmap_free_pvpage() +{ + int s; + struct vm_map *map; + struct vm_map_entry *dead_entries; + struct pv_page *pvp; + + s = splvm(); /* protect kmem_map */ + + pvp = pv_unusedpgs.tqh_first; + + /* + * note: watch out for pv_initpage which is allocated out of + * kernel_map rather than kmem_map. + */ + if (pvp == pv_initpage) + map = kernel_map; + else + map = kmem_map; + + if (vm_map_lock_try(map)) { + + /* remove pvp from pv_unusedpgs */ + TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list); + + /* unmap the page */ + dead_entries = NULL; + uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE, + &dead_entries); + vm_map_unlock(map); + + if (dead_entries != NULL) + uvm_unmap_detach(dead_entries, 0); + + pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */ + } + + if (pvp == pv_initpage) + /* no more initpage, we've freed it */ + pv_initpage = NULL; + + splx(s); +} + +/* + * main pv_entry manipulation functions: + * pmap_enter_pv: enter a mapping onto a pv_head list + * pmap_remove_pv: remove a mappiing from a pv_head list + * + * NOTE: pmap_enter_pv expects to lock the pvh itself + * pmap_remove_pv expects te caller to lock the pvh before calling + */ + +/* + * pmap_enter_pv: enter a mapping onto a pv_head lst + * + * => caller should hold the proper lock on pmap_main_lock + * => caller should have pmap locked + * => we will gain the lock on the pv_head and allocate the new pv_entry + * => caller should adjust ptp's wire_count before calling + */ + +__inline static void +pmap_enter_pv(pvh, pve, pmap, va, ptp) + struct pv_head *pvh; + struct pv_entry *pve; /* preallocated pve for us to use */ + struct pmap *pmap; + vaddr_t va; + struct vm_page *ptp; /* PTP in pmap that maps this VA */ +{ + pve->pv_pmap = pmap; + pve->pv_va = va; + pve->pv_ptp = ptp; /* NULL for kernel pmap */ + simple_lock(&pvh->pvh_lock); /* lock pv_head */ + pve->pv_next = pvh->pvh_list; /* add to ... */ + pvh->pvh_list = pve; /* ... locked list */ + simple_unlock(&pvh->pvh_lock); /* unlock, done! 
*/ +} + +/* + * pmap_remove_pv: try to remove a mapping from a pv_list + * + * => caller should hold proper lock on pmap_main_lock + * => pmap should be locked + * => caller should hold lock on pv_head [so that attrs can be adjusted] + * => caller should adjust ptp's wire_count and free PTP if needed + * => we return the removed pve + */ + +__inline static struct pv_entry * +pmap_remove_pv(pvh, pmap, va) + struct pv_head *pvh; + struct pmap *pmap; + vaddr_t va; +{ + struct pv_entry *pve, **prevptr; + + prevptr = &pvh->pvh_list; /* previous pv_entry pointer */ + pve = *prevptr; + while (pve) { + if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */ + *prevptr = pve->pv_next; /* remove it! */ + break; + } + prevptr = &pve->pv_next; /* previous pointer */ + pve = pve->pv_next; /* advance */ + } + return(pve); /* return removed pve */ +} + +/* + * p t p f u n c t i o n s + */ + +static __inline struct vm_page * +pmap_find_ptp(struct pmap *pmap, vaddr_t va, paddr_t pa, int level) +{ +#if 1 + int lidx = level - 1; + + if (pa != (paddr_t)-1 && pmap->pm_ptphint[lidx] && + pa == VM_PAGE_TO_PHYS(pmap->pm_ptphint[lidx])) { + return (pmap->pm_ptphint[lidx]); + } + return uvm_pagelookup(&pmap->pm_obj[lidx], ptp_va2o(va, level)); +#else + return PHYS_TO_VM_PAGE(pa); +#endif +} + +static __inline void +pmap_freepage(struct pmap *pmap, struct vm_page *ptp, int level) +{ + int lidx; + + lidx = level - 1; + + pmap->pm_stats.resident_count--; + if (pmap->pm_ptphint[lidx] == ptp) + pmap->pm_ptphint[lidx] = pmap->pm_obj[lidx].memq.tqh_first; + ptp->wire_count = 0; + uvm_pagefree(ptp); +} + +static void +pmap_free_ptp(struct pmap *pmap, struct vm_page *ptp, vaddr_t va, + pt_entry_t *ptes, pd_entry_t **pdes) +{ + unsigned long index; + int level; + vaddr_t invaladdr; + level = 1; + + do { + pmap_freepage(pmap, ptp, level); + index = pl_i(va, level + 1); + pdes[level - 1][index] = 0; +#if defined(I386_CPU) + if (cpu_class != CPUCLASS_386) +#endif + { + invaladdr = level == 1 ? (vaddr_t)ptes : + (vaddr_t)pdes[level - 2]; + pmap_update_pg(invaladdr + index * NBPG); + } + if (level < PTP_LEVELS - 1) { + ptp = pmap_find_ptp(pmap, va, (paddr_t)-1, level + 1); + ptp->wire_count--; +#if 0 + pmap_check_ptp(ptp, level, va); +#endif + if (ptp->wire_count > 1) + break; + } + } while (++level < PTP_LEVELS); +} + +/* + * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one) + * + * => pmap should NOT be pmap_kernel() + * => pmap should be locked + */ + + +static struct vm_page * +pmap_get_ptp(struct pmap *pmap, vaddr_t va, pd_entry_t **pdes) +{ + struct vm_page *ptp, *pptp; + int i; + unsigned long index; + pd_entry_t *pva; + paddr_t ppa, pa; + + ptp = NULL; + pa = (paddr_t)-1; + + /* + * Loop through all page table levels seeing if we need to + * add a new page to that level. + */ + for (i = PTP_LEVELS; i > 1; i--) { + /* + * Save values from previous round. + */ + pptp = ptp; + ppa = pa; + + index = pl_i(va, i); + pva = pdes[i - 2]; + + if (pmap_valid_entry(pva[index])) { + ppa = pva[index] & PG_FRAME; + ptp = NULL; + continue; + } + + ptp = uvm_pagealloc(&pmap->pm_obj[i-2], + ptp_va2o(va, i - 1), NULL, + UVM_PGA_USERESERVE|UVM_PGA_ZERO); + + if (ptp == NULL) + return NULL; + + ptp->flags &= ~PG_BUSY; /* never busy */ + ptp->wire_count = 1; + pmap->pm_ptphint[i - 2] = ptp; + pa = VM_PAGE_TO_PHYS(ptp); + pva[index] = (pd_entry_t) (pa | PG_u | PG_RW | PG_V); + pmap->pm_stats.resident_count++; + /* + * If we're not in the top level, increase the + * wire count of the parent page. 
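+ *
+ * (wire_count bookkeeping, spelled out -- this is the same i386
+ *  convention used throughout the file:
+ *
+ *	ptp->wire_count == 1 + number of valid entries it holds
+ *			       (PTEs, or lower-level PTPs)
+ *
+ *  so a freshly allocated level-2 PTP pointing at one new level-1 PTP
+ *  sits at wire_count == 2, and once it drops back to 1 it is empty
+ *  and pmap_free_ptp may reclaim it.)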
+ */ + if (i < PTP_LEVELS) { + if (pptp == NULL) + pptp = pmap_find_ptp(pmap, va, ppa, i); +#ifdef DIAGNOSTIC + if (pptp == NULL) + panic("pde page disappeared"); +#endif + pptp->wire_count++; +#if 0 + pmap_check_ptp(pptp, i, va); +#endif + } + } + + /* + * ptp is not NULL if we just allocated a new ptp. If it's + * still NULL, we must look up the existing one. + */ + if (ptp == NULL) { + ptp = pmap_find_ptp(pmap, va, ppa, 1); +#ifdef DIAGNOSTIC + if (ptp == NULL) { + pmap_dump_obj(pmap, 1); + printf("va %lx ppa %lx\n", (unsigned long)va, + (unsigned long)ppa); + panic("pmap_get_ptp: unmanaged user PTP"); + } +#if 0 + pmap_check_ptp(ptp, 1, va); +#endif +#endif + } + + pmap->pm_ptphint[0] = ptp; + return(ptp); +} + +/* + * p m a p l i f e c y c l e f u n c t i o n s + */ + +/* + * pmap_pdp_ctor: constructor for the PDP cache. + */ + +int +pmap_pdp_ctor(void *arg, void *object, int flags) +{ + pd_entry_t *pdir = object; + paddr_t pdirpa; + int npde; + + /* + * NOTE: The `pmap_lock' is held when the PDP is allocated. + * WE MUST NOT BLOCK! + */ + + /* fetch the physical address of the page directory. */ + (void) pmap_extract(pmap_kernel(), (vaddr_t) pdir, &pdirpa); + + /* zero init area */ + memset(pdir, 0, PDIR_SLOT_PTE * sizeof(pd_entry_t)); + + /* put in recursibve PDE to map the PTEs */ + pdir[PDIR_SLOT_PTE] = pdirpa | PG_V | PG_KW; + + npde = nkptp[PTP_LEVELS - 1]; + + /* put in kernel VM PDEs */ + memcpy(&pdir[PDIR_SLOT_KERN], &PDP_BASE[PDIR_SLOT_KERN], + npde * sizeof(pd_entry_t)); + + /* zero the rest */ + memset(&pdir[PDIR_SLOT_KERN + npde], 0, + (NTOPLEVEL_PDES - (PDIR_SLOT_KERN + npde)) * sizeof(pd_entry_t)); + + return (0); +} + +/* + * pmap_create: create a pmap + * + * => note: old pmap interface took a "size" args which allowed for + * the creation of "software only" pmaps (not in bsd). + */ + +struct pmap * +pmap_create() +{ + struct pmap *pmap; + int i; + + pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); + + /* init uvm_object */ + for (i = 0; i < PTP_LEVELS - 1; i++) { + simple_lock_init(&pmap->pm_obj[i].vmobjlock); + pmap->pm_obj[i].pgops = NULL; /* not a mappable object */ + TAILQ_INIT(&pmap->pm_obj[i].memq); + pmap->pm_obj[i].uo_npages = 0; + pmap->pm_obj[i].uo_refs = 1; + pmap->pm_ptphint[i] = NULL; + } + pmap->pm_stats.wired_count = 0; + pmap->pm_stats.resident_count = 1; /* count the PDP allocd below */ + pmap->pm_flags = 0; + + /* init the LDT */ + pmap->pm_ldt = NULL; + pmap->pm_ldt_len = 0; + pmap->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); + + /* allocate PDP */ + + /* + * we need to lock pmaps_lock to prevent nkpde from changing on + * us. note that there is no need to splvm to protect us from + * malloc since malloc allocates out of a submap and we should + * have already allocated kernel PTPs to cover the range... + * + * NOTE: WE MUST NOT BLOCK WHILE HOLDING THE `pmap_lock'! + */ + + simple_lock(&pmaps_lock); + + /* XXX Need a generic "I want memory" wchan */ + while ((pmap->pm_pdir = + pool_cache_get(&pmap_pdp_cache, PR_NOWAIT)) == NULL) + (void) ltsleep(&lbolt, PVM, "pmapcr", hz >> 3, &pmaps_lock); + + pmap->pm_pdirpa = pmap->pm_pdir[PDIR_SLOT_PTE] & PG_FRAME; + + LIST_INSERT_HEAD(&pmaps, pmap, pm_list); + + simple_unlock(&pmaps_lock); + + return (pmap); +} + +/* + * pmap_destroy: drop reference count on pmap. free pmap if + * reference count goes to zero. 
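+ *
+ * a small worked example of the reference counting (illustrative only;
+ * the real callers live in the MI VM code):
+ *
+ *	pmap = pmap_create();	pm_obj[0].uo_refs == 1
+ *	pmap_reference(pmap);	pm_obj[0].uo_refs == 2 (shared address space)
+ *	pmap_destroy(pmap);	pm_obj[0].uo_refs == 1, nothing freed yet
+ *	pmap_destroy(pmap);	pm_obj[0].uo_refs == 0: remaining PTPs are
+ *				freed, the PDP goes back to pmap_pdp_cache
+ *				and the pmap itself to pmap_pmap_pool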
+ */ + +void +pmap_destroy(pmap) + struct pmap *pmap; +{ + struct vm_page *pg; + int refs; + int i; + + /* + * drop reference count + */ + + simple_lock(&pmap->pm_lock); + refs = --pmap->pm_obj[0].uo_refs; + simple_unlock(&pmap->pm_lock); + if (refs > 0) { + return; + } + + /* + * reference count is zero, free pmap resources and then free pmap. + */ + + /* + * remove it from global list of pmaps + */ + + simple_lock(&pmaps_lock); + LIST_REMOVE(pmap, pm_list); + simple_unlock(&pmaps_lock); + + /* + * free any remaining PTPs + */ + + for (i = 0; i < PTP_LEVELS - 1; i++) { + while (pmap->pm_obj[i].memq.tqh_first != NULL) { + pg = pmap->pm_obj[i].memq.tqh_first; +#ifdef DIAGNOSTIC + if (pg->flags & PG_BUSY) + panic("pmap_release: busy page table page"); +#endif + /* pmap_page_protect? currently no need for it. */ + + pg->wire_count = 0; + uvm_pagefree(pg); + } + } + + /* XXX: need to flush it out of other processor's APTE space? */ + pool_cache_put(&pmap_pdp_cache, pmap->pm_pdir); + +#ifdef USER_LDT + if (pmap->pm_flags & PMF_USER_LDT) { + /* + * no need to switch the LDT; this address space is gone, + * nothing is using it. + */ + ldt_free(pmap); + uvm_km_free(kernel_map, (vaddr_t)pmap->pm_ldt, + pmap->pm_ldt_len); + } +#endif + + pool_put(&pmap_pmap_pool, pmap); +} + +/* + * Add a reference to the specified pmap. + */ + +void +pmap_reference(pmap) + struct pmap *pmap; +{ + simple_lock(&pmap->pm_lock); + pmap->pm_obj[0].uo_refs++; + simple_unlock(&pmap->pm_lock); +} + +#if defined(PMAP_FORK) +/* + * pmap_fork: perform any necessary data structure manipulation when + * a VM space is forked. + */ + +void +pmap_fork(pmap1, pmap2) + struct pmap *pmap1, *pmap2; +{ + simple_lock(&pmap1->pm_lock); + simple_lock(&pmap2->pm_lock); + +#ifdef USER_LDT + /* Copy the LDT, if necessary. */ + if (pmap1->pm_flags & PMF_USER_LDT) { + char *new_ldt; + size_t len; + + len = pmap1->pm_ldt_len; + new_ldt = (char *)uvm_km_alloc(kernel_map, len); + memcpy(new_ldt, pmap1->pm_ldt, len); + pmap2->pm_ldt = new_ldt; + pmap2->pm_ldt_len = pmap1->pm_ldt_len; + pmap2->pm_flags |= PMF_USER_LDT; + ldt_alloc(pmap2, new_ldt, len); + } +#endif /* USER_LDT */ + + simple_unlock(&pmap2->pm_lock); + simple_unlock(&pmap1->pm_lock); +} +#endif /* PMAP_FORK */ + +#ifdef USER_LDT +/* + * pmap_ldt_cleanup: if the pmap has a local LDT, deallocate it, and + * restore the default. 
+ */ + +void +pmap_ldt_cleanup(p) + struct proc *p; +{ + struct pcb *pcb = &p->p_addr->u_pcb; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + char *old_ldt = NULL; + size_t len = 0; + + simple_lock(&pmap->pm_lock); + + if (pmap->pm_flags & PMF_USER_LDT) { + ldt_free(pmap); + pmap->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); + pcb->pcb_ldt_sel = pmap->pm_ldt_sel; + if (pcb == curpcb) + lldt(pcb->pcb_ldt_sel); + old_ldt = pmap->pm_ldt; + len = pmap->pm_ldt_len; + pmap->pm_ldt = NULL; + pmap->pm_ldt_len = 0; + pmap->pm_flags &= ~PMF_USER_LDT; + } + + simple_unlock(&pmap->pm_lock); + + if (old_ldt != NULL) + uvm_km_free(kernel_map, (vaddr_t)old_ldt, len); +} +#endif /* USER_LDT */ + +/* + * pmap_activate: activate a process' pmap (fill in %cr3 and LDT info) + * + * => called from cpu_switch() + * => if proc is the curproc, then load it into the MMU + */ + +void +pmap_activate(p) + struct proc *p; +{ + struct pcb *pcb = &p->p_addr->u_pcb; + struct pmap *pmap = p->p_vmspace->vm_map.pmap; + + pcb->pcb_pmap = pmap; + pcb->pcb_ldt_sel = pmap->pm_ldt_sel; + pcb->pcb_cr3 = pmap->pm_pdirpa; + if (p == curproc) { +#if 0 + printf("pmap_activate: pid %d cr3 %lx\n", p->p_pid, + pcb->pcb_cr3); +#endif + lcr3(pcb->pcb_cr3); + } + if (pcb == curpcb) + lldt(pcb->pcb_ldt_sel); +} + +/* + * pmap_deactivate: deactivate a process' pmap + * + * => XXX: what should this do, if anything? + */ + +void +pmap_deactivate(p) + struct proc *p; +{ +} + +/* + * end of lifecycle functions + */ + +/* + * some misc. functions + */ + +static boolean_t +pmap_pdes_valid(vaddr_t va, pd_entry_t **pdes, pd_entry_t *lastpde) +{ + int i; + unsigned long index; + pd_entry_t pde; + + for (i = PTP_LEVELS; i > 1; i--) { + index = pl_i(va, i); + pde = pdes[i - 2][index]; + if ((pde & PG_V) == 0) + return FALSE; + } + if (lastpde != NULL) + *lastpde = pde; + return TRUE; +} + +/* + * pmap_extract: extract a PA for the given VA + */ + +boolean_t +pmap_extract(pmap, va, pap) + struct pmap *pmap; + vaddr_t va; + paddr_t *pap; +{ + pt_entry_t *ptes, pte; + pd_entry_t pde, **pdes; + + pmap_map_ptes(pmap, &ptes, &pdes); + if (pmap_pdes_valid(va, pdes, &pde) == FALSE) { + pmap_unmap_ptes(pmap); + return FALSE; + } + pte = ptes[pl1_i(va)]; + pmap_unmap_ptes(pmap); + +#ifdef LARGEPAGES + if (pde & PG_PS) { + if (pap != NULL) + *pap = (pde & PG_LGFRAME) | (va & ~PG_LGFRAME); + return (TRUE); + } +#endif + + + if (__predict_true((pte & PG_V) != 0)) { + if (pap != NULL) + *pap = (pte & PG_FRAME) | (va & ~PG_FRAME); + return (TRUE); + } + + return FALSE; +} + + +/* + * vtophys: virtual address to physical address. For use by + * machine-dependent code only. + */ + +paddr_t +vtophys(va) + vaddr_t va; +{ + paddr_t pa; + + if (pmap_extract(pmap_kernel(), va, &pa) == TRUE) + return (pa); + return (0); +} + + +/* + * pmap_virtual_space: used during bootup [pmap_steal_memory] to + * determine the bounds of the kernel virtual addess space. + */ + +void +pmap_virtual_space(startp, endp) + vaddr_t *startp; + vaddr_t *endp; +{ + *startp = virtual_avail; + *endp = virtual_end; +} + +/* + * pmap_map: map a range of PAs into kvm + * + * => used during crash dump + * => XXX: pmap_map() should be phased out? 
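+ * => usage is of the "map this physical range somewhere readable"
+ *    kind, e.g. (illustrative; dumpva stands for KVA previously set
+ *    aside by reserve_dumppages()):
+ *
+ *	va = pmap_map(dumpva, pa, pa + PAGE_SIZE, VM_PROT_READ);
+ *
+ *    note that it returns the VA just past the range it mapped and
+ *    flushes with pmap_update() itself.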
+ */ + +vaddr_t +pmap_map(va, spa, epa, prot) + vaddr_t va; + paddr_t spa, epa; + vm_prot_t prot; +{ + while (spa < epa) { + pmap_enter(pmap_kernel(), va, spa, prot, 0); + va += PAGE_SIZE; + spa += PAGE_SIZE; + } + pmap_update(); + return va; +} + +/* + * pmap_zero_page: zero a page + */ + +void +pmap_zero_page(pa) + paddr_t pa; +{ + + simple_lock(&pmap_zero_page_lock); + *zero_pte = (pa & PG_FRAME) | PG_V | PG_RW; /* map in */ + pmap_update_pg((vaddr_t)zerop); /* flush TLB */ + memset(zerop, 0, PAGE_SIZE); /* zero */ + simple_unlock(&pmap_zero_page_lock); +} + +/* + * pmap_pageidlezero: the same, for the idle loop page zero'er. + * Returns TRUE if the page was zero'd, FALSE if we aborted for + * some reason. + */ + +boolean_t +pmap_pageidlezero(pa) + paddr_t pa; +{ + boolean_t rv = TRUE; + int i, *ptr; + + simple_lock(&pmap_zero_page_lock); + + *zero_pte = (pa & PG_FRAME) | PG_V | PG_RW; /* map in */ + pmap_update_pg((vaddr_t)zerop); /* flush TLB */ + + for (i = 0, ptr = (int *) zerop; i < PAGE_SIZE / sizeof(int); i++) { + if (sched_whichqs != 0) { + /* + * A process has become ready. Abort now, + * so we don't keep it waiting while we + * do slow memory access to finish this + * page. + */ + rv = FALSE; + break; + } + *ptr++ = 0; + } + + simple_unlock(&pmap_zero_page_lock); + + return (rv); +} + +/* + * pmap_copy_page: copy a page + */ + +void +pmap_copy_page(srcpa, dstpa) + paddr_t srcpa, dstpa; +{ + simple_lock(&pmap_copy_page_lock); +#ifdef DIAGNOSTIC + if (*csrc_pte || *cdst_pte) + panic("pmap_copy_page: lock botch"); +#endif + + *csrc_pte = (srcpa & PG_FRAME) | PG_V | PG_RW; + *cdst_pte = (dstpa & PG_FRAME) | PG_V | PG_RW; + memcpy(cdstp, csrcp, PAGE_SIZE); + *csrc_pte = *cdst_pte = 0; /* zap! */ + pmap_update_2pg((vaddr_t)csrcp, (vaddr_t)cdstp); + simple_unlock(&pmap_copy_page_lock); +} + +/* + * p m a p r e m o v e f u n c t i o n s + * + * functions that remove mappings + */ + +/* + * pmap_remove_ptes: remove PTEs from a PTP + * + * => must have proper locking on pmap_master_lock + * => caller must hold pmap's lock + * => PTP must be mapped into KVA + * => PTP should be null if pmap == pmap_kernel() + */ + +static void +pmap_remove_ptes(pmap, pmap_rr, ptp, ptpva, startva, endva, flags) + struct pmap *pmap; + struct pmap_remove_record *pmap_rr; + struct vm_page *ptp; + vaddr_t ptpva; + vaddr_t startva, endva; + int flags; +{ + struct pv_entry *pv_tofree = NULL; /* list of pv_entrys to free */ + struct pv_entry *pve; + pt_entry_t *pte = (pt_entry_t *) ptpva; + pt_entry_t opte; + int bank, off; + + /* + * note that ptpva points to the PTE that maps startva. this may + * or may not be the first PTE in the PTP. + * + * we loop through the PTP while there are still PTEs to look at + * and the wire_count is greater than 1 (because we use the wire_count + * to keep track of the number of real PTEs in the PTP). + */ + + for (/*null*/; startva < endva && (ptp == NULL || ptp->wire_count > 1) + ; pte++, startva += PAGE_SIZE) { + if (!pmap_valid_entry(*pte)) + continue; /* VA not mapped */ + if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W)) { + continue; + } + + opte = *pte; /* save the old PTE */ + *pte = 0; /* zap! */ + if (opte & PG_W) + pmap->pm_stats.wired_count--; + pmap->pm_stats.resident_count--; + + if (pmap_rr) { /* worried about tlb flushing? 
*/ + if (opte & PG_G) { + /* PG_G requires this */ + pmap_update_pg(startva); + } else { + if (pmap_rr->prr_npages < PMAP_RR_MAX) { + pmap_rr->prr_vas[pmap_rr->prr_npages++] + = startva; + } else { + if (pmap_rr->prr_npages == PMAP_RR_MAX) + /* signal an overflow */ + pmap_rr->prr_npages++; + } + } + } + if (ptp) + ptp->wire_count--; /* dropping a PTE */ + + /* + * if we are not on a pv_head list we are done. + */ + + if ((opte & PG_PVLIST) == 0) { +#ifdef DIAGNOSTIC + if (vm_physseg_find(btop(opte & PG_FRAME), &off) + != -1) + panic("pmap_remove_ptes: managed page without " + "PG_PVLIST for 0x%lx", startva); +#endif + continue; + } + + bank = vm_physseg_find(btop(opte & PG_FRAME), &off); +#ifdef DIAGNOSTIC + if (bank == -1) + panic("pmap_remove_ptes: unmanaged page marked " + "PG_PVLIST, va = 0x%lx, pa = 0x%lx", + startva, (u_long)(opte & PG_FRAME)); +#endif + + /* sync R/M bits */ + simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock); + vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M)); + pve = pmap_remove_pv(&vm_physmem[bank].pmseg.pvhead[off], pmap, + startva); + simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock); + + if (pve) { + pve->pv_next = pv_tofree; + pv_tofree = pve; + } + + /* end of "for" loop: time for next pte */ + } + if (pv_tofree) + pmap_free_pvs(pmap, pv_tofree); +} + + +/* + * pmap_remove_pte: remove a single PTE from a PTP + * + * => must have proper locking on pmap_master_lock + * => caller must hold pmap's lock + * => PTP must be mapped into KVA + * => PTP should be null if pmap == pmap_kernel() + * => returns true if we removed a mapping + */ + +static boolean_t +pmap_remove_pte(pmap, ptp, pte, va, flags) + struct pmap *pmap; + struct vm_page *ptp; + pt_entry_t *pte; + vaddr_t va; + int flags; +{ + pt_entry_t opte; + int bank, off; + struct pv_entry *pve; + + if (!pmap_valid_entry(*pte)) + return(FALSE); /* VA not mapped */ + if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W)) { + return(FALSE); + } + + opte = *pte; /* save the old PTE */ + *pte = 0; /* zap! */ + + if (opte & PG_W) + pmap->pm_stats.wired_count--; + pmap->pm_stats.resident_count--; + + if (ptp) + ptp->wire_count--; /* dropping a PTE */ + + if (pmap_is_curpmap(pmap)) + pmap_update_pg(va); /* flush TLB */ + + /* + * if we are not on a pv_head list we are done. 
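+	 *
+	 * (editor's note, not part of the original commit: PG_PVLIST is
+	 * only set by pmap_enter() for pages that vm_physseg_find()
+	 * recognizes, so a hypothetical unmanaged mapping such as
+	 *
+	 *	pmap_enter(pmap_kernel(), va, dev_pa, VM_PROT_READ, 0);
+	 *
+	 * with dev_pa outside managed RAM never gets a pv_entry and
+	 * needs none of the pv list cleanup below.)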
+ */ + + if ((opte & PG_PVLIST) == 0) { +#ifdef DIAGNOSTIC + if (vm_physseg_find(btop(opte & PG_FRAME), &off) != -1) + panic("pmap_remove_pte: managed page without " + "PG_PVLIST for 0x%lx", va); +#endif + return(TRUE); + } + + bank = vm_physseg_find(btop(opte & PG_FRAME), &off); +#ifdef DIAGNOSTIC + if (bank == -1) + panic("pmap_remove_pte: unmanaged page marked " + "PG_PVLIST, va = 0x%lx, pa = 0x%lx", va, + (u_long)(opte & PG_FRAME)); +#endif + + /* sync R/M bits */ + simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock); + vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M)); + pve = pmap_remove_pv(&vm_physmem[bank].pmseg.pvhead[off], pmap, va); + simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock); + + if (pve) + pmap_free_pv(pmap, pve); + return(TRUE); +} + +/* + * pmap_remove: top level mapping removal function + * + * => caller should not be holding any pmap locks + */ + +void +pmap_remove(pmap, sva, eva) + struct pmap *pmap; + vaddr_t sva, eva; +{ + pmap_do_remove(pmap, sva, eva, PMAP_REMOVE_ALL); +} + +/* + * pmap_do_remove: mapping removal guts + * + * => caller should not be holding any pmap locks + */ + +static void +pmap_do_remove(pmap, sva, eva, flags) + struct pmap *pmap; + vaddr_t sva, eva; + int flags; +{ + pt_entry_t *ptes; + pd_entry_t **pdes, pde; + boolean_t result; + paddr_t ptppa; + vaddr_t blkendva; + struct vm_page *ptp; + struct pmap_remove_record pmap_rr, *prr; + + /* + * we lock in the pmap => pv_head direction + */ + + PMAP_MAP_TO_HEAD_LOCK(); + pmap_map_ptes(pmap, &ptes, &pdes); /* locks pmap */ +#if 0 + if (sva < VM_MAXUSER_ADDRESS) + printf("remove va %lx - %lx at pte %p\n", sva, eva, + &ptes[pl1_i(sva)]); +#endif + + /* + * removing one page? take shortcut function. + */ + + if (sva + PAGE_SIZE == eva) { + + if (pmap_pdes_valid(sva, pdes, &pde)) { + + /* PA of the PTP */ + ptppa = pde & PG_FRAME; + + /* get PTP if non-kernel mapping */ + + if (pmap == pmap_kernel()) { + /* we never free kernel PTPs */ + ptp = NULL; + } else { + ptp = pmap_find_ptp(pmap, sva, ptppa, 1); +#ifdef DIAGNOSTIC + if (ptp == NULL) { + pmap_dump_obj(pmap, 1); + printf("va %lx ppa %lx\n", + (unsigned long)sva, + (unsigned long)ptppa); + panic("pmap_remove: unmanaged " + "PTP detected"); + } +#endif + } + + /* do it! */ + result = pmap_remove_pte(pmap, ptp, + &ptes[pl1_i(sva)], sva, flags); + + /* + * if mapping removed and the PTP is no longer + * being used, free it! + */ + + if (result && ptp && ptp->wire_count <= 1) + pmap_free_ptp(pmap, ptp, sva, ptes, pdes); + } + + pmap_unmap_ptes(pmap); /* unlock pmap */ + PMAP_MAP_TO_HEAD_UNLOCK(); + return; + } + + /* + * removing a range of pages: we unmap in PTP sized blocks + * + * if we are the currently loaded pmap, we use prr to keep track + * of the VAs we unload so that we can flush them out of the tlb. + */ + + if (pmap_is_curpmap(pmap)) { + prr = &pmap_rr; + prr->prr_npages = 0; + } else { + prr = NULL; + } + + for (/* null */ ; sva < eva ; sva = blkendva) { + + /* determine range of block */ + blkendva = round_pdr(sva+1); + if (blkendva > eva) + blkendva = eva; + + /* + * XXXCDC: our PTE mappings should never be removed + * with pmap_remove! if we allow this (and why would + * we?) then we end up freeing the pmap's page + * directory page (PDP) before we are finished using + * it when we hit in in the recursive mapping. this + * is BAD. + * + * long term solution is to move the PTEs out of user + * address space. and into kernel address space (up + * with APTE). 
then we can set VM_MAXUSER_ADDRESS to + * be VM_MAX_ADDRESS. + */ + + if (pl_i(sva, PTP_LEVELS) == PDIR_SLOT_PTE) + /* XXXCDC: ugly hack to avoid freeing PDP here */ + continue; + + if (!pmap_pdes_valid(sva, pdes, &pde)) + continue; + + /* PA of the PTP */ + ptppa = pde & PG_FRAME; + + /* get PTP if non-kernel mapping */ + if (pmap == pmap_kernel()) { + /* we never free kernel PTPs */ + ptp = NULL; + } else { + ptp = pmap_find_ptp(pmap, sva, ptppa, 1); +#ifdef DIAGNOSTIC + if (ptp == NULL) { + pmap_dump_obj(pmap, 1); + printf("va %lx ppa %lx\n", + (unsigned long)sva, + (unsigned long)ptppa); + panic("pmap_remove: unmanaged PTP " + "detected"); + } +#endif + } + pmap_remove_ptes(pmap, prr, ptp, + (vaddr_t)&ptes[pl1_i(sva)], sva, blkendva, flags); + + /* if PTP is no longer being used, free it! */ + if (ptp && ptp->wire_count <= 1) { + pmap_free_ptp(pmap, ptp, sva, ptes,pdes); +#if defined(I386_CPU) + /* cancel possible pending pmap update on i386 */ + if (cpu_class == CPUCLASS_386 && prr) + prr->prr_npages = 0; +#endif + } + } + + /* + * if we kept a removal record and removed some pages update the TLB + */ + + if (prr && prr->prr_npages) { +#if defined(I386_CPU) + if (cpu_class == CPUCLASS_386) { + tlbflush(); + } else +#endif + { /* not I386 */ + if (prr->prr_npages > PMAP_RR_MAX) { + tlbflush(); + } else { + while (prr->prr_npages) { + pmap_update_pg( + prr->prr_vas[--prr->prr_npages]); + } + } + } /* not I386 */ + } + pmap_unmap_ptes(pmap); + PMAP_MAP_TO_HEAD_UNLOCK(); +} + +/* + * pmap_page_remove: remove a managed vm_page from all pmaps that map it + * + * => we set pv_head => pmap locking + * => R/M bits are sync'd back to attrs + */ + +void +pmap_page_remove(pg) + struct vm_page *pg; +{ + int bank, off; + struct pv_head *pvh; + struct pv_entry *pve, *npve, **prevptr, *killlist = NULL; + pt_entry_t *ptes, opte; + pd_entry_t **pdes; +#if defined(I386_CPU) + boolean_t needs_update = FALSE; +#endif +#ifdef DIAGNOSTIC + pd_entry_t pde; +#endif + + /* XXX: vm_page should either contain pv_head or have a pointer to it */ + bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off); + if (bank == -1) { + printf("pmap_page_remove: unmanaged page?\n"); + return; + } + + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + if (pvh->pvh_list == NULL) { + return; + } + + /* set pv_head => pmap locking */ + PMAP_HEAD_TO_MAP_LOCK(); + + /* XXX: needed if we hold head->map lock? */ + simple_lock(&pvh->pvh_lock); + + for (prevptr = &pvh->pvh_list, pve = pvh->pvh_list; + pve != NULL; pve = npve) { + npve = pve->pv_next; + pmap_map_ptes(pve->pv_pmap, &ptes, &pdes); /* locks pmap */ + +#ifdef DIAGNOSTIC + if (pve->pv_va >= uvm.pager_sva && pve->pv_va < uvm.pager_eva) { + printf("pmap_page_remove: found pager VA on pv_list\n"); + } + if (pve->pv_ptp && pmap_pdes_valid(pve->pv_va, pdes, &pde) && + (pde & PG_FRAME) != VM_PAGE_TO_PHYS(pve->pv_ptp)) { + printf("pmap_page_remove: pg=%p: va=%lx, pv_ptp=%p\n", + pg, pve->pv_va, pve->pv_ptp); + printf("pmap_page_remove: PTP's phys addr: " + "actual=%lx, recorded=%lx\n", + (unsigned long)(pde & PG_FRAME), + VM_PAGE_TO_PHYS(pve->pv_ptp)); + panic("pmap_page_remove: mapped managed page has " + "invalid pv_ptp field"); + } +#endif + opte = ptes[pl1_i(pve->pv_va)]; +#if 1 /* XXX Work-around for kern/12554. 
*/ + if (opte & PG_W) { +#ifdef DEBUG + printf("pmap_page_remove: wired mapping for " + "0x%lx (wire count %d) not removed\n", + VM_PAGE_TO_PHYS(pg), pg->wire_count); +#endif + prevptr = &pve->pv_next; + pmap_unmap_ptes(pve->pv_pmap); + } +#endif /* kern/12554 */ + ptes[pl1_i(pve->pv_va)] = 0; /* zap! */ + + if (opte & PG_W) + pve->pv_pmap->pm_stats.wired_count--; + pve->pv_pmap->pm_stats.resident_count--; + + if (pmap_is_curpmap(pve->pv_pmap)) { +#if defined(I386_CPU) + if (cpu_class == CPUCLASS_386) + needs_update = TRUE; + else +#endif + pmap_update_pg(pve->pv_va); + } + + /* sync R/M bits */ + vm_physmem[bank].pmseg.attrs[off] |= (opte & (PG_U|PG_M)); + + /* update the PTP reference count. free if last reference. */ + if (pve->pv_ptp) { + pve->pv_ptp->wire_count--; + if (pve->pv_ptp->wire_count <= 1) { + pmap_free_ptp(pve->pv_pmap, pve->pv_ptp, + pve->pv_va, ptes, pdes); +#if defined(I386_CPU) + needs_update = FALSE; +#endif + } + } + pmap_unmap_ptes(pve->pv_pmap); /* unlocks pmap */ + *prevptr = npve; /* remove it */ + pve->pv_next = killlist; /* mark it for death */ + killlist = pve; + } + pmap_free_pvs(NULL, killlist); + pvh->pvh_list = NULL; + simple_unlock(&pvh->pvh_lock); + PMAP_HEAD_TO_MAP_UNLOCK(); +#if defined(I386_CPU) + if (needs_update) + tlbflush(); +#endif +} + +/* + * p m a p a t t r i b u t e f u n c t i o n s + * functions that test/change managed page's attributes + * since a page can be mapped multiple times we must check each PTE that + * maps it by going down the pv lists. + */ + +/* + * pmap_test_attrs: test a page's attributes + * + * => we set pv_head => pmap locking + */ + +boolean_t +pmap_test_attrs(pg, testbits) + struct vm_page *pg; + int testbits; +{ + int bank, off; + char *myattrs; + struct pv_head *pvh; + struct pv_entry *pve; + pt_entry_t *ptes, pte; + pd_entry_t **pdes; + + /* XXX: vm_page should either contain pv_head or have a pointer to it */ + bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off); + if (bank == -1) { + printf("pmap_test_attrs: unmanaged page?\n"); + return(FALSE); + } + + /* + * before locking: see if attributes are already set and if so, + * return! + */ + + myattrs = &vm_physmem[bank].pmseg.attrs[off]; + if (*myattrs & testbits) + return(TRUE); + + /* test to see if there is a list before bothering to lock */ + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + if (pvh->pvh_list == NULL) { + return(FALSE); + } + + /* nope, gonna have to do it the hard way */ + PMAP_HEAD_TO_MAP_LOCK(); + /* XXX: needed if we hold head->map lock? */ + simple_lock(&pvh->pvh_lock); + + for (pve = pvh->pvh_list; pve != NULL && (*myattrs & testbits) == 0; + pve = pve->pv_next) { + pmap_map_ptes(pve->pv_pmap, &ptes, &pdes); + pte = ptes[pl1_i(pve->pv_va)]; + pmap_unmap_ptes(pve->pv_pmap); + *myattrs |= pte; + } + + /* + * note that we will exit the for loop with a non-null pve if + * we have found the bits we are testing for. 
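+	 *
+	 * (editor's illustration, not part of the original commit: the
+	 * typical consumers are the modified/referenced tests, roughly
+	 *
+	 *	modified   = pmap_test_attrs(pg, PG_M);
+	 *	referenced = pmap_test_attrs(pg, PG_U);
+	 *
+	 * whether those wrappers are macros or inlines in pmap.h is an
+	 * assumption here; only the PG_M/PG_U names come from this file.)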
+ */ + + simple_unlock(&pvh->pvh_lock); + PMAP_HEAD_TO_MAP_UNLOCK(); + return((*myattrs & testbits) != 0); +} + +/* + * pmap_change_attrs: change a page's attributes + * + * => we set pv_head => pmap locking + * => we return TRUE if we cleared one of the bits we were asked to + */ + +boolean_t +pmap_change_attrs(pg, setbits, clearbits) + struct vm_page *pg; + int setbits, clearbits; +{ + u_int32_t result; + int bank, off; + struct pv_head *pvh; + struct pv_entry *pve; + pt_entry_t *ptes, npte; + pd_entry_t **pdes; + char *myattrs; +#if defined(I386_CPU) + boolean_t needs_update = FALSE; +#endif + + /* XXX: vm_page should either contain pv_head or have a pointer to it */ + bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off); + if (bank == -1) { + printf("pmap_change_attrs: unmanaged page?\n"); + return(FALSE); + } + + PMAP_HEAD_TO_MAP_LOCK(); + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + /* XXX: needed if we hold head->map lock? */ + simple_lock(&pvh->pvh_lock); + + myattrs = &vm_physmem[bank].pmseg.attrs[off]; + result = *myattrs & clearbits; + *myattrs = (*myattrs | setbits) & ~clearbits; + + for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next) { + pmap_map_ptes(pve->pv_pmap, &ptes, &pdes); /* locks pmap */ +#ifdef DIAGNOSTIC + if (pve->pv_va >= uvm.pager_sva && pve->pv_va < uvm.pager_eva) { + printf("pmap_change_attrs: found pager VA on pv_list\n"); + } + if (!pmap_pdes_valid(pve->pv_va, pdes, NULL)) + panic("pmap_change_attrs: mapping without PTP " + "detected"); +#endif + + npte = ptes[pl1_i(pve->pv_va)]; + result |= (npte & clearbits); + npte = (npte | setbits) & ~clearbits; + if (ptes[pl1_i(pve->pv_va)] != npte) { + ptes[pl1_i(pve->pv_va)] = npte; /* zap! */ + + if (pmap_is_curpmap(pve->pv_pmap)) { +#if defined(I386_CPU) + if (cpu_class == CPUCLASS_386) + needs_update = TRUE; + else +#endif + pmap_update_pg(pve->pv_va); + } + } + pmap_unmap_ptes(pve->pv_pmap); /* unlocks pmap */ + } + + simple_unlock(&pvh->pvh_lock); + PMAP_HEAD_TO_MAP_UNLOCK(); + +#if defined(I386_CPU) + if (needs_update) + tlbflush(); +#endif + return(result != 0); +} + +/* + * p m a p p r o t e c t i o n f u n c t i o n s + */ + +/* + * pmap_page_protect: change the protection of all recorded mappings + * of a managed page + * + * => NOTE: this is an inline function in pmap.h + */ + +/* see pmap.h */ + +/* + * pmap_protect: set the protection in of the pages in a pmap + * + * => NOTE: this is an inline function in pmap.h + */ + +/* see pmap.h */ + +/* + * pmap_write_protect: write-protect pages in a pmap + */ + +void +pmap_write_protect(pmap, sva, eva, prot) + struct pmap *pmap; + vaddr_t sva, eva; + vm_prot_t prot; +{ + pt_entry_t *ptes, *spte, *epte, npte; + pd_entry_t **pdes; + struct pmap_remove_record pmap_rr, *prr; + vaddr_t blockend, va; + u_int32_t md_prot; + + pmap_map_ptes(pmap, &ptes, &pdes); /* locks pmap */ + + /* need to worry about TLB? [TLB stores protection bits] */ + if (pmap_is_curpmap(pmap)) { + prr = &pmap_rr; + prr->prr_npages = 0; + } else { + prr = NULL; + } + + /* should be ok, but just in case ... */ + sva &= PG_FRAME; + eva &= PG_FRAME; + + for (/* null */ ; sva < eva ; sva = blockend) { + + blockend = (sva & L2_FRAME) + NBPD_L2; + if (blockend > eva) + blockend = eva; + + /* + * XXXCDC: our PTE mappings should never be write-protected! + * + * long term solution is to move the PTEs out of user + * address space. and into kernel address space (up + * with APTE). then we can set VM_MAXUSER_ADDRESS to + * be VM_MAX_ADDRESS. 
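+		 *
+		 * (editor's sketch, not part of the original commit: the
+		 * pmap_protect() inline in pmap.h is expected to reach
+		 * this function roughly as in
+		 *
+		 *	if ((prot & VM_PROT_WRITE) == 0) {
+		 *		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE))
+		 *			pmap_write_protect(pmap, sva, eva, prot);
+		 *		else
+		 *			pmap_remove(pmap, sva, eva);
+		 *	}
+		 *
+		 * the real inline may differ in detail.)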
+ */ + + /* XXXCDC: ugly hack to avoid freeing PDP here */ + if (pl_i(sva, PTP_LEVELS) == PDIR_SLOT_PTE) + continue; + + /* empty block? */ + if (!pmap_pdes_valid(sva, pdes, NULL)) + continue; + + md_prot = protection_codes[prot]; + if (sva < VM_MAXUSER_ADDRESS) + md_prot |= PG_u; + else if (sva < VM_MAX_ADDRESS) + /* XXX: write-prot our PTES? never! */ + md_prot |= (PG_u | PG_RW); + + spte = &ptes[pl1_i(sva)]; + epte = &ptes[pl1_i(blockend)]; + + for (/*null */; spte < epte ; spte++) { + + if (!pmap_valid_entry(*spte)) /* no mapping? */ + continue; + + npte = (*spte & ~PG_PROT) | md_prot; + + if (npte != *spte) { + *spte = npte; /* zap! */ + + if (prr) { /* worried about tlb flushing? */ + va = ptob(spte - ptes); + if (npte & PG_G) { + /* PG_G requires this */ + pmap_update_pg(va); + } else { + if (prr->prr_npages < + PMAP_RR_MAX) { + prr->prr_vas[ + prr->prr_npages++] = + va; + } else { + if (prr->prr_npages == + PMAP_RR_MAX) + /* signal an overflow */ + prr->prr_npages++; + } + } + } /* if (prr) */ + } /* npte != *spte */ + } /* for loop */ + } + + /* + * if we kept a removal record and removed some pages update the TLB + */ + + if (prr && prr->prr_npages) { +#if defined(I386_CPU) + if (cpu_class == CPUCLASS_386) { + tlbflush(); + } else +#endif + { /* not I386 */ + if (prr->prr_npages > PMAP_RR_MAX) { + tlbflush(); + } else { + while (prr->prr_npages) { + pmap_update_pg(prr->prr_vas[ + --prr->prr_npages]); + } + } + } /* not I386 */ + } + pmap_unmap_ptes(pmap); /* unlocks pmap */ +} + +/* + * end of protection functions + */ + +/* + * pmap_unwire: clear the wired bit in the PTE + * + * => mapping should already be in map + */ + +void +pmap_unwire(pmap, va) + struct pmap *pmap; + vaddr_t va; +{ + pt_entry_t *ptes; + pd_entry_t **pdes; + + pmap_map_ptes(pmap, &ptes, &pdes); /* locks pmap */ + + if (pmap_pdes_valid(va, pdes, NULL)) { + +#ifdef DIAGNOSTIC + if (!pmap_valid_entry(ptes[pl1_i(va)])) + panic("pmap_unwire: invalid (unmapped) va 0x%lx", va); +#endif + if ((ptes[pl1_i(va)] & PG_W) != 0) { + ptes[pl1_i(va)] &= ~PG_W; + pmap->pm_stats.wired_count--; + } +#ifdef DIAGNOSTIC + else { + printf("pmap_unwire: wiring for pmap %p va 0x%lx " + "didn't change!\n", pmap, va); + } +#endif + } +#ifdef DIAGNOSTIC + else { + panic("pmap_unwire: invalid PDE"); + } +#endif + pmap_unmap_ptes(pmap); /* unlocks map */ +} + +/* + * pmap_collect: free resources held by a pmap + * + * => optional function. + * => called when a process is swapped out to free memory. + */ + +void +pmap_collect(pmap) + struct pmap *pmap; +{ + /* + * free all of the pt pages by removing the physical mappings + * for its entire address space. + */ + + pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, + PMAP_REMOVE_SKIPWIRED); +} + +/* + * pmap_copy: copy mappings from one pmap to another + * + * => optional function + * void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) + */ + +/* + * defined as macro call in pmap.h + */ + +/* + * pmap_enter: enter a mapping into a pmap + * + * => must be done "now" ... no lazy-evaluation + * => we set pmap => pv_head locking + */ + +int +pmap_enter(pmap, va, pa, prot, flags) + struct pmap *pmap; + vaddr_t va; + paddr_t pa; + vm_prot_t prot; + int flags; +{ + pt_entry_t *ptes, opte, npte; + pd_entry_t **pdes; + struct vm_page *ptp; + struct pv_head *pvh; + struct pv_entry *pve; + int bank, off, error; + boolean_t wired = (flags & PMAP_WIRED) != 0; + +#ifdef DIAGNOSTIC + /* sanity check: totally out of range? 
*/ + if (va >= VM_MAX_KERNEL_ADDRESS) + panic("pmap_enter: too big"); + + if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE) + panic("pmap_enter: trying to map over PDP/APDP!"); + + /* sanity check: kernel PTPs should already have been pre-allocated */ + if (va >= VM_MIN_KERNEL_ADDRESS && + !pmap_valid_entry(pmap->pm_pdir[pl_i(va, PTP_LEVELS)])) + panic("pmap_enter: missing kernel PTP for va %lx!", va); + +#endif + + /* get lock */ + PMAP_MAP_TO_HEAD_LOCK(); + + /* + * map in ptes and get a pointer to our PTP (unless we are the kernel) + */ + + pmap_map_ptes(pmap, &ptes, &pdes); /* locks pmap */ + + if (pmap == pmap_kernel()) { + ptp = NULL; + } else { + ptp = pmap_get_ptp(pmap, va, pdes); + if (ptp == NULL) { + if (flags & PMAP_CANFAIL) { + error = ENOMEM; + goto out; + } + panic("pmap_enter: get ptp failed"); + } + } +#if 0 + if (va < VM_MAXUSER_ADDRESS) + printf("enter va %lx at pte %p\n", va, &ptes[pl1_i(va)]); +#endif + opte = ptes[pl1_i(va)]; /* old PTE */ + + /* + * is there currently a valid mapping at our VA? + */ + + if (pmap_valid_entry(opte)) { + /* + * first, update pm_stats. resident count will not + * change since we are replacing/changing a valid + * mapping. wired count might change... + */ + + if (wired && (opte & PG_W) == 0) + pmap->pm_stats.wired_count++; + else if (!wired && (opte & PG_W) != 0) + pmap->pm_stats.wired_count--; + + /* + * is the currently mapped PA the same as the one we + * want to map? + */ + + if ((opte & PG_FRAME) == pa) { + + /* if this is on the PVLIST, sync R/M bit */ + if (opte & PG_PVLIST) { + bank = vm_physseg_find(atop(pa), &off); +#ifdef DIAGNOSTIC + if (bank == -1) + panic("pmap_enter: same pa PG_PVLIST " + "mapping with unmanaged page " + "pa = 0x%lx (0x%lx)", pa, + atop(pa)); +#endif + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + simple_lock(&pvh->pvh_lock); + vm_physmem[bank].pmseg.attrs[off] |= opte; + simple_unlock(&pvh->pvh_lock); + } else { + pvh = NULL; /* ensure !PG_PVLIST */ + } + goto enter_now; + } + + /* + * changing PAs: we must remove the old one first + */ + + /* + * if current mapping is on a pvlist, + * remove it (sync R/M bits) + */ + + if (opte & PG_PVLIST) { + bank = vm_physseg_find(atop(opte & PG_FRAME), &off); +#ifdef DIAGNOSTIC + if (bank == -1) + panic("pmap_enter: PG_PVLIST mapping with " + "unmanaged page " + "pa = 0x%lx (0x%lx)", pa, atop(pa)); +#endif + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + simple_lock(&pvh->pvh_lock); + pve = pmap_remove_pv(pvh, pmap, va); + vm_physmem[bank].pmseg.attrs[off] |= opte; + simple_unlock(&pvh->pvh_lock); + } else { + pve = NULL; + } + } else { /* opte not valid */ + pve = NULL; + pmap->pm_stats.resident_count++; + if (wired) + pmap->pm_stats.wired_count++; + if (ptp) + ptp->wire_count++; /* count # of valid entrys */ + } + + /* + * at this point pm_stats has been updated. pve is either NULL + * or points to a now-free pv_entry structure (the latter case is + * if we called pmap_remove_pv above). + * + * if this entry is to be on a pvlist, enter it now. + */ + + bank = vm_physseg_find(atop(pa), &off); + if (pmap_initialized && bank != -1) { + pvh = &vm_physmem[bank].pmseg.pvhead[off]; + if (pve == NULL) { + pve = pmap_alloc_pv(pmap, ALLOCPV_NEED); + if (pve == NULL) { + if (flags & PMAP_CANFAIL) { + error = ENOMEM; + goto out; + } + panic("pmap_enter: no pv entries available"); + } + } + /* lock pvh when adding */ + pmap_enter_pv(pvh, pve, pmap, va, ptp); + } else { + + /* new mapping is not PG_PVLIST. 
free pve if we've got one */ + pvh = NULL; /* ensure !PG_PVLIST */ + if (pve) + pmap_free_pv(pmap, pve); + } + +enter_now: + /* + * at this point pvh is !NULL if we want the PG_PVLIST bit set + */ + + npte = pa | protection_codes[prot] | PG_V; + if (pvh) + npte |= PG_PVLIST; + if (wired) + npte |= PG_W; + if (va < VM_MAXUSER_ADDRESS) + npte |= PG_u; + else if (va < VM_MAX_ADDRESS) + npte |= (PG_u | PG_RW); /* XXXCDC: no longer needed? */ + if (pmap == pmap_kernel()) + npte |= pmap_pg_g; + + ptes[pl1_i(va)] = npte; /* zap! */ + + if ((opte & ~(PG_M|PG_U)) != npte && pmap_is_curpmap(pmap)) + pmap_update_pg(va); + + error = 0; + +out: + pmap_unmap_ptes(pmap); + PMAP_MAP_TO_HEAD_UNLOCK(); + + return error; +} + +static __inline boolean_t +pmap_get_physpage(va, level, paddrp) + vaddr_t va; + int level; + paddr_t *paddrp; +{ + struct vm_page *ptp; + struct pmap *kpm = pmap_kernel(); + + if (uvm.page_init_done == FALSE) { + /* + * we're growing the kernel pmap early (from + * uvm_pageboot_alloc()). this case must be + * handled a little differently. + */ + + if (uvm_page_physget(paddrp) == FALSE) + panic("pmap_get_physpage: out of memory"); + pmap_zero_page(*paddrp); + } else { + ptp = uvm_pagealloc(&kpm->pm_obj[level - 1], + ptp_va2o(va, level), NULL, + UVM_PGA_USERESERVE|UVM_PGA_ZERO); + if (ptp == NULL) + panic("pmap_get_physpage: out of memory"); + ptp->flags &= ~PG_BUSY; + ptp->wire_count = 1; + *paddrp = VM_PAGE_TO_PHYS(ptp); + } + kpm->pm_stats.resident_count++; + return TRUE; +} + +/* + * Allocate the amount of specified ptps for a ptp level, and populate + * all levels below accordingly, mapping virtual addresses starting at + * kva. + * + * Used by pmap_growkernel. + */ +static void +pmap_alloc_level(pdes, kva, lvl, needed_ptps) + pd_entry_t **pdes; + vaddr_t kva; + int lvl; + unsigned long *needed_ptps; +{ + unsigned long i; + vaddr_t va; + paddr_t pa; + unsigned long index, endindex; + int level; + pd_entry_t *pdep; + + for (level = lvl; level > 1; level--) { + if (level == PTP_LEVELS) + pdep = pmap_kernel()->pm_pdir; + else + pdep = pdes[level - 2]; + va = kva; + index = pl_i(kva, level); + endindex = index + needed_ptps[level - 1]; + + for (i = index; i < endindex; i++) { + pmap_get_physpage(va, level - 1, &pa); + pdep[i] = pa | PG_RW | PG_V; + nkptp[level - 1]++; + va += nbpd[level - 1]; + } +#if 0 + if (level == 2 && (endindex - index) > 5) { + printf("alloc_level: va %lx - %lx, pde %p - %p\n", + kva, va, &pdep[index], &pdep[endindex]); + if (flipje == 0) + flipje = 1; + else + panic("bah humbug"); + } +#endif + } +} + +/* + * pmap_growkernel: increase usage of KVM space + * + * => we allocate new PTPs for the kernel and install them in all + * the pmaps on the system. + */ + +vaddr_t +pmap_growkernel(maxkvaddr) + vaddr_t maxkvaddr; +{ + struct pmap *kpm = pmap_kernel(), *pm; + int s, i; + unsigned newpdes; + vaddr_t curmax; + unsigned long needed_kptp[PTP_LEVELS], target_nptp, old; + + curmax = VM_MIN_KERNEL_ADDRESS + nkptp[1] * NBPD_L2; + if (maxkvaddr <= curmax) + return curmax; + maxkvaddr = round_pdr(maxkvaddr); + old = nkptp[PTP_LEVELS - 1]; + /* + * This loop could be optimized more, but pmap_growkernel() + * is called infrequently. + */ + for (i = PTP_LEVELS - 1; i >= 1; i--) { + target_nptp = pl_i(maxkvaddr, i + 1) - + pl_i(VM_MIN_KERNEL_ADDRESS, i + 1); + /* + * XXX only need to check toplevel. 
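+		 *
+		 * (editor's worked example, not part of the original
+		 * commit: with 4KB pages an L2 entry covers NBPD_L2 = 2MB,
+		 * an L3 entry 1GB and an L4 entry 512GB, so growing the
+		 * kernel VA by 16MB normally gives needed_kptp[1] == 8 and
+		 * zero at the higher levels, unless the new range crosses
+		 * a 1GB or 512GB boundary.)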
+ */ +#if 0 + printf("pmap_growkernel: lvl %d targ %lu max %lu cur %lu\n", + i + 1, target_nptp, nkptpmax[i], nkptp[i]); +#endif + if (target_nptp > nkptpmax[i]) + panic("out of KVA space"); + needed_kptp[i] = target_nptp - nkptp[i]; + } + + + s = splhigh(); /* to be safe */ + simple_lock(&kpm->pm_lock); + pmap_alloc_level(normal_pdes, curmax, PTP_LEVELS, + needed_kptp); + + /* + * If the number of top level entries changed, update all + * pmaps. + */ + if (needed_kptp[PTP_LEVELS - 1] != 0) { + newpdes = nkptp[PTP_LEVELS - 1] - old; + simple_lock(&pmaps_lock); + for (pm = pmaps.lh_first; pm != NULL; + pm = pm->pm_list.le_next) { + memcpy(&pm->pm_pdir[PDIR_SLOT_KERN + old], + &kpm->pm_pdir[PDIR_SLOT_KERN + old], + newpdes * sizeof (pd_entry_t)); + } + + /* Invalidate the PDP cache. */ + pool_cache_invalidate(&pmap_pdp_cache); + + simple_unlock(&pmaps_lock); + } + simple_unlock(&kpm->pm_lock); + splx(s); + + return maxkvaddr; +} + +#ifdef DEBUG +void pmap_dump __P((struct pmap *, vaddr_t, vaddr_t)); + +/* + * pmap_dump: dump all the mappings from a pmap + * + * => caller should not be holding any pmap locks + */ + +void +pmap_dump(pmap, sva, eva) + struct pmap *pmap; + vaddr_t sva, eva; +{ + pt_entry_t *ptes, *pte; + pd_entry_t **pdes; + vaddr_t blkendva; + + /* + * if end is out of range truncate. + * if (end == start) update to max. + */ + + if (eva > VM_MAXUSER_ADDRESS || eva <= sva) + eva = VM_MAXUSER_ADDRESS; + + /* + * we lock in the pmap => pv_head direction + */ + + PMAP_MAP_TO_HEAD_LOCK(); + pmap_map_ptes(pmap, &ptes, &pdes); /* locks pmap */ + + /* + * dumping a range of pages: we dump in PTP sized blocks (4MB) + */ + + for (/* null */ ; sva < eva ; sva = blkendva) { + + /* determine range of block */ + blkendva = round_pdr(sva+1); + if (blkendva > eva) + blkendva = eva; + + /* valid block? */ + if (!pmap_pdes_valid(sva, pdes, NULL)) + continue; + + pte = &ptes[pl1_i(sva)]; + for (/* null */; sva < blkendva ; sva += PAGE_SIZE, pte++) { + if (!pmap_valid_entry(*pte)) + continue; + printf("va %#lx -> pa %#lx (pte=%#lx)\n", + sva, *pte, *pte & PG_FRAME); + } + } + pmap_unmap_ptes(pmap); + PMAP_MAP_TO_HEAD_UNLOCK(); +} +#endif + +#ifdef DIAGNOSTIC +static void +pmap_dump_obj(struct pmap *pmap, int level) +{ + struct vm_page *pg; + int i; + + for (i = 0; i < PDIR_SLOT_KERN; i++) + if (pmap->pm_pdir[i] & PG_V) + printf("%d: %lx\n", i, + (unsigned long)pmap->pm_pdir[i] & PG_FRAME); + + for (pg = TAILQ_FIRST(&pmap->pm_obj[level - 1].memq); + pg != NULL; + pg = TAILQ_NEXT(pg, listq)) + printf("off %llx paddr %lx flags %x\n", + (unsigned long long)pg->offset, + pg->phys_addr, pg->flags); +} +#endif diff --git a/sys/arch/x86_64/x86_64/process_machdep.c b/sys/arch/x86_64/x86_64/process_machdep.c new file mode 100644 index 000000000000..5703e3aeec08 --- /dev/null +++ b/sys/arch/x86_64/x86_64/process_machdep.c @@ -0,0 +1,305 @@ +/* $NetBSD: process_machdep.c,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This file may seem a bit stylized, but that so that it's easier to port. + * Functions to be implemented here are: + * + * process_read_regs(proc, regs) + * Get the current user-visible register set from the process + * and copy it into the regs structure (). + * The process is stopped at the time read_regs is called. + * + * process_write_regs(proc, regs) + * Update the current register set from the passed in regs + * structure. Take care to avoid clobbering special CPU + * registers or privileged bits in the PSL. + * The process is stopped at the time write_regs is called. + * + * process_sstep(proc) + * Arrange for the process to trap after executing a single instruction. + * + * process_set_pc(proc) + * Set the process's program counter. 
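+ *
+ * (Editor's illustration, not part of the original commit: these hooks
+ * are driven by the MI ptrace/procfs code, so a debugger's PT_GETREGS /
+ * PT_SETREGS request ends up looking roughly like
+ *
+ *	struct reg r;
+ *	error = process_read_regs(p, &r);
+ *	... modify r.r_rip, r.r_rax, ... ;
+ *	error = process_write_regs(p, &r);
+ *
+ * with the locking and copyin/copyout handled by the MI caller.)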
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +static __inline struct trapframe *process_frame __P((struct proc *)); +static __inline struct fxsave64 *process_fpframe __P((struct proc *)); +#if 0 +static __inline int verr_gdt __P((struct pmap *, int sel)); +static __inline int verr_ldt __P((struct pmap *, int sel)); +#endif + +static __inline struct trapframe * +process_frame(p) + struct proc *p; +{ + + return (p->p_md.md_regs); +} + +static __inline struct fxsave64 * +process_fpframe(p) + struct proc *p; +{ + + return (&p->p_addr->u_pcb.pcb_savefpu); +} + +int +process_read_regs(p, regs) + struct proc *p; + struct reg *regs; +{ + struct trapframe *tf = process_frame(p); + + regs->r_eflags = tf->tf_eflags; + regs->r_rdi = tf->tf_rdi; + regs->r_rsi = tf->tf_rsi; + regs->r_rbp = tf->tf_rbp; + regs->r_rbx = tf->tf_rbx; + regs->r_rdx = tf->tf_rdx; + regs->r_rcx = tf->tf_rcx; + regs->r_rax = tf->tf_rax; + regs->r_r8 = tf->tf_r8; + regs->r_r9 = tf->tf_r9; + regs->r_r10 = tf->tf_r10; + regs->r_r11 = tf->tf_r11; + regs->r_r12 = tf->tf_r12; + regs->r_r13 = tf->tf_r13; + regs->r_r14 = tf->tf_r14; + regs->r_r15 = tf->tf_r15; + regs->r_rip = tf->tf_rip; + regs->r_cs = tf->tf_cs; + regs->r_rsp = tf->tf_rsp; + regs->r_ss = tf->tf_ss; + + return (0); +} + +int +process_read_fpregs(p, regs) + struct proc *p; + struct fpreg *regs; +{ + struct fxsave64 *frame = process_fpframe(p); + + if (p->p_md.md_flags & MDP_USEDFPU) { + if (fpuproc == p) + fpusave(); + } else { + u_int16_t cw; + + /* + * Fake a FNINIT. + * The initial control word was already set by setregs(), so + * save it temporarily. + */ + cw = frame->fx_fcw; + memset(frame, 0, sizeof(*regs)); + frame->fx_fcw = cw; + frame->fx_fsw = 0x0000; + frame->fx_ftw = 0xff; + p->p_md.md_flags |= MDP_USEDFPU; + } + + memcpy(®s->fxstate, frame, sizeof(*regs)); + return (0); +} + +#if 0 +static __inline int +verr_ldt(pmap, sel) + struct pmap *pmap; + int sel; +{ + int off; + struct mem_segment_descriptor *d; + + off = sel & 0xfff8; + if (off > (pmap->pm_ldt_len - sizeof (struct mem_segment_descriptor))) + return 0; + d = (struct mem_segment_descriptor *)(ldtstore + off); + return ((d->sd_type & SDT_MEMRO) != 0 && d->sd_dpl == SEL_UPL && + d->sd_p == 1); +} + +static __inline int +verr_gdt(pmap, sel) + struct pmap *pmap; + int sel; +{ + int off; + struct mem_segment_descriptor *d; + + off = sel & 0xfff8; + if (off > (NGDT_MEM - 1) * sizeof (struct mem_segment_descriptor)) + return 0; + d = (struct mem_segment_descriptor *)(gdtstore + off); + return ((d->type & SDT_MEMRO) != 0 && d->sd_p == 1 && + d->dpl == SEL_UPL); +} + +#define verr(sel) (ISLDT(sel) ? verr_ldt(IDXSEL(sel)) : \ + verr_gdt(IDXSEL(sel))) +#define valid_sel(sel) (ISPL(sel) == SEL_UPL && verr(sel)) +#define null_sel(sel) (!ISLDT(sel) && IDXSEL(sel) == 0) + +#endif + +int +process_write_regs(p, regs) + struct proc *p; + struct reg *regs; +{ + struct trapframe *tf = process_frame(p); + struct pcb *pcb = &p->p_addr->u_pcb; + pmap_t pmap = p->p_vmspace->vm_map.pmap; +#if 0 + union descriptor *gdt = (union descriptor *)gdtstore; +#endif + + /* + * Check for security violations. + */ + if (((regs->r_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 || + !USERMODE(regs->r_cs, regs->r_eflags)) + return (EINVAL); + + simple_lock(&pmap->pm_lock); + +#if 0 + /* + * fs and gs contents ignored by long mode. + * must reenable this check for 32bit compat mode. 
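+	 *
+	 * (editor's note, not part of the original commit: in long mode
+	 * the fs/gs base addresses come from the FS.base/GS.base MSRs
+	 * (0xc0000100/0xc0000101) rather than from the descriptors the
+	 * selectors name, e.g. something like
+	 *
+	 *	wrmsr(0xc0000100, base);	hypothetical FS.base load
+	 *
+	 * so validating the selector values only matters for 32-bit
+	 * compatibility-mode processes.)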
+ */ + if ((regs->r_gs != pcb->pcb_gs && \ + !valid_sel(regs->r_gs) && !null_sel(regs->r_gs)) || + (regs->r_fs != pcb->pcb_fs && \ + !valid_sel(regs->r_fs) && !null_sel(regs->r_fs))) + return (EINVAL); +#endif + + simple_unlock(&pmap->pm_lock); + + pcb->pcb_gs = regs->r_gs; + pcb->pcb_fs = regs->r_fs; + tf->tf_eflags = regs->r_eflags; + tf->tf_r15 = regs->r_r15; + tf->tf_r14 = regs->r_r14; + tf->tf_r13 = regs->r_r13; + tf->tf_r12 = regs->r_r12; + tf->tf_r11 = regs->r_r11; + tf->tf_r10 = regs->r_r10; + tf->tf_r9 = regs->r_r9; + tf->tf_r8 = regs->r_r8; + tf->tf_rdi = regs->r_rdi; + tf->tf_rsi = regs->r_rsi; + tf->tf_rbp = regs->r_rbp; + tf->tf_rbx = regs->r_rbx; + tf->tf_rdx = regs->r_rdx; + tf->tf_rcx = regs->r_rcx; + tf->tf_rax = regs->r_rax; + tf->tf_rip = regs->r_rip; + tf->tf_cs = regs->r_cs; + tf->tf_rsp = regs->r_rsp; + tf->tf_ss = regs->r_ss; + + return (0); +} + +int +process_write_fpregs(p, regs) + struct proc *p; + struct fpreg *regs; +{ + struct fxsave64 *frame = process_fpframe(p); + + if (p->p_md.md_flags & MDP_USEDFPU) { + if (fpuproc == p) + fpudrop(); + } else { + p->p_md.md_flags |= MDP_USEDFPU; + } + + memcpy(frame, regs, sizeof(*regs)); + return (0); +} + +int +process_sstep(p, sstep) + struct proc *p; +{ + struct trapframe *tf = process_frame(p); + + if (sstep) + tf->tf_eflags |= PSL_T; + else + tf->tf_eflags &= ~PSL_T; + + return (0); +} + +int +process_set_pc(p, addr) + struct proc *p; + caddr_t addr; +{ + struct trapframe *tf = process_frame(p); + + tf->tf_rip = (u_int64_t)addr; + + return (0); +} diff --git a/sys/arch/x86_64/x86_64/sys_machdep.c b/sys/arch/x86_64/x86_64/sys_machdep.c new file mode 100644 index 000000000000..23d23cda3a88 --- /dev/null +++ b/sys/arch/x86_64/x86_64/sys_machdep.c @@ -0,0 +1,418 @@ +/* $NetBSD: sys_machdep.c,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * XXXfvdl check USER_LDT + */ + +#if 0 +#include "opt_user_ldt.h" +#include "opt_perfctrs.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#if defined(PERFCTRS) && 0 +#include +#endif + +extern struct vm_map *kernel_map; + +#if 0 +int x86_64_iopl __P((struct proc *, void *, register_t *)); +int x86_64_get_ioperm __P((struct proc *, void *, register_t *)); +int x86_64_set_ioperm __P((struct proc *, void *, register_t *)); +#endif + +/* XXXfvdl disabled USER_LDT stuff until I check this stuff */ + +#if defined(USER_LDT) && 0 +int +x86_64_get_ldt(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + int error; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + int nldt, num; + union descriptor *lp; + struct x86_64_get_ldt_args ua; + + if ((error = copyin(args, &ua, sizeof(ua))) != 0) + return (error); + +#ifdef LDT_DEBUG + printf("x86_64_get_ldt: start=%d num=%d descs=%p\n", ua.start, + ua.num, ua.desc); +#endif + + if (ua.start < 0 || ua.num < 0) + return (EINVAL); + + /* + * XXX LOCKING. + */ + + if (pmap->pm_flags & PMF_USER_LDT) { + nldt = pmap->pm_ldt_len; + lp = pmap->pm_ldt; + } else { + nldt = NLDT; + lp = ldt; + } + + if (ua.start > nldt) + return (EINVAL); + + lp += ua.start; + num = min(ua.num, nldt - ua.start); + + error = copyout(lp, ua.desc, num * sizeof(union descriptor)); + if (error) + return (error); + + *retval = num; + return (0); +} + +int +x86_64_set_ldt(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + int error, i, n; + struct pcb *pcb = &p->p_addr->u_pcb; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + int fsslot, gsslot; + struct x86_64_set_ldt_args ua; + union descriptor desc; + + if ((error = copyin(args, &ua, sizeof(ua))) != 0) + return (error); + +#ifdef LDT_DEBUG + printf("x86_64_set_ldt: start=%d num=%d descs=%p\n", ua.start, + ua.num, ua.desc); +#endif + + if (ua.start < 0 || ua.num < 0) + return (EINVAL); + if (ua.start > 8192 || (ua.start + ua.num) > 8192) + return (EINVAL); + + /* + * XXX LOCKING + */ + + /* allocate user ldt */ + if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) { + size_t old_len, new_len; + union descriptor *old_ldt, *new_ldt; + + if (pmap->pm_flags & PMF_USER_LDT) { + old_len = pmap->pm_ldt_len * sizeof(union descriptor); + old_ldt = pmap->pm_ldt; + } else { + old_len = NLDT * sizeof(union descriptor); + old_ldt = ldt; + pmap->pm_ldt_len = 512; + } + while ((ua.start + ua.num) > pmap->pm_ldt_len) + pmap->pm_ldt_len *= 2; + new_len = pmap->pm_ldt_len * sizeof(union descriptor); + new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len); + memcpy(new_ldt, old_ldt, old_len); + memset((caddr_t)new_ldt + old_len, 0, new_len - old_len); + pmap->pm_ldt = new_ldt; + + if (pmap->pm_flags & PCB_USER_LDT) + ldt_free(pmap); + else + pmap->pm_flags |= PCB_USER_LDT; + 
ldt_alloc(pmap, new_ldt, new_len); + pcb->pcb_ldt_sel = pmap->pm_ldt_sel; + if (pcb == curpcb) + lldt(pcb->pcb_ldt_sel); + + /* + * XXX Need to notify other processors which may be + * XXX currently using this pmap that they need to + * XXX re-load the LDT. + */ + + if (old_ldt != ldt) + uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len); +#ifdef LDT_DEBUG + printf("x86_64_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt); +#endif + } + + if (pcb == curpcb) + savectx(curpcb); + fsslot = IDXSEL(pcb->pcb_fs); + gsslot = IDXSEL(pcb->pcb_gs); + error = 0; + + /* Check descriptors for access violations. */ + for (i = 0, n = ua.start; i < ua.num; i++, n++) { + if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0) + return (error); + + switch (desc.sd.sd_type) { + case SDT_SYSNULL: + desc.sd.sd_p = 0; + break; + case SDT_SYS286CGT: + case SDT_SYS386CGT: + /* + * Only allow call gates targeting a segment + * in the LDT or a user segment in the fixed + * part of the gdt. Segments in the LDT are + * constrained (below) to be user segments. + */ + if (desc.gd.gd_p != 0 && !ISLDT(desc.gd.gd_selector) && + ((IDXSEL(desc.gd.gd_selector) >= NGDT) || + (gdt[IDXSEL(desc.gd.gd_selector)].sd.sd_dpl != + SEL_UPL))) + return (EACCES); + /* Can't replace in use descriptor with gate. */ + if (n == fsslot || n == gsslot) + return (EBUSY); + break; + case SDT_MEMEC: + case SDT_MEMEAC: + case SDT_MEMERC: + case SDT_MEMERAC: + /* Must be "present" if executable and conforming. */ + if (desc.sd.sd_p == 0) + return (EACCES); + break; + case SDT_MEMRO: + case SDT_MEMROA: + case SDT_MEMRW: + case SDT_MEMRWA: + case SDT_MEMROD: + case SDT_MEMRODA: + case SDT_MEMRWD: + case SDT_MEMRWDA: + case SDT_MEME: + case SDT_MEMEA: + case SDT_MEMER: + case SDT_MEMERA: + break; + default: + /* Only care if it's present. */ + if (desc.sd.sd_p != 0) + return (EACCES); + break; + } + + if (desc.sd.sd_p != 0) { + /* Only user (ring-3) descriptors may be present. */ + if (desc.sd.sd_dpl != SEL_UPL) + return (EACCES); + } else { + /* Must be "present" if in use. */ + if (n == fsslot || n == gsslot) + return (EBUSY); + } + } + + /* Now actually replace the descriptors. 
*/ + for (i = 0, n = ua.start; i < ua.num; i++, n++) { + if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0) + goto out; + + pmap->pm_ldt[n] = desc; + } + + *retval = ua.start; + +out: + return (error); +} +#endif /* USER_LDT */ + +#if 0 +int +x86_64_iopl(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + int error; + struct trapframe *tf = p->p_md.md_regs; + struct x86_64_iopl_args ua; + + if (securelevel > 1) + return EPERM; + + if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; + + if ((error = copyin(args, &ua, sizeof(ua))) != 0) + return error; + + if (ua.iopl) + tf->tf_eflags |= PSL_IOPL; + else + tf->tf_eflags &= ~PSL_IOPL; + + return 0; +} + +int +x86_64_get_ioperm(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + int error; + struct pcb *pcb = &p->p_addr->u_pcb; + struct x86_64_get_ioperm_args ua; + + if ((error = copyin(args, &ua, sizeof(ua))) != 0) + return (error); + + return copyout(pcb->pcb_iomap, ua.iomap, sizeof(pcb->pcb_iomap)); +} + +int +x86_64_set_ioperm(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + int error; + struct pcb *pcb = &p->p_addr->u_pcb; + struct x86_64_set_ioperm_args ua; + + if (securelevel > 1) + return EPERM; + + if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; + + if ((error = copyin(args, &ua, sizeof(ua))) != 0) + return (error); + + return copyin(ua.iomap, pcb->pcb_iomap, sizeof(pcb->pcb_iomap)); +} + +#endif + +int +sys_sysarch(p, v, retval) + struct proc *p; + void *v; + register_t *retval; +{ + struct sys_sysarch_args /* { + syscallarg(int) op; + syscallarg(void *) parms; + } */ *uap = v; + int error = 0; + + switch(SCARG(uap, op)) { +#if defined(USER_LDT) && 0 + case X86_64_GET_LDT: + error = x86_64_get_ldt(p, SCARG(uap, parms), retval); + break; + + case X86_64_SET_LDT: + error = x86_64_set_ldt(p, SCARG(uap, parms), retval); + break; +#endif +#if 0 + case X86_64_IOPL: + error = x86_64_iopl(p, SCARG(uap, parms), retval); + break; + + case X86_64_GET_IOPERM: + error = x86_64_get_ioperm(p, SCARG(uap, parms), retval); + break; + + case X86_64_SET_IOPERM: + error = x86_64_set_ioperm(p, SCARG(uap, parms), retval); + break; +#endif + +#if defined(PERFCTRS) && 0 + case X86_64_PMC_INFO: + error = pmc_info(p, SCARG(uap, parms), retval); + break; + + case X86_64_PMC_STARTSTOP: + error = pmc_startstop(p, SCARG(uap, parms), retval); + break; + + case X86_64_PMC_READ: + error = pmc_read(p, SCARG(uap, parms), retval); + break; +#endif + + default: + error = EINVAL; + break; + } + return (error); +} diff --git a/sys/arch/x86_64/x86_64/syscall.c b/sys/arch/x86_64/x86_64/syscall.c new file mode 100644 index 000000000000..c1f6fa4466bc --- /dev/null +++ b/sys/arch/x86_64/x86_64/syscall.c @@ -0,0 +1,307 @@ +/* $NetBSD: syscall.c,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_syscall_debug.h" +#include "opt_ktrace.h" + +#include +#include +#include +#include +#include +#ifdef KTRACE +#include +#endif +#include + +#include + +#include +#include +#include + +void syscall_intern __P((struct proc *)); +void syscall_plain __P((struct trapframe)); +void syscall_fancy __P((struct trapframe)); + +void +syscall_intern(p) + struct proc *p; +{ + +#ifdef KTRACE + if (p->p_traceflag & (KTRFAC_SYSCALL | KTRFAC_SYSRET)) + p->p_md.md_syscall = syscall_fancy; + else +#endif + p->p_md.md_syscall = syscall_plain; +} + +/* + * syscall(frame): + * System call request from POSIX system call gate interface to kernel. + * Like trap(), argument is call by reference. + */ +void +syscall_plain(frame) + struct trapframe frame; +{ + register caddr_t params; + register const struct sysent *callp; + register struct proc *p; + int error; + size_t argsize, argoff; + register_t code, args[9], rval[2], *argp; + + uvmexp.syscalls++; + p = curproc; + + code = frame.tf_rax; + callp = p->p_emul->e_sysent; + argoff = 0; + argp = &args[0]; + + switch (code) { + case SYS_syscall: + case SYS___syscall: + /* + * Code is first argument, followed by actual args. 
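+		 *
+		 * (editor's worked example, not part of the original
+		 * commit: for __syscall(SYS_write, fd, buf, nbytes) the
+		 * registers arrive as
+		 *
+		 *	%rax = SYS___syscall, %rdi = SYS_write,
+		 *	%rsi = fd, %rdx = buf, %rcx = nbytes
+		 *
+		 * so the real code is reloaded from %rdi and argp is
+		 * advanced to &args[1], which lets sys_write() see
+		 * fd/buf/nbytes as its first three arguments.)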
+ */ + code = frame.tf_rdi; + argp = &args[1]; + argoff = 1; + break; + default: + break; + } + + code &= (SYS_NSYSENT - 1); + callp += code; + + argsize = (callp->sy_argsize >> 3) + argoff; + if (argsize) { + switch (MIN(argsize, 6)) { + case 6: + args[5] = frame.tf_r9; + case 5: + args[4] = frame.tf_r8; + case 4: + args[3] = frame.tf_rcx; + case 3: + args[2] = frame.tf_rdx; + case 2: + args[1] = frame.tf_rsi; + case 1: + args[0] = frame.tf_rdi; + break; + default: + panic("impossible syscall argsize"); + } + if (argsize > 6) { + argsize -= 6; + params = (caddr_t)frame.tf_rsp + sizeof(register_t); + error = copyin(params, (caddr_t)&args[6], + argsize << 3); + if (error != 0) + goto bad; + } + } + +#ifdef SYSCALL_DEBUG + scdebug_call(p, code, argp); +#endif /* SYSCALL_DEBUG */ + + rval[0] = 0; + rval[1] = 0; + error = (*callp->sy_call)(p, argp, rval); + switch (error) { + case 0: + frame.tf_rax = rval[0]; + frame.tf_rdx = rval[1]; + frame.tf_eflags &= ~PSL_C; /* carry bit */ + break; + case ERESTART: + /* + * The offset to adjust the PC by depends on whether we entered + * the kernel through the trap or call gate. We pushed the + * size of the instruction into tf_err on entry. + */ + frame.tf_rip -= frame.tf_err; + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + bad: + frame.tf_rax = error; + frame.tf_eflags |= PSL_C; /* carry bit */ + break; + } + +#ifdef SYSCALL_DEBUG + scdebug_ret(p, code, error, rval); +#endif /* SYSCALL_DEBUG */ + userret(p); +} + +void +syscall_fancy(frame) + struct trapframe frame; +{ + register caddr_t params; + register const struct sysent *callp; + register struct proc *p; + int error; + size_t argsize, argoff; + register_t code, args[9], rval[2], *argp; + + uvmexp.syscalls++; + p = curproc; + + code = frame.tf_rax; + callp = p->p_emul->e_sysent; + argp = &args[0]; + argoff = 0; + + switch (code) { + case SYS_syscall: + case SYS___syscall: + /* + * Code is first argument, followed by actual args. + */ + code = frame.tf_rdi; + argp = &args[1]; + argoff = 1; + break; + default: + break; + } + code &= (SYS_NSYSENT - 1); + callp += code; + + argsize = (callp->sy_argsize >> 3) + argoff; + if (argsize) { + switch (MIN(argsize, 6)) { + case 6: + args[5] = frame.tf_r9; + case 5: + args[4] = frame.tf_r8; + case 4: + args[3] = frame.tf_rcx; + case 3: + args[2] = frame.tf_rdx; + case 2: + args[1] = frame.tf_rsi; + case 1: + args[0] = frame.tf_rdi; + break; + default: + panic("impossible syscall argsize"); + } + if (argsize > 6) { + argsize -= 6; + params = (caddr_t)frame.tf_rsp + sizeof(register_t); + error = copyin(params, (caddr_t)&args[6], + argsize << 3); + if (error != 0) + goto bad; + } + } + + +#ifdef SYSCALL_DEBUG + scdebug_call(p, code, args); +#endif /* SYSCALL_DEBUG */ +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSCALL)) + ktrsyscall(p, code, argsize, args); +#endif /* KTRACE */ + + rval[0] = 0; + rval[1] = 0; + error = (*callp->sy_call)(p, argp, rval); + switch (error) { + case 0: + frame.tf_rax = rval[0]; + frame.tf_rdx = rval[1]; + frame.tf_eflags &= ~PSL_C; /* carry bit */ + break; + case ERESTART: + /* + * The offset to adjust the PC by depends on whether we entered + * the kernel through the trap or call gate. We pushed the + * size of the instruction into tf_err on entry. 
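+		 *
+		 * (editor's example, not part of the original commit: if
+		 * the process entered through the 2-byte "syscall"
+		 * instruction (0x0f 0x05), tf_err is 2 and backing %rip up
+		 * by 2 re-executes it once the pending signal is handled;
+		 * a call-gate entry would have pushed the length of its
+		 * longer lcall sequence instead.)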
+ */ + frame.tf_rip -= frame.tf_err; + break; + case EJUSTRETURN: + /* nothing to do */ + break; + default: + bad: + frame.tf_rax = error; + frame.tf_eflags |= PSL_C; /* carry bit */ + break; + } + +#ifdef SYSCALL_DEBUG + scdebug_ret(p, code, error, rval); +#endif /* SYSCALL_DEBUG */ + userret(p); +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSRET)) + ktrsysret(p, code, error, rval[0]); +#endif /* KTRACE */ +} + +void +child_return(arg) + void *arg; +{ + struct proc *p = arg; + struct trapframe *tf = p->p_md.md_regs; + + tf->tf_rax = 0; + tf->tf_eflags &= ~PSL_C; + + userret(p); +#ifdef KTRACE + if (KTRPOINT(p, KTR_SYSRET)) + ktrsysret(p, SYS_fork, 0, 0); +#endif +} diff --git a/sys/arch/x86_64/x86_64/trap.c b/sys/arch/x86_64/x86_64/trap.c new file mode 100644 index 000000000000..82dd98727981 --- /dev/null +++ b/sys/arch/x86_64/x86_64/trap.c @@ -0,0 +1,474 @@ +/* $NetBSD: trap.c,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the University of Utah, and William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)trap.c 7.4 (Berkeley) 5/13/91 + */ + +/* + * 386 Trap and System call handling + */ + +#include "opt_ddb.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#ifdef DDB +#include +#endif + +#include "isa.h" + +#ifdef KGDB +#include +#endif + +void trap __P((struct trapframe)); +#if defined(I386_CPU) +int trapwrite __P((unsigned)); +#endif + +const char *trap_type[] = { + "privileged instruction fault", /* 0 T_PRIVINFLT */ + "breakpoint trap", /* 1 T_BPTFLT */ + "arithmetic trap", /* 2 T_ARITHTRAP */ + "asynchronous system trap", /* 3 T_ASTFLT */ + "protection fault", /* 4 T_PROTFLT */ + "trace trap", /* 5 T_TRCTRAP */ + "page fault", /* 6 T_PAGEFLT */ + "alignment fault", /* 7 T_ALIGNFLT */ + "integer divide fault", /* 8 T_DIVIDE */ + "non-maskable interrupt", /* 9 T_NMI */ + "overflow trap", /* 10 T_OFLOW */ + "bounds check fault", /* 11 T_BOUND */ + "FPU not available fault", /* 12 T_DNA */ + "double fault", /* 13 T_DOUBLEFLT */ + "FPU operand fetch fault", /* 14 T_FPOPFLT */ + "invalid TSS fault", /* 15 T_TSSFLT */ + "segment not present fault", /* 16 T_SEGNPFLT */ + "stack fault", /* 17 T_STKFLT */ + "reserved trap", /* 18 T_RESERVED */ +}; +int trap_types = sizeof trap_type / sizeof trap_type[0]; + +#ifdef DEBUG +int trapdebug = 0; +#endif + +#define IDTVEC(name) __CONCAT(X, name) + +/* + * trap(frame): + * Exception, fault, and trap interface to BSD kernel. This + * common code is called from assembly language IDT gate entry + * routines that prepare a suitable stack frame, and restore this + * frame after the exception has been processed. Note that the + * effect is as if the arguments were passed call by reference. 
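+ *
+ * (Editor's note, not part of the original commit: "call by reference"
+ * means the struct trapframe argument *is* the frame the entry stub
+ * built on the stack, so an assignment such as
+ *
+ *	frame.tf_rip = (u_int64_t)pcb->pcb_onfault;
+ *
+ * below really changes where the interrupted context resumes once the
+ * stub reloads the registers from the frame.)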
+ */ +/*ARGSUSED*/ +void +trap(frame) + struct trapframe frame; +{ + register struct proc *p = curproc; + int type = (int)frame.tf_trapno; + struct pcb *pcb = NULL; + extern char fusuintrfailure[], + resume_iret[], IDTVEC(osyscall)[]; +#if 0 + extern char resume_pop_ds[], resume_pop_es[]; +#endif + struct trapframe *vframe; + void *resume; + caddr_t onfault; + int error; + + uvmexp.traps++; + +#ifdef DEBUG + if (trapdebug) { + printf("trap %d code %lx eip %lx cs %lx eflags %lx cr2 %lx " + "cpl %x\n", + type, frame.tf_err, frame.tf_rip, frame.tf_cs, + frame.tf_eflags, rcr2(), cpl); + printf("curproc %p\n", curproc); + } +#endif + + if (!KERNELMODE(frame.tf_cs, frame.tf_eflags)) { + type |= T_USER; + p->p_md.md_regs = &frame; + } + + switch (type) { + + default: + we_re_toast: +#ifdef KGDB + if (kgdb_trap(type, &frame)) + return; + else { + /* + * If this is a breakpoint, don't panic + * if we're not connected. + */ + if (type == T_BPTFLT) { + printf("kgdb: ignored %s\n", trap_type[type]); + return; + } + } +#endif +#ifdef DDB + if (kdb_trap(type, 0, &frame)) + return; +#endif + if (frame.tf_trapno < trap_types) + printf("fatal %s", trap_type[frame.tf_trapno]); + else + printf("unknown trap %ld", (u_long)frame.tf_trapno); + printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor"); + printf("trap type %d code %lx rip %lx cs %lx eflags %lx cr2 " + " %lx cpl %x\n", + type, frame.tf_err, (u_long)frame.tf_rip, frame.tf_cs, + frame.tf_eflags, rcr2(), cpl); + + panic("trap"); + /*NOTREACHED*/ + + case T_PROTFLT: + case T_SEGNPFLT: + case T_ALIGNFLT: + case T_TSSFLT: + /* Check for copyin/copyout fault. */ + pcb = &p->p_addr->u_pcb; + if (pcb->pcb_onfault != 0) { +copyefault: + error = EFAULT; +copyfault: + frame.tf_rip = (u_int64_t)pcb->pcb_onfault; + frame.tf_rax = error; + return; + } + + /* + * Check for failure during return to user mode. + * + * XXXfvdl check for rex prefix? + * + * We do this by looking at the instruction we faulted on. The + * specific instructions we recognize only happen when + * returning from a trap, syscall, or interrupt. + * + * XXX + * The heuristic used here will currently fail for the case of + * one of the 2 pop instructions faulting when returning from a + * a fast interrupt. This should not be possible. It can be + * fixed by rearranging the trap frame so that the stack format + * at this point is the same as on exit from a `slow' + * interrupt. + */ + switch (*(u_char *)frame.tf_rip) { + case 0xcf: /* iret */ + vframe = (void *)((u_int64_t)&frame.tf_rsp - 44); + resume = resume_iret; + break; +/* + * XXXfvdl these are illegal in long mode (not in compat mode, though) + * and we do not take back the descriptors from the signal context anyway, + * but may do so later for USER_LDT, in which case we need to intercept + * other instructions (movl %eax, %Xs). 
+ */ +#if 0 + case 0x1f: /* popl %ds */ + vframe = (void *)((u_int64_t)&frame.tf_rsp - 4); + resume = resume_pop_ds; + break; + case 0x07: /* popl %es */ + vframe = (void *)((u_int64_t)&frame.tf_rsp - 0); + resume = resume_pop_es; + break; +#endif + default: + goto we_re_toast; + } + if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags)) + goto we_re_toast; + + frame.tf_rip = (u_int64_t)resume; + return; + + case T_PROTFLT|T_USER: /* protection fault */ + case T_TSSFLT|T_USER: + case T_SEGNPFLT|T_USER: + case T_STKFLT|T_USER: + case T_ALIGNFLT|T_USER: + case T_NMI|T_USER: + trapsignal(p, SIGBUS, type &~ T_USER); + goto out; + + case T_PRIVINFLT|T_USER: /* privileged instruction fault */ + case T_FPOPFLT|T_USER: /* coprocessor operand fault */ + trapsignal(p, SIGILL, type &~ T_USER); + goto out; + + case T_ASTFLT|T_USER: /* Allow process switch */ + uvmexp.softs++; + if (p->p_flag & P_OWEUPC) { + p->p_flag &= ~P_OWEUPC; + ADDUPROF(p); + } + /* Allow a forced task switch. */ + if (want_resched) + preempt(NULL); + goto out; + + case T_DNA|T_USER: { + printf("pid %d killed due to lack of floating point\n", + p->p_pid); + trapsignal(p, SIGKILL, type &~ T_USER); + goto out; + } + + case T_BOUND|T_USER: + case T_OFLOW|T_USER: + case T_DIVIDE|T_USER: + trapsignal(p, SIGFPE, type &~ T_USER); + goto out; + + case T_ARITHTRAP|T_USER: + fputrap(&frame); + goto out; + + case T_PAGEFLT: /* allow page faults in kernel mode */ + if (p == 0) + goto we_re_toast; + pcb = &p->p_addr->u_pcb; + /* + * fusuintrfailure is used by [fs]uswintr() to prevent + * page faulting from inside the profiling interrupt. + */ + if (pcb->pcb_onfault == fusuintrfailure) + goto copyefault; + /* FALLTHROUGH */ + + case T_PAGEFLT|T_USER: { /* page fault */ + register vaddr_t va; + register struct vmspace *vm = p->p_vmspace; + register struct vm_map *map; + vm_prot_t ftype; + extern struct vm_map *kernel_map; + unsigned long nss; + + if (vm == NULL) + goto we_re_toast; + va = trunc_page((vaddr_t)rcr2()); + /* + * It is only a kernel address space fault iff: + * 1. (type & T_USER) == 0 and + * 2. pcb_onfault not set or + * 3. pcb_onfault set but supervisor space fault + * The last can occur during an exec() copyin where the + * argument space is lazy-allocated. + */ + if (type == T_PAGEFLT && va >= KERNBASE) + map = kernel_map; + else + map = &vm->vm_map; + if (frame.tf_err & PGEX_W) + ftype = VM_PROT_READ | VM_PROT_WRITE; + else + ftype = VM_PROT_READ; + +#ifdef DIAGNOSTIC + if (map == kernel_map && va == 0) { + printf("trap: bad kernel access at %lx\n", va); + goto we_re_toast; + } +#endif + + nss = 0; + if ((caddr_t)va >= vm->vm_maxsaddr + && (caddr_t)va < (caddr_t)VM_MAXUSER_ADDRESS + && map != kernel_map) { + nss = btoc(USRSTACK-(unsigned long)va); + if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) { + /* + * We used to fail here. However, it may + * just have been an mmap()ed page low + * in the stack, which is legal. If it + * wasn't, uvm_fault() will fail below. + * + * Set nss to 0, since this case is not + * a "stack extension". + */ + nss = 0; + } + } + + /* Fault the original page in. 
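+ *
+ * pcb_onfault is saved and cleared across the uvm_fault() call below so
+ * that a fault taken while this one is being resolved is not mistaken
+ * for a copyin/copyout fault; it is restored once uvm_fault() returns.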
*/ + onfault = p->p_addr->u_pcb.pcb_onfault; + p->p_addr->u_pcb.pcb_onfault = NULL; + error = uvm_fault(map, va, 0, ftype); + p->p_addr->u_pcb.pcb_onfault = onfault; + if (error == 0) { + if (nss > vm->vm_ssize) + vm->vm_ssize = nss; + + if (type == T_PAGEFLT) + return; + goto out; + } + if (error == EACCES) { + error = EFAULT; + } + + if (type == T_PAGEFLT) { + if (pcb->pcb_onfault != 0) + goto copyfault; + printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n", + map, va, ftype, error); + goto we_re_toast; + } + if (error == ENOMEM) { + printf("UVM: pid %d (%s), uid %d killed: out of swap\n", + p->p_pid, p->p_comm, + p->p_cred && p->p_ucred ? + p->p_ucred->cr_uid : -1); + trapsignal(p, SIGKILL, T_PAGEFLT); + } else + trapsignal(p, SIGSEGV, T_PAGEFLT); + break; + } + + case T_TRCTRAP: + /* Check whether they single-stepped into a lcall. */ + if (frame.tf_rip == (int)IDTVEC(osyscall)) + return; + if (frame.tf_rip == (int)IDTVEC(osyscall) + 1) { + frame.tf_eflags &= ~PSL_T; + return; + } + goto we_re_toast; + + case T_BPTFLT|T_USER: /* bpt instruction fault */ + case T_TRCTRAP|T_USER: /* trace trap */ +#ifdef MATH_EMULATE + trace: +#endif + trapsignal(p, SIGTRAP, type &~ T_USER); + break; + +#if NISA > 0 + case T_NMI: +#if defined(KGDB) || defined(DDB) + /* NMI can be hooked up to a pushbutton for debugging */ + printf ("NMI ... going to debugger\n"); +#ifdef KGDB + + if (kgdb_trap(type, &frame)) + return; +#endif +#ifdef DDB + if (kdb_trap(type, 0, &frame)) + return; +#endif +#endif /* KGDB || DDB */ + /* machine/parity/power fail/"kitchen sink" faults */ + + if (isa_nmi() != 0) + goto we_re_toast; + else + return; +#endif /* NISA > 0 */ + } + + if ((type & T_USER) == 0) + return; +out: + userret(p); +} diff --git a/sys/arch/x86_64/x86_64/vector.S b/sys/arch/x86_64/x86_64/vector.S new file mode 100644 index 000000000000..db78daa44c56 --- /dev/null +++ b/sys/arch/x86_64/x86_64/vector.S @@ -0,0 +1,676 @@ +/* $NetBSD: vector.S,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Slightly adapted for NetBSD/x86_64 by fvdl@wasabisystems.com. A bit + * of a minimal effort port, as this file will very likely have to + * be redone for the real hardware (but no specs are available for that + * yet). + */ +#include "opt_ddb.h" + +#include "assym.h" + +#include +#include + +#define ALIGN_TEXT .align 16,0x90 + +#include +#include +#include +#include +#include +#include + + +#include + +/* + * XXX definition should not be here. + */ +#define IRQ_SLAVE 2 + +/*****************************************************************************/ + +/* + * Trap and fault vector routines + * + * On exit from the kernel to user mode, we always need to check for ASTs. In + * addition, we need to do this atomically; otherwise an interrupt may occur + * which causes an AST, but it won't get processed until the next kernel entry + * (possibly the next clock tick). Thus, we disable interrupts before checking, + * and only enable them again on the final `iret' or before calling the AST + * handler. + */ + +/*****************************************************************************/ + +/* + * XXX traditional CPP's evaluation semantics make this necessary. + * XXX (__CONCAT() would be evaluated incorrectly) + */ +#define IDTVEC(name) ALIGN_TEXT; .globl X/**/name; X/**/name: + +#define TRAP(a) pushq $(a) ; jmp _C_LABEL(alltraps) +#define ZTRAP(a) pushq $0 ; TRAP(a) + +#define BPTTRAP(a) ZTRAP(a) + + .text +IDTVEC(trap00) + ZTRAP(T_DIVIDE) +IDTVEC(trap01) + BPTTRAP(T_TRCTRAP) +IDTVEC(trap02) + ZTRAP(T_NMI) +IDTVEC(trap03) + BPTTRAP(T_BPTFLT) +IDTVEC(trap04) + ZTRAP(T_OFLOW) +IDTVEC(trap05) + ZTRAP(T_BOUND) +IDTVEC(trap06) + ZTRAP(T_PRIVINFLT) +IDTVEC(trap07) + pushq $0 # dummy error code + pushq $T_DNA + INTRENTRY + movq _C_LABEL(curproc)(%rip),%rdi + call _C_LABEL(fpudna) + INTRFASTEXIT +IDTVEC(trap08) + TRAP(T_DOUBLEFLT) +IDTVEC(trap09) + ZTRAP(T_FPOPFLT) +IDTVEC(trap0a) + TRAP(T_TSSFLT) +IDTVEC(trap0b) + TRAP(T_SEGNPFLT) +IDTVEC(trap0c) + TRAP(T_STKFLT) +IDTVEC(trap0d) + TRAP(T_PROTFLT) +IDTVEC(trap0e) + TRAP(T_PAGEFLT) +IDTVEC(trap0f) + iretq +IDTVEC(trap10) + ZTRAP(T_ARITHTRAP) +IDTVEC(trap11) + ZTRAP(T_ALIGNFLT) +IDTVEC(trap12) +IDTVEC(trap13) +IDTVEC(trap14) +IDTVEC(trap15) +IDTVEC(trap16) +IDTVEC(trap17) +IDTVEC(trap18) +IDTVEC(trap19) +IDTVEC(trap1a) +IDTVEC(trap1b) +IDTVEC(trap1c) +IDTVEC(trap1d) +IDTVEC(trap1e) +IDTVEC(trap1f) + /* 18 - 31 reserved for future exp */ + ZTRAP(T_RESERVED) + +IDTVEC(exceptions) + .quad _C_LABEL(Xtrap00), _C_LABEL(Xtrap01) + .quad _C_LABEL(Xtrap02), _C_LABEL(Xtrap03) + .quad _C_LABEL(Xtrap04), _C_LABEL(Xtrap05) + .quad _C_LABEL(Xtrap06), _C_LABEL(Xtrap07) + .quad _C_LABEL(Xtrap08), _C_LABEL(Xtrap09) + .quad _C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b) + .quad _C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d) + .quad _C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f) + .quad _C_LABEL(Xtrap10), _C_LABEL(Xtrap11) + .quad _C_LABEL(Xtrap12), _C_LABEL(Xtrap13) + .quad _C_LABEL(Xtrap14), _C_LABEL(Xtrap15) + .quad _C_LABEL(Xtrap16), _C_LABEL(Xtrap17) +
.quad _C_LABEL(Xtrap18), _C_LABEL(Xtrap19) + .quad _C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b) + .quad _C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d) + .quad _C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f) + +/* + * If an error is detected during trap, syscall, or interrupt exit, trap() will + * change %eip to point to one of these labels. We clean up the stack, if + * necessary, and resume as if we were handling a general protection fault. + * This will cause the process to get a SIGBUS. + * + * XXXfvdl currently unused, as pop %ds and pop %es are illegal in long + * mode. However, if the x86-64 port is going to support USER_LDT, we + * may need something like this after all. + */ +NENTRY(resume_iret) + ZTRAP(T_PROTFLT) +#if 0 +NENTRY(resume_pop_ds) + movl $GSEL(GDATA_SEL, SEL_KPL),%eax + movl %eax,%es +NENTRY(resume_pop_es) + movl $T_PROTFLT,TF_TRAPNO(%rsp) + jmp calltrap +#endif + +/* + * All traps go through here. Call the generic trap handler, and + * check for ASTs afterwards. + */ +NENTRY(alltraps) + INTRENTRY +calltrap: +#ifdef DIAGNOSTIC + movl _C_LABEL(cpl)(%rip),%ebx +#endif /* DIAGNOSTIC */ + call _C_LABEL(trap) +2: /* Check for ASTs on exit to user mode. */ + cli + cmpb $0,_C_LABEL(astpending)(%rip) + je 1f + testb $SEL_RPL,TF_CS(%rsp) + jz 1f +5: movb $0,_C_LABEL(astpending)(%rip) + sti + movl $T_ASTFLT,TF_TRAPNO(%rsp) + call _C_LABEL(trap) + jmp 2b +#ifndef DIAGNOSTIC +1: INTRFASTEXIT +#else /* DIAGNOSTIC */ +1: cmpl _C_LABEL(cpl)(%rip),%ebx + jne 3f + INTRFASTEXIT +3: sti + movabsq $4f,%rdi + call _C_LABEL(printf) +#ifdef DDB + int $3 +#endif /* DDB */ + movl %ebx,_C_LABEL(cpl)(%rip) + jmp 2b +4: .asciz "WARNING: SPL NOT LOWERED ON TRAP EXIT\n" +#endif /* DIAGNOSTIC */ + + +/* + * Old call gate entry for syscall. XXXfvdl: only needed if we're + * going to support running old NetBSD or ibcs2 binaries, etc, + * on NetBSD/x86_64. + */ +IDTVEC(osyscall) + /* Set eflags in trap frame. */ + pushfq + popq 8(%rsp) + pushq $7 # size of instruction for restart + jmp syscall1 + +/* + * Trap gate entry for syscall + * XXX convert to using syscall/sysret. + * XXXfvdl Pushing all of the intr frame is overkill. + */ +IDTVEC(syscall) + pushq $2 # size of instruction for restart +syscall1: + pushq $T_ASTFLT # trap # for doing ASTs + INTRENTRY + movq _C_LABEL(curproc)(%rip),%rdx # get pointer to curproc +#ifdef DIAGNOSTIC + movl _C_LABEL(cpl)(%rip),%ebx +#endif /* DIAGNOSTIC */ + movq %rsp,P_MD_REGS(%rdx) # save pointer to frame + call *P_MD_SYSCALL(%rdx) +2: /* Check for ASTs on exit to user mode. */ + cli + cmpb $0,_C_LABEL(astpending)(%rip) + je 1f + /* Always returning to user mode here. */ + movb $0,_C_LABEL(astpending)(%rip) + sti + /* Pushed T_ASTFLT into tf_trapno on entry. */ + call _C_LABEL(trap) + jmp 2b +#ifndef DIAGNOSTIC +1: INTRFASTEXIT +#else /* DIAGNOSTIC */ +1: cmpl _C_LABEL(cpl)(%rip),%ebx + jne 3f + INTRFASTEXIT +3: sti + movabsq $4f, %rdi + xorq %rax,%rax + call _C_LABEL(printf) +#ifdef DDB + int $3 +#endif /* DDB */ + movl %ebx,_C_LABEL(cpl)(%rip) + jmp 2b +4: .asciz "WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n" +#endif /* DIAGNOSTIC */ + + +/* + * XXXfvdl these are currently still the i386 ISA-like handling functions. + * The first generation of Hammers is specified to have an "APIC-like" + * interrupt controller, so it is likely that the local APIC code as + * used by the SMP code can be shared in the future. 
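+ *
+ * A worked example of the mask bookkeeping defined below (illustration
+ * only): for irq 11, IRQ_BYTE(11) = 11 >> 3 = 1 and
+ * IRQ_BIT(11) = 1 << (11 & 7) = 0x08, so MASK(11, IO_ICU2) sets bit 3 in
+ * the second byte of imen and writes that byte to the slave 8259's mask
+ * register at port IO_ICU2+1; UNMASK(11, IO_ICU2) clears the same bit.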
+ */ + +#define IRQ_BIT(irq_num) (1 << ((irq_num) & 7)) +#define IRQ_BYTE(irq_num) ((irq_num) >> 3) + +#define ACK1(irq_num) \ + movb $(0x60|(irq_num%8)),%al /* specific EOI */ ;\ + outb %al,$IO_ICU1 + +#define ACK2(irq_num) \ + movb $(0x60|(irq_num%8)),%al /* specific EOI */ ;\ + outb %al,$IO_ICU2 /* do the second ICU first */ ;\ + movb $(0x60|IRQ_SLAVE),%al /* specific EOI for IRQ2 */ ;\ + outb %al,$IO_ICU1 + +#define MASK(irq_num, icu) \ + movb (_C_LABEL(imen) + IRQ_BYTE(irq_num))(%rip),%al ;\ + orb $IRQ_BIT(irq_num),%al ;\ + movb %al,(_C_LABEL(imen) + IRQ_BYTE(irq_num))(%rip) ;\ + outb %al,$(icu+1) +#define UNMASK(irq_num, icu) \ + cli ;\ + movb (_C_LABEL(imen) + IRQ_BYTE(irq_num))(%rip),%al ;\ + andb $~IRQ_BIT(irq_num),%al ;\ + movb %al,(_C_LABEL(imen) + IRQ_BYTE(irq_num))(%rip) ;\ + outb %al,$(icu+1) ;\ + sti + +/* + * Macros for interrupt entry, call to handler, and exit. + * + * XXX + * The interrupt frame is set up to look like a trap frame. This may be a + * waste. The only handler which needs a frame is the clock handler, and it + * only needs a few bits. Xdoreti() needs a trap frame for handling ASTs, but + * it could easily convert the frame on demand. + * + * The direct costs of setting up a trap frame are two pushl's (error code and + * trap number), an addl to get rid of these, and pushing and popping the + * callee-saved registers %esi, %edi, %ebx, and %ebp twice. + * + * If the interrupt frame is made more flexible, INTR can push %eax first and + * decide the ipending case with less overhead, e.g., by avoiding loading the + * segment registers. + * + * XXX + * Should we do a cld on every system entry to avoid the requirement for + * scattered cld's? + */ + + .globl _C_LABEL(isa_strayintr) + +/* + * Normal vectors. + * + * We cdr down the intrhand chain, calling each handler with its appropriate + * argument (0 meaning a pointer to the frame, for clock interrupts). + * + * The handler returns one of three values: + * 0 - This interrupt wasn't for me. + * 1 - This interrupt was for me. + * -1 - This interrupt might have been for me, but I don't know. + * If there are no handlers, or they all return 0, we flag it as a `stray' + * interrupt. On a system with level-triggered interrupts, we could terminate + * immediately when one of them returns 1; but this is a PC. + * + * On exit, we jump to Xdoreti(), to process soft interrupts and ASTs.
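+ *
+ * For illustration only, the per-IRQ dispatch that the INTR() macro below
+ * implements in assembly corresponds roughly to this C sketch (ih_fun,
+ * ih_arg, ih_count and ih_next are the struct intrhand fields used here;
+ * `claimed' is a name invented for the sketch):
+ *
+ *	struct intrhand *ih;
+ *	int claimed = 0;
+ *
+ *	for (ih = intrhand[irq_num]; ih != NULL; ih = ih->ih_next) {
+ *		claimed |= (*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : frame);
+ *		ih->ih_count++;
+ *	}
+ *	if (claimed == 0)
+ *		isa_strayintr(irq_num);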
+ */ + +#define MY_COUNT _C_LABEL(uvmexp) + +#define XINTR(irq_num) Xintr/**/irq_num +#define XHOLD(irq_num) Xhold/**/irq_num +#define XSTRAY(irq_num) Xstray/**/irq_num + +#define INTR(irq_num, icu, ack) \ +IDTVEC(resume/**/irq_num) ;\ + cli ;\ + jmp 1f ;\ +IDTVEC(recurse/**/irq_num) ;\ + movq %rsp,%r10 ;\ + movl %ss,%r11d ;\ + pushq %r11 ;\ + pushq %r10 ;\ + pushfq ;\ + movl %cs,%r11d ;\ + pushq %r11 ;\ + pushq %r12 ;\ + cli ;\ +XINTR(irq_num): ;\ + pushq $0 /* dummy error code */ ;\ + pushq $T_ASTFLT /* trap # for doing ASTs */ ;\ + INTRENTRY ;\ + MAKE_FRAME ;\ + MASK(irq_num, icu) /* mask it in hardware */ ;\ + ack(irq_num) /* and allow other intrs */ ;\ + incl (MY_COUNT+V_INTR)(%rip) /* statistical info */ ;\ + leaq _C_LABEL(cpl)(%rip),%rdi ;\ + testb $IRQ_BIT(irq_num),IRQ_BYTE(irq_num)(%rdi) ;\ + jnz XHOLD(irq_num) /* currently masked; hold it */ ;\ +1: movl _C_LABEL(cpl)(%rip),%eax /* cpl to restore on exit */ ;\ + pushq %rax ;\ + leaq _C_LABEL(intrmask)(%rip),%rdi ;\ + orl ((irq_num) * 4)(%rdi),%eax ;\ + movl %eax,_C_LABEL(cpl)(%rip) /* add in this intr's mask */ ;\ + sti /* safe to take intrs now */ ;\ + leaq _C_LABEL(intrhand)(%rip),%rdi ;\ + movq ((irq_num) * 8)(%rdi),%rbx /* head of chain */ ;\ + testq %rbx,%rbx ;\ + jz XSTRAY(irq_num) /* no handlers; we're stray */ ;\ + STRAY_INITIALIZE /* nobody claimed it yet */ ;\ + leaq _C_LABEL(intrcnt)(%rip),%rdi ;\ + incl (4*(irq_num))(%rdi) /* XXX */ ;\ +7: movq IH_ARG(%rbx),%rdi /* get handler arg */ ;\ + testq %rdi,%rdi ;\ + jnz 4f ;\ + movq %rsp,%rdi /* 0 means frame pointer */ ;\ +4: call *IH_FUN(%rbx) /* call it */ ;\ + STRAY_INTEGRATE /* maybe he claimed it */ ;\ + incl IH_COUNT(%rbx) /* count the intrs */ ;\ + movq IH_NEXT(%rbx),%rbx /* next handler in chain */ ;\ + testq %rbx,%rbx ;\ + jnz 7b ;\ + STRAY_TEST /* see if it's a stray */ ;\ +5: UNMASK(irq_num, icu) /* unmask it in hardware */ ;\ + jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\ +IDTVEC(stray/**/irq_num) ;\ + movq $irq_num,%rdi ;\ + call _C_LABEL(isa_strayintr) ;\ + leaq _C_LABEL(strayintrcnt)(%rip),%rdi ;\ + incl (4*(irq_num))(%rdi) ;\ + jmp 5b ;\ +IDTVEC(hold/**/irq_num) ;\ + leaq _C_LABEL(ipending)(%rip),%rdi ;\ + orb $IRQ_BIT(irq_num),IRQ_BYTE(irq_num)(%rdi) ;\ + INTRFASTEXIT + +#if defined(DEBUG) && defined(notdef) +#define STRAY_INITIALIZE \ + xorl %esi,%esi +#define STRAY_INTEGRATE \ + orl %eax,%esi +#define STRAY_TEST \ + testl %esi,%esi ;\ + jz XSTRAY(irq_num) +#else /* !DEBUG */ +#define STRAY_INITIALIZE +#define STRAY_INTEGRATE +#define STRAY_TEST +#endif /* DEBUG */ + +#ifdef DDB +#define MAKE_FRAME \ + leaq -8(%rsp),%rbp +#else /* !DDB */ +#define MAKE_FRAME +#endif /* DDB */ + +INTR(0, IO_ICU1, ACK1) +INTR(1, IO_ICU1, ACK1) +INTR(2, IO_ICU1, ACK1) +INTR(3, IO_ICU1, ACK1) +INTR(4, IO_ICU1, ACK1) +INTR(5, IO_ICU1, ACK1) +INTR(6, IO_ICU1, ACK1) +INTR(7, IO_ICU1, ACK1) +INTR(8, IO_ICU2, ACK2) +INTR(9, IO_ICU2, ACK2) +INTR(10, IO_ICU2, ACK2) +INTR(11, IO_ICU2, ACK2) +INTR(12, IO_ICU2, ACK2) +INTR(13, IO_ICU2, ACK2) +INTR(14, IO_ICU2, ACK2) +INTR(15, IO_ICU2, ACK2) + +/* + * These tables are used by the ISA configuration code. 
+ */ +/* interrupt service routine entry points */ +IDTVEC(intr) + .quad _C_LABEL(Xintr0), _C_LABEL(Xintr1) + .quad _C_LABEL(Xintr2), _C_LABEL(Xintr3) + .quad _C_LABEL(Xintr4), _C_LABEL(Xintr5) + .quad _C_LABEL(Xintr6), _C_LABEL(Xintr7) + .quad _C_LABEL(Xintr8), _C_LABEL(Xintr9) + .quad _C_LABEL(Xintr10), _C_LABEL(Xintr11) + .quad _C_LABEL(Xintr12), _C_LABEL(Xintr13) + .quad _C_LABEL(Xintr14), _C_LABEL(Xintr15) + +/* + * These tables are used by Xdoreti() and Xspllower(). + */ +/* resume points for suspended interrupts */ +IDTVEC(resume) + .quad _C_LABEL(Xresume0), _C_LABEL(Xresume1) + .quad _C_LABEL(Xresume2), _C_LABEL(Xresume3) + .quad _C_LABEL(Xresume4), _C_LABEL(Xresume5) + .quad _C_LABEL(Xresume6), _C_LABEL(Xresume7) + .quad _C_LABEL(Xresume8), _C_LABEL(Xresume9) + .quad _C_LABEL(Xresume10), _C_LABEL(Xresume11) + .quad _C_LABEL(Xresume12), _C_LABEL(Xresume13) + .quad _C_LABEL(Xresume14), _C_LABEL(Xresume15) + /* for soft interrupts */ + .quad 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .quad _C_LABEL(Xsoftserial), _C_LABEL(Xsoftnet), _C_LABEL(Xsoftclock) +/* fake interrupts to resume from splx() */ +IDTVEC(recurse) + .quad _C_LABEL(Xrecurse0), _C_LABEL(Xrecurse1) + .quad _C_LABEL(Xrecurse2), _C_LABEL(Xrecurse3) + .quad _C_LABEL(Xrecurse4), _C_LABEL(Xrecurse5) + .quad _C_LABEL(Xrecurse6), _C_LABEL(Xrecurse7) + .quad _C_LABEL(Xrecurse8), _C_LABEL(Xrecurse9) + .quad _C_LABEL(Xrecurse10), _C_LABEL(Xrecurse11) + .quad _C_LABEL(Xrecurse12), _C_LABEL(Xrecurse13) + .quad _C_LABEL(Xrecurse14), _C_LABEL(Xrecurse15) + /* for soft interrupts */ + .quad 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .quad _C_LABEL(Xsoftserial), _C_LABEL(Xsoftnet), _C_LABEL(Xsoftclock) + + +/* Old-style vmstat -i interrupt counters. Should be replaced with evcnts. */ + .globl _C_LABEL(intrnames), _C_LABEL(eintrnames) + .globl _C_LABEL(intrcnt), _C_LABEL(eintrcnt) + + /* Names */ +_C_LABEL(intrnames): + .asciz "irq0", "irq1", "irq2", "irq3" + .asciz "irq4", "irq5", "irq6", "irq7" + .asciz "irq8", "irq9", "irq10", "irq11" + .asciz "irq12", "irq13", "irq14", "irq15" +_C_LABEL(strayintrnames): + .asciz "stray0", "stray1", "stray2", "stray3" + .asciz "stray4", "stray5", "stray6", "stray7" + .asciz "stray8", "stray9", "stray10", "stray11" + .asciz "stray12", "stray13", "stray14", "stray15" +_C_LABEL(eintrnames): + + /* And counters */ + .data + .align 64 + +_C_LABEL(intrcnt): + .long 0, 0, 0, 0, 0, 0, 0, 0 + .long 0, 0, 0, 0, 0, 0, 0, 0 +_C_LABEL(strayintrcnt): + .long 0, 0, 0, 0, 0, 0, 0, 0 + .long 0, 0, 0, 0, 0, 0, 0, 0 +_C_LABEL(eintrcnt): + + .text + + + .data + .globl _C_LABEL(imen),_C_LABEL(cpl),_C_LABEL(ipending) + .globl _C_LABEL(astpending),_C_LABEL(netisr) +_C_LABEL(imen): + .long 0xffff # interrupt mask enable (all off) + + .text + +#if defined(PROF) || defined(GPROF) + .globl _C_LABEL(splhigh), _C_LABEL(splx) + + ALIGN_TEXT +_C_LABEL(splhigh): + movl $-1,%eax + xchgl %eax,_C_LABEL(cpl)(%rip) + ret + + ALIGN_TEXT +_C_LABEL(splx): + movl %edi,_C_LABEL(cpl)(%rip) + testl %edi,%edi + jnz _C_LABEL(Xspllower) + ret +#endif /* PROF || GPROF */ + +/* + * Process pending interrupts. 
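+ *
+ * Roughly, in C (an illustrative sketch; the real code below is assembly
+ * and jumps, rather than calls, through the Xrecurse table):
+ *
+ *	while ((pend = ipending & ~cpl) != 0) {
+ *		irq = ffs(pend) - 1;
+ *		ipending &= ~(1 << irq);
+ *		Xrecurse[irq]();
+ *	}
+ *
+ * i.e. take the lowest-numbered pending interrupt that is not masked by
+ * the current priority level and re-enter its handler.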
+ * + * Important registers: + * ebx - cpl + * r12 - address to resume loop at + * r13 - scratch for Xsoftnet + */ +IDTVEC(spllower) + pushq %rbx + pushq %r12 + pushq %r13 + movl _C_LABEL(cpl)(%rip),%ebx # save priority + leaq 1f(%rip),%r12 # address to resume loop at +1: movl %ebx,%eax + notl %eax + andl _C_LABEL(ipending)(%rip),%eax + jz 2f + bsfl %eax,%eax + btrl %eax,_C_LABEL(ipending)(%rip) + jnc 1b + leaq _C_LABEL(Xrecurse)(%rip),%rdi + jmp *(%rdi,%rax,8) +2: popq %r13 + popq %r12 + popq %rbx + ret + +/* + * Handle return from interrupt after device handler finishes. + * + * Important registers: + * ebx - cpl to restore + * r12 - address to resume loop at + * r13 - scratch for Xsoftnet + */ +IDTVEC(doreti) + popq %rbx # get previous priority + movl %ebx,_C_LABEL(cpl)(%rip) + leaq 1f(%rip),%r12 # address to resume loop at +1: movl %ebx,%eax + notl %eax + andl _C_LABEL(ipending)(%rip),%eax + jz 2f + bsfl %eax,%eax # slow, but not worth optimizing + btrl %eax,_C_LABEL(ipending)(%rip) + jnc 1b # some intr cleared the in-memory bit + leaq _C_LABEL(Xresume)(%rip),%rdi + jmp *(%rdi,%rax,8) +2: /* Check for ASTs on exit to user mode. */ + cli + cmpb $0,_C_LABEL(astpending)(%rip) + je 3f + testb $SEL_RPL,TF_CS(%rsp) + jz 3f +4: movb $0,_C_LABEL(astpending)(%rip) + sti + /* Pushed T_ASTFLT into tf_trapno on entry. */ + call _C_LABEL(trap) + jmp 2b +3: INTRFASTEXIT + + +/* + * Soft interrupt handlers + */ + +IDTVEC(softserial) + leaq _C_LABEL(imask)(%rip),%rdi + movl (IPL_SOFTSERIAL * 4)(%rdi),%eax + movl %eax,_C_LABEL(cpl)(%rip) +#include "com.h" +#if NCOM > 0 + call _C_LABEL(comsoft) +#endif + movl %ebx,_C_LABEL(cpl)(%rip) + jmp *%r12 + +IDTVEC(softnet) + movl (_C_LABEL(imask) + IPL_SOFTNET * 4)(%rip),%eax + movl %eax,_C_LABEL(cpl)(%rip) + xorl %r13d,%r13d + xchgl _C_LABEL(netisr)(%rip),%r13d + +#define DONETISR(s, c) \ + .globl _C_LABEL(c) ;\ + testl $(1 << s),%r13d ;\ + jz 1f ;\ + call _C_LABEL(c) ;\ +1: + +#include + +#undef DONETISR + + movl %ebx,_C_LABEL(cpl)(%rip) + jmp *%r12 + +IDTVEC(softclock) + leaq _C_LABEL(imask)(%rip),%rdi + movl (IPL_SOFTCLOCK * 4)(%rdi),%eax + movl %eax,_C_LABEL(cpl)(%rip) + call _C_LABEL(softclock) + movl %ebx,_C_LABEL(cpl)(%rip) + jmp *%r12 diff --git a/sys/arch/x86_64/x86_64/vm_machdep.c b/sys/arch/x86_64/x86_64/vm_machdep.c new file mode 100644 index 000000000000..a13cac6d9588 --- /dev/null +++ b/sys/arch/x86_64/x86_64/vm_machdep.c @@ -0,0 +1,397 @@ +/* $NetBSD: vm_machdep.c,v 1.1 2001/06/19 00:21:18 fvdl Exp $ */ + +/*- + * Copyright (c) 1995 Charles M. Hannum. All rights reserved. + * Copyright (c) 1982, 1986 The Regents of the University of California. + * Copyright (c) 1989, 1990 William Jolitz + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department, and William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 + */ + +/* + * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ + */ + +#include "opt_user_ldt.h" +#include "opt_largepages.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +void setredzone __P((u_short *, caddr_t)); + +/* + * Finish a fork operation, with process p2 nearly set up. + * Copy and update the pcb and trap frame, making the child ready to run. + * + * Rig the child's kernel stack so that it will start out in + * proc_trampoline() and call child_return() with p2 as an + * argument. This causes the newly-created child process to go + * directly to user level with an apparent return value of 0 from + * fork(), while the parent process returns normally. + * + * p1 is the process being forked; if p1 == &proc0, we are creating + * a kernel thread, and the return path and argument are specified with + * `func' and `arg'. + * + * If an alternate user-level stack is requested (with non-zero values + * in both the stack and stacksize args), set up the user stack pointer + * accordingly. + */ +void +cpu_fork(p1, p2, stack, stacksize, func, arg) + register struct proc *p1, *p2; + void *stack; + size_t stacksize; + void (*func) __P((void *)); + void *arg; +{ + register struct pcb *pcb = &p2->p_addr->u_pcb; + register struct trapframe *tf; + register struct switchframe *sf; + + /* + * If fpuproc != p1, then the fpu h/w state is irrelevant and the + * state had better already be in the pcb. This is true for forks + * but not for dumps. + * + * If fpuproc == p1, then we have to save the fpu h/w state to + * p1's pcb so that we can copy it. + */ + if (fpuproc == p1) + fpusave(); + + p2->p_md.md_flags = p1->p_md.md_flags; + + /* Copy pcb from proc p1 to p2. */ + if (p1 == curproc) { + /* Sync the PCB before we copy it. */ + savectx(curpcb); + } +#ifdef DIAGNOSTIC + else if (p1 != &proc0) + panic("cpu_fork: curproc"); +#endif + *pcb = p1->p_addr->u_pcb; + + /* + * Preset these so that gdt_compact() doesn't get confused if called + * during the allocations below. + * + * Note: pcb_ldt_sel is handled in the pmap_activate() call when + * we run the new process. + */ + p2->p_md.md_tss_sel = GSEL(GNULL_SEL, SEL_KPL); + + /* Fix up the TSS. 
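+ *
+ * The child's kernel stack then ends up laid out as (sketch, highest
+ * address first):
+ *
+ *	p2->p_addr + USPACE - 16	<- pcb_tss.tss_rsp0
+ *	struct trapframe		copy of the parent's frame
+ *	struct switchframe		sf_rip = proc_trampoline,
+ *					sf_r12 = func, sf_r13 = arg
+ *
+ * so that the first switch into p2 "returns" to proc_trampoline(), which
+ * calls func(arg) and then drops to user mode through the trapframe.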
*/ + pcb->pcb_tss.tss_rsp0 = (u_int64_t)p2->p_addr + USPACE - 16; + tss_alloc(p2); + + /* + * Copy the trapframe. + */ + p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_tss.tss_rsp0 - 1; + *tf = *p1->p_md.md_regs; + + /* + * If specified, give the child a different stack. + */ + if (stack != NULL) + tf->tf_rsp = (u_int64_t)stack + stacksize; + + sf = (struct switchframe *)tf - 1; + sf->sf_ppl = 0; + sf->sf_r12 = (u_int64_t)func; + sf->sf_r13 = (u_int64_t)arg; + sf->sf_rip = (u_int64_t)proc_trampoline; + pcb->pcb_rsp = (u_int64_t)sf; + pcb->pcb_rbp = 0; +} + +void +cpu_swapout(p) + struct proc *p; +{ + + /* + * Make sure we save the FP state before the user area vanishes. + */ + if (fpuproc == p) + fpusave(); +} + +/* + * cpu_exit is called as the last action during exit. + * + * We clean up a little and then call switch_exit() with the old proc as an + * argument. switch_exit() first switches to proc0's context, and finally + * jumps into switch() to wait for another process to wake up. + */ +void +cpu_exit(p) + register struct proc *p; +{ + + /* If we were using the FPU, forget about it. */ + if (fpuproc == p) + fpuproc = 0; + + /* + * No need to do user LDT cleanup here; it's handled in + * pmap_destroy(). + */ + + uvmexp.swtch++; + switch_exit(p); +} + +/* + * cpu_wait is called from reaper() to let machine-dependent + * code free machine-dependent resources that couldn't be freed + * in cpu_exit(). + */ +void +cpu_wait(p) + struct proc *p; +{ + /* Nuke the TSS. */ + tss_free(p); +} + +/* + * Dump the machine specific segment at the start of a core dump. + */ +struct md_core { + struct reg intreg; + struct fpreg freg; +}; +int +cpu_coredump(p, vp, cred, chdr) + struct proc *p; + struct vnode *vp; + struct ucred *cred; + struct core *chdr; +{ + struct md_core md_core; + struct coreseg cseg; + int error; + + CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0); + chdr->c_hdrsize = ALIGN(sizeof(*chdr)); + chdr->c_seghdrsize = ALIGN(sizeof(cseg)); + chdr->c_cpusize = sizeof(md_core); + + /* Save integer registers. */ + error = process_read_regs(p, &md_core.intreg); + if (error) + return error; + + /* Save floating point registers. */ + error = process_read_fpregs(p, &md_core.freg); + if (error) + return error; + + CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU); + cseg.c_addr = 0; + cseg.c_size = chdr->c_cpusize; + + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize, + (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, + NULL, p); + if (error) + return error; + + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core), + (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE, + IO_NODELOCKED|IO_UNIT, cred, NULL, p); + if (error) + return error; + + chdr->c_nseg++; + return 0; +} + +#if 0 +/* + * Set a red zone in the kernel stack after the u. area. + */ +void +setredzone(pte, vaddr) + u_short *pte; + caddr_t vaddr; +{ +/* eventually do this by setting up an expand-down stack segment + for ss0: selector, allowing stack access down to top of u. + this means though that protection violations need to be handled + thru a double fault exception that must do an integral task + switch to a known good context, within which a dump can be + taken. a sensible scheme might be to save the initial context + used by sched (that has physical memory mapped 1:1 at bottom) + and take the dump while still in mapped mode */ +} +#endif + +/* + * Move pages from one kernel virtual address to another. + * Both addresses are assumed to reside in the Sysmap. 
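+ *
+ * Nothing is copied: each iteration of the loop below just transfers the
+ * PTE, clearing the mapping at `from', installing it at `to', and
+ * flushing the TLB entry for either address if it was valid.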
+ */ +void +pagemove(from, to, size) + register caddr_t from, to; + size_t size; +{ + register pt_entry_t *fpte, *tpte, ofpte, otpte; + + if (size & PAGE_MASK) + panic("pagemove"); + fpte = kvtopte((vaddr_t)from); + tpte = kvtopte((vaddr_t)to); +#ifdef LARGEPAGES + /* XXX For now... */ + if (*fpte & PG_PS) + panic("pagemove: fpte PG_PS"); + if (*tpte & PG_PS) + panic("pagemove: tpte PG_PS"); +#endif + while (size > 0) { + otpte = *tpte; + ofpte = *fpte; + *tpte++ = *fpte; + *fpte++ = 0; + if (otpte & PG_V) + pmap_update_pg((vaddr_t) to); + if (ofpte & PG_V) + pmap_update_pg((vaddr_t) from); + from += PAGE_SIZE; + to += PAGE_SIZE; + size -= PAGE_SIZE; + } +} + +/* + * Convert kernel VA to physical address + */ +int +kvtop(addr) + register caddr_t addr; +{ + paddr_t pa; + + if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE) + panic("kvtop: zero page frame"); + return((int)pa); +} + +extern struct vm_map *phys_map; + +/* + * Map a user I/O request into kernel virtual address space. + * Note: the pages are already locked by uvm_vslock(), so we + * do not need to pass an access_type to pmap_enter(). + */ +void +vmapbuf(bp, len) + struct buf *bp; + vsize_t len; +{ + vaddr_t faddr, taddr, off; + paddr_t fpa; + + if ((bp->b_flags & B_PHYS) == 0) + panic("vmapbuf"); + faddr = trunc_page((vaddr_t)bp->b_saveaddr = bp->b_data); + off = (vaddr_t)bp->b_data - faddr; + len = round_page(off + len); + taddr= uvm_km_valloc_wait(phys_map, len); + bp->b_data = (caddr_t)(taddr + off); + /* + * The region is locked, so we expect that pmap_pte() will return + * non-NULL. + * XXX: unwise to expect this in a multithreaded environment. + * anything can happen to a pmap between the time we lock a + * region, release the pmap lock, and then relock it for + * the pmap_extract(). + * + * no need to flush TLB since we expect nothing to be mapped + * where we just allocated (TLB will be flushed when our + * mapping is removed). + */ + while (len) { + (void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), + faddr, &fpa); + pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE); + faddr += PAGE_SIZE; + taddr += PAGE_SIZE; + len -= PAGE_SIZE; + } +} + +/* + * Unmap a previously-mapped user I/O request.
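+ *
+ * (As with vmapbuf() above, the caller is expected to have locked the
+ * user pages with uvm_vslock() beforehand; this routine only tears down
+ * the temporary kernel mapping and restores bp->b_data from b_saveaddr.)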
+ */ +void +vunmapbuf(bp, len) + struct buf *bp; + vsize_t len; +{ + vaddr_t addr, off; + + if ((bp->b_flags & B_PHYS) == 0) + panic("vunmapbuf"); + addr = trunc_page((vaddr_t)bp->b_data); + off = (vaddr_t)bp->b_data - addr; + len = round_page(off + len); + pmap_kremove(addr, len); + uvm_km_free_wakeup(phys_map, addr, len); + bp->b_data = bp->b_saveaddr; + bp->b_saveaddr = 0; +} diff --git a/sys/lib/libkern/arch/x86_64/Makefile.inc b/sys/lib/libkern/arch/x86_64/Makefile.inc new file mode 100644 index 000000000000..302da8310874 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/Makefile.inc @@ -0,0 +1,12 @@ +# $NetBSD: Makefile.inc,v 1.1 2001/06/19 00:22:45 fvdl Exp $ + +SRCS+= __main.c __assert.c \ + imax.c imin.c lmax.c lmin.c max.c min.c ulmax.c ulmin.c \ + byte_swap_2.S byte_swap_4.S bswap64.c \ + bcmp.S bzero.S ffs.S \ + memchr.S memcmp.S memcpy.S memmove.S memset.S \ + strcat.S strchr.S strcmp.S strcasecmp.c \ + strcpy.S strlen.S strncasecmp.c \ + strncmp.c strncpy.c strrchr.S \ + scanc.S skpc.S \ + random.S diff --git a/sys/lib/libkern/arch/x86_64/bcmp.S b/sys/lib/libkern/arch/x86_64/bcmp.S new file mode 100644 index 000000000000..5cee3afe74b2 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/bcmp.S @@ -0,0 +1,24 @@ +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: bcmp.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $") +#endif + +ENTRY(bcmp) + xorl %eax,%eax /* clear return value */ + cld /* set compare direction forward */ + + movq %rdx,%rcx /* compare by words */ + shrq $3,%rcx + repe + cmpsq + jne L1 + + movq %rdx,%rcx /* compare remainder by bytes */ + andq $7,%rcx + repe + cmpsb + je L2 + +L1: incl %eax +L2: ret diff --git a/sys/lib/libkern/arch/x86_64/bcopy.S b/sys/lib/libkern/arch/x86_64/bcopy.S new file mode 100644 index 000000000000..7787465b8225 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/bcopy.S @@ -0,0 +1,97 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from locore.s. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: bcopy.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $") +#endif + + /* + * (ov)bcopy (src,dst,cnt) + * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800 + */ + +#ifdef MEMCOPY +ENTRY(memcpy) +#else +#ifdef MEMMOVE +ENTRY(memmove) +#else +ENTRY(bcopy) +#endif +#endif +#if defined(MEMCOPY) || defined(MEMMOVE) + movq %rdi,%r11 /* save dest */ +#else + xchgq %rdi,%rsi +#endif + movq %rdx,%rcx + movq %rdi,%rax + subq %rsi,%rax + cmpq %rcx,%rax /* overlapping? */ + jb 1f + cld /* nope, copy forwards. */ + shrq $3,%rcx /* copy by words */ + rep + movsq + movq %rdx,%rcx + andq $7,%rcx /* any bytes left? */ + rep + movsb +#if defined(MEMCOPY) || defined(MEMMOVE) + movq %r11,%rax +#endif + ret +1: + addq %rcx,%rdi /* copy backwards. */ + addq %rcx,%rsi + std + andq $7,%rcx /* any fractional bytes? */ + decq %rdi + decq %rsi + rep + movsb + movq %rdx,%rcx /* copy remainder by words */ + shrq $3,%rcx + subq $7,%rsi + subq $7,%rdi + rep + movsq +#if defined(MEMCOPY) || defined(MEMMOVE) + movq %r11,%rax +#endif + cld + ret diff --git a/sys/lib/libkern/arch/x86_64/byte_swap_2.S b/sys/lib/libkern/arch/x86_64/byte_swap_2.S new file mode 100644 index 000000000000..19ed5e278129 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/byte_swap_2.S @@ -0,0 +1,52 @@ +/* $NetBSD: byte_swap_2.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)htons.s 5.2 (Berkeley) 12/17/90 + */ + +#include +#if defined(LIBC_SCCS) + RCSID("$NetBSD: byte_swap_2.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $") +#endif + +_ENTRY(_C_LABEL(bswap16)) +_ENTRY(_C_LABEL(ntohs)) +_ENTRY(_C_LABEL(htons)) +_PROF_PROLOGUE + movl %edi,%eax + xchgb %ah,%al + ret diff --git a/sys/lib/libkern/arch/x86_64/byte_swap_4.S b/sys/lib/libkern/arch/x86_64/byte_swap_4.S new file mode 100644 index 000000000000..f6a961e38f54 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/byte_swap_4.S @@ -0,0 +1,52 @@ +/* $NetBSD: byte_swap_4.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)htonl.s 5.3 (Berkeley) 12/17/90 + */ + +#include +#if defined(LIBC_SCCS) + RCSID("$NetBSD: byte_swap_4.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $") +#endif + +_ENTRY(_C_LABEL(bswap32)) +_ENTRY(_C_LABEL(ntohl)) +_ENTRY(_C_LABEL(htonl)) +_PROF_PROLOGUE + movl %edi,%eax + bswap %eax + ret diff --git a/sys/lib/libkern/arch/x86_64/bzero.S b/sys/lib/libkern/arch/x86_64/bzero.S new file mode 100644 index 000000000000..6e4fe834d1a0 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/bzero.S @@ -0,0 +1,44 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: bzero.S,v 1.1 2001/06/19 00:22:45 fvdl Exp $") +#endif + +ENTRY(bzero) + movq %rsi,%rdx + + cld /* set fill direction forward */ + xorq %rax,%rax /* set fill data to 0 */ + + /* + * if the string is too short, it's really not worth the overhead + * of aligning to word boundaries, etc. So we jump to a plain + * unaligned set. + */ + cmpq $16,%rdx + jb L1 + + movq %rdi,%rcx /* compute misalignment */ + negq %rcx + andq $7,%rcx + subq %rcx,%rdx + rep /* zero until word aligned */ + stosb + + movq %rdx,%rcx /* zero by words */ + shrq $3,%rcx + andq $7,%rdx + rep + stosq + +L1: movq %rdx,%rcx /* zero remainder by bytes */ + rep + stosb + + ret diff --git a/sys/lib/libkern/arch/x86_64/ffs.S b/sys/lib/libkern/arch/x86_64/ffs.S new file mode 100644 index 000000000000..c74c7010f54d --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/ffs.S @@ -0,0 +1,21 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: ffs.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $") +#endif + +ENTRY(ffs) + bsfl %edi,%eax + jz L1 /* ZF is set if all bits are 0 */ + incl %eax /* bits numbered from 1, not 0 */ + ret + + _ALIGN_TEXT +L1: xorl %eax,%eax /* clear result */ + ret diff --git a/sys/lib/libkern/arch/x86_64/index.S b/sys/lib/libkern/arch/x86_64/index.S new file mode 100644 index 000000000000..60754f266f09 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/index.S @@ -0,0 +1,29 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: index.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $") +#endif + +#ifdef STRCHR +ENTRY(strchr) +#else +ENTRY(index) +#endif + movq %rdi,%rax + movb %sil,%cl +L1: + movb (%rax),%dl + cmpb %dl,%cl /* found char? */ + je L2 + incq %rax + testb %dl,%dl /* null terminator? */ + jnz L1 + xorq %rax,%rax +L2: + ret diff --git a/sys/lib/libkern/arch/x86_64/memchr.S b/sys/lib/libkern/arch/x86_64/memchr.S new file mode 100644 index 000000000000..f978e760220c --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/memchr.S @@ -0,0 +1,25 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: memchr.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $") +#endif + +ENTRY(memchr) + movb %sil,%al /* set character to search for */ + movq %rdx,%rcx /* set length of search */ + testq %rcx,%rcx /* test for len == 0 */ + jz L1 + cld /* set search forward */ + repne /* search!
*/ + scasb + jne L1 /* scan failed, return null */ + leaq -1(%rdi),%rax /* adjust result of scan */ + ret +L1: xorq %rax,%rax + ret diff --git a/sys/lib/libkern/arch/x86_64/memcmp.S b/sys/lib/libkern/arch/x86_64/memcmp.S new file mode 100644 index 000000000000..722a2a2c304a --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/memcmp.S @@ -0,0 +1,40 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: memcmp.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $") +#endif + +ENTRY(memcmp) + cld /* set compare direction forward */ + movq %rdx,%rcx /* compare by longs */ + shrq $3,%rcx + repe + cmpsq + jne L5 /* do we match so far? */ + + movq %rdx,%rcx /* compare remainder by bytes */ + andq $7,%rcx + repe + cmpsb + jne L6 /* do we match? */ + + xorl %eax,%eax /* we match, return zero */ + ret + +L5: movl $8,%ecx /* We know that one of the next */ + subq %rcx,%rdi /* eight pairs of bytes do not */ + subq %rcx,%rsi /* match. */ + repe + cmpsb +L6: xorl %eax,%eax /* Perform unsigned comparison */ + movb -1(%rdi),%al + xorl %edx,%edx + movb -1(%rsi),%dl + subl %edx,%eax + ret diff --git a/sys/lib/libkern/arch/x86_64/memcpy.S b/sys/lib/libkern/arch/x86_64/memcpy.S new file mode 100644 index 000000000000..c39caa328a39 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/memcpy.S @@ -0,0 +1,4 @@ +/* $NetBSD: memcpy.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $ */ + +#define MEMCOPY +#include "bcopy.S" diff --git a/sys/lib/libkern/arch/x86_64/memmove.S b/sys/lib/libkern/arch/x86_64/memmove.S new file mode 100644 index 000000000000..f5b81357afa1 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/memmove.S @@ -0,0 +1,4 @@ +/* $NetBSD: memmove.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $ */ + +#define MEMMOVE +#include "bcopy.S" diff --git a/sys/lib/libkern/arch/x86_64/memset.S b/sys/lib/libkern/arch/x86_64/memset.S new file mode 100644 index 000000000000..2d92dc665344 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/memset.S @@ -0,0 +1,58 @@ +/* + * Written by J.T. Conklin . + * Public domain. + * Adapted for NetBSD/x86_64 by Frank van der Linden + */ + +#include + +#if defined(LIBC_SCCS) + RCSID("$NetBSD: memset.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $") +#endif + +ENTRY(memset) + movq %rsi,%rax + movq %rdx,%rcx + movq %rdi,%r11 + + cld /* set fill direction forward */ + + /* + * if the string is too short, it's really not worth the overhead + * of aligning to word boundaries, etc. So we jump to a plain + * unaligned set. + */ + cmpq $0x0f,%rcx + jle L1 + + movb %al,%ah /* copy char to all bytes in word */ + movl %eax,%edx + sall $16,%eax + orl %edx,%eax + + movl %eax,%edx + salq $32,%rax + orq %rdx,%rax + + movq %rdi,%rdx /* compute misalignment */ + negq %rdx + andq $7,%rdx + movq %rcx,%r8 + subq %rdx,%r8 + + movq %rdx,%rcx /* set until word aligned */ + rep + stosb + + movq %r8,%rcx + shrq $3,%rcx /* set by words */ + rep + stosq + + movq %r8,%rcx /* set remainder by bytes */ + andq $7,%rcx +L1: rep + stosb + movq %r11,%rax + + ret diff --git a/sys/lib/libkern/arch/x86_64/random.S b/sys/lib/libkern/arch/x86_64/random.S new file mode 100644 index 000000000000..1f8be7f2a9a8 --- /dev/null +++ b/sys/lib/libkern/arch/x86_64/random.S @@ -0,0 +1,97 @@ +/* $NetBSD: random.S,v 1.1 2001/06/19 00:22:46 fvdl Exp $ */ + +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright (c) 1990,1993 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that: (1) source code distributions + * retain the above copyright notice and this paragraph in its entirety, (2) + * distributions including binary code include the above copyright notice and + * this paragraph in its entirety in the documentation or other materials + * provided with the distribution, and (3) all advertising materials mentioning + * features or use of this software display the following acknowledgement: + * ``This product includes software developed by the University of California, + * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of + * the University nor the names of its contributors may be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Here is a very good random number generator. This implementation is + * based on ``Two Fast Implementations of the "Minimal Standard" Random + * Number Generator'', David G. Carta, Communications of the ACM, Jan 1990, + * Vol 33 No 1. Do NOT modify this code unless you have a very thorough + * understanding of the algorithm. It's trickier than you think. If + * you do change it, make sure that its 10,000'th invocation returns + * 1043618065. 
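+ *
+ * For illustration, the same recurrence in C using a 64-bit product (a
+ * sketch, not part of this file; `seed' stands for the 31-bit state kept
+ * in randseed below):
+ *
+ *	uint64_t t = 16807ULL * seed;
+ *	uint32_t p = t & 0x7fffffff;
+ *	uint32_t q = (uint32_t)(t >> 31);
+ *	seed = p + q;
+ *	if (seed > 0x7fffffff)
+ *		seed = (seed & 0x7fffffff) + 1;
+ *	return (seed);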
+ *
+ * Here is easier-to-decipher pseudocode:
+ *
+ * p = (16807*seed)<30:0>	# e.g., the low 31 bits of the product
+ * q = (16807*seed)<62:31>	# e.g., the high 31 bits starting at bit 32
+ * if (p + q < 2^31)
+ *	seed = p + q
+ * else
+ *	seed = ((p + q) & (2^31 - 1)) + 1
+ * return (seed);
+ *
+ * The result is in (0,2^31), e.g., it's always positive.
+ */
+#include <machine/asm.h>
+
+	.data
+randseed:
+	.long	1
+	.text
+ENTRY(random)
+	movl	$16807,%eax
+	imull	randseed(%rip)
+	shld	$1,%eax,%edx
+	andl	$0x7fffffff,%eax
+	addl	%edx,%eax
+	js	1f
+	movl	%eax,randseed(%rip)
+	ret
+1:
+	subl	$0x7fffffff,%eax
+	movl	%eax,randseed(%rip)
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/rindex.S b/sys/lib/libkern/arch/x86_64/rindex.S
new file mode 100644
index 000000000000..6ba7c52a11f0
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/rindex.S
@@ -0,0 +1,29 @@
+/*
+ * Written by J.T. Conklin.
+ * Public domain.
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+#if defined(LIBC_SCCS)
+	RCSID("$NetBSD: rindex.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $")
+#endif
+
+#ifdef STRRCHR
+ENTRY(strrchr)
+#else
+ENTRY(rindex)
+#endif
+	movb	%sil,%cl
+	xorq	%rax,%rax		/* init pointer to null */
+L1:
+	movb	(%rdi),%dl
+	cmpb	%dl,%cl
+	jne	L2
+	movq	%rdi,%rax
+L2:
+	incq	%rdi
+	testb	%dl,%dl			/* null terminator??? */
+	jnz	L1
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/scanc.S b/sys/lib/libkern/arch/x86_64/scanc.S
new file mode 100644
index 000000000000..fd4fd31e1299
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/scanc.S
@@ -0,0 +1,62 @@
+/* $NetBSD: scanc.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+ENTRY(scanc)
+	movq	%rdx,%r11
+	movb	%cl,%dl
+	movl	%edi,%ecx
+	testl	%ecx,%ecx
+	jz	2f
+	movq	%r11,%rdi
+	xorq	%rax,%rax
+	cld
+1:
+	lodsb
+	testb	%dl,(%rax,%rdi)
+	jnz	2f
+	decl	%ecx
+	jnz	1b
+2:
+	movl	%ecx,%eax
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/skpc.S b/sys/lib/libkern/arch/x86_64/skpc.S
new file mode 100644
index 000000000000..f037d98b66ad
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/skpc.S
@@ -0,0 +1,56 @@
+/* $NetBSD: skpc.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+ENTRY(skpc)
+	movl	%edi,%eax
+	movq	%rsi,%rcx
+	movq	%rdx,%rdi
+	cld
+	repe
+	scasb
+	je	1f
+	incq	%rcx
+1:
+	movl	%ecx,%eax
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/strcat.S b/sys/lib/libkern/arch/x86_64/strcat.S
new file mode 100644
index 000000000000..7dc712443123
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strcat.S
@@ -0,0 +1,65 @@
+/*
+ * Written by J.T. Conklin.
+ * Public domain.
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+#if defined(LIBC_SCCS)
+	RCSID("$NetBSD: strcat.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $")
+#endif
+
+/*
+ * NOTE: I've unrolled the loop eight times: large enough to make a
+ * significant difference, and small enough not to totally trash the
+ * cache.
+ */
+
+ENTRY(strcat)
+	movq	%rdi,%r11
+
+	cld				/* set search forward */
+	xorl	%eax,%eax		/* set search for null terminator */
+	movq	$-1,%rcx		/* set search for lots of characters */
+	repne				/* search! */
+	scasb
+
+	decq	%rdi
+
+L1:	movb	(%rsi),%al		/* unroll loop, but not too much */
+	movb	%al,(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	1(%rsi),%al
+	movb	%al,1(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	2(%rsi),%al
+	movb	%al,2(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	3(%rsi),%al
+	movb	%al,3(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	4(%rsi),%al
+	movb	%al,4(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	5(%rsi),%al
+	movb	%al,5(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	6(%rsi),%al
+	movb	%al,6(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	7(%rsi),%al
+	movb	%al,7(%rdi)
+	addq	$8,%rsi
+	addq	$8,%rdi
+	testb	%al,%al
+	jnz	L1
+L2:	movq	%r11,%rax
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/strchr.S b/sys/lib/libkern/arch/x86_64/strchr.S
new file mode 100644
index 000000000000..91fd708891fc
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strchr.S
@@ -0,0 +1,4 @@
+/* $NetBSD: strchr.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $ */
+
+#define STRCHR
+#include "index.S"
diff --git a/sys/lib/libkern/arch/x86_64/strcmp.S b/sys/lib/libkern/arch/x86_64/strcmp.S
new file mode 100644
index 000000000000..559563666d48
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strcmp.S
@@ -0,0 +1,88 @@
+/*
+ * Written by J.T. Conklin.
+ * Public domain.
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+#if defined(LIBC_SCCS)
+	RCSID("$NetBSD: strcmp.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $")
+#endif
+
+/*
+ * NOTE: I've unrolled the loop eight times: large enough to make a
+ * significant difference, and small enough not to totally trash the
+ * cache.
+ */
+
+ENTRY(strcmp)
+	jmp	L2			/* Jump into the loop. */
+
+L1:	incq	%rdi
+	incq	%rsi
+L2:	movb	(%rdi),%cl
+	testb	%cl,%cl			/* null terminator */
+	jz	L3
+	cmpb	%cl,(%rsi)		/* chars match */
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	jne	L3
+
+	incq	%rdi
+	incq	%rsi
+	movb	(%rdi),%cl
+	testb	%cl,%cl
+	jz	L3
+	cmpb	%cl,(%rsi)
+	je	L1
+L3:	movzbl	(%rdi),%eax		/* unsigned comparison */
+	movzbl	(%rsi),%edx
+	subl	%edx,%eax
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/strcpy.S b/sys/lib/libkern/arch/x86_64/strcpy.S
new file mode 100644
index 000000000000..924dfffd5bee
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strcpy.S
@@ -0,0 +1,57 @@
+/*
+ * Written by J.T. Conklin.
+ * Public domain.
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+#if defined(LIBC_SCCS)
+	RCSID("$NetBSD: strcpy.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $")
+#endif
+
+/*
+ * NOTE: I've unrolled the loop eight times: large enough to make a
+ * significant difference, and small enough not to totally trash the
+ * cache.
+ */
+
+ENTRY(strcpy)
+	movq	%rdi,%r11
+
+L1:	movb	(%rsi),%al		/* unroll loop, but not too much */
+	movb	%al,(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	1(%rsi),%al
+	movb	%al,1(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	2(%rsi),%al
+	movb	%al,2(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	3(%rsi),%al
+	movb	%al,3(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	4(%rsi),%al
+	movb	%al,4(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	5(%rsi),%al
+	movb	%al,5(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	6(%rsi),%al
+	movb	%al,6(%rdi)
+	testb	%al,%al
+	jz	L2
+	movb	7(%rsi),%al
+	movb	%al,7(%rdi)
+	addq	$8,%rsi
+	addq	$8,%rdi
+	testb	%al,%al
+	jnz	L1
+L2:	movq	%r11,%rax
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/strlen.S b/sys/lib/libkern/arch/x86_64/strlen.S
new file mode 100644
index 000000000000..3c85659391a9
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strlen.S
@@ -0,0 +1,21 @@
+/*
+ * Written by J.T. Conklin.
+ * Public domain.
+ * Adapted for NetBSD/x86_64 by Frank van der Linden
+ */
+
+#include <machine/asm.h>
+
+#if defined(LIBC_SCCS)
+	RCSID("$NetBSD: strlen.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $")
+#endif
+
+ENTRY(strlen)
+	cld				/* set search forward */
+	xorl	%eax,%eax		/* set search for null terminator */
+	movq	$-1,%rcx		/* set search for lots of characters */
+	repne				/* search! */
+	scasb
+	notq	%rcx			/* get length by taking complement */
+	leaq	-1(%rcx),%rax		/* and subtracting one */
+	ret
diff --git a/sys/lib/libkern/arch/x86_64/strrchr.S b/sys/lib/libkern/arch/x86_64/strrchr.S
new file mode 100644
index 000000000000..9b23edfb4353
--- /dev/null
+++ b/sys/lib/libkern/arch/x86_64/strrchr.S
@@ -0,0 +1,4 @@
+/* $NetBSD: strrchr.S,v 1.1 2001/06/19 00:22:47 fvdl Exp $ */
+
+#define STRRCHR
+#include "rindex.S"
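Editorial note (not part of the patch): for readers following the assembly in sys/lib/libkern/arch/x86_64/random.S above, the short C model below mirrors the pseudocode in that file's comment, i.e. Carta's carry-fold evaluation of the Park-Miller "minimal standard" generator, seed' = 16807 * seed mod (2^31 - 1). The file name, carta_random() and the standalone randseed variable are illustrative only; the check value 1043618065 for the 10,000th invocation is the one quoted in the file's comment. The `js`/`subl $0x7fffffff` path in the assembly is the same fold written as s - (2^31 - 1).

/*
 * carta_random.c -- illustrative C sketch of the algorithm in random.S.
 * Not part of the patch; names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t randseed = 1;		/* random.S also starts from 1 */

static uint32_t
carta_random(void)
{
	uint64_t prod = (uint64_t)16807 * randseed;
	uint32_t p = (uint32_t)(prod & 0x7fffffff);	/* low 31 bits of the product */
	uint32_t q = (uint32_t)(prod >> 31);		/* bits 62:31 of the product */
	uint32_t s = p + q;

	if (s & 0x80000000U)		/* p + q >= 2^31: fold the carry back in */
		s = (s & 0x7fffffff) + 1;
	randseed = s;
	return s;			/* 16807 * old seed mod (2^31 - 1) */
}

int
main(void)
{
	uint32_t v = 0;
	int i;

	/* Sanity check from the comment in random.S. */
	for (i = 0; i < 10000; i++)
		v = carta_random();
	printf("10,000th value: %u (expect 1043618065)\n", v);
	return (v == 1043618065U) ? 0 : 1;
}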