/* $NetBSD: frameasm.h,v 1.8 2007/11/22 16:16:45 bouyer Exp $ */

#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
#include "opt_xen.h"

/*
 * Macros to define pushing/popping frames for interrupts, traps
 * and system calls. Currently all the same; will diverge later.
 */

#ifdef XEN
#define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
/* Xen does not need swapgs; it is done by the hypervisor. */
#define swapgs
#define iretq pushq $0 ; jmp HYPERVISOR_iret
#endif
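
/*
 * The hypercall page is an array of 32-byte stubs, one per hypercall,
 * so hypercall N lives at hypercall_page + N * 32 and HYPERVISOR_iret
 * above names the iret hypercall stub.  Redefining iretq this way makes
 * the exit paths below return through the hypervisor rather than execute
 * a real iretq; the extra "pushq $0" supplies the additional stack word
 * the iret hypercall expects on top of the hardware frame.
 */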

/*
 * These are used on interrupt or trap entry or exit.
 */
#define INTR_SAVE_GPRS \
	subq $120,%rsp ; \
	movq %r15,TF_R15(%rsp) ; \
	movq %r14,TF_R14(%rsp) ; \
	movq %r13,TF_R13(%rsp) ; \
	movq %r12,TF_R12(%rsp) ; \
	movq %r11,TF_R11(%rsp) ; \
	movq %r10,TF_R10(%rsp) ; \
	movq %r9,TF_R9(%rsp) ; \
	movq %r8,TF_R8(%rsp) ; \
	movq %rdi,TF_RDI(%rsp) ; \
	movq %rsi,TF_RSI(%rsp) ; \
	movq %rbp,TF_RBP(%rsp) ; \
	movq %rbx,TF_RBX(%rsp) ; \
	movq %rdx,TF_RDX(%rsp) ; \
	movq %rcx,TF_RCX(%rsp) ; \
	movq %rax,TF_RAX(%rsp) ; \
	cld
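
/*
 * The 120 bytes reserved by INTR_SAVE_GPRS hold the 15 general-purpose
 * registers saved above (15 * 8 bytes); the TF_* names are the matching
 * struct trapframe member offsets (presumably generated into assym.h).
 */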

#define INTR_RESTORE_GPRS \
	movq TF_R15(%rsp),%r15 ; \
	movq TF_R14(%rsp),%r14 ; \
	movq TF_R13(%rsp),%r13 ; \
	movq TF_R12(%rsp),%r12 ; \
	movq TF_R11(%rsp),%r11 ; \
	movq TF_R10(%rsp),%r10 ; \
	movq TF_R9(%rsp),%r9 ; \
	movq TF_R8(%rsp),%r8 ; \
	movq TF_RDI(%rsp),%rdi ; \
	movq TF_RSI(%rsp),%rsi ; \
	movq TF_RBP(%rsp),%rbp ; \
	movq TF_RBX(%rsp),%rbx ; \
	movq TF_RDX(%rsp),%rdx ; \
	movq TF_RCX(%rsp),%rcx ; \
	movq TF_RAX(%rsp),%rax ; \
	addq $120,%rsp

#define INTRENTRY \
	subq $32,%rsp ; \
	testq $SEL_UPL,56(%rsp) ; \
	je 98f ; \
	swapgs ; \
	movw %gs,0(%rsp) ; \
	movw %fs,8(%rsp) ; \
	movw %es,16(%rsp) ; \
	movw %ds,24(%rsp) ; \
98:	INTR_SAVE_GPRS
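
/*
 * Stack layout sketch for INTRENTRY, assuming the usual convention that
 * the trap/interrupt stub pushes an error code and a trap number just
 * before invoking it: after the "subq $32,%rsp" the saved %cs sits at
 * 56(%rsp), so the SEL_UPL test distinguishes entry from user mode
 * (swapgs and save the segment registers) from entry from kernel mode.
 * The 32 bytes reserved here hold %gs/%fs/%es/%ds at offsets 0/8/16/24,
 * and INTR_SAVE_GPRS then builds the 120-byte GPR area below them.
 */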

#ifndef XEN
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS ; \
	testq $SEL_UPL,56(%rsp) ; \
	je 99f ; \
	cli ; \
	swapgs ; \
	movw 0(%rsp),%gs ; \
	movw 8(%rsp),%fs ; \
	movw 16(%rsp),%es ; \
	movw 24(%rsp),%ds ; \
99:	addq $48,%rsp ; \
	iretq
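
/*
 * The final "addq $48,%rsp" discards the 32-byte segment-register area
 * plus the 16 bytes of trap number and error code, leaving just the
 * hardware frame for iretq (same stack layout as assumed for INTRENTRY).
 */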

#define INTR_RECURSE_HWFRAME \
	movq %rsp,%r10 ; \
	movl %ss,%r11d ; \
	pushq %r11 ; \
	pushq %r10 ; \
	pushfq ; \
	movl %cs,%r11d ; \
	pushq %r11 ; \
	pushq %r13 ;
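
/*
 * INTR_RECURSE_HWFRAME fakes the five-word hardware interrupt frame
 * (%ss, %rsp, %rflags, %cs, %rip) on the current stack; the caller is
 * expected to have put the target %rip in %r13.  Presumably this is
 * used when re-entering the interrupt path from software, e.g. when
 * replaying a deferred interrupt.
 */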

#else /* !XEN */
/*
 * Disabling events before going back to user mode sounds like a BAD idea;
 * do not restore %gs either, HYPERVISOR_iret will do the swapgs.
 */
#define INTRFASTEXIT \
	INTR_RESTORE_GPRS ; \
	testq $SEL_UPL,56(%rsp) ; \
	je 99f ; \
	movw 8(%rsp),%fs ; \
	movw 16(%rsp),%es ; \
	movw 24(%rsp),%ds ; \
99:	addq $48,%rsp ; \
	iretq

/* We must fix up %cs, as even kernel mode runs at CPL 3 */
#define INTR_RECURSE_HWFRAME \
	movq %rsp,%r10 ; \
	movl %ss,%r11d ; \
	pushq %r11 ; \
	pushq %r10 ; \
	pushfq ; \
	movl %cs,%r11d ; \
	pushq %r11 ; \
	andb $0xfc,(%rsp) ; \
	pushq %r13 ;
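
/*
 * The "andb $0xfc,(%rsp)" clears the two RPL bits of the %cs value just
 * pushed, so the SEL_UPL tests treat the faked frame as a kernel-mode
 * one even though a Xen PV guest kernel actually runs at CPL 3.
 */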

#endif /* !XEN */

#define DO_DEFERRED_SWITCH \
	cmpq $0, CPUVAR(WANT_PMAPLOAD) ; \
	jz 1f ; \
	call _C_LABEL(do_pmap_load) ; \
1:

#define CHECK_DEFERRED_SWITCH \
	cmpq $0, CPUVAR(WANT_PMAPLOAD)
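
/*
 * DO_DEFERRED_SWITCH performs a deferred pmap switch (flagged in the
 * per-CPU WANT_PMAPLOAD field) by calling do_pmap_load();
 * CHECK_DEFERRED_SWITCH only does the compare, leaving the flags for
 * the caller to branch on.
 */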

#define CHECK_ASTPENDING(reg)	cmpq $0, reg ; \
				je 99f ; \
				cmpl $0, L_MD_ASTPENDING(reg) ; \
				99:

#define CLEAR_ASTPENDING(reg)	movl $0, L_MD_ASTPENDING(reg)
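
/*
 * CHECK_ASTPENDING expects a struct lwp pointer (typically curlwp) in
 * "reg": it skips the test when the pointer is NULL and otherwise
 * compares the lwp's md_astpending field against zero, leaving the
 * flags for the caller to branch on.  CLEAR_ASTPENDING resets the field.
 */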

#ifdef XEN
#define CLI(reg1,reg2) \
	movl CPUVAR(CPUID),%e/**/reg1 ; \
	shlq $6,%r/**/reg1 ; \
	movq _C_LABEL(HYPERVISOR_shared_info),%r/**/reg2 ; \
	addq %r/**/reg1,%r/**/reg2 ; \
	movb $1,EVTCHN_UPCALL_MASK(%r/**/reg2)
#define STI(reg1,reg2) \
	movl CPUVAR(CPUID),%e/**/reg1 ; \
	shlq $6,%r/**/reg1 ; \
	movq _C_LABEL(HYPERVISOR_shared_info),%r/**/reg2 ; \
	addq %r/**/reg1,%r/**/reg2 ; \
	movb $0,EVTCHN_UPCALL_MASK(%r/**/reg2)
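
/*
 * The Xen CLI/STI above assume the vcpu_info slots at the start of the
 * shared info page are 64 bytes each: shifting the CPU id left by 6
 * indexes this CPU's vcpu_info, and writing 1 to its evtchn_upcall_mask
 * byte masks event (interrupt) delivery like cli, while writing 0
 * unmasks it like sti.
 */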

#else /* XEN */
#define CLI(reg1,reg2) cli
#define STI(reg1,reg2) sti
#endif /* XEN */

#endif /* _AMD64_MACHINE_FRAMEASM_H */