author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-17 15:09:50 +0000
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-17 15:09:50 +0000
commit    f515c2cf95c5a6bc88a67a7e6319637b7c6e378b (patch)
tree      54d642abd3f4194f557ae9c0e0f362be594fcd20 /extras/mini-os/arch
parent    4ff20a30344f24c7a246e6ceb3b631c9876c6a51 (diff)
[MINIOS] New make structure to support different architectures.
Signed-off-by: Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
Diffstat (limited to 'extras/mini-os/arch')
-rw-r--r--  extras/mini-os/arch/x86/Makefile           |  29
-rw-r--r--  extras/mini-os/arch/x86/arch.mk            |  28
-rw-r--r--  extras/mini-os/arch/x86/minios-x86_32.lds  |  45
-rw-r--r--  extras/mini-os/arch/x86/minios-x86_64.lds  |  54
-rw-r--r--  extras/mini-os/arch/x86/x86_32.S           | 287
-rw-r--r--  extras/mini-os/arch/x86/x86_64.S           | 385
6 files changed, 828 insertions(+), 0 deletions(-)
diff --git a/extras/mini-os/arch/x86/Makefile b/extras/mini-os/arch/x86/Makefile
new file mode 100644
index 0000000000..45cfe19136
--- /dev/null
+++ b/extras/mini-os/arch/x86/Makefile
@@ -0,0 +1,29 @@
+#
+# x86 architecture-specific Makefile.
+# It is used for x86_32, x86_32y and x86_64.
+#
+
+# Rebuild all after touching this/these extra file(s) (see minios.mk)
+SPEC_DEP = arch.mk
+
+# arch.mk has to be included before minios.mk!
+include arch.mk
+include ../../minios.mk
+
+# Sources here are all *.c files; $(TARGET_ARCH).S is not built here,
+# it is handled separately as $(HEAD_ARCH_OBJ).
+ARCH_SRCS := $(wildcard *.c)
+
+# The objects built from the sources.
+ARCH_OBJS := $(patsubst %.c,%.o,$(ARCH_SRCS))
+
+all: $(ARCH_LIB)
+
+# $(HEAD_ARCH_OBJ) is only built here; it is needed when linking
+# in ../../Makefile.
+$(ARCH_LIB): $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
+ $(AR) rv $(ARCH_LIB) $(ARCH_OBJS)
+
+clean:
+ rm -f $(ARCH_LIB) $(ARCH_OBJS)
+
diff --git a/extras/mini-os/arch/x86/arch.mk b/extras/mini-os/arch/x86/arch.mk
new file mode 100644
index 0000000000..5d0ee4add7
--- /dev/null
+++ b/extras/mini-os/arch/x86/arch.mk
@@ -0,0 +1,28 @@
+#
+# Architecture-specific make rules for the x86 family
+# (including x86_32, x86_32y and x86_64).
+#
+
+ifeq ($(TARGET_ARCH),x86_32)
+ARCH_CFLAGS := -m32 -march=i686
+ARCH_LDFLAGS := -m elf_i386
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
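+# Building with pae=y concatenates with TARGET_ARCH in the test below, so
+# PAE builds match "x86_32y" and add CONFIG_X86_PAE on top of the x86_32
+# flags set above.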
+ifeq ($(TARGET_ARCH)$(pae),x86_32y)
+ARCH_CFLAGS  += -DCONFIG_X86_PAE=1
+ARCH_ASFLAGS += -DCONFIG_X86_PAE=1
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+ifeq ($(TARGET_ARCH),x86_64)
+ARCH_CFLAGS := -m64 -mno-red-zone -fpic -fno-reorder-blocks
+ARCH_CFLAGS += -fno-asynchronous-unwind-tables
+ARCH_LDFLAGS := -m elf_x86_64
+EXTRA_INC += $(TARGET_ARCH_FAM)/$(TARGET_ARCH)
+EXTRA_SRC += arch/$(EXTRA_INC)
+endif
+
+
diff --git a/extras/mini-os/arch/x86/minios-x86_32.lds b/extras/mini-os/arch/x86/minios-x86_32.lds
new file mode 100644
index 0000000000..09d84ad96f
--- /dev/null
+++ b/extras/mini-os/arch/x86/minios-x86_32.lds
@@ -0,0 +1,45 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff --git a/extras/mini-os/arch/x86/minios-x86_64.lds b/extras/mini-os/arch/x86/minios-x86_64.lds
new file mode 100644
index 0000000000..e6ef04a511
--- /dev/null
+++ b/extras/mini-os/arch/x86/minios-x86_64.lds
@@ -0,0 +1,54 @@
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x0;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.gnu.warning)
+ } = 0x9090
+
+ _etext = .; /* End of text section */
+
+ .rodata : { *(.rodata) *(.rodata.*) }
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff --git a/extras/mini-os/arch/x86/x86_32.S b/extras/mini-os/arch/x86/x86_32.S
new file mode 100644
index 0000000000..46fce169ca
--- /dev/null
+++ b/extras/mini-os/arch/x86/x86_32.S
@@ -0,0 +1,287 @@
+#include <os.h>
+#include <xen/arch-x86_32.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+	.ascii	",VIRT_BASE=0x0" /* &_text from minios-x86_32.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
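+	/* page index 2: the hypercall page sits at .org 0x2000 below */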
+#ifdef CONFIG_X86_PAE
+ .ascii ",PAE=yes"
+#else
+ .ascii ",PAE=no"
+#endif
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+.globl _start, shared_info, hypercall_page
+
+_start:
+ cld
+ lss stack_start,%esp
+ push %esi
+ call start_kernel
+
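+/* lss above loads %esp from the first long here and %ss from the
+   selector word that follows it. */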
+stack_start:
+ .long stack+8192, __KERNEL_SS
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
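+# Byte offsets into the stack frame built by do_exception below. The ES
+# slot initially holds the handler address and the ORIG_EAX slot the error
+# code; both are rewritten so the frame ends up a regular pt_regs.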
+ES = 0x20
+ORIG_EAX = 0x24
+EIP = 0x28
+CS = 0x2C
+
+#define ENTRY(X) .globl X ; X :
+
+#define SAVE_ALL \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__KERNEL_DS),%edx; \
+ movl %edx,%ds; \
+ movl %edx,%es;
+
+#define RESTORE_ALL \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
+ popl %ds; \
+ popl %es; \
+ addl $4,%esp; \
+ iret; \
+
+ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $do_divide_error
+do_exception:
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es, %ecx
+ movl ES(%esp), %edi # get the function address
+ movl ORIG_EAX(%esp), %edx # get the error code
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %esp,%eax # pt_regs pointer
+ pushl %edx
+ pushl %eax
+ call *%edi
+ jmp ret_from_exception
+
+ret_from_exception:
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+ RESTORE_ALL
+
+# A note on the "critical region" in our callback handler.
+# We want to avoid stacking callback handlers due to events occurring
+# during handling of the last event. To do this, we keep events disabled
+# until we've done all processing. HOWEVER, we must enable events before
+# popping the stack frame (can't be done atomically) and so it would still
+# be possible to get enough handler activations to overflow the stack.
+# Although unlikely, bugs of that kind are hard to track down, so we'd
+# like to avoid the possibility.
+# So, on entry to the handler we detect whether we interrupted an
+# existing activation in its critical region -- if so, we pop the current
+# activation and restart the handler using the previous one.
+ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+11: push %esp
+ call do_hypervisor_callback
+ add $4,%esp
+ movl HYPERVISOR_shared_info,%esi
+ xorl %eax,%eax
+ movb CS(%esp),%cl
+ test $2,%cl # slow return to ring 2 or 3
+ jne safesti
+safesti:movb $0,1(%esi) # reenable event callbacks
+scrit: /**** START OF CRITICAL REGION ****/
+ testb $0xFF,(%esi)
+ jnz 14f # process more events if necessary...
+ RESTORE_ALL
+14: movb $1,1(%esi)
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame.
+critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
+ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+	shr $2,%ecx			# convert bytes to words
+ je 16f # skip loop if nothing to copy
+15: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
+ loop 15b
+16: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # testb $0xff,(%esi)
+ .byte 0x00,0x00 # jne 14f
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
+ .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
+ .byte 0x00,0x00 # jmp 11b
+
+# Hypervisor uses this for application faults while it executes.
+ENTRY(failsafe_callback)
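+	# The hypervisor pushed the saved segment selectors and an iret
+	# frame; reload the selectors and return to the interrupted context.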
+ pop %ds
+ pop %es
+ pop %fs
+ pop %gs
+ iret
+
+ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp do_exception
+
+ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $do_simd_coprocessor_error
+ jmp do_exception
+
+ENTRY(device_not_available)
+ iret
+
+ENTRY(debug)
+ pushl $0
+ pushl $do_debug
+ jmp do_exception
+
+ENTRY(int3)
+ pushl $0
+ pushl $do_int3
+ jmp do_exception
+
+ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp do_exception
+
+ENTRY(bounds)
+ pushl $0
+ pushl $do_bounds
+ jmp do_exception
+
+ENTRY(invalid_op)
+ pushl $0
+ pushl $do_invalid_op
+ jmp do_exception
+
+
+ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp do_exception
+
+
+ENTRY(invalid_TSS)
+ pushl $do_invalid_TSS
+ jmp do_exception
+
+
+ENTRY(segment_not_present)
+ pushl $do_segment_not_present
+ jmp do_exception
+
+
+ENTRY(stack_segment)
+ pushl $do_stack_segment
+ jmp do_exception
+
+
+ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp do_exception
+
+
+ENTRY(alignment_check)
+ pushl $do_alignment_check
+ jmp do_exception
+
+
+ENTRY(page_fault)
+ pushl $do_page_fault
+ jmp do_exception
+
+ENTRY(machine_check)
+ pushl $0
+ pushl $do_machine_check
+ jmp do_exception
+
+
+ENTRY(spurious_interrupt_bug)
+ pushl $0
+ pushl $do_spurious_interrupt_bug
+ jmp do_exception
+
+
+
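+# New threads start here: the thread-creation code leaves the start
+# function and its argument on the new stack; pop them and call.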
+ENTRY(thread_starter)
+ popl %eax
+ popl %ebx
+ pushl %eax
+ call *%ebx
+ call exit_thread
+
diff --git a/extras/mini-os/arch/x86/x86_64.S b/extras/mini-os/arch/x86/x86_64.S
new file mode 100644
index 0000000000..2b621784ed
--- /dev/null
+++ b/extras/mini-os/arch/x86/x86_64.S
@@ -0,0 +1,385 @@
+#include <os.h>
+#include <xen/features.h>
+
+.section __xen_guest
+ .ascii "GUEST_OS=Mini-OS"
+ .ascii ",XEN_VER=xen-3.0"
+	.ascii	",VIRT_BASE=0x0" /* &_text from minios-x86_64.lds */
+ .ascii ",ELF_PADDR_OFFSET=0x0"
+ .ascii ",HYPERCALL_PAGE=0x2"
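+	/* page index 2: the hypercall page sits at .org 0x2000 below */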
+ .ascii ",LOADER=generic"
+ .byte 0
+.text
+
+#define ENTRY(X) .globl X ; X :
+.globl _start, shared_info, hypercall_page
+
+
+_start:
+ cld
+ movq stack_start(%rip),%rsp
+ movq %rsi,%rdi
+ call start_kernel
+
+stack_start:
+ .quad stack+8192
+
+ /* Unpleasant -- the PTE that maps this page is actually overwritten */
+ /* to map the real shared-info page! :-) */
+ .org 0x1000
+shared_info:
+ .org 0x2000
+
+hypercall_page:
+ .org 0x3000
+
+
+/* Offsets into shared_info_t. */
+#define evtchn_upcall_pending /* 0 */
+#define evtchn_upcall_mask 1
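+/* evtchn_upcall_pending is deliberately defined empty: at offset 0,
+ * evtchn_upcall_pending(reg) expands to plain (reg). */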
+
+NMI_MASK = 0x80000000
+
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
+#define EFLAGS 144
+
+#define REST_SKIP 6*8
+.macro SAVE_REST
+ subq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET REST_SKIP
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,5*8
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,4*8
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,3*8
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,2*8
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,1*8
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,0*8
+.endm
+
+
+.macro RESTORE_REST
+ movq (%rsp),%r15
+# CFI_RESTORE r15
+ movq 1*8(%rsp),%r14
+# CFI_RESTORE r14
+ movq 2*8(%rsp),%r13
+# CFI_RESTORE r13
+ movq 3*8(%rsp),%r12
+# CFI_RESTORE r12
+ movq 4*8(%rsp),%rbp
+# CFI_RESTORE rbp
+ movq 5*8(%rsp),%rbx
+# CFI_RESTORE rbx
+ addq $REST_SKIP,%rsp
+# CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
+.endm
+
+
+#define ARG_SKIP 9*8
+.macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
+ .if \skipr11
+ .else
+ movq (%rsp),%r11
+# CFI_RESTORE r11
+ .endif
+ .if \skipr8910
+ .else
+ movq 1*8(%rsp),%r10
+# CFI_RESTORE r10
+ movq 2*8(%rsp),%r9
+# CFI_RESTORE r9
+ movq 3*8(%rsp),%r8
+# CFI_RESTORE r8
+ .endif
+ .if \skiprax
+ .else
+ movq 4*8(%rsp),%rax
+# CFI_RESTORE rax
+ .endif
+ .if \skiprcx
+ .else
+ movq 5*8(%rsp),%rcx
+# CFI_RESTORE rcx
+ .endif
+ .if \skiprdx
+ .else
+ movq 6*8(%rsp),%rdx
+# CFI_RESTORE rdx
+ .endif
+ movq 7*8(%rsp),%rsi
+# CFI_RESTORE rsi
+ movq 8*8(%rsp),%rdi
+# CFI_RESTORE rdi
+ .if ARG_SKIP+\addskip > 0
+ addq $ARG_SKIP+\addskip,%rsp
+# CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
+ .endif
+.endm
+
+
+.macro HYPERVISOR_IRET flag
+# testb $3,1*8(%rsp) /* Don't need to do that in Mini-os, as */
+# jnz 2f /* there is no userspace? */
+ testl $NMI_MASK,2*8(%rsp)
+ jnz 2f
+
+ testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
+ jnz 1f
+
+ /* Direct iret to kernel space. Correct CS and SS. */
+ orb $3,1*8(%rsp)
+ orb $3,4*8(%rsp)
+1: iretq
+
+2: /* Slow iret via hypervisor. */
+ andl $~NMI_MASK, 16(%rsp)
+ pushq $\flag
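+	/* each hypercall stub occupies 32 bytes in the hypercall page */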
+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
+.endm
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack
+ * and the exception handler in %rax.
+ */
+ENTRY(error_entry)
+# _frame RDI
+ /* rdi slot contains rax, oldrax contains error code */
+ cld
+ subq $14*8,%rsp
+# CFI_ADJUST_CFA_OFFSET (14*8)
+ movq %rsi,13*8(%rsp)
+# CFI_REL_OFFSET rsi,RSI
+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
+ movq %rdx,12*8(%rsp)
+# CFI_REL_OFFSET rdx,RDX
+ movq %rcx,11*8(%rsp)
+# CFI_REL_OFFSET rcx,RCX
+ movq %rsi,10*8(%rsp) /* store rax */
+# CFI_REL_OFFSET rax,RAX
+ movq %r8, 9*8(%rsp)
+# CFI_REL_OFFSET r8,R8
+ movq %r9, 8*8(%rsp)
+# CFI_REL_OFFSET r9,R9
+ movq %r10,7*8(%rsp)
+# CFI_REL_OFFSET r10,R10
+ movq %r11,6*8(%rsp)
+# CFI_REL_OFFSET r11,R11
+ movq %rbx,5*8(%rsp)
+# CFI_REL_OFFSET rbx,RBX
+ movq %rbp,4*8(%rsp)
+# CFI_REL_OFFSET rbp,RBP
+ movq %r12,3*8(%rsp)
+# CFI_REL_OFFSET r12,R12
+ movq %r13,2*8(%rsp)
+# CFI_REL_OFFSET r13,R13
+ movq %r14,1*8(%rsp)
+# CFI_REL_OFFSET r14,R14
+ movq %r15,(%rsp)
+# CFI_REL_OFFSET r15,R15
+#if 0
+ cmpl $__KERNEL_CS,CS(%rsp)
+ je error_kernelspace
+#endif
+error_call_handler:
+ movq %rdi, RDI(%rsp)
+ movq %rsp,%rdi
+ movq ORIG_RAX(%rsp),%rsi # get error code
+ movq $-1,ORIG_RAX(%rsp)
+ call *%rax
+
+.macro zeroentry sym
+# INTR_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+# CFI_ADJUST_CFA_OFFSET 8
+ pushq %rax /* push real oldrax to the rdi slot */
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
+
+.macro errorentry sym
+# XCPT_FRAME
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* rsp points to the error code */
+ pushq %rax
+# CFI_ADJUST_CFA_OFFSET 8
+ leaq \sym(%rip),%rax
+ jmp error_entry
+# CFI_ENDPROC
+.endm
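+
+/* zeroentry is for exceptions without a CPU-pushed error code (a dummy 0
+ * is pushed); errorentry keeps the real error code already on the stack.
+ * Both leave the handler address in %rax for error_entry. */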
+
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+
+
+ENTRY(hypervisor_callback)
+ zeroentry hypervisor_callback2
+
+ENTRY(hypervisor_callback2)
+ movq %rdi, %rsp
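+	# Switch to the IRQ stack unless we are already on it. This assumes,
+	# as in the Linux x86_64 entry code this is modelled on, that %gs:0
+	# holds an IRQ nesting count starting at -1 and %gs:8 the IRQ stack
+	# top.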
+11: movq %gs:8,%rax
+ incl %gs:0
+ cmovzq %rax,%rsp
+ pushq %rdi
+ call do_hypervisor_callback
+ popq %rsp
+ decl %gs:0
+ jmp error_exit
+
+# ALIGN
+restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
+
+scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%rsi)
+ jnz 14f # process more events if necessary...
+ XEN_PUT_VCPU_INFO(%rsi)
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ XEN_PUT_VCPU_INFO(%rsi)
+ SAVE_REST
+ movq %rsp,%rdi # set the argument again
+ jmp 11b
+ecrit: /**** END OF CRITICAL REGION ****/
+
+
+retint_kernel:
+retint_restore_args:
+ movl EFLAGS-REST_SKIP(%rsp), %eax
+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
+ XEN_GET_VCPU_INFO(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
+ jnz restore_all_enable_events # != 0 => enable event delivery
+ XEN_PUT_VCPU_INFO(%rsi)
+
+ RESTORE_ARGS 0,8,0
+ HYPERVISOR_IRET 0
+
+
+error_exit:
+ RESTORE_REST
+/* cli */
+ XEN_BLOCK_EVENTS(%rsi)
+ jmp retint_kernel
+
+
+
+ENTRY(failsafe_callback)
+ popq %rcx
+ popq %r11
+ iretq
+
+
+ENTRY(coprocessor_error)
+ zeroentry do_coprocessor_error
+
+
+ENTRY(simd_coprocessor_error)
+ zeroentry do_simd_coprocessor_error
+
+
+ENTRY(device_not_available)
+ zeroentry do_device_not_available
+
+
+ENTRY(debug)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_debug
+# CFI_ENDPROC
+
+
+ENTRY(int3)
+# INTR_FRAME
+# CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_int3
+# CFI_ENDPROC
+
+ENTRY(overflow)
+ zeroentry do_overflow
+
+
+ENTRY(bounds)
+ zeroentry do_bounds
+
+
+ENTRY(invalid_op)
+ zeroentry do_invalid_op
+
+
+ENTRY(coprocessor_segment_overrun)
+ zeroentry do_coprocessor_segment_overrun
+
+
+ENTRY(invalid_TSS)
+ errorentry do_invalid_TSS
+
+
+ENTRY(segment_not_present)
+ errorentry do_segment_not_present
+
+
+/* runs on exception stack */
+ENTRY(stack_segment)
+# XCPT_FRAME
+ errorentry do_stack_segment
+# CFI_ENDPROC
+
+
+ENTRY(general_protection)
+ errorentry do_general_protection
+
+
+ENTRY(alignment_check)
+ errorentry do_alignment_check
+
+
+ENTRY(divide_error)
+ zeroentry do_divide_error
+
+
+ENTRY(spurious_interrupt_bug)
+ zeroentry do_spurious_interrupt_bug
+
+
+ENTRY(page_fault)
+ errorentry do_page_fault
+
+
+
+
+
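+# As on x86_32: the thread-creation code leaves the start function and
+# its argument on the new stack.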
+ENTRY(thread_starter)
+ popq %rdi
+ popq %rbx
+ call *%rbx
+ call exit_thread
+
+