/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Adjusted for XenoLinux use by K A Fraser
 * Adjusted for Xen minimal os by R Neugebauer
 */

/* NOTE(review): the '#include' had lost its argument in this copy; os.h is
 * the mini-os header that declares __KERNEL_DS (used below) -- confirm
 * against the build tree. */
#include <os.h>

/* Byte offsets into the stack frame built by SAVE_ALL / error_code.
 * ORIG_EAX holds the hardware error code (or -1 for "not a real error
 * code"); EIP/CS/EFLAGS/OLDESP/OLDSS are the CPU-pushed exception frame. */
EBX      = 0x00
ECX      = 0x04
EDX      = 0x08
ESI      = 0x0C
EDI      = 0x10
EBP      = 0x14
EAX      = 0x18
DS       = 0x1C
ES       = 0x20
ORIG_EAX = 0x24
EIP      = 0x28
CS       = 0x2C
EFLAGS   = 0x30
OLDESP   = 0x34
OLDSS    = 0x38

/* EFLAGS bit masks. */
CF_MASK  = 0x00000001
IF_MASK  = 0x00000200
NT_MASK  = 0x00004000

/* Declare a globally-visible label */
#define ENTRY(X) .globl X ; X :

/* A Linux hangover. Just ignore it. */
#define SYMBOL_NAME(X) X

/* Push all general-purpose registers plus %ds/%es in the layout described
 * by the offsets above, then load the kernel data segment into %ds/%es. */
#define SAVE_ALL \
        cld; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \
        movl $(__KERNEL_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es;

/* Undo SAVE_ALL and iret.  The popl %ds / popl %es / iret instructions can
 * fault on a bad user segment; the .fixup/__ex_table entries route such
 * faults to a handler that zeroes the bad selector (labels 4/5) or, for a
 * faulting iret, exits via do_exit (label 6).
 * NOTE: critical_fixup_table (later in this file) indexes the exact byte
 * encoding of this sequence -- do not reorder or change instructions here
 * without regenerating that table. */
#define RESTORE_ALL \
        popl %ebx; \
        popl %ecx; \
        popl %edx; \
        popl %esi; \
        popl %edi; \
        popl %ebp; \
        popl %eax; \
1:      popl %ds; \
2:      popl %es; \
        addl $4,%esp; \
3:      iret; \
        .section .fixup,"ax"; \
4:      movl $0,(%esp); \
        jmp 1b; \
5:      movl $0,(%esp); \
        jmp 2b; \
6:      pushl %ss; \
        popl %ds; \
        pushl %ss; \
        popl %es; \
        pushl $11; \
        call do_exit; \
        .previous; \
        .section __ex_table,"a";\
        .align 4; \
        .long 1b,4b; \
        .long 2b,5b; \
        .long 3b,6b; \
        .previous

ENTRY(divide_error)
        pushl $0                        # no error code
        pushl $ SYMBOL_NAME(do_divide_error)
        .align 4
# Common exception entry.  On entry the stack holds the CPU exception
# frame, then the error code (or the $0 pushed by stubs such as
# divide_error), then the address of the C handler to call.  Registers are
# pushed by hand (rather than via SAVE_ALL) so that the error code and
# handler address can be fished out of the ORIG_EAX/ES slots before those
# slots are overwritten with -1 and the saved %es respectively.
error_code:
        pushl %ds
        pushl %eax
        xorl %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es,%ecx
        movl ORIG_EAX(%esp), %esi       # get the error code
        movl ES(%esp), %edi             # get the function address
        movl %eax, ORIG_EAX(%esp)       # mark frame: no real orig_eax (-1)
        movl %ecx, ES(%esp)             # store the saved %es in its slot
        movl %esp,%edx
        pushl %esi                      # push the error code
        pushl %edx                      # push the pt_regs pointer
        movl $(__KERNEL_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        call *%edi                      # do_XXX(pt_regs *, error_code)
        addl $8,%esp                    # pop the two C arguments

# These are the tests Linux makes before exiting the OS back to userland.
# At this point preemption may occur, or signals may get delivered.
# Pre-exit tests before returning to userland.  The reschedule/signal
# checks are stubbed out in this minimal OS; we go straight to re-enabling
# event delivery.
ret_to_user_tests:
        # cmpl $0,need_resched(%ebx)
        # jne reschedule
        # cmpl $0,sigpending(%ebx)
        # je safesti
        jmp safesti

# Common exception exit path.  A fast return (ring 1/kernel) skips the
# user-mode tests above.
ret_from_exception:
        movb CS(%esp),%cl
        test $2,%cl                     # slow return to ring 2 or 3
        jne ret_to_user_tests
        RESTORE_ALL

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(hypervisor_callback)
        pushl %eax                      # fake ORIG_EAX slot for SAVE_ALL frame
        SAVE_ALL
        movl EIP(%esp),%eax
        cmpl $scrit,%eax                # did we interrupt the critical region?
        jb 11f
        cmpl $ecrit,%eax
        jb critical_region_fixup
11:     push %esp                       # arg: pt_regs pointer
        call do_hypervisor_callback
        add $4,%esp
        movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
        xorl %eax,%eax
        movb CS(%esp),%cl
        test $2,%cl                     # slow return to ring 2 or 3
        jne ret_to_user_tests
# NOTE(review): bit 31 at offset 4 of shared_info appears to be the master
# event-enable flag, and offset 0 the pending-events word -- matches the
# old Xen shared_info layout; confirm against the hypervisor headers.
safesti:
        btsl $31,4(%esi)                # reenable event callbacks
scrit:  /**** START OF CRITICAL REGION ****/
        # Instruction bytes from scrit to ecrit are indexed by
        # critical_fixup_table below -- do not modify without regenerating it.
        cmpl %eax,(%esi)                # any events pending (%eax == 0)?
        jne 14f                         # process more events if necessary...
        RESTORE_ALL
14:     btrl %eax,4(%esi)               # mask events again (clear bit 0... of +4)
        jmp 11b
ecrit:  /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
# Merge the current activation's frame with the interrupted one.  On entry
# %eax = interrupted EIP; the table lookup yields how many bytes the
# interrupted RESTORE_ALL had already popped, i.e. how much of the old
# frame remains to be slid down over the new one.
critical_region_fixup:
        addl $critical_fixup_table-scrit,%eax
        movzbl (%eax),%eax              # %eax contains num bytes popped
        mov %esp,%esi
        add %eax,%esi                   # %esi points at end of src region
        mov %esp,%edi
        add $0x34,%edi                  # %edi points at end of dst region
        mov %eax,%ecx
        shr $2,%ecx                     # convert byte count to word count
        je 16f                          # skip loop if nothing to copy
15:     subl $4,%esi                    # pre-decrementing copy loop
        subl $4,%edi
        movl (%esi),%eax
        movl %eax,(%edi)
        loop 15b
16:     movl %edi,%esp                  # final %edi is top of merged stack
        jmp 11b

# One byte per instruction byte of the scrit..ecrit region: the number of
# frame bytes already popped when the interrupt hit at that offset.  Must
# be regenerated whenever the critical region's instruction encoding
# changes.
critical_fixup_table:
        .byte 0x00,0x00                 # cmpl %eax,(%esi)
        .byte 0x00,0x00                 # jne 14f
        .byte 0x00                      # pop %ebx
        .byte 0x04                      # pop %ecx
        .byte 0x08                      # pop %edx
        .byte 0x0c                      # pop %esi
        .byte 0x10                      # pop %edi
        .byte 0x14                      # pop %ebp
        .byte 0x18                      # pop %eax
        .byte 0x1c                      # pop %ds
        .byte 0x20                      # pop %es
        .byte 0x24,0x24,0x24            # add $4,%esp
        .byte 0x28                      # iret
        .byte 0x00,0x00,0x00,0x00,0x00  # btrl $31,4(%esi)
        .byte 0x00,0x00                 # jmp 11b

# Hypervisor uses this for application faults while it executes.
# Reload the four data segment registers and iret; any faulting segment
# load is fixed up by zeroing the selector, and a faulting iret exits via
# do_exit (11 presumably == SIGSEGV -- confirm).
ENTRY(failsafe_callback)
1:      pop %ds
2:      pop %es
3:      pop %fs
4:      pop %gs
5:      iret
        .section .fixup,"ax"; \
6:      movl $0,(%esp); \
        jmp 1b; \
7:      movl $0,(%esp); \
        jmp 2b; \
8:      movl $0,(%esp); \
        jmp 3b; \
9:      movl $0,(%esp); \
        jmp 4b; \
10:     pushl %ss; \
        popl %ds; \
        pushl %ss; \
        popl %es; \
        pushl $11; \
        call do_exit; \
        .previous; \
        .section __ex_table,"a";\
        .align 4; \
        .long 1b,6b; \
        .long 2b,7b; \
        .long 3b,8b; \
        .long 4b,9b; \
        .long 5b,10b; \
        .previous

# Trap stubs.  Traps without a hardware error code push $0 to keep the
# frame layout uniform, then push their C handler and fall into error_code.
ENTRY(coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_error)
        jmp error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
        jmp error_code

ENTRY(device_not_available)
        pushl $-1                       # mark this as an int
        SAVE_ALL
        #call SYMBOL_NAME(math_state_restore)
        jmp ret_from_exception

ENTRY(debug)
        pushl $0
        pushl $ SYMBOL_NAME(do_debug)
        jmp error_code

ENTRY(int3)
        pushl $0
        pushl $ SYMBOL_NAME(do_int3)
        jmp error_code

ENTRY(overflow)
        pushl $0
        pushl $ SYMBOL_NAME(do_overflow)
        jmp error_code

ENTRY(bounds)
        pushl $0
        pushl $ SYMBOL_NAME(do_bounds)
        jmp error_code

ENTRY(invalid_op)
        pushl $0
        pushl $ SYMBOL_NAME(do_invalid_op)
        jmp error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
        jmp error_code

# These traps push a hardware error code, so no $0 placeholder is needed.
ENTRY(double_fault)
        pushl $ SYMBOL_NAME(do_double_fault)
        jmp error_code

ENTRY(invalid_TSS)
        pushl $ SYMBOL_NAME(do_invalid_TSS)
        jmp error_code

ENTRY(segment_not_present)
        pushl $ SYMBOL_NAME(do_segment_not_present)
        jmp error_code

ENTRY(stack_segment)
        pushl $ SYMBOL_NAME(do_stack_segment)
        jmp error_code

ENTRY(general_protection)
        pushl $ SYMBOL_NAME(do_general_protection)
        jmp error_code

ENTRY(alignment_check)
        pushl $ SYMBOL_NAME(do_alignment_check)
        jmp error_code

# This handler is special, because it gets an extra value on its stack,
# which is the linear faulting address.
# The open-coded register save mirrors error_code above, but fishes out
# both the error code (ORIG_EAX slot) and the faulting address (ES slot)
# before overwriting those slots, then calls
# do_page_fault(pt_regs *, error_code, faulting_address).
ENTRY(page_fault)
        pushl %ds
        pushl %eax
        xorl %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                       # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %es,%ecx
        movl ORIG_EAX(%esp), %esi       # get the error code
        movl ES(%esp), %edi             # get the faulting address
        movl %eax, ORIG_EAX(%esp)       # mark frame: no real orig_eax (-1)
        movl %ecx, ES(%esp)             # store the saved %es in its slot
        movl %esp,%edx
        pushl %edi                      # push the faulting address
        pushl %esi                      # push the error code
        pushl %edx                      # push the pt_regs pointer
        movl $(__KERNEL_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        call SYMBOL_NAME(do_page_fault)
        addl $12,%esp                   # pop the three C arguments
        jmp ret_from_exception

ENTRY(machine_check)
        pushl $0
        pushl $ SYMBOL_NAME(do_machine_check)
        jmp error_code

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
        jmp error_code