about summary refs log tree commit diff stats
path: root/extras
diff options
context:
space:
mode:
authorXu Zhang <xzhang@cs.uic.edu>2013-04-10 23:46:57 -0500
committerIan Campbell <ian.campbell@citrix.com>2013-04-22 12:32:35 +0100
commitbf65a5d69b1f83a1b4f598c4e1d531a3ff2dbfe1 (patch)
tree094b371bdb3a696d5e197422a87c9329ea258e95 /extras
parent7e84f54da55d7b59ee4469b6a2ee8fe62ea0bca2 (diff)
downloadxen-bf65a5d69b1f83a1b4f598c4e1d531a3ff2dbfe1.tar.gz
xen-bf65a5d69b1f83a1b4f598c4e1d531a3ff2dbfe1.tar.bz2
xen-bf65a5d69b1f83a1b4f598c4e1d531a3ff2dbfe1.zip
mini-os/x86-64 entry: code refactoring; no functional changes
Re-arrange assembly code blocks so that they are in called order instead of jumping around, enhancing readability. Macros are grouped together as well.
Signed-off-by: Xu Zhang <xzhang@cs.uic.edu>
Acked-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
Diffstat (limited to 'extras')
-rw-r--r--extras/mini-os/arch/x86/x86_64.S113
1 file changed, 57 insertions, 56 deletions
diff --git a/extras/mini-os/arch/x86/x86_64.S b/extras/mini-os/arch/x86/x86_64.S
index 24f35cdbd2..d9b34a7745 100644
--- a/extras/mini-os/arch/x86/x86_64.S
+++ b/extras/mini-os/arch/x86/x86_64.S
@@ -36,6 +36,22 @@ hypercall_page:
.org 0x3000
+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
+#define XEN_PUT_VCPU_INFO(reg)
+#define XEN_PUT_VCPU_INFO_fixup
+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
+ XEN_PUT_VCPU_INFO(reg)
+
+
/* Offsets into shared_info_t. */
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
@@ -46,6 +62,27 @@ NMI_MASK = 0x80000000
#define ORIG_RAX 120 /* + error_code */
#define EFLAGS 144
+
+/* Macros */
+.macro zeroentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* skip rcx and r11 */
+ pushq $0 /* push error code/oldrax */
+ pushq %rax /* push real oldrax to the rdi slot */
+ leaq \sym(%rip),%rax
+ jmp error_entry
+.endm
+
+.macro errorentry sym
+ movq (%rsp),%rcx
+ movq 8(%rsp),%r11
+ addq $0x10,%rsp /* rsp points to the error code */
+ pushq %rax
+ leaq \sym(%rip),%rax
+ jmp error_entry
+.endm
+
.macro RESTORE_ALL
movq (%rsp),%r11
movq 1*8(%rsp),%r10
@@ -130,42 +167,10 @@ error_call_handler:
call *%rax
jmp error_exit
-.macro zeroentry sym
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* skip rcx and r11 */
- pushq $0 /* push error code/oldrax */
- pushq %rax /* push real oldrax to the rdi slot */
- leaq \sym(%rip),%rax
- jmp error_entry
-.endm
-
-.macro errorentry sym
- movq (%rsp),%rcx
- movq 8(%rsp),%r11
- addq $0x10,%rsp /* rsp points to the error code */
- pushq %rax
- leaq \sym(%rip),%rax
- jmp error_entry
-.endm
-
-#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
-#define XEN_PUT_VCPU_INFO(reg)
-#define XEN_PUT_VCPU_INFO_fixup
-#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
-#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
-#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
-
-#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
- XEN_LOCKED_BLOCK_EVENTS(reg) ; \
- XEN_PUT_VCPU_INFO(reg)
-
-#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
- XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
- XEN_PUT_VCPU_INFO(reg)
-
-
+/*
+ * Xen event (virtual interrupt) entry point.
+ */
ENTRY(hypervisor_callback)
zeroentry hypervisor_callback2
@@ -178,7 +183,23 @@ ENTRY(hypervisor_callback2)
call do_hypervisor_callback
popq %rsp
decl %gs:0
- jmp error_exit
+
+error_exit:
+ RESTORE_REST
+ XEN_BLOCK_EVENTS(%rsi)
+
+retint_kernel:
+retint_restore_args:
+ movl EFLAGS-6*8(%rsp), %eax
+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
+ XEN_GET_VCPU_INFO(%rsi)
+ andb evtchn_upcall_mask(%rsi),%al
+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
+ jnz restore_all_enable_events # != 0 => enable event delivery
+ XEN_PUT_VCPU_INFO(%rsi)
+
+ RESTORE_ALL
+ HYPERVISOR_IRET 0
restore_all_enable_events:
XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
@@ -198,26 +219,6 @@ scrit: /**** START OF CRITICAL REGION ****/
ecrit: /**** END OF CRITICAL REGION ****/
-retint_kernel:
-retint_restore_args:
- movl EFLAGS-6*8(%rsp), %eax
- shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
- XEN_GET_VCPU_INFO(%rsi)
- andb evtchn_upcall_mask(%rsi),%al
- andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
- jnz restore_all_enable_events # != 0 => enable event delivery
- XEN_PUT_VCPU_INFO(%rsi)
-
- RESTORE_ALL
- HYPERVISOR_IRET 0
-
-
-error_exit:
- RESTORE_REST
- XEN_BLOCK_EVENTS(%rsi)
- jmp retint_kernel
-
-
ENTRY(failsafe_callback)
popq %rcx