author     Jan Beulich <jbeulich@suse.com>  2013-09-09 10:24:21 +0200
committer  Jan Beulich <jbeulich@suse.com>  2013-09-09 10:24:21 +0200
commit     110b2d6e8aa2a35d8f9d8efc3f3bfda3c49a3855 (patch)
tree       1ddecd4b87385a0abc1979df173f545b9bab2462 /xen
parent     cfd54835e6e8a28c743dc7d67c662d151ab4923a (diff)
SVM: streamline entry.S code
- fix a bogus "test" with zero immediate
- move stuff easily/better done in C into C code
- re-arrange code paths so that no redundant GET_CURRENT() would remain
  on the fast paths
- move long latency operations earlier
- slightly defer disabling global interrupts on the VM entry path

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
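The "bogus test" is the removed testb $0,VCPU_nsvm_hap_enabled(%rbx): AND-ing a value with a zero immediate always yields zero, so the following "nz" branch could never be taken and the nested-HAP block was effectively dead. The fix compares the field against a register known to hold zero instead. A minimal C analogy of the two checks (variable name hypothetical, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned char nsvm_hap_enabled = 1;  /* flag set on the nested-HAP path */

        if (nsvm_hap_enabled & 0)            /* old check: "test" with zero immediate, always false */
            puts("unreachable");
        if (nsvm_hap_enabled != 0)           /* new check: compare against zero */
            puts("nested-HAP path taken");

        return 0;
    }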
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/svm/entry.S      61
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c         2
-rw-r--r--  xen/include/asm-x86/asm_defns.h   10
3 files changed, 34 insertions, 39 deletions
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 1969629774..3ee42475e6 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -32,28 +32,34 @@
#define CLGI .byte 0x0F,0x01,0xDD
ENTRY(svm_asm_do_resume)
+ GET_CURRENT(%rbx)
+.Lsvm_do_resume:
call svm_intr_assist
mov %rsp,%rdi
call nsvm_vcpu_switch
ASSERT_NOT_IN_ATOMIC
- GET_CURRENT(%rbx)
- CLGI
-
mov VCPU_processor(%rbx),%eax
- shl $IRQSTAT_shift,%eax
lea irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
- cmpl $0,(%rdx,%rax,1)
+ xor %ecx,%ecx
+ shl $IRQSTAT_shift,%eax
+ CLGI
+ cmp %ecx,(%rdx,%rax,1)
jne .Lsvm_process_softirqs
- testb $0, VCPU_nsvm_hap_enabled(%rbx)
-UNLIKELY_START(nz, nsvm_hap)
- mov VCPU_nhvm_p2m(%rbx),%rax
- test %rax,%rax
+ cmp %cl,VCPU_nsvm_hap_enabled(%rbx)
+UNLIKELY_START(ne, nsvm_hap)
+ cmp %rcx,VCPU_nhvm_p2m(%rbx)
sete %al
- andb VCPU_nhvm_guestmode(%rbx),%al
- jnz .Lsvm_nsvm_no_p2m
-UNLIKELY_END(nsvm_hap)
+ test VCPU_nhvm_guestmode(%rbx),%al
+ UNLIKELY_DONE(z, nsvm_hap)
+ /*
+ * Someone shot down our nested p2m table; go round again
+ * and nsvm_vcpu_switch() will fix it for us.
+ */
+ STGI
+ jmp .Lsvm_do_resume
+__UNLIKELY_END(nsvm_hap)
call svm_asid_handle_vmrun
@@ -72,13 +78,12 @@ UNLIKELY_END(svm_trace)
mov UREGS_eflags(%rsp),%rax
mov %rax,VMCB_rflags(%rcx)
- mov VCPU_svm_vmcb_pa(%rbx),%rax
-
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
+ mov VCPU_svm_vmcb_pa(%rbx),%rax
pop %rbx
pop %r11
pop %r10
@@ -92,25 +97,26 @@ UNLIKELY_END(svm_trace)
VMRUN
+ GET_CURRENT(%rax)
push %rdi
push %rsi
push %rdx
push %rcx
+ mov VCPU_svm_vmcb(%rax),%rcx
push %rax
push %r8
push %r9
push %r10
push %r11
push %rbx
+ mov %rax,%rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
- GET_CURRENT(%rbx)
movb $0,VCPU_svm_vmcb_in_sync(%rbx)
- mov VCPU_svm_vmcb(%rbx),%rcx
mov VMCB_rax(%rcx),%rax
mov %rax,UREGS_rax(%rsp)
mov VMCB_rip(%rcx),%rax
@@ -120,33 +126,14 @@ UNLIKELY_END(svm_trace)
mov VMCB_rflags(%rcx),%rax
mov %rax,UREGS_eflags(%rsp)
-#ifndef NDEBUG
- mov $0xbeef,%ax
- mov %ax,UREGS_error_code(%rsp)
- mov %ax,UREGS_entry_vector(%rsp)
- mov %ax,UREGS_saved_upcall_mask(%rsp)
- mov %ax,UREGS_cs(%rsp)
- mov %ax,UREGS_ds(%rsp)
- mov %ax,UREGS_es(%rsp)
- mov %ax,UREGS_fs(%rsp)
- mov %ax,UREGS_gs(%rsp)
- mov %ax,UREGS_ss(%rsp)
-#endif
-
STGI
.globl svm_stgi_label
svm_stgi_label:
mov %rsp,%rdi
call svm_vmexit_handler
- jmp svm_asm_do_resume
+ jmp .Lsvm_do_resume
.Lsvm_process_softirqs:
STGI
call do_softirq
- jmp svm_asm_do_resume
-
-.Lsvm_nsvm_no_p2m:
- /* Someone shot down our nested p2m table; go round again
- * and nsvm_vcpu_switch() will fix it for us. */
- STGI
- jmp svm_asm_do_resume
+ jmp .Lsvm_do_resume
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index f74265a976..695b53a928 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2069,6 +2069,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
vintr_t intr;
bool_t vcpu_guestmode = 0;
+ hvm_invalidate_regs_fields(regs);
+
if ( paging_mode_hap(v->domain) )
v->arch.hvm_vcpu.guest_cr[3] = v->arch.hvm_vcpu.hw_cr[3] =
vmcb_get_cr3(vmcb);
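The call added above takes over from the #ifndef NDEBUG block removed from entry.S, which poisoned the register fields the VMRUN exit path does not write. A rough sketch of such a helper, reconstructed from the removed assembly; the exact types, the 8-bit saved_upcall_mask value, and the header providing struct cpu_user_regs are assumptions, not taken from this patch:

    /* Assumed to live in an HVM header alongside struct cpu_user_regs. */
    static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
    {
    #ifndef NDEBUG
        /* Same poison pattern the removed assembly wrote with %ax = 0xbeef. */
        regs->error_code        = 0xbeef;
        regs->entry_vector      = 0xbeef;
        regs->saved_upcall_mask = 0xbf;   /* 8-bit field, cannot hold 0xbeef */
        regs->cs = 0xbeef;
        regs->ds = 0xbeef;
        regs->es = 0xbeef;
        regs->fs = 0xbeef;
        regs->gs = 0xbeef;
        regs->ss = 0xbeef;
    #endif
    }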
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 8fc1a2c4c5..25032d55eb 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -39,11 +39,17 @@ void ret_from_intr(void);
.subsection 1; \
.Lunlikely.tag:
-#define UNLIKELY_END(tag) \
- jmp .Llikely.tag; \
+#define UNLIKELY_DONE(cond, tag) \
+ j##cond .Llikely.tag
+
+#define __UNLIKELY_END(tag) \
.subsection 0; \
.Llikely.tag:
+#define UNLIKELY_END(tag) \
+ UNLIKELY_DONE(mp, tag); \
+ __UNLIKELY_END(tag)
+
#define STACK_CPUINFO_FIELD(field) (STACK_SIZE-CPUINFO_sizeof+CPUINFO_##field)
#define GET_STACK_BASE(reg) \
movq $~(STACK_SIZE-1),reg; \
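The macro split above lets entry.S leave an unlikely section with a conditional branch (UNLIKELY_DONE(z, nsvm_hap)) and close it separately with __UNLIKELY_END, while UNLIKELY_END keeps its old behaviour by combining the two. A simplified illustration of how they compose (continuation backslashes dropped; the expanded text is assembler emitted via the C preprocessor):

    #define UNLIKELY_DONE(cond, tag)  j##cond .Llikely.tag
    #define __UNLIKELY_END(tag)       .subsection 0; .Llikely.tag:
    #define UNLIKELY_END(tag)         UNLIKELY_DONE(mp, tag); __UNLIKELY_END(tag)

    /*
     * UNLIKELY_END(nsvm_hap) still expands to
     *     jmp .Llikely.nsvm_hap; .subsection 0; .Llikely.nsvm_hap:
     * because pasting "j" with "mp" yields the unconditional "jmp", while
     * UNLIKELY_DONE(z, nsvm_hap) on its own expands to the conditional
     *     jz .Llikely.nsvm_hap
     */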