/*
* entry.S: SVM architecture-specific entry/exit handling.
* Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/types.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>
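
/*
 * Raw opcode encodings for the SVM instructions used below.  Spelling them
 * out as .byte sequences lets the file assemble even with toolchains that
 * do not know the VMRUN/STGI/CLGI mnemonics.
 */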
#define VMRUN  .byte 0x0F,0x01,0xD8
#define STGI   .byte 0x0F,0x01,0xDC
#define CLGI   .byte 0x0F,0x01,0xDD

ENTRY(svm_asm_do_resume)
        GET_CURRENT(%rbx)
.Lsvm_do_resume:
        call svm_intr_assist
        mov  %rsp,%rdi
        call nsvm_vcpu_switch
        ASSERT_NOT_IN_ATOMIC
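
        /*
         * Check whether this CPU has softirqs pending.  GIF is cleared
         * (CLGI) before the test so that no interrupt can raise a new
         * softirq between the test and the VMRUN below.
         */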
        mov  VCPU_processor(%rbx),%eax
        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
        xor  %ecx,%ecx
        shl  $IRQSTAT_shift,%eax
        CLGI
        cmp  %ecx,(%rdx,%rax,1)
        jne  .Lsvm_process_softirqs

        cmp  %cl,VCPU_nsvm_hap_enabled(%rbx)
UNLIKELY_START(ne, nsvm_hap)
        cmp  %rcx,VCPU_nhvm_p2m(%rbx)
        sete %al
        test VCPU_nhvm_guestmode(%rbx),%al
        UNLIKELY_DONE(z, nsvm_hap)
        /*
         * Someone shot down our nested p2m table; go round again
         * and nsvm_vcpu_switch() will fix it for us.
         */
        STGI
        jmp  .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)

        call svm_asid_handle_vmrun

        cmpb $0,tb_init_done(%rip)
UNLIKELY_START(nz, svm_trace)
        call svm_trace_vmentry
UNLIKELY_END(svm_trace)
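
        /*
         * VMRUN loads %rax, %rip, %rsp and %rflags from the VMCB rather
         * than from the CPU registers, so copy them from the saved
         * cpu_user_regs frame into the VMCB before entering the guest.
         */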
        mov  VCPU_svm_vmcb(%rbx),%rcx
        mov  UREGS_rax(%rsp),%rax
        mov  %rax,VMCB_rax(%rcx)
        mov  UREGS_rip(%rsp),%rax
        mov  %rax,VMCB_rip(%rcx)
        mov  UREGS_rsp(%rsp),%rax
        mov  %rax,VMCB_rsp(%rcx)
        mov  UREGS_eflags(%rsp),%rax
        mov  %rax,VMCB_rflags(%rcx)
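
        /*
         * Restore the guest's general purpose registers from the stack
         * frame.  %rax is skipped: VMRUN expects it to hold the physical
         * address of the VMCB, and the guest's %rax is reloaded from the
         * VMCB instead.
         */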
        pop  %r15
        pop  %r14
        pop  %r13
        pop  %r12
        pop  %rbp
        mov  VCPU_svm_vmcb_pa(%rbx),%rax
        pop  %rbx
        pop  %r11
        pop  %r10
        pop  %r9
        pop  %r8
        add  $8,%rsp /* Skip %rax: restored by VMRUN. */
        pop  %rcx
        pop  %rdx
        pop  %rsi
        pop  %rdi

        VMRUN

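        /*
         * #VMEXIT lands here with GIF clear.  Rebuild the cpu_user_regs
         * frame by pushing the guest's general purpose registers in the
         * reverse of the order they were popped above.
         */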
        GET_CURRENT(%rax)
        push %rdi
        push %rsi
        push %rdx
        push %rcx
        mov  VCPU_svm_vmcb(%rax),%rcx
        push %rax
        push %r8
        push %r9
        push %r10
        push %r11
        push %rbx
        mov  %rax,%rbx
        push %rbp
        push %r12
        push %r13
        push %r14
        push %r15
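
        /*
         * The VMCB no longer reflects all of the state now live in the
         * CPU (#VMEXIT saves only part of it), so mark it out of sync,
         * then copy %rax/%rip/%rsp/%rflags from the VMCB back into the
         * cpu_user_regs frame for the C exit handler.
         */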
        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
        mov  VMCB_rax(%rcx),%rax
        mov  %rax,UREGS_rax(%rsp)
        mov  VMCB_rip(%rcx),%rax
        mov  %rax,UREGS_rip(%rsp)
        mov  VMCB_rsp(%rcx),%rax
        mov  %rax,UREGS_rsp(%rsp)
        mov  VMCB_rflags(%rcx),%rax
        mov  %rax,UREGS_eflags(%rsp)
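
        /*
         * Re-enable global interrupts (GIF was cleared by the #VMEXIT)
         * before handing the exit off to the C handler.
         */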
        STGI
GLOBAL(svm_stgi_label)
        mov  %rsp,%rdi
        call svm_vmexit_handler
        jmp  .Lsvm_do_resume
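
        /*
         * Softirqs were pending when we were about to enter the guest:
         * re-enable GIF, service them, then retry the entry path.
         */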
.Lsvm_process_softirqs:
        STGI
        call do_softirq
        jmp  .Lsvm_do_resume