author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-08-11 21:38:58 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-08-11 21:38:58 +0000
commit     0e1629cc169e741ea09ba5805e68d36cd8d57188 (patch)
tree       a65f973928a40fd8efbafea8caa97c20e26964a3
parent     1873dc6b45122bd55441c956c5b06ab5dabe5e54 (diff)
Support VCPU migration for VMX guests.
Add a hook to support CPU migration for VMX domains. Reorganize the
low-level asm code to support relaunching a VMCS on a different logical
CPU.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
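
Background for the change: Intel's VMX rules require that a VMCS which is
active on one logical CPU be cleared with VMCLEAR before it can be loaded
on another, and a cleared VMCS must be re-entered with VMLAUNCH rather than
VMRESUME. The patch therefore has vcpu_migrate_cpu() clear the VMCS and
install arch_vmx_do_relaunch() as a one-shot schedule tail; on the
destination CPU that tail reloads the VMCS, rewrites the per-CPU host state
via vmx_set_host_env(), restores the normal resume tail, and enters the
guest through the new VMLAUNCH-based vmx_asm_do_relaunch path. A minimal
standalone C sketch of that ordering follows; the struct fields and helper
bodies are illustrative stand-ins, not Xen's real definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the Xen/VMX primitives used by the patch. */
struct vmcs_struct { int active_cpu; };

struct vcpu {
    int processor;
    int is_vmx_domain;
    struct vmcs_struct *vmcs;
    void (*schedule_tail)(struct vcpu *v);
};

static void vmpclear(struct vmcs_struct *vmcs)   /* ~ __vmpclear() */
{
    vmcs->active_cpu = -1;    /* VMCS no longer current on any CPU */
    puts("VMCLEAR");
}

static void load_vmcs(struct vcpu *v)            /* ~ load_vmcs() */
{
    v->vmcs->active_cpu = v->processor;          /* VMPTRLD on this CPU */
}

static void vmx_set_host_env(struct vcpu *v)     /* rewrite HOST_* fields */
{
    printf("host TR/GDTR/IDTR state rewritten for CPU %d\n", v->processor);
}

static void arch_vmx_do_resume(struct vcpu *v)   /* normal same-CPU tail */
{
    load_vmcs(v);
    puts("VMRESUME");
}

/* One-shot tail installed by migration: reload on the new CPU, refresh
 * host state, restore the normal tail, then VMLAUNCH (a cleared VMCS
 * cannot be VMRESUMEd). */
static void arch_vmx_do_relaunch(struct vcpu *v)
{
    load_vmcs(v);
    vmx_set_host_env(v);
    v->schedule_tail = arch_vmx_do_resume;
    puts("VMLAUNCH");
}

/* ~ vcpu_migrate_cpu(): called while the VCPU is paused. */
static void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
{
    if (v->processor == newcpu)
        return;
    v->processor = newcpu;    /* _VCPUF_cpu_migrated flag-setting elided */
    if (v->is_vmx_domain) {
        vmpclear(v->vmcs);    /* must clear before loading elsewhere */
        v->schedule_tail = arch_vmx_do_relaunch;
    }
}

int main(void)
{
    struct vmcs_struct vmcs = { .active_cpu = 0 };
    struct vcpu v = { 0, 1, &vmcs, arch_vmx_do_resume };
    vcpu_migrate_cpu(&v, 2);   /* e.g. dom0 pins the VCPU to CPU 2 */
    v.schedule_tail(&v);       /* first schedule on CPU 2: relaunch */
    v.schedule_tail(&v);       /* subsequent schedules: plain resume */
    return 0;
}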
-rw-r--r--  xen/arch/ia64/domain.c          |  9
-rw-r--r--  xen/arch/x86/domain.c           | 31
-rw-r--r--  xen/arch/x86/vmx_vmcs.c         | 33
-rw-r--r--  xen/arch/x86/x86_32/entry.S     | 73
-rw-r--r--  xen/arch/x86/x86_64/entry.S     | 98
-rw-r--r--  xen/common/dom0_ops.c           |  6
-rw-r--r--  xen/include/asm-x86/vmx.h       |  1
-rw-r--r--  xen/include/asm-x86/vmx_vmcs.h  |  1
-rw-r--r--  xen/include/xen/domain.h        |  4

9 files changed, 155 insertions(+), 101 deletions(-)
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index f57a5699ab..bceacd300c 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -1398,3 +1398,12 @@ void domain_pend_keyboard_interrupt(int irq)
{
vcpu_pend_interrupt(dom0->vcpu[0],irq);
}
+
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
+{
+ if ( v->processor == newcpu )
+ return;
+
+ set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+ v->processor = newcpu;
+}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index efd967464d..598c2095ee 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -295,27 +295,24 @@ void arch_do_boot_vcpu(struct vcpu *v)
l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
}
-#ifdef CONFIG_VMX
-static int vmx_switch_on;
-
-void arch_vmx_do_resume(struct vcpu *v)
+void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
{
- u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
-
- load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
- vmx_do_resume(v);
- reset_stack_and_jump(vmx_asm_do_resume);
-}
+ if ( v->processor == newcpu )
+ return;
-void arch_vmx_do_launch(struct vcpu *v)
-{
- u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+ set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+ v->processor = newcpu;
- load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
- vmx_do_launch(v);
- reset_stack_and_jump(vmx_asm_do_launch);
+ if ( VMX_DOMAIN(v) )
+ {
+ __vmpclear(virt_to_phys(v->arch.arch_vmx.vmcs));
+ v->arch.schedule_tail = arch_vmx_do_relaunch;
+ }
}
+#ifdef CONFIG_VMX
+static int vmx_switch_on;
+
static int vmx_final_setup_guest(
struct vcpu *v, struct vcpu_guest_context *ctxt)
{
@@ -346,7 +343,7 @@ static int vmx_final_setup_guest(
v->arch.schedule_tail = arch_vmx_do_launch;
-#if defined (__i386)
+#if defined (__i386__)
v->domain->arch.vmx_platform.real_mode_data =
(unsigned long *) regs->esi;
#endif
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index dd2f11c4a0..c83df67ddf 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -198,7 +198,7 @@ void vmx_set_host_env(struct vcpu *v)
host_env.idtr_limit = desc.size;
host_env.idtr_base = desc.address;
error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
-
+
__asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
host_env.gdtr_limit = desc.size;
host_env.gdtr_base = desc.address;
@@ -210,7 +210,6 @@ void vmx_set_host_env(struct vcpu *v)
host_env.tr_base = (unsigned long) &init_tss[cpu];
error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
-
}
void vmx_do_launch(struct vcpu *v)
@@ -544,6 +543,36 @@ void vm_resume_fail(unsigned long eflags)
__vmx_bug(guest_cpu_user_regs());
}
+void arch_vmx_do_resume(struct vcpu *v)
+{
+ u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+ load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+ vmx_do_resume(v);
+ reset_stack_and_jump(vmx_asm_do_resume);
+}
+
+void arch_vmx_do_launch(struct vcpu *v)
+{
+ u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+ load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+ vmx_do_launch(v);
+ reset_stack_and_jump(vmx_asm_do_launch);
+}
+
+void arch_vmx_do_relaunch(struct vcpu *v)
+{
+ u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+ load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+ vmx_do_resume(v);
+ vmx_set_host_env(v);
+ v->arch.schedule_tail = arch_vmx_do_resume;
+
+ reset_stack_and_jump(vmx_asm_do_relaunch);
+}
+
#endif /* CONFIG_VMX */
/*
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 99d5b1fbc2..a2491612d7 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -108,31 +108,26 @@
pushl %ecx; \
pushl %ebx;
+#define VMX_RESTORE_ALL_NOSEGREGS \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax; \
+ addl $(NR_SKIPPED_REGS*4), %esp
+
ENTRY(vmx_asm_vmexit_handler)
/* selectors are restored/saved by VMX */
VMX_SAVE_ALL_NOSEGREGS
call vmx_vmexit_handler
jmp vmx_asm_do_resume
-ENTRY(vmx_asm_do_launch)
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
- addl $(NR_SKIPPED_REGS*4), %esp
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
- pushf
- call vm_launch_fail
- hlt
-
- ALIGN
-
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+/* vmx_test_all_events */
+ .if \initialized
GET_CURRENT(%ebx)
/*test_all_events:*/
xorl %ecx,%ecx
@@ -142,34 +137,50 @@ vmx_test_all_events:
movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
test %ecx,irq_stat(%eax,1)
- jnz vmx_process_softirqs
+ jnz 2f
-vmx_restore_all_guest:
+/* vmx_restore_all_guest */
call load_cr2
+ .endif
+ VMX_RESTORE_ALL_NOSEGREGS
/*
* Check if we are going back to VMX-based VM
* By this time, all the setups in the VMCS must be complete.
*/
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
- addl $(NR_SKIPPED_REGS*4), %esp
+ .if \launch
+ /* VMLAUNCH */
+ .byte 0x0f,0x01,0xc2
+ pushf
+ call vm_launch_fail
+ .else
/* VMRESUME */
.byte 0x0f,0x01,0xc3
pushf
call vm_resume_fail
+ .endif
/* Should never reach here */
hlt
ALIGN
-vmx_process_softirqs:
+ .if \initialized
+2:
+/* vmx_process_softirqs */
sti
call do_softirq
- jmp vmx_test_all_events
+ jmp 1b
+ ALIGN
+ .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+ vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+ vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+ vmx_asm_common 1 1
+
#endif
ALIGN
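
The restructuring above (mirrored for x86_64 in the next file) replaces the
duplicated pop sequences with a VMX_RESTORE_ALL_NOSEGREGS macro and folds
the launch, resume, and new relaunch entry points into a single
vmx_asm_common assembler macro taking two flags: launch selects VMLAUNCH
over VMRESUME, and initialized guards the event-test/softirq preamble,
which is only meaningful once guest state has been set up. A rough C
rendering of the macro's control flow, as a sketch with invented helper
names (the real implementation is the assembly above):

#include <stdbool.h>
#include <stdio.h>

/* Invented stubs for the assembly-level steps (illustration only). */
static bool softirqs_pending(void)       { return false; }
static void do_softirq(void)             { }
static void load_cr2(void)               { puts("restore guest %cr2"); }
static void restore_all_nosegregs(void)  { puts("pop guest GPRs"); }

static bool vmenter(bool launch)  /* VMLAUNCH/VMRESUME; true = entered */
{
    puts(launch ? "VMLAUNCH" : "VMRESUME");
    return true;
}

/* C rendering of the vmx_asm_common macro's control flow. */
static void vmx_asm_common(bool launch, bool initialized)
{
    for (;;) {                          /* local label 1: */
        if (initialized) {
            if (softirqs_pending()) {   /* the test_all_events check */
                do_softirq();           /* local label 2: */
                continue;               /* jmp 1b */
            }
            load_cr2();                 /* vmx_restore_all_guest */
        }
        restore_all_nosegregs();
        if (vmenter(launch))
            return;                     /* control is now in the guest */
        break;  /* failed entry: vm_launch_fail()/vm_resume_fail(), hlt */
    }
}

int main(void)
{
    vmx_asm_common(true,  false);   /* vmx_asm_do_launch   */
    vmx_asm_common(false, true);    /* vmx_asm_do_resume   */
    vmx_asm_common(true,  true);    /* vmx_asm_do_relaunch */
    return 0;
}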
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index a8a5574e2c..0f24b15ca1 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -194,39 +194,34 @@ test_all_events:
pushq %r14; \
pushq %r15; \
+#define VMX_RESTORE_ALL_NOSEGREGS \
+ popq %r15; \
+ popq %r14; \
+ popq %r13; \
+ popq %r12; \
+ popq %rbp; \
+ popq %rbx; \
+ popq %r11; \
+ popq %r10; \
+ popq %r9; \
+ popq %r8; \
+ popq %rax; \
+ popq %rcx; \
+ popq %rdx; \
+ popq %rsi; \
+ popq %rdi; \
+ addq $(NR_SKIPPED_REGS*8), %rsp; \
+
ENTRY(vmx_asm_vmexit_handler)
/* selectors are restored/saved by VMX */
VMX_SAVE_ALL_NOSEGREGS
call vmx_vmexit_handler
jmp vmx_asm_do_resume
-ENTRY(vmx_asm_do_launch)
- popq %r15
- popq %r14
- popq %r13
- popq %r12
- popq %rbp
- popq %rbx
- popq %r11
- popq %r10
- popq %r9
- popq %r8
- popq %rax
- popq %rcx
- popq %rdx
- popq %rsi
- popq %rdi
- addq $(NR_SKIPPED_REGS*8), %rsp
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
- pushfq
- call vm_launch_fail
- hlt
-
- ALIGN
-
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+ .if \initialized
+/* vmx_test_all_events */
GET_CURRENT(%rbx)
/* test_all_events: */
cli # tests must not race interrupts
@@ -235,42 +230,51 @@ vmx_test_all_events:
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip), %rdx
testl $~0,(%rdx,%rax,1)
- jnz vmx_process_softirqs
+ jnz 2f
-vmx_restore_all_guest:
+/* vmx_restore_all_guest */
call load_cr2
+ .endif
/*
* Check if we are going back to VMX-based VM
* By this time, all the setups in the VMCS must be complete.
*/
- popq %r15
- popq %r14
- popq %r13
- popq %r12
- popq %rbp
- popq %rbx
- popq %r11
- popq %r10
- popq %r9
- popq %r8
- popq %rax
- popq %rcx
- popq %rdx
- popq %rsi
- popq %rdi
- addq $(NR_SKIPPED_REGS*8), %rsp
+ VMX_RESTORE_ALL_NOSEGREGS
+ .if \launch
+ /* VMLAUNCH */
+ .byte 0x0f,0x01,0xc2
+ pushfq
+ call vm_launch_fail
+ .else
/* VMRESUME */
.byte 0x0f,0x01,0xc3
pushfq
call vm_resume_fail
+ .endif
/* Should never reach here */
hlt
ALIGN
-vmx_process_softirqs:
+
+ .if \initialized
+2:
+/* vmx_process_softirqs */
sti
call do_softirq
- jmp vmx_test_all_events
+ jmp 1b
+ ALIGN
+ .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+ vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+ vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+ vmx_asm_common 1 1
+
#endif
ALIGN
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index d189528df2..9964714d03 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -293,17 +293,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
v->cpumap = cpumap;
if ( cpumap == CPUMAP_RUNANYWHERE )
+ {
clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
+ }
else
{
/* pick a new cpu from the usable map */
int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
vcpu_pause(v);
- if ( v->processor != new_cpu )
- set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+ vcpu_migrate_cpu(v, new_cpu);
set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
- v->processor = new_cpu;
vcpu_unpause(v);
}
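
With this hunk, the setvcpuaffinity path stops writing v->processor and the
migration flag directly and instead funnels through the new hook, so the
VMX-specific VMCLEAR can never be skipped by a caller. The calling
discipline is pause, migrate, unpause; schematically (stubbed types and
helpers below, not the real Xen definitions):

#include <stdio.h>

/* Hypothetical stand-ins for the Xen primitives. */
struct vcpu { int processor; int paused; };
static void vcpu_pause(struct vcpu *v)   { v->paused = 1; }
static void vcpu_unpause(struct vcpu *v) { v->paused = 0; }
static void vcpu_migrate_cpu(struct vcpu *v, int newcpu)
{
    if (v->processor != newcpu)
        v->processor = newcpu;   /* flag-setting and VMCLEAR elided */
}

/* The discipline the setvcpuaffinity path follows: the VCPU is paused
 * while it is moved, so the move cannot race with the VCPU running. */
static void pin_vcpu(struct vcpu *v, int new_cpu)
{
    vcpu_pause(v);
    vcpu_migrate_cpu(v, new_cpu);
    /* set_bit(_VCPUF_cpu_pinned, ...) in the real code */
    vcpu_unpause(v);             /* next schedule runs on new_cpu */
    printf("vcpu now pinned to CPU %d\n", v->processor);
}

int main(void)
{
    struct vcpu v = { .processor = 0, .paused = 0 };
    pin_vcpu(&v, 3);
    return 0;
}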
diff --git a/xen/include/asm-x86/vmx.h b/xen/include/asm-x86/vmx.h
index 5596549ce1..714969eb2e 100644
--- a/xen/include/asm-x86/vmx.h
+++ b/xen/include/asm-x86/vmx.h
@@ -35,6 +35,7 @@ extern void vmx_intr_assist(struct vcpu *d);
extern void arch_vmx_do_launch(struct vcpu *);
extern void arch_vmx_do_resume(struct vcpu *);
+extern void arch_vmx_do_relaunch(struct vcpu *);
extern int vmcs_size;
extern unsigned int cpu_rev;
diff --git a/xen/include/asm-x86/vmx_vmcs.h b/xen/include/asm-x86/vmx_vmcs.h
index 68db612665..987e4e79e6 100644
--- a/xen/include/asm-x86/vmx_vmcs.h
+++ b/xen/include/asm-x86/vmx_vmcs.h
@@ -93,6 +93,7 @@ struct arch_vmx_struct {
void vmx_do_launch(struct vcpu *);
void vmx_do_resume(struct vcpu *);
+void vmx_set_host_env(struct vcpu *);
struct vmcs_struct *alloc_vmcs(void);
void free_vmcs(struct vmcs_struct *);
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 46c2e4bb5d..cf3a192260 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -15,7 +15,9 @@ extern void arch_do_createdomain(struct vcpu *v);
extern void arch_do_boot_vcpu(struct vcpu *v);
extern int arch_set_info_guest(
- struct vcpu *d, struct vcpu_guest_context *c);
+ struct vcpu *v, struct vcpu_guest_context *c);
+
+extern void vcpu_migrate_cpu(struct vcpu *v, int newcpu);
extern void free_perdomain_pt(struct domain *d);