diff options (diffstat)
-rw-r--r--  xen/arch/x86/domain.c            | 14
-rw-r--r--  xen/arch/x86/hvm/hvm.c           | 40
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c       | 42
-rw-r--r--  xen/arch/x86/hvm/svm/vmcb.c      |  3
-rw-r--r--  xen/arch/x86/hvm/vlapic.c        | 17
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c      | 11
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c       | 33
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h    | 13
-rw-r--r--  xen/include/asm-x86/hvm/io.h     |  1
-rw-r--r--  xen/include/asm-x86/hvm/vlapic.h |  3
10 files changed, 75 insertions(+), 102 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 0ee7361825..6c4ea37ae1 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -235,7 +235,7 @@ int arch_domain_create(struct domain *d) virt_to_page(d->shared_info), d, XENSHARE_writable); } - return hvm_domain_initialise(d); + return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0; fail: free_xenheap_page(d->shared_info); @@ -249,6 +249,15 @@ int arch_domain_create(struct domain *d) void arch_domain_destroy(struct domain *d) { + struct vcpu *v; + + if ( is_hvm_domain(d) ) + { + for_each_vcpu ( d, v ) + hvm_vcpu_destroy(v); + hvm_domain_destroy(d); + } + shadow_final_teardown(d); free_xenheap_pages( @@ -974,9 +983,6 @@ void domain_relinquish_resources(struct domain *d) #endif } - if ( is_hvm_domain(d) ) - hvm_relinquish_guest_resources(d); - /* Tear down shadow mode stuff. */ shadow_teardown(d); diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 6bc9881165..10b97c2b96 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -110,19 +110,11 @@ void hvm_do_resume(struct vcpu *v) } } -void hvm_release_assist_channel(struct vcpu *v) -{ - free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port); -} - int hvm_domain_initialise(struct domain *d) { struct hvm_domain *platform = &d->arch.hvm_domain; int rc; - if ( !is_hvm_domain(d) ) - return 0; - if ( !hvm_enabled ) { gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest " @@ -146,6 +138,20 @@ int hvm_domain_initialise(struct domain *d) return 0; } +void hvm_domain_destroy(struct domain *d) +{ + kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer); + rtc_deinit(d); + pmtimer_deinit(d); + + if ( d->arch.hvm_domain.shared_page_va ) + unmap_domain_page_global( + (void *)d->arch.hvm_domain.shared_page_va); + + if ( d->arch.hvm_domain.buffered_io_va ) + unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va); +} + int hvm_vcpu_initialise(struct vcpu *v) { struct hvm_domain *platform; @@ -154,12 +160,20 @@ int 
hvm_vcpu_initialise(struct vcpu *v) if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) return rc; + if ( (rc = vlapic_init(v)) != 0 ) + { + hvm_funcs.vcpu_destroy(v); + return rc; + } + /* Create ioreq event channel. */ v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0); if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) ) get_vio(v->domain, v->vcpu_id)->vp_eport = v->arch.hvm_vcpu.xen_port; + init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor); + if ( v->vcpu_id != 0 ) return 0; @@ -178,6 +192,16 @@ int hvm_vcpu_initialise(struct vcpu *v) return 0; } +void hvm_vcpu_destroy(struct vcpu *v) +{ + kill_timer(&v->arch.hvm_vcpu.hlt_timer); + vlapic_destroy(v); + hvm_funcs.vcpu_destroy(v); + + /* Event channel is already freed by evtchn_destroy(). */ + /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/ +} + void pic_irq_request(void *data, int level) { int *interrupt_request = data; diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 5ff04f79e5..3e4dd31d23 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -61,7 +61,6 @@ extern void svm_dump_inst(unsigned long eip); extern int svm_dbg_on; void svm_dump_regs(const char *from, struct cpu_user_regs *regs); -static void svm_relinquish_guest_resources(struct domain *d); static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v, struct cpu_user_regs *regs); @@ -777,6 +776,11 @@ static int svm_vcpu_initialise(struct vcpu *v) return 0; } +static void svm_vcpu_destroy(struct vcpu *v) +{ + destroy_vmcb(&v->arch.hvm_svm); +} + int start_svm(void) { u32 eax, ecx, edx; @@ -825,7 +829,7 @@ int start_svm(void) hvm_funcs.disable = stop_svm; hvm_funcs.vcpu_initialise = svm_vcpu_initialise; - hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources; + hvm_funcs.vcpu_destroy = svm_vcpu_destroy; hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs; hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs; @@ -851,40 +855,6 @@ int 
start_svm(void) } -static void svm_relinquish_guest_resources(struct domain *d) -{ - struct vcpu *v; - - for_each_vcpu ( d, v ) - { - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) - continue; - - destroy_vmcb(&v->arch.hvm_svm); - kill_timer(&v->arch.hvm_vcpu.hlt_timer); - if ( VLAPIC(v) != NULL ) - { - kill_timer(&VLAPIC(v)->vlapic_timer); - unmap_domain_page_global(VLAPIC(v)->regs); - free_domheap_page(VLAPIC(v)->regs_page); - xfree(VLAPIC(v)); - } - hvm_release_assist_channel(v); - } - - kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer); - rtc_deinit(d); - pmtimer_deinit(d); - - if ( d->arch.hvm_domain.shared_page_va ) - unmap_domain_page_global( - (void *)d->arch.hvm_domain.shared_page_va); - - if ( d->arch.hvm_domain.buffered_io_va ) - unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va); -} - - static void svm_migrate_timers(struct vcpu *v) { struct periodic_time *pt = diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index d2fee7677c..9daa11969f 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -351,9 +351,6 @@ void svm_do_launch(struct vcpu *v) if ( !asidpool_assign_next( vmcb, 0, core, core )) BUG(); - vlapic_init(v); - init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor); - vmcb->ldtr.sel = 0; vmcb->ldtr.base = 0; vmcb->ldtr.limit = 0; diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c index 64820c767c..5524dd7036 100644 --- a/xen/arch/x86/hvm/vlapic.c +++ b/xen/arch/x86/hvm/vlapic.c @@ -1016,7 +1016,7 @@ static int vlapic_reset(struct vlapic *vlapic) int vlapic_init(struct vcpu *v) { - struct vlapic *vlapic = NULL; + struct vlapic *vlapic; HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id); @@ -1047,3 +1047,18 @@ int vlapic_init(struct vcpu *v) return 0; } + +void vlapic_destroy(struct vcpu *v) +{ + struct vlapic *vlapic = VLAPIC(v); + + if ( vlapic == NULL ) + return; + + VLAPIC(v) = NULL; + + kill_timer(&vlapic->vlapic_timer); + 
unmap_domain_page_global(vlapic->regs); + free_domheap_page(vlapic->regs_page); + xfree(vlapic); +} diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 9c4e8defb3..4f679e60ac 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -193,11 +193,7 @@ void vmx_vmcs_enter(struct vcpu *v) { /* * NB. We must *always* run an HVM VCPU on its own VMCS, except for - * vmx_vmcs_enter/exit critical regions. This leads to some TODOs: - * 1. VMPTRLD as soon as we context-switch to a HVM VCPU. - * 2. VMCS destruction needs to happen later (from domain_destroy()). - * We can relax this a bit if a paused VCPU always commits its - * architectural state to a software structure. + * vmx_vmcs_enter/exit critical regions. */ if ( v == current ) return; @@ -416,11 +412,6 @@ static int construct_vmcs(struct vcpu *v) cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE); error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4); - /* XXX Move this out. */ - init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor); - if ( vlapic_init(v) != 0 ) - return -1; - #ifdef __x86_64__ /* VLAPIC TPR optimisation. 
*/ v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW; diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index a921e2a1c3..8ce1e1d503 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -74,36 +74,9 @@ static int vmx_vcpu_initialise(struct vcpu *v) return 0; } -static void vmx_relinquish_guest_resources(struct domain *d) +static void vmx_vcpu_destroy(struct vcpu *v) { - struct vcpu *v; - - for_each_vcpu ( d, v ) - { - vmx_destroy_vmcs(v); - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) - continue; - kill_timer(&v->arch.hvm_vcpu.hlt_timer); - if ( VLAPIC(v) != NULL ) - { - kill_timer(&VLAPIC(v)->vlapic_timer); - unmap_domain_page_global(VLAPIC(v)->regs); - free_domheap_page(VLAPIC(v)->regs_page); - xfree(VLAPIC(v)); - } - hvm_release_assist_channel(v); - } - - kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer); - rtc_deinit(d); - pmtimer_deinit(d); - - if ( d->arch.hvm_domain.shared_page_va ) - unmap_domain_page_global( - (void *)d->arch.hvm_domain.shared_page_va); - - if ( d->arch.hvm_domain.buffered_io_va ) - unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va); + vmx_destroy_vmcs(v); } #ifdef __x86_64__ @@ -674,7 +647,7 @@ static void vmx_setup_hvm_funcs(void) hvm_funcs.disable = stop_vmx; hvm_funcs.vcpu_initialise = vmx_vcpu_initialise; - hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources; + hvm_funcs.vcpu_destroy = vmx_vcpu_destroy; hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs; hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs; diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 851b37830e..6576e20839 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -33,10 +33,10 @@ struct hvm_function_table { void (*disable)(void); /* - * Initialize/relinguish HVM guest resources + * Initialise/destroy HVM VCPU resources */ int (*vcpu_initialise)(struct vcpu *v); - void 
(*relinquish_guest_resources)(struct domain *d); + void (*vcpu_destroy)(struct vcpu *v); /* * Store and load guest state: @@ -92,13 +92,10 @@ hvm_disable(void) } int hvm_domain_initialise(struct domain *d); -int hvm_vcpu_initialise(struct vcpu *v); +void hvm_domain_destroy(struct domain *d); -static inline void -hvm_relinquish_guest_resources(struct domain *d) -{ - hvm_funcs.relinquish_guest_resources(d); -} +int hvm_vcpu_initialise(struct vcpu *v); +void hvm_vcpu_destroy(struct vcpu *v); static inline void hvm_store_cpu_guest_regs( diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h index b784d059fe..66c48b438c 100644 --- a/xen/include/asm-x86/hvm/io.h +++ b/xen/include/asm-x86/hvm/io.h @@ -151,7 +151,6 @@ extern void pic_irq_request(void *data, int level); extern void hvm_pic_assist(struct vcpu *v); extern int cpu_get_interrupt(struct vcpu *v, int *type); extern int cpu_has_pending_irq(struct vcpu *v); -extern void hvm_release_assist_channel(struct vcpu *v); // XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame. #define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT))) diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h index 3827c11766..b53c9e149b 100644 --- a/xen/include/asm-x86/hvm/vlapic.h +++ b/xen/include/asm-x86/hvm/vlapic.h @@ -77,7 +77,8 @@ int vlapic_find_highest_irr(struct vlapic *vlapic); int cpu_get_apic_interrupt(struct vcpu *v, int *mode); -int vlapic_init(struct vcpu *vc); +int vlapic_init(struct vcpu *v); +void vlapic_destroy(struct vcpu *v); void vlapic_msr_set(struct vlapic *vlapic, uint64_t value); |