author     Keir Fraser <keir@xen.org>       2013-04-09 16:25:20 +0200
committer  Jan Beulich <jbeulich@suse.com>  2013-04-09 16:25:20 +0200
commit     da7e3cdf0a1c2440886383e0e7f826c88796b8ca (patch)
tree       7e28dde1e11f8cfd95acc3da49a6dbb5191c22bc
parent     e6bd18f4cf30c3e26b44e52a929d18066e0c8ceb (diff)
vmx: Simplify cr0 update handling by deferring cr4 changes to the cr4 handler.
Signed-off-by: Keir Fraser <keir@xen.org>
master commit: 1453984eab1297559e016d4e907a27e55997191c
master date: 2013-01-30 09:15:39 -0800
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c  15
1 file changed, 5 insertions(+), 10 deletions(-)
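Below is a minimal, self-contained C sketch of the pattern this commit adopts: the CR0
update path stops patching CR4.VME by hand when entering or leaving emulated real mode,
and instead recomputes CR0 and then invokes the CR4 handler once at the end, so all
CR4-derived state is rebuilt in a single place. The names used here (struct vcpu_state,
update_cr0, update_cr4) are illustrative only and are not Xen's; they merely mirror the
shape of vmx_update_guest_cr() for cases 0 and 4.

    /*
     * Illustrative sketch, not the actual Xen code: CR0 changes defer all
     * CR4 recalculation to the CR4 handler instead of duplicating it.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define X86_CR0_PE  0x00000001UL
    #define X86_CR4_VME 0x00000002UL

    struct vcpu_state {
        uint64_t guest_cr[5];   /* CR values as the guest sees them */
        uint64_t hw_cr[5];      /* CR values actually loaded for the guest */
        bool     realmode;      /* real mode emulated via virtual-8086 */
    };

    /* Single place where the hardware CR4 value is derived from guest state. */
    static void update_cr4(struct vcpu_state *v)
    {
        uint64_t hw_cr4 = v->guest_cr[4];

        /* Real-mode emulation needs virtual-8086 extensions switched on. */
        if ( v->realmode )
            hw_cr4 |= X86_CR4_VME;

        v->hw_cr[4] = hw_cr4;
        /* The real code would __vmwrite(GUEST_CR4, hw_cr4) here. */
    }

    static void update_cr0(struct vcpu_state *v, uint64_t new_cr0)
    {
        v->guest_cr[0] = new_cr0;
        v->realmode   = !(new_cr0 & X86_CR0_PE);
        v->hw_cr[0]   = new_cr0;
        /* The real code would also write GUEST_CR0 and CR0_READ_SHADOW. */

        /* Changing CR0 can change some bits in real CR4: defer to the CR4
         * handler instead of duplicating its VME logic here. */
        update_cr4(v);
    }

    int main(void)
    {
        struct vcpu_state v = { .guest_cr = { 0 } };

        update_cr0(&v, 0);                 /* drop to (emulated) real mode */
        printf("hw CR4 = %#lx\n", (unsigned long)v.hw_cr[4]);  /* VME set */

        update_cr0(&v, X86_CR0_PE);        /* back to protected mode */
        printf("hw CR4 = %#lx\n", (unsigned long)v.hw_cr[4]);  /* VME clear */

        return 0;
    }

The benefit of the deferral is that the CR0 path never needs to know which CR4 bits
depend on CR0; the CR4 handler remains the single authority for the hardware CR4 value,
which is exactly why the VME fix-ups below can be deleted from the vm86 enter/leave
branches.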
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e9f13239c7..88eeb9d352 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1092,20 +1092,18 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
if ( paging_mode_hap(v->domain) )
{
- /* We manage GUEST_CR3 when guest CR0.PE is zero or when cr3 memevents are on */
+ /* Manage GUEST_CR3 when CR0.PE=0. */
uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
if ( !hvm_paging_enabled(v) )
v->arch.hvm_vmx.exec_control |= cr3_ctls;
+ /* Trap CR3 updates if CR3 memory events are enabled. */
if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
vmx_update_cpu_exec_control(v);
-
- /* Changing CR0.PE can change some bits in real CR4. */
- vmx_update_guest_cr(v, 4);
}
if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
@@ -1135,8 +1133,6 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
{
for ( s = x86_seg_cs ; s <= x86_seg_tr ; s++ )
vmx_set_segment_register(v, s, &reg[s]);
- v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
- __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
v->arch.hvm_vmx.exception_bitmap = 0xffffffff;
vmx_update_exception_bitmap(v);
}
@@ -1146,10 +1142,6 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
if ( !(v->arch.hvm_vmx.vm86_segment_mask & (1<<s)) )
vmx_set_segment_register(
v, s, &v->arch.hvm_vmx.vm86_saved_seg[s]);
- v->arch.hvm_vcpu.hw_cr[4] =
- ((v->arch.hvm_vcpu.hw_cr[4] & ~X86_CR4_VME)
- |(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VME));
- __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
| (paging_mode_hap(v->domain) ?
0 : (1U << TRAP_page_fault))
@@ -1163,6 +1155,9 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
v->arch.hvm_vcpu.guest_cr[0] | hw_cr0_mask;
__vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
__vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+
+ /* Changing CR0 can change some bits in real CR4. */
+ vmx_update_guest_cr(v, 4);
break;
}
case 2: