diff options
author | Liu, Jinsong <jinsong.liu@intel.com> | 2012-09-26 12:12:42 +0200 |
---|---|---|
committer | Liu, Jinsong <jinsong.liu@intel.com> | 2012-09-26 12:12:42 +0200 |
commit | 6e0336351ed95e2f72e396b3e1d055a8f5663542 (patch) | |
tree | 7a8e9ed2328d9d3169bd3d9580d0085b4d6def5e /xen/arch/x86/hvm/hvm.c | |
parent | be3e4ed45e1bb0ec9aa2dfcd5450eec6ccc56dac (diff) | |
download | xen-6e0336351ed95e2f72e396b3e1d055a8f5663542.tar.gz xen-6e0336351ed95e2f72e396b3e1d055a8f5663542.tar.bz2 xen-6e0336351ed95e2f72e396b3e1d055a8f5663542.zip |
x86: Implement TSC adjust feature for HVM guest
IA32_TSC_ADJUST MSR is maintained separately for each logical
processor. A logical processor maintains and uses the IA32_TSC_ADJUST
MSR as follows:
1). On RESET, the value of the IA32_TSC_ADJUST MSR is 0;
2). If an execution of WRMSR to the IA32_TIME_STAMP_COUNTER MSR adds
(or subtracts) value X from the TSC, the logical processor also
adds (or subtracts) value X from the IA32_TSC_ADJUST MSR;
3). If an execution of WRMSR to the IA32_TSC_ADJUST MSR adds (or
subtracts) value X from that MSR, the logical processor also adds
(or subtracts) value X from the TSC.
This patch provides TSC-adjust support for HVM guests; with it, guest
OSes that synchronize TSCs across CPUs (writing IA32_TSC_ADJUST) work
as expected.
Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
Diffstat (limited to 'xen/arch/x86/hvm/hvm.c')
-rw-r--r-- | xen/arch/x86/hvm/hvm.c | 30 |
1 files changed, 29 insertions, 1 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0929a098f7..52ba47ca28 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -237,6 +237,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
+    uint64_t delta_tsc;
 
     if ( v->domain->arch.vtsc )
     {
@@ -248,10 +249,22 @@ void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
         rdtscll(tsc);
     }
 
-    v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
+    delta_tsc = guest_tsc - tsc;
+    v->arch.hvm_vcpu.msr_tsc_adjust += delta_tsc
+                          - v->arch.hvm_vcpu.cache_tsc_offset;
+    v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
+
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 }
 
+void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
+{
+    v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
+                            - v->arch.hvm_vcpu.msr_tsc_adjust;
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+    v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
+}
+
 u64 hvm_get_guest_tsc(struct vcpu *v)
 {
     uint64_t tsc;
@@ -270,6 +283,11 @@ u64 hvm_get_guest_tsc(struct vcpu *v)
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
 }
 
+u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
+{
+    return v->arch.hvm_vcpu.msr_tsc_adjust;
+}
+
 void hvm_migrate_timers(struct vcpu *v)
 {
     rtc_migrate_timers(v);
@@ -2769,6 +2787,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = hvm_get_guest_tsc(v);
         break;
 
+    case MSR_IA32_TSC_ADJUST:
+        *msr_content = hvm_get_guest_tsc_adjust(v);
+        break;
+
     case MSR_TSC_AUX:
         *msr_content = hvm_msr_tsc_aux(v);
         break;
@@ -2882,6 +2904,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         hvm_set_guest_tsc(v, msr_content);
         break;
 
+    case MSR_IA32_TSC_ADJUST:
+        hvm_set_guest_tsc_adjust(v, msr_content);
+        break;
+
     case MSR_TSC_AUX:
         v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
         if ( cpu_has_rdtscp
@@ -3429,6 +3455,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
+    v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+
     paging_update_paging_modes(v);
 
     v->arch.flags |= TF_kernel_mode;