author     Christoph Egger <Christoph.Egger@amd.com>    2011-07-16 09:23:22 +0100
committer  Christoph Egger <Christoph.Egger@amd.com>    2011-07-16 09:23:22 +0100
commit     fd14a1943c43c3c898711fa8f3f1ea9017da87d0 (patch)
tree       0171c5383414aae3f29d0cb461103606c38ae382
parent     68b5b855954787fedc8b03a2fdb6ebedfe07d366 (diff)
nestedsvm: Support TSC Rate MSR
Support the TSC Rate MSR and enable TSC scaling for nested virtualization. With it, guest VMs do not need to take a #VMEXIT to calculate a translated TSC value while running under TSC emulation mode. I measured native performance of the rdtsc instruction in the l2 guest with xen-on-xen, with both the host and the l1 guest running under TSC emulation mode.

TSC scaling only needs MSR emulation and a correct TSC offset calculation, so it can also be emulated on older hardware. In that case the rdtsc instruction is intercepted and handled directly by the host, which saves the cost of a full VMRUN/VMEXIT emulation cycle.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
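For illustration only (not part of the patch): a minimal user-space sketch of the scaling arithmetic this patch implements, assuming the 8.32 fixed-point encoding of MSR_AMD64_TSC_RATIO in which DEFAULT_TSC_RATIO (1ULL << 32) means a ratio of 1.0. The in-hypervisor helper added below (svm_get_tsc_offset) splits the multiplication into high/low halves instead of relying on a 128-bit type.

    #include <stdint.h>
    #include <stdio.h>

    /* 1.0 in the assumed fixed-point format: integer part 1, fraction 0. */
    #define DEFAULT_TSC_RATIO (1ULL << 32)

    /* Scale a host TSC value by an 8.32 fixed-point ratio:
     * scaled = host_tsc * ratio / 2^32, using the GCC/Clang 128-bit type. */
    static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
    {
        return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> 32);
    }

    int main(void)
    {
        uint64_t host_tsc  = 1000000000ULL;   /* arbitrary sample value */
        uint64_t guest_tsc = 2000000000ULL;   /* desired guest view of the TSC */
        uint64_t ratio     = 3ULL << 31;      /* 1.5 in 8.32 fixed point */

        /* The VMCB TSC offset is what gets added to the scaled host TSC to
         * produce the guest's view, so: offset = guest_tsc - scaled(host_tsc). */
        uint64_t offset = guest_tsc - scale_tsc(host_tsc, ratio);

        printf("scaled host TSC: %llu\n",
               (unsigned long long)scale_tsc(host_tsc, ratio));
        printf("TSC offset:      %llu\n", (unsigned long long)offset);
        return 0;
    }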
-rw-r--r--  tools/libxc/xc_cpuid_x86.c               |  2
-rw-r--r--  xen/arch/x86/hvm/svm/nestedsvm.c         | 15
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c               | 44
-rw-r--r--  xen/include/asm-x86/hvm/svm/nestedsvm.h  |  5
4 files changed, 51 insertions, 15 deletions
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index d41f0b0ece..0796ae91d2 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -157,7 +157,7 @@ static void amd_xc_cpuid_policy(
SVM_FEATURE_DECODEASSISTS);
/* Pass 2: Always enable SVM features which are emulated */
- regs[3] |= SVM_FEATURE_VMCBCLEAN;
+ regs[3] |= SVM_FEATURE_VMCBCLEAN | SVM_FEATURE_TSCRATEMSR;
break;
}
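Not part of the patch: the hunk above makes the toolstack always advertise SVM_FEATURE_TSCRATEMSR, since the MSR is emulated even on hardware without the feature. As a hedged sketch, an l1 guest could probe for it roughly as follows, assuming the conventional AMD SVM feature leaf CPUID 0x8000000A with TscRateMsr reported in EDX bit 4 (the bit position is the architectural convention, not something this patch defines):

    #include <cpuid.h>     /* GCC/Clang helper for the cpuid instruction */
    #include <stdbool.h>

    /* Probe CPUID Fn8000_000A (SVM features); TscRateMsr is conventionally
     * reported in EDX bit 4. Returns false if the leaf is unavailable. */
    static bool has_tsc_rate_msr(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
            return false;
        return edx & (1u << 4);
    }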
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index cb5e1a31eb..ba7683b25a 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -147,6 +147,8 @@ int nsvm_vcpu_reset(struct vcpu *v)
svm->ns_msr_hsavepa = VMCX_EADDR;
svm->ns_ovvmcb_pa = VMCX_EADDR;
+ svm->ns_tscratio = DEFAULT_TSC_RATIO;
+
svm->ns_cr_intercepts = 0;
svm->ns_dr_intercepts = 0;
svm->ns_exception_intercepts = 0;
@@ -1185,6 +1187,9 @@ int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
case MSR_K8_VM_HSAVE_PA:
*msr_content = svm->ns_msr_hsavepa;
break;
+ case MSR_AMD64_TSC_RATIO:
+ *msr_content = svm->ns_tscratio;
+ break;
default:
ret = 0;
break;
@@ -1211,6 +1216,16 @@ int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content)
}
svm->ns_msr_hsavepa = msr_content;
break;
+ case MSR_AMD64_TSC_RATIO:
+ if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) {
+ gdprintk(XENLOG_ERR,
+ "reserved bits set in MSR_AMD64_TSC_RATIO 0x%"PRIx64"\n",
+ msr_content);
+ ret = -1; /* inject #GP */
+ break;
+ }
+ svm->ns_tscratio = msr_content;
+ break;
default:
ret = 0;
break;
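As an aside (not from the patch): the wrmsr handler above rejects writes with reserved bits set and injects #GP into the l1 guest. A sketch of the check, under the assumption that only the 8.32 fixed-point field in bits 39:0 is defined and TSC_RATIO_RSVD_BITS therefore covers bits 63:40:

    #include <stdint.h>
    #include <stdbool.h>

    /* Assumed layout: bits 39:32 integer part, bits 31:0 fraction; anything
     * above bit 39 is reserved and must be zero on writes. */
    #define TSC_RATIO_RSVD_BITS  (~((1ULL << 40) - 1))

    static bool tsc_ratio_write_valid(uint64_t msr_content)
    {
        return (msr_content & TSC_RATIO_RSVD_BITS) == 0;
    }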
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 00c546681d..8d9ee8747f 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -635,28 +635,37 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
svm_vmload(vmcb);
}
+static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
+ uint64_t ratio)
+{
+ uint64_t offset;
+
+ if (ratio == DEFAULT_TSC_RATIO)
+ return guest_tsc - host_tsc;
+
+ /* calculate hi,lo parts in 64bits to prevent overflow */
+ offset = (((host_tsc >> 32U) * (ratio >> 32U)) << 32U) +
+ (host_tsc & 0xffffffffULL) * (ratio & 0xffffffffULL);
+ return guest_tsc - offset;
+}
+
static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct vmcb_struct *n1vmcb, *n2vmcb;
uint64_t n2_tsc_offset = 0;
struct domain *d = v->domain;
+ uint64_t host_tsc, guest_tsc;
- if ( !nestedhvm_enabled(d) ) {
- /* Re-adjust the offset value when TSC_RATIO is available */
- if ( cpu_has_tsc_ratio && d->arch.vtsc )
- {
- uint64_t host_tsc, guest_tsc;
-
- rdtscll(host_tsc);
- guest_tsc = hvm_get_guest_tsc(v);
-
- /* calculate hi,lo parts in 64bits to prevent overflow */
- offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
- (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
- offset = guest_tsc - offset;
- }
+ guest_tsc = hvm_get_guest_tsc(v);
+
+ /* Re-adjust the offset value when TSC_RATIO is available */
+ if ( cpu_has_tsc_ratio && d->arch.vtsc ) {
+ rdtscll(host_tsc);
+ offset = svm_get_tsc_offset(host_tsc, guest_tsc, vcpu_tsc_ratio(v));
+ }
+ if ( !nestedhvm_enabled(d) ) {
vmcb_set_tsc_offset(vmcb, offset);
return;
}
@@ -665,8 +674,14 @@ static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
n2vmcb = vcpu_nestedhvm(v).nv_n2vmcx;
if ( nestedhvm_vcpu_in_guestmode(v) ) {
+ struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
n2_tsc_offset = vmcb_get_tsc_offset(n2vmcb) -
vmcb_get_tsc_offset(n1vmcb);
+ if ( svm->ns_tscratio != DEFAULT_TSC_RATIO ) {
+ n2_tsc_offset = svm_get_tsc_offset(guest_tsc,
+ guest_tsc + n2_tsc_offset, svm->ns_tscratio);
+ }
vmcb_set_tsc_offset(n1vmcb, offset);
}
@@ -1107,6 +1122,7 @@ struct hvm_function_table * __init start_svm(void)
P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
P(cpu_has_svm_decode, "DecodeAssists");
P(cpu_has_pause_filter, "Pause-Intercept Filter");
+ P(cpu_has_tsc_ratio, "TSC Rate MSR");
#undef P
if ( !printed )
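Not part of the patch: the rewritten svm_set_tsc_offset() above scales the host TSC by vcpu_tsc_ratio(v) instead of open-coding d->arch.tsc_khz / cpu_khz as the removed lines did. A plausible reading, shown as a sketch under the assumption that the ratio is simply the guest-to-host frequency quotient packed into 8.32 fixed point:

    #include <stdint.h>

    /* Hypothetical helper: pack the guest/host frequency quotient into the
     * 8.32 fixed-point format used by MSR_AMD64_TSC_RATIO, so that
     * guest_khz / host_khz == ratio / 2^32 (up to rounding). */
    static uint64_t tsc_ratio_from_khz(uint64_t guest_khz, uint64_t host_khz)
    {
        return (guest_khz << 32) / host_khz;
    }

With guest_khz equal to host_khz this yields DEFAULT_TSC_RATIO (1ULL << 32), i.e. no scaling.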
diff --git a/xen/include/asm-x86/hvm/svm/nestedsvm.h b/xen/include/asm-x86/hvm/svm/nestedsvm.h
index ec8114643c..65c13211a2 100644
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -36,6 +36,11 @@ struct nestedsvm {
*/
uint64_t ns_ovvmcb_pa;
+ /* virtual tscratio holding the value l1 guest writes to the
+ * MSR_AMD64_TSC_RATIO MSR.
+ */
+ uint64_t ns_tscratio;
+
/* Cached real intercepts of the l2 guest */
uint32_t ns_cr_intercepts;
uint32_t ns_dr_intercepts;