diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-05-22 06:31:47 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-05-22 06:31:47 +0100 |
commit | 3f03c620370e99bdf78120ce64580cd715ad150f (patch) | |
tree | 946d935765f472c31471d5310e7207e717bb035c /xen/arch/x86/time.c | |
parent | f3b88f2e646418ad3ed8ce7a19ea32d0b382654a (diff) | |
download | xen-3f03c620370e99bdf78120ce64580cd715ad150f.tar.gz xen-3f03c620370e99bdf78120ce64580cd715ad150f.tar.bz2 xen-3f03c620370e99bdf78120ce64580cd715ad150f.zip |
x86: TSC handling cleanups (version 2)
"I am removing the tsc_scaled variable that is never actually used
because when tsc needs to be scaled vtsc is 1. I am also making this
more explicit in tsc_set_info. I am also removing hvm_domain.gtsc_khz
that is a duplicate of d->arch.tsc_khz. I am using scale_delta(delta,
&d->arch.ns_to_vtsc) to scale the tsc value before returning it to the
guest like in the pv case. I added a feature flag to specify that the
pvclock algorithm is safe to be used in an HVM guest so that the guest
can now use it without hanging."
Version 2 fixes a bug which breaks PV domU time.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Diffstat (limited to 'xen/arch/x86/time.c')
-rw-r--r-- | xen/arch/x86/time.c | 30 |
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c index 4863996930..ee535c186e 100644 --- a/xen/arch/x86/time.c +++ b/xen/arch/x86/time.c @@ -804,8 +804,13 @@ static void __update_vcpu_system_time(struct vcpu *v, int force) if ( d->arch.vtsc ) { - u64 delta = max_t(s64, t->stime_local_stamp - d->arch.vtsc_offset, 0); - tsc_stamp = scale_delta(delta, &d->arch.ns_to_vtsc); + u64 stime = t->stime_local_stamp; + if ( is_hvm_domain(d) ) + { + struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time; + stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset; + } + tsc_stamp = gtime_to_gtsc(d, stime); } else { @@ -828,6 +833,8 @@ static void __update_vcpu_system_time(struct vcpu *v, int force) _u.tsc_to_system_mul = t->tsc_scale.mul_frac; _u.tsc_shift = (s8)t->tsc_scale.shift; } + if ( is_hvm_domain(d) ) + _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset; /* Don't bother unless timestamp record has changed or we are forced. */ _u.version = u->version; /* make versions match for memcmp test */ @@ -1591,11 +1598,17 @@ struct tm wallclock_time(void) * PV SoftTSC Emulation. */ +u64 gtime_to_gtsc(struct domain *d, u64 tsc) +{ + if ( !is_hvm_domain(d) ) + tsc = max_t(s64, tsc - d->arch.vtsc_offset, 0); + return scale_delta(tsc, &d->arch.ns_to_vtsc); +} + void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp) { s_time_t now = get_s_time(); struct domain *d = v->domain; - u64 delta; spin_lock(&d->arch.vtsc_lock); @@ -1611,8 +1624,7 @@ void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp) spin_unlock(&d->arch.vtsc_lock); - delta = max_t(s64, now - d->arch.vtsc_offset, 0); - now = scale_delta(delta, &d->arch.ns_to_vtsc); + now = gtime_to_gtsc(d, now); regs->eax = (uint32_t)now; regs->edx = (uint32_t)(now >> 32); @@ -1753,8 +1765,10 @@ void tsc_set_info(struct domain *d, d->arch.vtsc_offset = get_s_time() - elapsed_nsec; d->arch.tsc_khz = gtsc_khz ? 
gtsc_khz : cpu_khz; set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 ); - /* use native TSC if initial host has safe TSC and not migrated yet */ - if ( host_tsc_is_safe() && incarnation == 0 ) + /* use native TSC if initial host has safe TSC, has not migrated + * yet and tsc_khz == cpu_khz */ + if ( host_tsc_is_safe() && incarnation == 0 && + d->arch.tsc_khz == cpu_khz ) d->arch.vtsc = 0; else d->arch.ns_to_vtsc = scale_reciprocal(d->arch.vtsc_to_ns); @@ -1779,7 +1793,7 @@ void tsc_set_info(struct domain *d, } d->arch.incarnation = incarnation + 1; if ( is_hvm_domain(d) ) - hvm_set_rdtsc_exiting(d, d->arch.vtsc || hvm_gtsc_need_scale(d)); + hvm_set_rdtsc_exiting(d, d->arch.vtsc); } /* vtsc may incur measurable performance degradation, diagnose with this */ |