From 4b46e7be783df641b2889e514e85643febd378c2 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Fri, 11 Oct 2013 09:30:31 +0200
Subject: x86: use {rd,wr}{fs,gs}base when available

... as being intended to be faster than MSR reads/writes.

In the case of emulate_privileged_op() also use these in favor of the
cached (but possibly stale) addresses from arch.pv_vcpu. This allows
entirely removing the code that was the subject of XSA-67.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
---
 xen/include/asm-x86/domain.h |  6 ++---
 xen/include/asm-x86/msr.h    | 56 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 3 deletions(-)

(limited to 'xen/include')

diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 909f4490e5..e42651ef7f 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -457,12 +457,12 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);
     (((v)->arch.pv_vcpu.ctrlreg[4]                          \
       | (mmu_cr4_features                                   \
          & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP |      \
-            X86_CR4_OSXSAVE))                               \
+            X86_CR4_OSXSAVE | X86_CR4_FSGSBASE))            \
       | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0))         \
      & ~X86_CR4_DE)
 #define real_cr4_to_pv_guest_cr4(c)                         \
-    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD        \
-             | X86_CR4_OSXSAVE | X86_CR4_SMEP))
+    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD |      \
+             X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE))
 
 void domain_cpuid(struct domain *d,
                   unsigned int  input,
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 06c46a8bd2..5277c2b6fb 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -9,6 +9,7 @@
 #include <xen/types.h>
 #include <xen/percpu.h>
 #include <xen/errno.h>
+#include <asm/cpufeature.h>
 
 #define rdmsr(msr,val1,val2) \
      __asm__ __volatile__("rdmsr" \
@@ -97,6 +98,61 @@ static inline int wrmsr_safe(unsigned int msr, uint64_t val)
                           : "=a" (low), "=d" (high) \
                           : "c" (counter))
 
+static inline unsigned long rdfsbase(void)
+{
+    unsigned long base;
+
+    if ( cpu_has_fsgsbase )
+#ifdef HAVE_GAS_FSGSBASE
+        asm volatile ( "rdfsbase %0" : "=r" (base) );
+#else
+        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc0" : "=a" (base) );
+#endif
+    else
+        rdmsrl(MSR_FS_BASE, base);
+
+    return base;
+}
+
+static inline unsigned long rdgsbase(void)
+{
+    unsigned long base;
+
+    if ( cpu_has_fsgsbase )
+#ifdef HAVE_GAS_FSGSBASE
+        asm volatile ( "rdgsbase %0" : "=r" (base) );
+#else
+        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc8" : "=a" (base) );
+#endif
+    else
+        rdmsrl(MSR_GS_BASE, base);
+
+    return base;
+}
+
+static inline void wrfsbase(unsigned long base)
+{
+    if ( cpu_has_fsgsbase )
+#ifdef HAVE_GAS_FSGSBASE
+        asm volatile ( "wrfsbase %0" :: "r" (base) );
+#else
+        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd0" :: "a" (base) );
+#endif
+    else
+        wrmsrl(MSR_FS_BASE, base);
+}
+
+static inline void wrgsbase(unsigned long base)
+{
+    if ( cpu_has_fsgsbase )
+#ifdef HAVE_GAS_FSGSBASE
+        asm volatile ( "wrgsbase %0" :: "r" (base) );
+#else
+        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8" :: "a" (base) );
+#endif
+    else
+        wrmsrl(MSR_GS_BASE, base);
+}
 DECLARE_PER_CPU(u64, efer);
 u64 read_efer(void);
 
-- 
cgit v1.2.3