aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2011-09-17 16:31:01 +0100
committerKeir Fraser <keir@xen.org>2011-09-17 16:31:01 +0100
commit3233d7686ff2d6d585dd1517696f322ce87cb578 (patch)
treeddd9ad621b54cd91c50c45b55513bb62d48c5601
parentaadf7d716f067c03df9d2339e3c2a271fd8b5165 (diff)
downloadxen-3233d7686ff2d6d585dd1517696f322ce87cb578.tar.gz
xen-3233d7686ff2d6d585dd1517696f322ce87cb578.tar.bz2
xen-3233d7686ff2d6d585dd1517696f322ce87cb578.zip
x86: Couple of small cleanups after cpuid faulting patch.
Signed-off-by: Keir Fraser <keir@xen.org> xen-unstable changeset: 23655:7e4404a8f5f9 xen-unstable date: Mon Jul 04 07:57:32 2011 +0100 x86: Remove redundant cpuid-faulting-related BUG_ON I added. Signed-off-by: Keir Fraser <keir@xen.org> xen-unstable changeset: 23654:122fcf37c596 xen-unstable date: Sat Jul 02 09:08:27 2011 +0100 x86: cpuid faulting feature enable Recent Intel processors add a cpuid faulting feature. This patch adds support for cpuid faulting in Xen. Like cpuid spoofing, cpuid faulting is mainly used to support live migration. When cpuid faulting is enabled, a cpuid instruction executed at cpl>0 produces a GP fault; the vmm then emulates execution of the cpuid instruction. Hence guest software sees the value chosen by the vmm. Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com> Signed-off-by: Keir Fraser <keir@xen.org> xen-unstable changeset: 23653:71b58748cfee xen-unstable date: Fri Jul 01 22:28:53 2011 +0100
-rw-r--r--xen/arch/x86/cpu/cpu.h1
-rw-r--r--xen/arch/x86/cpu/intel.c35
-rw-r--r--xen/arch/x86/domain.c3
-rw-r--r--xen/arch/x86/traps.c12
-rw-r--r--xen/include/asm-x86/cpufeature.h3
-rw-r--r--xen/include/asm-x86/msr-index.h14
-rw-r--r--xen/include/asm-x86/processor.h2
7 files changed, 60 insertions, 10 deletions
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index bdecc26631..a696859ff6 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -30,4 +30,3 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c);
extern void generic_identify(struct cpuinfo_x86 * c);
extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index ed54c4168d..0bfb883c4b 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -27,6 +27,30 @@ extern int trap_init_f00f_bug(void);
struct movsl_mask movsl_mask __read_mostly;
#endif
+static unsigned int probe_intel_cpuid_faulting(void)
+{
+ uint64_t x;
+ return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) && (x & (1u<<31));
+}
+
+static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+void set_cpuid_faulting(bool_t enable)
+{
+ uint32_t hi, lo;
+
+ if (!cpu_has_cpuid_faulting ||
+ this_cpu(cpuid_faulting_enabled) == enable )
+ return;
+
+ rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+ lo &= ~1;
+ if (enable)
+ lo |= 1;
+ wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+
+ this_cpu(cpuid_faulting_enabled) = enable;
+}
+
/*
* opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
* For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
@@ -218,7 +242,16 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
detect_ht(c);
}
- set_cpuidmask(c);
+ if (smp_processor_id() == 0) {
+ if (probe_intel_cpuid_faulting())
+ set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
+ BUG_ON(!probe_intel_cpuid_faulting());
+ set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ }
+
+ if (!cpu_has_cpuid_faulting)
+ set_cpuidmask(c);
/* Work around errata */
Intel_errata_workarounds(c);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 5791925675..aa8baba855 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1519,6 +1519,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
load_LDT(next);
load_segments(next);
}
+
+ set_cpuid_faulting(!is_hvm_vcpu(next) &&
+ (next->domain->domain_id != 0));
}
context_saved(prev);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 504bb08015..6f8a6ff4de 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2092,11 +2092,13 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
twobyte_opcode:
/*
- * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
- * are executable only from guest kernel mode (virtual ring 0).
+ * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
+ * and CPUID (0xa2), are executable only from guest kernel mode
+ * (virtual ring 0).
*/
opcode = insn_fetch(u8, code_base, eip, code_limit);
- if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
+ if ( !guest_kernel_mode(v, regs) &&
+ (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
goto fail;
if ( lock && (opcode & ~3) != 0x20 )
@@ -2531,6 +2533,10 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
}
break;
+ case 0xa2: /* CPUID */
+ pv_cpuid(regs);
+ break;
+
default:
goto fail;
}
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 7670da6cf5..876f01eba9 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -79,6 +79,7 @@
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
#define X86_FEATURE_XTOPOLOGY (3*32+13) /* cpu topology enum extensions */
+#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -217,6 +218,8 @@
#define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP)
+#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
+
#endif /* __ASM_I386_CPUFEATURE_H */
/*
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 316a1931cd..ea1df3aac8 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -156,11 +156,6 @@
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
-/* MSRs for Intel cpuid feature mask */
-#define MSR_INTEL_CPUID_FEATURE_MASK 0x00000478
-#define MSR_INTEL_CPUID1_FEATURE_MASK 0x00000130
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
-
/* MSRs & bits used for VMX enabling */
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_VMX_PINBASED_CTLS 0x481
@@ -472,6 +467,15 @@
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+/* Intel cpuid spoofing MSRs */
+#define MSR_INTEL_CPUID_FEATURE_MASK 0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK 0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
+
+/* Intel cpuid faulting MSRs */
+#define MSR_INTEL_PLATFORM_INFO 0x000000ce
+#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
+
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 48b84afdfe..bd800b7edc 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -193,6 +193,8 @@ extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data boot_cpu_data
#endif
+extern void set_cpuid_faulting(bool_t enable);
+
extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];