author      Jan Beulich <jbeulich@suse.com>   2012-09-28 09:26:46 +0200
committer   Jan Beulich <jbeulich@suse.com>   2012-09-28 09:26:46 +0200
commit      24c152c7fec0cf0d788500482cfed09c4a0c527d (patch)
tree        b53d2f99e13a4578261bd79351fe6ffd2f6ca002 /xen/arch/x86/cpu
parent      c22039af8eeb0adfd4c0047aa1fbde43a0e8ef80 (diff)
x86: remove further code applicable to 32-bit CPUs only
On the AMD side, anything prior to family 0xf can now be ignored, as can very low model numbers of family 6 on the Intel side. Apart from that, several made-up CPU features turned out to be entirely unused throughout the tree.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
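For reference, the c->x86, c->x86_model and c->x86_mask values tested by the removed checks are the family, model and stepping fields of CPUID leaf 1, with the extended family/model fields folded in the same way Xen's and Linux's common CPU detection does. Below is a minimal stand-alone user-space sketch of that decoding; it is an illustration only, not part of this patch or of Xen.

/*
 * Illustration only: decode family/model/stepping from CPUID leaf 1
 * the way c->x86 / c->x86_model / c->x86_mask are derived.
 * Build on x86 with: gcc -O2 -o cpusig cpusig.c
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family   = (eax >> 8) & 0xf;	/* base family */
	model    = (eax >> 4) & 0xf;	/* base model */
	stepping = eax & 0xf;		/* stepping (c->x86_mask) */

	/* Extended family is added for family 0xf ... */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	/* ... and extended model is folded in for family 6 and up. */
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;

	printf("family 0x%x model 0x%x stepping %u\n", family, model, stepping);
	return 0;
}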
Diffstat (limited to 'xen/arch/x86/cpu')
-rw-r--r--   xen/arch/x86/cpu/amd.c     47
-rw-r--r--   xen/arch/x86/cpu/intel.c   36
2 files changed, 3 insertions, 80 deletions
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 4ac7e71efc..c95349f52b 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -180,7 +180,7 @@ static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
if (c->x86 >= 0x10) {
wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
- } else if (c->x86 == 0x0f) {
+ } else {
wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
}
@@ -234,14 +234,7 @@ int cpu_has_amd_erratum(const struct cpuinfo_x86 *cpu, int osvw_id, ...)
/* Can this system suffer from TSC drift due to C1 clock ramping? */
static int c1_ramping_may_cause_clock_drift(struct cpuinfo_x86 *c)
{
- if (c->x86 < 0xf) {
- /*
- * TSC drift doesn't exist on 7th Gen or less
- * However, OS still needs to consider effects
- * of P-state changes on TSC
- */
- return 0;
- } else if (cpuid_edx(0x80000007) & (1<<8)) {
+ if (cpuid_edx(0x80000007) & (1<<8)) {
/*
* CPUID.AdvPowerMgmtInfo.TscInvariant
* EDX bit 8, 8000_0007
@@ -416,41 +409,7 @@ static void __devinit init_amd(struct cpuinfo_x86 *c)
switch(c->x86)
{
- case 6: /* An Athlon/Duron */
-
- /* Bit 15 of Athlon specific MSR 15, needs to be 0
- * to enable SSE on Palomino/Morgan/Barton CPU's.
- * If the BIOS didn't enable it already, enable it here.
- */
- if (c->x86_model >= 6 && c->x86_model <= 10) {
- if (!cpu_has(c, X86_FEATURE_XMM)) {
- printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
- rdmsr(MSR_K7_HWCR, l, h);
- l &= ~0x00008000;
- wrmsr(MSR_K7_HWCR, l, h);
- set_bit(X86_FEATURE_XMM, c->x86_capability);
- }
- }
-
- /* It's been determined by AMD that Athlons since model 8 stepping 1
- * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
- * As per AMD technical note 27212 0.2
- */
- if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
- rdmsr(MSR_K7_CLK_CTL, l, h);
- if ((l & 0xfff00000) != 0x20000000) {
- printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
- ((l & 0x000fffff)|0x20000000));
- wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
- }
- }
- set_bit(X86_FEATURE_K7, c->x86_capability);
- break;
-
- case 0xf:
- /* Use K8 tuning for Fam10h and Fam11h */
- case 0x10 ... 0x17:
- set_bit(X86_FEATURE_K8, c->x86_capability);
+ case 0xf ... 0x17:
disable_c1e(NULL);
if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
pv_post_outb_hook = check_disable_c1e;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 3b04e018a7..9b71d36d9e 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -192,7 +192,6 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
static void __devinit init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
- char *p = NULL;
/* Detect the extended topology information if available */
detect_extended_topology(c);
@@ -210,37 +209,6 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
clear_bit(X86_FEATURE_SEP, c->x86_capability);
- /* Names for the Pentium II/Celeron processors
- detectable only by also checking the cache size.
- Dixon is NOT a Celeron. */
- if (c->x86 == 6) {
- switch (c->x86_model) {
- case 5:
- if (c->x86_mask == 0) {
- if (l2 == 0)
- p = "Celeron (Covington)";
- else if (l2 == 256)
- p = "Mobile Pentium II (Dixon)";
- }
- break;
-
- case 6:
- if (l2 == 128)
- p = "Celeron (Mendocino)";
- else if (c->x86_mask == 0 || c->x86_mask == 5)
- p = "Celeron-A";
- break;
-
- case 8:
- if (l2 == 128)
- p = "Celeron (Coppermine)";
- break;
- }
- }
-
- if ( p )
- safe_strcpy(c->x86_model_id, p);
-
if ( !cpu_has(c, X86_FEATURE_XTOPOLOGY) )
{
c->x86_max_cores = num_cpu_cores(c);
@@ -275,10 +243,6 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
}
#endif
- if (c->x86 == 15)
- set_bit(X86_FEATURE_P4, c->x86_capability);
- if (c->x86 == 6)
- set_bit(X86_FEATURE_P3, c->x86_capability);
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
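For context on the second amd.c hunk: with pre-family-0xf parts gone, c1_ramping_may_cause_clock_drift() only needs the invariant-TSC check that the patch keeps, i.e. CPUID leaf 0x80000007, EDX bit 8 (CPUID.AdvPowerMgmtInfo.TscInvariant). A minimal stand-alone sketch of reading that bit follows; again an illustration only, not Xen code.

/*
 * Illustration only: report whether the TSC is invariant across
 * P-/C-state changes (CPUID leaf 0x80000007, EDX bit 8).
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() fails if the extended leaf is not supported. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}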