 xen/arch/x86/acpi/power.c          |   2
 xen/arch/x86/cpu/amd.c             |   4
 xen/arch/x86/cpu/intel.c           |   2
 xen/arch/x86/hvm/hvm.c             |  38
 xen/arch/x86/hvm/svm/svm.c         |  39
 xen/arch/x86/hvm/vmx/vmcs.c        | 102
 xen/arch/x86/hvm/vmx/vmx.c         |  26
 xen/arch/x86/smpboot.c             |  33
 xen/include/asm-x86/hvm/hvm.h      |   2
 xen/include/asm-x86/hvm/support.h  |   2
 xen/include/asm-x86/hvm/vmx/vmcs.h |   1
 11 files changed, 157 insertions(+), 94 deletions(-)
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 4eb9e30254..90a8fff470 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -198,7 +198,7 @@ static int enter_state(u32 state)
local_irq_restore(flags);
console_end_sync();
acpi_sleep_post(state);
- if ( !hvm_cpu_up() )
+ if ( hvm_cpu_up() )
BUG();
enable_cpu:
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 67a3352dbb..77dc1d04df 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -13,8 +13,6 @@
#include "cpu.h"
#include "amd.h"
-void start_svm(struct cpuinfo_x86 *c);
-
/*
* Pre-canned values for overriding the CPUID features
* and extended features masks.
@@ -516,8 +514,6 @@ static void __devinit init_amd(struct cpuinfo_x86 *c)
disable_c1_ramping();
set_cpuidmask(c);
-
- start_svm(c);
}
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 5df0ec198f..0022adfd62 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -218,8 +218,6 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
(cpuid_eax(0x00000006) & (1u<<2)))
set_bit(X86_FEATURE_ARAT, c->x86_capability);
- start_vmx();
-
if ( !use_xsave )
clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a6f1be9d0a..022b4b79b1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -100,12 +100,35 @@ static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
-void hvm_enable(struct hvm_function_table *fns)
+static int __init hvm_enable(void)
{
+ extern struct hvm_function_table *start_svm(void);
+ extern struct hvm_function_table *start_vmx(void);
extern int hvm_port80_allowed;
- BUG_ON(hvm_enabled);
- printk("HVM: %s enabled\n", fns->name);
+ struct hvm_function_table *fns = NULL;
+
+ switch ( boot_cpu_data.x86_vendor )
+ {
+ case X86_VENDOR_INTEL:
+ fns = start_vmx();
+ break;
+ case X86_VENDOR_AMD:
+ fns = start_svm();
+ break;
+ default:
+ break;
+ }
+
+ if ( fns == NULL )
+ return 0;
+
+ hvm_funcs = *fns;
+ hvm_enabled = 1;
+
+ printk("HVM: %s enabled\n", hvm_funcs.name);
+ if ( hvm_funcs.hap_supported )
+ printk("HVM: Hardware Assisted Paging detected.\n");
/*
* Allow direct access to the PC debug ports 0x80 and 0xed (they are
@@ -116,14 +139,11 @@ void hvm_enable(struct hvm_function_table *fns)
__clear_bit(0x80, hvm_io_bitmap);
__clear_bit(0xed, hvm_io_bitmap);
- hvm_funcs = *fns;
- hvm_enabled = 1;
-
- if ( hvm_funcs.hap_supported )
- printk("HVM: Hardware Assisted Paging detected.\n");
-
register_cpu_notifier(&cpu_nfb);
+
+ return 0;
}
+presmp_initcall(hvm_enable);
/*
* Need to re-inject a given event? We avoid re-injecting software exceptions
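
The hvm.c hunk above registers hvm_enable() via presmp_initcall() instead of
having the vendor code call back into it. As a rough illustration of what that
buys us, the sketch below shows the general shape of such an initcall scheme:
the registered function lands in a dedicated linker section which the boot CPU
walks once, before any secondary CPU is started, so hvm_funcs is fully
populated by the time an AP reaches hvm_cpu_up(). Section and symbol names
below are made up for illustration; they are not the actual definitions from
xen/include/xen/init.h.

    /* Illustrative sketch only -- not the real Xen initcall macros. */
    typedef int (*initcall_t)(void);

    #define presmp_initcall(fn)                                           \
        static initcall_t __initcall_##fn                                 \
        __attribute__((__used__, __section__(".presmp.initcall"))) = (fn)

    /* Hypothetical linker-script symbols bounding that section. */
    extern initcall_t __presmp_start[], __presmp_end[];

    /* Walked once by the boot CPU, before APs are brought up. */
    static void do_presmp_initcalls(void)
    {
        initcall_t *call;

        for ( call = __presmp_start; call < __presmp_end; call++ )
            (*call)();  /* hvm_enable() runs here */
    }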
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 1c968b8d5e..558d237f04 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -856,22 +856,23 @@ static void svm_init_erratum_383(struct cpuinfo_x86 *c)
amd_erratum383_found = 1;
}
-static int svm_cpu_up(struct cpuinfo_x86 *c)
+static int svm_cpu_up(void)
{
u32 eax, edx, phys_hsa_lo, phys_hsa_hi;
u64 phys_hsa;
- int cpu = smp_processor_id();
+ int rc, cpu = smp_processor_id();
+ struct cpuinfo_x86 *c = &cpu_data[cpu];
/* Check whether SVM feature is disabled in BIOS */
rdmsr(MSR_K8_VM_CR, eax, edx);
if ( eax & K8_VMCR_SVME_DISABLE )
{
printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
- return 0;
+ return -EINVAL;
}
- if ( svm_cpu_up_prepare(cpu) != 0 )
- return 0;
+ if ( (rc = svm_cpu_up_prepare(cpu)) != 0 )
+ return rc;
write_efer(read_efer() | EFER_SVME);
@@ -905,39 +906,26 @@ static int svm_cpu_up(struct cpuinfo_x86 *c)
else
{
if ( cpu_has_lmsl )
- printk(XENLOG_WARNING "Inconsistent LMLSE support across CPUs!\n");
+ printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n");
cpu_has_lmsl = 0;
}
#endif
- return 1;
+ return 0;
}
-void start_svm(struct cpuinfo_x86 *c)
+struct hvm_function_table * __init start_svm(void)
{
- static bool_t bootstrapped;
-
- if ( test_and_set_bool(bootstrapped) )
- {
- if ( hvm_enabled && !svm_cpu_up(c) )
- {
- printk("SVM: FATAL: failed to initialise CPU%d!\n",
- smp_processor_id());
- BUG();
- }
- return;
- }
-
/* Xen does not fill x86_capability words except 0. */
boot_cpu_data.x86_capability[5] = cpuid_ecx(0x80000001);
if ( !test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability) )
- return;
+ return NULL;
- if ( !svm_cpu_up(c) )
+ if ( svm_cpu_up() )
{
printk("SVM: failed to initialise.\n");
- return;
+ return NULL;
}
setup_vmcb_dump();
@@ -949,7 +937,7 @@ void start_svm(struct cpuinfo_x86 *c)
svm_function_table.hap_1gb_pgtb =
(CONFIG_PAGING_LEVELS == 4)? !!(cpuid_edx(0x80000001) & 0x04000000):0;
- hvm_enable(&svm_function_table);
+ return &svm_function_table;
}
static void svm_do_nested_pgfault(paddr_t gpa)
@@ -1402,6 +1390,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
.cpu_dead = svm_cpu_dead,
+ .cpu_up = svm_cpu_up,
.cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
.domain_destroy = svm_domain_destroy,
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 83054e7cc3..a47befcc7b 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -112,7 +112,14 @@ static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr)
return ctl;
}
-static void vmx_init_vmcs_config(void)
+static bool_t cap_check(const char *name, u32 expected, u32 saw)
+{
+ if ( saw != expected )
+ printk("VMX %s: saw 0x%08x expected 0x%08x\n", name, saw, expected);
+ return saw != expected;
+}
+
+static int vmx_init_vmcs_config(void)
{
u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt;
u32 _vmx_pin_based_exec_control;
@@ -121,6 +128,7 @@ static void vmx_init_vmcs_config(void)
u8 ept_super_page_level_limit = 0;
u32 _vmx_vmexit_control;
u32 _vmx_vmentry_control;
+ bool_t mismatch = 0;
rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high);
@@ -243,27 +254,73 @@ static void vmx_init_vmcs_config(void)
else
{
/* Globals are already initialised: re-check them. */
- BUG_ON(vmcs_revision_id != vmx_basic_msr_low);
- BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
- BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
- BUG_ON(vmx_secondary_exec_control != _vmx_secondary_exec_control);
- BUG_ON(vmx_ept_super_page_level_limit > ept_super_page_level_limit);
- BUG_ON(vmx_vmexit_control != _vmx_vmexit_control);
- BUG_ON(vmx_vmentry_control != _vmx_vmentry_control);
- BUG_ON(cpu_has_vmx_ins_outs_instr_info !=
- !!(vmx_basic_msr_high & (1U<<22)));
+ mismatch |= cap_check(
+ "VMCS revision ID",
+ vmcs_revision_id, vmx_basic_msr_low);
+ mismatch |= cap_check(
+ "Pin-Based Exec Control",
+ vmx_pin_based_exec_control, _vmx_pin_based_exec_control);
+ mismatch |= cap_check(
+ "CPU-Based Exec Control",
+ vmx_cpu_based_exec_control, _vmx_cpu_based_exec_control);
+ mismatch |= cap_check(
+ "Secondary Exec Control",
+ vmx_secondary_exec_control, _vmx_secondary_exec_control);
+ mismatch |= cap_check(
+ "VMExit Control",
+ vmx_vmexit_control, _vmx_vmexit_control);
+ mismatch |= cap_check(
+ "VMEntry Control",
+ vmx_vmentry_control, _vmx_vmentry_control);
+ if ( vmx_ept_super_page_level_limit > ept_super_page_level_limit )
+ {
+ printk("EPT Super Page Limit: saw %u expected >= %u\n",
+ ept_super_page_level_limit, vmx_ept_super_page_level_limit);
+ mismatch = 1;
+ }
+ if ( cpu_has_vmx_ins_outs_instr_info !=
+ !!(vmx_basic_msr_high & (1U<<22)) )
+ {
+ printk("VMX INS/OUTS Instruction Info: saw %d expected %d\n",
+ !!(vmx_basic_msr_high & (1U<<22)),
+ cpu_has_vmx_ins_outs_instr_info);
+ mismatch = 1;
+ }
+ if ( mismatch )
+ {
+ printk("VMX: Capabilities fatally differ between CPU%d and CPU0\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
}
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
- BUG_ON((vmx_basic_msr_high & 0x1fff) > PAGE_SIZE);
+ if ( (vmx_basic_msr_high & 0x1fff) > PAGE_SIZE )
+ {
+ printk("VMX: CPU%d VMCS size is too big (%u bytes)\n",
+ smp_processor_id(), vmx_basic_msr_high & 0x1fff);
+ return -EINVAL;
+ }
#ifdef __x86_64__
/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
- BUG_ON(vmx_basic_msr_high & (1u<<16));
+ if ( vmx_basic_msr_high & (1u<<16) )
+ {
+ printk("VMX: CPU%d limits VMX structure pointers to 32 bits\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
#endif
/* Require Write-Back (WB) memory type for VMCS accesses. */
- BUG_ON(((vmx_basic_msr_high >> 18) & 15) != 6);
+ if ( ((vmx_basic_msr_high >> 18) & 15) != 6 )
+ {
+ printk("VMX: CPU%d has unexpected VMCS access type %u\n",
+ smp_processor_id(), (vmx_basic_msr_high >> 18) & 15);
+ return -EINVAL;
+ }
+
+ return 0;
}
static struct vmcs_struct *vmx_alloc_vmcs(void)
@@ -359,7 +416,7 @@ void vmx_cpu_dead(unsigned int cpu)
int vmx_cpu_up(void)
{
u32 eax, edx;
- int bios_locked, cpu = smp_processor_id();
+ int rc, bios_locked, cpu = smp_processor_id();
u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;
BUG_ON(!(read_cr4() & X86_CR4_VMXE));
@@ -375,7 +432,7 @@ int vmx_cpu_up(void)
{
printk("CPU%d: some settings of host CR0 are "
"not allowed in VMX operation.\n", cpu);
- return 0;
+ return -EINVAL;
}
rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
@@ -388,7 +445,7 @@ int vmx_cpu_up(void)
: IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX)) )
{
printk("CPU%d: VMX disabled by BIOS.\n", cpu);
- return 0;
+ return -EINVAL;
}
}
else
@@ -400,12 +457,13 @@ int vmx_cpu_up(void)
wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
}
- vmx_init_vmcs_config();
+ if ( (rc = vmx_init_vmcs_config()) != 0 )
+ return rc;
INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
- if ( vmx_cpu_up_prepare(cpu) != 0 )
- return 0;
+ if ( (rc = vmx_cpu_up_prepare(cpu)) != 0 )
+ return rc;
switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
{
@@ -419,12 +477,12 @@ int vmx_cpu_up(void)
"in your BIOS configuration?\n", cpu);
printk(" --> Disable TXT in your BIOS unless using a secure "
"bootloader.\n");
- return 0;
+ return -EINVAL;
}
/* fall through */
case -1: /* CF==1 or ZF==1 */
printk("CPU%d: unexpected VMXON failure\n", cpu);
- return 0;
+ return -EINVAL;
case 0: /* success */
break;
default:
@@ -438,7 +496,7 @@ int vmx_cpu_up(void)
if ( cpu_has_vmx_vpid )
vpid_sync_all();
- return 1;
+ return 0;
}
void vmx_cpu_down(void)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d1d82ab6fc..38741fef5e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1413,35 +1413,22 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.set_rdtsc_exiting = vmx_set_rdtsc_exiting
};
-void start_vmx(void)
+struct hvm_function_table * __init start_vmx(void)
{
- static bool_t bootstrapped;
-
vmx_save_host_msrs();
- if ( test_and_set_bool(bootstrapped) )
- {
- if ( hvm_enabled && !vmx_cpu_up() )
- {
- printk("VMX: FATAL: failed to initialise CPU%d!\n",
- smp_processor_id());
- BUG();
- }
- return;
- }
-
/* Xen does not fill x86_capability words except 0. */
boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
- return;
+ return NULL;
set_in_cr4(X86_CR4_VMXE);
- if ( !vmx_cpu_up() )
+ if ( vmx_cpu_up() )
{
printk("VMX: failed to initialise.\n");
- return;
+ return NULL;
}
if ( cpu_has_vmx_ept )
@@ -1450,12 +1437,11 @@ void start_vmx(void)
setup_ept_dump();
}
- vmx_function_table.hap_1gb_pgtb = ( vmx_ept_super_page_level_limit == 2 ) ?
- 1 : 0;
+ vmx_function_table.hap_1gb_pgtb = (vmx_ept_super_page_level_limit == 2);
setup_vmcs_dump();
- hvm_enable(&vmx_function_table);
+ return &vmx_function_table;
}
/*
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index caddfad492..e548375049 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -71,7 +71,8 @@ u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = -1U };
static void map_cpu_to_logical_apicid(void);
-enum cpu_state {
+static int cpu_error;
+static enum cpu_state {
CPU_STATE_DEAD = 0, /* slave -> master: I am completely dead */
CPU_STATE_INIT, /* master -> slave: Early bringup phase 1 */
CPU_STATE_CALLOUT, /* master -> slave: Early bringup phase 2 */
@@ -133,7 +134,8 @@ static void smp_store_cpu_info(int id)
void smp_callin(void)
{
- int i;
+ unsigned int cpu = smp_processor_id();
+ int i, rc;
/* Wait 2s total for startup. */
Dprintk("Waiting for CALLOUT.\n");
@@ -155,7 +157,16 @@ void smp_callin(void)
map_cpu_to_logical_apicid();
/* Save our processor parameters. */
- smp_store_cpu_info(smp_processor_id());
+ smp_store_cpu_info(cpu);
+
+ if ( (rc = hvm_cpu_up()) != 0 )
+ {
+ extern void (*dead_idle) (void);
+ printk("CPU%d: Failed to initialise HVM. Not coming online.\n", cpu);
+ cpu_error = rc;
+ cpu_exit_clear(cpu);
+ (*dead_idle)();
+ }
/* Allow the master to continue. */
set_cpu_state(CPU_STATE_CALLIN);
@@ -507,7 +518,7 @@ int alloc_cpu_id(void)
static int do_boot_cpu(int apicid, int cpu)
{
unsigned long boot_error;
- int timeout;
+ int timeout, rc = 0;
unsigned long start_eip;
/*
@@ -548,8 +559,8 @@ static int do_boot_cpu(int apicid, int cpu)
/* Wait 5s total for a response. */
for ( timeout = 0; timeout < 50000; timeout++ )
{
- if ( cpu_state == CPU_STATE_CALLIN )
- break; /* It has booted */
+ if ( cpu_state != CPU_STATE_CALLOUT )
+ break;
udelay(100);
}
@@ -560,6 +571,11 @@ static int do_boot_cpu(int apicid, int cpu)
print_cpu_info(cpu);
Dprintk("CPU has booted.\n");
}
+ else if ( cpu_state == CPU_STATE_DEAD )
+ {
+ rmb();
+ rc = cpu_error;
+ }
else
{
boot_error = 1;
@@ -575,7 +591,10 @@ static int do_boot_cpu(int apicid, int cpu)
}
if ( boot_error )
+ {
cpu_exit_clear(cpu);
+ rc = -EIO;
+ }
/* mark "stuck" area as not stuck */
bootsym(trampoline_cpu_started) = 0;
@@ -583,7 +602,7 @@ static int do_boot_cpu(int apicid, int cpu)
smpboot_restore_warm_reset_vector();
- return boot_error ? -EIO : 0;
+ return rc;
}
void cpu_exit_clear(unsigned int cpu)
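
The two smpboot.c hunks above form a small handshake: the failing AP records
the reason in cpu_error before parking itself, and the boot CPU picks that
value up once it observes the CPU_STATE_DEAD transition. A condensed sketch of
that flow follows (simplified; the wmb() stands in for whatever ordering
cpu_exit_clear()/set_cpu_state() already provide on the AP side).

    /* AP side, in smp_callin() -- condensed from the hunk above. */
    if ( (rc = hvm_cpu_up()) != 0 )
    {
        cpu_error = rc;       /* publish the reason ...                  */
        wmb();                /* ... before the DEAD state is visible    */
        cpu_exit_clear(cpu);  /* leaves cpu_state == CPU_STATE_DEAD      */
        (*dead_idle)();       /* park this CPU; does not return          */
    }

    /* BSP side, in do_boot_cpu(), after the startup wait loop. */
    if ( cpu_state == CPU_STATE_DEAD )
    {
        rmb();                /* pairs with the AP's publication         */
        rc = cpu_error;       /* becomes do_boot_cpu()'s return value    */
    }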
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 8d36e4c9f0..66637cc3d5 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -299,7 +299,7 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
static inline int hvm_cpu_up(void)
{
- return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1);
+ return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
}
static inline void hvm_cpu_down(void)
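
Seen from the generic side, the hvm.h hunk above makes "no hook" equivalent to
"nothing to do" under the new 0-on-success convention, reached through the
vendor table's .cpu_up hook (installed for SVM in the svm.c hunk above). An
abridged sketch of the resulting dispatch is below; the struct is trimmed to
the fields relevant here and the helper name copy_vendor_table is invented for
illustration.

    /* Abridged -- not the full struct hvm_function_table. */
    struct hvm_function_table {
        char *name;
        int  (*cpu_up_prepare)(unsigned int cpu);
        void (*cpu_dead)(unsigned int cpu);
        int  (*cpu_up)(void);      /* 0 on success, -Exxx on error */
        void (*cpu_down)(void);
        /* ... remaining members unchanged ... */
    };

    struct hvm_function_table hvm_funcs;

    /* hvm_enable() copies the vendor table once on the boot CPU ... */
    static void copy_vendor_table(const struct hvm_function_table *fns)
    {
        hvm_funcs = *fns;          /* fns from start_svm()/start_vmx() */
    }

    /* ... so every later bring-up is a single indirect call: */
    static int hvm_cpu_up_sketch(void)
    {
        return hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0;
    }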
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 6d64927868..b2267fe6d5 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -66,8 +66,6 @@ extern unsigned int opt_hvm_debug_level;
extern unsigned long hvm_io_bitmap[];
-void hvm_enable(struct hvm_function_table *);
-
enum hvm_copy_result {
HVMCOPY_okay = 0,
HVMCOPY_bad_gva_to_gfn,
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 8e73573080..389531b79d 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -23,7 +23,6 @@
#include <asm/hvm/io.h>
#include <asm/hvm/vpmu.h>
-extern void start_vmx(void);
extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
extern int vmx_cpu_up_prepare(unsigned int cpu);