diff options
author | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2007-07-19 13:23:33 +0100 |
---|---|---|
committer | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2007-07-19 13:23:33 +0100 |
commit | de46483fe43c8b1b19d60026ae88ed9a6028b856 (patch) | |
tree | 39934a43ac831fc1b3a9e7575db94b9e5ff74341 | |
parent | d3faf9badf52a00fb3c1da2bfbb49228ffca075a (diff) | |
download | xen-de46483fe43c8b1b19d60026ae88ed9a6028b856.tar.gz xen-de46483fe43c8b1b19d60026ae88ed9a6028b856.tar.bz2 xen-de46483fe43c8b1b19d60026ae88ed9a6028b856.zip |
SMP support for Xen PM.
Add SMP sleep support to Xen. One new utility is created
to allow a vcpu to continue its previous execution after migration
to a new processor.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r-- | xen/arch/x86/acpi/power.c | 42 | ||||
-rw-r--r-- | xen/arch/x86/cpu/intel_cacheinfo.c | 4 | ||||
-rw-r--r-- | xen/arch/x86/domain.c | 49 | ||||
-rw-r--r-- | xen/include/asm-x86/domain.h | 8 | ||||
-rw-r--r-- | xen/include/asm-x86/smp.h | 2 |
5 files changed, 95 insertions, 10 deletions
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c index 54ac729a89..09f11d0bb9 100644 --- a/xen/arch/x86/acpi/power.c +++ b/xen/arch/x86/acpi/power.c @@ -119,19 +119,25 @@ static int enter_state(u32 state) if (state <= ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return -EINVAL; - /* Sync lazy state on ths cpu */ __sync_lazy_execstate(); pmprintk(XENLOG_INFO, "Flush lazy state\n"); if (!spin_trylock(&pm_lock)) return -EBUSY; + pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n", + acpi_states[state]); + freeze_domains(); - hvm_cpu_down(); + disable_nonboot_cpus(); + if (num_online_cpus() != 1) + { + error = -EBUSY; + goto Enable_cpu; + } - pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n", - acpi_states[state]); + hvm_cpu_down(); acpi_sleep_prepare(state); @@ -169,16 +175,31 @@ static int enter_state(u32 state) if ( !hvm_cpu_up() ) BUG(); + Enable_cpu: + enable_nonboot_cpus(); + thaw_domains(); spin_unlock(&pm_lock); return error; } +static void acpi_power_off(void) +{ + pmprintk(XENLOG_INFO, "%s called\n", __FUNCTION__); + local_irq_disable(); + /* Some SMP machines only can poweroff in boot CPU */ + acpi_enter_sleep_state(ACPI_STATE_S5); +} + +static long enter_state_helper(void *data) +{ + struct acpi_sleep_info *sinfo = (struct acpi_sleep_info *)data; + return enter_state(sinfo->sleep_state); +} + /* * Dom0 issues this hypercall in place of writing pm1a_cnt. Xen then * takes over the control and put the system into sleep state really. - * Also video flags and mode are passed here, in case user may use - * "acpi_sleep=***" for video resume. * * Guest may issue a two-phases write to PM1x_CNT, to work * around poorly implemented hardware. It's better to keep @@ -216,7 +237,14 @@ int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep) acpi_sinfo.pm1b_cnt_val = sleep->pm1b_cnt_val; acpi_sinfo.sleep_state = sleep->sleep_state; - return enter_state(acpi_sinfo.sleep_state); + /* ACPI power-off method. 
*/ + if ( acpi_sinfo.sleep_state == ACPI_STATE_S5 ) + { + for ( ; ; ) + acpi_power_off(); + } + + return continue_hypercall_on_cpu(0, enter_state_helper, &acpi_sinfo); } static int acpi_get_wake_status(void) diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c index f309467e29..f7a35439a5 100644 --- a/xen/arch/x86/cpu/intel_cacheinfo.c +++ b/xen/arch/x86/cpu/intel_cacheinfo.c @@ -17,7 +17,7 @@ struct _cache_table }; /* all the cache descriptor types we care about (no TLB or trace cache entries) */ -static struct _cache_table cache_table[] __initdata = +static struct _cache_table cache_table[] __devinitdata = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ @@ -60,7 +60,7 @@ static struct _cache_table cache_table[] __initdata = { 0x00, 0, 0} }; -unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c) +unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) { unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 061f0c7976..f3241ff160 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -82,6 +82,7 @@ static void play_dead(void) __cpu_disable(); /* This must be done before dead CPU ack */ cpu_exit_clear(); + hvm_cpu_down(); wbinvd(); mb(); /* Ack it */ @@ -1363,6 +1364,54 @@ void sync_vcpu_execstate(struct vcpu *v) flush_tlb_mask(v->vcpu_dirty_cpumask); } +struct migrate_info { + long (*func)(void *data); + void *data; + void (*saved_schedule_tail)(struct vcpu *); + cpumask_t saved_affinity; +}; + +static void continue_hypercall_on_cpu_helper(struct vcpu *v) +{ + struct cpu_user_regs *regs = guest_cpu_user_regs(); + struct migrate_info *info = v->arch.continue_info; + + regs->eax = info->func(info->data); + + v->arch.schedule_tail = info->saved_schedule_tail; + v->cpu_affinity = info->saved_affinity; + + xfree(info); + 
v->arch.continue_info = NULL; + + vcpu_set_affinity(v, &v->cpu_affinity); + schedule_tail(v); +} + +int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data) +{ + struct vcpu *v = current; + struct migrate_info *info; + cpumask_t mask = cpumask_of_cpu(cpu); + + info = xmalloc(struct migrate_info); + if ( info == NULL ) + return -ENOMEM; + + info->func = func; + info->data = data; + info->saved_schedule_tail = v->arch.schedule_tail; + v->arch.schedule_tail = continue_hypercall_on_cpu_helper; + + info->saved_affinity = v->cpu_affinity; + v->arch.continue_info = info; + + vcpu_set_affinity(v, &mask); + schedule_tail(v); + + return 0; +} + #define next_arg(fmt, args) ({ \ unsigned long __arg; \ switch ( *(fmt)++ ) \ diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index 8f5a761051..22de4c60e9 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -268,6 +268,9 @@ struct arch_vcpu void (*ctxt_switch_from) (struct vcpu *); void (*ctxt_switch_to) (struct vcpu *); + /* Record information required to continue execution after migration */ + void *continue_info; + /* Bounce information for propagating an exception to guest OS. */ struct trap_bounce trap_bounce; @@ -311,10 +314,13 @@ struct arch_vcpu unsigned long vcpu_info_mfn; } __cacheline_aligned; -/* shorthands to improve code legibility */ +/* Shorthands to improve code legibility. */ #define hvm_vmx hvm_vcpu.u.vmx #define hvm_svm hvm_vcpu.u.svm +/* Continue the current hypercall via func(data) on specified cpu. 
*/ +int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data); + #endif /* __ASM_DOMAIN_H__ */ /* diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h index 83a48f3ea1..9b36a64437 100644 --- a/xen/include/asm-x86/smp.h +++ b/xen/include/asm-x86/smp.h @@ -66,6 +66,8 @@ extern void disable_nonboot_cpus(void); extern void enable_nonboot_cpus(void); #else static inline int cpu_is_offline(int cpu) {return 0;} +static inline void disable_nonboot_cpus(void) {} +static inline void enable_nonboot_cpus(void) {} #endif /* |