aboutsummaryrefslogtreecommitdiffstats
path: root/xen/common/cpu.c
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-05-14 17:07:52 +0100
committerKeir Fraser <keir.fraser@citrix.com>2010-05-14 17:07:52 +0100
commit27c31d386746bea3daa5b3733c9f986a9c3cccc3 (patch)
tree64ef675ed9b4c0836ab9076b7280e88a64f5bb16 /xen/common/cpu.c
parent2e4146ddc277d2ddd861646b8b1ba83182eb84bb (diff)
downloadxen-27c31d386746bea3daa5b3733c9f986a9c3cccc3.tar.gz
xen-27c31d386746bea3daa5b3733c9f986a9c3cccc3.tar.bz2
xen-27c31d386746bea3daa5b3733c9f986a9c3cccc3.zip
Move cpu hotplug routines into common cpu.c file.
Also simplify the locking (reverting to use of spin_trylock, as returning EBUSY/EAGAIN seems unavoidable after all). In particular this should continue to ensure that stop_machine_run() does not have cpu_online_map change under its feet. Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/common/cpu.c')
-rw-r--r--xen/common/cpu.c189
1 files changed, 176 insertions, 13 deletions
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 8a04dd449e..82a111de98 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -1,6 +1,9 @@
#include <xen/config.h>
#include <xen/cpumask.h>
#include <xen/cpu.h>
+#include <xen/event.h>
+#include <xen/sched.h>
+#include <xen/stop_machine.h>
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -26,35 +29,195 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
#endif
};
-DEFINE_SPINLOCK(cpu_add_remove_lock);
+static DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+bool_t get_cpu_maps(void)
+{
+ return spin_trylock_recursive(&cpu_add_remove_lock);
+}
+
+void put_cpu_maps(void)
+{
+ spin_unlock_recursive(&cpu_add_remove_lock);
+}
+
+bool_t cpu_hotplug_begin(void)
+{
+ return get_cpu_maps();
+}
+
+void cpu_hotplug_done(void)
+{
+ put_cpu_maps();
+}
static RAW_NOTIFIER_HEAD(cpu_chain);
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- spin_lock(&cpu_add_remove_lock);
+ if ( !spin_trylock(&cpu_add_remove_lock) )
+ BUG(); /* Should never fail as we are called only during boot. */
ret = raw_notifier_chain_register(&cpu_chain, nb);
spin_unlock(&cpu_add_remove_lock);
return ret;
}
-void unregister_cpu_notifier(struct notifier_block *nb)
+static int take_cpu_down(void *unused)
{
- spin_lock(&cpu_add_remove_lock);
- raw_notifier_chain_unregister(&cpu_chain, nb);
- spin_unlock(&cpu_add_remove_lock);
+ void *hcpu = (void *)(long)smp_processor_id();
+ if ( raw_notifier_call_chain(&cpu_chain, CPU_DYING, hcpu) != NOTIFY_DONE )
+ BUG();
+ return __cpu_disable();
+}
+
+int cpu_down(unsigned int cpu)
+{
+ int err, notifier_rc, nr_calls;
+ void *hcpu = (void *)(long)cpu;
+
+ if ( !cpu_hotplug_begin() )
+ return -EBUSY;
+
+ if ( (cpu == 0) || !cpu_online(cpu) )
+ {
+ cpu_hotplug_done();
+ return -EINVAL;
+ }
+
+ printk("Prepare to bring CPU%d down...\n", cpu);
+
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+ if ( notifier_rc != NOTIFY_DONE )
+ {
+ err = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ goto out;
+ }
+
+ if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
+ {
+ notifier_rc = raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_FAILED, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ goto out;
+ }
+
+ __cpu_die(cpu);
+ BUG_ON(cpu_online(cpu));
+
+ notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ out:
+ if ( !err )
+ {
+ printk("CPU %u is now offline\n", cpu);
+ send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ }
+ else
+ {
+ printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+ }
+ cpu_hotplug_done();
+ return err;
}
-int cpu_notifier_call_chain(unsigned long val, void *v)
+int cpu_up(unsigned int cpu)
{
- BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
- return raw_notifier_call_chain(&cpu_chain, val, v);
+ int notifier_rc, nr_calls, err = 0;
+ void *hcpu = (void *)(long)cpu;
+
+ if ( !cpu_hotplug_begin() )
+ return -EBUSY;
+
+ if ( cpu_online(cpu) || !cpu_present(cpu) )
+ {
+ cpu_hotplug_done();
+ return -EINVAL;
+ }
+
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+ if ( notifier_rc != NOTIFY_DONE )
+ {
+ err = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ goto fail;
+ }
+
+ err = __cpu_up(cpu);
+ if ( err < 0 )
+ goto fail;
+
+ notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+
+ cpu_hotplug_done();
+ return 0;
+
+ fail:
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ cpu_hotplug_done();
+ return err;
}
-int __cpu_notifier_call_chain(
- unsigned long val, void *v, int nr_to_call, int *nr_calls)
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
{
- BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
- return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
+ int cpu, error = 0;
+
+ BUG_ON(raw_smp_processor_id() != 0);
+
+ cpus_clear(frozen_cpus);
+
+ printk("Disabling non-boot CPUs ...\n");
+
+ for_each_online_cpu ( cpu )
+ {
+ if ( cpu == 0 )
+ continue;
+
+ if ( (error = cpu_down(cpu)) )
+ {
+ BUG_ON(error == -EBUSY);
+ printk("Error taking CPU%d down: %d\n", cpu, error);
+ break;
+ }
+
+ cpu_set(cpu, frozen_cpus);
+ printk("CPU%d is down\n", cpu);
+ }
+
+ BUG_ON(!error && (num_online_cpus() != 1));
+ return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+ int cpu, error;
+
+ printk("Enabling non-boot CPUs ...\n");
+
+ for_each_cpu_mask ( cpu, frozen_cpus )
+ {
+ if ( (error = cpu_up(cpu)) )
+ {
+ BUG_ON(error == -EBUSY);
+ printk("Error taking CPU%d up: %d\n", cpu, error);
+ continue;
+ }
+ printk("CPU%d is up\n", cpu);
+ }
+
+ cpus_clear(frozen_cpus);
}