/*
 * xen/arch/arm/smpboot.c
 *
 * Dummy smpboot support
 *
 * Copyright (c) 2011 Citrix Systems.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <xen/cpu.h>
#include <xen/cpumask.h>
#include <xen/delay.h>
#include <xen/errno.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/timer.h>
#include <xen/irq.h>
#include <xen/console.h>
#include <asm/gic.h>

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_present_map;
EXPORT_SYMBOL(cpu_present_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

struct cpuinfo_arm cpu_data[NR_CPUS];

/* CPU logical map: map xen cpuid to an MPIDR */
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/* Fake one node for now. See also include/asm-arm/numa.h */
nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };

/* Xen stack for bringing up the first CPU. */
static unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
       __attribute__((__aligned__(STACK_SIZE)));

/* Initial boot cpu data */
struct init_info __initdata init_data =
{
    .stack = cpu0_boot_stack,
};

/* Shared state for coordinating CPU bringup */
unsigned long smp_up_cpu = MPIDR_INVALID;
/* Shared state for coordinating CPU teardown */
static bool_t cpu_is_dead = 0;

/* ID of the PCPU we're running on */
DEFINE_PER_CPU(unsigned int, cpu_id);

/* XXX these seem awfully x86ish... */
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);

static void setup_cpu_sibling_map(int cpu)
{
    if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
         !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
        panic("No memory for CPU sibling/core maps\n");

    /* A CPU is a sibling with itself and is always on its own core. */
    cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
    cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
}

void __init smp_clear_cpu_maps (void)
{
    cpumask_clear(&cpu_possible_map);
    cpumask_clear(&cpu_online_map);
    cpumask_set_cpu(0, &cpu_online_map);
    cpumask_set_cpu(0, &cpu_possible_map);
    cpu_logical_map(0) = READ_SYSREG(MPIDR_EL1) & MPIDR_HWID_MASK;
}

int __init smp_get_max_cpus (void)
{
    int i, max_cpus = 0;

    for ( i = 0; i < nr_cpu_ids; i++ )
        if ( cpu_possible(i) )
            max_cpus++;

    return max_cpus;
}

void __init smp_prepare_cpus (unsigned int max_cpus)
{
    cpumask_copy(&cpu_present_map, &cpu_possible_map);

    setup_cpu_sibling_map(0);
}
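/*
 * Bringup handshake, as implemented by __cpu_up() and start_secondary()
 * below: __cpu_up() publishes the target CPU's MPIDR in smp_up_cpu and
 * wakes every CPU with an SGI; the secondaries' early boot code (outside
 * this file) uses smp_up_cpu as a gate so that only the CPU whose MPIDR
 * matches falls through into start_secondary(), which closes the gate by
 * resetting smp_up_cpu to MPIDR_INVALID and then marks itself online in
 * cpu_online_map.
 */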
/* Boot the current CPU */
void __cpuinit start_secondary(unsigned long boot_phys_offset,
                               unsigned long fdt_paddr,
                               unsigned long hwid)
{
    unsigned int cpuid = init_data.cpuid;

    memset(get_cpu_info(), 0, sizeof (struct cpu_info));

    set_processor_id(cpuid);

    current_cpu_data = boot_cpu_data;
    identify_cpu(&current_cpu_data);

    init_traps();

    setup_virt_paging();

    mmu_init_secondary_cpu();

    gic_init_secondary_cpu();

    init_secondary_IRQ();

    gic_route_ppis();

    init_maintenance_interrupt();
    init_timer_interrupt();

    set_current(idle_vcpu[cpuid]);

    setup_cpu_sibling_map(cpuid);

    /* Run local notifiers */
    notify_cpu_starting(cpuid);
    wmb();

    /* Now report this CPU is up */
    smp_up_cpu = MPIDR_INVALID;
    cpumask_set_cpu(cpuid, &cpu_online_map);
    wmb();

    local_irq_enable();
    local_abort_enable();

    printk(XENLOG_DEBUG "CPU %u booted.\n", smp_processor_id());

    startup_cpu_idle_loop();
}

/* Shut down the current CPU */
void __cpu_disable(void)
{
    unsigned int cpu = get_processor_id();

    local_irq_disable();
    gic_disable_cpu();
    /* Allow any queued timer interrupts to get serviced */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* It's now safe to remove this processor from the online map */
    cpumask_clear_cpu(cpu, &cpu_online_map);

    if ( cpu_disable_scheduler(cpu) )
        BUG();
    mb();

    /* Return to caller; eventually the IPI mechanism will unwind and the
     * scheduler will drop to the idle loop, which will call stop_cpu(). */
}

void stop_cpu(void)
{
    local_irq_disable();
    cpu_is_dead = 1;
    /* Make sure the write happens before we sleep forever */
    dsb();
    isb();
    while ( 1 )
        wfi();
}

/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    flush_xen_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    /* We don't know the GIC ID of the CPU until it has woken up, so just
     * signal everyone and rely on our own smp_up_cpu gate to ensure only
     * the one we want gets through. */
    send_SGI_allbutself(GIC_SGI_EVENT_CHECK);

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}

/* Wait for a remote CPU to die */
void __cpu_die(unsigned int cpu)
{
    unsigned int i = 0;

    while ( !cpu_is_dead )
    {
        mdelay(100);
        cpu_relax();
        process_pending_softirqs();
        if ( (++i % 10) == 0 )
            printk(XENLOG_ERR "CPU %u still not dead...\n", cpu);
        mb();
    }
    cpu_is_dead = 0;

    mb();
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */