/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3
 * (c) 1998-99, 2000 Ingo Molnar
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 * E1AP. see PII A1AP
 * E2AP. see PII A2AP
 * E3AP. see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 * A1AP. see PPro 1AP
 * A2AP. see PPro 2AP
 * A3AP. see PPro 7AP
 *
 * Pentium Pro
 *   None of 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 * 1AP. Linux maps APIC as non-cacheable
 * 2AP. worked around in hardware
 * 3AP. fixed in C0 and above steppings microcode update.
 *      Linux does not use excessive STARTUP_IPIs.
 * 4AP. worked around in hardware
 * 5AP. symmetric IO mode (normal Linux operation) not affected.
 *      'noapic' mode has vector 0xf filled out properly.
 * 6AP. 'noapic' mode might be affected - fixed in later steppings
 * 7AP. We do not assume writes to the LVT deasserting IRQs
 * 8AP. We do not enable low power mode (deep sleep) during MP bootup
 * 9AP. We do not use mixed mode
 */

/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_APIC_DEST_FIELD(mask);
}

static inline void check_IPI_mask(cpumask_t cpumask)
{
    /*
     * Sanity, and necessary. An IPI with no target generates a send accept
     * error with Pentium and P6 APICs.
     */
    ASSERT(cpus_subset(cpumask, cpu_online_map));
    ASSERT(!cpus_empty(cpumask));
}

void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    check_IPI_mask(cpumask);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}

void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    check_IPI_mask(mask);

    /*
     * Hack. The clustered APIC addressing mode doesn't allow us to send
     * to an arbitrary mask, so I do a unicast to each CPU instead. This
     * should be modified to do 1 message per cluster ID - mbligh
     */

    local_irq_save(flags);

    for_each_cpu_mask( query_cpu, mask ) {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
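
/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might use send_IPI_mask_flat() above to interrupt every online CPU
 * except itself. The helper name and CALL_FUNCTION_VECTOR are assumed
 * for illustration; smp_processor_id(), cpu_clear() and cpus_empty()
 * are assumed to come from the same old-style cpumask API already used
 * in this file. The empty-mask check matters because check_IPI_mask()
 * asserts that an IPI always has at least one target.
 */
static void example_send_call_function_ipi(void)
{
    cpumask_t mask = cpu_online_map;

    /* Never IPI ourselves; skip the send entirely if nobody else is online. */
    cpu_clear(smp_processor_id(), mask);
    if ( !cpus_empty(mask) )
        send_IPI_mask_flat(mask, CALL_FUNCTION_VECTOR);
}
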
# C++ wrapper files.
CHCPPSRC = ${CHIBIOS}/os/various/cpp_wrappers/ch.cpp \
           ${CHIBIOS}/os/various/cpp_wrappers/syscalls_cpp.cpp

CHCPPINC = ${CHIBIOS}/os/various/cpp_wrappers
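
# Illustrative usage, not part of this fragment: an application Makefile
# typically appends these variables to its C++ source and include lists.
# The CPPSRC and INCDIR variable names follow the ChibiOS demo Makefiles
# and may differ in other build setups.
CPPSRC += ${CHCPPSRC}
INCDIR += ${CHCPPINC}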