/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3
 * (c) 1998-99, 2000 Ingo Molnar
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/ipi.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP. see PII A1AP
 *   E2AP. see PII A2AP
 *   E3AP. see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP. see PPro 1AP
 *   A2AP. see PPro 2AP
 *   A3AP. see PPro 7AP
 *
 * Pentium Pro
 *   None of 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP. Linux maps APIC as non-cacheable
 *   2AP. worked around in hardware
 *   3AP. fixed in C0 and above steppings microcode update.
 *        Linux does not use excessive STARTUP_IPIs.
 *   4AP. worked around in hardware
 *   5AP. symmetric IO mode (normal Linux operation) not affected.
 *        'noapic' mode has vector 0xf filled out properly.
 *   6AP. 'noapic' mode might be affected - fixed in later steppings
 *   7AP. We do not assume writes to the LVT deasserting IRQs
 *   8AP. We do not enable low power mode (deep sleep) during MP bootup
 *   9AP. We do not use mixed mode
 */

/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2(unsigned int mask)
{
    return SET_APIC_DEST_FIELD(mask);
}

static inline void check_IPI_mask(cpumask_t cpumask)
{
    /*
     * Sanity, and necessary. An IPI with no target generates a send accept
     * error with Pentium and P6 APICs.
     */
    ASSERT(cpus_subset(cpumask, cpu_online_map));
    ASSERT(!cpus_empty(cpumask));
}

void apic_wait_icr_idle(void)
{
    while ( apic_read(APIC_ICR) & APIC_ICR_BUSY )
        cpu_relax();
}

void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    check_IPI_mask(cpumask);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}

void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    check_IPI_mask(mask);

    /*
     * Hack. The clustered APIC addressing mode doesn't allow us to send
     * to an arbitrary mask, so I do unicasts to each CPU instead. This
     * should be modified to do 1 message per cluster ID - mbligh
     */

    local_irq_save(flags);

    for_each_cpu_mask ( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}

static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static const void *flush_va;
static unsigned int flush_flags;

fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    irq_enter();
    if ( !__sync_lazy_execstate() ||
         (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
        flush_area_local(flush_va, flush_flags);
    cpu_clear(smp_processor_id(), flush_cpumask);
    irq_exit();
}

void flush_area_mask(cpumask_t mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), mask) )
    {
        flush_area_local(va, flags);
        cpu_clear(smp_processor_id(), mask);
    }

    if ( !cpus_empty(mask) )
    {
        spin_lock(&flush_lock);
        flush_cpumask = mask;
        flush_va      = va;
        flush_flags   = flags;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}

/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
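
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that wants to drop the mapping of a single page on every online CPU could
 * drive the local and remote flush paths through flush_area_mask() as below.
 * The helper name flush_one_page_all() and the use of the FLUSH_ORDER(0)
 * encoding for a 4kB region are assumptions for this example; the caller must
 * have interrupts enabled, as flush_area_mask() asserts.
 */
static void flush_one_page_all(const void *va)
{
    /*
     * flush_area_mask() flushes the local CPU directly if it is in the mask,
     * then IPIs the remaining CPUs via INVALIDATE_TLB_VECTOR and spins until
     * they have all cleared themselves from flush_cpumask.
     */
    flush_area_mask(cpu_online_map, va, FLUSH_TLB | FLUSH_ORDER(0));
}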
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

// General Information about an assembly is controlled through the following 
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("CPUMonitor")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Microsoft")]
[assembly: AssemblyProduct("CPUMonitor")]
[assembly: AssemblyCopyright("Copyright © Microsoft 2009")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

// Setting ComVisible to false makes the types in this assembly not visible 
// to COM components.  If you need to access a type in this assembly from 
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("3e4a61da-cdde-46de-848b-b5206d225e21")]

// Version information for an assembly consists of the following four values:
//
//      Major Version
//      Minor Version 
//      Build Number
//      Revision
//
// You can specify all the values or you can default the Build and Revision Numbers 
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]