aboutsummaryrefslogtreecommitdiffstats
path: root/xen/arch/x86/smp.c
diff options
context:
space:
mode:
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2006-04-03 16:35:06 +0100
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2006-04-03 16:35:06 +0100
commitf55976facfbc63f0c8619070a5b2be32a915a67a (patch)
tree1c72055d48b7afe01ffbd7573494270126725069 /xen/arch/x86/smp.c
parent5e8883cc8c7075e3c1b03c50016f433153ea9c01 (diff)
downloadxen-f55976facfbc63f0c8619070a5b2be32a915a67a.tar.gz
xen-f55976facfbc63f0c8619070a5b2be32a915a67a.tar.bz2
xen-f55976facfbc63f0c8619070a5b2be32a915a67a.zip
Remove send_IPI_all, send_IPI_allbutself and send_IPI_self
shortcutting IPI functions. They're either unused or used so infrequently that we may as well simplify things and always use send_IPI_mask. Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/arch/x86/smp.c')
-rw-r--r--xen/arch/x86/smp.c97
1 file changed, 26 insertions, 71 deletions
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index fc0a545883..18240da6f5 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -21,6 +21,7 @@
#include <asm/smpboot.h>
#include <asm/hardirq.h>
#include <mach_apic.h>
+#include <mach_ipi.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
@@ -74,38 +75,6 @@ static inline int __prepare_ICR2 (unsigned int mask)
return SET_APIC_DEST_FIELD(mask);
}
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
- /*
- * Subtle. In the case of the 'never do double writes' workaround
- * we have to lock out interrupts to be safe. As we don't care
- * of the value read we use an atomic rmw access to avoid costly
- * cli/sti. Otherwise we use an even cheaper single atomic write
- * to the APIC.
- */
- unsigned int cfg;
-
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
-
- /*
- * No need to touch the target chip field
- */
- cfg = __prepare_ICR(shortcut, vector);
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write_around(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
- __send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
static inline void check_IPI_mask(cpumask_t cpumask)
{
/*
@@ -116,9 +85,6 @@ static inline void check_IPI_mask(cpumask_t cpumask)
ASSERT(!cpus_empty(cpumask));
}
-/*
- * This is only used on smaller machines.
- */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
unsigned long mask = cpus_addr(cpumask)[0];
@@ -168,36 +134,33 @@ inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
local_irq_save(flags);
- for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
- if (cpu_isset(query_cpu, mask)) {
-
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
+ for_each_cpu_mask( query_cpu, mask )
+ {
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
- /*
- * prepare target chip field
- */
- cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
- apic_write_around(APIC_ICR2, cfg);
+ /*
+ * prepare target chip field
+ */
+ cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
+ apic_write_around(APIC_ICR2, cfg);
- /*
- * program the ICR
- */
- cfg = __prepare_ICR(0, vector);
+ /*
+ * program the ICR
+ */
+ cfg = __prepare_ICR(0, vector);
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write_around(APIC_ICR, cfg);
- }
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ apic_write_around(APIC_ICR, cfg);
}
+
local_irq_restore(flags);
}
-#include <mach_ipi.h>
-
static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
static cpumask_t flush_cpumask;
static unsigned long flush_va;
@@ -241,20 +204,12 @@ void __flush_tlb_mask(cpumask_t mask, unsigned long va)
/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
- ASSERT(local_irq_is_enabled());
-
+ cpumask_t allbutself;
+
/* Flush everyone else. We definitely flushed just before entry. */
- if ( num_online_cpus() > 1 )
- {
- spin_lock(&flush_lock);
- flush_cpumask = cpu_online_map;
- flush_va = FLUSHVA_ALL;
- send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
- cpu_clear(smp_processor_id(), flush_cpumask);
- while ( !cpus_empty(flush_cpumask) )
- cpu_relax();
- spin_unlock(&flush_lock);
- }
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+ __flush_tlb_mask(allbutself, FLUSHVA_ALL);
/* No need for atomicity: we are the only possible updater. */
ASSERT(tlbflush_clock == 0);