author     Keir Fraser <keir.fraser@citrix.com>  2009-05-27 11:15:08 +0100
committer  Keir Fraser <keir.fraser@citrix.com>  2009-05-27 11:15:08 +0100
commit     c3da952a0261cb2fa381154a6f22b758a4bd843b
tree       92496e6fa23a402388431550351e458deedf84ec
parent     cf5e6f2d34413cac233939ab701191655a587ca7
Pass cpumasks by reference always.
Rather than passing cpumasks by value in all cases (which is problematic
for large NR_CPUS configurations), pass them 'by reference', i.e. through
a pointer to a const cpumask. On x86 this changes send_IPI_mask() to send
IPIs only to remote CPUs, meaning any caller that needs the current CPU
handled as well has to do so on its own.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
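In practice the conversion looks like this (a minimal sketch distilled from
the hunks below; d, func, info and cpu are illustrative placeholders):

    /* Before: the mask is copied by value -- expensive for large NR_CPUS. */
    void flush_tlb_mask(cpumask_t mask);
    flush_tlb_mask(d->domain_dirty_cpumask);

    /* After: the mask is passed through a pointer to const. */
    void flush_tlb_mask(const cpumask_t *mask);
    flush_tlb_mask(&d->domain_dirty_cpumask);

    /* Single-CPU callers switch from the by-value cpumask_of_cpu() to the
     * new cpumask_of(), which returns a pointer to a shared constant mask: */
    on_selected_cpus(cpumask_of(cpu), func, info, 1, 1);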
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/ia64/linux-xen/smp.c                | 15
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c  |  2
-rw-r--r--  xen/arch/ia64/xen/mm.c                       |  2
-rw-r--r--  xen/arch/ia64/xen/vhpt.c                     | 15
-rw-r--r--  xen/arch/x86/acpi/cpufreq/cpufreq.c          |  8
-rw-r--r--  xen/arch/x86/acpi/cpufreq/powernow.c         |  2
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.c                |  8
-rw-r--r--  xen/arch/x86/cpu/mtrr/main.c                 |  2
-rw-r--r--  xen/arch/x86/crash.c                         | 14
-rw-r--r--  xen/arch/x86/domain.c                        |  4
-rw-r--r--  xen/arch/x86/genapic/x2apic.c                |  7
-rw-r--r--  xen/arch/x86/hpet.c                          |  2
-rw-r--r--  xen/arch/x86/hvm/hvm.c                       |  2
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c                  |  4
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c                   |  2
-rw-r--r--  xen/arch/x86/irq.c                           |  6
-rw-r--r--  xen/arch/x86/machine_kexec.c                 |  6
-rw-r--r--  xen/arch/x86/mm.c                            | 22
-rw-r--r--  xen/arch/x86/mm/hap/hap.c                    |  6
-rw-r--r--  xen/arch/x86/mm/shadow/common.c              | 18
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c               |  8
-rw-r--r--  xen/arch/x86/shutdown.c                      |  2
-rw-r--r--  xen/arch/x86/smp.c                           | 90
-rw-r--r--  xen/arch/x86/time.c                          |  2
-rw-r--r--  xen/common/Makefile                          |  1
-rw-r--r--  xen/common/cpu.c                             | 26
-rw-r--r--  xen/common/grant_table.c                     | 10
-rw-r--r--  xen/common/keyhandler.c                      |  2
-rw-r--r--  xen/common/page_alloc.c                      |  2
-rw-r--r--  xen/include/asm-ia64/tlbflush.h              |  2
-rw-r--r--  xen/include/asm-x86/flushtlb.h               | 10
-rw-r--r--  xen/include/asm-x86/genapic.h                |  8
-rw-r--r--  xen/include/asm-x86/ipi.h                    |  8
-rw-r--r--  xen/include/asm-x86/smp.h                    |  2
-rw-r--r--  xen/include/xen/cpumask.h                    | 29
-rw-r--r--  xen/include/xen/smp.h                        |  8
-rw-r--r--  xen/include/xen/softirq.h                    |  2

37 files changed, 187 insertions(+), 172 deletions(-)
diff --git a/xen/arch/ia64/linux-xen/smp.c b/xen/arch/ia64/linux-xen/smp.c
index 43786124f1..03d673f1c2 100644
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -57,19 +57,18 @@
//#if CONFIG_SMP || IA64
#if CONFIG_SMP
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void smp_send_event_check_mask(cpumask_t mask)
+void smp_send_event_check_mask(const cpumask_t *mask)
{
int cpu;
/* Not for me. */
- cpu_clear(smp_processor_id(), mask);
- if (cpus_empty(mask))
+ if (cpus_subset(*mask, *cpumask_of(smp_processor_id())))
return;
//printf("smp_send_event_check_mask called\n");
for (cpu = 0; cpu < NR_CPUS; ++cpu)
- if (cpu_isset(cpu, mask))
+ if (cpu_isset(cpu, *mask) && cpu != smp_processor_id())
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
#endif
@@ -438,11 +437,11 @@ EXPORT_SYMBOL(smp_call_function);
#ifdef XEN
int
-on_selected_cpus(cpumask_t selected, void (*func) (void *info), void *info,
- int retry, int wait)
+on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
+ void *info, int retry, int wait)
{
struct call_data_struct data;
- unsigned int cpu, nr_cpus = cpus_weight(selected);
+ unsigned int cpu, nr_cpus = cpus_weight(*selected);
ASSERT(local_irq_is_enabled());
@@ -460,7 +459,7 @@ on_selected_cpus(cpumask_t selected, void (*func) (void *info), void *info,
call_data = &data;
wmb();
- for_each_cpu_mask(cpu, selected)
+ for_each_cpu_mask(cpu, *selected)
send_IPI_single(cpu, IPI_CALL_FUNC);
while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
index 707c55ce17..b525c8b411 100644
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
@@ -239,7 +239,7 @@ sn2_global_tlb_purge(unsigned long start,
flush_data.start = start;
flush_data.end = end;
flush_data.nbits = nbits;
- on_selected_cpus(selected_cpus, sn_flush_ptcga_cpu,
+ on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
&flush_data, 1, 1);
}
spin_unlock(&sn2_ptcg_lock2);
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index 20071061f2..de18232388 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -3189,7 +3189,7 @@ int get_page_type(struct page_info *page, unsigned long type)
if ( unlikely(!cpus_empty(mask)) )
{
perfc_incr(need_flush_tlb_flush);
- flush_tlb_mask(mask);
+ flush_tlb_mask(&mask);
}
/* We lose existing type, back pointer, and validity. */
diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c
index 525b76fc37..212f550960 100644
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -548,22 +548,21 @@ void flush_tlb_for_log_dirty(struct domain *d)
cpus_clear (d->domain_dirty_cpumask);
}
-void flush_tlb_mask(cpumask_t mask)
+void flush_tlb_mask(const cpumask_t *mask)
{
int cpu;
cpu = smp_processor_id();
- if (cpu_isset (cpu, mask)) {
- cpu_clear(cpu, mask);
+ if (cpu_isset(cpu, *mask))
flush_tlb_vhpt_all (NULL);
- }
- if (cpus_empty(mask))
+ if (cpus_subset(*mask, *cpumask_of(cpu)))
return;
- for_each_cpu_mask (cpu, mask)
- smp_call_function_single
- (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+ for_each_cpu_mask (cpu, *mask)
+ if (cpu != smp_processor_id())
+ smp_call_function_single
+ (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
}
#ifdef PERF_COUNTERS
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 1631a30935..ccf7bffa75 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -186,7 +186,7 @@ static void drv_read(struct drv_cmd *cmd)
if (likely(cpu_isset(smp_processor_id(), cmd->mask)))
do_drv_read((void *)cmd);
else
- on_selected_cpus( cmd->mask, do_drv_read, (void *)cmd, 0, 1);
+ on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1);
}
static void drv_write(struct drv_cmd *cmd)
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cmd)
cpu_isset(smp_processor_id(), cmd->mask))
do_drv_write((void *)cmd);
else
- on_selected_cpus( cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+ on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0);
}
static u32 get_cur_val(cpumask_t mask)
@@ -274,7 +274,6 @@ static unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
struct cpufreq_policy *policy;
struct perf_pair readin, cur, *saved;
unsigned int perf_percent;
- cpumask_t cpumask;
unsigned int retval;
if (!cpu_online(cpu))
@@ -303,8 +302,7 @@ static unsigned int get_measured_perf(unsigned int cpu, unsigned int flag)
if (cpu == smp_processor_id()) {
read_measured_perf_ctrs((void *)&readin);
} else {
- cpumask = cpumask_of_cpu(cpu);
- on_selected_cpus(cpumask, read_measured_perf_ctrs,
+ on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
(void *)&readin, 0, 1);
}
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c
index f69b425a0c..98b8ed2df6 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -121,7 +121,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
cmd.val = next_perf_state;
- on_selected_cpus( cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
+ on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
perf->state = next_perf_state;
policy->cur = freqs.new;
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 49f3f84899..655c168d42 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1205,8 +1205,8 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
add_taint(TAINT_ERROR_INJECT);
- on_selected_cpus(cpumask_of_cpu(target),
- x86_mc_msrinject, mc_msrinject, 1, 1);
+ on_selected_cpus(cpumask_of(target), x86_mc_msrinject,
+ mc_msrinject, 1, 1);
break;
@@ -1225,8 +1225,8 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u_xen_mc)
add_taint(TAINT_ERROR_INJECT);
- on_selected_cpus(cpumask_of_cpu(target), x86_mc_mceinject,
- mc_mceinject, 1, 1);
+ on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
+ mc_mceinject, 1, 1);
break;
default:
diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c
index 16133218a9..0f15eb8688 100644
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -688,7 +688,7 @@ void mtrr_save_state(void)
if (cpu == 0)
mtrr_save_fixed_ranges(NULL);
else
- on_selected_cpus(cpumask_of_cpu(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1);
put_cpu();
}
diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c
index d38f969d02..009ae9ab7e 100644
--- a/xen/arch/x86/crash.c
+++ b/xen/arch/x86/crash.c
@@ -13,7 +13,6 @@
#include <asm/percpu.h>
#include <xen/types.h>
#include <xen/irq.h>
-#include <asm/ipi.h>
#include <asm/nmi.h>
#include <xen/string.h>
#include <xen/elf.h>
@@ -51,19 +50,6 @@ static int crash_nmi_callback(struct cpu_user_regs *regs, int cpu)
return 1;
}
-/*
- * By using the NMI code instead of a vector we just sneak thru the
- * word generator coming out with just what we want. AND it does
- * not matter if clustered_apic_mode is set or not.
- */
-static void smp_send_nmi_allbutself(void)
-{
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- if ( !cpus_empty(allbutself) )
- send_IPI_mask(allbutself, APIC_DM_NMI);
-}
-
static void nmi_shootdown_cpus(void)
{
unsigned long msecs;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 170e61d4b8..0004259eb6 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1316,7 +1316,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
{
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(dirty_mask);
+ flush_tlb_mask(&dirty_mask);
}
if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
@@ -1410,7 +1410,7 @@ void sync_vcpu_execstate(struct vcpu *v)
(void)__sync_lazy_execstate();
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(v->vcpu_dirty_cpumask);
+ flush_tlb_mask(&v->vcpu_dirty_cpumask);
}
struct migrate_info {
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index 3dd97250c7..08ef983699 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -56,7 +56,7 @@ unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask)
return cpu_physical_id(first_cpu(cpumask));
}
-void send_IPI_mask_x2apic(cpumask_t cpumask, int vector)
+void send_IPI_mask_x2apic(const cpumask_t *cpumask, int vector)
{
unsigned int cpu, cfg;
unsigned long flags;
@@ -76,8 +76,9 @@ void send_IPI_mask_x2apic(cpumask_t cpumask, int vector)
local_irq_save(flags);
cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_PHYSICAL | vector;
- for_each_cpu_mask ( cpu, cpumask )
- apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu));
+ for_each_cpu_mask ( cpu, *cpumask )
+ if ( cpu != smp_processor_id() )
+ apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu));
local_irq_restore(flags);
}
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index e66eedee73..6877a80aea 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -617,7 +617,7 @@ void hpet_disable_legacy_broadcast(void)
spin_unlock_irq(&legacy_hpet_event.lock);
- smp_send_event_check_mask(cpu_online_map);
+ smp_send_event_check_mask(&cpu_online_map);
}
void hpet_broadcast_enter(void)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5156fbf11e..6c9fadca0d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2426,7 +2426,7 @@ static int hvmop_flush_tlb_all(void)
paging_update_cr3(v);
/* Flush all dirty TLBs. */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
/* Done. */
for_each_vcpu ( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 5401e5638d..786a22796e 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -264,7 +264,7 @@ static void vmx_clear_vmcs(struct vcpu *v)
int cpu = v->arch.hvm_vmx.active_cpu;
if ( cpu != -1 )
- on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1);
}
static void vmx_load_vmcs(struct vcpu *v)
@@ -900,7 +900,7 @@ void vmx_do_resume(struct vcpu *v)
{
int cpu = v->arch.hvm_vmx.active_cpu;
if ( cpu != -1 )
- on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1);
}
vmx_clear_vmcs(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 5fb1421ac6..c4eee9b44e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2164,7 +2164,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
{
paging_mark_dirty(d, mfn_x(mfn));
p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
return;
}
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 971117c010..65e69001cd 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -522,7 +522,7 @@ static void __pirq_guest_eoi(struct domain *d, int irq)
}
if ( !cpus_empty(cpu_eoi_map) )
- on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
}
int pirq_guest_eoi(struct domain *d, int irq)
@@ -761,7 +761,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
{
cpu_eoi_map = action->cpu_eoi_map;
spin_unlock_irq(&desc->lock);
- on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
spin_lock_irq(&desc->lock);
}
break;
@@ -799,7 +799,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_unlock_irq(&desc->lock);
- on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1);
spin_lock_irq(&desc->lock);
}
diff --git a/xen/arch/x86/machine_kexec.c b/xen/arch/x86/machine_kexec.c
index 4d15e4b063..af7663e3fc 100644
--- a/xen/arch/x86/machine_kexec.c
+++ b/xen/arch/x86/machine_kexec.c
@@ -91,7 +91,6 @@ static void __machine_reboot_kexec(void *data)
void machine_reboot_kexec(xen_kexec_image_t *image)
{
int reboot_cpu_id;
- cpumask_t reboot_cpu;
reboot_cpu_id = 0;
@@ -100,9 +99,8 @@ void machine_reboot_kexec(xen_kexec_image_t *image)
if ( reboot_cpu_id != smp_processor_id() )
{
- cpus_clear(reboot_cpu);
- cpu_set(reboot_cpu_id, reboot_cpu);
- on_selected_cpus(reboot_cpu, __machine_reboot_kexec, image, 1, 0);
+ on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec,
+ image, 1, 0);
for (;;)
; /* nothing */
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 1f4199d55f..10adea8808 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -517,7 +517,7 @@ static void invalidate_shadow_ldt(struct vcpu *v, int flush)
/* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
if ( flush )
- flush_tlb_mask(v->vcpu_dirty_cpumask);
+ flush_tlb_mask(&v->vcpu_dirty_cpumask);
out:
spin_unlock(&v->arch.shadow_ldt_lock);
@@ -1250,7 +1250,7 @@ static void pae_flush_pgd(
paging_update_cr3(v);
cpus_or(m, m, v->vcpu_dirty_cpumask);
}
- flush_tlb_mask(m);
+ flush_tlb_mask(&m);
}
/* If below 4GB then the pgdir is not shadowed in low memory. */
@@ -1275,7 +1275,7 @@ static void pae_flush_pgd(
spin_unlock(&cache->lock);
}
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
#else
# define pae_flush_pgd(mfn, idx, nl3e) ((void)0)
@@ -2290,7 +2290,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
((nx & PGT_type_mask) == PGT_writable_page)) )
{
perfc_incr(need_flush_tlb_flush);
- flush_tlb_mask(mask);
+ flush_tlb_mask(&mask);
}
/* We lose existing type and validity. */
@@ -2489,7 +2489,7 @@ static void process_deferred_ops(void)
if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
{
if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
else
flush_tlb_local();
}
@@ -2824,9 +2824,9 @@ int do_mmuext_op(
}
pmask = vcpumask_to_pcpumask(d, vmask);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
- flush_tlb_mask(pmask);
+ flush_tlb_mask(&pmask);
else
- flush_tlb_one_mask(pmask, op.arg1.linear_addr);
+ flush_tlb_one_mask(&pmask, op.arg1.linear_addr);
break;
}
@@ -2835,7 +2835,7 @@ int do_mmuext_op(
break;
case MMUEXT_INVLPG_ALL:
- flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
+ flush_tlb_one_mask(&d->domain_dirty_cpumask, op.arg1.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
@@ -3688,7 +3688,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
pmask = vcpumask_to_pcpumask(d, vmask);
if ( cpu_isset(smp_processor_id(), pmask) )
this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB;
- flush_tlb_mask(pmask);
+ flush_tlb_mask(&pmask);
break;
}
break;
@@ -3706,7 +3706,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
flush_tlb_one_local(va);
break;
case UVMF_ALL:
- flush_tlb_one_mask(d->domain_dirty_cpumask, va);
+ flush_tlb_one_mask(&d->domain_dirty_cpumask, va);
break;
default:
if ( unlikely(!is_pv_32on64_domain(d) ?
@@ -3716,7 +3716,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
pmask = vcpumask_to_pcpumask(d, vmask);
if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB )
cpu_clear(smp_processor_id(), pmask);
- flush_tlb_one_mask(pmask, va);
+ flush_tlb_one_mask(&pmask, va);
break;
}
break;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index b7615f0d69..e9d38103fc 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -64,7 +64,7 @@ int hap_enable_log_dirty(struct domain *d)
/* set l1e entries of P2M table to be read-only. */
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
return 0;
}
@@ -83,7 +83,7 @@ void hap_clean_dirty_bitmap(struct domain *d)
{
/* set l1e entries of P2M table to be read-only. */
p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
/************************************************/
@@ -643,7 +643,7 @@ hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
safe_write_pte(p, new);
if ( (old_flags & _PAGE_PRESENT)
&& (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
#if CONFIG_PAGING_LEVELS == 3
/* install P2M in monitor table for PAE Xen */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index ca4cb13ac4..875566d7c9 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -695,7 +695,7 @@ static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn,
}
if ( ftlb )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
return 0;
}
@@ -1145,7 +1145,7 @@ sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
rc = sh_validate_guest_entry(v, gmfn, entry, size);
if ( rc & SHADOW_SET_FLUSH )
/* Need to flush TLBs to pick up shadow PT changes */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
if ( rc & SHADOW_SET_ERROR )
{
/* This page is probably not a pagetable any more: tear it out of the
@@ -1393,7 +1393,7 @@ static void _shadow_prealloc(
/* See if that freed up enough space */
if ( space_is_available(d, order, count) )
{
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
return;
}
}
@@ -1447,7 +1447,7 @@ static void shadow_blow_tables(struct domain *d)
pagetable_get_mfn(v->arch.shadow_table[i]));
/* Make sure everyone sees the unshadowings */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
void shadow_blow_tables_per_domain(struct domain *d)
@@ -1554,7 +1554,7 @@ mfn_t shadow_alloc(struct domain *d,
if ( unlikely(!cpus_empty(mask)) )
{
perfc_incr(shadow_alloc_tlbflush);
- flush_tlb_mask(mask);
+ flush_tlb_mask(&mask);
}
/* Now safe to clear the page for reuse */
p = sh_map_domain_page(page_to_mfn(sp+i));
@@ -2803,7 +2803,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
/* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
if ( do_locking ) shadow_unlock(v->domain);
}
@@ -3435,7 +3435,7 @@ shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
{
sh_remove_all_shadows_and_parents(v, mfn);
if ( sh_remove_all_mappings(v, mfn) )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
}
@@ -3474,7 +3474,7 @@ shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
}
omfn = _mfn(mfn_x(omfn) + 1);
}
- flush_tlb_mask(flushmask);
+ flush_tlb_mask(&flushmask);
if ( npte )
unmap_domain_page(npte);
@@ -3752,7 +3752,7 @@ int shadow_track_dirty_vram(struct domain *d,
}
}
if ( flush_tlb )
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
goto out;
out_sl1ma:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 072dda0b6b..69752f879d 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3146,7 +3146,7 @@ static int sh_page_fault(struct vcpu *v,
*/
perfc_incr(shadow_rm_write_flush_tlb);
atomic_inc(&d->arch.paging.shadow.gtable_dirty_version);
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -4135,7 +4135,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 2
if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
/* PAE guests have four shadow_table entries, based on the
@@ -4158,7 +4158,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
}
}
if ( flush )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
/* Now install the new shadows. */
for ( i = 0; i < 4; i++ )
{
@@ -4179,7 +4179,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
}
#elif GUEST_PAGING_LEVELS == 4
if ( sh_remove_write_access(v, gmfn, 4, 0) != 0 )
- flush_tlb_mask(v->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&v->domain->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
#else
#error This should never happen
diff --git a/xen/arch/x86/shutdown.c b/xen/arch/x86/shutdown.c
index 18340e5b25..1d8218fa27 100644
--- a/xen/arch/x86/shutdown.c
+++ b/xen/arch/x86/shutdown.c
@@ -310,7 +310,7 @@ void machine_restart(unsigned int delay_millisecs)
if ( get_apic_id() != boot_cpu_physical_apicid )
{
/* Send IPI to the boot CPU (logical cpu 0). */
- on_selected_cpus(cpumask_of_cpu(0), __machine_restart,
+ on_selected_cpus(cpumask_of(0), __machine_restart,
&delay_millisecs, 1, 0);
for ( ; ; )
halt();
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index dba5cb58b6..0bf82da494 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -19,11 +19,16 @@
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
-#include <asm/ipi.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>
/*
+ * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
+ * excluding the local CPU. @cpumask may be empty.
+ */
+#define send_IPI_mask (genapic->send_IPI_mask)
+
+/*
* Some notes on x86 processor bugs affecting SMP operation:
*
* Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
@@ -84,14 +89,15 @@ void apic_wait_icr_idle(void)
cpu_relax();
}
-void send_IPI_mask_flat(cpumask_t cpumask, int vector)
+void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
- unsigned long mask = cpus_addr(cpumask)[0];
+ unsigned long mask = cpus_addr(*cpumask)[0];
unsigned long cfg;
unsigned long flags;
- /* An IPI with no target generates a send accept error from P5/P6 APICs. */
- WARN_ON(mask == 0);
+ mask &= ~(1UL << smp_processor_id());
+ if ( mask == 0 )
+ return;
local_irq_save(flags);
@@ -119,15 +125,18 @@ void send_IPI_mask_flat(cpumask_t cpumask, int vector)
local_irq_restore(flags);
}
-void send_IPI_mask_phys(cpumask_t mask, int vector)
+void send_IPI_mask_phys(const cpumask_t *mask, int vector)
{
unsigned long cfg, flags;
unsigned int query_cpu;
local_irq_save(flags);
- for_each_cpu_mask ( query_cpu, mask )
+ for_each_cpu_mask ( query_cpu, *mask )
{
+ if ( query_cpu == smp_processor_id() )
+ continue;
+
/*
* Wait for idle.
*/
@@ -170,20 +179,17 @@ fastcall void smp_invalidate_interrupt(void)
irq_exit();
}
-void flush_area_mask(cpumask_t mask, const void *va, unsigned int flags)
+void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
ASSERT(local_irq_is_enabled());
- if ( cpu_isset(smp_processor_id(), mask) )
- {
+ if ( cpu_isset(smp_processor_id(), *mask) )
flush_area_local(va, flags);
- cpu_clear(smp_processor_id(), mask);
- }
- if ( !cpus_empty(mask) )
+ if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
{
spin_lock(&flush_lock);
- flush_cpumask = mask;
+ cpus_andnot(flush_cpumask, *mask, *cpumask_of(smp_processor_id()));
flush_va = va;
flush_flags = flags;
send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
@@ -201,18 +207,16 @@ void new_tlbflush_clock_period(void)
/* Flush everyone else. We definitely flushed just before entry. */
allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
- flush_mask(allbutself, FLUSH_TLB);
+ flush_mask(&allbutself, FLUSH_TLB);
/* No need for atomicity: we are the only possible updater. */
ASSERT(tlbflush_clock == 0);
tlbflush_clock++;
}
-void smp_send_event_check_mask(cpumask_t mask)
+void smp_send_event_check_mask(const cpumask_t *mask)
{
- cpu_clear(smp_processor_id(), mask);
- if ( !cpus_empty(mask) )
- send_IPI_mask(mask, EVENT_CHECK_VECTOR);
+ send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}
/*
@@ -225,11 +229,12 @@ struct call_data_struct {
int wait;
atomic_t started;
atomic_t finished;
- cpumask_t selected;
+ const cpumask_t *selected;
};
static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
+static void __smp_call_function_interrupt(void);
int smp_call_function(
void (*func) (void *info),
@@ -239,34 +244,21 @@ int smp_call_function(
{
cpumask_t allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
- return on_selected_cpus(allbutself, func, info, retry, wait);
+ return on_selected_cpus(&allbutself, func, info, retry, wait);
}
int on_selected_cpus(
- cpumask_t selected,
+ const cpumask_t *selected,
void (*func) (void *info),
void *info,
int retry,
int wait)
{
struct call_data_struct data;
- unsigned int nr_cpus = cpus_weight(selected);
+ unsigned int nr_cpus = cpus_weight(*selected);
ASSERT(local_irq_is_enabled());
- /* Legacy UP system with no APIC to deliver IPIs? */
- if ( unlikely(!cpu_has_apic) )
- {
- ASSERT(num_online_cpus() == 1);
- if ( cpu_isset(0, selected) )
- {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
- return 0;
- }
-
if ( nr_cpus == 0 )
return 0;
@@ -283,6 +275,13 @@ int on_selected_cpus(
send_IPI_mask(selected, CALL_FUNCTION_VECTOR);
+ if ( cpu_isset(smp_processor_id(), *call_data->selected) )
+ {
+ local_irq_disable();
+ __smp_call_function_interrupt();
+ local_irq_enable();
+ }
+
while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
cpu_relax();
@@ -335,21 +334,23 @@ void smp_send_stop(void)
local_irq_enable();
}
+void smp_send_nmi_allbutself(void)
+{
+ send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
+}
+
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(ipis);
}
-fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
+static void __smp_call_function_interrupt(void)
{
void (*func)(void *info) = call_data->func;
void *info = call_data->info;
- ack_APIC_irq();
- perfc_incr(ipis);
-
- if ( !cpu_isset(smp_processor_id(), call_data->selected) )
+ if ( !cpu_isset(smp_processor_id(), *call_data->selected) )
return;
irq_enter();
@@ -369,3 +370,10 @@ fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
irq_exit();
}
+
+fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
+{
+ ack_APIC_irq();
+ perfc_incr(ipis);
+ __smp_call_function_interrupt();
+}
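The comment added at the top of smp.c pins down the new contract:
send_IPI_mask() targets only remote CPUs and tolerates an empty mask. A
caller that also needs the local CPU serviced follows the pattern
on_selected_cpus() now uses itself (a sketch assembled from the hunk above):

    /* IPI the remote members of the mask... */
    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);

    /* ...and service the local CPU inline, with interrupts off, by
     * running the same worker the IPI handler runs: */
    if ( cpu_isset(smp_processor_id(), *selected) )
    {
        local_irq_disable();
        __smp_call_function_interrupt();
        local_irq_enable();
    }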
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 9e9ff9b456..495ee844dc 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1189,7 +1189,7 @@ static void time_calibration(void *unused)
};
/* @wait=1 because we must wait for all cpus before freeing @r. */
- on_selected_cpus(r.cpu_calibration_map,
+ on_selected_cpus(&r.cpu_calibration_map,
opt_consistent_tscs
? time_calibration_tsc_rendezvous
: time_calibration_std_rendezvous,
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 08b9e2b00e..27b13f3e34 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -1,4 +1,5 @@
obj-y += bitmap.o
+obj-y += cpu.o
obj-y += domctl.o
obj-y += domain.o
obj-y += event_channel.o
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
new file mode 100644
index 0000000000..96cba72e9e
--- /dev/null
+++ b/xen/common/cpu.c
@@ -0,0 +1,26 @@
+#include <xen/config.h>
+#include <xen/cpumask.h>
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+ MASK_DECLARE_8(0), MASK_DECLARE_8(8),
+ MASK_DECLARE_8(16), MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+ MASK_DECLARE_8(32), MASK_DECLARE_8(40),
+ MASK_DECLARE_8(48), MASK_DECLARE_8(56),
+#endif
+};
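To see why cpumask_of() can hand out a constant pointer into this table, it
helps to trace the arithmetic on a standalone copy (a self-contained sketch
assuming a 64-bit build; NR_CPUS and the printf harness are illustrative,
not Xen code):

    #include <stdio.h>

    #define NR_CPUS          256
    #define BITS_TO_LONGS(n) (((n) + 63) / 64)

    #define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
    #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
    #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
    #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

    /* Row 1+n has bit n set in its word 0; row 0 stays all zero so the
     * returned pointer can safely "back into" it. */
    static const unsigned long cpu_bit_bitmap[64+1][BITS_TO_LONGS(NR_CPUS)] = {
        MASK_DECLARE_8(0),  MASK_DECLARE_8(8),
        MASK_DECLARE_8(16), MASK_DECLARE_8(24),
        MASK_DECLARE_8(32), MASK_DECLARE_8(40),
        MASK_DECLARE_8(48), MASK_DECLARE_8(56),
    };

    int main(void)
    {
        unsigned int cpu = 70;                /* arbitrary example CPU */
        /* Word 0 of row 1 + (70 % 64) has bit 6 set... */
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % 64];
        /* ...and stepping back 70/64 = 1 word makes that word land at
         * index 1 of the returned mask, i.e. exactly bit 70.  All other
         * words of the mask alias all-zero words of neighbouring rows. */
        const unsigned long *mask = p - cpu / 64;
        printf("word[%u] = %#lx\n", cpu / 64, mask[cpu / 64]);  /* 0x40 */
        return 0;
    }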
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 530635469d..7bbc05d896 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -715,7 +715,7 @@ gnttab_unmap_grant_ref(
goto fault;
}
- flush_tlb_mask(current->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&current->domain->domain_dirty_cpumask);
for ( i = 0; i < partial_done; i++ )
__gnttab_unmap_common_complete(&(common[i]));
@@ -727,7 +727,7 @@ gnttab_unmap_grant_ref(
return 0;
fault:
- flush_tlb_mask(current->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&current->domain->domain_dirty_cpumask);
for ( i = 0; i < partial_done; i++ )
__gnttab_unmap_common_complete(&(common[i]));
@@ -774,7 +774,7 @@ gnttab_unmap_and_replace(
goto fault;
}
- flush_tlb_mask(current->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&current->domain->domain_dirty_cpumask);
for ( i = 0; i < partial_done; i++ )
__gnttab_unmap_common_complete(&(common[i]));
@@ -786,7 +786,7 @@ gnttab_unmap_and_replace(
return 0;
fault:
- flush_tlb_mask(current->domain->domain_dirty_cpumask);
+ flush_tlb_mask(&current->domain->domain_dirty_cpumask);
for ( i = 0; i < partial_done; i++ )
__gnttab_unmap_common_complete(&(common[i]));
@@ -1123,7 +1123,7 @@ gnttab_transfer(
#ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */
guest_physmap_remove_page(d, gop.mfn, mfn, 0);
#endif
- flush_tlb_mask(d->domain_dirty_cpumask);
+ flush_tlb_mask(&d->domain_dirty_cpumask);
/* Find the target domain. */
if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index c481df0f3f..5407456290 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -119,7 +119,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
if ( cpu == smp_processor_id() )
continue;
printk("\n*** Dumping CPU%d host state: ***\n", cpu);
- on_selected_cpus(cpumask_of_cpu(cpu), __dump_execstate, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);
}
printk("\n");
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index bb143aedd6..bd514cfeed 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -431,7 +431,7 @@ static struct page_info *alloc_heap_pages(
if ( unlikely(!cpus_empty(mask)) )
{
perfc_incr(need_flush_tlb_flush);
- flush_tlb_mask(mask);
+ flush_tlb_mask(&mask);
}
return pg;
diff --git a/xen/include/asm-ia64/tlbflush.h b/xen/include/asm-ia64/tlbflush.h
index bb9f2e3772..705b8717e1 100644
--- a/xen/include/asm-ia64/tlbflush.h
+++ b/xen/include/asm-ia64/tlbflush.h
@@ -39,7 +39,7 @@ void domain_flush_tlb_vhpt(struct domain *d);
void flush_tlb_for_log_dirty(struct domain *d);
/* Flush v-tlb on cpus set in mask for current domain. */
-void flush_tlb_mask(cpumask_t mask);
+void flush_tlb_mask(const cpumask_t *mask);
/* Flush local machine TLB. */
void local_flush_tlb_all (void);
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index 09a8f8bd9a..9442009ee8 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -90,12 +90,12 @@ void flush_area_local(const void *va, unsigned int flags);
#define flush_local(flags) flush_area_local(NULL, flags)
/* Flush specified CPUs' TLBs/caches */
-void flush_area_mask(cpumask_t, const void *va, unsigned int flags);
+void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)
/* Flush all CPUs' TLBs/caches */
-#define flush_area_all(va, flags) flush_area_mask(cpu_online_map, va, flags)
-#define flush_all(flags) flush_mask(cpu_online_map, flags)
+#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
+#define flush_all(flags) flush_mask(&cpu_online_map, flags)
/* Flush local TLBs */
#define flush_tlb_local() \
@@ -111,8 +111,8 @@ void flush_area_mask(cpumask_t, const void *va, unsigned int flags);
/* Flush all CPUs' TLBs */
#define flush_tlb_all() \
- flush_tlb_mask(cpu_online_map)
+ flush_tlb_mask(&cpu_online_map)
#define flush_tlb_one_all(v) \
- flush_tlb_one_mask(cpu_online_map, v)
+ flush_tlb_one_mask(&cpu_online_map, v)
#endif /* __FLUSHTLB_H__ */
diff --git a/xen/include/asm-x86/genapic.h b/xen/include/asm-x86/genapic.h
index 0653335be9..f99af3aa07 100644
--- a/xen/include/asm-x86/genapic.h
+++ b/xen/include/asm-x86/genapic.h
@@ -35,7 +35,7 @@ struct genapic {
void (*clustered_apic_check)(void);
cpumask_t (*target_cpus)(void);
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
- void (*send_IPI_mask)(cpumask_t mask, int vector);
+ void (*send_IPI_mask)(const cpumask_t *mask, int vector);
};
#define APICFUNC(x) .x = x
@@ -52,7 +52,7 @@ void init_apic_ldr_flat(void);
void clustered_apic_check_flat(void);
cpumask_t target_cpus_flat(void);
unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
-void send_IPI_mask_flat(cpumask_t mask, int vector);
+void send_IPI_mask_flat(const cpumask_t *mask, int vector);
#define GENAPIC_FLAT \
.int_delivery_mode = dest_LowestPrio, \
.int_dest_mode = 1 /* logical delivery */, \
@@ -66,7 +66,7 @@ void init_apic_ldr_x2apic(void);
void clustered_apic_check_x2apic(void);
cpumask_t target_cpus_x2apic(void);
unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask);
-void send_IPI_mask_x2apic(cpumask_t mask, int vector);
+void send_IPI_mask_x2apic(const cpumask_t *mask, int vector);
#define GENAPIC_X2APIC \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
@@ -80,7 +80,7 @@ void init_apic_ldr_phys(void);
void clustered_apic_check_phys(void);
cpumask_t target_cpus_phys(void);
unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
-void send_IPI_mask_phys(cpumask_t mask, int vector);
+void send_IPI_mask_phys(const cpumask_t *mask, int vector);
#define GENAPIC_PHYS \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
diff --git a/xen/include/asm-x86/ipi.h b/xen/include/asm-x86/ipi.h
deleted file mode 100644
index da8c94031f..0000000000
--- a/xen/include/asm-x86/ipi.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ASM_IPI_H
-#define __ASM_IPI_H
-
-#include <asm/genapic.h>
-
-#define send_IPI_mask (genapic->send_IPI_mask)
-
-#endif /* __ASM_IPI_H */
diff --git a/xen/include/asm-x86/smp.h b/xen/include/asm-x86/smp.h
index c62c53fce6..6d566d1a05 100644
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -35,6 +35,8 @@ extern int pic_mode;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
+void smp_send_nmi_allbutself(void);
+
extern void (*mtrr_hook) (void);
#ifdef CONFIG_X86_64
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 23dfd09a14..43f538eba4 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -80,7 +80,6 @@
#include <xen/kernel.h>
typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-extern cpumask_t _unused_cpumask_arg_;
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
@@ -244,17 +243,23 @@ static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
return nxt;
}
-#define cpumask_of_cpu(cpu) \
-({ \
- typeof(_unused_cpumask_arg_) m; \
- if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(cpu); \
- } else { \
- cpus_clear(m); \
- cpu_set((cpu), m); \
- } \
- m; \
-})
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *cpumask_of(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
+}
+
+#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
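Two properties of the replacement are worth noting (a usage sketch; the call
site is hypothetical): the returned mask lives in the shared read-only
table, so callers must never write through it, and the surviving
cpumask_of_cpu() spelling is now just a dereference for code that still
wants a copy by value:

    /* Single-CPU mask with no stack temporary; treat it as immutable. */
    on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);

    /* Where a cpumask_t object is genuinely needed, copy the constant: */
    cpumask_t m = cpumask_of_cpu(cpu);    /* expands to *cpumask_of(cpu) */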
diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h
index 6ab913a430..93b0713205 100644
--- a/xen/include/xen/smp.h
+++ b/xen/include/xen/smp.h
@@ -9,9 +9,9 @@
*/
extern void smp_send_stop(void);
-extern void smp_send_event_check_mask(cpumask_t mask);
+extern void smp_send_event_check_mask(const cpumask_t *mask);
#define smp_send_event_check_cpu(cpu) \
- smp_send_event_check_mask(cpumask_of_cpu(cpu))
+ smp_send_event_check_mask(cpumask_of(cpu))
/*
* Prepare machine for booting other CPUs.
@@ -41,7 +41,7 @@ extern int smp_call_function(
* Call a function on a selection of processors
*/
extern int on_selected_cpus(
- cpumask_t selected,
+ const cpumask_t *selected,
void (*func) (void *info),
void *info,
int retry,
@@ -62,7 +62,7 @@ static inline int on_each_cpu(
int retry,
int wait)
{
- return on_selected_cpus(cpu_online_map, func, info, retry, wait);
+ return on_selected_cpus(&cpu_online_map, func, info, retry, wait);
}
#define smp_processor_id() raw_smp_processor_id()
diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h
index 0b59f63b22..efd0a54443 100644
--- a/xen/include/xen/softirq.h
+++ b/xen/include/xen/softirq.h
@@ -39,7 +39,7 @@ static inline void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
cpu_clear(cpu, mask);
}
- smp_send_event_check_mask(mask);
+ smp_send_event_check_mask(&mask);
}
static inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)