path: root/xen/arch/x86/smp.c
author    Jan Beulich <jbeulich@suse.com>    2011-10-21 09:21:09 +0200
committer Jan Beulich <jbeulich@suse.com>    2011-10-21 09:21:09 +0200
commit    4f3e36d7ad7cd23f8df44e39fb14313a8a809e3d (patch)
tree      495538ea99a0b2c90eb6f18c87fd48fab2f4ec3e /xen/arch/x86/smp.c
parent    3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e (diff)
eliminate direct assignments of CPU masks
Use cpumask_copy() instead of direct variable assignments for copying CPU masks. While direct assignments are not a problem when both sides are variables actually defined as cpumask_t (except for possibly copying *much* more than would actually need to be copied), they must not happen when the original variable is of type cpumask_var_t (which may have less space allocated to it than a full cpumask_t).

Eliminate as many such assignments as possible (in several cases it is even possible to collapse two operations [copy, then clear one bit] into one [cpumask_andnot()]), and thus pave the way for reducing the allocation size in alloc_cpumask_var().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
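For readers unfamiliar with the cpumask API, the sketch below is not part of the patch; it simply mirrors the new_tlbflush_clock_period() hunk further down. The function name flush_all_but_self_example() is hypothetical, and the usual Xen headers (e.g. xen/cpumask.h, asm/flushtlb.h) are assumed to be in scope, as they are in xen/arch/x86/smp.c:

/* Illustrative sketch only -- mirrors the first hunk of the patch. */
static void flush_all_but_self_example(void)
{
    cpumask_t allbutself;

    /*
     * Old style: a structure assignment followed by a separate bit clear.
     *     allbutself = cpu_online_map;
     *     cpu_clear(smp_processor_id(), allbutself);
     * The assignment copies a full cpumask_t even when far fewer CPUs
     * exist, and would read out of bounds if the source were a
     * cpumask_var_t with less space allocated than sizeof(cpumask_t).
     */

    /* New style: cpumask_andnot() copies and clears the bit in one step. */
    cpumask_andnot(&allbutself, &cpu_online_map,
                   cpumask_of(smp_processor_id()));

    /* Flush everyone else, exactly as the patched code does. */
    flush_mask(&allbutself, FLUSH_TLB);
}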
Diffstat (limited to 'xen/arch/x86/smp.c')
-rw-r--r--  xen/arch/x86/smp.c | 24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index d81ab79576..d9b5e5c498 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -257,8 +257,8 @@ void new_tlbflush_clock_period(void)
cpumask_t allbutself;
/* Flush everyone else. We definitely flushed just before entry. */
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
flush_mask(&allbutself, FLUSH_TLB);
/* No need for atomicity: we are the only possible updater. */
@@ -289,8 +289,10 @@ void smp_call_function(
void *info,
int wait)
{
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_t allbutself;
+
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
on_selected_cpus(&allbutself, func, info, wait);
}
@@ -306,9 +308,9 @@ void on_selected_cpus(
spin_lock(&call_lock);
- call_data.selected = *selected;
+ cpumask_copy(&call_data.selected, selected);
- nr_cpus = cpus_weight(call_data.selected);
+ nr_cpus = cpumask_weight(&call_data.selected);
if ( nr_cpus == 0 )
goto out;
@@ -318,14 +320,14 @@ void on_selected_cpus(
send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
- if ( cpu_isset(smp_processor_id(), call_data.selected) )
+ if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
{
local_irq_disable();
__smp_call_function_interrupt();
local_irq_enable();
}
- while ( !cpus_empty(call_data.selected) )
+ while ( !cpumask_empty(&call_data.selected) )
cpu_relax();
out:
@@ -396,7 +398,7 @@ static void __smp_call_function_interrupt(void)
void *info = call_data.info;
unsigned int cpu = smp_processor_id();
- if ( !cpu_isset(cpu, call_data.selected) )
+ if ( !cpumask_test_cpu(cpu, &call_data.selected) )
return;
irq_enter();
@@ -405,12 +407,12 @@ static void __smp_call_function_interrupt(void)
{
(*func)(info);
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
}
else
{
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
(*func)(info);
}