author     Jan Beulich <jbeulich@suse.com>   2011-10-21 09:21:09 +0200
committer  Jan Beulich <jbeulich@suse.com>   2011-10-21 09:21:09 +0200
commit     4f3e36d7ad7cd23f8df44e39fb14313a8a809e3d (patch)
tree       495538ea99a0b2c90eb6f18c87fd48fab2f4ec3e /xen/arch/x86
parent     3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e (diff)
eliminate direct assignments of CPU masks
Use cpumask_copy() instead of direct variable assignments for copying CPU masks. While direct assignments are not a problem when both sides are variables actually defined as cpumask_t (except for possibly copying *much* more than actually needs to be copied), they must not happen when the original variable is of type cpumask_var_t (which may have less space allocated to it than a full cpumask_t). Eliminate as many such assignments as possible (in several cases it is even possible to collapse two operations [copy, then clear one bit] into one [cpumask_andnot()]), and thus pave the way for reducing the allocation size in alloc_cpumask_var().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
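The conversion pattern applied throughout the diff below, as a minimal sketch rather than code lifted from the tree (the function name, the local "mask" and the exact header names are assumptions; cpumask_copy(), cpumask_andnot(), cpumask_of() and cpu_online_map are the primitives the patch actually switches to):

    #include <xen/cpumask.h>
    #include <xen/smp.h>

    static void cpumask_copy_sketch(void)
    {
        cpumask_t mask;

        /* Old style, removed by the patch: a struct assignment copies a
         * full cpumask_t (and would overrun a smaller cpumask_var_t
         * allocation), followed by a separate bit clear:
         *
         *     mask = cpu_online_map;
         *     cpu_clear(smp_processor_id(), mask);
         */

        /* New style: the copy and the single-bit clear collapse into one
         * cpumask_andnot() call. */
        cpumask_andnot(&mask, &cpu_online_map,
                       cpumask_of(smp_processor_id()));

        /* Plain copies go through the accessor instead of "=". */
        cpumask_copy(&mask, &cpu_online_map);
    }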
Diffstat (limited to 'xen/arch/x86')
-rw-r--r--  xen/arch/x86/acpi/cpufreq/powernow.c  |  2
-rw-r--r--  xen/arch/x86/cpu/mtrr/main.c          |  6
-rw-r--r--  xen/arch/x86/irq.c                    | 14
-rw-r--r--  xen/arch/x86/numa.c                   |  2
-rw-r--r--  xen/arch/x86/smp.c                    | 24
-rw-r--r--  xen/arch/x86/smpboot.c                |  3
-rw-r--r--  xen/arch/x86/time.c                   | 16
7 files changed, 35 insertions, 32 deletions
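The payoff named in the message, a smaller alloc_cpumask_var(), only works once nothing copies masks with "=": a struct assignment always moves sizeof(cpumask_t) bytes, while the accessor functions stop at the bits actually in use. Below is a hedged sketch of that idea, not Xen's actual implementation (the *_sketch names are invented for illustration; xmalloc_bytes(), BITS_TO_LONGS() and nr_cpu_ids are existing primitives):

    #include <xen/cpumask.h>
    #include <xen/xmalloc.h>

    /* Hypothetical reduced-size variant: allocate only enough longs to
     * cover nr_cpu_ids bits instead of a full NR_CPUS-sized cpumask_t. */
    typedef cpumask_t *cpumask_var_sketch_t;

    static int alloc_cpumask_var_sketch(cpumask_var_sketch_t *m)
    {
        *m = xmalloc_bytes(BITS_TO_LONGS(nr_cpu_ids) * sizeof(long));
        return *m != NULL;
    }

    /* Safe only if every consumer uses cpumask_copy()/cpumask_andnot()/...
     * on the result; "*dst = *src" would read or write past the smaller
     * allocation whenever NR_CPUS exceeds nr_cpu_ids (rounded up to a
     * whole number of longs). */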
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c
index 715373fc03..846f9ca8e9 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -107,7 +107,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
if (unlikely(result))
return -ENODEV;
- online_policy_cpus = policy->cpus;
+ cpumask_and(&online_policy_cpus, &policy->cpus, &cpu_online_map);
next_perf_state = data->freq_table[next_state].index;
if (perf->state == next_perf_state) {
diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c
index 554b4e3d8d..e685c189f5 100644
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -231,9 +231,9 @@ static void set_mtrr(unsigned int reg, unsigned long base,
struct set_mtrr_data data;
unsigned long flags;
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- nr_cpus = cpus_weight(allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
data.smp_reg = reg;
data.smp_base = base;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 4a93f29d76..40326f2bfe 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1040,7 +1040,7 @@ static void irq_guest_eoi_timer_fn(void *data)
desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
@@ -1366,9 +1366,9 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
ASSERT(action->ack_type == ACKTYPE_EOI);
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
- if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+ if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
__set_eoi_ready(desc);
spin_unlock(&desc->lock);
@@ -1380,7 +1380,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
spin_unlock_irq(&desc->lock);
}
- if ( !cpus_empty(cpu_eoi_map) )
+ if ( !cpumask_empty(&cpu_eoi_map) )
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
}
@@ -1619,7 +1619,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
@@ -1649,8 +1649,8 @@ static irq_guest_action_t *__pirq_guest_unbind(
* would need to flush all ready EOIs before returning as otherwise the
* desc->handler could change and we would call the wrong 'end' hook.
*/
- cpu_eoi_map = action->cpu_eoi_map;
- if ( !cpus_empty(cpu_eoi_map) )
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ if ( !cpumask_empty(&cpu_eoi_map) )
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_unlock_irq(&desc->lock);
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 61b5904a2c..83e9ee38bc 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -282,7 +282,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
node_set_online(0);
for (i = 0; i < nr_cpu_ids; i++)
numa_set_node(i, 0);
- node_to_cpumask[0] = cpumask_of_cpu(0);
+ cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
}
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index d81ab79576..d9b5e5c498 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -257,8 +257,8 @@ void new_tlbflush_clock_period(void)
cpumask_t allbutself;
/* Flush everyone else. We definitely flushed just before entry. */
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
flush_mask(&allbutself, FLUSH_TLB);
/* No need for atomicity: we are the only possible updater. */
@@ -289,8 +289,10 @@ void smp_call_function(
void *info,
int wait)
{
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_t allbutself;
+
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
on_selected_cpus(&allbutself, func, info, wait);
}
@@ -306,9 +308,9 @@ void on_selected_cpus(
spin_lock(&call_lock);
- call_data.selected = *selected;
+ cpumask_copy(&call_data.selected, selected);
- nr_cpus = cpus_weight(call_data.selected);
+ nr_cpus = cpumask_weight(&call_data.selected);
if ( nr_cpus == 0 )
goto out;
@@ -318,14 +320,14 @@ void on_selected_cpus(
send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
- if ( cpu_isset(smp_processor_id(), call_data.selected) )
+ if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
{
local_irq_disable();
__smp_call_function_interrupt();
local_irq_enable();
}
- while ( !cpus_empty(call_data.selected) )
+ while ( !cpumask_empty(&call_data.selected) )
cpu_relax();
out:
@@ -396,7 +398,7 @@ static void __smp_call_function_interrupt(void)
void *info = call_data.info;
unsigned int cpu = smp_processor_id();
- if ( !cpu_isset(cpu, call_data.selected) )
+ if ( !cpumask_test_cpu(cpu, &call_data.selected) )
return;
irq_enter();
@@ -405,12 +407,12 @@ static void __smp_call_function_interrupt(void)
{
(*func)(info);
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
}
else
{
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
(*func)(info);
}
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 95a1116f84..a01698beb2 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -267,7 +267,8 @@ static void set_cpu_sibling_map(int cpu)
if ( c[cpu].x86_max_cores == 1 )
{
- per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+ cpumask_copy(&per_cpu(cpu_core_map, cpu),
+ &per_cpu(cpu_sibling_map, cpu));
c[cpu].booted_cores = 1;
return;
}
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 69b31193fe..e47e1b281d 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1232,10 +1232,10 @@ static void tsc_check_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
- while ( !cpu_isset(cpu, tsc_check_cpumask) )
+ while ( !cpumask_test_cpu(cpu, &tsc_check_cpumask) )
mb();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
+ cpumask_clear_cpu(cpu, &tsc_check_cpumask);
local_irq_enable();
}
@@ -1248,12 +1248,11 @@ static void tsc_check_reliability(void)
tsc_check_count++;
smp_call_function(tsc_check_slave, NULL, 0);
- tsc_check_cpumask = cpu_online_map;
+ cpumask_andnot(&tsc_check_cpumask, &cpu_online_map, cpumask_of(cpu));
local_irq_disable();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
local_irq_enable();
- while ( !cpus_empty(tsc_check_cpumask) )
+ while ( !cpumask_empty(&tsc_check_cpumask) )
cpu_relax();
spin_unlock(&lock);
@@ -1280,7 +1279,7 @@ static void time_calibration_tsc_rendezvous(void *_r)
int i;
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
/* Loop to get rid of cache effects on TSC skew. */
for ( i = 4; i >= 0; i-- )
@@ -1331,7 +1330,7 @@ static void time_calibration_std_rendezvous(void *_r)
{
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
if ( smp_processor_id() == 0 )
{
@@ -1362,10 +1361,11 @@ static void (*time_calibration_rendezvous_fn)(void *) =
static void time_calibration(void *unused)
{
struct calibration_rendezvous r = {
- .cpu_calibration_map = cpu_online_map,
.semaphore = ATOMIC_INIT(0)
};
+ cpumask_copy(&r.cpu_calibration_map, &cpu_online_map);
+
/* @wait=1 because we must wait for all cpus before freeing @r. */
on_selected_cpus(&r.cpu_calibration_map,
time_calibration_rendezvous_fn,