about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  xen/arch/x86/acpi/cpufreq/powernow.c |  2
-rw-r--r--  xen/arch/x86/cpu/mtrr/main.c         |  6
-rw-r--r--  xen/arch/x86/irq.c                   | 14
-rw-r--r--  xen/arch/x86/numa.c                  |  2
-rw-r--r--  xen/arch/x86/smp.c                   | 24
-rw-r--r--  xen/arch/x86/smpboot.c               |  3
-rw-r--r--  xen/arch/x86/time.c                  | 16
-rw-r--r--  xen/common/keyhandler.c              | 17
-rw-r--r--  xen/common/rcupdate.c                |  9
-rw-r--r--  xen/common/stop_machine.c            |  6
-rw-r--r--  xen/include/xen/cpumask.h            | 25
-rw-r--r--  xen/include/xen/irq.h                |  2
12 files changed, 64 insertions(+), 62 deletions(-)
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c
index 715373fc03..846f9ca8e9 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -107,7 +107,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
if (unlikely(result))
return -ENODEV;
- online_policy_cpus = policy->cpus;
+ cpumask_and(&online_policy_cpus, &policy->cpus, &cpu_online_map);
next_perf_state = data->freq_table[next_state].index;
if (perf->state == next_perf_state) {
diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c
index 554b4e3d8d..e685c189f5 100644
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -231,9 +231,9 @@ static void set_mtrr(unsigned int reg, unsigned long base,
struct set_mtrr_data data;
unsigned long flags;
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- nr_cpus = cpus_weight(allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
data.smp_reg = reg;
data.smp_base = base;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 4a93f29d76..40326f2bfe 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1040,7 +1040,7 @@ static void irq_guest_eoi_timer_fn(void *data)
desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
@@ -1366,9 +1366,9 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
ASSERT(action->ack_type == ACKTYPE_EOI);
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
- if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+ if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
__set_eoi_ready(desc);
spin_unlock(&desc->lock);
@@ -1380,7 +1380,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
spin_unlock_irq(&desc->lock);
}
- if ( !cpus_empty(cpu_eoi_map) )
+ if ( !cpumask_empty(&cpu_eoi_map) )
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
}
@@ -1619,7 +1619,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
- cpu_eoi_map = action->cpu_eoi_map;
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
@@ -1649,8 +1649,8 @@ static irq_guest_action_t *__pirq_guest_unbind(
* would need to flush all ready EOIs before returning as otherwise the
* desc->handler could change and we would call the wrong 'end' hook.
*/
- cpu_eoi_map = action->cpu_eoi_map;
- if ( !cpus_empty(cpu_eoi_map) )
+ cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ if ( !cpumask_empty(&cpu_eoi_map) )
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_unlock_irq(&desc->lock);
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index 61b5904a2c..83e9ee38bc 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -282,7 +282,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
node_set_online(0);
for (i = 0; i < nr_cpu_ids; i++)
numa_set_node(i, 0);
- node_to_cpumask[0] = cpumask_of_cpu(0);
+ cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
}
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index d81ab79576..d9b5e5c498 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -257,8 +257,8 @@ void new_tlbflush_clock_period(void)
cpumask_t allbutself;
/* Flush everyone else. We definitely flushed just before entry. */
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
flush_mask(&allbutself, FLUSH_TLB);
/* No need for atomicity: we are the only possible updater. */
@@ -289,8 +289,10 @@ void smp_call_function(
void *info,
int wait)
{
- cpumask_t allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
+ cpumask_t allbutself;
+
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
on_selected_cpus(&allbutself, func, info, wait);
}
@@ -306,9 +308,9 @@ void on_selected_cpus(
spin_lock(&call_lock);
- call_data.selected = *selected;
+ cpumask_copy(&call_data.selected, selected);
- nr_cpus = cpus_weight(call_data.selected);
+ nr_cpus = cpumask_weight(&call_data.selected);
if ( nr_cpus == 0 )
goto out;
@@ -318,14 +320,14 @@ void on_selected_cpus(
send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
- if ( cpu_isset(smp_processor_id(), call_data.selected) )
+ if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
{
local_irq_disable();
__smp_call_function_interrupt();
local_irq_enable();
}
- while ( !cpus_empty(call_data.selected) )
+ while ( !cpumask_empty(&call_data.selected) )
cpu_relax();
out:
@@ -396,7 +398,7 @@ static void __smp_call_function_interrupt(void)
void *info = call_data.info;
unsigned int cpu = smp_processor_id();
- if ( !cpu_isset(cpu, call_data.selected) )
+ if ( !cpumask_test_cpu(cpu, &call_data.selected) )
return;
irq_enter();
@@ -405,12 +407,12 @@ static void __smp_call_function_interrupt(void)
{
(*func)(info);
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
}
else
{
mb();
- cpu_clear(cpu, call_data.selected);
+ cpumask_clear_cpu(cpu, &call_data.selected);
(*func)(info);
}
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 95a1116f84..a01698beb2 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -267,7 +267,8 @@ static void set_cpu_sibling_map(int cpu)
if ( c[cpu].x86_max_cores == 1 )
{
- per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+ cpumask_copy(&per_cpu(cpu_core_map, cpu),
+ &per_cpu(cpu_sibling_map, cpu));
c[cpu].booted_cores = 1;
return;
}
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 69b31193fe..e47e1b281d 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1232,10 +1232,10 @@ static void tsc_check_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
- while ( !cpu_isset(cpu, tsc_check_cpumask) )
+ while ( !cpumask_test_cpu(cpu, &tsc_check_cpumask) )
mb();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
+ cpumask_clear_cpu(cpu, &tsc_check_cpumask);
local_irq_enable();
}
@@ -1248,12 +1248,11 @@ static void tsc_check_reliability(void)
tsc_check_count++;
smp_call_function(tsc_check_slave, NULL, 0);
- tsc_check_cpumask = cpu_online_map;
+ cpumask_andnot(&tsc_check_cpumask, &cpu_online_map, cpumask_of(cpu));
local_irq_disable();
check_tsc_warp(cpu_khz, &tsc_max_warp);
- cpu_clear(cpu, tsc_check_cpumask);
local_irq_enable();
- while ( !cpus_empty(tsc_check_cpumask) )
+ while ( !cpumask_empty(&tsc_check_cpumask) )
cpu_relax();
spin_unlock(&lock);
@@ -1280,7 +1279,7 @@ static void time_calibration_tsc_rendezvous(void *_r)
int i;
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
/* Loop to get rid of cache effects on TSC skew. */
for ( i = 4; i >= 0; i-- )
@@ -1331,7 +1330,7 @@ static void time_calibration_std_rendezvous(void *_r)
{
struct cpu_calibration *c = &this_cpu(cpu_calibration);
struct calibration_rendezvous *r = _r;
- unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+ unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
if ( smp_processor_id() == 0 )
{
@@ -1362,10 +1361,11 @@ static void (*time_calibration_rendezvous_fn)(void *) =
static void time_calibration(void *unused)
{
struct calibration_rendezvous r = {
- .cpu_calibration_map = cpu_online_map,
.semaphore = ATOMIC_INIT(0)
};
+ cpumask_copy(&r.cpu_calibration_map, &cpu_online_map);
+
/* @wait=1 because we must wait for all cpus before freeing @r. */
on_selected_cpus(&r.cpu_calibration_map,
time_calibration_rendezvous_fn,
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 51cfc3cc98..25c5215023 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -93,11 +93,11 @@ void dump_execstate(struct cpu_user_regs *regs)
printk("\n");
}
- cpu_clear(cpu, dump_execstate_mask);
+ cpumask_clear_cpu(cpu, &dump_execstate_mask);
if ( !alt_key_handling )
return;
- cpu = cycle_cpu(cpu, dump_execstate_mask);
+ cpu = cpumask_cycle(cpu, &dump_execstate_mask);
if ( cpu < nr_cpu_ids )
{
smp_send_state_dump(cpu);
@@ -118,7 +118,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
printk("'%c' pressed -> dumping registers\n\n", key);
- dump_execstate_mask = cpu_online_map;
+ cpumask_copy(&dump_execstate_mask, &cpu_online_map);
/* Get local execution state out immediately, in case we get stuck. */
dump_execstate(regs);
@@ -131,7 +131,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
for_each_cpu_mask ( cpu, dump_execstate_mask )
{
smp_send_state_dump(cpu);
- while ( cpu_isset(cpu, dump_execstate_mask) )
+ while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
cpu_relax();
}
@@ -324,11 +324,11 @@ static void read_clocks_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
- while ( !cpu_isset(cpu, read_clocks_cpumask) )
+ while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
cpu_relax();
per_cpu(read_clocks_time, cpu) = NOW();
per_cpu(read_cycles_time, cpu) = get_cycles();
- cpu_clear(cpu, read_clocks_cpumask);
+ cpumask_clear_cpu(cpu, &read_clocks_cpumask);
local_irq_enable();
}
@@ -348,13 +348,12 @@ static void read_clocks(unsigned char key)
smp_call_function(read_clocks_slave, NULL, 0);
local_irq_disable();
- read_clocks_cpumask = cpu_online_map;
+ cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
per_cpu(read_clocks_time, cpu) = NOW();
per_cpu(read_cycles_time, cpu) = get_cycles();
- cpu_clear(cpu, read_clocks_cpumask);
local_irq_enable();
- while ( !cpus_empty(read_clocks_cpumask) )
+ while ( !cpumask_empty(&read_clocks_cpumask) )
cpu_relax();
min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 189681174b..b14612aff8 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -161,8 +161,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
* Don't send IPI to itself. With irqs disabled,
* rdp->cpu is the current cpu.
*/
- cpumask = rcp->cpumask;
- cpu_clear(rdp->cpu, cpumask);
+ cpumask_andnot(&cpumask, &rcp->cpumask, cpumask_of(rdp->cpu));
cpumask_raise_softirq(&cpumask, SCHEDULE_SOFTIRQ);
}
}
@@ -258,7 +257,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
smp_wmb();
rcp->cur++;
- rcp->cpumask = cpu_online_map;
+ cpumask_copy(&rcp->cpumask, &cpu_online_map);
}
}
@@ -269,8 +268,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
*/
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
- cpu_clear(cpu, rcp->cpumask);
- if (cpus_empty(rcp->cpumask)) {
+ cpumask_clear_cpu(cpu, &rcp->cpumask);
+ if (cpumask_empty(&rcp->cpumask)) {
/* batch completed ! */
rcp->completed = rcp->cur;
rcu_start_batch(rcp);
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index 9bf271452d..eb38da58f8 100644
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -81,9 +81,9 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
if ( !get_cpu_maps() )
return -EBUSY;
- allbutself = cpu_online_map;
- cpu_clear(smp_processor_id(), allbutself);
- nr_cpus = cpus_weight(allbutself);
+ cpumask_andnot(&allbutself, &cpu_online_map,
+ cpumask_of(smp_processor_id()));
+ nr_cpus = cpumask_weight(&allbutself);
/* Must not spin here as the holder will expect us to be descheduled. */
if ( !spin_trylock(&stopmachine_lock) )
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 3c9c3446d6..452a822435 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -359,11 +359,14 @@ typedef cpumask_t *cpumask_var_t;
static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
{
- /*
- * Once all direct cpumask assignments are gone, we could use
- * nr_cpumask_bits to determine the allocation size here.
- */
- return (*mask = xmalloc(cpumask_t)) != NULL;
+ *(void **)mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
+ return *mask != NULL;
+}
+
+static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
+{
+ *(void **)mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
+ return *mask != NULL;
}
static inline void free_cpumask_var(cpumask_var_t mask)
@@ -378,19 +381,17 @@ static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
return 1;
}
-static inline void free_cpumask_var(cpumask_var_t mask)
-{
-}
-#endif
-
static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
{
- if (!alloc_cpumask_var(mask))
- return 0;
cpumask_clear(*mask);
return 1;
}
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif
+
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index fba644b1ee..f20c294efe 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -153,7 +153,7 @@ extern irq_desc_t *pirq_spin_lock_irq_desc(
static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
{
- irq_desc[irq].affinity = *mask;
+ cpumask_copy(&irq_desc[irq].affinity, mask);
}
unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);