author     Jan Beulich <jbeulich@suse.com>    2011-11-08 10:36:10 +0100
committer  Jan Beulich <jbeulich@suse.com>    2011-11-08 10:36:10 +0100
commit     22bdce1c04881c0e909817ee11f7cecd6c852d8c
tree       445ece053822f839c700f30d0159b4b73da68465 /xen
parent     fe9ff5a9d54cced306aefaba126340e0c596717e
eliminate first_cpu() etc
This includes the conversion from for_each_cpu_mask() to for_each_cpu().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
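[Editorial note: a minimal sketch of the conversion pattern this patch applies tree-wide. The caller below is hypothetical and not part of the commit; it only contrasts the old by-value macros with the new pointer-based, Linux-style accessors.]

    /* Hypothetical caller, not from the patch: the old macros took a
     * cpumask_t by value (forcing "*mask" at call sites), while the new
     * accessors take the pointer directly. */
    static unsigned int count_online(const cpumask_t *mask)
    {
        unsigned int cpu, n = 0;

        /* Before: for_each_cpu_mask(cpu, *mask), first_cpu(*mask). */
        /* After:                                                   */
        for_each_cpu ( cpu, mask )
            if ( cpu_online(cpu) )
                ++n;

        return n;
    }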
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/ia64/linux-xen/iosapic.c          |  6
-rw-r--r--  xen/arch/ia64/linux-xen/mca.c              |  4
-rw-r--r--  xen/arch/ia64/linux-xen/smp.c              |  2
-rw-r--r--  xen/arch/ia64/linux-xen/smpboot.c          |  4
-rw-r--r--  xen/arch/ia64/vmx/vacpi.c                  |  2
-rw-r--r--  xen/arch/ia64/xen/dom0_ops.c               |  2
-rw-r--r--  xen/arch/ia64/xen/domain.c                 |  2
-rw-r--r--  xen/arch/ia64/xen/vhpt.c                   |  4
-rw-r--r--  xen/arch/x86/acpi/cpu_idle.c               |  2
-rw-r--r--  xen/arch/x86/acpi/cpufreq/cpufreq.c        |  2
-rw-r--r--  xen/arch/x86/acpi/cpufreq/powernow.c       |  2
-rw-r--r--  xen/arch/x86/genapic/x2apic.c              |  2
-rw-r--r--  xen/arch/x86/hpet.c                        |  2
-rw-r--r--  xen/arch/x86/irq.c                         | 12
-rw-r--r--  xen/arch/x86/microcode.c                   |  4
-rw-r--r--  xen/arch/x86/platform_hypercall.c          |  4
-rw-r--r--  xen/arch/x86/setup.c                       |  8
-rw-r--r--  xen/arch/x86/smp.c                         |  2
-rw-r--r--  xen/arch/x86/smpboot.c                     |  8
-rw-r--r--  xen/arch/x86/sysctl.c                      |  2
-rw-r--r--  xen/common/cpu.c                           |  2
-rw-r--r--  xen/common/cpupool.c                       |  2
-rw-r--r--  xen/common/domctl.c                        |  4
-rw-r--r--  xen/common/keyhandler.c                    |  2
-rw-r--r--  xen/common/perfc.c                         |  4
-rw-r--r--  xen/common/sched_credit2.c                 | 16
-rw-r--r--  xen/common/sched_sedf.c                    |  4
-rw-r--r--  xen/common/schedule.c                      |  2
-rw-r--r--  xen/common/softirq.c                       |  2
-rw-r--r--  xen/common/stop_machine.c                  |  2
-rw-r--r--  xen/common/timer.c                         |  2
-rw-r--r--  xen/drivers/acpi/pmstat.c                  |  2
-rw-r--r--  xen/drivers/cpufreq/cpufreq_ondemand.c     |  4
-rw-r--r--  xen/drivers/passthrough/vtd/iommu.c        |  2
-rw-r--r--  xen/include/asm-ia64/linux-xen/asm/acpi.h  |  2
-rw-r--r--  xen/include/asm-x86/flushtlb.h             |  2
-rw-r--r--  xen/include/xen/cpumask.h                  | 84
37 files changed, 103 insertions, 111 deletions
diff --git a/xen/arch/ia64/linux-xen/iosapic.c b/xen/arch/ia64/linux-xen/iosapic.c
index f9a5608f38..b5e42717fb 100644
--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -704,7 +704,7 @@ get_target_cpu (unsigned int gsi, int vector)
cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
- for_each_cpu_mask(numa_cpu, cpu_mask) {
+ for_each_cpu(numa_cpu, &cpu_mask) {
if (!cpu_online(numa_cpu))
cpumask_clear_cpu(numa_cpu, &cpu_mask);
}
@@ -717,8 +717,8 @@ get_target_cpu (unsigned int gsi, int vector)
/* Use vector assigment to distribute across cpus in node */
cpu_index = vector % num_cpus;
- for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
- numa_cpu = next_cpu(numa_cpu, cpu_mask);
+ for (numa_cpu = cpumask_first(&cpu_mask) ; i < cpu_index ; i++)
+ numa_cpu = cpumask_next(numa_cpu, &cpu_mask);
if (numa_cpu != NR_CPUS)
return cpu_physical_id(numa_cpu);
diff --git a/xen/arch/ia64/linux-xen/mca.c b/xen/arch/ia64/linux-xen/mca.c
index 8722492edc..7d50fa4f06 100644
--- a/xen/arch/ia64/linux-xen/mca.c
+++ b/xen/arch/ia64/linux-xen/mca.c
@@ -1415,7 +1415,7 @@ ia64_mca_cmc_poll (void *dummy)
#endif
{
/* Trigger a CMC interrupt cascade */
- platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+ platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
@@ -1505,7 +1505,7 @@ ia64_mca_cpe_poll (void *dummy)
#endif
{
/* Trigger a CPE interrupt cascade */
- platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+ platform_send_ipi(cpumask_first(&cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}
#endif /* CONFIG_ACPI */
diff --git a/xen/arch/ia64/linux-xen/smp.c b/xen/arch/ia64/linux-xen/smp.c
index 5fe6651e20..526e6eec01 100644
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -462,7 +462,7 @@ on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
call_data = &data;
wmb();
- for_each_cpu_mask(cpu, *selected)
+ for_each_cpu(cpu, selected)
send_IPI_single(cpu, IPI_CALL_FUNC);
while (atomic_read(wait ? &data.finished : &data.started) != nr_cpus)
diff --git a/xen/arch/ia64/linux-xen/smpboot.c b/xen/arch/ia64/linux-xen/smpboot.c
index 9e2de6b708..f9ee4fd531 100644
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -687,9 +687,9 @@ clear_cpu_sibling_map(int cpu)
{
int i;
- for_each_cpu_mask(i, *per_cpu(cpu_sibling_mask, cpu))
+ for_each_cpu(i, per_cpu(cpu_sibling_mask, cpu))
cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, i));
- for_each_cpu_mask(i, *per_cpu(cpu_core_mask, cpu))
+ for_each_cpu(i, per_cpu(cpu_core_mask, cpu))
cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, i));
cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
diff --git a/xen/arch/ia64/vmx/vacpi.c b/xen/arch/ia64/vmx/vacpi.c
index d71ec011c2..1720aebcc7 100644
--- a/xen/arch/ia64/vmx/vacpi.c
+++ b/xen/arch/ia64/vmx/vacpi.c
@@ -191,7 +191,7 @@ void vacpi_init(struct domain *d)
s->last_gtime = NOW();
/* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
- init_timer(&s->timer, pmt_timer_callback, d, first_cpu(cpu_online_map));
+ init_timer(&s->timer, pmt_timer_callback, d, cpumask_first(&cpu_online_map));
pmt_timer_callback(d);
}
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index 65dab551e3..92425096f4 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -618,7 +618,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
XEN_GUEST_HANDLE_64(uint32) arr;
uint32_t i, val, max_array_ent = ti->max_cpu_index;
- ti->max_cpu_index = last_cpu(cpu_online_map);
+ ti->max_cpu_index = cpumask_last(&cpu_online_map);
max_array_ent = min(max_array_ent, ti->max_cpu_index);
arr = ti->cpu_to_core;
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index e48e31876b..d439e0a1b1 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -501,7 +501,7 @@ int vcpu_initialise(struct vcpu *v)
if (!VMX_DOMAIN(v))
init_timer(&v->arch.hlt_timer, hlt_timer_fn, v,
- first_cpu(cpu_online_map));
+ cpumask_any(&cpu_online_map));
return 0;
}
diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c
index 5af17ba7ff..684c748f43 100644
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -463,7 +463,7 @@ __domain_flush_vtlb_track_entry(struct domain* d,
local_purge = 0;
}
} else {
- for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
+ for_each_cpu(cpu, &entry->pcpu_dirty_mask) {
/* Invalidate VHPT entries. */
cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);
@@ -559,7 +559,7 @@ void flush_tlb_mask(const cpumask_t *mask)
if (cpumask_subset(mask, cpumask_of(cpu)))
return;
- for_each_cpu_mask (cpu, *mask)
+ for_each_cpu (cpu, mask)
if (cpu != smp_processor_id())
smp_call_function_single
(cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index 805cd13053..17b198731d 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -251,7 +251,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask)
cpumask_and(&target, mask, &cpuidle_mwait_flags);
/* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
- for_each_cpu_mask(cpu, target)
+ for_each_cpu(cpu, &target)
mwait_wakeup(cpu) = 0;
cpumask_andnot(mask, mask, &target);
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 5fb7cfae9e..b868f164ff 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -487,7 +487,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return -EAGAIN;
}
- for_each_cpu_mask(j, online_policy_cpus)
+ for_each_cpu(j, &online_policy_cpus)
cpufreq_statistic_update(j, perf->state, next_perf_state);
perf->state = next_perf_state;
diff --git a/xen/arch/x86/acpi/cpufreq/powernow.c b/xen/arch/x86/acpi/cpufreq/powernow.c
index 743321aac8..fb174fa771 100644
--- a/xen/arch/x86/acpi/cpufreq/powernow.c
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c
@@ -130,7 +130,7 @@ static int powernow_cpufreq_target(struct cpufreq_policy *policy,
on_selected_cpus(cmd.mask, transition_pstate, &cmd, 1);
- for_each_cpu_mask(j, online_policy_cpus)
+ for_each_cpu(j, &online_policy_cpus)
cpufreq_statistic_update(j, perf->state, next_perf_state);
perf->state = next_perf_state;
diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c
index acc6a026df..bfddc1e328 100644
--- a/xen/arch/x86/genapic/x2apic.c
+++ b/xen/arch/x86/genapic/x2apic.c
@@ -72,7 +72,7 @@ static void __send_IPI_mask_x2apic(
local_irq_save(flags);
- for_each_cpu_mask ( cpu, *cpumask )
+ for_each_cpu ( cpu, cpumask )
{
if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
continue;
diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index ece6654b33..c503790cb5 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -182,7 +182,7 @@ again:
now = NOW();
/* find all expired events */
- for_each_cpu_mask(cpu, *ch->cpumask)
+ for_each_cpu(cpu, ch->cpumask)
{
s_time_t deadline;
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index c0137cab01..0271be93dc 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -125,7 +125,7 @@ static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_ma
if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
return -EBUSY;
trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask);
- for_each_cpu_mask(cpu, online_mask)
+ for_each_cpu(cpu, &online_mask)
per_cpu(vector_irq, cpu)[vector] = irq;
desc->arch.vector = vector;
cpumask_copy(desc->arch.cpu_mask, &online_mask);
@@ -223,7 +223,7 @@ static void __clear_irq_vector(int irq)
vector = desc->arch.vector;
cpumask_and(&tmp_mask, desc->arch.cpu_mask, &cpu_online_map);
- for_each_cpu_mask(cpu, tmp_mask) {
+ for_each_cpu(cpu, &tmp_mask) {
ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
per_cpu(vector_irq, cpu)[vector] = -1;
}
@@ -248,7 +248,7 @@ static void __clear_irq_vector(int irq)
old_vector = desc->arch.old_vector;
cpumask_and(&tmp_mask, desc->arch.old_cpu_mask, &cpu_online_map);
- for_each_cpu_mask(cpu, tmp_mask) {
+ for_each_cpu(cpu, &tmp_mask) {
ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
TRACE_3D(TRC_HW_IRQ_MOVE_FINISH, irq, old_vector, cpu);
per_cpu(vector_irq, cpu)[old_vector] = -1;
@@ -451,7 +451,7 @@ static int __assign_irq_vector(
else
irq_used_vectors = irq_get_used_vector_mask(irq);
- for_each_cpu_mask(cpu, *mask) {
+ for_each_cpu(cpu, mask) {
int new_cpu;
int vector, offset;
@@ -481,7 +481,7 @@ next:
&& test_bit(vector, irq_used_vectors) )
goto next;
- for_each_cpu_mask(new_cpu, tmp_mask)
+ for_each_cpu(new_cpu, &tmp_mask)
if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next;
/* Found one! */
@@ -493,7 +493,7 @@ next:
desc->arch.old_vector = desc->arch.vector;
}
trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
- for_each_cpu_mask(new_cpu, tmp_mask)
+ for_each_cpu(new_cpu, &tmp_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
desc->arch.vector = vector;
cpumask_copy(desc->arch.cpu_mask, &tmp_mask);
diff --git a/xen/arch/x86/microcode.c b/xen/arch/x86/microcode.c
index 12594b8c23..ffe625bc44 100644
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -125,7 +125,7 @@ static long do_microcode_update(void *_info)
if ( error )
info->error = error;
- info->cpu = next_cpu(info->cpu, cpu_online_map);
+ info->cpu = cpumask_next(info->cpu, &cpu_online_map);
if ( info->cpu < nr_cpu_ids )
return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
@@ -158,7 +158,7 @@ int microcode_update(XEN_GUEST_HANDLE(const_void) buf, unsigned long len)
info->buffer_size = len;
info->error = 0;
- info->cpu = first_cpu(cpu_online_map);
+ info->cpu = cpumask_first(&cpu_online_map);
return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info);
}
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index c1ad9ef636..79b5ec2ccc 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -366,7 +366,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
goto out;
guest_from_compat_handle(idletimes, op->u.getidletime.idletime);
- for_each_cpu_mask ( cpu, *cpumap )
+ for_each_cpu ( cpu, cpumap )
{
if ( idle_vcpu[cpu] == NULL )
cpumask_clear_cpu(cpu, cpumap);
@@ -460,7 +460,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
g_info->flags |= XEN_PCPU_FLAGS_ONLINE;
}
- g_info->max_present = last_cpu(cpu_present_map);
+ g_info->max_present = cpumask_last(&cpu_present_map);
put_cpu_maps();
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 8bc77b0fc8..bca36ed1aa 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -229,9 +229,9 @@ static void __init normalise_cpu_order(void)
* Find remaining CPU with longest-prefix match on APIC ID.
* Among identical longest-prefix matches, pick the smallest APIC ID.
*/
- for ( j = next_cpu(i, cpu_present_map);
+ for ( j = cpumask_next(i, &cpu_present_map);
j < nr_cpu_ids;
- j = next_cpu(j, cpu_present_map) )
+ j = cpumask_next(j, &cpu_present_map) )
{
diff = x86_cpu_to_apicid[j] ^ apicid;
while ( diff & (diff-1) )
@@ -248,12 +248,12 @@ static void __init normalise_cpu_order(void)
/* If no match then there must be no CPUs remaining to consider. */
if ( min_cpu >= nr_cpu_ids )
{
- BUG_ON(next_cpu(i, cpu_present_map) < nr_cpu_ids);
+ BUG_ON(cpumask_next(i, &cpu_present_map) < nr_cpu_ids);
break;
}
/* Switch the best-matching CPU with the next CPU in logical order. */
- j = next_cpu(i, cpu_present_map);
+ j = cpumask_next(i, &cpu_present_map);
apicid = x86_cpu_to_apicid[min_cpu];
x86_cpu_to_apicid[min_cpu] = x86_cpu_to_apicid[j];
x86_cpu_to_apicid[j] = apicid;
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index be80415034..77daca5ce2 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -182,7 +182,7 @@ void send_IPI_mask_phys(const cpumask_t *mask, int vector)
local_irq_save(flags);
- for_each_cpu_mask ( query_cpu, *mask )
+ for_each_cpu ( query_cpu, mask )
{
if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) )
continue;
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 454d75b969..e14354f290 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -248,7 +248,7 @@ static void set_cpu_sibling_map(int cpu)
if ( c[cpu].x86_num_siblings > 1 )
{
- for_each_cpu_mask ( i, cpu_sibling_setup_map )
+ for_each_cpu ( i, &cpu_sibling_setup_map )
{
if ( cpu_has(c, X86_FEATURE_TOPOEXT) ) {
if ( (c[cpu].phys_proc_id == c[i].phys_proc_id) &&
@@ -273,7 +273,7 @@ static void set_cpu_sibling_map(int cpu)
return;
}
- for_each_cpu_mask ( i, cpu_sibling_setup_map )
+ for_each_cpu ( i, &cpu_sibling_setup_map )
{
if ( c[cpu].phys_proc_id == c[i].phys_proc_id )
{
@@ -814,7 +814,7 @@ remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = cpu_data;
- for_each_cpu_mask ( sibling, *per_cpu(cpu_core_mask, cpu) )
+ for_each_cpu ( sibling, per_cpu(cpu_core_mask, cpu) )
{
cpumask_clear_cpu(cpu, per_cpu(cpu_core_mask, sibling));
/* Last thread sibling in this cpu core going down. */
@@ -822,7 +822,7 @@ remove_siblinginfo(int cpu)
c[sibling].booted_cores--;
}
- for_each_cpu_mask(sibling, *per_cpu(cpu_sibling_mask, cpu))
+ for_each_cpu(sibling, per_cpu(cpu_sibling_mask, cpu))
cpumask_clear_cpu(cpu, per_cpu(cpu_sibling_mask, sibling));
cpumask_clear(per_cpu(cpu_sibling_mask, cpu));
cpumask_clear(per_cpu(cpu_core_mask, cpu));
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 723d623ca5..738e5172fe 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -103,7 +103,7 @@ long arch_do_sysctl(
uint32_t i, max_cpu_index, last_online_cpu;
xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
- last_online_cpu = last_cpu(cpu_online_map);
+ last_online_cpu = cpumask_last(&cpu_online_map);
max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
ti->max_cpu_index = last_online_cpu;
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index c4fadef344..79abdb7b09 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -205,7 +205,7 @@ void enable_nonboot_cpus(void)
printk("Enabling non-boot CPUs ...\n");
- for_each_cpu_mask ( cpu, frozen_cpus )
+ for_each_cpu ( cpu, &frozen_cpus )
{
if ( (error = cpu_up(cpu)) )
{
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index e8da05be2a..fcc44b1e57 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -494,7 +494,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
- cpu = first_cpu(cpupool_free_cpus);
+ cpu = cpumask_first(&cpupool_free_cpus);
ret = -EINVAL;
if ( cpu >= nr_cpu_ids )
goto addcpu_out;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 74664f4ebc..6705a573df 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -190,7 +190,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
cpu = cpumask_first(&cpu_exclude_map);
if ( cpumask_weight(&cpu_exclude_map) > 1 )
cpu = cpumask_next(cpu, &cpu_exclude_map);
- for_each_cpu_mask(i, *online)
+ for_each_cpu(i, online)
{
if ( cpumask_test_cpu(i, &cpu_exclude_map) )
continue;
@@ -541,7 +541,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
cpu = (i == 0) ?
default_vcpu0_location(online) :
- cycle_cpu(d->vcpu[i-1]->processor, *online);
+ cpumask_cycle(d->vcpu[i-1]->processor, online);
if ( alloc_vcpu(d, i, cpu) == NULL )
goto maxvcpu_out;
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 86e6b25e21..a8f256a410 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -128,7 +128,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
return;
/* Normal handling: synchronously dump the remaining CPUs' states. */
- for_each_cpu_mask ( cpu, dump_execstate_mask )
+ for_each_cpu ( cpu, &dump_execstate_mask )
{
smp_send_state_dump(cpu);
while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
diff --git a/xen/common/perfc.c b/xen/common/perfc.c
index 81b41ba3d9..215e074438 100644
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -211,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
{
case TYPE_SINGLE:
case TYPE_S_SINGLE:
- for_each_cpu_mask ( cpu, perfc_cpumap )
+ for_each_cpu ( cpu, &perfc_cpumap )
perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
++j;
break;
case TYPE_ARRAY:
case TYPE_S_ARRAY:
memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
- for_each_cpu_mask ( cpu, perfc_cpumap )
+ for_each_cpu ( cpu, &perfc_cpumap )
{
perfc_t *counters = per_cpu(perfcounters, cpu) + j;
unsigned int k;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 9314121092..86c4439300 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -521,7 +521,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
cpumask_andnot(&mask, &rqd->active, &rqd->idle);
cpumask_andnot(&mask, &mask, &rqd->tickled);
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, &mask)
{
struct csched_vcpu * cur;
@@ -1051,7 +1051,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
else
{
d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
- new_cpu = first_cpu(svc->migrate_rqd->active);
+ new_cpu = cpumask_first(&svc->migrate_rqd->active);
goto out_up;
}
}
@@ -1061,7 +1061,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
min_avgload = MAX_LOAD;
/* Find the runqueue with the lowest instantaneous load */
- for_each_cpu_mask(i, prv->active_queues)
+ for_each_cpu(i, &prv->active_queues)
{
struct csched_runqueue_data *rqd;
s_time_t rqd_avgload;
@@ -1099,7 +1099,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
else
{
BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
- new_cpu = first_cpu(prv->rqd[min_rqi].active);
+ new_cpu = cpumask_first(&prv->rqd[min_rqi].active);
}
out_up:
@@ -1179,7 +1179,7 @@ void migrate(const struct scheduler *ops,
on_runq=1;
}
__runq_deassign(svc);
- svc->vcpu->processor = first_cpu(trqd->active);
+ svc->vcpu->processor = cpumask_first(&trqd->active);
__runq_assign(svc, trqd);
if ( on_runq )
{
@@ -1219,7 +1219,7 @@ retry:
st.load_delta = 0;
- for_each_cpu_mask(i, prv->active_queues)
+ for_each_cpu(i, &prv->active_queues)
{
s_time_t delta;
@@ -1618,7 +1618,7 @@ csched_schedule(
{
int rq;
other_rqi = -2;
- for_each_cpu_mask ( rq, CSCHED_PRIV(ops)->active_queues )
+ for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues )
{
if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] )
{
@@ -1803,7 +1803,7 @@ csched_dump(const struct scheduler *ops)
"\tdefault-weight = %d\n",
cpumask_weight(&prv->active_queues),
CSCHED_DEFAULT_WEIGHT);
- for_each_cpu_mask(i, prv->active_queues)
+ for_each_cpu(i, &prv->active_queues)
{
s_time_t fraction;
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index dfb7ceef14..76b0e9d68c 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -442,7 +442,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
online = SEDF_CPUONLINE(v->domain->cpupool);
cpumask_and(&online_affinity, v->cpu_affinity, online);
- return first_cpu(online_affinity);
+ return cpumask_first(&online_affinity);
}
/*
@@ -1322,7 +1322,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op
{
struct vcpu *p;
struct domain *d;
- unsigned int cpu, nr_cpus = last_cpu(cpu_online_map) + 1;
+ unsigned int cpu, nr_cpus = cpumask_last(&cpu_online_map) + 1;
int *sumw = xzalloc_array(int, nr_cpus);
s_time_t *sumt = xzalloc_array(s_time_t, nr_cpus);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b22cf67fe5..c07d6f0f98 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1450,7 +1450,7 @@ void schedule_dump(struct cpupool *c)
printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
SCHED_OP(sched, dump_settings);
- for_each_cpu_mask (i, *cpus)
+ for_each_cpu (i, cpus)
{
pcpu_schedule_lock(i);
printk("CPU[%02d] ", i);
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 8634bafef5..3f1b302989 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -74,7 +74,7 @@ void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
cpumask_t send_mask;
cpumask_clear(&send_mask);
- for_each_cpu_mask(cpu, *mask)
+ for_each_cpu(cpu, mask)
if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
cpumask_set_cpu(cpu, &send_mask);
diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c
index eb38da58f8..0590504772 100644
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -101,7 +101,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
smp_wmb();
- for_each_cpu_mask ( i, allbutself )
+ for_each_cpu ( i, &allbutself )
tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
stopmachine_set_state(STOPMACHINE_PREPARE);
diff --git a/xen/common/timer.c b/xen/common/timer.c
index 1e51ce3c58..0547ea31a7 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -548,7 +548,7 @@ static struct keyhandler dump_timerq_keyhandler = {
static void migrate_timers_from_cpu(unsigned int old_cpu)
{
- unsigned int new_cpu = first_cpu(cpu_online_map);
+ unsigned int new_cpu = cpumask_any(&cpu_online_map);
struct timers *old_ts, *new_ts;
struct timer *t;
bool_t notify = 0;
diff --git a/xen/drivers/acpi/pmstat.c b/xen/drivers/acpi/pmstat.c
index c94a406653..dbc1c44a61 100644
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -223,7 +223,7 @@ static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
return -ENOMEM;
- for_each_cpu_mask(cpu, *policy->cpus)
+ for_each_cpu(cpu, policy->cpus)
affected_cpus[j++] = cpu;
ret = copy_to_guest(op->u.get_para.affected_cpus,
affected_cpus, op->u.get_para.cpu_num);
diff --git a/xen/drivers/cpufreq/cpufreq_ondemand.c b/xen/drivers/cpufreq/cpufreq_ondemand.c
index 4323cc5e9f..c9378adc4f 100644
--- a/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ b/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -122,7 +122,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
return;
/* Get Idle Time */
- for_each_cpu_mask(j, *policy->cpus) {
+ for_each_cpu(j, policy->cpus) {
uint64_t idle_ns, total_idle_ns;
uint64_t load, load_freq, freq_avg;
struct cpu_dbs_info_s *j_dbs_info;
@@ -233,7 +233,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
dbs_enable++;
- for_each_cpu_mask(j, *policy->cpus) {
+ for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 2d6f101f27..349f7ed058 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1033,7 +1033,7 @@ static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
msg.address_lo |= MSI_PHYSICAL_MODE << 2;
msg.address_lo |= MSI_REDIRECTION_HINT_MODE << 3;
- dest = cpu_physical_id(first_cpu(mask));
+ dest = cpu_physical_id(cpumask_first(mask));
msg.address_lo |= dest << MSI_TARGET_CPU_SHIFT;
#endif
diff --git a/xen/include/asm-ia64/linux-xen/asm/acpi.h b/xen/include/asm-ia64/linux-xen/asm/acpi.h
index 60067149c9..ab07b9acea 100644
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h
@@ -139,7 +139,7 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
- for_each_cpu_mask((cpu), early_cpu_possible_map)
+ for_each_cpu(cpu, &early_cpu_possible_map)
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
diff --git a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index c1519d5358..7f46632c11 100644
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -52,7 +52,7 @@ static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
#define tlbflush_filter(mask, page_timestamp) \
do { \
unsigned int cpu; \
- for_each_cpu_mask ( cpu, mask ) \
+ for_each_cpu ( cpu, &(mask) ) \
if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
cpumask_clear_cpu(cpu, &(mask)); \
} while ( 0 )
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index 8e69ed9b58..db53948663 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -37,18 +37,19 @@
* void cpumask_shift_right(dst, src, n) Shift right
* void cpumask_shift_left(dst, src, n) Shift left
*
- * int first_cpu(mask) Number lowest set bit, or NR_CPUS
- * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS
- * int last_cpu(mask) Number highest set bit, or NR_CPUS
- * int cycle_cpu(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
+ * int cpumask_first(mask) Number lowest set bit, or NR_CPUS
+ * int cpumask_next(cpu, mask) Next cpu past 'cpu', or NR_CPUS
+ * int cpumask_last(mask) Number highest set bit, or NR_CPUS
+ * int cpumask_any(mask) Any cpu in mask, or NR_CPUS
+ * int cpumask_cycle(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
*
- * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
+ * const cpumask_t *cpumask_of(cpu) Return cpumask with bit 'cpu' set
* unsigned long *cpumask_bits(mask) Array of unsigned long's in mask
*
* int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
* int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
*
- * for_each_cpu_mask(cpu, mask) for-loop cpu over mask
+ * for_each_cpu(cpu, mask) for-loop cpu over mask
*
* int num_online_cpus() Number of online CPUs
* int num_possible_cpus() Number of all possible CPUs
@@ -210,42 +211,43 @@ static inline void cpumask_shift_left(cpumask_t *dstp,
bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
}
-#define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
-#define first_cpu(src) __first_cpu(&(src), nr_cpu_ids)
-static inline int __first_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_first(const cpumask_t *srcp)
{
- return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+ return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
}
-#define cpumask_next(n, src) __next_cpu(n, src, nr_cpu_ids)
-#define next_cpu(n, src) __next_cpu((n), &(src), nr_cpu_ids)
-static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_next(int n, const cpumask_t *srcp)
{
- return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+
+ return min_t(int, nr_cpu_ids,
+ find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
}
-#define cpumask_last(src) __last_cpu(src, nr_cpu_ids)
-#define last_cpu(src) __last_cpu(&(src), nr_cpu_ids)
-static inline int __last_cpu(const cpumask_t *srcp, int nbits)
+static inline int cpumask_last(const cpumask_t *srcp)
{
- int cpu, pcpu = nbits;
- for (cpu = __first_cpu(srcp, nbits);
- cpu < nbits;
- cpu = __next_cpu(cpu, srcp, nbits))
+ int cpu, pcpu = nr_cpu_ids;
+
+ for (cpu = cpumask_first(srcp);
+ cpu < nr_cpu_ids;
+ cpu = cpumask_next(cpu, srcp))
pcpu = cpu;
return pcpu;
}
-#define cpumask_cycle(n, src) __cycle_cpu(n, src, nr_cpu_ids)
-#define cycle_cpu(n, src) __cycle_cpu((n), &(src), nr_cpu_ids)
-static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+static inline int cpumask_cycle(int n, const cpumask_t *srcp)
{
- int nxt = __next_cpu(n, srcp, nbits);
- if (nxt == nbits)
- nxt = __first_cpu(srcp, nbits);
+ int nxt = cpumask_next(n, srcp);
+
+ if (nxt == nr_cpu_ids)
+ nxt = cpumask_first(srcp);
return nxt;
}
+#define cpumask_any(srcp) cpumask_first(srcp)
+
/*
* Special-case data structure for "single bit set only" constant CPU masks.
*
@@ -262,8 +264,6 @@ static inline const cpumask_t *cpumask_of(unsigned int cpu)
return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}
-#define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
-
#if defined(__ia64__) /* XXX needs cleanup */
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
@@ -366,12 +366,13 @@ static inline void free_cpumask_var(cpumask_var_t mask)
#endif
#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask) \
- for ((cpu) = first_cpu(mask); \
- (cpu) < nr_cpu_ids; \
- (cpu) = next_cpu((cpu), (mask)))
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = cpumask_first(mask); \
+ (cpu) < nr_cpu_ids; \
+ (cpu) = cpumask_next(cpu, mask))
#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
#endif /* NR_CPUS */
/*
@@ -450,18 +451,9 @@ extern cpumask_t cpu_present_map;
#define cpu_present(cpu) ((cpu) == 0)
#endif
-#define any_online_cpu(mask) \
-({ \
- int cpu; \
- for_each_cpu_mask(cpu, (mask)) \
- if (cpu_online(cpu)) \
- break; \
- cpu; \
-})
-
-#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
+#define for_each_online_cpu(cpu) for_each_cpu(cpu, &cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu(cpu, &cpu_present_map)
/* Copy to/from cpumap provided by control tools. */
struct xenctl_cpumap;
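
[Editorial note: a usage sketch under the definitions in the hunk above; the helper name is hypothetical and not part of the commit. It shows how cpumask_cycle() composes cpumask_next() and cpumask_first() for wrap-around iteration, with exhaustion reported as nr_cpu_ids by all of the accessors.]

    /* Hypothetical round-robin helper built on the accessors above. */
    static unsigned int pick_next_cpu(unsigned int prev, const cpumask_t *mask)
    {
        /* Wraps: first set bit past 'prev', else the lowest set bit.
         * For an empty mask both legs yield nr_cpu_ids, so callers must
         * guarantee (or check) that the mask is non-empty. */
        unsigned int cpu = cpumask_cycle(prev, mask);

        BUG_ON(cpu >= nr_cpu_ids);
        return cpu;
    }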