diff options
author | Jan Beulich <jbeulich@suse.com> | 2011-11-08 10:36:10 +0100 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2011-11-08 10:36:10 +0100 |
commit | 22bdce1c04881c0e909817ee11f7cecd6c852d8c (patch) | |
tree | 445ece053822f839c700f30d0159b4b73da68465 /xen/common | |
parent | fe9ff5a9d54cced306aefaba126340e0c596717e (diff) | |
download | xen-22bdce1c04881c0e909817ee11f7cecd6c852d8c.tar.gz xen-22bdce1c04881c0e909817ee11f7cecd6c852d8c.tar.bz2 xen-22bdce1c04881c0e909817ee11f7cecd6c852d8c.zip |
eliminate first_cpu() etc
This includes the conversion from for_each_cpu_mask() to for_each_cpu().
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Diffstat (limited to 'xen/common')
-rw-r--r-- | xen/common/cpu.c | 2 | ||||
-rw-r--r-- | xen/common/cpupool.c | 2 | ||||
-rw-r--r-- | xen/common/domctl.c | 4 | ||||
-rw-r--r-- | xen/common/keyhandler.c | 2 | ||||
-rw-r--r-- | xen/common/perfc.c | 4 | ||||
-rw-r--r-- | xen/common/sched_credit2.c | 16 | ||||
-rw-r--r-- | xen/common/sched_sedf.c | 4 | ||||
-rw-r--r-- | xen/common/schedule.c | 2 | ||||
-rw-r--r-- | xen/common/softirq.c | 2 | ||||
-rw-r--r-- | xen/common/stop_machine.c | 2 | ||||
-rw-r--r-- | xen/common/timer.c | 2 |
11 files changed, 21 insertions, 21 deletions
diff --git a/xen/common/cpu.c b/xen/common/cpu.c index c4fadef344..79abdb7b09 100644 --- a/xen/common/cpu.c +++ b/xen/common/cpu.c @@ -205,7 +205,7 @@ void enable_nonboot_cpus(void) printk("Enabling non-boot CPUs ...\n"); - for_each_cpu_mask ( cpu, frozen_cpus ) + for_each_cpu ( cpu, &frozen_cpus ) { if ( (error = cpu_up(cpu)) ) { diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c index e8da05be2a..fcc44b1e57 100644 --- a/xen/common/cpupool.c +++ b/xen/common/cpupool.c @@ -494,7 +494,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op) op->cpupool_id, cpu); spin_lock(&cpupool_lock); if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY ) - cpu = first_cpu(cpupool_free_cpus); + cpu = cpumask_first(&cpupool_free_cpus); ret = -EINVAL; if ( cpu >= nr_cpu_ids ) goto addcpu_out; diff --git a/xen/common/domctl.c b/xen/common/domctl.c index 74664f4ebc..6705a573df 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -190,7 +190,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online) cpu = cpumask_first(&cpu_exclude_map); if ( cpumask_weight(&cpu_exclude_map) > 1 ) cpu = cpumask_next(cpu, &cpu_exclude_map); - for_each_cpu_mask(i, *online) + for_each_cpu(i, online) { if ( cpumask_test_cpu(i, &cpu_exclude_map) ) continue; @@ -541,7 +541,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl) cpu = (i == 0) ? default_vcpu0_location(online) : - cycle_cpu(d->vcpu[i-1]->processor, *online); + cpumask_cycle(d->vcpu[i-1]->processor, online); if ( alloc_vcpu(d, i, cpu) == NULL ) goto maxvcpu_out; diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c index 86e6b25e21..a8f256a410 100644 --- a/xen/common/keyhandler.c +++ b/xen/common/keyhandler.c @@ -128,7 +128,7 @@ static void dump_registers(unsigned char key, struct cpu_user_regs *regs) return; /* Normal handling: synchronously dump the remaining CPUs' states. 
*/ - for_each_cpu_mask ( cpu, dump_execstate_mask ) + for_each_cpu ( cpu, &dump_execstate_mask ) { smp_send_state_dump(cpu); while ( cpumask_test_cpu(cpu, &dump_execstate_mask) ) diff --git a/xen/common/perfc.c b/xen/common/perfc.c index 81b41ba3d9..215e074438 100644 --- a/xen/common/perfc.c +++ b/xen/common/perfc.c @@ -211,14 +211,14 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc, { case TYPE_SINGLE: case TYPE_S_SINGLE: - for_each_cpu_mask ( cpu, perfc_cpumap ) + for_each_cpu ( cpu, &perfc_cpumap ) perfc_vals[v++] = per_cpu(perfcounters, cpu)[j]; ++j; break; case TYPE_ARRAY: case TYPE_S_ARRAY: memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals)); - for_each_cpu_mask ( cpu, perfc_cpumap ) + for_each_cpu ( cpu, &perfc_cpumap ) { perfc_t *counters = per_cpu(perfcounters, cpu) + j; unsigned int k; diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c index 9314121092..86c4439300 100644 --- a/xen/common/sched_credit2.c +++ b/xen/common/sched_credit2.c @@ -521,7 +521,7 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n cpumask_andnot(&mask, &rqd->active, &rqd->idle); cpumask_andnot(&mask, &mask, &rqd->tickled); - for_each_cpu_mask(i, mask) + for_each_cpu(i, &mask) { struct csched_vcpu * cur; @@ -1051,7 +1051,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc) else { d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id); - new_cpu = first_cpu(svc->migrate_rqd->active); + new_cpu = cpumask_first(&svc->migrate_rqd->active); goto out_up; } } @@ -1061,7 +1061,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc) min_avgload = MAX_LOAD; /* Find the runqueue with the lowest instantaneous load */ - for_each_cpu_mask(i, prv->active_queues) + for_each_cpu(i, &prv->active_queues) { struct csched_runqueue_data *rqd; s_time_t rqd_avgload; @@ -1099,7 +1099,7 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc) else { 
BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active)); - new_cpu = first_cpu(prv->rqd[min_rqi].active); + new_cpu = cpumask_first(&prv->rqd[min_rqi].active); } out_up: @@ -1179,7 +1179,7 @@ void migrate(const struct scheduler *ops, on_runq=1; } __runq_deassign(svc); - svc->vcpu->processor = first_cpu(trqd->active); + svc->vcpu->processor = cpumask_first(&trqd->active); __runq_assign(svc, trqd); if ( on_runq ) { @@ -1219,7 +1219,7 @@ retry: st.load_delta = 0; - for_each_cpu_mask(i, prv->active_queues) + for_each_cpu(i, &prv->active_queues) { s_time_t delta; @@ -1618,7 +1618,7 @@ csched_schedule( { int rq; other_rqi = -2; - for_each_cpu_mask ( rq, CSCHED_PRIV(ops)->active_queues ) + for_each_cpu ( rq, &CSCHED_PRIV(ops)->active_queues ) { if ( scurr->rqd == &CSCHED_PRIV(ops)->rqd[rq] ) { @@ -1803,7 +1803,7 @@ csched_dump(const struct scheduler *ops) "\tdefault-weight = %d\n", cpumask_weight(&prv->active_queues), CSCHED_DEFAULT_WEIGHT); - for_each_cpu_mask(i, prv->active_queues) + for_each_cpu(i, &prv->active_queues) { s_time_t fraction; diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c index dfb7ceef14..76b0e9d68c 100644 --- a/xen/common/sched_sedf.c +++ b/xen/common/sched_sedf.c @@ -442,7 +442,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v) online = SEDF_CPUONLINE(v->domain->cpupool); cpumask_and(&online_affinity, v->cpu_affinity, online); - return first_cpu(online_affinity); + return cpumask_first(&online_affinity); } /* @@ -1322,7 +1322,7 @@ static int sedf_adjust_weights(struct cpupool *c, struct xen_domctl_scheduler_op { struct vcpu *p; struct domain *d; - unsigned int cpu, nr_cpus = last_cpu(cpu_online_map) + 1; + unsigned int cpu, nr_cpus = cpumask_last(&cpu_online_map) + 1; int *sumw = xzalloc_array(int, nr_cpus); s_time_t *sumt = xzalloc_array(s_time_t, nr_cpus); diff --git a/xen/common/schedule.c b/xen/common/schedule.c index b22cf67fe5..c07d6f0f98 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -1450,7 
+1450,7 @@ void schedule_dump(struct cpupool *c) printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name); SCHED_OP(sched, dump_settings); - for_each_cpu_mask (i, *cpus) + for_each_cpu (i, cpus) { pcpu_schedule_lock(i); printk("CPU[%02d] ", i); diff --git a/xen/common/softirq.c b/xen/common/softirq.c index 8634bafef5..3f1b302989 100644 --- a/xen/common/softirq.c +++ b/xen/common/softirq.c @@ -74,7 +74,7 @@ void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr) cpumask_t send_mask; cpumask_clear(&send_mask); - for_each_cpu_mask(cpu, *mask) + for_each_cpu(cpu, mask) if ( !test_and_set_bit(nr, &softirq_pending(cpu)) ) cpumask_set_cpu(cpu, &send_mask); diff --git a/xen/common/stop_machine.c b/xen/common/stop_machine.c index eb38da58f8..0590504772 100644 --- a/xen/common/stop_machine.c +++ b/xen/common/stop_machine.c @@ -101,7 +101,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) smp_wmb(); - for_each_cpu_mask ( i, allbutself ) + for_each_cpu ( i, &allbutself ) tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i); stopmachine_set_state(STOPMACHINE_PREPARE); diff --git a/xen/common/timer.c b/xen/common/timer.c index 1e51ce3c58..0547ea31a7 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -548,7 +548,7 @@ static struct keyhandler dump_timerq_keyhandler = { static void migrate_timers_from_cpu(unsigned int old_cpu) { - unsigned int new_cpu = first_cpu(cpu_online_map); + unsigned int new_cpu = cpumask_any(&cpu_online_map); struct timers *old_ts, *new_ts; struct timer *t; bool_t notify = 0; |