author     Jan Beulich <jbeulich@suse.com>    2011-10-21 09:19:44 +0200
committer  Jan Beulich <jbeulich@suse.com>    2011-10-21 09:19:44 +0200
commit     3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e (patch)
tree       02fa5b020be5edebcbc225f9bd6a9d413917055f /xen/common
parent     20307695e5c22a58b4cce35fba5333375da5a7fa (diff)
eliminate cpumask accessors referencing NR_CPUS
... in favor of using the new, nr_cpumask_bits-based ones.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
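
The conversion pattern is mechanical throughout: each old value-style accessor, which operates on a whole cpumask_t and scans all NR_CPUS bits, becomes its pointer-based cpumask_* counterpart, which scans only nr_cpumask_bits (bounded by the runtime CPU count). As a rough orientation sketch, not part of the commit itself, using only identifiers that occur in the hunks below:

    cpumask_t mask;

    /* Old style: macros take the mask itself and walk all NR_CPUS bits. */
    cpus_clear(mask);
    cpu_set(cpu, mask);
    if ( !cpus_empty(mask) )
        /* ... act on mask ... */;

    /* New style: functions take a pointer and walk only nr_cpumask_bits. */
    cpumask_clear(&mask);
    cpumask_set_cpu(cpu, &mask);
    if ( !cpumask_empty(&mask) )
        /* ... act on mask ... */;

One visible side effect: static masks can no longer rely on the CPU_MASK_NONE / CPU_MASK_ALL initializers, so they are now cleared or filled at run time instead (see the trace.c hunk, where init_trace_bufs() gains a cpumask_setall() call).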
Diffstat (limited to 'xen/common')
-rw-r--r--  xen/common/cpu.c           |  6
-rw-r--r--  xen/common/cpupool.c       |  2
-rw-r--r--  xen/common/domain.c        |  3
-rw-r--r--  xen/common/domctl.c        | 27
-rw-r--r--  xen/common/keyhandler.c    |  2
-rw-r--r--  xen/common/page_alloc.c    | 12
-rw-r--r--  xen/common/rcupdate.c      |  1
-rw-r--r--  xen/common/sched_credit.c  | 72
-rw-r--r--  xen/common/sched_credit2.c | 12
-rw-r--r--  xen/common/schedule.c      |  4
-rw-r--r--  xen/common/softirq.c       |  5
-rw-r--r--  xen/common/trace.c         |  3
12 files changed, 77 insertions, 72 deletions
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index c80d923bec..c4fadef344 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -176,7 +176,7 @@ int disable_nonboot_cpus(void)
 
     BUG_ON(smp_processor_id() != 0);
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 
     printk("Disabling non-boot CPUs ...\n");
 
@@ -192,7 +192,7 @@ int disable_nonboot_cpus(void)
             break;
         }
 
-        cpu_set(cpu, frozen_cpus);
+        cpumask_set_cpu(cpu, &frozen_cpus);
     }
 
     BUG_ON(!error && (num_online_cpus() != 1));
@@ -214,5 +214,5 @@ void enable_nonboot_cpus(void)
         }
     }
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index b470f621c7..459edf73fb 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -29,7 +29,7 @@
 static struct cpupool *cpupool_list;       /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
-static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
+static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index d0818c5398..d8ddb23d41 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -359,11 +359,12 @@ struct domain *domain_create(
 
 void domain_update_node_affinity(struct domain *d)
 {
-    cpumask_t cpumask = CPU_MASK_NONE;
+    cpumask_t cpumask;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
+    cpumask_clear(&cpumask);
     spin_lock(&d->node_affinity_lock);
 
     for_each_vcpu ( d, v )
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d0f0f28dd4..2fab24b63b 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -37,9 +37,9 @@ int cpumask_to_xenctl_cpumap(
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
-    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);
+    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
 
     if ( copy_bytes != 0 )
         if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
@@ -59,7 +59,7 @@ int xenctl_cpumap_to_cpumask(
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
     memset(bytemap, 0, sizeof(bytemap));
 
@@ -71,7 +71,7 @@ int xenctl_cpumap_to_cpumask(
             bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
     }
 
-    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
+    bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
 
     return 0;
 }
@@ -154,7 +154,7 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    nr_cpus = last_cpu(cpu_online_map) + 1;
+    nr_cpus = cpumask_last(&cpu_online_map) + 1;
     cnt = xzalloc_array(unsigned int, nr_cpus);
     if ( cnt )
     {
@@ -171,18 +171,19 @@ static unsigned int default_vcpu0_location(cpumask_t *online)
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We
      * favour high numbered CPUs in the event of a tie.
      */
-    cpu = first_cpu(per_cpu(cpu_sibling_map, 0));
-    if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 )
-        cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0));
-    cpu_exclude_map = per_cpu(cpu_sibling_map, 0);
+    cpumask_copy(&cpu_exclude_map, &per_cpu(cpu_sibling_map, 0));
+    cpu = cpumask_first(&cpu_exclude_map);
+    if ( cpumask_weight(&cpu_exclude_map) > 1 )
+        cpu = cpumask_next(cpu, &cpu_exclude_map);
     for_each_cpu_mask(i, *online)
     {
-        if ( cpu_isset(i, cpu_exclude_map) )
+        if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
-        if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) &&
-             (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) )
+        if ( (i == cpumask_first(&per_cpu(cpu_sibling_map, i))) &&
+             (cpumask_weight(&per_cpu(cpu_sibling_map, i)) > 1) )
             continue;
-        cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i));
+        cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
+                   &per_cpu(cpu_sibling_map, i));
         if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index db98475f93..51cfc3cc98 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -316,7 +316,7 @@ static struct keyhandler dump_domains_keyhandler = {
     .desc = "dump domain (and guest debug) info"
 };
 
-static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
+static cpumask_t read_clocks_cpumask;
 static DEFINE_PER_CPU(s_time_t, read_clocks_time);
 static DEFINE_PER_CPU(u64, read_cycles_time);
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 3ce34af440..0f857d2142 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -304,7 +304,7 @@ static struct page_info *alloc_heap_pages(
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t extra_cpus_mask, mask;
+    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
 
@@ -418,7 +418,7 @@ static struct page_info *alloc_heap_pages(
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     for ( i = 0; i < (1 << order); i++ )
     {
@@ -429,9 +429,11 @@ static struct page_info *alloc_heap_pages(
         if ( pg[i].u.free.need_tlbflush )
         {
             /* Add in extra CPUs that need flushing because of this page. */
-            cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
+            static cpumask_t extra_cpus_mask;
+
+            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
             tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpus_or(mask, mask, extra_cpus_mask);
+            cpumask_or(&mask, &mask, &extra_cpus_mask);
         }
 
         /* Initialise fields which have other uses for free pages.
          */
@@ -441,7 +443,7 @@ static struct page_info *alloc_heap_pages(
 
     spin_unlock(&heap_lock);
 
-    if ( unlikely(!cpus_empty(mask)) )
+    if ( unlikely(!cpumask_empty(&mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
         flush_tlb_mask(&mask);
diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c
index 9911f92e2e..189681174b 100644
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -59,7 +59,6 @@ static struct rcu_ctrlblk {
     .cur = -300,
     .completed = -300,
     .lock = SPIN_LOCK_UNLOCKED,
-    .cpumask = CPU_MASK_NONE,
 };
 
 /*
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 9e29957e1a..5b11c91554 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -260,7 +260,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
     cpumask_t mask;
 
     ASSERT(cur);
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     /* If strictly higher priority than current VCPU, signal the CPU */
     if ( new->pri > cur->pri )
@@ -274,7 +274,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
         else
             CSCHED_STAT_CRANK(tickle_local_other);
 
-        cpu_set(cpu, mask);
+        cpumask_set_cpu(cpu, &mask);
     }
 
     /*
@@ -283,7 +283,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
      */
    if ( cur->pri > CSCHED_PRI_IDLE )
    {
-        if ( cpus_empty(prv->idlers) )
+        if ( cpumask_empty(&prv->idlers) )
        {
            CSCHED_STAT_CRANK(tickle_idlers_none);
        }
@@ -292,24 +292,24 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
             cpumask_t idle_mask;
 
             cpumask_and(&idle_mask, &prv->idlers, new->vcpu->cpu_affinity);
-            if ( !cpus_empty(idle_mask) )
+            if ( !cpumask_empty(&idle_mask) )
             {
                 CSCHED_STAT_CRANK(tickle_idlers_some);
                 if ( opt_tickle_one_idle )
                 {
                     this_cpu(last_tickle_cpu) =
-                        cycle_cpu(this_cpu(last_tickle_cpu), idle_mask);
-                    cpu_set(this_cpu(last_tickle_cpu), mask);
+                        cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
+                    cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
                 }
                 else
-                    cpus_or(mask, mask, idle_mask);
+                    cpumask_or(&mask, &mask, &idle_mask);
             }
             cpumask_and(&mask, &mask, new->vcpu->cpu_affinity);
         }
     }
 
     /* Send scheduler interrupts to designated CPUs */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
         cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
 }
 
@@ -471,10 +471,10 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
      */
     online = CSCHED_CPUONLINE(vc->domain->cpupool);
     cpumask_and(&cpus, online, vc->cpu_affinity);
-    cpu = cpu_isset(vc->processor, cpus)
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
             ? vc->processor
-            : cycle_cpu(vc->processor, cpus);
-    ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
 
     /*
      * Try to find an idle processor within the above constraints.
@@ -488,54 +488,54 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
      * like run two VCPUs on co-hyperthreads while there are idle cores
      * or sockets.
      */
-    cpus_and(idlers, cpu_online_map, CSCHED_PRIV(ops)->idlers);
-    cpu_set(cpu, idlers);
-    cpus_and(cpus, cpus, idlers);
-    cpu_clear(cpu, cpus);
+    cpumask_and(&idlers, &cpu_online_map, &CSCHED_PRIV(ops)->idlers);
+    cpumask_set_cpu(cpu, &idlers);
+    cpumask_and(&cpus, &cpus, &idlers);
+    cpumask_clear_cpu(cpu, &cpus);
 
-    while ( !cpus_empty(cpus) )
+    while ( !cpumask_empty(&cpus) )
     {
         cpumask_t cpu_idlers;
         cpumask_t nxt_idlers;
         int nxt, weight_cpu, weight_nxt;
         int migrate_factor;
 
-        nxt = cycle_cpu(cpu, cpus);
+        nxt = cpumask_cycle(cpu, &cpus);
 
-        if ( cpu_isset(cpu, per_cpu(cpu_core_map, nxt)) )
+        if ( cpumask_test_cpu(cpu, &per_cpu(cpu_core_map, nxt)) )
         {
             /* We're on the same socket, so check the busy-ness of threads.
              * Migrate if # of idlers is less at all */
-            ASSERT( cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 1;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_sibling_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_sibling_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_sibling_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_sibling_map, nxt));
         }
         else
         {
             /* We're on different sockets, so check the busy-ness of cores.
              * Migrate only if the other core is twice as idle */
-            ASSERT( !cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( !cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 2;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_core_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_core_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_core_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_core_map, nxt));
         }
 
-        weight_cpu = cpus_weight(cpu_idlers);
-        weight_nxt = cpus_weight(nxt_idlers);
+        weight_cpu = cpumask_weight(&cpu_idlers);
+        weight_nxt = cpumask_weight(&nxt_idlers);
         /* smt_power_savings: consolidate work rather than spreading it */
         if ( sched_smt_power_savings ?
              weight_cpu > weight_nxt :
             weight_cpu * migrate_factor < weight_nxt )
         {
-            cpus_and(nxt_idlers, cpus, nxt_idlers);
+            cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
             spc = CSCHED_PCPU(nxt);
-            cpu = cycle_cpu(spc->idle_bias, nxt_idlers);
-            cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu));
+            cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &per_cpu(cpu_sibling_map, cpu));
         }
         else
         {
-            cpus_andnot(cpus, cpus, nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &nxt_idlers);
         }
     }
 
@@ -1228,7 +1228,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
     online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
 
     /* If this CPU is going offline we shouldn't steal work. */
-    if ( unlikely(!cpu_isset(cpu, *online)) )
+    if ( unlikely(!cpumask_test_cpu(cpu, online)) )
         goto out;
 
     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1242,14 +1242,14 @@ csched_load_balance(struct csched_private *prv, int cpu,
      * Peek at non-idling CPUs in the system, starting with our
      * immediate neighbour.
      */
-    cpus_andnot(workers, *online, prv->idlers);
-    cpu_clear(cpu, workers);
+    cpumask_andnot(&workers, online, &prv->idlers);
+    cpumask_clear_cpu(cpu, &workers);
     peer_cpu = cpu;
 
     while ( !cpus_empty(workers) )
     {
-        peer_cpu = cycle_cpu(peer_cpu, workers);
-        cpu_clear(peer_cpu, workers);
+        peer_cpu = cpumask_cycle(peer_cpu, &workers);
+        cpumask_clear_cpu(peer_cpu, &workers);
 
         /*
          * Get ahold of the scheduler lock for this peer CPU.
@@ -1267,7 +1267,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
         /*
          * Any work over there to steal?
          */
-        speer = cpu_isset(peer_cpu, *online) ?
+        speer = cpumask_test_cpu(peer_cpu, online) ?
             csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
         pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 398ccad159..5033819d6a 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -507,19 +507,19 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched_vcpu *n
     }
 
     /* Get a mask of idle, but not tickled */
-    cpus_andnot(mask, rqd->idle, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
 
     /* If it's not empty, choose one */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
     {
-        ipid=first_cpu(mask);
+        ipid = cpumask_first(&mask);
         goto tickle;
     }
 
     /* Otherwise, look for the non-idle cpu with the lowest credit,
      * skipping cpus which have been tickled but not scheduled yet */
-    cpus_andnot(mask, rqd->active, rqd->idle);
-    cpus_andnot(mask, mask, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->active, &rqd->idle);
+    cpumask_andnot(&mask, &mask, &rqd->tickled);
 
     for_each_cpu_mask(i, mask)
     {
@@ -573,7 +573,7 @@ tickle:
                   sizeof(d),
                   (unsigned char *)&d);
     }
-    cpu_set(ipid, rqd->tickled);
+    cpumask_set_cpu(ipid, &rqd->tickled);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
 
 no_tickle:
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index e9a8600b8d..c4ccc79997 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -595,8 +595,8 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
     if ( v->domain->is_pinned )
         return -EINVAL;
     online = VCPU2ONLINE(v);
-    cpus_and(online_affinity, *affinity, *online);
-    if ( cpus_empty(online_affinity) )
+    cpumask_and(&online_affinity, affinity, online);
+    if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index b0e4890c75..8634bafef5 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -71,11 +71,12 @@ void open_softirq(int nr, softirq_handler handler)
 void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
     int cpu;
-    cpumask_t send_mask = CPU_MASK_NONE;
+    cpumask_t send_mask;
 
+    cpumask_clear(&send_mask);
     for_each_cpu_mask(cpu, *mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
-            cpu_set(cpu, send_mask);
+            cpumask_set_cpu(cpu, &send_mask);
 
     smp_send_event_check_mask(&send_mask);
 }
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 8c41990b7a..2253e3114a 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -70,7 +70,7 @@ static DEFINE_PER_CPU(unsigned long, lost_records_first_tsc);
 int tb_init_done __read_mostly;
 
 /* which CPUs tracing is enabled on */
-static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
+static cpumask_t tb_cpu_mask;
 
 /* which tracing events are enabled */
 static u32 tb_event_mask = TRC_ALL;
@@ -338,6 +338,7 @@ int trace_will_trace_event(u32 event)
  */
 void __init init_trace_bufs(void)
 {
+    cpumask_setall(&tb_cpu_mask);
     register_cpu_notifier(&cpu_nfb);
 
     if ( opt_tbuf_size )