diff options
author | Jan Beulich <jbeulich@suse.com> | 2011-10-21 09:19:44 +0200 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2011-10-21 09:19:44 +0200 |
commit | 3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e (patch) | |
tree | 02fa5b020be5edebcbc225f9bd6a9d413917055f /xen/common/page_alloc.c | |
parent | 20307695e5c22a58b4cce35fba5333375da5a7fa (diff) | |
download | xen-3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e.tar.gz xen-3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e.tar.bz2 xen-3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e.zip |
eliminate cpumask accessors referencing NR_CPUS
... in favor of using the new, nr_cpumask_bits-based ones.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/common/page_alloc.c')
-rw-r--r-- | xen/common/page_alloc.c | 12 |
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 3ce34af440..0f857d2142 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -304,7 +304,7 @@ static struct page_info *alloc_heap_pages(
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t extra_cpus_mask, mask;
+    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
@@ -418,7 +418,7 @@ static struct page_info *alloc_heap_pages(
     if ( d != NULL )
         d->last_alloc_node = node;

-    cpus_clear(mask);
+    cpumask_clear(&mask);

     for ( i = 0; i < (1 << order); i++ )
     {
@@ -429,9 +429,11 @@ static struct page_info *alloc_heap_pages(
         if ( pg[i].u.free.need_tlbflush )
         {
             /* Add in extra CPUs that need flushing because of this page. */
-            cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
+            static cpumask_t extra_cpus_mask;
+
+            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
             tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpus_or(mask, mask, extra_cpus_mask);
+            cpumask_or(&mask, &mask, &extra_cpus_mask);
         }

         /* Initialise fields which have other uses for free pages. */
@@ -441,7 +443,7 @@ static struct page_info *alloc_heap_pages(

     spin_unlock(&heap_lock);

-    if ( unlikely(!cpus_empty(mask)) )
+    if ( unlikely(!cpumask_empty(&mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
         flush_tlb_mask(&mask);