author    Jan Beulich <jbeulich@suse.com>    2011-10-21 09:21:09 +0200
committer Jan Beulich <jbeulich@suse.com>    2011-10-21 09:21:09 +0200
commit    4f3e36d7ad7cd23f8df44e39fb14313a8a809e3d (patch)
tree      495538ea99a0b2c90eb6f18c87fd48fab2f4ec3e /xen/arch/x86/irq.c
parent    3cfbbfcf4cb1d4fecfd1c9a8bab090bdb61e993e (diff)
eliminate direct assignments of CPU masks
Use cpumask_copy() instead of direct variable assignments for copying CPU
masks. While direct assignments are not a problem when both sides are
variables actually defined as cpumask_t (except for possibly copying *much*
more than would actually need to be copied), they must not happen when the
original variable is of type cpumask_var_t (which may have less space
allocated to it than a full cpumask_t). Eliminate as many of these
assignments as possible (in several cases it is even possible to collapse
two operations [copy, then clear one bit] into one [cpumask_andnot()]), and
thus pave the way for reducing the allocation size in alloc_cpumask_var().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
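For illustration only (not part of this patch): a minimal sketch of why the
distinction matters, assuming Xen's cpumask API; copy_mask() and its
parameter names are hypothetical.

    #include <xen/cpumask.h>

    /* Hypothetical helper: 'src' is a cpumask_var_t, which may point at
     * an allocation covering only nr_cpu_ids bits, i.e. possibly less
     * than a full cpumask_t. */
    static void copy_mask(cpumask_t *dst, const cpumask_var_t src)
    {
        /* Unsafe once alloc_cpumask_var() allocates less than a full
         * cpumask_t: a struct assignment such as
         *     *dst = *(const cpumask_t *)src;
         * copies sizeof(cpumask_t) bytes and can read past the end of
         * the shorter allocation behind 'src'. */

        /* Safe: cpumask_copy() copies only the bits actually in use. */
        cpumask_copy(dst, src);
    }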
Diffstat (limited to 'xen/arch/x86/irq.c')
-rw-r--r--  xen/arch/x86/irq.c | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 4a93f29d76..40326f2bfe 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1040,7 +1040,7 @@ static void irq_guest_eoi_timer_fn(void *data)
         desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
-        cpu_eoi_map = action->cpu_eoi_map;
+        cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
         spin_unlock_irq(&desc->lock);
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
         spin_lock_irq(&desc->lock);
@@ -1366,9 +1366,9 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
 
     ASSERT(action->ack_type == ACKTYPE_EOI);
 
-    cpu_eoi_map = action->cpu_eoi_map;
+    cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
 
-    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+    if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
     {
         __set_eoi_ready(desc);
         spin_unlock(&desc->lock);
@@ -1380,7 +1380,7 @@ void desc_guest_eoi(struct irq_desc *desc, struct pirq *pirq)
         spin_unlock_irq(&desc->lock);
     }
 
-    if ( !cpus_empty(cpu_eoi_map) )
+    if ( !cpumask_empty(&cpu_eoi_map) )
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
 }
@@ -1619,7 +1619,7 @@ static irq_guest_action_t *__pirq_guest_unbind(
          (--action->in_flight == 0) &&
          (action->nr_guests != 0) )
     {
-        cpu_eoi_map = action->cpu_eoi_map;
+        cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
         spin_unlock_irq(&desc->lock);
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
         spin_lock_irq(&desc->lock);
@@ -1649,8 +1649,8 @@ static irq_guest_action_t *__pirq_guest_unbind(
      * would need to flush all ready EOIs before returning as otherwise the
      * desc->handler could change and we would call the wrong 'end' hook.
      */
-    cpu_eoi_map = action->cpu_eoi_map;
-    if ( !cpus_empty(cpu_eoi_map) )
+    cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+    if ( !cpumask_empty(&cpu_eoi_map) )
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irq(&desc->lock);
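For comparison, a hedged sketch of the copy-then-clear collapse mentioned in
the commit message (no such site occurs in this file's hunks), assuming
cpumask_clear_cpu(), cpumask_andnot() and cpumask_of() with their usual
Xen/Linux cpumask semantics:

    /* Before: two operations on the scratch mask. */
    cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
    cpumask_clear_cpu(smp_processor_id(), &cpu_eoi_map);

    /* After: a single cpumask_andnot() computes "all EOI CPUs except
     * the local one" in one pass. */
    cpumask_andnot(&cpu_eoi_map, &action->cpu_eoi_map,
                   cpumask_of(smp_processor_id()));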