author     Jan Beulich <jbeulich@suse.com>  2011-09-18 00:22:57 +0100
committer  Jan Beulich <jbeulich@suse.com>  2011-09-18 00:22:57 +0100
commit     6ac9f9c55cce7e94445120ee8751a165dfe2d9d3 (patch)
tree       564d7e5fb09327622ca78c5202212c90baca88b5 /xen/arch/x86/irq.c
parent     645bf0d3fb22c91491c73dd16d184e8e986e1251 (diff)
convert more literal uses of cpumask_t to pointers
This is particularly relevant as the number of CPUs to be supported
increases (as recently happened for the default thereof).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
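A minimal sketch of the motivation, assuming a bitmap-backed cpumask_t; the
NR_CPUS value and the weight_* helpers below are illustrative stand-ins, not
Xen's actual definitions. With a large NR_CPUS, passing a cpumask_t by value
copies the entire bitmap onto the stack on every call, while passing a
pointer copies a single machine word:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative size only; Xen's NR_CPUS is a build-time option. */
    #define NR_CPUS 4096
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Simplified stand-in for Xen's cpumask_t: one bit per possible CPU. */
    typedef struct {
        unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
    } cpumask_t;

    /* By value: the whole bitmap (512 bytes at NR_CPUS=4096) is copied
     * into the callee's frame on every call. */
    static int weight_byval(cpumask_t mask)
    {
        int n = 0;
        size_t i;

        for ( i = 0; i < sizeof(mask.bits) / sizeof(mask.bits[0]); i++ )
            n += __builtin_popcountl(mask.bits[i]); /* GCC/Clang builtin */
        return n;
    }

    /* By pointer: only one word is passed; const marks read-only use. */
    static int weight_byptr(const cpumask_t *mask)
    {
        int n = 0;
        size_t i;

        for ( i = 0; i < sizeof(mask->bits) / sizeof(mask->bits[0]); i++ )
            n += __builtin_popcountl(mask->bits[i]);
        return n;
    }

    int main(void)
    {
        cpumask_t m;

        memset(&m, 0, sizeof(m));
        m.bits[0] = 0x5; /* CPUs 0 and 2 set */

        printf("sizeof(cpumask_t)   = %zu bytes\n", sizeof(cpumask_t));
        printf("weight (by value)   = %d\n", weight_byval(m));
        printf("weight (by pointer) = %d\n", weight_byptr(&m));
        return 0;
    }

The same trade-off drives the hunks below: callers now pass &mask instead of
a full copy, and callees that previously received the mask by value instead
dereference it where needed (e.g. *cpu_mask in cpus_and()).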
Diffstat (limited to 'xen/arch/x86/irq.c')
-rw-r--r--  xen/arch/x86/irq.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index b9490403bd..922ee6bf4c 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -108,7 +108,7 @@ static void trace_irq_mask(u32 event, int irq, int vector, cpumask_t *mask)
trace_var(event, 1, sizeof(d), &d);
}
-static int __init __bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
+static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
{
cpumask_t online_mask;
int cpu;
@@ -117,7 +117,7 @@ static int __init __bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
BUG_ON((unsigned)irq >= nr_irqs);
BUG_ON((unsigned)vector >= NR_VECTORS);
- cpus_and(online_mask, cpu_mask, cpu_online_map);
+ cpus_and(online_mask, *cpu_mask, cpu_online_map);
if (cpus_empty(online_mask))
return -EINVAL;
if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, online_mask))
@@ -140,7 +140,7 @@ static int __init __bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
return 0;
}
-int __init bind_irq_vector(int irq, int vector, cpumask_t cpu_mask)
+int __init bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
{
unsigned long flags;
int ret;
@@ -583,7 +583,7 @@ void move_masked_irq(int irq)
* For correct operation this depends on the caller masking the irqs.
*/
if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
- desc->handler->set_affinity(irq, desc->pending_mask);
+ desc->handler->set_affinity(irq, &desc->pending_mask);
cpus_clear(desc->pending_mask);
}
@@ -1410,7 +1410,7 @@ int pirq_guest_bind(struct vcpu *v, struct pirq *pirq, int will_share)
/* Attempt to bind the interrupt target to the correct CPU. */
cpu_set(v->processor, cpumask);
if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
- desc->handler->set_affinity(irq, cpumask);
+ desc->handler->set_affinity(irq, &cpumask);
}
else if ( !will_share || !action->shareable )
{
@@ -1964,7 +1964,7 @@ void fixup_irqs(void)
desc->handler->disable(irq);
if ( desc->handler->set_affinity )
- desc->handler->set_affinity(irq, affinity);
+ desc->handler->set_affinity(irq, &affinity);
else if ( !(warned++) )
set_affinity = 0;