author    sos22@douglas.cl.cam.ac.uk <sos22@douglas.cl.cam.ac.uk>  2005-07-08 17:33:42 +0000
committer sos22@douglas.cl.cam.ac.uk <sos22@douglas.cl.cam.ac.uk>  2005-07-08 17:33:42 +0000
commit    971dbf697843d0211fe8a6f702b9ab80f7fd7b94 (patch)
tree      fc48a9f172ccd02fc39936f90fab4cb7ba9864f2
parent    c1a9064dc71e4d551043f4eb1646ea51c0ee572f (diff)
Get a very primitive version of IRQ affinity working. For the minute,
we just pick one vcpu out of the allowed set and always route the irq
to that one; that's enough for the userspace irq balancer, but not
enough for the kernel-space one. Whether it's actually worth
implementing the full variant is open to debate. This also makes IRQ
routing across vcpu hotplug events slightly easier.

Signed-off-by: Steven Smith, sos22@cam.ac.uk
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c  |  28
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c        |  93
-rw-r--r--  xen/common/event_channel.c                              |   6
3 files changed, 77 insertions(+), 50 deletions(-)
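
For context, the userspace irq balancer mentioned above drives this path by writing a cpu mask to /proc/irq/N/smp_affinity, which the kernel hands to the new set_affinity_irq() hook. A minimal userspace sketch, not part of this patch (the irq number 19 is made up):

/* Hypothetical example: writing a hex cpu mask to
 * /proc/irq/N/smp_affinity makes the kernel call the irq's
 * set_affinity hook.  With this patch, only the first cpu in the
 * mask (cpu 1 for mask 0x6) actually receives the interrupt. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

    if (f == NULL) {
        perror("fopen");
        return 1;
    }
    fprintf(f, "%x\n", 0x6);   /* allow cpus 1 and 2 */
    fclose(f);
    return 0;
}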
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
index 4798f1d4ec..f095dcf983 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
@@ -1312,7 +1312,7 @@ void __devinit smp_prepare_boot_cpu(void)
/* hotplug down/up function pointer and target vcpu */
struct vcpu_hotplug_handler_t {
- void (*fn)();
+ void (*fn)(int vcpu);
u32 vcpu;
};
static struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
@@ -1325,6 +1325,7 @@ static int __devinit cpu_enable(unsigned int cpu)
prepare_for_smp();
#endif
+ printk("<0>Starting enable cpu.\n");
/* get the target out of its holding state */
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
wmb();
@@ -1333,11 +1334,10 @@ static int __devinit cpu_enable(unsigned int cpu)
while (!cpu_online(cpu))
cpu_relax();
- /* re-route bound IRQs 0 to cpu */
- rebind_evtchn_from_irq(0, cpu, per_cpu(resched_irq, cpu));
- rebind_evtchn_from_irq(0, cpu, per_cpu(callfunc_irq, cpu));
-
+ printk("<0>Calling fixup_irqs.\n");
fixup_irqs(cpu_online_map);
+ printk("<0>Called fixup_irqs.\n");
+
/* counter the disable in fixup_irqs() */
local_irq_enable();
return 0;
@@ -1359,17 +1359,14 @@ int __cpu_disable(void)
if (cpu == 0)
return -EBUSY;
- /* Allow any queued timer interrupts to get serviced */
- local_irq_enable();
- mdelay(1);
- local_irq_disable();
-
cpu_clear(cpu, map);
fixup_irqs(map);
+ printk("<0>Done fixup_irqs.\n");
- /* re-route IRQs from dead vcpu to another */
- rebind_evtchn_from_irq(cpu, 0, per_cpu(resched_irq, cpu));
- rebind_evtchn_from_irq(cpu, 0, per_cpu(callfunc_irq, cpu));
+ local_irq_enable();
+ printk("<0>Interrupts on.\n");
+ local_irq_disable();
+ printk("<0>Interrupts off again.\n");
/* It's now safe to remove this processor from the online map */
cpu_clear(cpu, cpu_online_map);
@@ -1498,6 +1495,7 @@ int __devinit __cpu_up(unsigned int cpu)
/* Already up, and in cpu_quiescent now? */
if (cpu_isset(cpu, smp_commenced_mask)) {
cpu_enable(cpu);
+ printk("<0>cpu_enable completed.\n");
return 0;
}
#endif
@@ -1533,13 +1531,13 @@ void __init smp_intr_init(void)
int cpu = smp_processor_id();
per_cpu(resched_irq, cpu) =
- bind_ipi_to_irq(RESCHEDULE_VECTOR);
+ bind_ipi_on_cpu_to_irq(RESCHEDULE_VECTOR);
sprintf(resched_name[cpu], "resched%d", cpu);
BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
SA_INTERRUPT, resched_name[cpu], NULL));
per_cpu(callfunc_irq, cpu) =
- bind_ipi_to_irq(CALL_FUNCTION_VECTOR);
+ bind_ipi_on_cpu_to_irq(CALL_FUNCTION_VECTOR);
sprintf(callfunc_name[cpu], "callfunc%d", cpu);
BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
smp_call_function_interrupt,
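
The hotplug paths above now lean entirely on fixup_irqs() instead of rebinding individual event channels by hand. A hedged sketch of what such a pass looks like, assuming the 2.6.11-era irq_affinity[] array and the per-irq set_affinity hook this patch fills in; the real fixup_irqs() may differ in detail:

/* Sketch only, not from this patch: retarget every irq so it no
 * longer fires on a cpu outside 'map'. */
static void fixup_irqs_sketch(cpumask_t map)
{
    unsigned int irq;
    cpumask_t mask;

    for (irq = 0; irq < NR_IRQS; irq++) {
        cpus_and(mask, irq_affinity[irq], map);
        if (cpus_empty(mask))   /* no allowed cpu left... */
            mask = map;         /* ...fall back to any online cpu */
        if (irq_desc[irq].handler->set_affinity)
            irq_desc[irq].handler->set_affinity(irq, mask);
    }
}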
diff --git a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
index b293eee001..bdc505e9b9 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
@@ -271,38 +271,6 @@ int bind_ipi_on_cpu_to_irq(int ipi)
return irq;
}
-void rebind_evtchn_from_ipi(int cpu, int newcpu, int ipi)
-{
- evtchn_op_t op;
- int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
-
- spin_lock(&irq_mapping_update_lock);
-
- op.cmd = EVTCHNOP_bind_vcpu;
- op.u.bind_vcpu.port = evtchn;
- op.u.bind_vcpu.vcpu = newcpu;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- printk(KERN_INFO "Failed to rebind IPI%d to CPU%d\n",ipi,newcpu);
-
- spin_unlock(&irq_mapping_update_lock);
-}
-
-void rebind_evtchn_from_irq(int cpu, int newcpu, int irq)
-{
- evtchn_op_t op;
- int evtchn = irq_to_evtchn[irq];
-
- spin_lock(&irq_mapping_update_lock);
-
- op.cmd = EVTCHNOP_bind_vcpu;
- op.u.bind_vcpu.port = evtchn;
- op.u.bind_vcpu.vcpu = newcpu;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- printk(KERN_INFO "Failed to rebind IRQ%d to CPU%d\n",irq,newcpu);
-
- spin_unlock(&irq_mapping_update_lock);
-}
-
void unbind_ipi_from_irq(int ipi)
{
evtchn_op_t op;
@@ -363,6 +331,63 @@ void unbind_evtchn_from_irq(int evtchn)
spin_unlock(&irq_mapping_update_lock);
}
+static void do_nothing_function(void *ign)
+{
+}
+
+/* Rebind an evtchn so that it gets delivered to a specific cpu */
+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+{
+ evtchn_op_t op;
+ int evtchn;
+
+ printk("<0>Rebind irq %d to vcpu %d.\n", irq, tcpu);
+ spin_lock(&irq_mapping_update_lock);
+ evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn)) {
+ spin_unlock(&irq_mapping_update_lock);
+ return;
+ }
+
+ printk("<0>Is evtchn %d.\n", evtchn);
+
+ /* Tell Xen to send future instances of this interrupt to the
+ other vcpu */
+ op.cmd = EVTCHNOP_bind_vcpu;
+ op.u.bind_vcpu.port = evtchn;
+ op.u.bind_vcpu.vcpu = tcpu;
+
+ /* If this fails, it usually just indicates that we're dealing
+ with a virq or IPI channel, which don't actually need to be
+ rebound. Ignore it, but don't do the xenlinux-level rebind
+ in that case. */
+ if (HYPERVISOR_event_channel_op(&op) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ /* Now send the new target processor a NOP IPI. When this
+ returns, it will check for any pending interrupts, and so
+ service any that got delivered to the wrong processor by
+ mistake. */
+ /* XXX: The only time this is called with interrupts disabled is
+ from the hotplug/hotunplug path. In that case, all cpus are
+ stopped with interrupts disabled, and the missed interrupts
+ will be picked up when they start again. This is kind of a
+ hack. */
+ if (!irqs_disabled()) {
+ printk("<0>Doing nop ipi\n");
+ smp_call_function(do_nothing_function, NULL, 0, 0);
+ printk("<0>Done nop ipi\n");
+ }
+}
+
+
+static void set_affinity_irq(unsigned irq, cpumask_t dest)
+{
+ unsigned tcpu = first_cpu(dest);
+ rebind_irq_to_cpu(irq, tcpu);
+}
/*
* Interface to generic handling in irq.c
@@ -425,7 +450,7 @@ static struct hw_interrupt_type dynirq_type = {
disable_dynirq,
ack_dynirq,
end_dynirq,
- NULL
+ set_affinity_irq
};
static inline void pirq_unmask_notify(int pirq)
@@ -549,7 +574,7 @@ static struct hw_interrupt_type pirq_type = {
disable_pirq,
ack_pirq,
end_pirq,
- NULL
+ set_affinity_irq
};
void irq_suspend(void)
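
The hw_interrupt_type initializers above are positional. For clarity, here is the same dynirq table written with designated initializers, assuming the 2.6.11 struct layout; the typename string and the earlier member names (which follow the file's dynirq naming pattern but do not appear in the hunks above) are guesses:

/* Annotation only: the NULL -> set_affinity_irq change fills the
 * .set_affinity slot, the last member of hw_interrupt_type. */
static struct hw_interrupt_type dynirq_type_annotated = {
    .typename     = "dynirq",
    .startup      = startup_dynirq,
    .shutdown     = shutdown_dynirq,
    .enable       = enable_dynirq,
    .disable      = disable_dynirq,
    .ack          = ack_dynirq,
    .end          = end_dynirq,
    .set_affinity = set_affinity_irq,
};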
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 13145e114d..21e11a69e4 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -587,13 +587,16 @@ static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind)
struct evtchn *chn;
long rc = 0;
- if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] == NULL) )
+ if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] == NULL) ) {
+ printf("vcpu %d bad.\n", vcpu);
return -EINVAL;
+ }
spin_lock(&d->evtchn_lock);
if ( !port_is_valid(d, port) )
{
+ printf("port %d bad.\n", port);
rc = -EINVAL;
goto out;
}
@@ -607,6 +610,7 @@ static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind)
chn->notify_vcpu_id = vcpu;
break;
default:
+ printf("evtchn type %d can't be rebound.\n", chn->state);
rc = -EINVAL;
break;
}
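
For reference, the guest-side shape of the hypercall this handler validates, assembled from the calls rebind_irq_to_cpu() already makes above; the helper name is made up:

/* Hypothetical helper: how the -EINVAL paths added to
 * evtchn_bind_vcpu() surface to a caller. */
static int bind_port_to_vcpu(int port, unsigned int vcpu)
{
    evtchn_op_t op;

    op.cmd = EVTCHNOP_bind_vcpu;
    op.u.bind_vcpu.port = port;   /* -EINVAL if not a valid port */
    op.u.bind_vcpu.vcpu = vcpu;   /* -EINVAL if the vcpu is absent */

    /* Also fails for channel types the switch above refuses to
     * rebind; rebind_irq_to_cpu() deliberately ignores that case. */
    return HYPERVISOR_event_channel_op(&op);
}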