author    Keir Fraser <keir.fraser@citrix.com>    2010-04-14 10:44:29 +0100
committer Keir Fraser <keir.fraser@citrix.com>    2010-04-14 10:44:29 +0100
commit    58f293ed52f73282e8e71db854801a06aa0a168e (patch)
tree      6808b71d452896ca16114427357ee0106d81624b /xen/common/softirq.c
parent    822cdd45d1eb196b569da50ebde213b8a8330634 (diff)
Per-cpu tasklet lists.
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
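The interface change, in brief: tasklet_schedule() now queues a tasklet on the local CPU's list, and a new tasklet_schedule_on_cpu() targets an explicit CPU. A minimal usage sketch; the handler and the example function below are hypothetical, not part of this patch:

/* Hypothetical caller, assuming the declarations this patch adds to
 * xen/include/xen/softirq.h. */
static void my_handler(unsigned long data)
{
    /* ... deferred work, run in TASKLET_SOFTIRQ context ... */
}

static struct tasklet my_tasklet;

static void example(void)
{
    tasklet_init(&my_tasklet, my_handler, 0);

    /* As before: run on the current CPU. */
    tasklet_schedule(&my_tasklet);

    /* New in this patch: queue on CPU 2's per-cpu list; that CPU is
     * kicked via cpu_raise_softirq(). */
    tasklet_schedule_on_cpu(&my_tasklet, 2);
}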
Diffstat (limited to 'xen/common/softirq.c')
-rw-r--r--  xen/common/softirq.c | 77
1 file changed, 59 insertions(+), 18 deletions(-)
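The diffstat is limited to xen/common/softirq.c, so the matching struct tasklet change in xen/include/xen/softirq.h is not shown here. A sketch of the struct after this patch, reconstructed from how the fields are used in the diff below (field order is an assumption); the new scheduled_on member replaces the old is_scheduled flag:

/* Reconstructed sketch; not copied from the (unshown) header diff. */
struct tasklet
{
    struct list_head list;
    int scheduled_on;            /* CPU whose list holds us, or -1. */
    bool_t is_running;
    bool_t is_dead;
    void (*func)(unsigned long);
    unsigned long data;
};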
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 7b04f36f2d..f99d0d4f7c 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -78,7 +78,8 @@ void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
 
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
 {
-    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+    if ( !test_and_set_bit(nr, &softirq_pending(cpu))
+         && (cpu != smp_processor_id()) )
         smp_send_event_check_cpu(cpu);
 }
 
@@ -87,46 +88,54 @@ void raise_softirq(unsigned int nr)
     set_bit(nr, &softirq_pending(smp_processor_id()));
 }
 
-static LIST_HEAD(tasklet_list);
+static bool_t tasklets_initialised;
+static DEFINE_PER_CPU(struct list_head, tasklet_list);
 static DEFINE_SPINLOCK(tasklet_lock);
 
-void tasklet_schedule(struct tasklet *t)
+void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&tasklet_lock, flags);
 
-    if ( !t->is_dead )
+    if ( tasklets_initialised && !t->is_dead )
     {
-        if ( !t->is_scheduled && !t->is_running )
+        t->scheduled_on = cpu;
+        if ( !t->is_running )
         {
-            BUG_ON(!list_empty(&t->list));
-            list_add_tail(&t->list, &tasklet_list);
+            list_del(&t->list);
+            list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
+            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
         }
 
-        t->is_scheduled = 1;
-        raise_softirq(TASKLET_SOFTIRQ);
     }
 
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
+void tasklet_schedule(struct tasklet *t)
+{
+    tasklet_schedule_on_cpu(t, smp_processor_id());
+}
+
 static void tasklet_action(void)
 {
+    unsigned int cpu = smp_processor_id();
+    struct list_head *list = &per_cpu(tasklet_list, cpu);
     struct tasklet *t;
 
     spin_lock_irq(&tasklet_lock);
 
-    if ( list_empty(&tasklet_list) )
+    if ( list_empty(list) )
     {
         spin_unlock_irq(&tasklet_lock);
         return;
     }
 
-    t = list_entry(tasklet_list.next, struct tasklet, list);
+    t = list_entry(list->next, struct tasklet, list);
     list_del_init(&t->list);
-    BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
-    t->is_scheduled = 0;
+    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
+    t->scheduled_on = -1;
     t->is_running = 1;
 
     spin_unlock_irq(&tasklet_lock);
@@ -135,17 +144,19 @@ static void tasklet_action(void)
 
     t->is_running = 0;
 
-    if ( t->is_scheduled )
+    if ( t->scheduled_on >= 0 )
     {
         BUG_ON(t->is_dead || !list_empty(&t->list));
-        list_add_tail(&t->list, &tasklet_list);
+        list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
+        if ( t->scheduled_on != cpu )
+            cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ);
     }
 
     /*
      * If there is more work to do then reschedule. We don't grab more work
      * immediately as we want to allow other softirq work to happen first.
      */
-    if ( !list_empty(&tasklet_list) )
+    if ( !list_empty(list) )
         raise_softirq(TASKLET_SOFTIRQ);
 
     spin_unlock_irq(&tasklet_lock);
@@ -159,10 +170,10 @@ void tasklet_kill(struct tasklet *t)
     if ( !list_empty(&t->list) )
     {
-        BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
+        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
         list_del_init(&t->list);
     }
 
-    t->is_scheduled = 0;
+    t->scheduled_on = -1;
     t->is_dead = 1;
 
     while ( t->is_running )
@@ -175,18 +186,48 @@ void tasklet_kill(struct tasklet *t)
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
+void migrate_tasklets_from_cpu(unsigned int cpu)
+{
+    struct list_head *list = &per_cpu(tasklet_list, cpu);
+    unsigned long flags;
+    struct tasklet *t;
+
+    spin_lock_irqsave(&tasklet_lock, flags);
+
+    while ( !list_empty(list) )
+    {
+        t = list_entry(list->next, struct tasklet, list);
+        BUG_ON(t->scheduled_on != cpu);
+        t->scheduled_on = smp_processor_id();
+        list_del(&t->list);
+        list_add_tail(&t->list, &this_cpu(tasklet_list));
+    }
+
+    raise_softirq(TASKLET_SOFTIRQ);
+
+    spin_unlock_irqrestore(&tasklet_lock, flags);
+}
+
 void tasklet_init(
     struct tasklet *t, void (*func)(unsigned long), unsigned long data)
 {
     memset(t, 0, sizeof(*t));
     INIT_LIST_HEAD(&t->list);
+    t->scheduled_on = -1;
     t->func = func;
     t->data = data;
 }
 
 void __init softirq_init(void)
 {
+    unsigned int cpu;
+
+    for_each_possible_cpu ( cpu )
+        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+
     open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+
+    tasklets_initialised = 1;
 }
 
 /*
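migrate_tasklets_from_cpu() has no caller within this file; it is evidently intended for the CPU-offline path, where a surviving CPU drains the dead CPU's per-cpu list so already-queued tasklets are not lost. A hedged sketch of such a call site; the function name and the surrounding hotplug logic are assumptions, not taken from this patch:

/* Hypothetical CPU-offline hook: once 'cpu' can no longer run its
 * TASKLET_SOFTIRQ handler, pull its pending tasklets onto this CPU,
 * where the raise_softirq() inside the callee will get them run. */
static void example_cpu_offline(unsigned int cpu)
{
    /* ... 'cpu' has been removed from the online map ... */
    migrate_tasklets_from_cpu(cpu);
}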