author     Keir Fraser <keir.fraser@citrix.com>    2010-05-11 11:10:24 +0100
committer  Keir Fraser <keir.fraser@citrix.com>    2010-05-11 11:10:24 +0100
commit     a974b14d790ce883da98ba8094a0c59109bae0b7
tree       1d97d9c50e4fb676c0c3e3ada2cfd3b1f9cd6921 /xen/common/tasklet.c
parent     0e13089b1456e71c99005caa7d4b78ba34656337
tasklet: Improve scheduler interaction.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen/common/tasklet.c')
-rw-r--r--    xen/common/tasklet.c    52
1 file changed, 28 insertions(+), 24 deletions(-)
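
The diff below gates tasklet execution on a per-CPU tasklet_work_to_do word holding two bit flags. Their definitions live in xen/include/xen/tasklet.h, which this tasklet.c-only diffstat omits; the following sketch of those definitions is a reconstruction for reference, not part of the diff shown:

/* Sketch of the flag definitions assumed by the diff below; they belong in
 * xen/include/xen/tasklet.h, outside this filtered diffstat. */
#define _TASKLET_enqueued  0  /* Bit index: tasklet is on the per-CPU list. */
#define _TASKLET_scheduled 1  /* Bit index: scheduler has acted on the queueing. */
#define TASKLET_enqueued   (1ul << _TASKLET_enqueued)
#define TASKLET_scheduled  (1ul << _TASKLET_scheduled)
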
diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index e2109770ad..7a6717e55b 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -20,15 +20,23 @@
 /* Some subsystems call into us before we are initialised. We ignore them. */
 static bool_t tasklets_initialised;
 
-/*
- * NB. Any modification to a tasklet_list requires the scheduler to run
- * on the related CPU so that its idle VCPU's priority is set correctly.
- */
+DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
+
 static DEFINE_PER_CPU(struct list_head, tasklet_list);
 
 /* Protects all lists and tasklet structures. */
 static DEFINE_SPINLOCK(tasklet_lock);
 
+static void tasklet_enqueue(struct tasklet *t)
+{
+    unsigned int cpu = t->scheduled_on;
+    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
+
+    list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
+    if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
+        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+}
+
 void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
 {
     unsigned long flags;
@@ -41,8 +49,7 @@ void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
         if ( !t->is_running )
         {
             list_del(&t->list);
-            list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
-            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+            tasklet_enqueue(t);
         }
     }
 
@@ -57,19 +64,21 @@ void tasklet_schedule(struct tasklet *t)
 void do_tasklet(void)
 {
     unsigned int cpu = smp_processor_id();
+    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
     struct list_head *list = &per_cpu(tasklet_list, cpu);
     struct tasklet *t;
 
-    if ( likely(list_empty(list)) )
+    /*
+     * Work must be enqueued *and* scheduled. Otherwise there is no work to
+     * do, and/or scheduler needs to run to update idle vcpu priority.
+     */
+    if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
         return;
 
     spin_lock_irq(&tasklet_lock);
 
     if ( unlikely(list_empty(list)) )
-    {
-        spin_unlock_irq(&tasklet_lock);
-        return;
-    }
+        goto out;
 
     t = list_entry(list->next, struct tasklet, list);
     list_del_init(&t->list);
@@ -88,21 +97,19 @@ void do_tasklet(void)
     if ( t->scheduled_on >= 0 )
     {
         BUG_ON(t->is_dead || !list_empty(&t->list));
-        list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
-        if ( t->scheduled_on != cpu )
-            cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
+        tasklet_enqueue(t);
     }
 
-    raise_softirq(SCHEDULE_SOFTIRQ);
+ out:
+    if ( list_empty(list) )
+    {
+        clear_bit(_TASKLET_enqueued, work_to_do);
+        raise_softirq(SCHEDULE_SOFTIRQ);
+    }
 
     spin_unlock_irq(&tasklet_lock);
 }
 
-bool_t tasklet_queue_empty(unsigned int cpu)
-{
-    return list_empty(&per_cpu(tasklet_list, cpu));
-}
-
 void tasklet_kill(struct tasklet *t)
 {
     unsigned long flags;
@@ -113,7 +120,6 @@ void tasklet_kill(struct tasklet *t)
     {
         BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
         list_del_init(&t->list);
-        cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
     }
 
     t->scheduled_on = -1;
@@ -143,11 +149,9 @@ void migrate_tasklets_from_cpu(unsigned int cpu)
         BUG_ON(t->scheduled_on != cpu);
         t->scheduled_on = smp_processor_id();
         list_del(&t->list);
-        list_add_tail(&t->list, &this_cpu(tasklet_list));
+        tasklet_enqueue(t);
     }
 
-    raise_softirq(SCHEDULE_SOFTIRQ);
-
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
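
The other half of this change lives in xen/common/schedule.c and is not shown in this filtered diff: it is the scheduler that sets and clears _TASKLET_scheduled. The sketch below is a reconstruction of that consumer logic, wrapped in a hypothetical helper for illustration (the real patch open-codes it in schedule()), inferred from how do_tasklet() above tests the two bits:

/* Hypothetical helper illustrating the scheduler-side handshake. The
 * scheduler latches TASKLET_enqueued into TASKLET_scheduled, so that
 * do_tasklet() runs only once the idle vcpu has actually been scheduled. */
static bool_t tasklet_work_scheduled(unsigned int cpu)
{
    unsigned long *work = &per_cpu(tasklet_work_to_do, cpu);

    switch ( *work )
    {
    case TASKLET_enqueued:
        /* New work since the last pass: latch it as scheduled... */
        set_bit(_TASKLET_scheduled, work);
        /* fallthrough */
    case TASKLET_enqueued | TASKLET_scheduled:
        /* ...and keep the idle vcpu runnable so do_tasklet() services it. */
        return 1;
    case TASKLET_scheduled:
        /* do_tasklet() drained the list; retire the scheduled bit. */
        clear_bit(_TASKLET_scheduled, work);
        /* fallthrough */
    case 0:
        return 0;
    }
    BUG();
    return 0;
}

This is also why do_tasklet() raises SCHEDULE_SOFTIRQ once more when the list drains: the scheduler must run again to observe the empty state, clear _TASKLET_scheduled, and restore the idle vcpu's normal priority.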