author     Keir Fraser <keir.fraser@citrix.com>  2010-05-11 11:10:24 +0100
committer  Keir Fraser <keir.fraser@citrix.com>  2010-05-11 11:10:24 +0100
commit     a974b14d790ce883da98ba8094a0c59109bae0b7 (patch)
tree       1d97d9c50e4fb676c0c3e3ada2cfd3b1f9cd6921
parent     0e13089b1456e71c99005caa7d4b78ba34656337 (diff)
tasklet: Improve scheduler interaction.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--  xen/common/sched_credit.c    5
-rw-r--r--  xen/common/sched_credit2.c   5
-rw-r--r--  xen/common/sched_sedf.c     15
-rw-r--r--  xen/common/schedule.c       21
-rw-r--r--  xen/common/tasklet.c        52
-rw-r--r--  xen/include/xen/sched-if.h   3
-rw-r--r--  xen/include/xen/sched.h      6
-rw-r--r--  xen/include/xen/tasklet.h    8
8 files changed, 72 insertions(+), 43 deletions(-)
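
The core of the change: direct scans of the per-CPU tasklet list are replaced
by a per-CPU status word, tasklet_work_to_do, carrying two flag bits:
_TASKLET_enqueued (work is sitting on the list) and _TASKLET_scheduled (the
scheduler has acknowledged that work). The producer-side idea, as a minimal
single-CPU sketch in plain C (not buildable Xen code; the printf stands in
for cpu_raise_softirq(), and a plain read-modify-write stands in for the
atomic test_and_set_bit() the real code uses):

/* Minimal single-CPU sketch (not Xen code) of the new status word. */
#include <assert.h>
#include <stdio.h>

#define _TASKLET_enqueued  0   /* tasklet work is on the per-CPU list */
#define _TASKLET_scheduled 1   /* scheduler has seen the work         */
#define TASKLET_enqueued   (1ul << _TASKLET_enqueued)
#define TASKLET_scheduled  (1ul << _TASKLET_scheduled)

static unsigned long tasklet_work_to_do;   /* stand-in for per_cpu() */

/* Producer: set the enqueued bit and, on the 0 -> 1 transition only,
 * kick the scheduler softirq (as tasklet_enqueue() does below). */
static void enqueue_work(void)
{
    if ( !(tasklet_work_to_do & TASKLET_enqueued) )
    {
        tasklet_work_to_do |= TASKLET_enqueued;
        printf("raise SCHEDULE_SOFTIRQ\n");  /* cpu_raise_softirq() */
    }
}

int main(void)
{
    enqueue_work();            /* raises the softirq                 */
    enqueue_work();            /* already pending: no duplicate kick */
    assert(tasklet_work_to_do == TASKLET_enqueued);
    return 0;
}
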
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 07b7070e55..6dbe7de4ae 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1243,7 +1243,8 @@ csched_load_balance(struct csched_private *prv, int cpu,
* fast for the common case.
*/
static struct task_slice
-csched_schedule(const struct scheduler *ops, s_time_t now)
+csched_schedule(
+ const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
{
const int cpu = smp_processor_id();
struct list_head * const runq = RUNQ(cpu);
@@ -1278,7 +1279,7 @@ csched_schedule(const struct scheduler *ops, s_time_t now)
snext = __runq_elem(runq->next);
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
- if ( !tasklet_queue_empty(cpu) )
+ if ( tasklet_work_scheduled )
{
snext = CSCHED_VCPU(idle_vcpu[cpu]);
snext->pri = CSCHED_PRI_TS_BOOST;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 5d4b2af93f..fe4fa3344d 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -897,7 +897,8 @@ void __dump_execstate(void *unused);
* fast for the common case.
*/
static struct task_slice
-csched_schedule(const struct scheduler *ops, s_time_t now)
+csched_schedule(
+ const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
{
const int cpu = smp_processor_id();
struct csched_runqueue_data *rqd = RQD(ops, cpu);
@@ -921,7 +922,7 @@ csched_schedule(const struct scheduler *ops, s_time_t now)
burn_credits(rqd, scurr, now);
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
- if ( !tasklet_queue_empty(cpu) )
+ if ( tasklet_work_scheduled )
{
snext = CSCHED_VCPU(idle_vcpu[cpu]);
goto out;
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 6f7cc2eba3..6223fc030d 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -790,7 +790,8 @@ static struct task_slice sedf_do_extra_schedule(
-timeslice for the current period used up
-domain on waitqueue has started it's period
-and various others ;) in general: determine which domain to run next*/
-static struct task_slice sedf_do_schedule(const struct scheduler *ops, s_time_t now)
+static struct task_slice sedf_do_schedule(
+ const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
{
int cpu = smp_processor_id();
struct list_head *runq = RUNQ(cpu);
@@ -826,18 +827,13 @@ static struct task_slice sedf_do_schedule(const struct scheduler *ops, s_time_t
check_waitq:
update_queues(now, runq, waitq);
- if ( unlikely(!cpu_isset(cpu, *SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
- {
- ret.task = IDLETASK(cpu);
- ret.time = SECONDS(1);
- goto sched_done;
- }
-
/*now simply pick the first domain from the runqueue, which has the
earliest deadline, because the list is sorted*/
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
- if ( !tasklet_queue_empty(cpu) || (list_empty(runq) && list_empty(waitq)) )
+ if ( tasklet_work_scheduled ||
+ (list_empty(runq) && list_empty(waitq)) ||
+ unlikely(!cpu_isset(cpu, *SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
{
ret.task = IDLETASK(cpu);
ret.time = SECONDS(1);
@@ -870,7 +866,6 @@ static struct task_slice sedf_do_schedule(const struct scheduler *ops, s_time_t
extraq, cpu);
}
- sched_done:
/*TODO: Do something USEFUL when this happens and find out, why it
still can happen!!!*/
if ( ret.time < 0)
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index ebc9703dbc..dffcbf5bda 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -930,6 +930,8 @@ static void schedule(void)
struct vcpu *prev = current, *next = NULL;
s_time_t now = NOW();
struct scheduler *sched = this_cpu(scheduler);
+ unsigned long *tasklet_work = &this_cpu(tasklet_work_to_do);
+ bool_t tasklet_work_scheduled = 0;
struct schedule_data *sd;
struct task_slice next_slice;
@@ -940,12 +942,29 @@ static void schedule(void)
sd = &this_cpu(schedule_data);
+ /* Update tasklet scheduling status. */
+ switch ( *tasklet_work )
+ {
+ case TASKLET_enqueued:
+ set_bit(_TASKLET_scheduled, tasklet_work);
+ case TASKLET_enqueued|TASKLET_scheduled:
+ tasklet_work_scheduled = 1;
+ break;
+ case TASKLET_scheduled:
+ clear_bit(_TASKLET_scheduled, tasklet_work);
+ case 0:
+ /*tasklet_work_scheduled = 0;*/
+ break;
+ default:
+ BUG();
+ }
+
spin_lock_irq(sd->schedule_lock);
stop_timer(&sd->s_timer);
/* get policy-specific decision on scheduling... */
- next_slice = sched->do_schedule(sched, now);
+ next_slice = sched->do_schedule(sched, now, tasklet_work_scheduled);
next = next_slice.task;
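
The switch added to schedule() above is a small state machine over the two
bits, and its fall-throughs are deliberate: freshly enqueued work gains the
scheduled bit and is reported to the scheduler, while a stale scheduled bit
with no remaining work is retired. The same transitions as a self-contained
plain-C sketch (not Xen code; update_tasklet_state() is a hypothetical
stand-in for the inline switch):

/* Sketch (not Xen code) of the state transitions in schedule(). */
#include <assert.h>

#define TASKLET_enqueued  (1ul << 0)
#define TASKLET_scheduled (1ul << 1)

/* Returns 1 if tasklet work should be handed to the scheduler,
 * updating *work exactly as the switch in schedule() does. */
static int update_tasklet_state(unsigned long *work)
{
    switch ( *work )
    {
    case TASKLET_enqueued:                    /* new work: mark it seen */
        *work |= TASKLET_scheduled;
        /* fall through */
    case TASKLET_enqueued | TASKLET_scheduled:
        return 1;                             /* run the idle vcpu      */
    case TASKLET_scheduled:                   /* work drained: retire   */
        *work &= ~TASKLET_scheduled;
        /* fall through */
    case 0:
        return 0;
    }
    return -1;                                /* unreachable (BUG())    */
}

int main(void)
{
    unsigned long w = TASKLET_enqueued;
    int run = update_tasklet_state(&w);
    assert(run == 1 && w == (TASKLET_enqueued | TASKLET_scheduled));

    w = TASKLET_scheduled;
    run = update_tasklet_state(&w);
    assert(run == 0 && w == 0);
    return 0;
}
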
diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c
index e2109770ad..7a6717e55b 100644
--- a/xen/common/tasklet.c
+++ b/xen/common/tasklet.c
@@ -20,15 +20,23 @@
/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;
-/*
- * NB. Any modification to a tasklet_list requires the scheduler to run
- * on the related CPU so that its idle VCPU's priority is set correctly.
- */
+DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
+
static DEFINE_PER_CPU(struct list_head, tasklet_list);
/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);
+static void tasklet_enqueue(struct tasklet *t)
+{
+ unsigned int cpu = t->scheduled_on;
+ unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
+
+ list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
+ if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+}
+
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
unsigned long flags;
@@ -41,8 +49,7 @@ void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
if ( !t->is_running )
{
list_del(&t->list);
- list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
- cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+ tasklet_enqueue(t);
}
}
@@ -57,19 +64,21 @@ void tasklet_schedule(struct tasklet *t)
void do_tasklet(void)
{
unsigned int cpu = smp_processor_id();
+ unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
struct list_head *list = &per_cpu(tasklet_list, cpu);
struct tasklet *t;
- if ( likely(list_empty(list)) )
+ /*
+ * Work must be enqueued *and* scheduled. Otherwise there is no work to
+ * do, and/or scheduler needs to run to update idle vcpu priority.
+ */
+ if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
return;
spin_lock_irq(&tasklet_lock);
if ( unlikely(list_empty(list)) )
- {
- spin_unlock_irq(&tasklet_lock);
- return;
- }
+ goto out;
t = list_entry(list->next, struct tasklet, list);
list_del_init(&t->list);
@@ -88,21 +97,19 @@ void do_tasklet(void)
if ( t->scheduled_on >= 0 )
{
BUG_ON(t->is_dead || !list_empty(&t->list));
- list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
- if ( t->scheduled_on != cpu )
- cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
+ tasklet_enqueue(t);
}
- raise_softirq(SCHEDULE_SOFTIRQ);
+ out:
+ if ( list_empty(list) )
+ {
+ clear_bit(_TASKLET_enqueued, work_to_do);
+ raise_softirq(SCHEDULE_SOFTIRQ);
+ }
spin_unlock_irq(&tasklet_lock);
}
-bool_t tasklet_queue_empty(unsigned int cpu)
-{
- return list_empty(&per_cpu(tasklet_list, cpu));
-}
-
void tasklet_kill(struct tasklet *t)
{
unsigned long flags;
@@ -113,7 +120,6 @@ void tasklet_kill(struct tasklet *t)
{
BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
list_del_init(&t->list);
- cpu_raise_softirq(t->scheduled_on, SCHEDULE_SOFTIRQ);
}
t->scheduled_on = -1;
@@ -143,11 +149,9 @@ void migrate_tasklets_from_cpu(unsigned int cpu)
BUG_ON(t->scheduled_on != cpu);
t->scheduled_on = smp_processor_id();
list_del(&t->list);
- list_add_tail(&t->list, &this_cpu(tasklet_list));
+ tasklet_enqueue(t);
}
- raise_softirq(SCHEDULE_SOFTIRQ);
-
spin_unlock_irqrestore(&tasklet_lock, flags);
}
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7ab636646c..af21e95e8f 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -110,7 +110,8 @@ struct scheduler {
void (*wake) (const struct scheduler *, struct vcpu *);
void (*context_saved) (const struct scheduler *, struct vcpu *);
- struct task_slice (*do_schedule) (const struct scheduler *, s_time_t);
+ struct task_slice (*do_schedule) (const struct scheduler *, s_time_t,
+ bool_t tasklet_work_scheduled);
int (*pick_cpu) (const struct scheduler *, struct vcpu *);
int (*adjust) (const struct scheduler *, struct domain *,
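
With the widened do_schedule hook, each scheduler receives the tasklet
decision as a parameter computed once in schedule(), instead of peeking at
the queue itself. A stub-typed sketch of how a hook honours the flag,
mirroring the credit/credit2/sedf hunks above (plain C, not Xen code;
toy_schedule() and the vcpu names are hypothetical):

/* Sketch (not Xen code) of a do_schedule hook using the new flag. */
#include <stdio.h>

typedef long s_time_t;
typedef unsigned char bool_t;

struct vcpu { const char *name; };
struct task_slice { struct vcpu *task; s_time_t time; };

static struct vcpu idle  = { "idle"  };
static struct vcpu guest = { "guest" };

static struct task_slice toy_schedule(s_time_t now,
                                      bool_t tasklet_work_scheduled)
{
    struct task_slice ret = { &guest, 10 };

    /* Tasklet work (run in idle VCPU context) overrides all else. */
    if ( tasklet_work_scheduled )
        ret.task = &idle;

    (void)now;                          /* unused in this toy */
    return ret;
}

int main(void)
{
    printf("%s\n", toy_schedule(0, 0).task->name);  /* guest */
    printf("%s\n", toy_schedule(0, 1).task->name);  /* idle  */
    return 0;
}
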
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 6da6b2d0f9..4bcc7f1bf2 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -595,8 +595,10 @@ uint64_t get_cpu_idle_time(unsigned int cpu);
* Used by idle loop to decide whether there is work to do:
* (1) Run softirqs; or (2) Play dead; or (3) Run tasklets.
*/
-#define cpu_is_haltable(cpu) \
- (!softirq_pending(cpu) && cpu_online(cpu) && tasklet_queue_empty(cpu))
+#define cpu_is_haltable(cpu) \
+ (!softirq_pending(cpu) && \
+ cpu_online(cpu) && \
+ !per_cpu(tasklet_work_to_do, cpu))
#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))
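
cpu_is_haltable() now tests the whole status word: the CPU may halt only
when no tasklet work is enqueued and no TASKLET_scheduled bit is still
awaiting retirement by schedule(). A minimal sketch of just the tasklet
part of the check (plain C; the softirq and online checks are omitted):

/* Sketch (not Xen code) of the strengthened idle check. */
#include <assert.h>

#define TASKLET_enqueued  (1ul << 0)
#define TASKLET_scheduled (1ul << 1)

static int cpu_is_haltable_sketch(unsigned long tasklet_work)
{
    /* softirq_pending() and cpu_online() checks omitted here. */
    return tasklet_work == 0;
}

int main(void)
{
    assert( cpu_is_haltable_sketch(0));
    assert(!cpu_is_haltable_sketch(TASKLET_enqueued));
    assert(!cpu_is_haltable_sketch(TASKLET_scheduled));
    return 0;
}
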
diff --git a/xen/include/xen/tasklet.h b/xen/include/xen/tasklet.h
index 5eb8f8979b..d324fb39c1 100644
--- a/xen/include/xen/tasklet.h
+++ b/xen/include/xen/tasklet.h
@@ -24,10 +24,16 @@ struct tasklet
#define DECLARE_TASKLET(name, func, data) \
struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data }
+/* Indicates status of tasklet work on each CPU. */
+DECLARE_PER_CPU(unsigned long, tasklet_work_to_do);
+#define _TASKLET_enqueued 0 /* Tasklet work is enqueued for this CPU. */
+#define _TASKLET_scheduled 1 /* Scheduler has scheduled do_tasklet(). */
+#define TASKLET_enqueued (1ul << _TASKLET_enqueued)
+#define TASKLET_scheduled (1ul << _TASKLET_scheduled)
+
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
void tasklet_schedule(struct tasklet *t);
void do_tasklet(void);
-bool_t tasklet_queue_empty(unsigned int cpu);
void tasklet_kill(struct tasklet *t);
void migrate_tasklets_from_cpu(unsigned int cpu);
void tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data);
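
For reference, the (unchanged) DECLARE_TASKLET() pattern statically wires a
work function and its data into the struct. A self-contained imitation with
stubbed types (plain C, not buildable against Xen; my_work and my_tasklet
are hypothetical names):

/* Sketch (not Xen code) of the DECLARE_TASKLET() initialiser pattern. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct tasklet
{
    struct list_head list;
    int scheduled_on;                  /* -1: not scheduled on any CPU */
    int is_running;
    int is_dead;
    void (*func)(unsigned long);
    unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
    struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data }

static void my_work(unsigned long data)
{
    printf("tasklet ran with data %lu\n", data);
}

DECLARE_TASKLET(my_tasklet, my_work, 42);

int main(void)
{
    /* In Xen, tasklet_schedule(&my_tasklet) would set TASKLET_enqueued
     * and raise SCHEDULE_SOFTIRQ; here we just invoke the function. */
    my_tasklet.func(my_tasklet.data);
    return 0;
}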