author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-01-13 16:44:04 +0100
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2006-01-13 16:44:04 +0100
commit     fc5423d8cdf92d98f5eda9edc771cb546184ea93 (patch)
tree       0f2b1da377e0dfce4680345821cc47a31b70ce78 /xen/include/xen/sched-if.h
parent     c62fffdcdc3490cf5c671eb428f8ca0036b7722a (diff)
Introduce a locking protocol for acquiring the 'scheduler lock' on a particular VCPU. Since this requires acquiring the appropriate per-CPU lock, we must re-check the VCPU's current CPU binding after the lock is acquired.

Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/include/xen/sched-if.h')
-rw-r--r--  xen/include/xen/sched-if.h | 37
1 file changed, 33 insertions, 4 deletions
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 7b94338391..d61d5c70d3 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -16,16 +16,47 @@ struct schedule_data {
struct vcpu *curr; /* current task */
struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
- struct timer s_timer; /* scheduling timer */
+ struct timer s_timer; /* scheduling timer */
unsigned long tick; /* current periodic 'tick' */
#ifdef BUCKETS
u32 hist[BUCKETS]; /* for scheduler latency histogram */
#endif
} __cacheline_aligned;
+extern struct schedule_data schedule_data[];
+
+static inline void vcpu_schedule_lock(struct vcpu *v)
+{
+ unsigned int cpu;
+
+ for ( ; ; )
+ {
+ cpu = v->processor;
+ spin_lock(&schedule_data[cpu].schedule_lock);
+ if ( likely(v->processor == cpu) )
+ break;
+ spin_unlock(&schedule_data[cpu].schedule_lock);
+ }
+}
+
+#define vcpu_schedule_lock_irq(v) \
+ do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
+#define vcpu_schedule_lock_irqsave(v, flags) \
+ do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+
+static inline void vcpu_schedule_unlock(struct vcpu *v)
+{
+ spin_unlock(&schedule_data[v->processor].schedule_lock);
+}
+
+#define vcpu_schedule_unlock_irq(v) \
+ do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
+#define vcpu_schedule_unlock_irqrestore(v, flags) \
+ do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
+
struct task_slice {
struct vcpu *task;
- s_time_t time;
+ s_time_t time;
};
struct scheduler {
@@ -48,6 +79,4 @@ struct scheduler {
void (*dump_cpu_state) (int);
};
-extern struct schedule_data schedule_data[];
-
#endif /* __XEN_SCHED_IF_H__ */
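
A minimal usage sketch of the new helpers, showing how a caller is expected to pair vcpu_schedule_lock_irq() with vcpu_schedule_unlock_irq() around a critical section. The function name and the body of the critical section are hypothetical illustrations, not part of this patch; the headers included are assumed to provide struct vcpu and the helpers above.

/* Illustrative only: hypothetical caller, not part of this patch. */
#include <xen/sched.h>
#include <xen/sched-if.h>

static void example_touch_sched_state(struct vcpu *v)
{
    /*
     * Disable IRQs and take the per-CPU scheduler lock. The helper
     * re-checks v->processor after acquiring the lock, so the lock
     * held here always matches the CPU the VCPU is currently bound to.
     */
    vcpu_schedule_lock_irq(v);

    /* ... operate on scheduler state protected by the lock ... */

    /* Drop the lock and re-enable IRQs. */
    vcpu_schedule_unlock_irq(v);
}

The re-check loop in vcpu_schedule_lock() is what makes this safe against concurrent VCPU migration: if the VCPU moves to another CPU between reading v->processor and acquiring that CPU's lock, the lock is released and the acquisition is retried against the new CPU.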