Diffstat (limited to 'xen/include/xen/sched-if.h')
 xen/include/xen/sched-if.h | 138 ++++++++++++++++----------------
 1 file changed, 56 insertions(+), 82 deletions(-)
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index ad4d80c223..d95e254c5a 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -47,96 +47,70 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);

-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    for ( ; ; )
-    {
-        /* The per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock. Retry
-         * until they match.
-         */
-        lock=per_cpu(schedule_data, cpu).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-    return lock;
+#define sched_lock(kind, param, cpu, irq, arg...) \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{ \
+    for ( ; ; ) \
+    { \
+        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+        /* \
+         * v->processor may change when grabbing the lock; but \
+         * per_cpu(v->processor) may also change, if changing cpu pool \
+         * also changes the scheduler lock. Retry until they match. \
+         * \
+         * It may also be the case that v->processor may change but the \
+         * lock may be the same; this will succeed in that case. \
+         */ \
+        spin_lock##irq(lock, ## arg); \
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+            return lock; \
+        spin_unlock##irq(lock, ## arg); \
+    } \
}
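
As a reading aid (not part of the patch): once EXTRA_TYPE(arg) is defined to
expand to nothing, as in the first instantiation block further down, the GNU
", ## arg" extension drops the trailing comma for an empty arg, so
sched_lock(vcpu, const struct vcpu *v, v->processor, _irq) should expand to
roughly the following (retry comment elided):

    static inline spinlock_t *vcpu_schedule_lock_irq(const struct vcpu *v)
    {
        for ( ; ; )
        {
            spinlock_t *lock = per_cpu(schedule_data, v->processor).schedule_lock;

            spin_lock_irq(lock);
            if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
                return lock;
            spin_unlock_irq(lock);
        }
    }
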

-static inline int pcpu_schedule_trylock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    lock=per_cpu(schedule_data, cpu).schedule_lock;
-    if ( ! spin_trylock(lock) )
-        return 0;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
-        return 1;
-    else
-    {
-        spin_unlock(lock);
-        return 0;
-    }
-}
-
-#define pcpu_schedule_lock_irq(p) \
-    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
-    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
-
-static inline void pcpu_schedule_unlock(int cpu)
-{
-    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+#define sched_unlock(kind, param, cpu, irq, arg...) \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
+                                               EXTRA_TYPE(arg), param) \
+{ \
+    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+    spin_unlock##irq(lock, ## arg); \
}

-#define pcpu_schedule_unlock_irq(p) \
-    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
-    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu, cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu, cpu, _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu, cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock
+
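
The instantiations above generate the full family of inlines. As a sketch
(derived by hand from the macros, not literal patch content), the resulting
declarations look like:

    spinlock_t *pcpu_schedule_lock(unsigned int cpu);
    spinlock_t *pcpu_schedule_lock_irq(unsigned int cpu);
    spinlock_t *pcpu_schedule_lock_irqsave(unsigned int cpu,
                                           unsigned long *flags);
    void pcpu_schedule_unlock(spinlock_t *lock, unsigned int cpu);
    void pcpu_schedule_unlock_irq(spinlock_t *lock, unsigned int cpu);
    void pcpu_schedule_unlock_irqrestore(spinlock_t *lock, unsigned long flags,
                                         unsigned int cpu);
    /* ... plus the corresponding six vcpu_* variants taking
     * const struct vcpu *v in place of the cpu argument. */

The temporary "#define spin_unlock_irqsave spin_unlock_irqrestore" is needed
because the _irqsave lock body's retry path expands spin_unlock##irq to
spin_unlock_irqsave, which does not otherwise exist; mapping it to
spin_unlock_irqrestore restores the just-saved flags before retrying.
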
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
{
-    spinlock_t * lock;
-
-    for ( ; ; )
-    {
-        /* v->processor may change when grabbing the lock; but
-         * per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock. Retry
-         * until they match.
-         *
-         * It may also be the case that v->processor may change
-         * but the lock may be the same; this will succeed
-         * in that case.
-         */
-        lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-}
-
-#define vcpu_schedule_lock_irq(v) \
-    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
-    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;

-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
- spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+    if ( !spin_trylock(lock) )
+        return NULL;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return lock;
+    spin_unlock(lock);
+    return NULL;
}
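
Note the changed contract: pcpu_schedule_trylock() now returns the lock
pointer (or NULL) instead of an int, so a caller keeps hold of the exact lock
it took. A hypothetical caller (illustrative only, not from this patch):

    static void probe_cpu_schedule_data(unsigned int cpu)
    {
        spinlock_t *lock = pcpu_schedule_trylock(cpu);

        if ( lock == NULL )
            return;             /* contended, or lock changed under us */
        /* ... inspect per_cpu(schedule_data, cpu) under the lock ... */
        spin_unlock(lock);
    }
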

-#define vcpu_schedule_unlock_irq(v) \
-    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
-    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
struct task_slice {
struct vcpu *task;
s_time_t time;