author    Jan Beulich <jbeulich@suse.com>    2013-10-14 08:57:56 +0200
committer Jan Beulich <jbeulich@suse.com>    2013-10-14 08:57:56 +0200
commit    eedd60391610629b4e8a2e8278b857ff884f750d (patch)
tree      2074ed67f4e6eadf0c5ace2f94442411be7fdc67 /xen/include
parent    48830988a28b7fb1eed225354e25572aa955749a (diff)
scheduler: adjust internal locking interface
Make the locking functions return the lock pointers, so they can be
passed to the unlocking functions (which in turn can check that the
lock is still actually providing the intended protection, i.e. the
parameters determining which lock is the right one didn't change).

Further use proper spin lock primitives rather than open coded
local_irq_...() constructs, so that interrupts can be re-enabled as
appropriate while spinning.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
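
As a minimal sketch of the resulting calling convention (the surrounding
function is hypothetical; the lock/unlock helpers are the ones generated
by the macros in the diff below), a caller now keeps the returned lock
pointer and hands it back when unlocking:

/* Hypothetical caller: the lock routine returns the lock it actually
 * took, and that same pointer is passed back to the unlock routine,
 * which ASSERT()s that it still matches the vCPU's scheduler lock. */
static void example_adjust_vcpu(struct vcpu *v)
{
    unsigned long flags;
    spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);

    /* ... manipulate scheduler state for v ... */

    vcpu_schedule_unlock_irqrestore(lock, flags, v);
}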
Diffstat (limited to 'xen/include')
-rw-r--r--  xen/include/xen/sched-if.h  138
1 file changed, 56 insertions, 82 deletions
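
For reference, each sched_lock()/sched_unlock() instantiation in the diff
below generates an ordinary inline function. Expanding the vcpu/_irq pair
by hand (with EXTRA_TYPE(arg) empty) gives roughly:

/* Rough by-hand expansion of
 *   sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
 *   sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
 */
static inline spinlock_t *vcpu_schedule_lock_irq(const struct vcpu *v)
{
    for ( ; ; )
    {
        spinlock_t *lock = per_cpu(schedule_data, v->processor).schedule_lock;

        /* Retry until the lock taken is still the vCPU's scheduler lock. */
        spin_lock_irq(lock);
        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
            return lock;
        spin_unlock_irq(lock);
    }
}

static inline void vcpu_schedule_unlock_irq(spinlock_t *lock, const struct vcpu *v)
{
    ASSERT(lock == per_cpu(schedule_data, v->processor).schedule_lock);
    spin_unlock_irq(lock);
}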
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index ad4d80c223..d95e254c5a 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -47,96 +47,70 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);
-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- for ( ; ; )
- {
- /* The per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- */
- lock=per_cpu(schedule_data, cpu).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
- break;
- spin_unlock(lock);
- }
- return lock;
+#define sched_lock(kind, param, cpu, irq, arg...) \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{ \
+ for ( ; ; ) \
+ { \
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
+ /* \
+ * v->processor may change when grabbing the lock; but \
+ * per_cpu(v->processor) may also change, if changing cpu pool \
+ * also changes the scheduler lock. Retry until they match. \
+ * \
+ * It may also be the case that v->processor may change but the \
+ * lock may be the same; this will succeed in that case. \
+ */ \
+ spin_lock##irq(lock, ## arg); \
+ if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
+ return lock; \
+ spin_unlock##irq(lock, ## arg); \
+ } \
}
-static inline int pcpu_schedule_trylock(int cpu)
-{
- spinlock_t * lock=NULL;
-
- lock=per_cpu(schedule_data, cpu).schedule_lock;
- if ( ! spin_trylock(lock) )
- return 0;
- if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
- return 1;
- else
- {
- spin_unlock(lock);
- return 0;
- }
-}
-
-#define pcpu_schedule_lock_irq(p) \
- do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
- do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
-
-static inline void pcpu_schedule_unlock(int cpu)
-{
- spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+#define sched_unlock(kind, param, cpu, irq, arg...) \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
+ EXTRA_TYPE(arg), param) \
+{ \
+ ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
+ spin_unlock##irq(lock, ## arg); \
}
-#define pcpu_schedule_unlock_irq(p) \
- do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
- do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu, cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu, cpu, _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu, cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu, cpu, _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu, cpu, _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu, cpu, _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock
+
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
{
- spinlock_t * lock;
-
- for ( ; ; )
- {
- /* v->processor may change when grabbing the lock; but
- * per_cpu(v->processor) may also change, if changing
- * cpu pool also changes the scheduler lock. Retry
- * until they match.
- *
- * It may also be the case that v->processor may change
- * but the lock may be the same; this will succeed
- * in that case.
- */
- lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
- spin_lock(lock);
- if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
- break;
- spin_unlock(lock);
- }
-}
-
-#define vcpu_schedule_lock_irq(v) \
- do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
- do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+ spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
- spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+ if ( !spin_trylock(lock) )
+ return NULL;
+ if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+ return lock;
+ spin_unlock(lock);
+ return NULL;
}
-#define vcpu_schedule_unlock_irq(v) \
- do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
- do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
struct task_slice {
struct vcpu *task;
s_time_t time;