author     Keir Fraser <keir@xen.org>  2010-12-24 08:26:59 +0000
committer  Keir Fraser <keir@xen.org>  2010-12-24 08:26:59 +0000
commit     d2f6b6016990b570c6782e1639ca1c0b07013b59 (patch)
tree       18098d7d1f3581e1500ce0bdd819083c8b5d702f /xen/include/xen/sched-if.h
parent     188be59890f39c0af54367e28cd0a80b80343b55 (diff)
scheduler: Introduce pcpu_schedule_lock
Many places in Xen, particularly schedule.c, grab the per-cpu spinlock
directly, rather than through vcpu_schedule_lock(). Since the lock pointer
may change between the time it is read and the time the lock is successfully
acquired, we need to check after acquiring the lock to make sure that the
pcpu's lock hasn't changed, due to cpu initialization or cpupool activity.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
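For illustration only (not part of this patch), a call site that previously
took the lock directly with spin_lock(per_cpu(schedule_data, cpu).schedule_lock)
would use the new helpers roughly as follows; the function name and body here
are hypothetical:

    static void example_poke_cpu(int cpu)
    {
        unsigned long flags;

        /* Take cpu's scheduler lock; the helper re-reads schedule_lock
         * after acquisition, so a concurrent cpupool move cannot leave
         * us holding a stale lock. */
        pcpu_schedule_lock_irqsave(cpu, flags);

        /* ... operate on per_cpu(schedule_data, cpu) ... */

        pcpu_schedule_unlock_irqrestore(cpu, flags);
    }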
Diffstat (limited to 'xen/include/xen/sched-if.h')
-rw-r--r--  xen/include/xen/sched-if.h  51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 84f7f5a1c8..e8f0262a6b 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -39,6 +39,57 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
+static inline spinlock_t * pcpu_schedule_lock(int cpu)
+{
+    spinlock_t * lock=NULL;
+
+    for ( ; ; )
+    {
+        /* The per_cpu(v->processor) may also change, if changing
+         * cpu pool also changes the scheduler lock.  Retry
+         * until they match.
+         */
+        lock=per_cpu(schedule_data, cpu).schedule_lock;
+
+        spin_lock(lock);
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
+            break;
+        spin_unlock(lock);
+    }
+    return lock;
+}
+
+static inline int pcpu_schedule_trylock(int cpu)
+{
+    spinlock_t * lock=NULL;
+
+    lock=per_cpu(schedule_data, cpu).schedule_lock;
+    if ( ! spin_trylock(lock) )
+        return 0;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return 1;
+    else
+    {
+        spin_unlock(lock);
+        return 0;
+    }
+}
+
+#define pcpu_schedule_lock_irq(p) \
+    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
+#define pcpu_schedule_lock_irqsave(p, flags) \
+    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
+
+static inline void pcpu_schedule_unlock(int cpu)
+{
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+}
+
+#define pcpu_schedule_unlock_irq(p) \
+    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
+#define pcpu_schedule_unlock_irqrestore(p, flags) \
+    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
+
 static inline void vcpu_schedule_lock(struct vcpu *v)
 {
     spinlock_t * lock;