-rw-r--r-- | xen/common/sched_credit.c  |  11 |
-rw-r--r-- | xen/common/sched_credit2.c |  22 |
-rw-r--r-- | xen/common/sched_sedf.c    |  29 |
-rw-r--r-- | xen/common/schedule.c      |  61 |
-rw-r--r-- | xen/include/xen/sched-if.h | 138 |
5 files changed, 125 insertions(+), 136 deletions(-)
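
In short, this patch replaces the old scheduler locking macros, which re-derived the per-CPU lock on both the lock and the unlock side, with inline helpers that return the spinlock_t that was actually taken; callers keep that pointer and pass it back to the matching unlock helper, which ASSERTs that it still equals per_cpu(schedule_data, ...).schedule_lock. A minimal caller-side sketch of the convention change (illustrative only; the wrapper function name is made up, but the pattern mirrors vcpu_force_reschedule() in the schedule.c hunks below):

/* Hypothetical example, not part of the patch. */
static void example_mark_migrating(struct vcpu *v)
{
    /*
     * Old convention (before this patch):
     *
     *     vcpu_schedule_lock_irq(v);
     *     set_bit(_VPF_migrating, &v->pause_flags);
     *     vcpu_schedule_unlock_irq(v);
     *
     * The unlock side re-derived the lock from v->processor, which may
     * have changed while the lock was held.
     */

    /* New convention: remember which lock was taken and hand it back. */
    spinlock_t *lock = vcpu_schedule_lock_irq(v);

    set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(lock, v);
}
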
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 3d6ea7acf3..192c653937 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1166,6 +1166,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     struct list_head *runq, *elem, *next, *last_under;
     struct csched_vcpu *svc_elem;
+    spinlock_t *lock;
     unsigned long flags;
     int sort_epoch;
 
@@ -1175,7 +1176,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
 
     spc->runq_sort_last = sort_epoch;
 
-    pcpu_schedule_lock_irqsave(cpu, flags);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -1200,7 +1201,7 @@ csched_runq_sort(struct csched_private *prv, unsigned int cpu)
         elem = next;
     }
 
-    pcpu_schedule_unlock_irqrestore(cpu, flags);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 }
 
 static void
@@ -1563,7 +1564,9 @@ csched_load_balance(struct csched_private *prv, int cpu,
              * could cause a deadlock if the peer CPU is also load
              * balancing and trying to lock this CPU.
              */
-            if ( !pcpu_schedule_trylock(peer_cpu) )
+            spinlock_t *lock = pcpu_schedule_trylock(peer_cpu);
+
+            if ( !lock )
             {
                 SCHED_STAT_CRANK(steal_trylock_failed);
                 peer_cpu = cpumask_cycle(peer_cpu, &workers);
@@ -1573,7 +1576,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
             /* Any work over there to steal? */
             speer = cpumask_test_cpu(peer_cpu, online) ?
                 csched_runq_steal(peer_cpu, cpu, snext->pri, bstep) : NULL;
-            pcpu_schedule_unlock(peer_cpu);
+            pcpu_schedule_unlock(lock, peer_cpu);
 
             /* As soon as one vcpu is found, balancing ends */
             if ( speer != NULL )
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ae0abc2ab9..84e547bad8 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -882,15 +882,17 @@ csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
      */
     if ( ! is_idle_vcpu(vc) )
     {
+        spinlock_t *lock;
+
         /* FIXME: Do we need the private lock here? */
         list_add_tail(&svc->sdom_elem, &svc->sdom->vcpu);
 
         /* Add vcpu to runqueue of initial processor */
-        vcpu_schedule_lock_irq(vc);
+        lock = vcpu_schedule_lock_irq(vc);
 
         runq_assign(ops, vc);
 
-        vcpu_schedule_unlock_irq(vc);
+        vcpu_schedule_unlock_irq(lock, vc);
 
         sdom->nr_vcpus++;
     }
@@ -917,14 +919,16 @@ csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
 
     if ( ! is_idle_vcpu(vc) )
     {
+        spinlock_t *lock;
+
         SCHED_STAT_CRANK(vcpu_destroy);
 
         /* Remove from runqueue */
-        vcpu_schedule_lock_irq(vc);
+        lock = vcpu_schedule_lock_irq(vc);
 
         runq_deassign(ops, vc);
 
-        vcpu_schedule_unlock_irq(vc);
+        vcpu_schedule_unlock_irq(lock, vc);
 
         /* Remove from sdom list. Don't need a lock for this, as it's called
          * syncronously when nothing else can happen. */
@@ -1011,8 +1015,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu * const svc = CSCHED_VCPU(vc);
     s_time_t now = NOW();
-
-    vcpu_schedule_lock_irq(vc);
+    spinlock_t *lock = vcpu_schedule_lock_irq(vc);
 
     BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
 
@@ -1038,7 +1041,7 @@ csched_context_saved(const struct scheduler *ops, struct vcpu *vc)
     else if ( !is_idle_vcpu(vc) )
         update_load(ops, svc->rqd, svc, -1, now);
 
-    vcpu_schedule_unlock_irq(vc);
+    vcpu_schedule_unlock_irq(lock, vc);
 }
 
 #define MAX_LOAD (1ULL<<60);
@@ -1456,14 +1459,14 @@ csched_dom_cntl(
              * must never lock csched_priv.lock if we're holding a runqueue lock.
              * Also, calling vcpu_schedule_lock() is enough, since IRQs have already
              * been disabled.
             */
-            vcpu_schedule_lock(svc->vcpu);
+            spinlock_t *lock = vcpu_schedule_lock(svc->vcpu);
 
             BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
 
             svc->weight = sdom->weight;
             update_max_weight(svc->rqd, svc->weight, old_weight);
 
-            vcpu_schedule_unlock(svc->vcpu);
+            vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
 }
@@ -1993,6 +1996,7 @@ static void init_pcpu(const struct scheduler *ops, int cpu)
     cpumask_set_cpu(cpu, &rqd->idle);
     cpumask_set_cpu(cpu, &rqd->active);
 
+    /* _Not_ pcpu_schedule_unlock(): per_cpu().schedule_lock changed! */
     spin_unlock(old_lock);
 
     cpumask_set_cpu(cpu, &prv->initialized);
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index d1812b6c48..7c24171e9e 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1350,14 +1350,16 @@ static int sedf_adjust_weights(struct cpupool *c, int nr_cpus, int *sumw, s_time
             if ( EDOM_INFO(p)->weight )
             {
                 /* Interrupts already off */
-                vcpu_schedule_lock(p);
+                spinlock_t *lock = vcpu_schedule_lock(p);
+
                 EDOM_INFO(p)->period_orig =
                     EDOM_INFO(p)->period = WEIGHT_PERIOD;
                 EDOM_INFO(p)->slice_orig =
                     EDOM_INFO(p)->slice =
                     (EDOM_INFO(p)->weight *
                      (WEIGHT_PERIOD - WEIGHT_SAFETY - sumt[cpu])) / sumw[cpu];
-                vcpu_schedule_unlock(p);
+
+                vcpu_schedule_unlock(lock, p);
             }
         }
     }
@@ -1418,21 +1420,24 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
         {
             /* (Here and everywhere in the following) IRQs are already off,
              * hence vcpu_spin_lock() is the one. */
-            vcpu_schedule_lock(v);
+            spinlock_t *lock = vcpu_schedule_lock(v);
+
             EDOM_INFO(v)->extraweight = op->u.sedf.weight;
             EDOM_INFO(v)->weight = 0;
             EDOM_INFO(v)->slice = 0;
             EDOM_INFO(v)->period = WEIGHT_PERIOD;
-            vcpu_schedule_unlock(v);
+            vcpu_schedule_unlock(lock, v);
         }
     }
     else
     {
         /* Weight-driven domains with real-time execution */
-        for_each_vcpu ( p, v ) {
-            vcpu_schedule_lock(v);
+        for_each_vcpu ( p, v )
+        {
+            spinlock_t *lock = vcpu_schedule_lock(v);
+
             EDOM_INFO(v)->weight = op->u.sedf.weight;
-            vcpu_schedule_unlock(v);
+            vcpu_schedule_unlock(lock, v);
         }
     }
 }
@@ -1454,14 +1459,15 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
         /* Time-driven domains */
         for_each_vcpu ( p, v )
         {
-            vcpu_schedule_lock(v);
+            spinlock_t *lock = vcpu_schedule_lock(v);
+
             EDOM_INFO(v)->weight = 0;
             EDOM_INFO(v)->extraweight = 0;
             EDOM_INFO(v)->period_orig =
                 EDOM_INFO(v)->period = op->u.sedf.period;
             EDOM_INFO(v)->slice_orig =
                 EDOM_INFO(v)->slice = op->u.sedf.slice;
-            vcpu_schedule_unlock(v);
+            vcpu_schedule_unlock(lock, v);
         }
     }
 
@@ -1471,13 +1477,14 @@ static int sedf_adjust(const struct scheduler *ops, struct domain *p, struct xen
 
         for_each_vcpu ( p, v )
        {
-            vcpu_schedule_lock(v);
+            spinlock_t *lock = vcpu_schedule_lock(v);
+
             EDOM_INFO(v)->status =
                 (EDOM_INFO(v)->status & ~EXTRA_AWARE) |
                 (op->u.sedf.extratime & EXTRA_AWARE);
             EDOM_INFO(v)->latency = op->u.sedf.latency;
             extraq_check(v);
-            vcpu_schedule_unlock(v);
+            vcpu_schedule_unlock(lock, v);
         }
     }
     else if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 1ddfb22df6..95472bad0d 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -160,18 +160,16 @@ static inline void vcpu_runstate_change(
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 {
+    spinlock_t *lock = likely(v == current) ?
+                       NULL : vcpu_schedule_lock_irq(v);
     s_time_t delta;
 
-    if ( unlikely(v != current) )
-        vcpu_schedule_lock_irq(v);
-
     memcpy(runstate, &v->runstate, sizeof(*runstate));
     delta = NOW() - runstate->state_entry_time;
     if ( delta > 0 )
         runstate->time[runstate->state] += delta;
 
-    if ( unlikely(v != current) )
-        vcpu_schedule_unlock_irq(v);
+    if ( unlikely(lock != NULL) )
+        vcpu_schedule_unlock_irq(lock, v);
 }
 
 uint64_t get_cpu_idle_time(unsigned int cpu)
@@ -333,8 +331,7 @@ void sched_destroy_domain(struct domain *d)
 void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
-
-    vcpu_schedule_lock_irqsave(v, flags);
+    spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
     if ( likely(!vcpu_runnable(v)) )
     {
@@ -344,7 +341,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
             SCHED_OP(VCPU2OP(v), sleep, v);
     }
 
-    vcpu_schedule_unlock_irqrestore(v, flags);
+    vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
@@ -362,8 +359,7 @@ void vcpu_sleep_sync(struct vcpu *v)
 void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
-
-    vcpu_schedule_lock_irqsave(v, flags);
+    spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
 
     if ( likely(vcpu_runnable(v)) )
     {
@@ -377,7 +373,7 @@ void vcpu_wake(struct vcpu *v)
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
     }
 
-    vcpu_schedule_unlock_irqrestore(v, flags);
+    vcpu_schedule_unlock_irqrestore(lock, flags, v);
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 }
@@ -528,10 +524,11 @@ static void vcpu_migrate(struct vcpu *v)
  */
 void vcpu_force_reschedule(struct vcpu *v)
 {
-    vcpu_schedule_lock_irq(v);
+    spinlock_t *lock = vcpu_schedule_lock_irq(v);
+
     if ( v->is_running )
         set_bit(_VPF_migrating, &v->pause_flags);
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     if ( test_bit(_VPF_migrating, &v->pause_flags) )
     {
@@ -546,7 +543,7 @@ void restore_vcpu_affinity(struct domain *d)
 
     for_each_vcpu ( d, v )
     {
-        vcpu_schedule_lock_irq(v);
+        spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
         if ( v->affinity_broken )
         {
@@ -559,13 +556,13 @@ void restore_vcpu_affinity(struct domain *d)
         if ( v->processor == smp_processor_id() )
         {
             set_bit(_VPF_migrating, &v->pause_flags);
-            vcpu_schedule_unlock_irq(v);
+            vcpu_schedule_unlock_irq(lock, v);
             vcpu_sleep_nosync(v);
             vcpu_migrate(v);
         }
         else
         {
-            vcpu_schedule_unlock_irq(v);
+            vcpu_schedule_unlock_irq(lock, v);
         }
     }
 
@@ -592,7 +589,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     {
         for_each_vcpu ( d, v )
        {
-            vcpu_schedule_lock_irq(v);
+            spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
             cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
@@ -613,13 +610,13 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor == cpu )
             {
                 set_bit(_VPF_migrating, &v->pause_flags);
-                vcpu_schedule_unlock_irq(v);
+                vcpu_schedule_unlock_irq(lock, v);
                 vcpu_sleep_nosync(v);
                 vcpu_migrate(v);
             }
             else
             {
-                vcpu_schedule_unlock_irq(v);
+                vcpu_schedule_unlock_irq(lock, v);
             }
 
             /*
@@ -646,6 +643,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
     cpumask_t online_affinity;
     cpumask_t *online;
+    spinlock_t *lock;
 
     if ( v->domain->is_pinned )
         return -EINVAL;
@@ -654,7 +652,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
     if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
-    vcpu_schedule_lock_irq(v);
+    lock = vcpu_schedule_lock_irq(v);
 
     cpumask_copy(v->cpu_affinity, affinity);
 
@@ -662,7 +660,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
      * when changing the affinity */
     set_bit(_VPF_migrating, &v->pause_flags);
 
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     domain_update_node_affinity(v->domain);
 
@@ -776,10 +774,10 @@ static long do_poll(struct sched_poll *sched_poll)
 static long do_yield(void)
 {
     struct vcpu * v=current;
+    spinlock_t *lock = vcpu_schedule_lock_irq(v);
 
-    vcpu_schedule_lock_irq(v);
     SCHED_OP(VCPU2OP(v), yield, v);
-    vcpu_schedule_unlock_irq(v);
+    vcpu_schedule_unlock_irq(lock, v);
 
     TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
     raise_softirq(SCHEDULE_SOFTIRQ);
@@ -1140,6 +1138,7 @@ static void schedule(void)
     unsigned long        *tasklet_work = &this_cpu(tasklet_work_to_do);
     bool_t                tasklet_work_scheduled = 0;
     struct schedule_data *sd;
+    spinlock_t           *lock;
     struct task_slice     next_slice;
     int cpu = smp_processor_id();
 
@@ -1166,7 +1165,7 @@ static void schedule(void)
         BUG();
     }
 
-    pcpu_schedule_lock_irq(cpu);
+    lock = pcpu_schedule_lock_irq(cpu);
 
     stop_timer(&sd->s_timer);
 
@@ -1183,7 +1182,7 @@ static void schedule(void)
 
     if ( unlikely(prev == next) )
     {
-        pcpu_schedule_unlock_irq(cpu);
+        pcpu_schedule_unlock_irq(lock, cpu);
         trace_continue_running(next);
         return continue_running(prev);
     }
@@ -1221,7 +1220,7 @@ static void schedule(void)
     ASSERT(!next->is_running);
     next->is_running = 1;
 
-    pcpu_schedule_unlock_irq(cpu);
+    pcpu_schedule_unlock_irq(lock, cpu);
 
     SCHED_STAT_CRANK(sched_ctx);
 
@@ -1408,6 +1407,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *idle;
+    spinlock_t *lock;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = per_cpu(scheduler, cpu);
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
@@ -1426,7 +1426,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
         return -ENOMEM;
     }
 
-    pcpu_schedule_lock_irqsave(cpu, flags);
+    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
 
     SCHED_OP(old_ops, tick_suspend, cpu);
     vpriv_old = idle->sched_priv;
@@ -1437,7 +1437,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     SCHED_OP(new_ops, tick_resume, cpu);
     SCHED_OP(new_ops, insert_vcpu, idle);
 
-    pcpu_schedule_unlock_irqrestore(cpu, flags);
+    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
@@ -1495,10 +1495,11 @@ void schedule_dump(struct cpupool *c)
 
     for_each_cpu (i, cpus)
     {
-        pcpu_schedule_lock(i);
+        spinlock_t *lock = pcpu_schedule_lock(i);
+
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        pcpu_schedule_unlock(i);
+        pcpu_schedule_unlock(lock, i);
     }
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index ad4d80c223..d95e254c5a 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -47,96 +47,70 @@ DECLARE_PER_CPU(struct schedule_data, schedule_data);
 DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 
-static inline spinlock_t * pcpu_schedule_lock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    for ( ; ; )
-    {
-        /* The per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock.  Retry
-         * until they match.
-         */
-        lock=per_cpu(schedule_data, cpu).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-    return lock;
+#define sched_lock(kind, param, cpu, irq, arg...)                          \
+static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
+{                                                                           \
+    for ( ; ; )                                                             \
+    {                                                                       \
+        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;       \
+        /*                                                                  \
+         * v->processor may change when grabbing the lock; but              \
+         * per_cpu(v->processor) may also change, if changing cpu pool      \
+         * also changes the scheduler lock.  Retry until they match.        \
+         *                                                                  \
+         * It may also be the case that v->processor may change but the     \
+         * lock may be the same; this will succeed in that case.            \
+         */                                                                 \
+        spin_lock##irq(lock, ## arg);                                       \
+        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )    \
+            return lock;                                                    \
+        spin_unlock##irq(lock, ## arg);                                     \
+    }                                                                       \
+}
 
-static inline int pcpu_schedule_trylock(int cpu)
-{
-    spinlock_t * lock=NULL;
-
-    lock=per_cpu(schedule_data, cpu).schedule_lock;
-    if ( ! spin_trylock(lock) )
-        return 0;
-    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
-        return 1;
-    else
-    {
-        spin_unlock(lock);
-        return 0;
-    }
-}
-
-#define pcpu_schedule_lock_irq(p) \
-    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
-#define pcpu_schedule_lock_irqsave(p, flags) \
-    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )
-
-static inline void pcpu_schedule_unlock(int cpu)
-{
-    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
+#define sched_unlock(kind, param, cpu, irq, arg...)                         \
+static inline void kind##_schedule_unlock##irq(spinlock_t *lock             \
+                                               EXTRA_TYPE(arg), param)      \
+{                                                                           \
+    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock);              \
+    spin_unlock##irq(lock, ## arg);                                         \
 }
 
-#define pcpu_schedule_unlock_irq(p) \
-    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
-#define pcpu_schedule_unlock_irqrestore(p, flags) \
-    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
-
-static inline void vcpu_schedule_lock(struct vcpu *v)
+#define EXTRA_TYPE(arg)
+sched_lock(pcpu, unsigned int cpu,     cpu, )
+sched_lock(vcpu, const struct vcpu *v, v->processor, )
+sched_lock(pcpu, unsigned int cpu,     cpu,          _irq)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
+sched_unlock(pcpu, unsigned int cpu,     cpu, )
+sched_unlock(vcpu, const struct vcpu *v, v->processor, )
+sched_unlock(pcpu, unsigned int cpu,     cpu,          _irq)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
+#undef EXTRA_TYPE
+
+#define EXTRA_TYPE(arg) , unsigned long arg
+#define spin_unlock_irqsave spin_unlock_irqrestore
+sched_lock(pcpu, unsigned int cpu,     cpu,          _irqsave, *flags)
+sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
+#undef spin_unlock_irqsave
+sched_unlock(pcpu, unsigned int cpu,     cpu,          _irqrestore, flags)
+sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
+#undef EXTRA_TYPE
+
+#undef sched_unlock
+#undef sched_lock
+
+static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
 {
-    spinlock_t * lock;
-
-    for ( ; ; )
-    {
-        /* v->processor may change when grabbing the lock; but
-         * per_cpu(v->processor) may also change, if changing
-         * cpu pool also changes the scheduler lock.  Retry
-         * until they match.
-         *
-         * It may also be the case that v->processor may change
-         * but the lock may be the same; this will succeed
-         * in that case.
-         */
-        lock=per_cpu(schedule_data, v->processor).schedule_lock;
-
-        spin_lock(lock);
-        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
-            break;
-        spin_unlock(lock);
-    }
-}
-
-#define vcpu_schedule_lock_irq(v) \
-    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
-#define vcpu_schedule_lock_irqsave(v, flags) \
-    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )
+    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
 
-static inline void vcpu_schedule_unlock(struct vcpu *v)
-{
-    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
+    if ( !spin_trylock(lock) )
+        return NULL;
+    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
+        return lock;
+    spin_unlock(lock);
+    return NULL;
 }
 
-#define vcpu_schedule_unlock_irq(v) \
-    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
-#define vcpu_schedule_unlock_irqrestore(v, flags) \
-    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
-
 struct task_slice {
     struct vcpu *task;
     s_time_t     time;
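
For reference, the pcpu_/vcpu_schedule_lock and unlock helpers used by the callers above are generated by the sched_lock()/sched_unlock() macros introduced in sched-if.h. Below is a hand-expanded sketch of one generated pair, the _irqsave/_irqrestore flavour for vCPUs, assuming the macro arguments shown in the hunk (illustrative expansion only; the macros in xen/include/xen/sched-if.h are authoritative):

/* sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
 * expands to roughly this: */
static inline spinlock_t *vcpu_schedule_lock_irqsave(const struct vcpu *v,
                                                     unsigned long *flags)
{
    for ( ; ; )
    {
        spinlock_t *lock = per_cpu(schedule_data, v->processor).schedule_lock;

        spin_lock_irqsave(lock, *flags);
        /* v->processor, or the pCPU's schedule_lock itself, may have changed
         * while we were spinning; retry until the lock we hold is still the
         * one this vCPU's current pCPU uses. */
        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
            return lock;
        /* Inside the macro this is spelled spin_unlock_irqsave(), which is
         * temporarily #defined to spin_unlock_irqrestore(). */
        spin_unlock_irqrestore(lock, *flags);
    }
}

/* sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
 * expands to roughly this: */
static inline void vcpu_schedule_unlock_irqrestore(spinlock_t *lock,
                                                   unsigned long flags,
                                                   const struct vcpu *v)
{
    ASSERT(lock == per_cpu(schedule_data, v->processor).schedule_lock);
    spin_unlock_irqrestore(lock, flags);
}
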