Diffstat (limited to 'xen/common')

 -rw-r--r--  xen/common/sched_bvt.c      | 16
 -rw-r--r--  xen/common/sched_fair_bvt.c | 27
 -rw-r--r--  xen/common/sched_rrobin.c   |  8
 -rw-r--r--  xen/common/schedule.c       | 16

 4 files changed, 29 insertions, 38 deletions
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index 14f9a3017c..e4498359e6 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -148,11 +148,11 @@ int bvt_init_idle_task(struct domain *p)
 
     bvt_add_task(p);
 
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
 
     return 0;
 }
@@ -218,7 +218,7 @@ int bvt_adjdom(struct domain *p,
         if ( mcu_adv == 0 )
             return -EINVAL;
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         inf->mcu_advance = mcu_adv;
         inf->warp = warp;
         inf->warpl = warpl;
@@ -229,18 +229,18 @@ int bvt_adjdom(struct domain *p,
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );
 
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
         struct bvt_dom_info *inf = BVT_INFO(p);
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         params->mcu_adv = inf->mcu_advance;
         params->warp = inf->warp;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
     }
 
     return 0;
@@ -411,7 +411,7 @@ static void bvt_dump_cpu_state(int i)
     struct bvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
@@ -430,7 +430,7 @@ static void bvt_dump_cpu_state(int i)
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
 /* We use cache to create the bvt_dom_infos
diff --git a/xen/common/sched_fair_bvt.c b/xen/common/sched_fair_bvt.c
index 6442f8489e..a8a60bafd1 100644
--- a/xen/common/sched_fair_bvt.c
+++ b/xen/common/sched_fair_bvt.c
@@ -160,12 +160,11 @@ int fbvt_init_idle_task(struct domain *p)
     if(fbvt_alloc_task(p) < 0) return -1;
 
     fbvt_add_task(p);
-//printk("< ----- >Initialising idle task for processor %d, address %d, priv %d\n", p->processor, (int)p, (int)p->sched_priv);
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
 
     return 0;
 }
@@ -233,7 +232,7 @@ int fbvt_adjdom(struct domain *p,
         if ( mcu_adv == 0 )
             return -EINVAL;
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         inf->mcu_advance = mcu_adv;
         inf->warp = warp;
         inf->warpl = warpl;
@@ -244,18 +243,20 @@ int fbvt_adjdom(struct domain *p,
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );
 
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock,
+                               flags);
     }
     else if ( cmd->direction == SCHED_INFO_GET )
     {
         struct fbvt_dom_info *inf = FBVT_INFO(p);
 
-        spin_lock_irqsave(&schedule_lock[p->processor], flags);
+        spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
         params->mcu_adv = inf->mcu_advance;
         params->warp = inf->warp;
         params->warpl = inf->warpl;
         params->warpu = inf->warpu;
-        spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+        spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock,
+                               flags);
     }
 
     return 0;
@@ -285,7 +286,6 @@ static task_slice_t fbvt_do_schedule(s_time_t now)
     struct fbvt_dom_info *next_prime_inf = NULL;
     task_slice_t ret;
 
-//if(prev->sched_priv == NULL) printk("----> %d\n", prev->domain);
     ASSERT(prev->sched_priv != NULL);
     ASSERT(prev_inf != NULL);
 
@@ -450,7 +450,6 @@ static task_slice_t fbvt_do_schedule(s_time_t now)
         next->min_slice = ctx_allow;
     ret.task = next;
     ret.time = r_time;
-//printk("NEXT --> domain %d (address %d, processor %d), priv %d\n",next->domain, (int)next, next->processor, (int)next->sched_priv);
 
     return ret;
 }
@@ -476,7 +475,7 @@ static void fbvt_dump_cpu_state(int i)
     struct fbvt_dom_info *d_inf;
     struct domain *d;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
     printk("svt=0x%08lX ", CPU_SVT(i));
 
     queue = RUNQUEUE(i);
@@ -495,7 +494,7 @@ static void fbvt_dump_cpu_state(int i)
                (unsigned long)list, (unsigned long)list->next,
                (unsigned long)list->prev);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
 
@@ -559,14 +558,10 @@ static void fbvt_wake(struct domain *d)
     int cpu = d->processor;
     s32 io_warp;
 
-//printk("-|--> Adding new domain %d\n",d->domain);
-//printk("-|--> Current%d (address %d, processor %d) %d\n",current->domain,(int)current, current->processor, (int)current->sched_priv);
     /* If on the runqueue already then someone has done the wakeup work. */
     if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
         return;
-//printk("----> Not on runqueue\n");
     __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
-//printk(" ---> %d\n",(int)current->sched_priv);
 
     now = NOW();
 
@@ -617,7 +612,6 @@ static void fbvt_wake(struct domain *d)
     __calc_evt(inf);
 
     curr = schedule_data[cpu].curr;
-//printk(" ---> %d\n",(int)current->sched_priv);
 
     /* Currently-running domain should run at least for ctx_allow. */
     min_time = curr->lastschd + curr->min_slice;
@@ -626,7 +620,6 @@ static void fbvt_wake(struct domain *d)
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
         mod_ac_timer(&schedule_data[cpu].s_timer, min_time);
-//printk(" ---> %d\n",(int)current->sched_priv);
 }
 
 struct scheduler sched_fbvt_def = {
diff --git a/xen/common/sched_rrobin.c b/xen/common/sched_rrobin.c
index 8ddd828314..49daaa05d2 100644
--- a/xen/common/sched_rrobin.c
+++ b/xen/common/sched_rrobin.c
@@ -95,11 +95,11 @@ static int rr_init_idle_task(struct domain *p)
     if(rr_alloc_task(p) < 0) return -1;
 
     rr_add_task(p);
-    spin_lock_irqsave(&schedule_lock[p->processor], flags);
+    spin_lock_irqsave(&schedule_data[p->processor].schedule_lock, flags);
     set_bit(DF_RUNNING, &p->flags);
     if ( !__task_on_runqueue(RUNLIST(p)) )
         __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
-    spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
+    spin_unlock_irqrestore(&schedule_data[p->processor].schedule_lock, flags);
 
     return 0;
 }
@@ -194,7 +194,7 @@ static void rr_dump_cpu_state(int i)
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
-    spin_lock_irqsave(&schedule_lock[i], flags);
+    spin_lock_irqsave(&schedule_data[i].schedule_lock, flags);
 
     queue = RUNQUEUE(i);
     printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
@@ -210,7 +210,7 @@ static void rr_dump_cpu_state(int i)
         d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
-    spin_unlock_irqrestore(&schedule_lock[i], flags);
+    spin_unlock_irqrestore(&schedule_data[i].schedule_lock, flags);
 }
 
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 96a80e0638..4a8a1b0612 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -85,8 +85,6 @@ static struct scheduler ops;
           (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )      \
            : (typeof(ops.fn(__VA_ARGS__)))0 )
 
-spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
-
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
 static struct ac_timer t_timer[NR_CPUS];
 
@@ -168,10 +166,10 @@ void domain_sleep(struct domain *d)
     unsigned long flags;
     int cpu = d->processor;
 
-    spin_lock_irqsave(&schedule_lock[cpu], flags);
+    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
     if ( likely(!domain_runnable(d)) )
         SCHED_OP(sleep, d);
-    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 
     /* Synchronous. */
     while ( test_bit(DF_RUNNING, &d->flags) && !domain_runnable(d) )
@@ -185,7 +183,7 @@ void domain_wake(struct domain *d)
 {
     unsigned long flags;
     int cpu = d->processor;
-    spin_lock_irqsave(&schedule_lock[cpu], flags);
+    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
     if ( likely(domain_runnable(d)) )
     {
         TRACE_2D(TRC_SCHED_WAKE, d->domain, d);
@@ -194,7 +192,7 @@ void domain_wake(struct domain *d)
         d->wokenup = NOW();
 #endif
     }
-    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -326,7 +324,7 @@ void __enter_scheduler(void)
 
     perfc_incrc(sched_run);
 
-    spin_lock_irq(&schedule_lock[cpu]);
+    spin_lock_irq(&schedule_data[cpu].schedule_lock);
 
     now = NOW();
 
@@ -360,7 +358,7 @@ void __enter_scheduler(void)
     schedule_data[cpu].s_timer.expires = now + r_time;
     add_ac_timer(&schedule_data[cpu].s_timer);
 
-    spin_unlock_irq(&schedule_lock[cpu]);
+    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     /* Ensure that the domain has an up-to-date time base. */
     if ( !is_idle_task(next) )
@@ -481,7 +479,7 @@ void __init scheduler_init(void)
 
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        spin_lock_init(&schedule_lock[i]);
+        spin_lock_init(&schedule_data[i].schedule_lock);
         schedule_data[i].curr = &idle0_task;
         init_ac_timer(&schedule_data[i].s_timer);
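
The header side of this refactor, where struct schedule_data gains its schedule_lock field, lives in xen/include/xen/sched-if.h and falls outside this diffstat (limited to 'xen/common'), so it is not shown above. Below is a minimal userspace sketch of the before/after pattern, using pthread spinlocks in place of Xen's spinlock_t; every field except schedule_lock and curr (which the diff references) is a guess for illustration only.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

/* Before: a parallel array of locks, separate from the per-CPU state.
 *
 *     spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;
 *
 * After: the lock is embedded in the per-CPU scheduler state, so one
 * indexing expression reaches all of a CPU's scheduler data.  The
 * field layout here is illustrative, not Xen's actual struct. */
struct schedule_data {
    pthread_spinlock_t schedule_lock; /* stands in for Xen's spinlock_t */
    void              *curr;          /* currently-running domain */
    /* ... s_timer and other per-CPU state elided ... */
};

static struct schedule_data schedule_data[NR_CPUS];

int main(void)
{
    /* Mirrors the scheduler_init() hunk: init each embedded lock. */
    for (int i = 0; i < NR_CPUS; i++)
        pthread_spin_init(&schedule_data[i].schedule_lock,
                          PTHREAD_PROCESS_PRIVATE);

    /* The access pattern the patch rewrites throughout xen/common:
     * schedule_lock[cpu] becomes schedule_data[cpu].schedule_lock. */
    int cpu = 0;
    pthread_spin_lock(&schedule_data[cpu].schedule_lock);
    printf("holding scheduler lock for cpu %d\n", cpu);
    pthread_spin_unlock(&schedule_data[cpu].schedule_lock);
    return 0;
}

A plausible motivation for the change, consistent with the __cacheline_aligned attribute on the removed array: keeping a CPU's lock next to the state it protects means one cache line (and one array lookup) serves both, instead of two parallel per-CPU arrays that can false-share across CPUs.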