/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
 ****************************************************************************
 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2002-2003 University of Cambridge
 ****************************************************************************
 *
 *        File: common/schedule.c
 *      Author: Rolf Neugebauer & Keir Fraser
 *
 * Description: CPU scheduling
 *              implements A Borrowed Virtual Time scheduler.
 *              (see Duda & Cheriton SOSP'99)
 */

/* NOTE(review): the header names of the following includes were stripped by
 * whatever extracted this file (content between '<' and '>' was eaten as
 * HTML markup).  The original list must be recovered from the upstream Xen
 * tree before this compiles — do not guess them here. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Optional instrumentation: enable exactly one to build a per-CPU latency
 * histogram (see print_sched_histo / reset_sched_histo at end of file). */
/*#define WAKEUP_HISTO*/
/*#define BLOCKTIME_HISTO*/
#if defined(WAKEUP_HISTO)
#define BUCKETS 31
#elif defined(BLOCKTIME_HISTO)
#define BUCKETS 200
#endif

#define MCU            (s32)MICROSECS(100)    /* Minimum unit */
#define MCU_ADVANCE    10                     /* default weight */
#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */

/* Per-CPU scheduler bookkeeping; one slot per CPU in schedule_data[]. */
typedef struct schedule_data_st
{
    struct list_head    runqueue;   /* runqueue */
    struct task_struct *curr;       /* current task */
    struct task_struct *idle;       /* idle task for this cpu */
    u32                 svt;        /* system virtual time. per CPU??? */
    struct ac_timer     s_timer;    /* scheduling timer */
#ifdef BUCKETS
    u32                 hist[BUCKETS]; /* for scheduler latency histogram */
#endif
} __cacheline_aligned schedule_data_t;
static schedule_data_t schedule_data[NR_CPUS];

/* One lock per CPU runqueue; taken by the run-queue wrappers below. */
spinlock_t schedule_lock[NR_CPUS] __cacheline_aligned;

/* Per-CPU periodic timer sends an event to the currently-executing domain. */
static struct ac_timer t_timer[NR_CPUS];

/*
 * Per-CPU timer which ensures that even guests with very long quantums get
 * their time-of-day state updated often enough to avoid wrapping.
 */
static struct ac_timer fallback_timer[NR_CPUS];

/* Various timer handlers.
*/
static void s_timer_fn(unsigned long unused);
static void t_timer_fn(unsigned long unused);
static void dom_timer_fn(unsigned long data);
static void fallback_timer_fn(unsigned long unused);

/*
 * Wrappers for run-queue management. Must be called with the schedule_lock
 * held.
 */
static inline void __add_to_runqueue_head(struct task_struct * p)
{
    list_add(&p->run_list, &schedule_data[p->processor].runqueue);
}

static inline void __add_to_runqueue_tail(struct task_struct * p)
{
    list_add_tail(&p->run_list, &schedule_data[p->processor].runqueue);
}

static inline void __del_from_runqueue(struct task_struct * p)
{
    list_del(&p->run_list);
    /* NULL next pointer doubles as the "not queued" marker tested below. */
    p->run_list.next = NULL;
}

static inline int __task_on_runqueue(struct task_struct *p)
{
    return p->run_list.next != NULL;
}

/* NOTE(review): the doubled backslash below looks like an extraction
 * artifact — a line-continuation '\' was probably escaped.  Verify against
 * the original source. */
#define next_domain(p) \\ list_entry((p)->run_list.next, struct task_struct, run_list)

/*
 * Calculate the effective virtual time for a domain. Take into account
 * warping limits
 */
static void __calc_evt(struct task_struct *p)
{
    s_time_t now = NOW();
    if ( p->warpback )
    {
        if ( ((now - p->warped) < p->warpl) &&
             ((now - p->uwarped) > p->warpu) )
        {
            /* allowed to warp */
            p->evt = p->avt - p->warp;
        }
        else
        {
            /* warped for too long -> unwarp */
            p->evt      = p->avt;
            p->uwarped  = now;      /* remember when unwarped, for warpu */
            p->warpback = 0;
        }
    }
    else
    {
        p->evt = p->avt;
    }
}

/*
 * Add and remove a domain
 */
/* NOTE(review): this function is truncated at the end of this chunk — the
 * remainder of the file is not visible here. */
void sched_add_domain(struct task_struct *p)
{
    p->state       = TASK_STOPPED;
    p->mcu_advance = MCU_ADVANCE;

    if ( p->domain == IDLE_DOMAIN_ID )
    {
        /* Idle domains never run "for real"; park them at maximal evt. */
        p->avt = p->evt = ~0U;
        schedule_data[p->processor].idle = p;
    }
    else
    {
        /* Set avt and evt to system virtual time. */
        p->avt = schedule_data[p->processor].svt;
        p->evt = schedule_data[p->processor].svt;
        /* Set some default values here. */
        p->warpback = 0;
        p->warp     = 0;
        p->warpl    = 0;
        p->warpu    = 0;

        /* Initialise the per-domain timer. */
        init_ac_timer(&p->timer);
        p->timer.cpu      = p->processor;
# Append the requested filename component (e.g. NAME_WE, EXT, DIRECTORY) of
# every entry in ${src_list} to ${dst_list}.
# Example usage: extract_filename_components(ALL_LIBS_BASENAME ALL_LIBS NAME_WE)
macro(extract_filename_components dst_list src_list component)
  foreach(entry IN LISTS ${src_list})
    get_filename_component(BASENAME "${entry}" ${component})
    list(APPEND ${dst_list} "${BASENAME}")
  endforeach()
endmacro()


# Collect <property> from every target in ${target_list}, appending the values
# to ${target_props} and removing duplicates from the final result.
# Example usage: extract_target_properties(QT_INCLUDES Qt5::Core INTERFACE_INCLUDE_DIRECTORIES)
macro(extract_target_properties target_props target_list property)
  foreach(item ${${target_list}})
    get_target_property(value ${item} ${property})
    # get_target_property() returns "<var>-NOTFOUND" when the property is not
    # set on the target; skip those so the sentinel never pollutes the list.
    if(value)
      list(APPEND ${target_props} ${value})
    endif()
  endforeach()
  # Only de-duplicate when something was collected: list(REMOVE_DUPLICATES)
  # raises an error on an undefined list variable.
  if(DEFINED ${target_props})
    list(REMOVE_DUPLICATES ${target_props})
  endif()
endmacro()
dule_data[cpu].s_timer.expires = now + r_time; add_ac_timer(&schedule_data[cpu].s_timer); spin_unlock_irq(&schedule_lock[cpu]); /* Ensure that the domain has an up-to-date time base. */ if ( !is_idle_task(next) ) update_dom_time(next->shared_info); if ( unlikely(prev == next) ) return; perfc_incrc(sched_ctx); #if defined(WAKEUP_HISTO) if ( !is_idle_task(next) && next->wokenup ) { ulong diff = (ulong)(now - next->wokenup); diff /= (ulong)MILLISECS(1); if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++; else schedule_data[cpu].hist[BUCKETS-1]++; } next->wokenup = (s_time_t)0; #elif defined(BLOCKTIME_HISTO) prev->lastdeschd = now; if ( !is_idle_task(next) ) { ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10)); if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++; else schedule_data[cpu].hist[BUCKETS-1]++; } #endif switch_to(prev, next); if ( unlikely(prev->state == TASK_DYING) ) put_task_struct(prev); /* Mark a timer event for the newly-scheduled domain. */ if ( !is_idle_task(next) ) set_bit(_EVENT_TIMER, &next->shared_info->events); schedule_tail(next); BUG(); } /* No locking needed -- pointer comparison is safe :-) */ int idle_cpu(int cpu) { struct task_struct *p = schedule_data[cpu].curr; return p == idle_task[cpu]; } /**************************************************************************** * Timers: the scheduler utilises a number of timers * - s_timer: per CPU timer for preemption and scheduling decisions * - t_timer: per CPU periodic timer to send timer interrupt to current dom * - dom_timer: per domain timer to specifiy timeout values * - fallback_timer: safeguard to ensure time is up to date ****************************************************************************/ /* The scheduler timer: force a run through the scheduler*/ static void s_timer_fn(unsigned long unused) { set_bit(_HYP_EVENT_NEED_RESCHED, ¤t->hyp_events); perfc_incrc(sched_irq); } /* Periodic tick timer: send timer event to current domain*/ static void t_timer_fn(unsigned 
long unused)
{
    struct task_struct *p = current;

    /* Idle task has no guest to notify. */
    if ( !is_idle_task(p) )
        set_bit(_EVENT_TIMER, &p->shared_info->events);

    /* Re-arm for the next 10ms tick on this CPU. */
    t_timer[p->processor].expires = NOW() + MILLISECS(10);
    add_ac_timer(&t_timer[p->processor]);
}

/* Domain timer function, sends a virtual timer interrupt to domain */
static void dom_timer_fn(unsigned long data)
{
    unsigned long cpu_mask = 0;
    /* data carries the owning domain's task_struct (set when armed). */
    struct task_struct *p = (struct task_struct *)data;
    cpu_mask |= mark_guest_event(p, _EVENT_TIMER);
    guest_event_notify(cpu_mask);
}

/* Fallback timer to ensure guests get time updated 'often enough'. */
static void fallback_timer_fn(unsigned long unused)
{
    struct task_struct *p = current;

    if ( !is_idle_task(p) )
        update_dom_time(p->shared_info);

    /* Re-arm: fires every 500ms regardless of scheduling activity. */
    fallback_timer[p->processor].expires = NOW() + MILLISECS(500);
    add_ac_timer(&fallback_timer[p->processor]);
}

/* Initialise the data structures. */
void __init scheduler_init(void)
{
    int i;

    printk("Initialising schedulers\n");

    for ( i = 0; i < NR_CPUS; i++ )
    {
        INIT_LIST_HEAD(&schedule_data[i].runqueue);
        spin_lock_init(&schedule_lock[i]);
        schedule_data[i].curr = &idle0_task;

        /* NOTE(review): .data values 2/3/4 are passed as the "unused"/"data"
         * argument of the handlers; they appear to be debug tags only. */
        init_ac_timer(&schedule_data[i].s_timer);
        schedule_data[i].s_timer.cpu      = i;
        schedule_data[i].s_timer.data     = 2;
        schedule_data[i].s_timer.function = &s_timer_fn;

        init_ac_timer(&t_timer[i]);
        t_timer[i].cpu      = i;
        t_timer[i].data     = 3;
        t_timer[i].function = &t_timer_fn;

        init_ac_timer(&fallback_timer[i]);
        fallback_timer[i].cpu      = i;
        fallback_timer[i].data     = 4;
        fallback_timer[i].function = &fallback_timer_fn;
    }

    schedule_data[0].idle = &idle0_task;
}

/*
 * Start a scheduler for each CPU
 * This has to be done *after* the timers, e.g., APICs, have been initialised
 */
void schedulers_start(void)
{
    printk("Start schedulers\n");

    /* Run each handler once locally, then on every other CPU; the handlers
     * re-arm their own per-CPU timers. */
    s_timer_fn(0);
    smp_call_function((void *)s_timer_fn, NULL, 1, 1);

    t_timer_fn(0);
    smp_call_function((void *)t_timer_fn, NULL, 1, 1);

    fallback_timer_fn(0);
    smp_call_function((void *)fallback_timer_fn, NULL, 1, 1);
}

/* Timer-expiry callback: wake the task named by __data (continues below). */
static void process_timeout(unsigned long __data)
{
    struct task_struct * p =
(struct task_struct *) __data;
    wake_up(p);
}

/* Debug helper: print every task on one runqueue, with list pointers. */
static void dump_rqueue(struct list_head *queue, char *name)
{
    struct list_head *list;
    int loop = 0;
    struct task_struct  *p;

    printk ("QUEUE %s %lx n: %lx, p: %lx\n", name, (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
    list_for_each (list, queue) {
        p = list_entry(list, struct task_struct, run_list);
        printk("%3d: %llu has=%c mcua=0x%04lX"
               " ev=0x%08X av=0x%08X c=0x%X%08X\n",
               loop++, p->domain,
               p->has_cpu ? 'T':'F',
               p->mcu_advance, p->evt, p->avt,
               (u32)(p->cpu_time>>32), (u32)p->cpu_time);
        printk(" l: %lx n: %lx p: %lx\n",
               (unsigned long)list, (unsigned long)list->next,
               (unsigned long)list->prev);
    }
    return;
}

/* Keyhandler: dump scheduler constants and every CPU's runqueue. */
void dump_runq(u_char key, void *dev_id, struct pt_regs *regs)
{
    u_long flags;
    s_time_t now = NOW();
    int i;

    printk("BVT: mcu=0x%08Xns ctx_allow=0x%08Xns NOW=0x%08X%08X\n",
           (u32)MCU, (u32)ctx_allow, (u32)(now>>32), (u32)now);
    for (i = 0; i < smp_num_cpus; i++) {
        spin_lock_irqsave(&schedule_lock[i], flags);
        printk("CPU[%02d] svt=0x%08X ", i, (s32)schedule_data[i].svt);
        dump_rqueue(&schedule_data[i].runqueue, "rq");
        spin_unlock_irqrestore(&schedule_lock[i], flags);
    }
    return;
}

#if defined(WAKEUP_HISTO) || defined(BLOCKTIME_HISTO)
/* NOTE(review): the two functions below are damaged — content between '<'
 * and '>' was eaten as HTML markup during extraction, so the loop bounds
 * (presumably "i < BUCKETS") and part of the printk are missing.  Restore
 * from the upstream source; do not attempt to reconstruct by guesswork. */
void print_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
{
    int loop, i, j;
    for (loop = 0; loop < smp_num_cpus; loop++) {
        j = 0;
        printf ("CPU[%02d]: scheduler latency histogram (ms:[count])\n", loop);
        for (i=0; i:[%7u] ", schedule_data[loop].hist[i]);
                j++;
                if (!(j % 5)) printk("\n");
            }
        }
        printk("\n");
    }
}

/* NOTE(review): truncated at the end of this chunk. */
void reset_sched_histo(u_char key, void *dev_id, struct pt_regs *regs)
{
    int loop, i;
    for (loop = 0; loop < smp_num_cpus; loop++)
        for (i=0; i