From 6d91c142bca75bbac5bbc4ecea5da47967b6797b Mon Sep 17 00:00:00 2001 From: Keir Fraser <keir.fraser@citrix.com> Date: Wed, 18 Aug 2010 14:22:48 +0100 Subject: timers: Improve debug-key printing. Signed-off-by: Keir Fraser <keir.fraser@citrix.com> --- xen/common/timer.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/xen/common/timer.c b/xen/common/timer.c index 38540cee8c..52283cfad0 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -19,6 +19,7 @@ #include <xen/timer.h> #include <xen/keyhandler.h> #include <xen/percpu.h> +#include <xen/symbols.h> #include <asm/system.h> #include <asm/desc.h> @@ -533,6 +534,13 @@ s_time_t align_timer(s_time_t firsttick, uint64_t period) return firsttick + (period - 1) - ((firsttick - 1) % period); } +static void dump_timer(struct timer *t, s_time_t now) +{ + printk(" ex=%8ldus timer=%p cb=%p(%p)", + (t->expires - now) / 1000, t, t->function, t->data); + print_symbol(" %s\n", (unsigned long)t->function); +} + static void dump_timerq(unsigned char key) { struct timer *t; @@ -541,28 +549,19 @@ static void dump_timerq(unsigned char key) s_time_t now = NOW(); int i, j; - printk("Dumping timer queues: NOW=0x%08X%08X\n", - (u32)(now>>32), (u32)now); + printk("Dumping timer queues:\n"); for_each_online_cpu( i ) { ts = &per_cpu(timers, i); - printk("CPU[%02d] ", i); + printk("CPU%02d:\n", i); spin_lock_irqsave(&ts->lock, flags); for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ ) - { - t = ts->heap[j]; - printk (" %d : %p ex=0x%08X%08X %p %p\n", - j, t, (u32)(t->expires>>32), (u32)t->expires, - t->data, t->function); - } + dump_timer(ts->heap[j], now); for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ ) - printk (" L%d : %p ex=0x%08X%08X %p %p\n", - j, t, (u32)(t->expires>>32), (u32)t->expires, - t->data, t->function); + dump_timer(t, now); spin_unlock_irqrestore(&ts->lock, flags); - printk("\n"); } } -- cgit v1.2.3 From 116d62ddfd1157364e2e5a2a44ef89573da37595 Mon Sep 17 00:00:00 2001 From: Keir Fraser <keir.fraser@citrix.com> Date: Wed, 18 Aug 2010 14:56:01 +0100 Subject: timers: Simplify implementation logic. 
Signed-off-by: Keir Fraser --- xen/arch/x86/acpi/cpu_idle.c | 2 +- xen/arch/x86/acpi/cpuidle_menu.c | 2 +- xen/arch/x86/hpet.c | 22 +++++++------- xen/arch/x86/time.c | 2 +- xen/common/timer.c | 63 +++++++++------------------------------- xen/include/xen/timer.h | 9 ++---- 6 files changed, 29 insertions(+), 71 deletions(-) diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c index 6c79d17f83..cdf0f492b4 100644 --- a/xen/arch/x86/acpi/cpu_idle.c +++ b/xen/arch/x86/acpi/cpu_idle.c @@ -252,7 +252,7 @@ void cpuidle_wakeup_mwait(cpumask_t *mask) static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) { unsigned int cpu = smp_processor_id(); - s_time_t expires = per_cpu(timer_deadline_start, cpu); + s_time_t expires = per_cpu(timer_deadline, cpu); __monitor((void *)&mwait_wakeup(cpu), 0, 0); smp_mb(); diff --git a/xen/arch/x86/acpi/cpuidle_menu.c b/xen/arch/x86/acpi/cpuidle_menu.c index 38a5543c00..69527766d6 100644 --- a/xen/arch/x86/acpi/cpuidle_menu.c +++ b/xen/arch/x86/acpi/cpuidle_menu.c @@ -173,7 +173,7 @@ static inline s_time_t avg_intr_interval_us(void) static unsigned int get_sleep_length_us(void) { - s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000; + s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000; /* * while us < 0 or us > (u32)-1, return a large u32, * choose (unsigned int)-2000 to avoid wrapping while added with exit diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c index addabc7e1e..b26f677967 100644 --- a/xen/arch/x86/hpet.c +++ b/xen/arch/x86/hpet.c @@ -36,14 +36,14 @@ struct hpet_event_channel cpumask_t cpumask; /* * cpumask_lock is used to prevent hpet intr handler from accessing other - * cpu's timer_deadline_start/end after the other cpu's mask was cleared -- - * mask cleared means cpu waken up, then accessing timer_deadline_xxx from + * cpu's timer_deadline after the other cpu's mask was cleared -- + * mask cleared means cpu waken up, then accessing timer_deadline from * other cpu is not 
safe. * It is not used for protecting cpumask, so set ops needn't take it. * Multiple cpus clear cpumask simultaneously is ok due to the atomic * feature of cpu_clear, so hpet_broadcast_exit() can take read lock for * clearing cpumask, and handle_hpet_broadcast() have to take write lock - * for read cpumask & access timer_deadline_xxx. + * for read cpumask & access timer_deadline. */ rwlock_t cpumask_lock; spinlock_t lock; @@ -212,10 +212,10 @@ again: if ( cpu_isset(cpu, ch->cpumask) ) { - if ( per_cpu(timer_deadline_start, cpu) <= now ) + if ( per_cpu(timer_deadline, cpu) <= now ) cpu_set(cpu, mask); - else if ( per_cpu(timer_deadline_end, cpu) < next_event ) - next_event = per_cpu(timer_deadline_end, cpu); + else if ( per_cpu(timer_deadline, cpu) < next_event ) + next_event = per_cpu(timer_deadline, cpu); } write_unlock_irq(&ch->cpumask_lock); @@ -661,7 +661,7 @@ void hpet_broadcast_enter(void) int cpu = smp_processor_id(); struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu); - if ( this_cpu(timer_deadline_start) == 0 ) + if ( this_cpu(timer_deadline) == 0 ) return; if ( !ch ) @@ -682,8 +682,8 @@ void hpet_broadcast_enter(void) spin_lock(&ch->lock); /* reprogram if current cpu expire time is nearer */ - if ( this_cpu(timer_deadline_end) < ch->next_event ) - reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1); + if ( this_cpu(timer_deadline) < ch->next_event ) + reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1); spin_unlock(&ch->lock); } @@ -692,7 +692,7 @@ void hpet_broadcast_exit(void) int cpu = smp_processor_id(); struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu); - if ( this_cpu(timer_deadline_start) == 0 ) + if ( this_cpu(timer_deadline) == 0 ) return; if ( !ch ) @@ -700,7 +700,7 @@ void hpet_broadcast_exit(void) /* Reprogram the deadline; trigger timer work now if it has passed. 
*/ enable_APIC_timer(); - if ( !reprogram_timer(this_cpu(timer_deadline_start)) ) + if ( !reprogram_timer(this_cpu(timer_deadline)) ) raise_softirq(TIMER_SOFTIRQ); read_lock_irq(&ch->cpumask_lock); diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c index 8a08a53791..3fdcca9b73 100644 --- a/xen/arch/x86/time.c +++ b/xen/arch/x86/time.c @@ -1488,7 +1488,7 @@ void pit_broadcast_exit(void) int cpu = smp_processor_id(); if ( cpu_test_and_clear(cpu, pit_broadcast_mask) ) - reprogram_timer(per_cpu(timer_deadline_start, cpu)); + reprogram_timer(this_cpu(timer_deadline)); } int pit_broadcast_is_available(void) diff --git a/xen/common/timer.c b/xen/common/timer.c index 52283cfad0..648f299dfd 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -23,16 +23,12 @@ #include #include -/* - * We pull handlers off the timer list this far in future, - * rather than reprogramming the time hardware. - */ +/* We program the time hardware this far behind the closest deadline. */ static unsigned int timer_slop __read_mostly = 50000; /* 50 us */ integer_param("timer_slop", timer_slop); struct timers { spinlock_t lock; - bool_t overflow; struct timer **heap; struct timer *list; struct timer *running; @@ -43,8 +39,7 @@ static DEFINE_PER_CPU(struct timers, timers); static cpumask_t timer_valid_cpumask; -DEFINE_PER_CPU(s_time_t, timer_deadline_start); -DEFINE_PER_CPU(s_time_t, timer_deadline_end); +DEFINE_PER_CPU(s_time_t, timer_deadline); /**************************************************************************** * HEAP OPERATIONS. @@ -210,7 +205,6 @@ static int add_entry(struct timer *t) return rc; /* Fall back to adding to the slower linked list. 
*/ - timers->overflow = 1; t->status = TIMER_STATUS_in_list; return add_to_list(&timers->list, t); } @@ -311,7 +305,6 @@ void set_timer(struct timer *timer, s_time_t expires) deactivate_timer(timer); timer->expires = expires; - timer->expires_end = expires + timer_slop; activate_timer(timer); @@ -427,13 +420,13 @@ static void timer_softirq_action(void) { struct timer *t, **heap, *next; struct timers *ts; - s_time_t now; + s_time_t now, deadline; ts = &this_cpu(timers); heap = ts->heap; /* If we overflowed the heap, try to allocate a larger heap. */ - if ( unlikely(ts->overflow) ) + if ( unlikely(ts->list != NULL) ) { /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */ int old_limit = GET_HEAP_LIMIT(heap); @@ -481,46 +474,16 @@ static void timer_softirq_action(void) add_entry(t); } - ts->overflow = (ts->list != NULL); - if ( unlikely(ts->overflow) ) - { - /* Find earliest deadline at head of list or top of heap. */ - this_cpu(timer_deadline_start) = ts->list->expires; - if ( (GET_HEAP_SIZE(heap) != 0) && - ((t = heap[1])->expires < this_cpu(timer_deadline_start)) ) - this_cpu(timer_deadline_start) = t->expires; - this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start); - } - else - { - /* - * Find the earliest deadline that encompasses largest number of timers - * on the heap. To do this we take timers from the heap while their - * valid deadline ranges continue to intersect. - */ - s_time_t start = 0, end = STIME_MAX; - struct timer **list_tail = &ts->list; - - while ( (GET_HEAP_SIZE(heap) != 0) && - ((t = heap[1])->expires <= end) ) - { - remove_entry(t); - - t->status = TIMER_STATUS_in_list; - t->list_next = NULL; - *list_tail = t; - list_tail = &t->list_next; - - start = t->expires; - if ( end > t->expires_end ) - end = t->expires_end; - } - - this_cpu(timer_deadline_start) = start; - this_cpu(timer_deadline_end) = end; - } + /* Find earliest deadline from head of linked list and top of heap. 
*/ + deadline = STIME_MAX; + if ( GET_HEAP_SIZE(heap) != 0 ) + deadline = heap[1]->expires; + if ( (ts->list != NULL) && (ts->list->expires < deadline) ) + deadline = ts->list->expires; + this_cpu(timer_deadline) = + (deadline == STIME_MAX) ? 0 : deadline + timer_slop; - if ( !reprogram_timer(this_cpu(timer_deadline_start)) ) + if ( !reprogram_timer(this_cpu(timer_deadline)) ) raise_softirq(TIMER_SOFTIRQ); spin_unlock_irq(&ts->lock); diff --git a/xen/include/xen/timer.h b/xen/include/xen/timer.h index 53b3c3cc17..e8b80f248e 100644 --- a/xen/include/xen/timer.h +++ b/xen/include/xen/timer.h @@ -16,7 +16,6 @@ struct timer { /* System time expiry value (nanoseconds since boot). */ s_time_t expires; - s_time_t expires_end; /* Position in active-timer data structure. */ union { @@ -82,12 +81,8 @@ void kill_timer(struct timer *timer); /* Bootstrap initialisation. Must be called before any other timer function. */ void timer_init(void); -/* - * Next timer deadline for each CPU. - * Modified only by the local CPU and never in interrupt context. - */ -DECLARE_PER_CPU(s_time_t, timer_deadline_start); -DECLARE_PER_CPU(s_time_t, timer_deadline_end); +/* Next timer deadline for each CPU. */ +DECLARE_PER_CPU(s_time_t, timer_deadline); /* Arch-defined function to reprogram timer hardware for new deadline. */ int reprogram_timer(s_time_t timeout); -- cgit v1.2.3