/******************************************************************************
* arch/x86/time.c
*
* Per-CPU time calibration and management.
*
* Copyright (c) 2002-2005, K A Fraser
*
* Portions from Linux are:
* Copyright (c) 1991, 1992, 1995 Linus Torvalds
*/
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/mpspec.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/mc146818rtc.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <io_ports.h>
/* opt_clocksource: Force clocksource to one of: pit, hpet, cyclone, acpi. */
static char opt_clocksource[10];
string_param("clocksource", opt_clocksource);
/*
* opt_consistent_tscs: All TSCs tick at the exact same rate, allowing
* simplified system time handling.
*/
static int opt_consistent_tscs;
boolean_param("consistent_tscs", opt_consistent_tscs);
unsigned long cpu_khz; /* CPU clock frequency in kHz. */
DEFINE_SPINLOCK(rtc_lock);
unsigned long pit0_ticks;
static u32 wc_sec, wc_nsec; /* UTC time at last 'time update'. */
static DEFINE_SPINLOCK(wc_lock);
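/*
 * Fixed-point scale factor.  A time_scale encodes the multiplier
 * (mul_frac / 2^32) * 2^shift: scaling a value yields
 * ((value << shift) * mul_frac) >> 32, as implemented by scale_delta().
 */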
struct time_scale {
int shift;
u32 mul_frac;
};
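/*
 * Per-CPU calibration state: the TSC value and the local/master system
 * times sampled at the last calibration point, plus this CPU's current
 * TSC-to-system-time scale.
 */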
struct cpu_time {
u64 local_tsc_stamp;
s_time_t stime_local_stamp;
s_time_t stime_master_stamp;
struct time_scale tsc_scale;
};
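/*
 * A platform timer that local TSCs are calibrated against: identifying
 * strings, counter frequency (Hz) and width (bits), a counter read hook,
 * and init/resume hooks.
 */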
struct platform_timesource {
char *id;
char *name;
u64 frequency;
u64 (*read_counter)(void);
int (*init)(struct platform_timesource *);
void (*resume)(struct platform_timesource *);
int counter_bits;
};
static DEFINE_PER_CPU(struct cpu_time, cpu_time);
/* Calibrate all CPUs to platform timer every EPOCH. */
#define EPOCH MILLISECS(1000)
static struct timer calibration_timer;
/*
* We simulate a 32-bit platform timer from the 16-bit PIT ch2 counter.
* Otherwise overflow happens too quickly (~50ms) for us to guarantee that
* softirq handling will happen in time.
*
* The pit_lock protects the 16- and 32-bit stamp fields below.
*/
static DEFINE_SPINLOCK(pit_lock);
static u16 pit_stamp16;
static u32 pit_stamp32;
static int using_pit;
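/*
 * Illustrative sketch (not part of the original file): the PIT counts
 * down, so the unsigned 16-bit difference between the stamped and current
 * hardware counts gives the ticks elapsed since the last snapshot, which
 * extends the counter to 32 bits:
 *
 *     spin_lock_irqsave(&pit_lock, flags);
 *     count16  = inb(PIT_CH2);              // low byte (latching elided)
 *     count16 |= inb(PIT_CH2) << 8;         // high byte
 *     count32  = pit_stamp32 + (u16)(pit_stamp16 - count16);
 *     spin_unlock_irqrestore(&pit_lock, flags);
 */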
/*
* 32-bit division of integer dividend and integer divisor yielding
* 32-bit fractional quotient.
*/
static inline u32 div_frac(u32 dividend, u32 divisor)
{
u32 quotient, remainder;
ASSERT(dividend < divisor);
asm (
"divl %4"
: "=a" (quotient), "=d" (remainder)
: "0" (0), "1" (dividend), "r" (divisor) );
return quotient;
}
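/*
 * Example (illustrative): div_frac() computes (dividend << 32) / divisor,
 * i.e. a 0.32 fixed-point fraction.  The ASSERT(dividend < divisor)
 * guarantees the quotient fits in 32 bits:
 *
 *     u32 quarter = div_frac(1, 4);   // 0x40000000, i.e. 0.25
 *     u32 third   = div_frac(1, 3);   // 0x55555555, i.e. ~0.333
 */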
/*
* 32-bit multiplication of multiplicand and fractional multiplier
* yielding 32-bit product (radix point at same position as in multiplicand).
*/
static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
{
u32 product_int, product_frac;
asm (
"mul %3"
: "=a" (product_frac), "=d" (product_int)
: "0" (multiplicand), "r" (multiplier) );
return product_int;
}
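/*
 * Example (illustrative): mul_frac() returns the high 32 bits of the
 * 64-bit product, i.e. (multiplicand * multiplier) >> 32, treating the
 * multiplier as a 0.32 fixed-point fraction:
 *
 *     mul_frac(100, div_frac(1, 4)) == 25;
 *     mul_frac(300, 0x55555555u)    == 99;   // ~300/3, truncated
 */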
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
*/
static inline u64 scale_delta(u64 delta, struct time_scale *scale)
{
u64 product;
#ifdef CONFIG_X86_32
u32 tmp1, tmp2;
#endif
if ( scale->shift < 0 )
delta >>= -scale->shift;
else
delta <<= scale->shift;
#ifdef CONFIG_X86_32
asm (
"mul %5 ; "
"mov %4,%%eax ; "
"mov %%edx,%4 ; "
"mul %5 ; "
"xor %5,%5 ; "
"add %4,%%eax ; "
"adc %5,%%edx ; "
: "=A" (product), "=r" (tmp1), "=r" (tmp2)
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
#else
asm (
"mul %%rdx ; shrd $32,%%rdx,%%rax"
: "=a" (product) : "0" (delta), "d" ((u64)scale->mul_frac) );
#endif
return product;
}
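/*
 * Example (illustrative): with a 2GHz TSC each tick is 0.5ns, encodable
 * as a scale factor of 0.5:
 *
 *     struct time_scale ts = { .shift = 0, .mul_frac = 0x80000000u };
 *     scale_delta(1000, &ts) == 500;   // 1000 ticks -> 500ns
 */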
/* Compute the reciprocal of the given time_scale. */
static inline struct time_scale scale_reciprocal(struct time_scale scale)
{
struct time_scale reciprocal;
u32 dividend;
dividend = 0x80000000u;
reciprocal.shift = 1 - scale.shift;
while ( unlikely(dividend >= scale.mul_frac) )
{
dividend >>= 1;
reciprocal.shift++;
}
asm (
"divl %4"
: "=a" (reciprocal.mul_frac), "=d" (dividend)
: "0" (0), "1" (dividend), "r" (scale.mul_frac) );
return reciprocal;
}
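/*
 * Example (illustrative): inverting the 2GHz scale above (a factor of
 * 0.5) yields a factor of 2, with the mantissa renormalised into [0.5, 1):
 *
 *     scale_reciprocal((struct time_scale){ 0, 0x80000000u })
 *         == (struct time_scale){ .shift = 2, .mul_frac = 0x80000000u };
 *
 * The reciprocal scale converts in the opposite direction, e.g. system
 * time back to TSC ticks.
 */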
extern int xen_cpuidle;
/*
 * cpu_mask denoting the CPUs that need the timer interrupt delivered as an
 * IPI in place of their local APIC timer.
 */
static cpumask_t pit_broadcast_mask;
static void smp_send_timer_broadcast_ipi(void)
{
int cpu = smp_processor_id();
cpumask_t mask;
cpus_and(mask, cpu_online_map, pit_broadcast_mask);
if ( cpu_isset(cpu, mask) )
{
cpu_clear(cpu, mask);
raise_softirq(TIMER_SOFTIRQ);
}