Diffstat (limited to 'xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c')
-rw-r--r--  xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c  52 ++++++++++++
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c b/xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c
new file mode 100644
index 0000000000..0035bed074
--- /dev/null
+++ b/xenolinux-2.4.25-sparse/arch/xeno/lib/delay.c
@@ -0,0 +1,52 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
+
+void __delay(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ rdtscl(bclock);
+ do
+ {
+ rep_nop();
+ rdtscl(now);
+ } while ((now-bclock) < loops);
+}
+
+inline void __const_udelay(unsigned long xloops)
+{
+ int d0;
+ __asm__("mull %0"
+ :"=d" (xloops), "=&a" (d0)
+ :"1" (xloops),"0" (current_cpu_data.loops_per_jiffy));
+ __delay(xloops * HZ);
+}
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+}
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
+}
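
Editor's note on the scaling in __const_udelay(): the inline "mull" multiplies the caller's 32.32 fixed-point fraction of a second (xloops) by current_cpu_data.loops_per_jiffy and keeps only the high 32 bits of the 64-bit product, i.e. it computes (xloops * loops_per_jiffy) >> 32. Multiplying the result by HZ converts "loops per jiffy" into the total number of loops (TSC ticks, given the rdtscl-based __delay above) to spin for. The plain-C sketch below only illustrates that arithmetic; it is not part of the patch, and the loops_per_jiffy value and helper names are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define HZ 100                              /* example tick rate */
static uint32_t loops_per_jiffy = 2500000;  /* stand-in for the boot-time calibration */

/* Same arithmetic as the "mull" in __const_udelay(): take a 32.32
 * fixed-point fraction of a second, multiply by loops_per_jiffy,
 * and keep the high 32 bits of the 64-bit product. */
static uint32_t const_udelay_loops(uint32_t xloops)
{
    uint32_t per_jiffy = (uint32_t)(((uint64_t)xloops * loops_per_jiffy) >> 32);
    return per_jiffy * HZ;                  /* total loops (TSC ticks) to spin */
}

int main(void)
{
    /* 0x10c6 ~= 2**32 / 1000000, so usecs * 0x10c6 is the requested
     * delay expressed as a 32.32 fixed-point fraction of a second,
     * exactly as __udelay() passes it down to __const_udelay(). */
    uint32_t usecs = 1000;                  /* ask for 1 ms */
    printf("spin for %u loops\n", const_udelay_loops(usecs * 0x10c6u));
    return 0;
}

With the example numbers (loops_per_jiffy = 2,500,000 and HZ = 100, i.e. 250,000,000 loops per second), a 1000 us request comes out to roughly 250,000 loops, with the small shortfall coming from the truncated 0x10c6 constant.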