Diffstat (limited to 'target/linux/ar71xx/patches-2.6.32/902-mips_clocksource_init_war.patch')
-rw-r--r--  target/linux/ar71xx/patches-2.6.32/902-mips_clocksource_init_war.patch  56
1 file changed, 56 insertions, 0 deletions
diff --git a/target/linux/ar71xx/patches-2.6.32/902-mips_clocksource_init_war.patch b/target/linux/ar71xx/patches-2.6.32/902-mips_clocksource_init_war.patch
new file mode 100644
index 0000000000..894eed1e5b
--- /dev/null
+++ b/target/linux/ar71xx/patches-2.6.32/902-mips_clocksource_init_war.patch
@@ -0,0 +1,56 @@
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -16,6 +16,22 @@
+ #include <asm/cevt-r4k.h>
+
+ /*
++ * Compare interrupt can be routed and latched outside the core,
++ * so a single execution hazard barrier may not be enough to give
++ * it time to clear as seen in the Cause register. 4 times the
++ * pipeline depth seems reasonably conservative, and empirically
++ * works better in configurations with high CPU/bus clock ratios.
++ */
++
++#define compare_change_hazard() \
++ do { \
++ irq_disable_hazard(); \
++ irq_disable_hazard(); \
++ irq_disable_hazard(); \
++ irq_disable_hazard(); \
++ } while (0)
++
++/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+@@ -31,6 +47,7 @@ static int mips_next_event(unsigned long
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
++ compare_change_hazard();
+ res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+ return res;
+ }
+@@ -100,22 +117,6 @@ static int c0_compare_int_pending(void)
+ return (read_c0_cause() >> cp0_compare_irq) & 0x100;
+ }
+
+-/*
+- * Compare interrupt can be routed and latched outside the core,
+- * so a single execution hazard barrier may not be enough to give
+- * it time to clear as seen in the Cause register. 4 time the
+- * pipeline depth seems reasonably conservative, and empirically
+- * works better in configurations with high CPU/bus clock ratios.
+- */
+-
+-#define compare_change_hazard() \
+- do { \
+- irq_disable_hazard(); \
+- irq_disable_hazard(); \
+- irq_disable_hazard(); \
+- irq_disable_hazard(); \
+- } while (0)
+-
+ int c0_compare_int_usable(void)
+ {
+ unsigned int delta;
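
For readers skimming the diff: the patch does not change any logic of its own;
it moves the compare_change_hazard() workaround above mips_next_event() so the
timer-programming path can use it as well. A minimal standalone sketch of that
pattern follows. The accessors read_c0_count(), write_c0_compare() and
irq_disable_hazard() are the kernel's own (inline asm from <asm/mipsregs.h>
and <asm/hazards.h>); the extern stubs and the next_event_sketch() wrapper are
assumptions made here so the sketch compiles on its own, not kernel code.

#include <errno.h>

/* Stand-ins for the kernel's CP0 accessors, declared extern only so
 * this sketch is self-contained. */
extern unsigned int read_c0_count(void);
extern void write_c0_compare(unsigned int val);
extern void irq_disable_hazard(void);

/* Same shape as the macro the patch relocates: four execution hazard
 * barriers, roughly 4x the pipeline depth, giving a Compare write
 * latched outside the core time to settle. */
#define compare_change_hazard() \
	do { \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
	} while (0)

/* Sketch of the mips_next_event() flow after the patch: program the
 * Compare register, wait out the hazard, then re-read Count. If Count
 * has already passed the target, the event was missed; -ETIME makes
 * the clockevents core retry with a larger delta. */
int next_event_sketch(unsigned long delta)
{
	unsigned int cnt = read_c0_count() + delta;

	write_c0_compare(cnt);
	compare_change_hazard();
	return ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
}

The barrier between write_c0_compare() and the missed-event check mirrors the
reasoning in the patch comment: the Compare change may be routed and latched
outside the core, so it needs more than one hazard barrier's worth of time
before its effect is observable, particularly on configurations with high
CPU/bus clock ratios such as the ar71xx targets this patch is applied to.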