about summary refs log tree commit diff stats
path: root/xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
diff options
context:
space:
mode:
Diffstat (limited to 'xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h')
-rw-r--r--  xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h  74
1 files changed, 74 insertions, 0 deletions
diff --git a/xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h b/xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
new file mode 100644
index 0000000000..7972ce7d74
--- /dev/null
+++ b/xenolinux-2.4.25-sparse/include/asm-xeno/mmu_context.h
@@ -0,0 +1,74 @@
+#ifndef __I386_MMU_CONTEXT_H
+#define __I386_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+
+/*
+ * hooks to add arch specific data into the mm struct.
+ * Note that destroy_context is called even if init_new_context
+ * fails.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+#ifdef CONFIG_SMP
+
+/*
+ * enter_lazy_tlb -- note that this CPU is only borrowing 'mm' (e.g. a
+ * kernel thread is running).  Dropping the state from TLBSTATE_OK to
+ * TLBSTATE_LAZY lets the TLB-shootdown code (leave_mm, per the comment
+ * in switch_mm below) stop delivering flush IPIs to this CPU.
+ */
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+ if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
+}
+#else
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+ /* Uniprocessor build: there are no cross-CPU TLB-flush IPIs to
+ * suppress, so lazy-TLB bookkeeping is unnecessary. */
+}
+#endif
+
+extern pgd_t *cur_pgd;
+
+/*
+ * switch_mm -- switch this CPU's address space from 'prev' to 'next'.
+ *
+ * Xen-paravirtualised variant: instead of writing %cr3 directly, the new
+ * page-table base is handed to the hypervisor via queue_pt_switch()
+ * (presumably a batched request that takes effect when the page-update
+ * queue is flushed, as activate_mm() does -- TODO confirm against the
+ * asm-xeno hypervisor interface headers).
+ */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
+{
+ if (prev != next) {
+ /* stop flush ipis for the previous mm */
+ clear_bit(cpu, &prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ /* Mark this CPU as actively using 'next' for the TLB IPI logic. */
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ cpu_tlbstate[cpu].active_mm = next;
+#endif
+
+ /* Re-load page tables */
+ cur_pgd = next->pgd;
+ queue_pt_switch(__pa(cur_pgd));
+ /* load_LDT, if either the previous or next thread
+ * has a non-default LDT.
+ */
+ if (next->context.size+prev->context.size)
+ load_LDT(&next->context);
+ }
+#ifdef CONFIG_SMP
+ else {
+ /* prev == next: same page tables, but this CPU may have been in
+ * lazy-TLB mode and missed flush IPIs in the meantime. */
+ cpu_tlbstate[cpu].state = TLBSTATE_OK;
+ if(cpu_tlbstate[cpu].active_mm != next)
+ out_of_line_bug();
+ if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload %cr3.
+ */
+ cur_pgd = next->pgd;
+ queue_pt_switch(__pa(cur_pgd));
+ /* NOTE(review): the non-lazy path above passes &next->context,
+ * but here 'next' (a struct mm_struct *) is passed -- the two
+ * argument types cannot both match load_LDT's signature. Looks
+ * like this should be load_LDT(&next->context); confirm against
+ * the load_LDT definition in asm-xeno/desc.h. */
+ load_LDT(next);
+ }
+ }
+#endif
+}
+
+/*
+ * activate_mm -- adopt 'next' on the current CPU, then flush the queued
+ * page-table updates so the hypervisor applies the base-pointer switch
+ * requested by switch_mm() before we return.  (tsk is unused by
+ * switch_mm, hence NULL.)
+ */
+#define activate_mm(prev, next) \
+do { \
+ switch_mm((prev),(next),NULL,smp_processor_id()); \
+ flush_page_update_queue(); \
+} while ( 0 )
+
+#endif