aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-06-28 17:47:14 +0000
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-06-28 17:47:14 +0000
commit8b5918674e6db47b5344e4f88e47aaa09715c177 (patch)
tree26e9771e64d08e0c04e8bb1145be416e00856b0b
parent793aa523b8b977e3161ac953d36c0d8d632a2e2e (diff)
downloadxen-8b5918674e6db47b5344e4f88e47aaa09715c177.tar.gz
xen-8b5918674e6db47b5344e4f88e47aaa09715c177.tar.bz2
xen-8b5918674e6db47b5344e4f88e47aaa09715c177.zip
bitkeeper revision 1.1768 (42c18d2259NPELcGV7ohyZNh72ufSw)
Based on Keir's suggestion yesterday, I fixed the bug in xenlinux. Now the LTP test cases pass well in domU; I ran LTP in domU along with an infinite loop of "make clean; make -j4" on dom0 for sanity tests. Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
-rw-r--r--linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c54
-rw-r--r--linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h40
2 files changed, 48 insertions, 46 deletions
diff --git a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
index 99210e91cb..48afb51919 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
@@ -474,51 +474,26 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
- asm volatile("movl %%es,%0" : "=m" (prev->es));
- if (unlikely(next->es | prev->es))
+ if (unlikely(next->es))
loadsegment(es, next->es);
- asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
- if (unlikely(next->ds | prev->ds))
+ if (unlikely(next->ds))
loadsegment(ds, next->ds);
/*
* Switch FS and GS.
*/
- {
- unsigned fsindex;
- asm volatile("movl %%fs,%0" : "=g" (fsindex));
- /* segment register != 0 always requires a reload.
- also reload when it has changed.
- when prev process used 64bit base always reload
- to avoid an information leak. */
- if (unlikely(fsindex | next->fsindex | prev->fs)) {
- loadsegment(fs, next->fsindex);
- /* check if the user used a selector != 0
- * if yes clear 64bit base, since overloaded base
- * is always mapped to the Null selector
- */
- if (fsindex)
- prev->fs = 0;
- }
- /* when next process has a 64bit base use it */
- if (next->fs) {
- HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
- }
- prev->fsindex = fsindex;
- }
- {
- unsigned gsindex;
- asm volatile("movl %%gs,%0" : "=g" (gsindex));
- if (unlikely(gsindex | next->gsindex | prev->gs)) {
- load_gs_index(next->gsindex);
- if (gsindex)
- prev->gs = 0;
- }
- if (next->gs)
- HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
- prev->gsindex = gsindex;
- }
+ if (unlikely(next->fsindex))
+ loadsegment(fs, next->fsindex);
+
+ if (next->fs)
+ HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
+
+ if (unlikely(next->gsindex))
+ load_gs_index(next->gsindex);
+
+ if (next->gs)
+ HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
/*
* Switch the PDA context.
@@ -660,7 +635,6 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
if (doit) {
load_gs_index(0);
ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
- printk("do_arch_prctl: SET_SET: addr = %lx\n", addr);
}
}
put_cpu();
@@ -700,7 +674,6 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
base = read_32bit_tls(task, FS_TLS);
else if (doit) {
rdmsrl(MSR_FS_BASE, base);
- printk("do_arch_prctl: GET_FS: addr = %lx\n", addr);
} else
base = task->thread.fs;
ret = put_user(base, (unsigned long __user *)addr);
@@ -712,7 +685,6 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
base = read_32bit_tls(task, GS_TLS);
else if (doit) {
rdmsrl(MSR_KERNEL_GS_BASE, base);
- printk("do_arch_prctl: GET_GS: addr = %lx\n", addr);
} else
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
index dc194ff88e..592bef5b7d 100644
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
@@ -16,18 +16,48 @@
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
+#if 0 /* XEN: no lazy tlb */
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
+#endif
}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+
+#define prepare_arch_switch(rq,next) __prepare_arch_switch()
+#define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p) ((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
{
+ /*
+ * Save away %es, %ds, %fs and %gs. Must happen before reload
+ * of cr3/ldt (i.e., not in __switch_to).
+ */
+ __asm__ __volatile__ (
+ "movl %%es,%0 ; movl %%ds,%1 ; movl %%fs,%2 ; movl %%gs,%3"
+ : "=m" (current->thread.es),
+ "=m" (current->thread.ds),
+ "=m" (current->thread.fsindex),
+ "=m" (current->thread.gsindex) );
+
+ if (current->thread.ds)
+ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
+
+ if (current->thread.es)
+ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
+
+ if (current->thread.fsindex) {
+ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
+ current->thread.fs = 0;
+ }
+
+ if (current->thread.gsindex) {
+ load_gs_index(0);
+ current->thread.gs = 0;
+ }
}
-#endif
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)