diff options
author | kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk> | 2005-06-23 09:19:35 +0000 |
---|---|---|
committer | kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk> | 2005-06-23 09:19:35 +0000 |
commit | 07b33c0a712d674f3de1a92028da0ba85ecd7867 (patch) | |
tree | a9fc28c07e832ee67b1b81f9117de3abd6144d1e | |
parent | 61724bdcde2b4c894b7650799dca5ac93a1bd6ba (diff) | |
download | xen-07b33c0a712d674f3de1a92028da0ba85ecd7867.tar.gz xen-07b33c0a712d674f3de1a92028da0ba85ecd7867.tar.bz2 xen-07b33c0a712d674f3de1a92028da0ba85ecd7867.zip |
bitkeeper revision 1.1731 (42ba7ea7qvujqMf6s6vK-pJzJhkEBA)
The attached patch replaces the three hypercalls with a single
multi-call in switch_mm, as x86 xenlinux does.
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
-rw-r--r-- | linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h | 32 |
1 files changed, 26 insertions, 6 deletions
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h index 4e487a06d6..dc194ff88e 100644 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h @@ -33,20 +33,40 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); + struct mmuext_op _op[3], *op = _op; + if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); -#ifdef CONFIG_SMP +#if 0 /* XEN: no lazy tlb */ write_pda(mmu_state, TLBSTATE_OK); write_pda(active_mm, next); #endif set_bit(cpu, &next->cpu_vm_mask); - load_cr3(next->pgd); - xen_new_user_pt(__pa(__user_pgd(next->pgd))); - if (unlikely(next->context.ldt != prev->context.ldt)) - load_LDT_nolock(&next->context, cpu); + + /* load_cr3(next->pgd) */ + per_cpu(cur_pgd, smp_processor_id()) = next->pgd; + op->cmd = MMUEXT_NEW_BASEPTR; + op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT); + op++; + + /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */ + op->cmd = MMUEXT_NEW_USER_BASEPTR; + op->mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT); + op++; + + if (unlikely(next->context.ldt != prev->context.ldt)) { + /* load_LDT_nolock(&next->context, cpu) */ + op->cmd = MMUEXT_SET_LDT; + op->linear_addr = (unsigned long)next->context.ldt; + op->nr_ents = next->context.size; + op++; + } + + BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF)); } -#ifdef CONFIG_SMP + +#if 0 /* XEN: no lazy tlb */ else { write_pda(mmu_state, TLBSTATE_OK); if (read_pda(active_mm) != next) |