diff options
author | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2006-09-19 10:50:10 +0100 |
---|---|---|
committer | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2006-09-19 10:50:10 +0100 |
commit | 6f562e72cdc4b7e1519e23be75f812aebbf41db3 (patch) | |
tree | 9ff052247cd4bb8a08d6ad953ec1242a053b8c17 /xen/arch/x86/flushtlb.c | |
parent | 21979707527234bf3249c3999c2499c66aab180b (diff) | |
download | xen-6f562e72cdc4b7e1519e23be75f812aebbf41db3.tar.gz xen-6f562e72cdc4b7e1519e23be75f812aebbf41db3.tar.bz2 xen-6f562e72cdc4b7e1519e23be75f812aebbf41db3.zip |
[XEN][X86_64] Use GLOBAL bit to build user mappings.
Avoids need to flush user mappings when switching between
user and kernel contexts.
Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/arch/x86/flushtlb.c')
-rw-r--r-- | xen/arch/x86/flushtlb.c | 101 |
1 files changed, 70 insertions, 31 deletions
diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c index e3415dd3dd..fc3f1f3a4a 100644 --- a/xen/arch/x86/flushtlb.c +++ b/xen/arch/x86/flushtlb.c @@ -4,13 +4,14 @@ * TLB flushes are timestamped using a global virtual 'clock' which ticks * on any TLB flush on any processor. * - * Copyright (c) 2003-2004, K A Fraser + * Copyright (c) 2003-2006, K A Fraser */ #include <xen/config.h> #include <xen/sched.h> #include <xen/softirq.h> #include <asm/flushtlb.h> +#include <asm/page.h> /* Debug builds: Wrap frequently to stress-test the wrap logic. */ #ifdef NDEBUG @@ -22,21 +23,17 @@ u32 tlbflush_clock = 1U; DEFINE_PER_CPU(u32, tlbflush_time); -void write_cr3(unsigned long cr3) +/* + * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value. + * + * This must happen *before* we flush the TLB. If we do it after, we race other + * CPUs invalidating PTEs. For example, a page invalidated after the flush + * might get the old timestamp, but this CPU can speculatively fetch the + * mapping into its TLB after the flush but before inc'ing the clock. + */ +static u32 pre_flush(void) { u32 t, t1, t2; - unsigned long flags; - - /* This non-reentrant function is sometimes called in interrupt context. */ - local_irq_save(flags); - - /* - * STEP 1. Increment the virtual clock *before* flushing the TLB. - * If we do it after, we race other CPUs invalidating PTEs. - * (e.g., a page invalidated after the flush might get the old - * timestamp, but this CPU can speculatively fetch the mapping - * into its TLB after the flush but before inc'ing the clock). - */ t = tlbflush_clock; do { @@ -52,26 +49,68 @@ void write_cr3(unsigned long cr3) if ( unlikely(t2 == 0) ) raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ); - /* - * STEP 2. Update %CR3, thereby flushing the TLB. - */ - skip_clocktick: + return t2; +} + +/* + * post_flush(): Update this CPU's timestamp with specified clock value. 
+ * + * Note that this happens *after* flushing the TLB, as otherwise we can race a + * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU + * stamp and so does not force a synchronous TLB flush, but the flush in this + * function hasn't yet occurred and so the TLB might be stale). The ordering + * would only actually matter if this function were interruptible, and + * something that abuses the stale mapping could exist in an interrupt + * handler. In fact neither of these is the case, so really we are being ultra + * paranoid. + */ +static void post_flush(u32 t) +{ + this_cpu(tlbflush_time) = t; +} + +void write_cr3(unsigned long cr3) +{ + unsigned long flags; + u32 t; + + /* This non-reentrant function is sometimes called in interrupt context. */ + local_irq_save(flags); + + t = pre_flush(); + +#ifdef USER_MAPPINGS_ARE_GLOBAL + __pge_off(); + __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" ); + __pge_on(); +#else __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" ); +#endif + + post_flush(t); + + local_irq_restore(flags); +} + +void local_flush_tlb(void) +{ + unsigned long flags; + u32 t; + + /* This non-reentrant function is sometimes called in interrupt context. */ + local_irq_save(flags); + + t = pre_flush(); + +#ifdef USER_MAPPINGS_ARE_GLOBAL + __pge_off(); + __pge_on(); +#else + __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" ); +#endif - /* - * STEP 3. Update this CPU's timestamp. Note that this happens *after* - * flushing the TLB, as otherwise we can race a NEED_FLUSH() test - * on another CPU. (e.g., other CPU sees the updated CPU stamp and - * so does not force a synchronous TLB flush, but the flush in this - * function hasn't yet occurred and so the TLB might be stale). - * The ordering would only actually matter if this function were - * interruptible, and something that abuses the stale mapping could - * exist in an interrupt handler. 
In fact neither of these is the - * case, so really we are being ultra paranoid. - */ - - this_cpu(tlbflush_time) = t2; + post_flush(t); local_irq_restore(flags); } |