about summary refs log tree commit diff stats
path: root/xen/arch/x86/flushtlb.c
diff options
context:
space:
mode:
authorKeir Fraser <keir@xensource.com>2007-10-16 17:31:37 +0100
committerKeir Fraser <keir@xensource.com>2007-10-16 17:31:37 +0100
commit19435c10abf79a343c267baaa4e16d349ee93333 (patch)
tree8665bac83a3081160d57f376f6a064b15bf4b23e /xen/arch/x86/flushtlb.c
parent43e55cbba80d154ac6786416e19e69912501b5e4 (diff)
downloadxen-19435c10abf79a343c267baaa4e16d349ee93333.tar.gz
xen-19435c10abf79a343c267baaa4e16d349ee93333.tar.bz2
xen-19435c10abf79a343c267baaa4e16d349ee93333.zip
x86: consolidate/enhance TLB flushing interface
Fold the TLB-flush operations into a single local handler and a single SMP multiplexor, and add the capability to also flush caches through the same interfaces (a subsequent patch will make use of this). While changing cpuinfo_x86, this patch also removes several unused fields apparently inherited from Linux. Signed-off-by: Jan Beulich <jbeulich@novell.com> Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/arch/x86/flushtlb.c')
-rw-r--r--xen/arch/x86/flushtlb.c79
1 file changed, 61 insertions, 18 deletions
diff --git a/xen/arch/x86/flushtlb.c b/xen/arch/x86/flushtlb.c
index f7c06f30ef..69aa7fa21d 100644
--- a/xen/arch/x86/flushtlb.c
+++ b/xen/arch/x86/flushtlb.c
@@ -84,10 +84,10 @@ void write_cr3(unsigned long cr3)
#ifdef USER_MAPPINGS_ARE_GLOBAL
__pge_off();
- __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
+ asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
__pge_on();
#else
- __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
+ asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
#endif
post_flush(t);
@@ -95,26 +95,69 @@ void write_cr3(unsigned long cr3)
local_irq_restore(flags);
}
-void local_flush_tlb(void)
+void flush_area_local(const void *va, unsigned int flags)
{
- unsigned long flags;
- u32 t;
+ const struct cpuinfo_x86 *c = &current_cpu_data;
+ unsigned int level = flags & FLUSH_LEVEL_MASK;
+ unsigned long irqfl;
- /* This non-reentrant function is sometimes called in interrupt context. */
- local_irq_save(flags);
+ ASSERT(level < CONFIG_PAGING_LEVELS);
- t = pre_flush();
-
- hvm_flush_guest_tlbs();
-
-#ifdef USER_MAPPINGS_ARE_GLOBAL
- __pge_off();
- __pge_on();
-#else
- __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (read_cr3()) : "memory" );
+ /* This non-reentrant function is sometimes called in interrupt context. */
+ local_irq_save(irqfl);
+
+ if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
+ {
+ if ( (level != 0) && test_bit(level, &c->invlpg_works_ok) )
+ {
+ asm volatile ( "invlpg %0"
+ : : "m" (*(const char *)(va)) : "memory" );
+ }
+ else
+ {
+ u32 t = pre_flush();
+
+ hvm_flush_guest_tlbs();
+
+#ifndef USER_MAPPINGS_ARE_GLOBAL
+ if ( !(flags & FLUSH_TLB_GLOBAL) ||
+ !(mmu_cr4_features & X86_CR4_PGE) )
+ {
+ asm volatile ( "mov %0, %%cr3"
+ : : "r" (read_cr3()) : "memory" );
+ }
+ else
#endif
+ {
+ __pge_off();
+ barrier();
+ __pge_on();
+ }
+
+ post_flush(t);
+ }
+ }
- post_flush(t);
+ if ( flags & FLUSH_CACHE )
+ {
+ unsigned long i, sz;
+
+ sz = level ? (1UL << ((level - 1) * PAGETABLE_ORDER)) : ULONG_MAX;
+
+ if ( c->x86_clflush_size && c->x86_cache_size &&
+ (sz < (c->x86_cache_size >> (PAGE_SHIFT - 10))) )
+ {
+ sz <<= PAGE_SHIFT;
+ va = (const void *)((unsigned long)va & ~(sz - 1));
+ for ( i = 0; i < sz; i += c->x86_clflush_size )
+ asm volatile ( "clflush %0"
+ : : "m" (((const char *)va)[i]) );
+ }
+ else
+ {
+ wbinvd();
+ }
+ }
- local_irq_restore(flags);
+ local_irq_restore(irqfl);
}