author     Keir Fraser <keir.fraser@citrix.com>  2007-12-19 10:11:54 +0000
committer  Keir Fraser <keir.fraser@citrix.com>  2007-12-19 10:11:54 +0000
commit     692779e12869a2da82059623c8e6885efdae1ff5 (patch)
tree       8b5da37825d199e2f40809a48491cd0b43d931a2
parent     faaa844f73ecc1acad0858ed71ae31cc7d9ed063 (diff)
Shadow: tidy the virtual-TLB translation cache.
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c    19
-rw-r--r--  xen/arch/x86/mm/shadow/private.h  30
-rw-r--r--  xen/arch/x86/mm/shadow/types.h    38
3 files changed, 27 insertions, 60 deletions
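
In short, vTLB entries now carry only the guest page number, the frame number, and the page-fault error code of the walk that filled them; a pfec of zero marks an empty slot, the accumulated guest PTE flags (and the accumulate_guest_flags() helper) go away, vtlb_insert() takes plain scalars, and vtlb_lookup() returns a frame number or INVALID_GFN instead of copying a struct out. The following is a compact, self-contained model of that interface (PAGE_SHIFT, INVALID_GFN, VTLB_ENTRIES and the toy main() are local stand-ins for the Xen definitions, and the per-vcpu vtlb_lock is omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define INVALID_GFN   (~0UL)
    #define VALID_GFN(g)  ((g) != INVALID_GFN)
    #define VTLB_ENTRIES  13      /* size of the toy direct-mapped cache */

    struct shadow_vtlb {
        unsigned long page_number;   /* Guest virtual address >> PAGE_SHIFT */
        unsigned long frame_number;  /* Guest physical address >> PAGE_SHIFT */
        uint32_t pfec;               /* Error code of the walk that filled this
                                      * entry; zero marks an empty slot */
    };

    static struct shadow_vtlb vtlb[VTLB_ENTRIES];  /* one table; no lock here */

    static int vtlb_hash(unsigned long page_number)
    {
        return page_number % VTLB_ENTRIES;
    }

    /* Put a translation into the vTLB, potentially clobbering an old one */
    static void vtlb_insert(unsigned long page, unsigned long frame,
                            uint32_t pfec)
    {
        struct shadow_vtlb entry =
            { .page_number = page, .frame_number = frame, .pfec = pfec };
        vtlb[vtlb_hash(page)] = entry;
    }

    /* Look a translation up; returns INVALID_GFN if nothing suitable is cached */
    static unsigned long vtlb_lookup(unsigned long va, uint32_t pfec)
    {
        unsigned long page_number = va >> PAGE_SHIFT;
        int i = vtlb_hash(page_number);

        if ( vtlb[i].pfec != 0
             && vtlb[i].page_number == page_number
             /* Any cached walk that had at least these pfec bits is OK */
             && (vtlb[i].pfec & pfec) == pfec )
            return vtlb[i].frame_number;

        return INVALID_GFN;
    }

    int main(void)
    {
        /* Cache a user-mode, present translation for VA 0x7f531000 -> GFN 0x1234 */
        vtlb_insert(0x7f531000UL >> PAGE_SHIFT, 0x1234UL, 0x5);

        unsigned long gfn = vtlb_lookup(0x7f531abcUL, 0x1);  /* present-only lookup */
        if ( VALID_GFN(gfn) )
            printf("hit: gfn %#lx\n", gfn);
        else
            printf("miss\n");
        return 0;
    }
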
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 37cea817c3..fe74ec566f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2829,6 +2829,12 @@ static int sh_page_fault(struct vcpu *v,
goto not_a_shadow_fault;
}
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+ /* Remember this successful VA->GFN translation for later. */
+ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn),
+ regs->error_code | PFEC_page_present);
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
/* Make sure there is enough free shadow memory to build a chain of
* shadow tables. (We never allocate a top-level shadow on this path,
* only a 32b l1, pae l1, or 64b l3+2+1. Note that while
@@ -3113,10 +3119,10 @@ sh_gva_to_gfn(struct vcpu *v, unsigned long va, uint32_t *pfec)
gfn_t gfn;
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
- struct shadow_vtlb t = {0};
/* Check the vTLB cache first */
- if ( vtlb_lookup(v, va, pfec[0], &t) )
- return t.frame_number;
+ unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
+ if ( VALID_GFN(vtlb_gfn) )
+ return vtlb_gfn;
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
if ( guest_walk_tables(v, va, &gw, pfec[0], 0) != 0 )
@@ -3128,11 +3134,8 @@ sh_gva_to_gfn(struct vcpu *v, unsigned long va, uint32_t *pfec)
gfn = guest_walk_to_gfn(&gw);
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
- t.page_number = va >> PAGE_SHIFT;
- t.frame_number = gfn_x(gfn);
- t.flags = accumulate_guest_flags(v, &gw);
- t.pfec = pfec[0];
- vtlb_insert(v, t);
+ /* Remember this successful VA->GFN translation for later. */
+ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), pfec[0]);
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
return gfn_x(gfn);
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 15315ef757..541177d2de 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -688,8 +688,7 @@ void shadow_continue_emulation(
*
* We keep a cache of virtual-to-physical translations that we have seen
* since the last TLB flush. This is safe to use for frame translations,
- * but callers that use the rights need to re-check the actual guest tables
- * before triggering a fault.
+ * but callers need to re-check the actual guest tables if the lookup fails.
*
* Lookups and updates are protected by a per-vTLB (and hence per-vcpu)
* lock. This lock is held *only* while reading or writing the table,
@@ -702,8 +701,9 @@ void shadow_continue_emulation(
struct shadow_vtlb {
unsigned long page_number; /* Guest virtual address >> PAGE_SHIFT */
unsigned long frame_number; /* Guest physical address >> PAGE_SHIFT */
- uint32_t pfec; /* Pagefault code for the lookup that filled this entry */
- uint32_t flags; /* Accumulated guest pte flags, or 0 for an empty slot. */
+ uint32_t pfec; /* PF error code of the lookup that filled this
+ * entry. A pfec of zero means the slot is empty
+ * (since that would require us to re-try anyway) */
};
/* Call whenever the guest flushes its actual TLB */
@@ -720,32 +720,34 @@ static inline int vtlb_hash(unsigned long page_number)
}
/* Put a translation into the vTLB, potentially clobbering an old one */
-static inline void vtlb_insert(struct vcpu *v, struct shadow_vtlb entry)
+static inline void vtlb_insert(struct vcpu *v, unsigned long page,
+ unsigned long frame, uint32_t pfec)
{
+ struct shadow_vtlb entry =
+ { .page_number = page, .frame_number = frame, .pfec = pfec };
spin_lock(&v->arch.paging.vtlb_lock);
- v->arch.paging.vtlb[vtlb_hash(entry.page_number)] = entry;
+ v->arch.paging.vtlb[vtlb_hash(page)] = entry;
spin_unlock(&v->arch.paging.vtlb_lock);
}
-/* Look a translation up in the vTLB. Returns 0 if not found. */
-static inline int vtlb_lookup(struct vcpu *v, unsigned long va, uint32_t pfec,
- struct shadow_vtlb *result)
+/* Look a translation up in the vTLB. Returns INVALID_GFN if not found. */
+static inline unsigned long vtlb_lookup(struct vcpu *v,
+ unsigned long va, uint32_t pfec)
{
unsigned long page_number = va >> PAGE_SHIFT;
- int rv = 0;
+ unsigned long frame_number = INVALID_GFN;
int i = vtlb_hash(page_number);
spin_lock(&v->arch.paging.vtlb_lock);
- if ( v->arch.paging.vtlb[i].flags != 0
+ if ( v->arch.paging.vtlb[i].pfec != 0
&& v->arch.paging.vtlb[i].page_number == page_number
/* Any successful walk that had at least these pfec bits is OK */
&& (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
{
- rv = 1;
- result[0] = v->arch.paging.vtlb[i];
+ frame_number = v->arch.paging.vtlb[i].frame_number;
}
spin_unlock(&v->arch.paging.vtlb_lock);
- return rv;
+ return frame_number;
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
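
The `(vtlb[i].pfec & pfec) == pfec` test above accepts a cached entry only if the walk that filled it was performed with at least the rights now being requested. A minimal standalone illustration of that superset check, with the architectural x86 page-fault error-code bits defined locally for the demo:

    #include <stdint.h>
    #include <stdio.h>

    /* Architectural x86 page-fault error-code bits (local definitions) */
    #define PFEC_page_present 0x01
    #define PFEC_write_access 0x02
    #define PFEC_user_mode    0x04

    /* A cached walk with rights 'cached' can answer a lookup needing 'wanted'
     * only if 'cached' contains every bit of 'wanted'. */
    static int pfec_satisfies(uint32_t cached, uint32_t wanted)
    {
        return (cached & wanted) == wanted;
    }

    int main(void)
    {
        uint32_t user_write = PFEC_page_present | PFEC_write_access
                              | PFEC_user_mode;

        /* A user-mode write walk can satisfy a later user-mode read lookup... */
        printf("%d\n", pfec_satisfies(user_write,
                                      PFEC_page_present | PFEC_user_mode)); /* 1 */

        /* ...but a supervisor read walk cannot vouch for a user-mode write. */
        printf("%d\n", pfec_satisfies(PFEC_page_present,
                                      PFEC_page_present | PFEC_write_access)); /* 0 */
        return 0;
    }
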
diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index 2ca3611edb..8949c3d73e 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -527,44 +527,6 @@ struct shadow_walk_t
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */
-static inline u32
-accumulate_guest_flags(struct vcpu *v, walk_t *gw)
-{
- u32 accumulated_flags;
-
- if ( unlikely(!(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT)) )
- return 0;
-
- // We accumulate the permission flags with bitwise ANDing.
- // This works for the PRESENT bit, RW bit, and USER bit.
- // For the NX bit, however, the polarity is wrong, so we accumulate the
- // inverse of the NX bit.
- //
- accumulated_flags = guest_l1e_get_flags(gw->l1e) ^ _PAGE_NX_BIT;
- accumulated_flags &= guest_l2e_get_flags(gw->l2e) ^ _PAGE_NX_BIT;
-
- // Note that PAE guests do not have USER or RW or NX bits in their L3s.
- //
-#if GUEST_PAGING_LEVELS == 3
- accumulated_flags &=
- ~_PAGE_PRESENT | (guest_l3e_get_flags(gw->l3e) & _PAGE_PRESENT);
-#elif GUEST_PAGING_LEVELS >= 4
- accumulated_flags &= guest_l3e_get_flags(gw->l3e) ^ _PAGE_NX_BIT;
- accumulated_flags &= guest_l4e_get_flags(gw->l4e) ^ _PAGE_NX_BIT;
-#endif
-
- // Revert the NX bit back to its original polarity
- accumulated_flags ^= _PAGE_NX_BIT;
-
- // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
- // entries (since even the guest kernel runs in ring 3).
- //
- if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
- accumulated_flags |= _PAGE_USER;
-
- return accumulated_flags;
-}
-
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************