author     Tim Deegan <Tim.Deegan@xensource.com>   2006-11-23 17:44:12 +0000
committer  Tim Deegan <Tim.Deegan@xensource.com>   2006-11-23 17:44:12 +0000
commit     08f6effd42e99aef744f59b738d3e66628befeb7 (patch)
tree       d1f3409b338b55ac327195d95bcb3c31fbfc7f35
parent     63a54d2cb36f030d34832de059b4b559c673ff3c (diff)
[XEN] Don't call domain_crash_synchronous() with shadow lock held.
Call domain_crash() and propagate an error to the caller instead.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
-rw-r--r--  xen/arch/x86/mm/shadow/common.c     4
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c    105
-rw-r--r--  xen/arch/x86/mm/shadow/private.h   36
-rw-r--r--  xen/include/asm-x86/shadow.h        5
4 files changed, 94 insertions, 56 deletions
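
The change applies one pattern throughout: code that used to call domain_crash_synchronous() while still holding the shadow lock now marks the domain as crashed with domain_crash(), returns an error indication, and lets the caller drop the lock before bailing out. Below is a minimal, self-contained sketch of that control flow; it is not part of the patch, and the toy_* names, the pthread mutex and the SET_ERROR flag are illustrative stand-ins for Xen's shadow lock, domain structure and SHADOW_SET_ERROR.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define SET_ERROR 0x4              /* plays the role of SHADOW_SET_ERROR */

struct toy_domain {
    pthread_mutex_t lock;          /* plays the role of the shadow lock */
    int is_dead;                   /* plays the role of the "dying" flag */
};

/* Mark the domain as crashed but keep running: safe to call with the
 * lock held, unlike a synchronous crash that never returns. */
static void toy_domain_crash(struct toy_domain *d)
{
    d->is_dead = 1;
}

/* Low-level operation: may fail (e.g. refcount overflow).  It reports
 * failure upward instead of crashing synchronously under the lock. */
static int toy_set_entry(struct toy_domain *d, int refcount_ok)
{
    if ( !refcount_ok )
    {
        toy_domain_crash(d);       /* flag the crash ... */
        return SET_ERROR;          /* ... and propagate the error */
    }
    return 0;
}

/* Caller: holds the lock across the operation, checks the error flag,
 * and only returns once the lock has been dropped again. */
static int toy_handle_fault(struct toy_domain *d, int refcount_ok)
{
    int rc;

    pthread_mutex_lock(&d->lock);
    rc = toy_set_entry(d, refcount_ok);
    if ( rc & SET_ERROR )
    {
        assert(d->is_dead);        /* the failing path crashed the domain */
        pthread_mutex_unlock(&d->lock);
        return 0;                  /* leave the handler immediately */
    }
    pthread_mutex_unlock(&d->lock);
    return 1;
}

int main(void)
{
    struct toy_domain d = { .lock = PTHREAD_MUTEX_INITIALIZER, .is_dead = 0 };
    printf("success path: %d\n", toy_handle_fault(&d, 1));
    printf("failure path: %d\n", toy_handle_fault(&d, 0));
    return 0;
}

The point of the pattern is that the failing path never blocks or exits while the lock is held; it only records the failure and unwinds.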
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index b6479e8da7..5f7b2bd5c9 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -791,6 +791,7 @@ shadow_alloc_p2m_page(struct domain *d)
struct list_head *entry;
mfn_t mfn;
void *p;
+ int ok;
if ( list_empty(&d->arch.shadow.p2m_freelist) &&
!shadow_alloc_p2m_pages(d) )
@@ -799,7 +800,8 @@ shadow_alloc_p2m_page(struct domain *d)
list_del(entry);
list_add_tail(entry, &d->arch.shadow.p2m_inuse);
mfn = page_to_mfn(list_entry(entry, struct page_info, list));
- sh_get_ref(mfn, 0);
+ ok = sh_get_ref(mfn, 0);
+ ASSERT(ok); /* First sh_get_ref() can't possibly overflow */
p = sh_map_domain_page(mfn);
clear_page(p);
sh_unmap_domain_page(p);
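
In shadow_alloc_p2m_page() above, the page has just been allocated, so the very first sh_get_ref() on it cannot overflow the reference count; the new boolean return value is therefore simply asserted. A tiny illustrative sketch of that contract, using an invented toy_page type rather than Xen's shadow_page_info:

#include <assert.h>
#include <limits.h>

struct toy_page { unsigned int count; };

/* Returns 1 on success, 0 if the count would overflow. */
static int toy_get_ref(struct toy_page *p)
{
    if ( p->count == UINT_MAX )
        return 0;
    p->count++;
    return 1;
}

int main(void)
{
    struct toy_page fresh = { .count = 0 };
    /* The first reference on a freshly allocated page can't overflow. */
    assert(toy_get_ref(&fresh));
    return 0;
}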
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 472550644e..7e3f69c7fb 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -832,12 +832,6 @@ l1e_propagate_from_guest(struct vcpu *v,
/* These functions update shadow entries (and do bookkeeping on the shadow
* tables they are in). It is intended that they are the only
* functions which ever write (non-zero) data onto a shadow page.
- *
- * They return a set of flags:
- * SHADOW_SET_CHANGED -- we actually wrote a new value to the shadow.
- * SHADOW_SET_FLUSH -- the caller must cause a TLB flush.
- * SHADOW_SET_ERROR -- the input is not a valid entry (for example, if
- * shadow_get_page_from_l1e() fails).
*/
static inline void safe_write_entry(void *dst, void *src)
@@ -982,10 +976,12 @@ static int shadow_set_l4e(struct vcpu *v,
| (((unsigned long)sl4e) & ~PAGE_MASK));
if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
- {
/* About to install a new reference */
- sh_get_ref(shadow_l4e_get_mfn(new_sl4e), paddr);
- }
+ if ( !sh_get_ref(shadow_l4e_get_mfn(new_sl4e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
@@ -1022,11 +1018,13 @@ static int shadow_set_l3e(struct vcpu *v,
paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
| (((unsigned long)sl3e) & ~PAGE_MASK));
- if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
- {
+ if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
/* About to install a new reference */
- sh_get_ref(shadow_l3e_get_mfn(new_sl3e), paddr);
- }
+ if ( !sh_get_ref(shadow_l3e_get_mfn(new_sl3e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
@@ -1077,10 +1075,12 @@ static int shadow_set_l2e(struct vcpu *v,
| (((unsigned long)sl2e) & ~PAGE_MASK));
if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
- {
/* About to install a new reference */
- sh_get_ref(shadow_l2e_get_mfn(new_sl2e), paddr);
- }
+ if ( !sh_get_ref(shadow_l2e_get_mfn(new_sl2e), paddr) )
+ {
+ domain_crash(v->domain);
+ return SHADOW_SET_ERROR;
+ }
/* Write the new entry */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
@@ -1727,6 +1727,8 @@ static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
*sl3mfn, &new_sl4e, ft);
r = shadow_set_l4e(v, sl4e, new_sl4e, sl4mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
}
/* Now follow it down a level. Guaranteed to succeed. */
return sh_linear_l3_table(v) + shadow_l3_linear_offset(gw->va);
@@ -1745,7 +1747,7 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
if ( !valid_mfn(gw->l2mfn) ) return NULL; /* No guest page. */
/* Get the l3e */
sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft);
- ASSERT(sl3e != NULL); /* Since we know guest PT is valid this far */
+ if ( sl3e == NULL ) return NULL;
if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
{
*sl2mfn = shadow_l3e_get_mfn(*sl3e);
@@ -1767,6 +1769,8 @@ static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
*sl2mfn, &new_sl3e, ft);
r = shadow_set_l3e(v, sl3e, new_sl3e, sl3mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
}
/* Now follow it down a level. Guaranteed to succeed. */
return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
@@ -1848,6 +1852,8 @@ static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
*sl1mfn, &new_sl2e, ft);
r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ if ( r & SHADOW_SET_ERROR )
+ return NULL;
/* This next line is important: in 32-on-PAE and 32-on-64 modes,
* the guest l1 table has an 8k shadow, and we need to return
* the right mfn of the pair. This call will set it for us as a
@@ -2711,10 +2717,17 @@ static int sh_page_fault(struct vcpu *v,
/* Acquire the shadow. This must happen before we figure out the rights
* for the shadow entry, since we might promote a page here. */
- // XXX -- this code will need to change somewhat if/when the shadow code
- // can directly map superpages...
ptr_sl1e = shadow_get_and_create_l1e(v, &gw, &sl1mfn, ft);
- ASSERT(ptr_sl1e);
+ if ( unlikely(ptr_sl1e == NULL) )
+ {
+ /* Couldn't get the sl1e! Since we know the guest entries
+ * are OK, this can only have been caused by a failed
+ * shadow_set_l*e(), which will have crashed the guest.
+ * Get out of the fault handler immediately. */
+ ASSERT(test_bit(_DOMF_dying, &d->domain_flags));
+ shadow_unlock(d);
+ return 0;
+ }
/* Calculate the shadow entry and write it */
l1e_propagate_from_guest(v, (gw.l1e) ? gw.l1e : &gw.eff_l1e, gw.l1mfn,
@@ -3265,10 +3278,9 @@ sh_set_toplevel_shadow(struct vcpu *v,
smfn = get_shadow_status(v, gmfn, root_type);
if ( valid_mfn(smfn) )
{
- /* Pull this root shadow to the front of the list of roots. */
- struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
- list_del(&sp->list);
- list_add(&sp->list, &d->arch.shadow.toplevel_shadows);
+ /* Pull this root shadow out of the list of roots (we will put
+ * it back in at the head). */
+ list_del(&mfn_to_shadow_page(smfn)->list);
}
else
{
@@ -3276,8 +3288,6 @@ sh_set_toplevel_shadow(struct vcpu *v,
shadow_prealloc(d, SHADOW_MAX_ORDER);
/* Shadow the page. */
smfn = sh_make_shadow(v, gmfn, root_type);
- list_add(&mfn_to_shadow_page(smfn)->list,
- &d->arch.shadow.toplevel_shadows);
}
ASSERT(valid_mfn(smfn));
@@ -3287,10 +3297,22 @@ sh_set_toplevel_shadow(struct vcpu *v,
mfn_to_page(gmfn)->shadow_flags &= ~SHF_unhooked_mappings;
#endif
- /* Take a ref to this page: it will be released in sh_detach_old_tables()
- * or in the next call to sh_set_toplevel_shadow(). */
- sh_get_ref(smfn, 0);
- sh_pin(smfn);
+ /* Pin the shadow and put it (back) on the list of top-level shadows */
+ if ( sh_pin(smfn) )
+ list_add(&mfn_to_shadow_page(smfn)->list,
+ &d->arch.shadow.toplevel_shadows);
+ else
+ {
+ SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
+ domain_crash(v->domain);
+ }
+
+ /* Take a ref to this page: it will be released in sh_detach_old_tables. */
+ if ( !sh_get_ref(smfn, 0) )
+ {
+ SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
+ domain_crash(v->domain);
+ }
/* Done. Install it */
SHADOW_PRINTK("%u/%u [%u] gmfn %#"SH_PRI_mfn" smfn %#"SH_PRI_mfn"\n",
@@ -3559,7 +3581,7 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
#endif
#endif
mfn_t sl1mfn;
-
+ int r;
/* Carefully look in the shadow linear map for the l1e we expect */
#if SHADOW_PAGING_LEVELS >= 4
@@ -3588,7 +3610,8 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
/* Found it! Need to remove its write permissions. */
sl1mfn = shadow_l2e_get_mfn(*sl2p);
sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW);
- shadow_set_l1e(v, sl1p, sl1e, sl1mfn);
+ r = shadow_set_l1e(v, sl1p, sl1e, sl1mfn);
+ ASSERT( !(r & SHADOW_SET_ERROR) );
return 1;
}
#endif
@@ -3608,7 +3631,7 @@ int sh_remove_write_access(struct vcpu *v, mfn_t sl1mfn, mfn_t readonly_mfn)
&& (flags & _PAGE_RW)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
{
- shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
+ (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
/* Remember the last shadow that we shot a writeable mapping in */
v->arch.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
@@ -3636,7 +3659,7 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn)
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
{
- shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
+ (void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(), sl1mfn);
if ( (mfn_to_page(target_mfn)->count_info & PGC_count_mask) == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
@@ -3654,17 +3677,17 @@ void sh_clear_shadow_entry(struct vcpu *v, void *ep, mfn_t smfn)
switch ( mfn_to_shadow_page(smfn)->type )
{
case SH_type_l1_shadow:
- shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
+ (void) shadow_set_l1e(v, ep, shadow_l1e_empty(), smfn); break;
case SH_type_l2_shadow:
#if GUEST_PAGING_LEVELS == 3
case SH_type_l2h_shadow:
#endif
- shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
+ (void) shadow_set_l2e(v, ep, shadow_l2e_empty(), smfn); break;
#if GUEST_PAGING_LEVELS >= 4
case SH_type_l3_shadow:
- shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
+ (void) shadow_set_l3e(v, ep, shadow_l3e_empty(), smfn); break;
case SH_type_l4_shadow:
- shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
+ (void) shadow_set_l4e(v, ep, shadow_l4e_empty(), smfn); break;
#endif
default: BUG(); /* Called with the wrong kind of shadow. */
}
@@ -3686,7 +3709,7 @@ int sh_remove_l1_shadow(struct vcpu *v, mfn_t sl2mfn, mfn_t sl1mfn)
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
{
- shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
+ (void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
if ( mfn_to_shadow_page(sl1mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
@@ -3709,7 +3732,7 @@ int sh_remove_l2_shadow(struct vcpu *v, mfn_t sl3mfn, mfn_t sl2mfn)
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
{
- shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
+ (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
if ( mfn_to_shadow_page(sl2mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
@@ -3731,7 +3754,7 @@ int sh_remove_l3_shadow(struct vcpu *v, mfn_t sl4mfn, mfn_t sl3mfn)
if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
{
- shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
+ (void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
if ( mfn_to_shadow_page(sl3mfn)->type == 0 )
/* This breaks us cleanly out of the FOREACH macro */
done = 1;
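
Every shadow_set_lXe() call site in multi.c now falls into one of two patterns: callers installing a new entry check SHADOW_SET_ERROR and propagate NULL back up the shadow_get_and_create_lNe() chain (ultimately letting sh_page_fault() unlock and return), while teardown paths that write an empty entry, which cannot fail, discard the result with a (void) cast. A minimal sketch of the flag-checking idiom follows; the SET_* macros and toy_* functions are illustrative, not the Xen definitions.

#include <stdio.h>

/* Flag bits in the style of the SHADOW_SET_* return values. */
#define SET_CHANGED 0x1
#define SET_FLUSH   0x2
#define SET_ERROR   0x4

/* Pretend low-level setter: writes an entry and reports what happened. */
static int toy_set_entry(int *slot, int value)
{
    int flags = 0;
    if ( value < 0 )
        return SET_ERROR;          /* invalid input: refuse and report */
    if ( *slot != value )
        flags |= SET_CHANGED;
    if ( *slot != 0 )
        flags |= SET_FLUSH;        /* replaced a live entry: flush needed */
    *slot = value;
    return flags;
}

/* Caller in the style of shadow_get_and_create_lNe(): bail out on error,
 * act on the flush hint, otherwise carry on down a level. */
static int *toy_get_and_create(int *slot, int value)
{
    int r = toy_set_entry(slot, value);
    if ( r & SET_ERROR )
        return NULL;               /* propagate failure to the level above */
    if ( r & SET_FLUSH )
        printf("would flush TLBs here\n");
    return slot;
}

int main(void)
{
    int slot = 0;
    printf("ok:   %p\n", (void *)toy_get_and_create(&slot, 42));
    printf("fail: %p\n", (void *)toy_get_and_create(&slot, -1));
    return 0;
}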
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index e56dd3517d..11cb65910b 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -260,6 +260,18 @@ void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
/******************************************************************************
+ * Flags used in the return value of the shadow_set_lXe() functions...
+ */
+
+/* We actually wrote something new to the shadow */
+#define SHADOW_SET_CHANGED 0x1
+/* Caller should flush TLBs to clear the old entry */
+#define SHADOW_SET_FLUSH 0x2
+/* Something went wrong: the shadow entry was invalid or refcount failed */
+#define SHADOW_SET_ERROR 0x4
+
+
+/******************************************************************************
* MFN/page-info handling
*/
@@ -350,8 +362,9 @@ void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);
/* Increase the refcount of a shadow page. Arguments are the mfn to refcount,
* and the physical address of the shadow entry that holds the ref (or zero
- * if the ref is held by something else) */
-static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
+ * if the ref is held by something else).
+ * Returns 0 for failure, 1 for success. */
+static inline int sh_get_ref(mfn_t smfn, paddr_t entry_pa)
{
u32 x, nx;
struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
@@ -365,7 +378,7 @@ static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
{
SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
sp->backpointer, mfn_x(smfn));
- domain_crash_synchronous();
+ return 0;
}
/* Guarded by the shadow lock, so no need for atomic update */
@@ -374,6 +387,8 @@ static inline void sh_get_ref(mfn_t smfn, paddr_t entry_pa)
/* We remember the first shadow entry that points to each shadow. */
if ( entry_pa != 0 && sp->up == 0 )
sp->up = entry_pa;
+
+ return 1;
}
@@ -396,9 +411,9 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
if ( unlikely(x == 0) )
{
- SHADOW_PRINTK("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
- mfn_x(smfn), sp->count, sp->type);
- domain_crash_synchronous();
+ SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
+ mfn_x(smfn), sp->count, sp->type);
+ BUG();
}
/* Guarded by the shadow lock, so no need for atomic update */
@@ -409,8 +424,9 @@ static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
}
-/* Pin a shadow page: take an extra refcount and set the pin bit. */
-static inline void sh_pin(mfn_t smfn)
+/* Pin a shadow page: take an extra refcount and set the pin bit.
+ * Returns 0 for failure, 1 for success. */
+static inline int sh_pin(mfn_t smfn)
{
struct shadow_page_info *sp;
@@ -418,9 +434,11 @@ static inline void sh_pin(mfn_t smfn)
sp = mfn_to_shadow_page(smfn);
if ( !(sp->pinned) )
{
- sh_get_ref(smfn, 0);
+ if ( !sh_get_ref(smfn, 0) )
+ return 0;
sp->pinned = 1;
}
+ return 1;
}
/* Unpin a shadow page: unset the pin bit and release the extra ref. */
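
sh_pin() now takes its extra reference through the fallible sh_get_ref() and reports failure to its caller instead of crashing synchronously inside the helper; sh_set_toplevel_shadow() reacts by logging and calling domain_crash(). A small stand-alone sketch of that shape, with invented toy_* names rather than Xen's types:

#include <limits.h>
#include <stdio.h>

struct toy_shadow {
    unsigned int count;
    int pinned;
};

/* Returns 1 on success, 0 if the reference count would overflow. */
static int toy_get_ref(struct toy_shadow *sp)
{
    if ( sp->count == UINT_MAX )
        return 0;
    sp->count++;
    return 1;
}

/* Pin: take the extra ref only if not already pinned; report failure
 * upward instead of crashing inside the helper. */
static int toy_pin(struct toy_shadow *sp)
{
    if ( !sp->pinned )
    {
        if ( !toy_get_ref(sp) )
            return 0;
        sp->pinned = 1;
    }
    return 1;
}

int main(void)
{
    struct toy_shadow ok  = { .count = 1,        .pinned = 0 };
    struct toy_shadow bad = { .count = UINT_MAX, .pinned = 0 };

    printf("pin ok:  %d\n", toy_pin(&ok));   /* 1: ref taken, page pinned */
    printf("pin bad: %d\n", toy_pin(&bad));  /* 0: caller must handle it */
    return 0;
}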
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 0a990e0382..f7af14dae1 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -68,11 +68,6 @@
*/
#define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })
-// flags used in the return value of the shadow_set_lXe() functions...
-#define SHADOW_SET_CHANGED 0x1
-#define SHADOW_SET_FLUSH 0x2
-#define SHADOW_SET_ERROR 0x4
-
// How do we tell that we have a 32-bit PV guest in a 64-bit Xen?
#ifdef __x86_64__
#define pv_32bit_guest(_v) 0 // not yet supported