author    Jan Beulich <jbeulich@novell.com>  2011-03-15 13:55:40 +0000
committer Jan Beulich <jbeulich@novell.com>  2011-03-15 13:55:40 +0000
commit    f21647ecc2b3be3df191bb75cea5c107d6498767 (patch)
tree      96d57cd21234f660ce5a321af3c92c5d18ad9393
parent    2b254451a7cec9074bba9bffee27d24289d2fea2 (diff)
x86: run-time callers of map_pages_to_xen() must check for errors
Again, (out-of-memory) errors must not cause hypervisor crashes, and
hence ought to be propagated.

This also adjusts the cache attribute changing loop in
get_page_from_l1e() so that it does not go through an unnecessary
iteration. While this could be considered mere cleanup, it is actually
a requirement for the error recovery path that is now needed.

Also make a few functions static, easing the check for potential
callers needing adjustment.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
xen-unstable changeset: 22997:5f28dcea1355
xen-unstable date: Wed Mar 09 16:15:36 2011 +0000

x86: don't BUG() post-boot in alloc_xen_pagetable()

Instead, propagate the condition to the callers, all of which also get
adjusted to check for that situation.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
xen-unstable changeset: 22996:1eeccafe9042
xen-unstable date: Wed Mar 09 16:14:59 2011 +0000
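The core of the first change is the lock-free cache-attribute update in
get_page_from_l1e(): the reworked do/while form only retries when another
CPU raced the cmpxchg(), and because the loop always leaves the displaced
count_info value in y, a failed map_pages_to_xen() can restore the previous
attribute bits. Below is a minimal, self-contained model of that
retry-and-rollback pattern, not the Xen code itself: the PGC_* values,
struct page_info, and the deliberately failing update_mappings() are
simplified stand-ins, and cmpxchg() is emulated with a GCC atomic builtin.

/* Build: gcc -O2 cacheattr.c */
#include <stdio.h>

#define PGC_cacheattr_base  29
#define PGC_cacheattr_mask  (7UL << PGC_cacheattr_base)

struct page_info { unsigned long count_info; };

/* Returns the value found at *ptr, like Xen's cmpxchg(). */
static unsigned long cmpxchg_ul(unsigned long *ptr, unsigned long old,
                                unsigned long new_val)
{
    __atomic_compare_exchange_n(ptr, &old, new_val, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old;
}

/* Stand-in: pretend any non-WB mapping update fails (e.g. -ENOMEM). */
static int update_mappings(unsigned long cacheattr)
{
    return cacheattr ? -1 : 0;
}

static int set_cacheattr(struct page_info *page, unsigned long cacheattr)
{
    unsigned long x, nx, y = page->count_info;

    /* Install the new attribute; retry only if another CPU raced us. */
    do {
        x  = y;
        nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
    } while ( (y = cmpxchg_ul(&page->count_info, x, nx)) != x );

    if ( update_mappings(cacheattr) != 0 )
    {
        /* y still holds the displaced value: restore its attribute bits. */
        cacheattr = y & PGC_cacheattr_mask;
        do {
            x  = y;
            nx = (x & ~PGC_cacheattr_mask) | cacheattr;
        } while ( (y = cmpxchg_ul(&page->count_info, x, nx)) != x );
        return -1;
    }
    return 0;
}

int main(void)
{
    struct page_info pg = { .count_info = 0 };

    /* The stub fails, so count_info must be back to 0 afterwards. */
    printf("rc=%d count_info=%#lx\n", set_cacheattr(&pg, 1), pg.count_info);
    return 0;
}

In the real function the failure path additionally drops the type and
general references it had just acquired and returns 0 to the caller, as the
hunks below show.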
-rw-r--r--  xen/arch/x86/mm.c        | 93
-rw-r--r--  xen/arch/x86/x86_32/mm.c |  3
-rw-r--r--  xen/arch/x86/x86_64/mm.c | 47
-rw-r--r--  xen/include/asm-x86/mm.h |  2
4 files changed, 95 insertions(+), 50 deletions(-)
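The second change replaces the unconditional BUG_ON() in
alloc_xen_pagetable() with NULL propagation once boot is complete:
virt_to_xen_l3e()/virt_to_xen_l2e() hand the NULL upward, and
map_pages_to_xen() converts it into -ENOMEM for its (now checking)
run-time callers. A minimal sketch of that contract follows; malloc()
stands in for the Xen heap allocator and the page-table walk is reduced
to a stub, so none of these helpers are the real Xen functions.

#include <errno.h>
#include <stdlib.h>

typedef unsigned long l2_pgentry_t;

/* Stand-in for alloc_xenheap_page(); may return NULL under pressure. */
static void *alloc_xenheap_page_stub(void) { return malloc(4096); }

static void *alloc_xen_pagetable(void)
{
    /* Previously: BUG_ON(v == NULL).  Failure now reaches the caller
     * (the real code still BUG()s while dom0 is being constructed). */
    return alloc_xenheap_page_stub();
}

static l2_pgentry_t *virt_to_xen_l2e_stub(unsigned long virt)
{
    l2_pgentry_t *pl2e = alloc_xen_pagetable();

    if ( !pl2e )
        return NULL;          /* propagate instead of crashing */
    (void)virt;               /* real code walks/extends the tables here */
    return pl2e;
}

static int map_pages_to_xen_stub(unsigned long virt)
{
    l2_pgentry_t *pl2e = virt_to_xen_l2e_stub(virt);

    if ( !pl2e )
        return -ENOMEM;       /* run-time callers must check this */
    /* ... install the mapping via pl2e ... */
    free(pl2e);               /* sketch only: avoid leaking the stub page */
    return 0;
}

int main(void) { return map_pages_to_xen_stub(0) ? 1 : 0; }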
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 84044d1931..c995c177ba 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -767,8 +767,9 @@ int is_iomem_page(unsigned long mfn)
return (page_get_owner(page) == dom_io);
}
-static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
{
+ int err = 0;
#ifdef __x86_64__
bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
@@ -776,12 +777,14 @@ static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
if ( unlikely(alias) && cacheattr )
- map_pages_to_xen(xen_va, mfn, 1, 0);
- map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+ err = map_pages_to_xen(xen_va, mfn, 1, 0);
+ if ( !err )
+ err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
- if ( unlikely(alias) && !cacheattr )
- map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+ if ( unlikely(alias) && !cacheattr && !err )
+ err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
#endif
+ return err;
}
int
@@ -793,6 +796,7 @@ get_page_from_l1e(
uint32_t l1f = l1e_get_flags(l1e);
struct vcpu *curr = current;
struct domain *real_pg_owner;
+ bool_t write;
if ( !(l1f & _PAGE_PRESENT) )
return 1;
@@ -849,9 +853,9 @@ get_page_from_l1e(
* contribute to writeable mapping refcounts. (This allows the
* qemu-dm helper process in dom0 to map the domain's memory without
* messing up the count of "real" writable mappings.) */
- if ( (l1f & _PAGE_RW) &&
- ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
- !get_page_type(page, PGT_writable_page) )
+ write = (l1f & _PAGE_RW) &&
+ ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner));
+ if ( write && !get_page_type(page, PGT_writable_page) )
goto could_not_pin;
if ( pte_flags_to_cacheattr(l1f) !=
@@ -862,22 +866,36 @@ get_page_from_l1e(
if ( is_xen_heap_page(page) )
{
- if ( (l1f & _PAGE_RW) &&
- ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
+ if ( write )
put_page_type(page);
put_page(page);
MEM_LOG("Attempt to change cache attributes of Xen heap page");
return 0;
}
- while ( ((y & PGC_cacheattr_mask) >> PGC_cacheattr_base) != cacheattr )
- {
+ do {
x = y;
nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
- y = cmpxchg(&page->count_info, x, nx);
- }
+ } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );

- update_xen_mappings(mfn, cacheattr);
+ if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+ {
+ cacheattr = y & PGC_cacheattr_mask;
+ do {
+ x = y;
+ nx = (x & ~PGC_cacheattr_mask) | cacheattr;
+ } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+ if ( write )
+ put_page_type(page);
+ put_page(page);
+
+ MEM_LOG("Error updating mappings for mfn %lx (pfn %lx,"
+ " from L1 entry %" PRIpte ") for %d",
+ mfn, get_gpfn_from_mfn(mfn),
+ l1e_get_intpte(l1e), l1e_owner->domain_id);
+ return 0;
+ }
}
return 1;
@@ -2005,6 +2023,21 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
#endif
+static int cleanup_page_cacheattr(struct page_info *page)
+{
+ uint32_t cacheattr =
+ (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
+
+ if ( likely(cacheattr == 0) )
+ return 0;
+
+ page->count_info &= ~PGC_cacheattr_mask;
+
+ BUG_ON(is_xen_heap_page(page));
+
+ return update_xen_mappings(page_to_mfn(page), 0);
+}
+
void put_page(struct page_info *page)
{
unsigned long nx, x, y = page->count_info;
@@ -2018,8 +2051,10 @@ void put_page(struct page_info *page)
if ( unlikely((nx & PGC_count_mask) == 0) )
{
- cleanup_page_cacheattr(page);
- free_domheap_page(page);
+ if ( cleanup_page_cacheattr(page) == 0 )
+ free_domheap_page(page);
+ else
+ MEM_LOG("Leaking pfn %lx", page_to_mfn(page));
}
}
@@ -2678,21 +2713,6 @@ static void put_superpage(unsigned long mfn)
#endif
-void cleanup_page_cacheattr(struct page_info *page)
-{
- uint32_t cacheattr =
- (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
-
- if ( likely(cacheattr == 0) )
- return;
-
- page->count_info &= ~PGC_cacheattr_mask;
-
- BUG_ON(is_xen_heap_page(page));
-
- update_xen_mappings(page_to_mfn(page), 0);
-}
-
int new_guest_cr3(unsigned long mfn)
{
@@ -5141,8 +5161,11 @@ int map_pages_to_xen(
while ( nr_mfns != 0 )
{
#ifdef __x86_64__
- l3_pgentry_t *pl3e = virt_to_xen_l3e(virt);
- l3_pgentry_t ol3e = *pl3e;
+ l3_pgentry_t ol3e, *pl3e = virt_to_xen_l3e(virt);
+
+ if ( !pl3e )
+ return -ENOMEM;
+ ol3e = *pl3e;
if ( cpu_has_page1gb &&
!(((virt >> PAGE_SHIFT) | mfn) &
@@ -5262,6 +5285,8 @@ int map_pages_to_xen(
#endif
pl2e = virt_to_xen_l2e(virt);
+ if ( !pl2e )
+ return -ENOMEM;
if ( ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
(nr_mfns >= (1<<PAGETABLE_ORDER)) &&
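One consequence visible in the mm.c hunks above: when the final reference
to a page is dropped, cleanup_page_cacheattr() may now fail, and in that
case put_page() deliberately leaks the frame ("Leaking pfn ...") rather
than returning a page with non-write-back aliasing to the allocator. A
compressed sketch of that policy, with all Xen types and helpers replaced
by stand-ins:

#include <stdio.h>

#define PGC_cacheattr_mask (7UL << 29)   /* stand-in for the real mask */

struct page_info { unsigned long count_info; };

/* Pretend restoring the write-back mapping fails (e.g. -ENOMEM). */
static int update_xen_mappings_stub(unsigned long mfn) { (void)mfn; return -1; }

static int cleanup_page_cacheattr(struct page_info *pg)
{
    if ( !(pg->count_info & PGC_cacheattr_mask) )
        return 0;                        /* common case: WB, nothing to undo */

    pg->count_info &= ~PGC_cacheattr_mask;
    return update_xen_mappings_stub(0);
}

static void free_domheap_page_stub(struct page_info *pg) { (void)pg; }

/* Models the tail of put_page() once the reference count reaches zero. */
static void drop_last_ref(struct page_info *pg)
{
    if ( cleanup_page_cacheattr(pg) == 0 )
        free_domheap_page_stub(pg);
    else
        printf("Leaking page %p\n", (void *)pg);   /* never recycle it */
}

int main(void)
{
    struct page_info pg = { .count_info = 1UL << 29 };  /* non-WB attr set */

    drop_last_ref(&pg);  /* the stub fails, so this prints the leak message */
    return 0;
}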
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index 0b7f59965f..73a2a9bcbc 100644
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -48,7 +48,8 @@ void *alloc_xen_pagetable(void)
if ( !early_boot )
{
void *v = alloc_xenheap_page();
- BUG_ON(v == NULL);
+
+ BUG_ON(!dom0 && !v);
return v;
}
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index e973c0edcb..46c55080bc 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -84,8 +84,9 @@ void *alloc_xen_pagetable(void)
if ( !early_boot )
{
struct page_info *pg = alloc_domheap_page(NULL, 0);
- BUG_ON(pg == NULL);
- return page_to_virt(pg);
+
+ BUG_ON(!dom0 && !pg);
+ return pg ? page_to_virt(pg) : NULL;
}
mfn = alloc_boot_pages(1, 1);
@@ -100,6 +101,9 @@ l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
{
l3_pgentry_t *pl3e = alloc_xen_pagetable();
+
+ if ( !pl3e )
+ return NULL;
clear_page(pl3e);
l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
}
@@ -112,9 +116,15 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
l3_pgentry_t *pl3e;
pl3e = virt_to_xen_l3e(v);
+ if ( !pl3e )
+ return NULL;
+
if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
{
l2_pgentry_t *pl2e = alloc_xen_pagetable();
+
+ if ( !pl2e )
+ return NULL;
clear_page(pl2e);
l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
}
@@ -429,6 +439,7 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info)
l3_pgentry_t *l3_ro_mpt = NULL;
l2_pgentry_t *l2_ro_mpt = NULL;
struct page_info *l1_pg;
+ int err = 0;
smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1));
@@ -479,24 +490,25 @@ static int setup_compat_m2p_table(struct mem_hotadd_info *info)
memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
- map_pages_to_xen(rwva,
- page_to_mfn(l1_pg),
- 1UL << PAGETABLE_ORDER,
- PAGE_HYPERVISOR);
+ err = map_pages_to_xen(rwva, page_to_mfn(l1_pg),
+ 1UL << PAGETABLE_ORDER,
+ PAGE_HYPERVISOR);
+ if ( err )
+ break;
memset((void *)rwva, 0x55, 1UL << L2_PAGETABLE_SHIFT);
/* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
}
#undef CNT
#undef MFN
- return 0;
+ return err;
}
/*
* Allocate and map the machine-to-phys table.
* The L3 for RO/RWRW MPT and the L2 for compatible MPT should be setup already
*/
-int setup_m2p_table(struct mem_hotadd_info *info)
+static int setup_m2p_table(struct mem_hotadd_info *info)
{
unsigned long i, va, smap, emap;
unsigned int n, memflags;
@@ -550,11 +562,13 @@ int setup_m2p_table(struct mem_hotadd_info *info)
else
{
l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
- map_pages_to_xen(
+ ret = map_pages_to_xen(
RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
page_to_mfn(l1_pg),
1UL << PAGETABLE_ORDER,
PAGE_HYPERVISOR);
+ if ( ret )
+ goto error;
memset((void *)(RDWR_MPT_VIRT_START + i * sizeof(unsigned long)),
0x55, 1UL << L2_PAGETABLE_SHIFT);
@@ -898,13 +912,13 @@ void cleanup_frame_table(struct mem_hotadd_info *info)
flush_tlb_all();
}
-/* Should we be paraniod failure in map_pages_to_xen? */
static int setup_frametable_chunk(void *start, void *end,
struct mem_hotadd_info *info)
{
unsigned long s = (unsigned long)start;
unsigned long e = (unsigned long)end;
unsigned long mfn;
+ int err;
ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
ASSERT(!(e & ((1 << L2_PAGETABLE_SHIFT) - 1)));
@@ -912,14 +926,17 @@ static int setup_frametable_chunk(void *start, void *end,
for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT))
{
mfn = alloc_hotadd_mfn(info);
- map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+ err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER,
+ PAGE_HYPERVISOR);
+ if ( err )
+ return err;
}
memset(start, -1, s - (unsigned long)start);
return 0;
}
-int extend_frame_table(struct mem_hotadd_info *info)
+static int extend_frame_table(struct mem_hotadd_info *info)
{
unsigned long cidx, nidx, eidx, spfn, epfn;
@@ -940,12 +957,16 @@ int extend_frame_table(struct mem_hotadd_info *info)
while ( cidx < eidx )
{
+ int err;
+
nidx = find_next_bit(pdx_group_valid, eidx, cidx);
if ( nidx >= eidx )
nidx = eidx;
- setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
+ err = setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
pdx_to_page(nidx * PDX_GROUP_COUNT),
info);
+ if ( err )
+ return err;
cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
}
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 95d0c3ef0f..c93a022952 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -325,8 +325,6 @@ int free_page_type(struct page_info *page, unsigned long type,
int preemptible);
int _shadow_mode_refcounts(struct domain *d);
-void cleanup_page_cacheattr(struct page_info *page);
-
int is_iomem_page(unsigned long mfn);
void clear_superpage_mark(struct page_info *page);