author    Jan Beulich <jbeulich@suse.com>    2013-01-23 14:14:34 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-01-23 14:14:34 +0100
commit    53a4e820f2888b1c7fcac6cc65c5ce854a2ff1ea (patch)
tree      e8eb111683587f5a494d743ee61093272b2b0bdb /xen/common/tmem_xen.c
parent    f697f2fc18f11c28cffc61f39025c3bfa92d376d (diff)
tmem: partial adjustments for x86 16Tb support
Despite the changes below, tmem still has code that assumes it can directly access all memory, or that maps arbitrary amounts of memory which is not directly accessible. I cannot see how to fix this without converting _all_ of its domheap allocations to xenheap ones, and even then I wouldn't be certain there are no other cases where the "all memory is always mapped" assumption gets broken. Therefore, the next patch disables tmem for the time being whenever the full 1:1 mapping is not always visible.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Acked-by: Keir Fraser <keir@xen.org>
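[Editorial note] The hunks below implement the conversion the message describes for the per-CPU buffers: instead of domheap pages made usable via page_to_virt() (valid only while all of memory sits in the direct map), the buffers come from the xenheap, which always carries a mapping. A minimal, hypothetical sketch of the two patterns for illustration (the wrapper functions are not part of the patch; the allocator calls are the real Xen interfaces):

#include <xen/mm.h>

/* Illustration only: old vs. new way of obtaining a directly usable buffer. */
static void *percpu_buf_old(unsigned int order)
{
    /* A domheap page need not be covered by the 1:1 map on large-memory
     * hosts, so the address page_to_virt() yields may not be usable. */
    struct page_info *p = alloc_domheap_pages(NULL, order, 0);

    return p ? page_to_virt(p) : NULL;
}

static void *percpu_buf_new(unsigned int order)
{
    /* Xenheap pages always have a valid virtual mapping; the buffer is
     * later released with free_xenheap_pages(va, order). */
    return alloc_xenheap_pages(order, 0);
}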
Diffstat (limited to 'xen/common/tmem_xen.c')
-rw-r--r--  xen/common/tmem_xen.c  26
1 file changed, 8 insertions, 18 deletions
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 25fbd6c39b..3a1f3c912b 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -393,7 +393,8 @@ static void tmh_persistent_pool_page_put(void *page_va)
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    pi = virt_to_page(page_va);
+    pi = mfn_to_page(domain_page_map_to_mfn(page_va));
+    unmap_domain_page(page_va);
     ASSERT(IS_VALID_PAGE(pi));
     _tmh_free_page_thispool(pi);
 }
@@ -441,39 +442,28 @@ static int cpu_callback(
     {
     case CPU_UP_PREPARE: {
         if ( per_cpu(dstmem, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
-            per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(dstmem, cpu) = alloc_xenheap_pages(dstmem_order, 0);
         if ( per_cpu(workmem, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
-            per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(workmem, cpu) = alloc_xenheap_pages(workmem_order, 0);
         if ( per_cpu(scratch_page, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_page(NULL, 0);
-            per_cpu(scratch_page, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(scratch_page, cpu) = alloc_xenheap_page();
         break;
     }
     case CPU_DEAD:
     case CPU_UP_CANCELED: {
         if ( per_cpu(dstmem, cpu) != NULL )
         {
-            struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
-            free_domheap_pages(p, dstmem_order);
+            free_xenheap_pages(per_cpu(dstmem, cpu), dstmem_order);
             per_cpu(dstmem, cpu) = NULL;
         }
         if ( per_cpu(workmem, cpu) != NULL )
         {
-            struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
-            free_domheap_pages(p, workmem_order);
+            free_xenheap_pages(per_cpu(workmem, cpu), workmem_order);
             per_cpu(workmem, cpu) = NULL;
         }
         if ( per_cpu(scratch_page, cpu) != NULL )
         {
-            free_domheap_page(virt_to_page(per_cpu(scratch_page, cpu)));
+            free_xenheap_page(per_cpu(scratch_page, cpu));
             per_cpu(scratch_page, cpu) = NULL;
         }
         break;
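[Editorial note] The first hunk shows the complementary case: persistent-pool pages stay on the domheap, so their virtual address now comes from a transient mapping rather than the direct map, and virt_to_page() can no longer be used to get back to the page_info. A hedged sketch of that pairing with hypothetical helper names; the get side is an assumed counterpart not shown in this diff, and MFN argument types (plain MFN vs. mfn_t) vary between Xen versions:

#include <xen/domain_page.h>
#include <xen/mm.h>

/* Hypothetical helpers mirroring the put-path change above. */
static void *pool_page_map(struct page_info *pi)
{
    /* Transient mapping instead of relying on the 1:1 map. */
    return map_domain_page(page_to_mfn(pi));
}

static struct page_info *pool_page_unmap(void *page_va)
{
    /* Recover the page_info from the mapping itself, then drop the
     * mapping; the real put path then hands pi to
     * _tmh_free_page_thispool(). */
    struct page_info *pi = mfn_to_page(domain_page_map_to_mfn(page_va));

    unmap_domain_page(page_va);
    return pi;
}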