about summary refs log tree commit diff stats
path: root/xen/arch/x86/mm.c
diff options
context:
space:
mode:
authorJan Beulich <jbeulich@suse.com>2013-01-23 14:06:20 +0100
committerJan Beulich <jbeulich@suse.com>2013-01-23 14:06:20 +0100
commit4b28bf6ae90bd83fd1113d8bdc53c3266ffeb328 (patch)
tree43489d3149200ddb5e03ad01309023727326ff30 /xen/arch/x86/mm.c
parenta8d2b06db7826063df9d04be9d6f928bf2189bd0 (diff)
downloadxen-4b28bf6ae90bd83fd1113d8bdc53c3266ffeb328.tar.gz
xen-4b28bf6ae90bd83fd1113d8bdc53c3266ffeb328.tar.bz2
xen-4b28bf6ae90bd83fd1113d8bdc53c3266ffeb328.zip
x86: re-introduce map_domain_page() et al
This is being done mostly in the form previously used on x86-32, utilizing the second L3 page table slot within the per-domain mapping area for those mappings. It remains to be determined whether that concept is really suitable, or whether instead re-implementing at least the non-global variant from scratch would be better. Also add the helpers {clear,copy}_domain_page() as well as initial uses of them. One question is whether, to exercise the non-trivial code paths, we shouldn't make the trivial shortcuts conditional upon NDEBUG being defined. See the debugging patch at the end of the series. Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/mm.c')
-rw-r--r--xen/arch/x86/mm.c16
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e043ba10b3..9627b076c6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2661,9 +2661,6 @@ static inline int vcpumask_to_pcpumask(
}
}
-#define fixmap_domain_page(mfn) mfn_to_virt(mfn)
-#define fixunmap_domain_page(ptr) ((void)(ptr))
-
long do_mmuext_op(
XEN_GUEST_HANDLE_PARAM(mmuext_op_t) uops,
unsigned int count,
@@ -2983,7 +2980,6 @@ long do_mmuext_op(
case MMUEXT_CLEAR_PAGE: {
struct page_info *page;
- unsigned char *ptr;
page = get_page_from_gfn(d, op.arg1.mfn, NULL, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
@@ -2998,9 +2994,7 @@ long do_mmuext_op(
/* A page is dirtied when it's being cleared. */
paging_mark_dirty(d, page_to_mfn(page));
- ptr = fixmap_domain_page(page_to_mfn(page));
- clear_page(ptr);
- fixunmap_domain_page(ptr);
+ clear_domain_page(page_to_mfn(page));
put_page_and_type(page);
break;
@@ -3008,8 +3002,6 @@ long do_mmuext_op(
case MMUEXT_COPY_PAGE:
{
- const unsigned char *src;
- unsigned char *dst;
struct page_info *src_page, *dst_page;
src_page = get_page_from_gfn(d, op.arg2.src_mfn, NULL, P2M_ALLOC);
@@ -3034,11 +3026,7 @@ long do_mmuext_op(
/* A page is dirtied when it's being copied to. */
paging_mark_dirty(d, page_to_mfn(dst_page));
- src = __map_domain_page(src_page);
- dst = fixmap_domain_page(page_to_mfn(dst_page));
- copy_page(dst, src);
- fixunmap_domain_page(dst);
- unmap_domain_page(src);
+ copy_domain_page(page_to_mfn(dst_page), page_to_mfn(src_page));
put_page_and_type(dst_page);
put_page(src_page);