author     Jan Beulich <jbeulich@suse.com>  2013-02-28 11:09:39 +0100
committer  Jan Beulich <jbeulich@suse.com>  2013-02-28 11:09:39 +0100
commit     8db1e759556d3a3832f92e91d6c848c5ce2d3fa1 (patch)
tree       4dfe7fd0f989b23a377df70973f569491f618551
parent     703ac3abcfc5f649c038070867ee12c67f730548 (diff)
x86: rework hypercall argument translation area setup
... using the new per-domain mapping management functions, adding
destroy_perdomain_mapping() to the previously introduced pair.

Rather than using an order-1 Xen heap allocation, use (currently 2)
individual domain heap pages to populate space in the per-domain mapping
area.

Also fix a benign off-by-one mistake in is_compat_arg_xlat_range().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
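To make the "benign off-by-one" concrete: for any non-zero size, the second
half of the old check already rejects an offset equal to
COMPAT_ARG_XLAT_SIZE, so the old <= and the corrected < comparisons only
disagree for a zero-length access starting exactly at the end of the
two-page area, which is presumably why the mistake is described as benign.
A minimal standalone sketch of that corner case (not part of the patch; a
4 KiB page size is assumed):

/* Illustration only, not Xen code: contrast the old and new first half of
 * the is_compat_arg_xlat_range() check for the one case where they differ. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE            4096UL
#define COMPAT_ARG_XLAT_SIZE (2 * PAGE_SIZE)

static bool in_range_old(unsigned long off, unsigned long size)
{
    return off <= COMPAT_ARG_XLAT_SIZE &&          /* old, off-by-one */
           off + size <= COMPAT_ARG_XLAT_SIZE;
}

static bool in_range_new(unsigned long off, unsigned long size)
{
    return off < COMPAT_ARG_XLAT_SIZE &&           /* fixed */
           off + size <= COMPAT_ARG_XLAT_SIZE;
}

int main(void)
{
    /* Zero bytes requested at an offset exactly at the end of the area. */
    unsigned long off = COMPAT_ARG_XLAT_SIZE, size = 0;

    printf("old: %d  new: %d\n", in_range_old(off, size),
           in_range_new(off, size));               /* prints "old: 1  new: 0" */
    return 0;
}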
-rw-r--r--  xen/arch/x86/mm.c                      53
-rw-r--r--  xen/arch/x86/usercopy.c                 2
-rw-r--r--  xen/arch/x86/x86_64/mm.c               20
-rw-r--r--  xen/include/asm-x86/config.h            9
-rw-r--r--  xen/include/asm-x86/domain.h            3
-rw-r--r--  xen/include/asm-x86/mm.h                2
-rw-r--r--  xen/include/asm-x86/x86_64/uaccess.h    5
7 files changed, 71 insertions, 23 deletions
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 335a85ced8..7d5c25b841 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5657,6 +5657,59 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
     return rc;
 }
 
+void destroy_perdomain_mapping(struct domain *d, unsigned long va,
+                               unsigned int nr)
+{
+    const l3_pgentry_t *l3tab, *pl3e;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+    if ( !d->arch.perdomain_l3_pg )
+        return;
+
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    pl3e = l3tab + l3_table_offset(va);
+
+    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
+    {
+        const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+        unsigned int i = l1_table_offset(va);
+
+        while ( nr )
+        {
+            if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
+            {
+                l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+
+                for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
+                {
+                    if ( (l1e_get_flags(l1tab[i]) &
+                          (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+                         (_PAGE_PRESENT | _PAGE_AVAIL0) )
+                        free_domheap_page(l1e_get_page(l1tab[i]));
+                    l1tab[i] = l1e_empty();
+                }
+
+                unmap_domain_page(l1tab);
+            }
+            else if ( nr + i < L1_PAGETABLE_ENTRIES )
+                break;
+            else
+                nr -= L1_PAGETABLE_ENTRIES - i;
+
+            ++pl2e;
+            i = 0;
+        }
+
+        unmap_domain_page(l2tab);
+    }
+
+    unmap_domain_page(l3tab);
+}
+
 void free_perdomain_mappings(struct domain *d)
 {
     l3_pgentry_t *l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
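A note on the freeing condition in destroy_perdomain_mapping() above: a
backing page is returned to the domain heap only when its L1 entry carries
_PAGE_AVAIL0 in addition to _PAGE_PRESENT, apparently marking pages the
per-domain mapping machinery allocated on its own behalf (as opposed to
caller-provided mappings); either way the entry itself is cleared. A
simplified standalone model of that ownership test (not Xen code; the flag
values mirror the usual x86 PTE bit positions):

/* Model only: decide whether an L1 entry's backing page should be freed. */
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_PRESENT 0x001u
#define _PAGE_AVAIL0  0x200u   /* first software-available PTE bit (bit 9) */

static bool should_free_backing_page(uint32_t l1e_flags)
{
    const uint32_t mask = _PAGE_PRESENT | _PAGE_AVAIL0;
    return (l1e_flags & mask) == mask;
}

int main(void)
{
    /* Present-only (caller-owned) entries are left alone; entries that are
     * present and marked AVAIL0 have their page freed. */
    return should_free_backing_page(_PAGE_PRESENT) == false &&
           should_free_backing_page(_PAGE_PRESENT | _PAGE_AVAIL0) == true
               ? 0 : 1;
}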
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index 8bd9469c9e..b79202bc98 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -6,8 +6,8 @@
  * Copyright 2002 Andi Kleen <ak@suse.de>
  */
 
-#include <xen/config.h>
 #include <xen/lib.h>
+#include <xen/sched.h>
 #include <asm/uaccess.h>
 
 unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 8a5e1cc0fa..8cd0d30e84 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -832,27 +832,17 @@ void __init zap_low_mappings(void)
                      __PAGE_HYPERVISOR);
 }
 
-void *compat_arg_xlat_virt_base(void)
-{
-    return current->arch.compat_arg_xlat;
-}
-
 int setup_compat_arg_xlat(struct vcpu *v)
 {
-    unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
-
-    v->arch.compat_arg_xlat = alloc_xenheap_pages(order,
-                                                  MEMF_node(vcpu_to_node(v)));
-
-    return v->arch.compat_arg_xlat ? 0 : -ENOMEM;
+    return create_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+                                    PFN_UP(COMPAT_ARG_XLAT_SIZE),
+                                    NULL, NIL(struct page_info *));
 }
 
 void free_compat_arg_xlat(struct vcpu *v)
 {
-    unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
-
-    free_xenheap_pages(v->arch.compat_arg_xlat, order);
-    v->arch.compat_arg_xlat = NULL;
+    destroy_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+                              PFN_UP(COMPAT_ARG_XLAT_SIZE));
 }
 
 void cleanup_frame_table(struct mem_hotadd_info *info)
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index dc928edf75..0a5f031303 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -212,7 +212,7 @@ extern unsigned char boot_edid_info[128];
 /* Slot 260: per-domain mappings (including map cache). */
 #define PERDOMAIN_VIRT_START    (PML4_ADDR(260))
 #define PERDOMAIN_SLOT_MBYTES   (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
-#define PERDOMAIN_SLOTS         2
+#define PERDOMAIN_SLOTS         3
 #define PERDOMAIN_VIRT_SLOT(s)  (PERDOMAIN_VIRT_START + (s) * \
                                  (PERDOMAIN_SLOT_MBYTES << 20))
 /* Slot 261: machine-to-phys conversion table (256GB). */
@@ -311,6 +311,13 @@ extern unsigned long xen_phys_start;
 #define MAPCACHE_VIRT_END       (MAPCACHE_VIRT_START + \
                                  MAPCACHE_ENTRIES * PAGE_SIZE)
 
+/* Argument translation area. The third per-domain-mapping sub-area. */
+#define ARG_XLAT_VIRT_START     PERDOMAIN_VIRT_SLOT(2)
+/* Allow for at least one guard page (COMPAT_ARG_XLAT_SIZE being 2 pages): */
+#define ARG_XLAT_VA_SHIFT       (2 + PAGE_SHIFT)
+#define ARG_XLAT_START(v)       \
+    (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
+
 #define ELFSIZE 64
 
 #define ARCH_CRASH_SAVE_VMCOREINFO
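The new constants give every vCPU a stride of 1 << ARG_XLAT_VA_SHIFT bytes
within per-domain slot 2: with 4 KiB pages that is 16 KiB per vCPU, of which
setup_compat_arg_xlat() populates only PFN_UP(COMPAT_ARG_XLAT_SIZE) == 2
pages, leaving the remaining two unmapped as the guard space the comment
asks for. A standalone sketch checking that arithmetic (PAGE_SHIFT assumed
to be 12; offsets are taken relative to ARG_XLAT_VIRT_START):

/* Sketch only, not Xen code: per-vCPU layout implied by the ARG_XLAT_* macros. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT           12
#define PAGE_SIZE            (1UL << PAGE_SHIFT)
#define COMPAT_ARG_XLAT_SIZE (2 * PAGE_SIZE)
#define ARG_XLAT_VA_SHIFT    (2 + PAGE_SHIFT)
#define PFN_UP(x)            (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Offset of vCPU vcpu_id's translation area from ARG_XLAT_VIRT_START. */
#define ARG_XLAT_OFFSET(vcpu_id) ((unsigned long)(vcpu_id) << ARG_XLAT_VA_SHIFT)

int main(void)
{
    unsigned long stride = ARG_XLAT_OFFSET(1) - ARG_XLAT_OFFSET(0);

    assert(stride == 16384);                                /* 16 KiB per vCPU */
    assert(PFN_UP(COMPAT_ARG_XLAT_SIZE) == 2);              /* 2 pages mapped */
    assert(stride - COMPAT_ARG_XLAT_SIZE == 2 * PAGE_SIZE); /* 2-page guard gap */

    printf("vCPU 3 area starts at ARG_XLAT_VIRT_START + 0x%lx\n",
           ARG_XLAT_OFFSET(3));                             /* 0xc000 */
    return 0;
}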
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 30efe33220..f91f662161 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -442,9 +442,6 @@ struct arch_vcpu
     /* A secondary copy of the vcpu time info. */
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
-
-    void *compat_arg_xlat;
-
 } __cacheline_aligned;
 
 /* Shorthands to improve code legibility. */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 3c65a7c1aa..fd9d654433 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -579,6 +579,8 @@ int map_ldt_shadow_page(unsigned int);
 int create_perdomain_mapping(struct domain *, unsigned long va,
                              unsigned int nr, l1_pgentry_t **,
                              struct page_info **);
+void destroy_perdomain_mapping(struct domain *, unsigned long va,
+                               unsigned int nr);
 void free_perdomain_mappings(struct domain *);
 
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
diff --git a/xen/include/asm-x86/x86_64/uaccess.h b/xen/include/asm-x86/x86_64/uaccess.h
index b82ad9da47..953abe7693 100644
--- a/xen/include/asm-x86/x86_64/uaccess.h
+++ b/xen/include/asm-x86/x86_64/uaccess.h
@@ -1,16 +1,15 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
 
-#define COMPAT_ARG_XLAT_VIRT_BASE compat_arg_xlat_virt_base()
+#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))
 #define COMPAT_ARG_XLAT_SIZE      (2*PAGE_SIZE)
 struct vcpu;
-void *compat_arg_xlat_virt_base(void);
 int setup_compat_arg_xlat(struct vcpu *v);
 void free_compat_arg_xlat(struct vcpu *v);
 
 #define is_compat_arg_xlat_range(addr, size) ({                               \
     unsigned long __off;                                                      \
     __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
-    (__off <= COMPAT_ARG_XLAT_SIZE) &&                                        \
+    (__off < COMPAT_ARG_XLAT_SIZE) &&                                         \
     ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
 })