diff options
author | Jan Beulich <jbeulich@suse.com> | 2013-02-28 11:09:39 +0100 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2013-02-28 11:09:39 +0100 |
commit | 8db1e759556d3a3832f92e91d6c848c5ce2d3fa1 (patch) | |
tree | 4dfe7fd0f989b23a377df70973f569491f618551 /xen/arch/x86/mm.c | |
parent | 703ac3abcfc5f649c038070867ee12c67f730548 (diff) | |
download | xen-8db1e759556d3a3832f92e91d6c848c5ce2d3fa1.tar.gz xen-8db1e759556d3a3832f92e91d6c848c5ce2d3fa1.tar.bz2 xen-8db1e759556d3a3832f92e91d6c848c5ce2d3fa1.zip |
x86: rework hypercall argument translation area setup
... using the new per-domain mapping management functions, adding
destroy_perdomain_mapping() to the previously introduced pair.
Rather than using an order-1 Xen heap allocation, use (currently 2)
individual domain heap pages to populate space in the per-domain
mapping area.
Also fix a benign off-by-one mistake in is_compat_arg_xlat_range().
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/mm.c')
-rw-r--r-- | xen/arch/x86/mm.c | 53 |
1 file changed, 53 insertions(+), 0 deletions(-)
/*
 * Tear down (part of) a per-domain mapping previously populated via
 * create_perdomain_mapping(): clear up to @nr L1 entries starting at
 * virtual address @va, freeing any domheap pages that the per-domain
 * mapping machinery itself allocated.
 *
 * @d:  domain whose per-domain page tables are walked.
 * @va: first virtual address of the range; must lie inside the
 *      per-domain mapping area.
 * @nr: number of (4k) pages to unmap.
 *
 * The page-table pages themselves (L2/L1 tables) are left in place;
 * only leaf entries are emptied.  Compare free_perdomain_mappings().
 */
void destroy_perdomain_mapping(struct domain *d, unsigned long va,
                               unsigned int nr)
{
    const l3_pgentry_t *l3tab, *pl3e;

    /* Range must lie within the per-domain mapping region ... */
    ASSERT(va >= PERDOMAIN_VIRT_START &&
           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
    /*
     * ... and must not cross an L3 boundary: XOR-ing the first and last
     * byte addresses of the range must yield no difference in the
     * L3-offset bits, i.e. a single L2 table covers the whole range.
     */
    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));

    /* Nothing was ever mapped if the domain has no per-domain L3 page. */
    if ( !d->arch.perdomain_l3_pg )
        return;

    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
    pl3e = l3tab + l3_table_offset(va);

    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
    {
        const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
        /* Index of the first L1 entry to clear within the current table. */
        unsigned int i = l1_table_offset(va);

        while ( nr )
        {
            if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
            {
                l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));

                /* Clear entries until the range or this L1 table ends. */
                for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
                {
                    /*
                     * Free the page only when both _PAGE_PRESENT and the
                     * _PAGE_AVAIL0 software bit are set.  NOTE(review):
                     * _PAGE_AVAIL0 presumably tags pages allocated by
                     * create_perdomain_mapping() itself (as opposed to
                     * caller-provided ones, which are merely unmapped) —
                     * confirm against create_perdomain_mapping().
                     */
                    if ( (l1e_get_flags(l1tab[i]) &
                          (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
                         (_PAGE_PRESENT | _PAGE_AVAIL0) )
                        free_domheap_page(l1e_get_page(l1tab[i]));
                    l1tab[i] = l1e_empty();
                }

                unmap_domain_page(l1tab);
            }
            else if ( nr + i < L1_PAGETABLE_ENTRIES )
                /* No L1 table here and the remainder fits in it: done. */
                break;
            else
                /* Skip the rest of this (absent) L1 table's coverage. */
                nr -= L1_PAGETABLE_ENTRIES - i;

            /* Advance to the next L1 table; start from its first entry. */
            ++pl2e;
            i = 0;
        }

        unmap_domain_page(l2tab);
    }

    unmap_domain_page(l3tab);
}