author     Jan Beulich <jbeulich@suse.com>  2013-07-15 14:17:56 +0200
committer  Jan Beulich <jbeulich@suse.com>  2013-07-15 14:17:56 +0200
commit     8bfaa2c23fb96d8d6bc5c0383373f9be918b82ec (patch)
tree       12b3972374a80addd69f0d786f74233383016cdb /xen/arch/x86/x86_64/mm.c
parent     395f777ae0eed67b03596fe38d6d90f307ddd036 (diff)
x86: add locking to map_pages_to_xen()
While boot time calls don't need this, run time uses of the function
which may result in L2 page tables getting populated need to be
serialized to avoid two CPUs populating the same L2 (or L3) entry,
overwriting each other's results.

This is expected to fix what would seem to be a regression from commit
b0581b92 ("x86: make map_domain_page_global() a simple wrapper around
vmap()"), albeit that change only made more readily visible the already
existing issue.

This patch intentionally does not
- add locking to the page table de-allocation logic in
  destroy_xen_mappings() (the only user having potential races here,
  msix_put_fixmap(), gets converted to use __set_fixmap() instead)
- avoid races between super page splitting and reconstruction in
  map_pages_to_xen() (no such uses exist; races between multiple
  splitting attempts or between multiple reconstruction attempts are
  being taken care of)

If we wanted to take care of these, we'd need to alter the behavior
of virt_to_xen_l?e() - they would need to return with the lock held
then.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
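[Editor's note] For illustration, below is a minimal sketch of the
allocate-then-recheck-under-lock pattern the message describes, applied
to the virt_to_xen_l3e() shown being removed from this file further
down (the full commit relocates these helpers; only the removal is
visible in this per-file diff). The lock name map_pgdir_lock and the
exact structure are assumptions, not a quote of the patch:

/*
 * Sketch only, not the literal patch: the lock name and the precise
 * structure are assumed; this per-file diff shows the helpers solely
 * being removed from x86_64/mm.c.
 */
static DEFINE_SPINLOCK(map_pgdir_lock);

l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
{
    l4_pgentry_t *pl4e = &idle_pg_table[l4_table_offset(v)];

    if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
    {
        l3_pgentry_t *pl3e = alloc_xen_pagetable();

        if ( !pl3e )
            return NULL;
        clear_page(pl3e);

        spin_lock(&map_pgdir_lock);
        /* Re-check under the lock: another CPU may have won the race. */
        if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
        {
            l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
            pl3e = NULL; /* the page is now owned by the page table */
        }
        spin_unlock(&map_pgdir_lock);

        if ( pl3e ) /* lost the race: discard the unused page */
            free_xen_pagetable(pl3e);
    }

    return l4e_to_l3e(*pl4e) + l3_table_offset(v);
}

virt_to_xen_l2e() and virt_to_xen_l1e() would follow the same shape one
level down. As the message notes, boot time callers need no such
serialization, so the lock only matters for run time population.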
Diffstat (limited to 'xen/arch/x86/x86_64/mm.c')
-rw-r--r--  xen/arch/x86/x86_64/mm.c  62
1 file changed, 0 insertions, 62 deletions
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 96a033bf88..26cb998b0c 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -65,68 +65,6 @@ int __mfn_valid(unsigned long mfn)
                            pdx_group_valid));
 }
-l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
-{
-    l4_pgentry_t *pl4e;
-
-    pl4e = &idle_pg_table[l4_table_offset(v)];
-    if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
-    {
-        l3_pgentry_t *pl3e = alloc_xen_pagetable();
-
-        if ( !pl3e )
-            return NULL;
-        clear_page(pl3e);
-        l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
-    }
-
-    return l4e_to_l3e(*pl4e) + l3_table_offset(v);
-}
-
-l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
-{
-    l3_pgentry_t *pl3e;
-
-    pl3e = virt_to_xen_l3e(v);
-    if ( !pl3e )
-        return NULL;
-
-    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
-    {
-        l2_pgentry_t *pl2e = alloc_xen_pagetable();
-
-        if ( !pl2e )
-            return NULL;
-        clear_page(pl2e);
-        l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
-    }
-
-    BUG_ON(l3e_get_flags(*pl3e) & _PAGE_PSE);
-    return l3e_to_l2e(*pl3e) + l2_table_offset(v);
-}
-
-l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
-{
-    l2_pgentry_t *pl2e;
-
-    pl2e = virt_to_xen_l2e(v);
-    if ( !pl2e )
-        return NULL;
-
-    if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
-    {
-        l1_pgentry_t *pl1e = alloc_xen_pagetable();
-
-        if ( !pl1e )
-            return NULL;
-        clear_page(pl1e);
-        l2e_write(pl2e, l2e_from_paddr(__pa(pl1e), __PAGE_HYPERVISOR));
-    }
-
-    BUG_ON(l2e_get_flags(*pl2e) & _PAGE_PSE);
-    return l2e_to_l1e(*pl2e) + l1_table_offset(v);
-}
-
 void *do_page_walk(struct vcpu *v, unsigned long addr)
 {
     unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
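
[Editor's note] The message's other change, converting msix_put_fixmap()
away from destroy_xen_mappings(), lives in the MSI code and is outside
this per-file diff. A hedged sketch of the idea follows; the helper
name and arguments are assumptions, only __set_fixmap() itself is named
by the commit message:

/*
 * Hypothetical sketch of the msix_put_fixmap() conversion the message
 * describes; msi.c is not part of this diff, so the function name and
 * arguments here are assumed.
 */
static void msix_fixmap_release(unsigned int idx)
{
    /* Previously something along the lines of:
     *   destroy_xen_mappings(fix_to_virt(idx),
     *                        fix_to_virt(idx) + PAGE_SIZE);
     * which could reach the (still unlocked) page-table
     * de-allocation logic. */
    __set_fixmap(idx, 0, 0); /* clear the slot in place: MFN 0, no flags */
}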