about summary refs log tree commit diff stats
path: root/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
diff options
context:
space:
mode:
Diffstat (limited to 'linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c')
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c  129
1 files changed, 124 insertions, 5 deletions
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
index 641bca708d..cf0488175f 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
@@ -44,8 +44,10 @@
#define pte_offset_kernel pte_offset
#define pud_t pgd_t
#define pud_offset(d, va) d
+#elif defined(CONFIG_X86_64)
+#define pmd_val_ma(v) (v).pmd
#else
-#define pmd_val_ma(v) (v).pud.pgd.pgd;
+#define pmd_val_ma(v) (v).pud.pgd.pgd
#endif
#ifndef CONFIG_XEN_SHADOW_MODE
@@ -64,7 +66,25 @@ void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
u.val = pmd_val_ma(val);
BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
-#endif
+
+#ifdef CONFIG_X86_64
+void xen_l3_entry_update(pud_t *ptr, pud_t val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val.pud;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
+{
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val.pgd;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_XEN_SHADOW_MODE */
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
@@ -82,6 +102,14 @@ void xen_pt_switch(unsigned long ptr)
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
+void xen_new_user_pt(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
void xen_tlb_flush(void)
{
struct mmuext_op op;
@@ -141,7 +169,11 @@ void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
void xen_pgd_pin(unsigned long ptr)
{
struct mmuext_op op;
+#ifdef CONFIG_X86_64
+ op.cmd = MMUEXT_PIN_L4_TABLE;
+#else
op.cmd = MMUEXT_PIN_L2_TABLE;
+#endif
op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
@@ -169,7 +201,41 @@ void xen_pte_unpin(unsigned long ptr)
op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
-#endif
+
+#ifdef CONFIG_X86_64
+void xen_pud_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L3_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pud_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pmd_pin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_PIN_L2_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void xen_pmd_unpin(unsigned long ptr)
+{
+ struct mmuext_op op;
+ op.cmd = MMUEXT_UNPIN_TABLE;
+ op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_XEN_SHADOW_MODE */
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
@@ -180,11 +246,60 @@ void xen_set_ldt(unsigned long ptr, unsigned long len)
BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
+void xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+ /*
+ * Ensure multi-page extents are contiguous in machine memory. This code
+ * could be cleaned up some, and the number of hypercalls reduced.
+ */
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long mfn, i, flags;
+
+ scrub_pages(vstart, 1 << order);
+
+ balloon_lock(flags);
+
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ mfn = pte_mfn(*pte);
+ HYPERVISOR_update_va_mapping(
+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ INVALID_P2M_ENTRY;
+ BUG_ON(HYPERVISOR_dom_mem_op(
+ MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+ }
+
+ /* 2. Get a new contiguous memory extent. */
+ BUG_ON(HYPERVISOR_dom_mem_op(
+ MEMOP_increase_reservation, &mfn, 1, order) != 1);
+
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ HYPERVISOR_update_va_mapping(
+ vstart + (i*PAGE_SIZE),
+ __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+ xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
+ }
+
+ flush_tlb_all();
+
+ balloon_unlock(flags);
+}
+
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
unsigned long allocate_empty_lowmem_region(unsigned long pages)
{
- pgd_t *pgd;
+ pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -209,8 +324,12 @@ unsigned long allocate_empty_lowmem_region(unsigned long pages)
pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
+ pfn_array[i] = pte_mfn(*pte);
+#ifdef CONFIG_X86_64
+ xen_l1_entry_update(pte, 0);
+#else
HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
+#endif
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
}