aboutsummaryrefslogtreecommitdiffstats
path: root/xen/arch/ia64/vmmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'xen/arch/ia64/vmmu.c')
-rw-r--r--xen/arch/ia64/vmmu.c52
1 files changed, 52 insertions, 0 deletions
diff --git a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
index 60126b23b2..078b1663aa 100644
--- a/xen/arch/ia64/vmmu.c
+++ b/xen/arch/ia64/vmmu.c
@@ -792,3 +792,55 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
return IA64_NO_FAULT;
}
+/*
+ * [FIXME] Is there any effective way to move this routine
+ * into vmx_uaccess.h? struct exec_domain is incomplete type
+ * in that way...
+ *
+ * This is the interface to lookup virtual TLB, and then
+ * return corresponding machine address in 2nd parameter.
+ * The 3rd parameter contains how many bytes mapped by
+ * matched vTLB entry, thus allowing the caller to copy more at once.
+ *
+ * If the lookup fails, -EFAULT is returned; otherwise return
+ * 0. All upper domain access utilities rely on this routine
+ * to determine the real machine address.
+ *
+ * Yes, put_user and get_user seem to be somehow slowed by it.
+ * However it's the necessary steps for any vmx domain virtual
+ * address, since that's a different address space from the HV's.
+ * Later some short-circuit may be created for special case
+ */
+long
+__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
+{
+ unsigned long mpfn, gpfn, m, n = *len;
+ thash_cb_t *vtlb;
+ unsigned long end; /* end of the area mapped by current entry */
+ thash_data_t *entry;
+ struct vcpu *v = current;
+ ia64_rr vrr;
+
+ /* Find the data-side vTLB entry covering va in the current
+  * vcpu's virtual TLB, using the region register's rid as key. */
+ vtlb = vmx_vcpu_get_vtlb(v);
+ vrr = vmx_vcpu_rr(v, va);
+ entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
+ if (entry == NULL)
+ return -EFAULT; /* no vTLB entry maps va: caller gets -EFAULT */
+
+ /* Compose the guest pfn. NOTE(review): entry->ppn appears to be
+  * kept in 4K-frame units (hence >> (PAGE_SHIFT-12)); the frame
+  * number is aligned down to the entry's page size (entry->ps),
+  * then OR-ed with va's page-number offset inside that region
+  * — confirm against thash_data_t's definition. */
+ gpfn =(entry->ppn>>(PAGE_SHIFT-12));
+ gpfn =PAGEALIGN(gpfn,(entry->ps-PAGE_SHIFT));
+ gpfn = gpfn | POFFSET(va>>PAGE_SHIFT,(entry->ps-PAGE_SHIFT));
+
+ /* Translate guest pfn to machine pfn, then add the byte offset
+  * of va within its page to form the machine address. */
+ mpfn = __gpfn_to_mfn(v->domain, gpfn);
+ m = (mpfn<<PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
+ /* machine address may be not continuous */
+ end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
+ /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
+ /* Current entry can't map all requested area */
+ if ((m + n) > end)
+ n = end - m;
+
+ /* Report the machine address and how many bytes of it are
+  * contiguously mapped; caller may loop to copy the remainder. */
+ *ma = m;
+ *len = n;
+ return 0;
+}