-rw-r--r--  .rootkeys                                   |   1
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/uaccess.h  |  27
-rw-r--r--  xen/arch/ia64/vmmu.c                        |  52
-rw-r--r--  xen/include/asm-ia64/vmx_uaccess.h          | 156
4 files changed, 232 insertions(+), 4 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index a134ac2d99..390cee03b1 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -1388,6 +1388,7 @@
428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
+42b8e0d63B41CDo2Nqmf8Vt0_RercA xen/include/asm-ia64/vmx_uaccess.h
428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
diff --git a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
index def5aaac47..a81d3aec3f 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
@@ -1,6 +1,17 @@
---- ../../linux-2.6.11/include/asm-ia64/uaccess.h 2005-06-06 10:36:23.000000000 -0600
-+++ include/asm-ia64/uaccess.h 2005-06-10 18:08:06.000000000 -0600
-@@ -60,6 +60,11 @@
+--- ../../linux-2.6.11/include/asm-ia64/uaccess.h 2005-03-02 00:37:53.000000000 -0700
++++ include/asm-ia64/uaccess.h 2005-06-21 21:53:20.000000000 -0600
+@@ -32,6 +32,10 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
++#ifdef CONFIG_VTI
++#include <asm/vmx_uaccess.h>
++#else // CONFIG_VTI
++
+ #include <linux/compiler.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+@@ -60,6 +64,11 @@
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
@@ -12,7 +23,7 @@
#define __access_ok(addr, size, segment) \
({ \
__chk_user_ptr(addr); \
-@@ -67,6 +72,7 @@
+@@ -67,6 +76,7 @@
&& ((segment).seg == KERNEL_DS.seg \
|| likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
})
@@ -20,3 +31,11 @@
#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
static inline int
+@@ -343,6 +353,7 @@
+ __su_ret; \
+ })
+
++#endif // CONFIG_VTI
+ /* Generic code can't deal with the location-relative format that we use for compactness. */
+ #define ARCH_HAS_SORT_EXTABLE
+ #define ARCH_HAS_SEARCH_EXTABLE
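The net effect of the patch above is easiest to see in the patched header's final shape. A minimal sketch, eliding the native definitions, which the patch leaves untouched:

/* include/asm-ia64/uaccess.h after applying the patch (sketch) */
#ifdef CONFIG_VTI
#include <asm/vmx_uaccess.h>   /* vTLB-based accessors for VMX domains */
#else // CONFIG_VTI

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* ... native __access_ok(), __get_user(), __put_user(), etc. ... */

#endif // CONFIG_VTI

/* Generic code can't deal with the location-relative format that we use for compactness. */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE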
diff --git a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
index 60126b23b2..078b1663aa 100644
--- a/xen/arch/ia64/vmmu.c
+++ b/xen/arch/ia64/vmmu.c
@@ -792,3 +792,55 @@ IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key)
return IA64_NO_FAULT;
}
+/*
+ * [FIXME] Is there any effective way to move this routine
+ * into vmx_uaccess.h? struct exec_domain is an incomplete
+ * type there...
+ *
+ * This is the interface to look up the virtual TLB and then
+ * return the corresponding machine address in the 2nd parameter.
+ * The 3rd parameter returns how many bytes are mapped by the
+ * matched vTLB entry, so the caller can copy that much in one pass.
+ *
+ * If the lookup fails, -EFAULT is returned; otherwise 0.
+ * All upper domain-access utilities rely on this routine
+ * to determine the real machine address.
+ *
+ * Yes, put_user and get_user become somewhat slow on top of it.
+ * However, this is the necessary step for any vmx domain virtual
+ * address, since that is a different address space than the HV's.
+ * A short-circuit may be added later for special cases.
+ */
+long
+__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len)
+{
+ unsigned long mpfn, gpfn, m, n = *len;
+ thash_cb_t *vtlb;
+ unsigned long end; /* end of the area mapped by current entry */
+ thash_data_t *entry;
+ struct vcpu *v = current;
+ ia64_rr vrr;
+
+ vtlb = vmx_vcpu_get_vtlb(v);
+ vrr = vmx_vcpu_rr(v, va);
+ entry = vtlb_lookup_ex(vtlb, vrr.rid, va, DSIDE_TLB);
+ if (entry == NULL)
+ return -EFAULT;
+
+ gpfn = (entry->ppn >> (PAGE_SHIFT - 12));
+ gpfn = PAGEALIGN(gpfn, (entry->ps - PAGE_SHIFT));
+ gpfn = gpfn | POFFSET(va >> PAGE_SHIFT, (entry->ps - PAGE_SHIFT));
+
+ mpfn = __gpfn_to_mfn(v->domain, gpfn);
+ m = (mpfn << PAGE_SHIFT) | (va & (PAGE_SIZE - 1));
+ /* machine address may not be contiguous */
+ end = PAGEALIGN(m, PAGE_SHIFT) + PAGE_SIZE;
+ /*end = PAGEALIGN(m, entry->ps) + PSIZE(entry->ps);*/
+ /* The current entry can't map the whole requested area */
+ if ((m + n) > end)
+ n = end - m;
+
+ *ma = m;
+ *len = n;
+ return 0;
+}
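To make the pfn arithmetic above concrete, here is a standalone sketch. PAGEALIGN() and POFFSET() come from the vmmu headers and are not shown in this diff, so their definitions below are assumptions inferred from how they are used; the >> (PAGE_SHIFT - 12) step reflects the IA64 TLB format keeping ppn at 4KB granularity. PAGE_SHIFT = 14 (16KB hypervisor pages) and all values are hypothetical.

#include <stdio.h>

/* Assumed semantics (not part of this diff): clear / keep the low s bits. */
#define PAGEALIGN(x, s) ((x) & ~((1UL << (s)) - 1))
#define POFFSET(x, s)   ((x) &  ((1UL << (s)) - 1))
#define PAGE_SHIFT 14UL              /* assumed 16KB hypervisor pages */

int main(void)
{
    unsigned long ppn = 0x12345;     /* hypothetical ppn from a vTLB entry */
    unsigned long ps  = 16;          /* entry maps 64KB = 4 hypervisor pages */
    unsigned long va  = 0x4000a234;  /* hypothetical guest virtual address */

    /* Convert the 4KB-granular ppn to hypervisor-page units, ... */
    unsigned long gpfn = ppn >> (PAGE_SHIFT - 12);
    /* ... align it down to the start of the large vTLB page, ... */
    gpfn = PAGEALIGN(gpfn, ps - PAGE_SHIFT);
    /* ... then add back which hypervisor page within it va falls in. */
    gpfn |= POFFSET(va >> PAGE_SHIFT, ps - PAGE_SHIFT);

    printf("gpfn = %#lx\n", gpfn);
    return 0;
}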
diff --git a/xen/include/asm-ia64/vmx_uaccess.h b/xen/include/asm-ia64/vmx_uaccess.h
new file mode 100644
index 0000000000..a6e27425f6
--- /dev/null
+++ b/xen/include/asm-ia64/vmx_uaccess.h
@@ -0,0 +1,156 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
+ * across the domain/hypervisor boundary.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Note: For a vmx-enabled environment, the poor man's policy is actually
+ * useless, since the HV resides in a completely different address space
+ * than the domain. So the only way to do the access is to search the
+ * vTLB first, and access the identity-mapped address on a hit.
+ *
+ * Copyright (c) 2004, Intel Corporation.
+ * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
+ */
+
+#ifndef __ASM_IA64_VMX_UACCESS_H__
+#define __ASM_IA64_VMX_UACCESS_H__
+
+#include <xen/compiler.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+
+#include <asm/intrinsics.h>
+#include <asm/vmmu.h>
+
+/* Since the HV never accesses domain space directly, most security checks
+ * can be dummies now
+ */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+/* For backward compatibility */
+#define __access_ok(addr, size, segment) 1
+#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user((x), (ptr))
+#define get_user(x, ptr) __get_user((x), (ptr))
+
+#define __put_user(x, ptr) __do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __do_get_user((x), (ptr), sizeof(*(ptr)))
+
+/* TODO: add specific unaligned access support later. Assuming accesses
+ * aligned at 1, 2, 4 or 8 bytes for now, it's impossible for an operand
+ * to span two vTLB entries
+ */
+extern long
+__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
+
+#define __do_put_user(x, ptr, size) \
+({ \
+ __typeof__ (x) __pu_x = (x); \
+ __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__ (size) __pu_size = (size); \
+ unsigned long __pu_ma; \
+ long __pu_err; \
+ \
+ __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr, \
+ &__pu_ma, &__pu_size); \
+ __pu_err ? (__pu_err = -EFAULT) : \
+ (*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x); \
+ __pu_err; \
+})
+
+#define __do_get_user(x, ptr, size) \
+({ \
+ __typeof__ (x) __gu_x = (x); \
+ __typeof__ (*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__ (size) __gu_size = (size); \
+ unsigned long __gu_ma; \
+ long __gu_err; \
+ \
+ __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr, \
+ &__gu_ma, &__gu_size); \
+ __gu_err ? (__gu_err = -EFAULT) : \
+ (x = *((__typeof__ (*(ptr)) *)__va(__gu_ma))); \
+ __gu_err; \
+})
+
+/* More complex copy from domain */
+#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
+#define copy_to_user(to, from, n) __copy_to_user((to), (from), (n))
+#define clear_user(to, n) __clear_user((to), (n))
+
+static inline unsigned long
+__copy_from_user(void *to, void *from, unsigned long n)
+{
+ unsigned long ma, i;
+
+ i = n;
+ while(!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
+ memcpy(to, (void *)__va(ma), i);
+ n -= i;
+ if (!n)
+ break;
+ from += i;
+ to += i;
+ i = n;
+ }
+ return n;
+}
+
+static inline unsigned long
+__copy_to_user(void *to, void *from, unsigned long n)
+{
+ unsigned long ma, i;
+
+ i = n;
+ while(!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
+ memcpy((void *)__va(ma), from, i);
+ n -= i;
+ if (!n)
+ break;
+ from += i;
+ to += i;
+ i = n;
+ }
+ return n;
+}
+
+static inline unsigned long
+__clear_user(void *to, unsigned long n)
+{
+ unsigned long ma, i;
+
+ i = n;
+ while(!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
+ memset((void *)__va(ma), 0, i);
+ n -= i;
+ if (!n)
+ break;
+ to += i;
+ i = n;
+ }
+ return n;
+}
+
+#endif // __ASM_IA64_VMX_UACCESS_H__
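For context, a hedged sketch of how hypervisor-side code might consume these accessors. Only copy_from_user() and put_user() come from the header above; the struct, function name, and calling convention are hypothetical. Note that, unlike the native path, there is no fault-and-fixup here: a vTLB miss anywhere in the range simply surfaces as -EFAULT (or as a short copy count).

#include <asm/vmx_uaccess.h>

/* Hypothetical guest-supplied descriptor. */
struct guest_desc {
    unsigned long gva;
    unsigned long size;
};

/* Copy a descriptor out of a VMX domain, then acknowledge it. */
static long fetch_guest_desc(struct guest_desc *dst,
                             struct guest_desc *guest_ptr,
                             unsigned long *guest_status)
{
    /* copy_from_user() returns the number of bytes NOT copied; any
     * shortfall means a vTLB lookup failed partway through the range. */
    if (copy_from_user(dst, guest_ptr, sizeof(*dst)) != 0)
        return -EFAULT;

    /* put_user() evaluates to 0 on success, -EFAULT on a vTLB miss. */
    return put_user(1UL, guest_status);
}

The empty __ex_table section emitted at the top of the header presumably exists only so that generic exception-table code links cleanly; since every domain access goes through __domain_va_to_ma() first, no fixup entries are ever needed.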