author     djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>  2005-06-17 15:26:47 +0000
committer  djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>  2005-06-17 15:26:47 +0000
commit     c05a8d0f5539e99d4fd37e3f6080dbf3aab1c1e9
tree       c77fc21cb38990a9b2b180f17a381637a3109cf9
parent     f20097a1a336b78fb999abafcd8466215e3a86f5
bitkeeper revision 1.1713.2.2 (42b2ebb7w1AbEWudFq4LvJe0d7ByxQ)
two new files for VTI patch

Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
-rw-r--r--  .rootkeys                        2
-rw-r--r--  xen/arch/ia64/mm.c             141
-rw-r--r--  xen/arch/ia64/vmx_hypercall.c  186
3 files changed, 329 insertions(+), 0 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index 510c4c6ca6..731c728444 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -1109,6 +1109,7 @@
421098b3ys5GAr4z6_H1jD33oem82g xen/arch/ia64/irq.c
4272a8e4lavI6DrTvqaIhXeR5RuKBw xen/arch/ia64/ivt.S
421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
+42b2eaeez20voHWlBDjrqORiNg6uhg xen/arch/ia64/mm.c
421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
428b9f38Gp0KcPokG9Nq5v1rGk2FkA xen/arch/ia64/mmio.c
425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
@@ -1207,6 +1208,7 @@
428b9f38PglyXM-mJJfo19ycuQrEhw xen/arch/ia64/vlsapic.c
428b9f38EmpBsMHL3WbOZoieteBGdQ xen/arch/ia64/vmmu.c
428b9f38hU-X5aX0MIY3EU0Yw4PjcA xen/arch/ia64/vmx_entry.S
+42b2eaf3YR7Sfx76IvKeqfHJiU6qXw xen/arch/ia64/vmx_hypercall.c
428b9f38S76bWI96g7uPLmE-uAcmdg xen/arch/ia64/vmx_init.c
428b9f385AMSyCRYBsckQClQY4ZgHA xen/arch/ia64/vmx_interrupt.c
428b9f380IOjPmj0N6eelH-WJjl1xg xen/arch/ia64/vmx_ivt.S
diff --git a/xen/arch/ia64/mm.c b/xen/arch/ia64/mm.c
new file mode 100644
index 0000000000..755596f89e
--- /dev/null
+++ b/xen/arch/ia64/mm.c
@@ -0,0 +1,141 @@
+/******************************************************************************
+ * arch/ia64/mm.c
+ *
+ * Copyright (c) 2002-2005 K A Fraser
+ * Copyright (c) 2004 Christian Limpach
+ * Copyright (c) 2005, Intel Corporation.
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * A description of the x86 page table API:
+ *
+ * Domains trap to do_mmu_update with a list of update requests.
+ * This is a list of (ptr, val) pairs, where the requested operation
+ * is *ptr = val.
+ *
+ * Reference counting of pages:
+ * ----------------------------
+ * Each page has two refcounts: tot_count and type_count.
+ *
+ * TOT_COUNT is the obvious reference count. It counts all uses of a
+ * physical page frame by a domain, including uses as a page directory,
+ * a page table, or simple mappings via a PTE. This count prevents a
+ * domain from releasing a frame back to the free pool when it still holds
+ * a reference to it.
+ *
+ * TYPE_COUNT is more subtle. A frame can be put to one of three
+ * mutually-exclusive uses: it might be used as a page directory, or a
+ * page table, or it may be mapped writable by the domain [of course, a
+ * frame may not be used in any of these three ways!].
+ * So, type_count is a count of the number of times a frame is being
+ * referred to in its current incarnation. Therefore, a page can only
+ * change its type when its type count is zero.
+ *
+ * Pinning the page type:
+ * ----------------------
+ * The type of a page can be pinned/unpinned with the commands
+ * MMUEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
+ * pinning is not reference counted, so it can't be nested).
+ * This is useful to prevent a page's type count falling to zero, at which
+ * point safety checks would need to be carried out next time the count
+ * is increased again.
+ *
+ * A further note on writable page mappings:
+ * -----------------------------------------
+ * For simplicity, the count of writable mappings for a page may not
+ * correspond to reality. The 'writable count' is incremented for every
+ * PTE which maps the page with the _PAGE_RW flag set. However, for
+ * write access to be possible the page directory entry must also have
+ * its _PAGE_RW bit set. We do not check this as it complicates the
+ * reference counting considerably [consider the case of multiple
+ * directory entries referencing a single page table, some with the RW
+ * bit set, others not -- it starts getting a bit messy].
+ * In normal use, this simplification shouldn't be a problem.
+ * However, the logic can be added if required.
+ *
+ * One more note on read-only page mappings:
+ * -----------------------------------------
+ * We want domains to be able to map pages for read-only access. The
+ * main reason is that page tables and directories should be readable
+ * by a domain, but it would not be safe for them to be writable.
+ * However, domains have free access to rings 1 & 2 of the Intel
+ * privilege model. In terms of page protection, these are considered
+ * to be part of 'supervisor mode'. The WP bit in CR0 controls whether
+ * read-only restrictions are respected in supervisor mode -- if the
+ * bit is clear then any mapped page is writable.
+ *
+ * We get round this by always setting the WP bit and disallowing
+ * updates to it. This is very unlikely to cause a problem for guest
+ * OS's, which will generally use the WP bit to simplify copy-on-write
+ * implementation (in that case, OS wants a fault when it writes to
+ * an application-supplied buffer).
+ */
+
+#include <xen/config.h>
+#include <public/xen.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <xen/errno.h>
+#include <asm/vmx_vcpu.h>
+#include <asm/vmmu.h>
+#include <asm/regionreg.h>
+
+/*
+ * ureqs->ptr is the guest virtual address, ureqs->val is the PTE value
+ * (see the guest-side caller sketch following this file's diff).
+ */
+#ifdef CONFIG_VTI
+int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom)
+{
+ int i,cmd;
+ u64 mfn, gpfn;
+ VCPU *vcpu;
+ mmu_update_t req;
+ ia64_rr rr;
+ thash_cb_t *hcb;
+ thash_data_t entry={0};
+ vcpu = current;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ for ( i = 0; i < count; i++ )
+ {
+ copy_from_user(&req, ureqs, sizeof(req));
+ cmd = req.ptr&3;
+ req.ptr &= ~3;
+ if(cmd ==MMU_NORMAL_PT_UPDATE){
+ entry.page_flags = req.val;
+ entry.locked = 1;
+ entry.tc = 1;
+ entry.cl = DSIDE_TLB;
+ rr = vmx_vcpu_rr(vcpu, req.ptr);
+ entry.ps = rr.ps;
+ entry.rid = rr.rid;
+ vtlb_insert(hcb, &entry, req.ptr);
+ }else if(cmd == MMU_MACHPHYS_UPDATE){
+ mfn = req.ptr >>PAGE_SHIFT;
+ gpfn = req.val;
+ set_machinetophys(mfn,gpfn);
+ }else{
+ printf("Unkown command of mmu_update:ptr: %lx,val: %lx \n",req.ptr,req.val);
+ while(1);
+ }
+ ureqs ++;
+ }
+ return 0;
+}
+#endif
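
For reference, do_mmu_update() above consumes an array of (ptr, val) requests in
which the low two bits of ptr select the command.  Below is a minimal guest-side
caller sketch, assuming the MMU_NORMAL_PT_UPDATE / MMU_MACHPHYS_UPDATE encodings
and DOMID_SELF from public/xen.h; the stub ia64_hypercall_mmu_update() (which
would load r16-r19 and trap to the hypervisor) and remap_one_page() are
hypothetical names used only for illustration, not part of this patch.

    #include <public/xen.h>         /* mmu_update_t, MMU_* commands, DOMID_SELF */
    #include <asm/page.h>           /* PAGE_SHIFT */

    /* Hypothetical guest stub: loads r16-r19 and traps to Xen; result in r8. */
    extern long ia64_hypercall_mmu_update(mmu_update_t *reqs, unsigned long count,
                                          unsigned long *done,
                                          unsigned long foreigndom);

    static long remap_one_page(unsigned long va, unsigned long pte,
                               unsigned long mfn, unsigned long gpfn)
    {
        mmu_update_t req[2];
        unsigned long done = 0;

        /* The command is carried in bits [1:0] of ptr, as decoded above. */
        req[0].ptr = (va & ~3UL) | MMU_NORMAL_PT_UPDATE;        /* install va -> pte */
        req[0].val = pte;

        req[1].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; /* M2P: mfn -> gpfn */
        req[1].val = gpfn;

        return ia64_hypercall_mmu_update(req, 2, &done, DOMID_SELF);
    }
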
diff --git a/xen/arch/ia64/vmx_hypercall.c b/xen/arch/ia64/vmx_hypercall.c
new file mode 100644
index 0000000000..5e0d8917eb
--- /dev/null
+++ b/xen/arch/ia64/vmx_hypercall.c
@@ -0,0 +1,186 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+/*
+ * vmx_hypercall.c: handling hypercalls from domain
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <asm/vmx_vcpu.h>
+#include <public/xen.h>
+#include <public/event_channel.h>
+#include <asm/vmmu.h>
+#include <asm/tlb.h>
+#include <asm/regionreg.h>
+#include <asm/page.h>
+#include <xen/mm.h>
+
+
+void hyper_not_support(void)
+{
+ VCPU *vcpu=current;
+ vmx_vcpu_set_gr(vcpu, 8, -1, 0);
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+void hyper_mmu_update(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,r33,r34,r35,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ vmx_vcpu_get_gr(vcpu,17,&r33);
+ vmx_vcpu_get_gr(vcpu,18,&r34);
+ vmx_vcpu_get_gr(vcpu,19,&r35);
+ ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+void hyper_dom_mem_op(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,r33,r34,r35,r36;
+ u64 ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ vmx_vcpu_get_gr(vcpu,17,&r33);
+ vmx_vcpu_get_gr(vcpu,18,&r34);
+ vmx_vcpu_get_gr(vcpu,19,&r35);
+ vmx_vcpu_get_gr(vcpu,20,&r36);
+ ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
+ printf("do_dom_mem return value: %lx\n", ret);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+
+void hyper_sched_op(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ ret=do_sched_op(r32);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+void hyper_dom0_op(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ ret=do_dom0_op((dom0_op_t *)r32);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+void hyper_event_channel_op(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ ret=do_event_channel_op((evtchn_op_t *)r32);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+void hyper_xen_version(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ ret=do_xen_version((int )r32);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
+{
+ int i;
+ ia64_rr rr;
+ thash_cb_t *hcb;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ rr = vmx_vcpu_rr(vcpu, va);
+ return thash_lock_tc(hcb, va ,1U<<rr.ps, rr.rid, DSIDE_TLB, lock);
+}
+
+/*
+ * Lock a guest page in the vTLB, so that it is not relinquished by a
+ * recycle session while the hypervisor is servicing the hypercall.
+ */
+void hyper_lock_page(void)
+{
+//TODO:
+ VCPU *vcpu=current;
+ u64 va,lock, ret;
+ vmx_vcpu_get_gr(vcpu,16,&va);
+ vmx_vcpu_get_gr(vcpu,17,&lock);
+ ret=do_lock_page(vcpu, va, lock);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+static int do_set_shared_page(VCPU *vcpu, u64 gpa)
+{
+ u64 shared_info, o_info;
+ if(vcpu->domain!=dom0)
+ return -EPERM;
+ shared_info = __gpa_to_mpa(vcpu->domain, gpa);
+ o_info = (u64)vcpu->domain->shared_info;
+ vcpu->domain->shared_info= (shared_info_t *)__va(shared_info);
+
+ /* Copy existing shared info into the new page */
+ if (o_info) {
+ memcpy((void*)vcpu->domain->shared_info, (void*)o_info, PAGE_SIZE);
+ /* If the original page belongs to the xen heap, relinquish it back
+ * to the xen heap; otherwise leave it to the domain itself to decide.
+ */
+ if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
+ free_xenheap_page(o_info);
+ }
+ return 0;
+}
+
+void hyper_set_shared_page(void)
+{
+ VCPU *vcpu=current;
+ u64 gpa,ret;
+ vmx_vcpu_get_gr(vcpu,16,&gpa);
+
+ ret=do_set_shared_page(vcpu, gpa);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+
+ vmx_vcpu_increment_iip(vcpu);
+}
+
+/*
+void hyper_grant_table_op(void)
+{
+ VCPU *vcpu=current;
+ u64 r32,r33,r34,ret;
+ vmx_vcpu_get_gr(vcpu,16,&r32);
+ vmx_vcpu_get_gr(vcpu,17,&r33);
+ vmx_vcpu_get_gr(vcpu,18,&r34);
+
+ ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
+ vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+}
+*/
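
All of the handlers above follow one convention: hypercall arguments are fetched
from guest registers r16-r20 with vmx_vcpu_get_gr(), the result is written back
to r8 with vmx_vcpu_set_gr(), and vmx_vcpu_increment_iip() advances the guest
past the trapping instruction.  A minimal sketch of how a dispatcher could route
a hypercall number to these handlers follows; the table, its use of
__HYPERVISOR_* indices from public/xen.h, and the vmx_dispatch_hypercall() entry
point are assumptions for illustration, since the real dispatch path is not part
of this patch.

    /* Illustrative only: route a hypercall number to the handlers above.
     * Table name, __HYPERVISOR_* indices and the entry point are assumed. */
    typedef void (*hypercall_fn_t)(void);

    static hypercall_fn_t hyper_call_table[] = {
        [__HYPERVISOR_mmu_update]       = hyper_mmu_update,
        [__HYPERVISOR_dom_mem_op]       = hyper_dom_mem_op,
        [__HYPERVISOR_sched_op]         = hyper_sched_op,
        [__HYPERVISOR_dom0_op]          = hyper_dom0_op,
        [__HYPERVISOR_event_channel_op] = hyper_event_channel_op,
        [__HYPERVISOR_xen_version]      = hyper_xen_version,
    };

    void vmx_dispatch_hypercall(u64 nr)
    {
        if (nr < sizeof(hyper_call_table)/sizeof(hyper_call_table[0]) &&
            hyper_call_table[nr] != NULL)
            hyper_call_table[nr]();     /* args already in guest r16-r20 */
        else
            hyper_not_support();        /* returns -1 in r8, advances IIP */
    }
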