author     xen-ia64.adm@bkbits.net <xen-ia64.adm@bkbits.net>  2005-06-16 19:36:55 +0000
committer  xen-ia64.adm@bkbits.net <xen-ia64.adm@bkbits.net>  2005-06-16 19:36:55 +0000
commit     f20097a1a336b78fb999abafcd8466215e3a86f5 (patch)
tree       2cb7155ec679042908ffffed7e1a213321407294
parent     7752c1a0368f1331775ff7ea5d1d6733da795ff0 (diff)
parent     dc1a6a5f30f03613da5608b0895fe6ee3f056358 (diff)
download   xen-f20097a1a336b78fb999abafcd8466215e3a86f5.tar.gz
           xen-f20097a1a336b78fb999abafcd8466215e3a86f5.tar.bz2
           xen-f20097a1a336b78fb999abafcd8466215e3a86f5.zip
bitkeeper revision 1.1713.2.1 (42b1d4d7vqzNt8h7EyzTEvrzC7m5RA)
Merge bk://xen.bkbits.net/xeno-unstable.bk into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk
Diffstat:
 -rw-r--r--  .rootkeys                                   |   4
 -rw-r--r--  xen/arch/ia64/Makefile                      |   2
 -rw-r--r--  xen/arch/ia64/asm-offsets.c                 |   3
 -rw-r--r--  xen/arch/ia64/dom0_ops.c                    |  58
 -rw-r--r--  xen/arch/ia64/domain.c                      | 226
 -rw-r--r--  xen/arch/ia64/hypercall.c                   |  27
 -rw-r--r--  xen/arch/ia64/hyperprivop.S                 |  54
 -rw-r--r--  xen/arch/ia64/ivt.S                         |  49
 -rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/io.h       |   2
 -rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/ptrace.h   |   8
 -rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/uaccess.h  |  22
 -rw-r--r--  xen/arch/ia64/privop.c                      |  53
 -rw-r--r--  xen/arch/ia64/process.c                     |  50
 -rw-r--r--  xen/arch/ia64/regionreg.c                   |  10
 -rw-r--r--  xen/arch/ia64/tools/mkbuildtree             |   2
 -rw-r--r--  xen/arch/ia64/vcpu.c                        |  22
 -rw-r--r--  xen/arch/ia64/vhpt.c                        |  31
 -rw-r--r--  xen/arch/ia64/vmmu.c                        |  76
 -rw-r--r--  xen/arch/ia64/vmx_ivt.S                     |  84
 -rw-r--r--  xen/arch/ia64/vmx_minstate.h                |   8
 -rw-r--r--  xen/arch/ia64/vmx_process.c                 |   1
 -rw-r--r--  xen/arch/ia64/vtlb.c                        |  96
 -rw-r--r--  xen/arch/ia64/xenmem.c                      |   2
 -rw-r--r--  xen/arch/ia64/xenmisc.c                     |   8
 -rw-r--r--  xen/common/Makefile                         |   4
 -rw-r--r--  xen/include/asm-ia64/config.h               |   5
 -rw-r--r--  xen/include/asm-ia64/domain.h               |  15
 -rw-r--r--  xen/include/asm-ia64/event.h                |  16
 -rw-r--r--  xen/include/asm-ia64/mm.h                   | 140
 -rw-r--r--  xen/include/asm-ia64/tlb.h                  |  10
 -rw-r--r--  xen/include/asm-ia64/vcpu.h                 |   4
 -rw-r--r--  xen/include/asm-ia64/vhpt.h                 |  17
 -rw-r--r--  xen/include/asm-ia64/vmmu.h                 |  31
 -rw-r--r--  xen/include/asm-ia64/vmx_platform.h         |   2
 -rw-r--r--  xen/include/asm-ia64/vmx_ptrace.h           |  97
 -rw-r--r--  xen/include/asm-ia64/vmx_vpd.h              |   1
 -rw-r--r--  xen/include/asm-x86/event.h                 |  16
 -rw-r--r--  xen/include/public/arch-ia64.h              | 138
 -rw-r--r--  xen/include/public/arch-x86_32.h            |   3
 -rw-r--r--  xen/include/public/arch-x86_64.h            |   3
 -rw-r--r--  xen/include/public/xen.h                    |   3
 -rw-r--r--  xen/include/xen/event.h                     |   2
 42 files changed, 908 insertions(+), 497 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index d53afebc33..510c4c6ca6 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -1140,6 +1140,7 @@
425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
425ae5162bIl2Dgd19x-FceB4L9oGw xen/arch/ia64/patch/linux-2.6.11/types.h
+42ae01f01KDfSgVQnscwJ0psRmEaCw xen/arch/ia64/patch/linux-2.6.11/uaccess.h
425ae516cFUNY2jHD46bujcF5NJheA xen/arch/ia64/patch/linux-2.6.11/unaligned.c
421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
@@ -1358,6 +1359,7 @@
421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
+42b1d2d0rkNCmG2nFOnL-OfhJG9mDw xen/include/asm-ia64/event.h
4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
@@ -1382,7 +1384,6 @@
428b9f38is0zTsIm96_BKo4MLw0SzQ xen/include/asm-ia64/vmx_pal_vsa.h
428b9f38iDqbugHUheJrcTCD7zlb4g xen/include/asm-ia64/vmx_phy_mode.h
428b9f38grd_B0AGB1yp0Gi2befHaQ xen/include/asm-ia64/vmx_platform.h
-428b9f38lm0ntDBusHggeQXkx1-1HQ xen/include/asm-ia64/vmx_ptrace.h
428b9f38XgwHchZEpOzRtWfz0agFNQ xen/include/asm-ia64/vmx_vcpu.h
428b9f38tDTTJbkoONcAB9ODP8CiVg xen/include/asm-ia64/vmx_vpd.h
428b9f38_o0U5uJqmxZf_bqi6_PqVw xen/include/asm-ia64/vtm.h
@@ -1406,6 +1407,7 @@
40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
4204e7acwzqgXyTAPKa1nM-L7Ec0Qw xen/include/asm-x86/domain.h
41d3eaaeIBzW621S1oa0c2yk7X43qQ xen/include/asm-x86/e820.h
+42b1d2caFkOByU5n4LuMnT05f3kJFg xen/include/asm-x86/event.h
3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
4294b5eep4lWuDtYUR74gYwt-_FnHA xen/include/asm-x86/genapic.h
diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
index 2e59a7d19d..03f56326fb 100644
--- a/xen/arch/ia64/Makefile
+++ b/xen/arch/ia64/Makefile
@@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
- vtlb.o mmio.o vlsapic.o
+ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
endif
# perfmon.o
# unwind.o needed for kernel unwinding (rare)
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 41bbbc7d5b..4b019209d5 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -75,6 +75,9 @@ void foo(void)
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
+ DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
+ DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
+ DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
BLANK();
diff --git a/xen/arch/ia64/dom0_ops.c b/xen/arch/ia64/dom0_ops.c
index e0b48080bc..c1b1d5c241 100644
--- a/xen/arch/ia64/dom0_ops.c
+++ b/xen/arch/ia64/dom0_ops.c
@@ -18,14 +18,6 @@
#include <xen/console.h>
#include <public/sched_ctl.h>
-#define TRC_DOM0OP_ENTER_BASE 0x00020000
-#define TRC_DOM0OP_LEAVE_BASE 0x00030000
-
-static int msr_cpu_mask;
-static unsigned long msr_addr;
-static unsigned long msr_lo;
-static unsigned long msr_hi;
-
long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
long ret = 0;
@@ -35,6 +27,49 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
switch ( op->cmd )
{
+ /*
+ * NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
+ * it actually allocates and maps pages.
+ */
+ case DOM0_GETMEMLIST:
+ {
+ unsigned long i;
+ struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
+ unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
+ unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
+ unsigned long pfn;
+ unsigned long *buffer = op->u.getmemlist.buffer;
+ struct page *page;
+
+ ret = -EINVAL;
+ if ( d != NULL )
+ {
+ ret = 0;
+
+ for ( i = start_page; i < (start_page + nr_pages); i++ )
+ {
+ page = map_new_domain_page(d, i << PAGE_SHIFT);
+ if ( page == NULL )
+ {
+ ret = -ENOMEM;
+ break;
+ }
+ pfn = page_to_pfn(page);
+ if ( put_user(pfn, buffer) )
+ {
+ ret = -EFAULT;
+ break;
+ }
+ buffer++;
+ }
+
+ op->u.getmemlist.num_pfns = i - start_page;
+ copy_to_user(u_dom0_op, op, sizeof(*op));
+
+ put_domain(d);
+ }
+ }
+ break;
default:
ret = -ENOSYS;
@@ -43,10 +78,3 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
return ret;
}
-
-void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
-{
- int i;
-
- dummy();
-}
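Note: the GETMEMLIST hunk above overloads the existing max_pfns field as two packed 32-bit values — the high half selects the starting page, the low half the number of pages to allocate and map. A minimal caller-side sketch of that packing (the helper name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Pack the IA64 DOM0_GETMEMLIST argument: start page in the high 32
     * bits of max_pfns, page count in the low 32 bits (hypothetical
     * helper; mirrors the unpacking done in arch_do_dom0_op above). */
    static inline uint64_t pack_getmemlist_arg(uint32_t start_page, uint32_t nr_pages)
    {
        return ((uint64_t)start_page << 32) | nr_pages;
    }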
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index 869396ed06..40a38b2e07 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -76,7 +76,7 @@ extern unsigned long dom_fw_setup(struct domain *, char *, int);
/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void free_perdomain_pt(struct domain *d)
{
- dummy();
+ printf("free_perdomain_pt: not implemented\n");
//free_page((unsigned long)d->mm.perdomain_pt);
}
@@ -166,27 +166,49 @@ void arch_free_vcpu_struct(struct vcpu *v)
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
+static void init_switch_stack(struct vcpu *v)
+{
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+ extern void ia64_ret_from_clone;
+
+ memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
+ sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
+ sw->b0 = (unsigned long) &ia64_ret_from_clone;
+ sw->ar_fpsr = FPSR_DEFAULT;
+ v->arch._thread.ksp = (unsigned long) sw - 16;
+ // stay on kernel stack because may get interrupts!
+ // ia64_ret_from_clone (which b0 gets in new_thread) switches
+ // to user stack
+ v->arch._thread.on_ustack = 0;
+ memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
+}
+
#ifdef CONFIG_VTI
void arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
struct thread_info *ti = alloc_thread_info(v);
- /* If domain is VMX domain, shared info area is created
- * by domain and then domain notifies HV by specific hypercall.
- * If domain is xenolinux, shared info area is created by
- * HV.
- * Since we have no idea about whether domain is VMX now,
- * (dom0 when parse and domN when build), postpone possible
- * allocation.
- */
+ /* Clear thread_info to clear some important fields, like preempt_count */
+ memset(ti, 0, sizeof(struct thread_info));
+ init_switch_stack(v);
+
+ /* The shared info area must be allocated at domain creation,
+ * since the control panel writes I/O information exchanged
+ * between the frontend and backend into it. For a VMX domain,
+ * however, the design is to let the domain itself allocate the
+ * shared info area so that its machine pages stay contiguous;
+ * that page is therefore released later, once domainN requests
+ * it after coming up.
+ */
+ d->shared_info = (void *)alloc_xenheap_page();
/* FIXME: Because full virtual cpu info is placed in this area,
* it's unlikely to put it into one shareinfo page. Later
* need split vcpu context from vcpu_info and conforms to
* normal xen convention.
*/
- d->shared_info = NULL;
v->vcpu_info = (void *)alloc_xenheap_page();
if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
@@ -194,9 +216,6 @@ void arch_do_createdomain(struct vcpu *v)
}
memset(v->vcpu_info, 0, PAGE_SIZE);
- /* Clear thread_info to clear some important fields, like preempt_count */
- memset(ti, 0, sizeof(struct thread_info));
-
/* Allocate per-domain vTLB and vhpt */
v->arch.vtlb = init_domain_tlb(v);
@@ -211,38 +230,25 @@ void arch_do_createdomain(struct vcpu *v)
d->xen_vastart = 0xf000000000000000;
d->xen_vaend = 0xf300000000000000;
d->arch.breakimm = 0x1000;
-
- // stay on kernel stack because may get interrupts!
- // ia64_ret_from_clone (which b0 gets in new_thread) switches
- // to user stack
- v->arch._thread.on_ustack = 0;
}
#else // CONFIG_VTI
void arch_do_createdomain(struct vcpu *v)
{
struct domain *d = v->domain;
+ struct thread_info *ti = alloc_thread_info(v);
+
+ /* Clear thread_info to clear some important fields, like preempt_count */
+ memset(ti, 0, sizeof(struct thread_info));
+ init_switch_stack(v);
d->shared_info = (void *)alloc_xenheap_page();
- v->vcpu_info = (void *)alloc_xenheap_page();
- if (!v->vcpu_info) {
+ if (!d->shared_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(v->vcpu_info, 0, PAGE_SIZE);
- /* pin mapping */
- // FIXME: Does this belong here? Or do only at domain switch time?
-#if 0
- // this is now done in ia64_new_rr7
- {
- /* WARNING: following must be inlined to avoid nested fault */
- unsigned long psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
- pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
- PAGE_SHIFT);
- ia64_set_psr(psr);
- ia64_srlz_i();
- }
-#endif
+ memset(d->shared_info, 0, PAGE_SIZE);
+ v->vcpu_info = &(d->shared_info->vcpu_data[0]);
+
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
@@ -258,33 +264,63 @@ void arch_do_createdomain(struct vcpu *v)
d->shared_info_va = 0xf100000000000000;
d->arch.breakimm = 0x1000;
v->arch.breakimm = d->arch.breakimm;
- // stay on kernel stack because may get interrupts!
- // ia64_ret_from_clone (which b0 gets in new_thread) switches
- // to user stack
- v->arch._thread.on_ustack = 0;
+
+ d->arch.mm = xmalloc(struct mm_struct);
+ if (unlikely(!d->arch.mm)) {
+ printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
+ memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+ d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+ if (unlikely(!d->arch.mm->pgd)) {
+ printk("Can't allocate pgd for domain %d\n",d->domain_id);
+ return -ENOMEM;
+ }
}
#endif // CONFIG_VTI
-void arch_do_boot_vcpu(struct vcpu *v)
+void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
{
- return;
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+ printf("arch_getdomaininfo_ctxt\n");
+ c->regs = *regs;
+ c->vcpu = v->vcpu_info->arch;
+ c->shared = v->domain->shared_info->arch;
}
int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
- dummy();
- return 1;
+ struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+ printf("arch_set_info_guest\n");
+ *regs = c->regs;
+ regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
+ regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
+ regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+
+ v->vcpu_info->arch = c->vcpu;
+ init_all_rr(v);
+
+ // this should be in userspace
+ regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L); //FIXME
+ v->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
+
+ v->domain->shared_info->arch = c->shared;
+ return 0;
}
-int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
+void arch_do_boot_vcpu(struct vcpu *v)
{
- dummy();
- return 1;
+ printf("arch_do_boot_vcpu: not implemented\n");
+ return;
}
void domain_relinquish_resources(struct domain *d)
{
- dummy();
+ /* FIXME */
+ printf("domain_relinquish_resources: not implemented\n");
}
#ifdef CONFIG_VTI
@@ -294,10 +330,8 @@ void new_thread(struct vcpu *v,
unsigned long start_info)
{
struct domain *d = v->domain;
- struct switch_stack *sw;
struct xen_regs *regs;
struct ia64_boot_param *bp;
- extern char ia64_ret_from_clone;
extern char saved_command_line[];
//char *dom0_cmdline = "BOOT_IMAGE=scsi0:\EFI\redhat\xenlinux nomca root=/dev/sdb1 ro";
@@ -305,11 +339,8 @@ void new_thread(struct vcpu *v,
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
- regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
- sw = (struct switch_stack *) regs - 1;
- /* Sanity Clear */
- memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
+ regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
if (VMX_DOMAIN(v)) {
/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
@@ -320,33 +351,23 @@ void new_thread(struct vcpu *v,
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
}
regs->cr_iip = start_pc;
- regs->ar_rsc = 0x0;
- regs->cr_ifs = 0x0;
- regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
- printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
- v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
- printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
-
- sw->b0 = (unsigned long) &ia64_ret_from_clone;
- v->arch._thread.ksp = (unsigned long) sw - 16;
- printk("new_thread, about to call init_all_rr\n");
+ regs->cr_ifs = 0; /* why? - matthewc */
+ regs->ar_fpsr = FPSR_DEFAULT;
if (VMX_DOMAIN(v)) {
vmx_init_all_rr(v);
} else
init_all_rr(v);
- // set up boot parameters (and fake firmware)
- printk("new_thread, about to call dom_fw_setup\n");
- VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
- printk("new_thread, done with dom_fw_setup\n");
if (VMX_DOMAIN(v)) {
+ VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
VMX_VPD(v, vpsr) = IA64_PSR_BN;
VPD_CR(v, dcr) = 0;
} else {
- // don't forget to set this!
+ regs->r28 = dom_fw_setup(d,saved_command_line,256L);
v->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
+ d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
}
}
#else // CONFIG_VTI
@@ -359,54 +380,27 @@ void new_thread(struct vcpu *v,
unsigned long start_info)
{
struct domain *d = v->domain;
- struct switch_stack *sw;
struct pt_regs *regs;
- unsigned long new_rbs;
struct ia64_boot_param *bp;
- extern char ia64_ret_from_clone;
extern char saved_command_line[];
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
+
regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
- sw = (struct switch_stack *) regs - 1;
- memset(sw,0,sizeof(struct switch_stack)+sizeof(struct pt_regs));
- new_rbs = (unsigned long) v + IA64_RBS_OFFSET;
regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT; // domain runs at PL2
regs->cr_iip = start_pc;
- regs->ar_rsc = 0; /* lazy mode */
- regs->ar_rnat = 0;
- regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- regs->loadrs = 0;
- //regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */
- //regs->r8 = 0x01234567890abcdef; // FIXME: temp marker
- //regs->r12 = ((unsigned long) regs - 16); /* 16 byte scratch */
regs->cr_ifs = 1UL << 63;
- regs->pr = 0;
- sw->pr = 0;
- regs->ar_pfs = 0;
- sw->caller_unat = 0;
- sw->ar_pfs = 0;
- sw->ar_bspstore = new_rbs;
- //regs->r13 = (unsigned long) v;
-printf("new_thread: v=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
-v,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
- sw->b0 = (unsigned long) &ia64_ret_from_clone;
- v->arch._thread.ksp = (unsigned long) sw - 16;
- //v->thread_info->flags = 0;
-printk("new_thread, about to call init_all_rr\n");
+ regs->ar_fpsr = FPSR_DEFAULT;
init_all_rr(v);
- // set up boot parameters (and fake firmware)
-printk("new_thread, about to call dom_fw_setup\n");
regs->r28 = dom_fw_setup(d,saved_command_line,256L); //FIXME
-printk("new_thread, done with dom_fw_setup\n");
- // don't forget to set this!
v->vcpu_info->arch.banknum = 1;
- memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
+ v->vcpu_info->arch.metaphysical_mode = 1;
+ d->shared_info->arch.flags = (d == dom0) ? (SIF_INITDOMAIN|SIF_PRIVILEGED|SIF_BLK_BE_DOMAIN|SIF_NET_BE_DOMAIN|SIF_USB_BE_DOMAIN) : 0;
}
#endif // CONFIG_VTI
@@ -1037,21 +1031,6 @@ int construct_dom0(struct domain *d,
strcpy(d->name,"Domain0");
#endif
- // prepare domain0 pagetable (maps METAphysical to physical)
- // following is roughly mm_init() in linux/kernel/fork.c
- d->arch.mm = xmalloc(struct mm_struct);
- if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain0\n");
- return -ENOMEM;
- }
- memset(d->arch.mm, 0, sizeof(*d->arch.mm));
- d->arch.mm->pgd = pgd_alloc(d->arch.mm);
- if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain0\n");
- return -ENOMEM;
- }
-
-
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
@@ -1146,19 +1125,6 @@ int construct_domU(struct domain *d,
printk("parsedomainelfimage returns %d\n",rc);
if ( rc != 0 ) return rc;
- d->arch.mm = xmalloc(struct mm_struct);
- if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
- memset(d->arch.mm, 0, sizeof(*d->arch.mm));
- d->arch.mm->pgd = pgd_alloc(d->arch.mm);
- if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain %d\n",d->domain_id);
- return -ENOMEM;
- }
-
-
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
@@ -1231,10 +1197,10 @@ void machine_halt(void)
while(1);
}
-void dummy(void)
+void dummy_called(char *function)
{
if (platform_is_hp_ski()) asm("break 0;;");
- printf("dummy called: spinning....\n");
+ printf("dummy called in %s: spinning....\n", function);
while(1);
}
diff --git a/xen/arch/ia64/hypercall.c b/xen/arch/ia64/hypercall.c
index 0fcc6f7cf8..648bbfbbe8 100644
--- a/xen/arch/ia64/hypercall.c
+++ b/xen/arch/ia64/hypercall.c
@@ -19,8 +19,6 @@ extern unsigned long translate_domain_mpaddr(unsigned long);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
-void fooefi(void) {}
-
int
ia64_hypercall (struct pt_regs *regs)
{
@@ -122,6 +120,31 @@ ia64_hypercall (struct pt_regs *regs)
case 0xfffb: // test dummy hypercall
regs->r8 = domU_staging_read_8(vcpu_get_gr(v,32));
break;
+
+ case __HYPERVISOR_dom0_op:
+ regs->r8 = do_dom0_op(regs->r14);
+ break;
+
+ case __HYPERVISOR_dom_mem_op:
+#ifdef CONFIG_VTI
+ regs->r8 = do_dom_mem_op(regs->r14, regs->r15, regs->r16, regs->r17, regs->r18);
+#else
+ /* we don't handle reservations; just return success */
+ regs->r8 = regs->r16;
+#endif
+ break;
+
+ case __HYPERVISOR_event_channel_op:
+ regs->r8 = do_event_channel_op(regs->r14);
+ break;
+
+ case __HYPERVISOR_console_io:
+ regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
+ break;
+
+ default:
+ printf("unknown hypercall %lx\n", regs->r2);
+ regs->r8 = (unsigned long)-1;
}
return 1;
}
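Note: a rough guest-side sketch of the calling convention these new cases imply — hypercall number in r2, arguments in r14..r18, result in r8, entered via the domain's break immediate (0x1000, per arch_do_createdomain). This is an illustration under those assumptions, not code from the patch:

    /* One-argument hypercall stub under the register convention implied
     * by ia64_hypercall(); illustrative only. */
    static inline long xen_hypercall1(unsigned long nr, unsigned long arg0)
    {
        register unsigned long r2 asm ("r2") = nr;
        register unsigned long r14 asm ("r14") = arg0;
        register long r8 asm ("r8");

        asm volatile ("break 0x1000;;"
                      : "=r" (r8)
                      : "r" (r2), "r" (r14)
                      : "memory");
        return r8;
    }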
diff --git a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
index 6903c66782..235c8322eb 100644
--- a/xen/arch/ia64/hyperprivop.S
+++ b/xen/arch/ia64/hyperprivop.S
@@ -41,40 +41,46 @@
// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
-#if 1
// HYPERPRIVOP_SSM_I?
// assumes domain interrupts pending, so just do it
cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
(p7) br.sptk.many hyper_ssm_i;;
-#endif
-#if 1
- // if domain interrupts pending, give up for now and do it the slow way
+
+ // FIXME. This algorithm gives up (goes to the slow path) if there
+ // are ANY interrupts pending, even if they are currently
+ // undeliverable. This should be improved later...
adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r20=[r20] ;;
- cmp.ne p7,p0=r0,r20
-(p7) br.sptk.many dispatch_break_fault ;;
+ ld4 r20=[r20] ;;
+ cmp.eq p7,p0=r0,r20
+(p7) br.cond.sptk.many 1f
+ mov r20=IA64_KR(CURRENT);;
+ adds r21=IA64_VCPU_IRR0_OFFSET,r20;
+ adds r22=IA64_VCPU_IRR0_OFFSET+8,r20;;
+ ld8 r23=[r21],16; ld8 r24=[r22],16;;
+ ld8 r21=[r21]; ld8 r22=[r22];;
+ or r23=r23,r24; or r21=r21,r22;;
+ or r20=r23,r21;;
+1: // at this point, r20 is nonzero iff interrupts are pending
// HYPERPRIVOP_RFI?
cmp.eq p7,p6=XEN_HYPER_RFI,r17
(p7) br.sptk.many hyper_rfi;;
+ cmp.ne p7,p0=r20,r0
+(p7) br.spnt.many dispatch_break_fault ;;
+
// hard to test, because only called from rbs_switch
// HYPERPRIVOP_COVER?
cmp.eq p7,p6=XEN_HYPER_COVER,r17
(p7) br.sptk.many hyper_cover;;
-#endif
-#if 1
// HYPERPRIVOP_SSM_DT?
cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
(p7) br.sptk.many hyper_ssm_dt;;
-#endif
-#if 1
// HYPERPRIVOP_RSM_DT?
cmp.eq p7,p6=XEN_HYPER_RSM_DT,r17
(p7) br.sptk.many hyper_rsm_dt;;
-#endif
// if not one of the above, give up for now and do it the slow way
br.sptk.many dispatch_break_fault ;;
@@ -336,12 +342,16 @@ GLOBAL_ENTRY(fast_break_reflect)
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
-#ifdef FAST_HYPERPRIVOP_CNT
- movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
- ld8 r21=[r20];;
- adds r21=1,r21;;
- st8 [r20]=r21;;
-#endif
+ // if no interrupts pending, proceed
+ cmp.eq p7,p0=r20,r0
+(p7) br.sptk.many 1f
+ // interrupts pending, if rfi'ing to interrupts on, go slow way
+ adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r21=[r20];; // r21 = vcr.ipsr
+ extr.u r22=r21,IA64_PSR_I_BIT,1 ;;
+ cmp.ne p7,p0=r22,r0 ;;
+(p7) br.spnt.many dispatch_break_fault ;;
+1:
adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r21=[r20];; // r21 = vcr.ipsr
extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
@@ -375,7 +385,13 @@ ENTRY(hyper_rfi)
(p7) br.sptk.many dispatch_break_fault ;;
// OK now, let's do an rfi.
- // r18=&vpsr.i|vpsr.ic, r21==vpsr, r20==&vcr.iip, r22=vcr.iip
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
+ ld8 r23=[r20];;
+ adds r23=1,r23;;
+ st8 [r20]=r23;;
+#endif
+ // r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
mov cr.iip=r22;;
adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
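Note: in C terms, the new pending-interrupt probe in fast_hyperprivop amounts to: if the XSI pending flag is clear the answer is zero; otherwise OR the vcpu's four 64-bit irr words together, leaving r20 nonzero iff anything is pending. A sketch of that reduction (types assumed; the irr[] layout matches the IA64_VCPU_IRR0/IRR3 offsets added in asm-offsets.c above):

    #include <stdint.h>

    /* C equivalent of the r20 computation in fast_hyperprivop. */
    static inline uint64_t pending_interrupts(uint32_t xsi_pend, const uint64_t irr[4])
    {
        if (!xsi_pend)
            return 0;
        return irr[0] | irr[1] | irr[2] | irr[3];
    }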
diff --git a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
index 4d6785c310..b1def7004f 100644
--- a/xen/arch/ia64/ivt.S
+++ b/xen/arch/ia64/ivt.S
@@ -348,12 +348,23 @@ ENTRY(alt_itlb_miss)
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
+ mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_itlb_miss:
+ movl r17=PAGE_KERNEL
+ mov r21=cr.ipsr
+ movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+ ;;
+#else
mov r16=cr.ifa // get address that caused the TLB miss
movl r17=PAGE_KERNEL
mov r21=cr.ipsr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r31=pr
;;
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
@@ -367,9 +378,15 @@ ENTRY(alt_itlb_miss)
#endif
extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+#ifdef XEN
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(59)
+#else
shr.u r18=r16,57 // move address bit 61 to bit 4
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
+#endif
cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
or r19=r17,r19 // insert PTE control bits into r19
;;
@@ -393,13 +410,18 @@ ENTRY(alt_dtlb_miss)
// ;;
//#endif
#endif
+#ifdef XEN
+ mov r31=pr
mov r16=cr.ifa // get address that caused the TLB miss
+ ;;
+late_alt_dtlb_miss:
movl r17=PAGE_KERNEL
mov r20=cr.isr
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r21=cr.ipsr
- mov r31=pr
;;
+#else
+#endif
#ifdef CONFIG_DISABLE_VHPT
shr.u r22=r16,61 // get the region number into r21
;;
@@ -414,24 +436,33 @@ ENTRY(alt_dtlb_miss)
extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
+#ifdef XEN
+ shr.u r18=r16,55 // move address bit 59 to bit 4
+ and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
+ tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
+ ;;
+ and r18=0x10,r18 // bit 4=address-bit(59)
+#else
shr.u r18=r16,57 // move address bit 61 to bit 4
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
+#endif
cmp.ne p8,p0=r0,r23
(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
(p8) br.cond.spnt page_fault
#ifdef XEN
;;
- // FIXME: inadequate test, this is where we test for Xen address
- // note that 0xf000 (cached) and 0xd000 (uncached) addresses
- // should be OK. (Though no I/O is done in Xen, EFI needs uncached
- // addresses and some domain EFI calls are passed through)
- tbit.nz p0,p8=r16,60
-(p8) br.cond.spnt page_fault
-//(p8) br.cond.spnt 0
- ;;
+ // Test for Xen address, if not handle via page_fault
+ // note that 0xf000 (cached) and 0xe800 (uncached) addresses
+ // should be OK.
+ extr.u r22=r16,59,5;;
+ cmp.eq p8,p0=0x1e,r22
+(p8) br.cond.spnt 1f;;
+ cmp.ne p8,p0=0x1d,r22
+(p8) br.cond.sptk page_fault ;;
+1:
#endif
dep r21=-1,r21,IA64_PSR_ED_BIT,1
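Note: the replacement test works because extr.u r22=r16,59,5 yields the top five address bits: 0xf000... (the cached Xen region) gives 0b11110 = 0x1e, and 0xe800... (the new uncached offset, introduced in the io.h patch that follows) gives 0b11101 = 0x1d. A quick worked check:

    #include <assert.h>

    /* Verify the region constants tested in the new alt_dtlb_miss code. */
    int main(void)
    {
        assert((0xf000000000000000UL >> 59) == 0x1e); /* cached Xen region */
        assert((0xe800000000000000UL >> 59) == 0x1d); /* uncached region */
        return 0;
    }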
diff --git a/xen/arch/ia64/patch/linux-2.6.11/io.h b/xen/arch/ia64/patch/linux-2.6.11/io.h
index c935f35cf3..b42ae6b549 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/io.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/io.h
@@ -5,7 +5,7 @@
#define SLOW_DOWN_IO do { } while (0)
+#ifdef XEN
-+#define __IA64_UNCACHED_OFFSET 0xd000000000000000UL /* region 6 */
++#define __IA64_UNCACHED_OFFSET 0xe800000000000000UL
+#else
#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
+#endif
diff --git a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
index dd79914f59..f96ceb1ba0 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/ptrace.h
@@ -4,9 +4,9 @@
* (because the memory stack pointer MUST ALWAYS be aligned this way)
*
*/
-+#ifdef CONFIG_VTI
-+#include "vmx_ptrace.h"
-+#else //CONFIG_VTI
++#ifdef XEN
++#include <public/arch-ia64.h>
++#else
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
@@ -14,7 +14,7 @@
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
-+#endif // CONFIG_VTI
++#endif
/*
* This structure contains the addition registers that need to
diff --git a/xen/arch/ia64/patch/linux-2.6.11/uaccess.h b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
new file mode 100644
index 0000000000..def5aaac47
--- /dev/null
+++ b/xen/arch/ia64/patch/linux-2.6.11/uaccess.h
@@ -0,0 +1,22 @@
+--- ../../linux-2.6.11/include/asm-ia64/uaccess.h 2005-06-06 10:36:23.000000000 -0600
++++ include/asm-ia64/uaccess.h 2005-06-10 18:08:06.000000000 -0600
+@@ -60,6 +60,11 @@
+ * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
++#ifdef XEN
++/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
++#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
++#define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr)))
++#else
+ #define __access_ok(addr, size, segment) \
+ ({ \
+ __chk_user_ptr(addr); \
+@@ -67,6 +72,7 @@
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+ })
++#endif
+ #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
+
+ static inline int
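Note: the IS_VMM_ADDRESS test above relies on the VT-i convention that guest virtual addresses have bit 60 equal to bit 59; the VMM claims the half of each region where they differ. A small self-check of the macro:

    #include <assert.h>

    #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

    int main(void)
    {
        assert(!IS_VMM_ADDRESS(0x0000000000001000UL)); /* bits 60,59 = 0,0 */
        assert( IS_VMM_ADDRESS(0xf000000000000000UL)); /* bits 60,59 = 1,0: VMM */
        assert(!IS_VMM_ADDRESS(0xf800000000000000UL)); /* bits 60,59 = 1,1 */
        return 0;
    }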
diff --git a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
index 1f50ea2448..c4fbcca45c 100644
--- a/xen/arch/ia64/privop.c
+++ b/xen/arch/ia64/privop.c
@@ -748,10 +748,22 @@ priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
#define HYPERPRIVOP_ITC_D 0x5
#define HYPERPRIVOP_ITC_I 0x6
#define HYPERPRIVOP_SSM_I 0x7
-#define HYPERPRIVOP_MAX 0x7
+#define HYPERPRIVOP_GET_IVR 0x8
+#define HYPERPRIVOP_GET_TPR 0x9
+#define HYPERPRIVOP_SET_TPR 0xa
+#define HYPERPRIVOP_EOI 0xb
+#define HYPERPRIVOP_SET_ITM 0xc
+#define HYPERPRIVOP_THASH 0xd
+#define HYPERPRIVOP_PTC_GA 0xe
+#define HYPERPRIVOP_ITR_D 0xf
+#define HYPERPRIVOP_GET_RR 0x10
+#define HYPERPRIVOP_SET_RR 0x11
+#define HYPERPRIVOP_MAX 0x11
char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
+ "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
+ "=rr", "rr=",
0
};
@@ -766,6 +778,7 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
struct vcpu *v = (struct domain *) current;
INST64 inst;
UINT64 val;
+ UINT64 itir, ifa;
// FIXME: Handle faults appropriately for these
if (!iim || iim > HYPERPRIVOP_MAX) {
@@ -797,6 +810,44 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
case HYPERPRIVOP_SSM_I:
(void)vcpu_set_psr_i(v);
return 1;
+ case HYPERPRIVOP_GET_IVR:
+ (void)vcpu_get_ivr(v,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_GET_TPR:
+ (void)vcpu_get_tpr(v,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_SET_TPR:
+ (void)vcpu_set_tpr(v,regs->r8);
+ return 1;
+ case HYPERPRIVOP_EOI:
+ (void)vcpu_set_eoi(v,0L);
+ return 1;
+ case HYPERPRIVOP_SET_ITM:
+ (void)vcpu_set_itm(v,regs->r8);
+ return 1;
+ case HYPERPRIVOP_THASH:
+ (void)vcpu_thash(v,regs->r8,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_PTC_GA:
+ // FIXME: this doesn't seem to work yet, turned off
+ //(void)vcpu_ptc_ga(v,regs->r8,regs->r9);
+ //return 1;
+ break;
+ case HYPERPRIVOP_ITR_D:
+ (void)vcpu_get_itir(v,&itir);
+ (void)vcpu_get_ifa(v,&ifa);
+ (void)vcpu_itr_d(v,regs->r8,regs->r9,itir,ifa);
+ return 1;
+ case HYPERPRIVOP_GET_RR:
+ (void)vcpu_get_rr(v,regs->r8,&val);
+ regs->r8 = val;
+ return 1;
+ case HYPERPRIVOP_SET_RR:
+ (void)vcpu_set_rr(v,regs->r8,regs->r9);
+ return 1;
}
return 0;
}
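Note: a sketch of how a guest would issue one of the new hyperprivops, assuming (as the iim argument and the `iim > HYPERPRIVOP_MAX` check above suggest) that the HYPERPRIVOP_* number is encoded directly as the break immediate and the result comes back in r8. Illustrative only:

    #define HYPERPRIVOP_GET_TPR 0x9

    /* Hypothetical guest stub for the "=tpr" hyperprivop added above. */
    static inline unsigned long hyperprivop_get_tpr(void)
    {
        register unsigned long r8 asm ("r8");

        asm volatile ("break %1;;"
                      : "=r" (r8)
                      : "i" (HYPERPRIVOP_GET_TPR)
                      : "memory");
        return r8;
    }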
diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index f664b74a42..a26194d8f3 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -313,45 +313,31 @@ void xen_handle_domain_access(unsigned long address, unsigned long isr, struct p
}
if (address < 0x4000) printf("WARNING: page_fault @%p, iip=%p\n",address,iip);
+ if (trp = match_tr(current,address)) {
+ // FIXME address had better be pre-validated on insert
+ pteval = translate_domain_pte(trp->page_flags,address,trp->itir);
+ vcpu_itc_no_srlz(current,6,address,pteval,-1UL,(trp->itir>>2)&0x3f);
+ return;
+ }
// if we are fortunate enough to have it in the 1-entry TLB...
if (pteval = match_dtlb(ed,address,&ps,NULL)) {
vcpu_itc_no_srlz(ed,6,address,pteval,-1UL,ps);
return;
}
- // look in the TRs
- fault = vcpu_tpa(ed,address,&mpaddr);
- if (fault != IA64_NO_FAULT) {
- static int uacnt = 0;
- // can't translate it, just fail (poor man's exception)
- // which results in retrying execution
-//printk("*** xen_handle_domain_access: poor man's exception cnt=%i iip=%p, addr=%p...\n",uacnt++,iip,address);
- if (ia64_done_with_exception(regs)) {
+ if (ia64_done_with_exception(regs)) {
//if (!(uacnt++ & 0x3ff)) printk("*** xen_handle_domain_access: successfully handled cnt=%d iip=%p, addr=%p...\n",uacnt,iip,address);
return;
- }
- else {
- // should never happen. If it does, region 0 addr may
- // indicate a bad xen pointer
- printk("*** xen_handle_domain_access: exception table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- panic_domain(regs,"*** xen_handle_domain_access: exception table"
- " lookup failed, iip=%p, addr=%p, spinning...\n",
- iip,address);
- }
}
- if (d == dom0) {
- if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
- printk("xen_handle_domain_access: vcpu_tpa returned out-of-bounds dom0 mpaddr %p! continuing...\n",mpaddr);
- tdpfoo();
- }
+ else {
+ // should never happen. If it does, region 0 addr may
+ // indicate a bad xen pointer
+ printk("*** xen_handle_domain_access: exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
+ panic_domain(regs,"*** xen_handle_domain_access: exception table"
+ " lookup failed, iip=%p, addr=%p, spinning...\n",
+ iip,address);
}
-//printk("*** xen_handle_domain_access: tpa resolved miss @%p...\n",address);
- pteval = lookup_domain_mpa(d,mpaddr);
- // would be nice to have a counter here
- //printf("Handling privop data TLB miss\n");
- // FIXME, must be inlined or potential for nested fault here!
- vcpu_itc_no_srlz(ed,2,address,pteval,-1UL,PAGE_SHIFT);
}
void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
@@ -441,7 +427,7 @@ panic_domain(0,"ia64_do_page_fault: @%p???, iip=%p, b0=%p, itc=%p (spinning...)\
if (pteval & _PAGE_P)
{
pteval = translate_domain_pte(pteval,address,itir);
- vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
+ vcpu_itc_no_srlz(current,is_data?6:1,address,pteval,-1UL,(itir>>2)&0x3f);
return;
}
else vector = is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
@@ -768,7 +754,7 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
vcpu_set_gr(current,8,-1L);
break;
default:
- printf("ia64_handle_break: bad ssc code %lx, iip=%p\n",ssc,regs->cr_iip);
+ printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p\n",ssc,regs->cr_iip,regs->b0);
break;
}
vcpu_increment_iip(current);
diff --git a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
index 6653d4b6a8..a40d0098e3 100644
--- a/xen/arch/ia64/regionreg.c
+++ b/xen/arch/ia64/regionreg.c
@@ -274,6 +274,7 @@ int set_one_rr(unsigned long rr, unsigned long val)
return 0;
}
+#ifdef CONFIG_VTI
memrrv.rrval = rrv.rrval;
if (rreg == 7) {
newrrv.rid = newrid;
@@ -290,6 +291,15 @@ int set_one_rr(unsigned long rr, unsigned long val)
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
set_rr(rr,newrrv.rrval);
}
+#else
+ memrrv.rrval = rrv.rrval;
+ newrrv.rid = newrid;
+ newrrv.ve = 1; // VHPT now enabled for region 7!!
+ newrrv.ps = PAGE_SHIFT;
+ if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
+ if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ else set_rr(rr,newrrv.rrval);
+#endif
return 1;
}
diff --git a/xen/arch/ia64/tools/mkbuildtree b/xen/arch/ia64/tools/mkbuildtree
index 5964c836c8..18d0c72c67 100644
--- a/xen/arch/ia64/tools/mkbuildtree
+++ b/xen/arch/ia64/tools/mkbuildtree
@@ -259,7 +259,7 @@ softlink include/asm-ia64/string.h include/asm-ia64/string.h
softlink include/asm-ia64/thread_info.h include/asm-ia64/thread_info.h
softlink include/asm-ia64/timex.h include/asm-ia64/timex.h
softlink include/asm-ia64/topology.h include/asm-ia64/topology.h
-softlink include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h
+cp_patch include/asm-ia64/uaccess.h include/asm-ia64/uaccess.h uaccess.h
softlink include/asm-ia64/unaligned.h include/asm-ia64/unaligned.h
softlink include/asm-ia64/unistd.h include/asm-ia64/unistd.h
softlink include/asm-ia64/unwind.h include/asm-ia64/unwind.h
diff --git a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
index b55e5b6bd7..45ae1bc656 100644
--- a/xen/arch/ia64/vcpu.c
+++ b/xen/arch/ia64/vcpu.c
@@ -43,8 +43,9 @@ typedef union {
#ifdef PRIVOP_ADDR_COUNT
struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = {
- { "rsm", { 0 }, { 0 }, 0 },
- { "ssm", { 0 }, { 0 }, 0 }
+ { "=ifa", { 0 }, { 0 }, 0 },
+ { "thash", { 0 }, { 0 }, 0 },
+ 0
};
extern void privop_count_addr(unsigned long addr, int inst);
#define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst)
@@ -135,7 +136,7 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
- PRIVOP_COUNT_ADDR(regs,_RSM);
+ //PRIVOP_COUNT_ADDR(regs,_RSM);
// TODO: All of these bits need to be virtualized
// TODO: Only allowed for current vcpu
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
@@ -183,7 +184,7 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
REGS *regs = vcpu_regs(vcpu);
UINT64 mask, enabling_interrupts = 0;
- PRIVOP_COUNT_ADDR(regs,_SSM);
+ //PRIVOP_COUNT_ADDR(regs,_SSM);
// TODO: All of these bits need to be virtualized
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory");
imm = *(struct ia64_psr *)&imm24;
@@ -369,6 +370,8 @@ IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
UINT64 val = PSCB(vcpu,ifa);
+ REGS *regs = vcpu_regs(vcpu);
+ PRIVOP_COUNT_ADDR(regs,_GET_IFA);
*pval = val;
return (IA64_NO_FAULT);
}
@@ -422,6 +425,8 @@ IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
//return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval);
UINT64 val = PSCB(vcpu,iha);
+ REGS *regs = vcpu_regs(vcpu);
+ PRIVOP_COUNT_ADDR(regs,_THASH);
*pval = val;
return (IA64_NO_FAULT);
}
@@ -539,7 +544,7 @@ void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
} else
#endif // CONFIG_VTI
{
- if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return;
+ /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {
//printf("vcpu_pend_interrupt: overrun\n");
}
@@ -569,10 +574,10 @@ UINT64 vcpu_check_pending_interrupts(VCPU *vcpu)
UINT64 *p, *q, *r, bits, bitnum, mask, i, vector;
p = &PSCBX(vcpu,irr[3]);
- q = &PSCB(vcpu,delivery_mask[3]);
+ /* q = &PSCB(vcpu,delivery_mask[3]); */
r = &PSCBX(vcpu,insvc[3]);
for (i = 3; ; p--, q--, r--, i--) {
- bits = *p & *q;
+ bits = *p /* & *q */;
if (bits) break; // got a potential interrupt
if (*r) {
// nothing in this word which is pending+inservice
@@ -1589,7 +1594,8 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64 IorD, UINT64 vaddr, UINT64 pte, UINT64
// addresses never get flushed. More work needed if this
// ever happens.
//printf("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
- vhpt_insert(vaddr,pte,logps<<2);
+ if (logps > PAGE_SHIFT) vhpt_multiple_insert(vaddr,pte,logps);
+ else vhpt_insert(vaddr,pte,logps<<2);
}
// even if domain pagesize is larger than PAGE_SIZE, just put
// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
diff --git a/xen/arch/ia64/vhpt.c b/xen/arch/ia64/vhpt.c
index b535f9fc56..86495a8fe8 100644
--- a/xen/arch/ia64/vhpt.c
+++ b/xen/arch/ia64/vhpt.c
@@ -87,6 +87,37 @@ void vhpt_map(void)
ia64_srlz_i();
}
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
+{
+ unsigned long mask = (1L << logps) - 1;
+ int i;
+
+ if (logps-PAGE_SHIFT > 10) {
+ // if this happens, we may want to revisit this algorithm
+ printf("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
+ while(1);
+ }
+ if (logps-PAGE_SHIFT > 2) {
+ // FIXME: Should add counter here to see how often this
+ // happens (e.g. for 16MB pages!) and determine if it
+ // is a performance problem. On a quick look, it takes
+ // about 39000 instrs for a 16MB page and it seems to occur
+ // only a few times/second, so OK for now.
+ // An alternate solution would be to just insert the one
+ // 16KB in the vhpt (but with the full mapping)?
+ //printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
+ //"va=%p, pa=%p, pa-masked=%p\n",
+ //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
+ //(pte&_PFN_MASK)&~mask);
+ }
+ vaddr &= ~mask;
+ pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
+ for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
+ vhpt_insert(vaddr,pte,logps<<2);
+ vaddr += PAGE_SIZE;
+ }
+}
+
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
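Note: to put the thresholds in vhpt_multiple_insert in context, with ia64's usual 16KB pages (PAGE_SHIFT=14, an assumption here) a 16MB mapping (logps=24) expands to 1 << (24-14) = 1024 individual PAGE_SIZE inserts — consistent with the ~39000-instruction figure in the comment, and why the code spins when logps-PAGE_SHIFT exceeds 10:

    #include <stdio.h>

    int main(void)
    {
        int PAGE_SHIFT = 14, logps = 24;  /* 16KB pages, 16MB mapping */
        printf("VHPT inserts = %d\n", 1 << (logps - PAGE_SHIFT)); /* 1024 */
        return 0;
    }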
diff --git a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
index c39d6f2851..60126b23b2 100644
--- a/xen/arch/ia64/vmmu.c
+++ b/xen/arch/ia64/vmmu.c
@@ -454,12 +454,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -467,9 +468,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
@@ -488,11 +487,12 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TC;
+ data.tc = 1;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -500,42 +500,27 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
panic("Tlb conflict!!");
return;
}
- sections.v = THASH_SECTION_TC;
- thash_purge_entries(hcb, &data, sections);
- thash_insert(hcb, &data, ifa);
+ thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
}
-IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
+/*
+ * Lock or unlock the guest DTC entry covering va.
+ * Returns 0 on success, 1 on failure (see thash_lock_tc).
+ */
+int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
{
- thash_data_t data, *ovl;
thash_cb_t *hcb;
- search_section_t sections;
- rr_t vrr;
+ rr_t vrr;
+ u64 preferred_size;
- hcb = vmx_vcpu_get_vtlb(vcpu);
- data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
- data.itir=0;
- data.ps = ps;
- data.vadr=PAGEALIGN(va,ps);
- data.section=THASH_TLB_FM;
- data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, va, &vrr);
- data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
-
- ovl = thash_find_overlap(hcb, &data, sections);
- if (ovl) {
- // generate MCA.
- panic("Foreignmap Tlb conflict!!");
- return;
- }
- thash_insert(hcb, &data, va);
- return IA64_NO_FAULT;
+ hcb = vmx_vcpu_get_vtlb(vcpu);
+ va = PAGEALIGN(va,vrr.ps);
+ preferred_size = PSIZE(vrr.ps);
+ return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
}
-
IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
{
@@ -548,11 +533,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=ISIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
if (ovl) {
@@ -560,7 +546,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -579,11 +566,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
data.itir=itir;
data.vadr=PAGEALIGN(ifa,data.ps);
- data.section=THASH_TLB_TR;
+ data.tc = 0;
data.cl=DSIDE_TLB;
vmx_vcpu_get_rr(vcpu, ifa, &vrr);
data.rid = vrr.rid;
- sections.v = THASH_SECTION_TR;
+ sections.tr = 1;
+ sections.tc = 0;
ovl = thash_find_overlap(hcb, &data, sections);
while (ovl) {
@@ -591,7 +579,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
panic("Tlb conflict!!");
return;
}
- sections.v=THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
thash_purge_entries(hcb, &data, sections);
thash_tr_insert(hcb, &data, ifa, idx);
return IA64_NO_FAULT;
@@ -607,7 +596,8 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
return IA64_NO_FAULT;
}
@@ -619,7 +609,8 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
search_section_t sections;
hcb = vmx_vcpu_get_vtlb(vcpu);
rr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+ sections.tr = 1;
+ sections.tc = 1;
thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
return IA64_NO_FAULT;
}
@@ -632,7 +623,8 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
thash_data_t data, *ovl;
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
- sections.v = THASH_SECTION_TC;
+ sections.tr = 0;
+ sections.tc = 1;
vadr = PAGEALIGN(vadr, ps);
thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
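Note: these hunks replace the old THASH_SECTION_* bitmask (sections.v = ...) with individual flag assignments, which implies search_section_t is now roughly a union of bitfields like the sketch below. The actual definition lives in the vmmu.h change, which is not shown in this excerpt:

    /* Assumed shape of search_section_t after this patch; illustrative.
     * Relies on the compiler's anonymous-struct extension, as the
     * sections.tr / sections.v usage above suggests. */
    typedef union search_section {
        struct {
            unsigned int tr : 1;   /* search translation registers */
            unsigned int tc : 1;   /* search translation cache */
        };
        unsigned int v;            /* all flags at once */
    } search_section_t;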
diff --git a/xen/arch/ia64/vmx_ivt.S b/xen/arch/ia64/vmx_ivt.S
index 9647386a8c..407dc4cd86 100644
--- a/xen/arch/ia64/vmx_ivt.S
+++ b/xen/arch/ia64/vmx_ivt.S
@@ -180,7 +180,7 @@ ENTRY(vmx_dtlb_miss)
mov r29=cr.ipsr;
;;
tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_1
+(p6)br.sptk vmx_fault_2
mov r16 = cr.ifa
;;
thash r17 = r16
@@ -249,9 +249,9 @@ ENTRY(vmx_alt_itlb_miss)
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
;;
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
- shr.u r18=r16,57 // move address bit 61 to bit 4
+ shr.u r18=r16,55 // move address bit 59 to bit 4
;;
- andcm r18=0x10,r18 // bit 4=~address-bit(61)
+ and r18=0x10,r18 // bit 4=address-bit(59)
or r19=r17,r19 // insert PTE control bits into r19
;;
or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
@@ -280,11 +280,11 @@ ENTRY(vmx_alt_dtlb_miss)
;;
and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
- shr.u r18=r16,57 // move address bit 61 to bit 4
+ shr.u r18=r16,55 // move address bit 59 to bit 4
and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
;;
- andcm r18=0x10,r18 // bit 4=~address-bit(61)
+ and r18=0x10,r18 // bit 4=address-bit(59)
(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
dep r24=-1,r24,IA64_PSR_ED_BIT,1
or r19=r19,r17 // insert PTE control bits into r19
@@ -346,7 +346,12 @@ END(vmx_daccess_bit)
ENTRY(vmx_break_fault)
mov r31=pr
mov r19=11
- br.sptk.many vmx_dispatch_break_fault
+ mov r30=cr.iim
+ mov r29=0x1100
+ ;;
+ cmp4.eq p6,p7=r29,r30
+ (p6) br.dptk.few vmx_hypercall_dispatch
+ (p7) br.sptk.many vmx_dispatch_break_fault
END(vmx_break_fault)
.org vmx_ia64_ivt+0x3000
@@ -929,10 +934,9 @@ END(vmx_dispatch_tlb_miss)
ENTRY(vmx_dispatch_break_fault)
- cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
- ;;
VMX_SAVE_MIN_WITH_COVER_R19
;;
+ ;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
mov out0=cr.ifa
adds out1=16,sp
@@ -951,9 +955,37 @@ ENTRY(vmx_dispatch_break_fault)
;;
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_break
+ ;;
END(vmx_dispatch_break_fault)
+ENTRY(vmx_hypercall_dispatch)
+ VMX_SAVE_MIN_WITH_COVER
+ ssm psr.ic
+ ;;
+ srlz.i // guarantee that interruption collection is on
+ ;;
+ ssm psr.i // restore psr.i
+ adds r3=16,r2 // set up second base pointer
+ ;;
+ VMX_SAVE_REST
+ ;;
+ movl r14=ia64_leave_hypervisor
+ movl r2=hyper_call_table
+ ;;
+ mov rp=r14
+ shladd r2=r15,3,r2
+ ;;
+ ld8 r2=[r2]
+ ;;
+ mov b6=r2
+ ;;
+ br.call.sptk.many b6=b6
+ ;;
+END(vmx_hypercall_dispatch)
+
+
+
ENTRY(vmx_dispatch_interrupt)
cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */
;;
@@ -976,3 +1008,39 @@ ENTRY(vmx_dispatch_interrupt)
mov rp=r14
br.call.sptk.many b6=vmx_ia64_handle_irq
END(vmx_dispatch_interrupt)
+
+
+
+ .rodata
+ .align 8
+ .globl hyper_call_table
+hyper_call_table:
+ data8 hyper_not_support //hyper_set_trap_table /* 0 */
+ data8 hyper_mmu_update
+ data8 hyper_not_support //hyper_set_gdt
+ data8 hyper_not_support //hyper_stack_switch
+ data8 hyper_not_support //hyper_set_callbacks
+ data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
+ data8 hyper_sched_op
+ data8 hyper_dom0_op
+ data8 hyper_not_support //hyper_set_debugreg
+ data8 hyper_not_support //hyper_get_debugreg
+ data8 hyper_not_support //hyper_update_descriptor /* 10 */
+ data8 hyper_not_support //hyper_set_fast_trap
+ data8 hyper_dom_mem_op
+ data8 hyper_not_support //hyper_multicall
+ data8 hyper_not_support //hyper_update_va_mapping
+ data8 hyper_not_support //hyper_set_timer_op /* 15 */
+ data8 hyper_event_channel_op
+ data8 hyper_xen_version
+ data8 hyper_not_support //hyper_console_io
+ data8 hyper_not_support //hyper_physdev_op
+ data8 hyper_not_support //hyper_grant_table_op /* 20 */
+ data8 hyper_not_support //hyper_vm_assist
+ data8 hyper_not_support //hyper_update_va_mapping_otherdomain
+ data8 hyper_not_support //hyper_switch_vm86
+ data8 hyper_not_support //hyper_boot_vcpu
+ data8 hyper_not_support //hyper_ni_hypercall /* 25 */
+ data8 hyper_not_support //hyper_mmuext_op
+ data8 hyper_lock_page
+ data8 hyper_set_shared_page
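Note: in C terms, the vmx_hypercall_dispatch path above is a plain indexed indirect call: shladd r2=r15,3,r2 computes hyper_call_table + nr*8 (8-byte function pointers), and the branch through b6 invokes the handler. A sketch of the equivalent logic (illustrative; the real dispatch is the assembly shown):

    typedef long (*hypercall_fn_t)(void);
    extern hypercall_fn_t hyper_call_table[];

    static long dispatch_vmx_hypercall(unsigned long nr)
    {
        /* shladd r2=r15,3,r2 ; ld8 r2=[r2] ; br.call b6=b6 */
        return hyper_call_table[nr]();
    }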
diff --git a/xen/arch/ia64/vmx_minstate.h b/xen/arch/ia64/vmx_minstate.h
index afee6516d9..76f8e7f065 100644
--- a/xen/arch/ia64/vmx_minstate.h
+++ b/xen/arch/ia64/vmx_minstate.h
@@ -282,11 +282,9 @@
;; \
.mem.offset 0,0; st8.spill [r4]=r20,16; \
.mem.offset 8,0; st8.spill [r5]=r21,16; \
- mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r22,16; \
.mem.offset 8,0; st8.spill [r5]=r23,16; \
- mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r4]=r24,16; \
.mem.offset 8,0; st8.spill [r5]=r25,16; \
@@ -296,9 +294,11 @@
;; \
.mem.offset 0,0; st8.spill [r4]=r28,16; \
.mem.offset 8,0; st8.spill [r5]=r29,16; \
+ mov r26=b6; \
;; \
.mem.offset 0,0; st8.spill [r4]=r30,16; \
.mem.offset 8,0; st8.spill [r5]=r31,16; \
+ mov r27=b7; \
;; \
mov r30=ar.unat; \
;; \
@@ -317,8 +317,8 @@
adds r2=PT(B6)-PT(F10),r2; \
adds r3=PT(B7)-PT(F11),r3; \
;; \
- st8 [r2]=r18,16; /* b6 */ \
- st8 [r3]=r19,16; /* b7 */ \
+ st8 [r2]=r26,16; /* b6 */ \
+ st8 [r3]=r27,16; /* b7 */ \
;; \
st8 [r2]=r9; /* ar.csd */ \
st8 [r3]=r10; /* ar.ssd */ \
diff --git a/xen/arch/ia64/vmx_process.c b/xen/arch/ia64/vmx_process.c
index 2c541af113..8ab671cdb8 100644
--- a/xen/arch/ia64/vmx_process.c
+++ b/xen/arch/ia64/vmx_process.c
@@ -116,7 +116,6 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
case FW_HYPERCALL_EFI_GET_TIME:
{
unsigned long *tv, *tc;
- fooefi();
vmx_vcpu_get_gr(v, 32, &tv);
vmx_vcpu_get_gr(v, 33, &tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
diff --git a/xen/arch/ia64/vtlb.c b/xen/arch/ia64/vtlb.c
index 6cbb4478b7..86565531bf 100644
--- a/xen/arch/ia64/vtlb.c
+++ b/xen/arch/ia64/vtlb.c
@@ -252,7 +252,7 @@ static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
/* Find overlap TLB entry */
for (cch=priv->cur_cch; cch; cch = cch->next) {
- if ( ((1UL<<cch->section) & priv->s_sect.v) &&
+ if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) &&
__is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
priv->_curva, priv->_eva) ) {
return cch;
@@ -322,7 +322,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
{
- if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) {
+ if ( hcb->ht != THASH_TLB || entry->tc ) {
panic("wrong parameter\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -356,7 +356,7 @@ thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
* 3: The caller need to make sure the new entry will not overlap
* with any existed entry.
*/
-static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t *hash_table, *cch;
rr_t vrr;
@@ -411,7 +411,7 @@ void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
rr_t vrr;
vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
- if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) {
+ if ( entry->ps != vrr.ps && entry->tc ) {
panic("Not support for multiple page size now\n");
}
entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -450,7 +450,7 @@ static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
thash_internal_t *priv = &hcb->priv;
int idx;
- if ( entry->section == THASH_TLB_TR ) {
+ if ( !entry->tc ) {
return rem_tr(hcb, entry->cl, entry->tr_idx);
}
rem_thash(hcb, entry);
@@ -525,19 +525,19 @@ thash_data_t *thash_find_overlap(thash_cb_t *hcb,
thash_data_t *in, search_section_t s_sect)
{
return (hcb->find_overlap)(hcb, in->vadr,
- in->ps, in->rid, in->cl, s_sect);
+ PSIZE(in->ps), in->rid, in->cl, s_sect);
}
static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
@@ -553,15 +553,15 @@ static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
}
static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb,
- u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+ u64 va, u64 size, int rid, char cl, search_section_t s_sect)
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
u64 tag;
rr_t vrr;
- priv->_curva = PAGEALIGN(va,ps);
- priv->_eva = priv->_curva + PSIZE(ps);
+ priv->_curva = va & ~(size-1);
+ priv->_eva = priv->_curva + size;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
@@ -691,13 +691,46 @@ void thash_purge_entries_ex(thash_cb_t *hcb,
{
thash_data_t *ovl;
- ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect);
+ ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
while ( ovl != NULL ) {
(hcb->rem_hash)(hcb, ovl);
ovl = (hcb->next_overlap)(hcb);
};
}
+/*
+ * Purge overlapping TCs and then insert the new entry, to emulate itc ops.
+ * Note: only TC entries may be purged and inserted this way.
+ */
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+#ifdef XEN_DEBUGGER
+ vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+ if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
+ panic ("Oops, wrong call for purge_and_insert\n");
+ return;
+ }
+#endif
+ in->vadr = PAGEALIGN(in->vadr,in->ps);
+ in->ppn = PAGEALIGN(in->ppn, in->ps-12);
+ sections.tr = 0;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
+ in->rid, in->cl, sections);
+ if(ovl)
+ (hcb->rem_hash)(hcb, ovl);
+#ifdef XEN_DEBUGGER
+ ovl = (hcb->next_overlap)(hcb);
+ if ( ovl ) {
+ panic ("Oops, 2+ overlaps for purge_and_insert\n");
+ return;
+ }
+#endif
+ (hcb->ins_hash)(hcb, in, in->vadr);
+}
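A sketch of the intended call pattern for thash_purge_and_insert: emulating a guest itc by dropping any overlapping TCs and installing the new translation in one step. The caller and field setup here are illustrative, not part of the patch:

    /* Hypothetical itc emulation path (illustrative only). */
    void emulate_itc(thash_cb_t *tlb, thash_data_t *entry)
    {
        entry->tc = 1;                      /* only TC entries qualify */
        thash_purge_and_insert(tlb, entry);
    }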
/*
* Purge all TCs or VHPT entries including those in Hash table.
@@ -766,6 +799,42 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
return NULL;
}
+/*
+ * Lock/Unlock TC if found.
+ * NOTES: only pages of the preferred size can be handled.
+ * return:
+ * 1: failure
+ * 0: success
+ */
+int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
+{
+ thash_data_t *ovl;
+ search_section_t sections;
+
+ sections.tr = 1;
+ sections.tc = 1;
+ ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
+ if ( ovl ) {
+ if ( !ovl->tc ) {
+// panic("Oops, TR for lock\n");
+ return 0;
+ }
+ else if ( lock ) {
+ if ( ovl->locked ) {
+ DPRINTK("Oops, already locked entry\n");
+ }
+ ovl->locked = 1;
+ }
+ else if ( !lock ) {
+ if ( !ovl->locked ) {
+ DPRINTK("Oops, already unlocked entry\n");
+ }
+ ovl->locked = 0;
+ }
+ return 0;
+ }
+ return 1;
+}
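A sketch of how a caller might use thash_lock_tc to pin a TC entry across an emulated operation; note the inverted return convention (0 on success, 1 when no entry is found). The surrounding logic is illustrative:

    /* Illustrative only: pin, use, unpin. */
    if (thash_lock_tc(hcb, va, PSIZE(ps), rid, cl, 1) == 0) {
        /* the entry now survives PURGABLE_ENTRY() checks ... */
        thash_lock_tc(hcb, va, PSIZE(ps), rid, cl, 0);  /* unlock */
    }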
/*
* Notifier when TLB is deleted from hash table and its collision chain.
@@ -824,7 +893,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
}
}
-
#ifdef VTLB_DEBUG
static u64 cch_length_statistics[MAX_CCH_LENGTH+1];
u64 sanity_check=0;
diff --git a/xen/arch/ia64/xenmem.c b/xen/arch/ia64/xenmem.c
index 3a749840a0..088611b22a 100644
--- a/xen/arch/ia64/xenmem.c
+++ b/xen/arch/ia64/xenmem.c
@@ -52,7 +52,7 @@ paging_init (void)
panic("Not enough memory to bootstrap Xen.\n");
printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
- memset(mpt_table, 0x55, mpt_table_size);
+ memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size);
/* Any more setup here? On VMX enabled platform,
* there's no need to keep guest linear pg table,
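Why a byte-wise memset is enough here: INVALID_M2P_ENTRY is all-ones, so filling the table with its low byte (0xff) leaves every unsigned long entry as ~0UL, which has bit 63 set and therefore fails VALID_M2P(). A sketch of the resulting invariant:

    /* After paging_init(), every M2P entry reads as invalid: */
    unsigned long e = mpt_table[0];   /* == ~0UL */
    /* IS_INVALID_M2P_ENTRY(e) is true, since bit 63 of e is set. */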
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index bb9f83019a..6703b397ab 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -63,13 +63,7 @@ void sync_lazy_execstate_mask(cpumask_t mask) {}
void sync_lazy_execstate_all(void) {}
int grant_table_create(struct domain *d) { return 0; }
-void grant_table_destroy(struct domain *d)
-{
- printf("grant_table_destroy: domain_destruct not tested!!!\n");
- printf("grant_table_destroy: ensure atomic_* calls work in domain_destruct!!\n");
- dummy();
- return;
-}
+void grant_table_destroy(struct domain *d) { return; }
struct pt_regs *guest_cpu_user_regs(void) { return ia64_task_regs(current); }
diff --git a/xen/common/Makefile b/xen/common/Makefile
index ee312fde92..892d407585 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -1,8 +1,8 @@
include $(BASEDIR)/Rules.mk
-ifeq ($(TARGET_ARCH),ia64)
-OBJS := $(subst dom_mem_ops.o,,$(OBJS))
+ifeq ($(TARGET_ARCH),ia64)
+#OBJS := $(subst dom_mem_ops.o,,$(OBJS))
OBJS := $(subst grant_table.o,,$(OBJS))
endif
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 442d49a382..9df0d907aa 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -177,8 +177,7 @@ void sort_main_extable(void);
// see include/asm-x86/atomic.h (different from standard linux)
#define _atomic_set(v,i) (((v).counter) = (i))
#define _atomic_read(v) ((v).counter)
-// FIXME following needs work
-#define atomic_compareandswap(old, new, v) old
+#define atomic_compareandswap(old, new, v) ((atomic_t){ cmpxchg(v, _atomic_read(old), _atomic_read(new)) })
// see include/asm-ia64/mm.h, handle remaining pfn_info uses until gone
#define pfn_info page
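The rewritten atomic_compareandswap now performs a real compare-and-swap via cmpxchg instead of unconditionally returning old. A hedged usage sketch (counter and the success test are illustrative):

    /* Illustrative only: swap counter from 0 to 1 iff it is still 0. */
    atomic_t counter = ATOMIC_INIT(0);
    atomic_t old = ATOMIC_INIT(0), new = ATOMIC_INIT(1);
    atomic_t seen = atomic_compareandswap(old, new, &counter);
    int swapped = (_atomic_read(seen) == _atomic_read(old));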
@@ -227,6 +226,8 @@ struct screen_info { };
#define FORCE_CRASH() asm("break 0;;");
+#define dummy() dummy_called(__FUNCTION__)
+
// these declarations got moved at some point, find a better place for them
extern int ht_per_core;
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index 0f0e37895b..27ff16e560 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -2,18 +2,17 @@
#define __ASM_DOMAIN_H__
#include <linux/thread_info.h>
+#include <asm/tlb.h>
#ifdef CONFIG_VTI
#include <asm/vmx_vpd.h>
#include <asm/vmmu.h>
#include <asm/regionreg.h>
+#include <public/arch-ia64.h>
#endif // CONFIG_VTI
#include <xen/list.h>
extern void arch_do_createdomain(struct vcpu *);
-extern int arch_final_setup_guestos(
- struct vcpu *, struct vcpu_guest_context *);
-
extern void domain_relinquish_resources(struct domain *);
#ifdef CONFIG_VTI
@@ -36,7 +35,15 @@ struct arch_domain {
int imp_va_msb;
ia64_rr emul_phy_rr0;
ia64_rr emul_phy_rr4;
- u64 *pmt; /* physical to machine table */
+ unsigned long *pmt; /* physical to machine table */
+ /*
+ * max_pfn is the maximum page frame in guest physical space, including
+ * intermediate I/O ranges and memory holes. This differs from
+ * max_pages in the domain struct, which indicates the maximum memory size.
+ */
+ unsigned long max_pfn;
+ unsigned int section_nr;
+ mm_section_t *sections; /* Describes memory holes (not used for Dom0) */
#endif //CONFIG_VTI
u64 xen_vastart;
u64 xen_vaend;
diff --git a/xen/include/asm-ia64/event.h b/xen/include/asm-ia64/event.h
new file mode 100644
index 0000000000..e7b5cda8b1
--- /dev/null
+++ b/xen/include/asm-ia64/event.h
@@ -0,0 +1,16 @@
+/******************************************************************************
+ * event.h
+ *
+ * A nice interface for passing asynchronous events to guest OSes.
+ * (architecture-dependent part)
+ *
+ */
+
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+static inline void evtchn_notify(struct vcpu *v)
+{
+}
+
+#endif
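This stub completes the arch hook that the common event code (see the xen/event.h hunk at the end of this changeset) now calls after setting the pending bit. On ia64 it is a no-op for now; a non-stub version might kick the remote CPU, along these lines (illustrative, assuming the usual smp_send_event_check_cpu helper exists on this architecture):

    /* Sketch of a non-stub evtchn_notify (not part of this patch): */
    static inline void evtchn_notify(struct vcpu *v)
    {
        if (v->processor != smp_processor_id())
            smp_send_event_check_cpu(v->processor);
    }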
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
index a762ec6318..c84a7c781a 100644
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -27,43 +27,12 @@ typedef unsigned long page_flags_t;
/*
* Per-page-frame information.
+ *
+ * Every architecture must ensure the following:
+ * 1. 'struct pfn_info' contains a 'struct list_head list'.
+ * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
*/
-
-//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
-#if 0
-struct pfn_info
-{
- /* Each frame can be threaded onto a doubly-linked list. */
- struct list_head list;
- /* Context-dependent fields follow... */
- union {
-
- /* Page is in use by a domain. */
- struct {
- /* Owner of this page. */
- struct domain *domain;
- /* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
- /* Type reference count and various PGT_xxx flags and fields. */
- u32 type_info;
- } inuse;
-
- /* Page is on a free list. */
- struct {
- /* Mask of possibly-tainted TLBs. */
- unsigned long cpu_mask;
- /* Must be at same offset as 'u.inuse.count_flags'. */
- u32 __unavailable;
- /* Order-size of the free chunk this page is the head of. */
- u8 order;
- } free;
-
- } u;
-
- /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
- u32 tlbflush_timestamp;
-};
-#endif
+#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
struct page
{
@@ -82,7 +51,7 @@ struct page
/* Page is in use by a domain. */
struct {
/* Owner of this page. */
- u64 _domain;
+ u32 _domain;
/* Type reference count and various PGT_xxx flags and fields. */
u32 type_info;
} inuse;
@@ -104,37 +73,49 @@ struct page
#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
-//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
- /* The following page types are MUTUALLY EXCLUSIVE. */
+/* Only a small set of flags is defined so far on IA-64 */
+/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none (0<<29) /* no special uses of this page */
#define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
-#define PGT_gdt_page (5<<29) /* using this page in a GDT? */
-#define PGT_ldt_page (6<<29) /* using this page in an LDT? */
-#define PGT_writeable_page (7<<29) /* has writable mappings of this page? */
-#define PGT_type_mask (7<<29) /* Bits 29-31. */
+#define PGT_writeable_page (5<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (7<<29) /* Bits 29-31. */
+
/* Has this page been validated for use as its current type? */
#define _PGT_validated 28
#define PGT_validated (1<<_PGT_validated)
- /* 28-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1<<28)-1)
+/* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned 27
+#define PGT_pinned (1U<<_PGT_pinned)
+
+/* 27-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1U<<27)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated 31
#define PGC_allocated (1U<<_PGC_allocated)
-#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+/* Set when the page is used as a page table */
+#define _PGC_page_table 30
+#define PGC_page_table (1U<<_PGC_page_table)
+/* 30-bit count of references to this frame. */
+#define PGC_count_mask ((1U<<30)-1)
#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
&& (page_to_phys(_pfn) >= xen_pstart))
-#define pickle_domptr(_d) ((u64)(_d))
-#define unpickle_domptr(_d) ((struct domain*)(_d))
+static inline struct domain *unpickle_domptr(u32 _d)
+{ return (_d == 0) ? NULL : __va(_d); }
+static inline u32 pickle_domptr(struct domain *_d)
+{ return (_d == NULL) ? 0 : (u32)__pa(_d); }
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+/* Dummy for now */
+#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
+
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
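The pickled owner field shrinks from 64 to 32 bits by storing the domain struct's physical address rather than its virtual one; this assumes domain structs live in the direct-mapped xen heap and their physical addresses fit in 32 bits. A round-trip sketch:

    /* Illustrative round trip through the packed owner encoding. */
    struct domain *d = current->domain;
    u32 packed = pickle_domptr(d);                 /* __pa(d), 0 for NULL */
    struct domain *back = unpickle_domptr(packed); /* __va(packed) == d */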
@@ -151,16 +132,46 @@ void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
static inline void put_page(struct pfn_info *page)
{
- dummy();
-}
+ u32 nx, x, y = page->count_info;
+
+ do {
+ x = y;
+ nx = x - 1;
+ }
+ while (unlikely((y = cmpxchg(&page->count_info, x, nx)) != x));
+ if (unlikely((nx & PGC_count_mask) == 0))
+ free_domheap_page(page);
+}
+/* count_info and ownership are checked atomically. */
static inline int get_page(struct pfn_info *page,
struct domain *domain)
{
- dummy();
+ u64 x, nx, y = *((u64*)&page->count_info);
+ u32 _domain = pickle_domptr(domain);
+
+ do {
+ x = y;
+ nx = x + 1;
+ if (unlikely((x & PGC_count_mask) == 0) || /* Not allocated? */
+ unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
+ unlikely((x >> 32) != _domain)) { /* Wrong owner? */
+ DPRINTK("Error pfn %lx: rd=%p, od=%p, caf=%08x, taf=%08x\n",
+ page_to_pfn(page), domain, unpickle_domptr(d),
+ x, page->u.inuse.typeinfo);
+ return 0;
+ }
+ }
+ while (unlikely((y = cmpxchg((u64 *)&page->count_info, x, nx)) != x)); /* 64-bit: covers count_info and _domain together */
+
+ return 1;
}
+/* No type info now */
+#define put_page_and_type(page) put_page((page))
+#define get_page_and_type(page, domain, type) get_page((page))
+
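The dummy reference counting is replaced by real lock-free counting: get_page atomically checks allocation, overflow, and ownership in one 64-bit compare, and put_page frees the frame when the count drops to zero. Typical pairing (sketch; d and page are assumed to be in scope):

    /* Illustrative only: hold a reference across a temporary access. */
    if (get_page(page, d)) {
        /* the frame cannot be freed or change owner here */
        put_page(page);
    }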
#define set_machinetophys(_mfn, _pfn) do { } while(0);
#ifdef MEMORY_GUARD
@@ -364,17 +375,40 @@ extern unsigned long *mpt_table;
#undef machine_to_phys_mapping
#define machine_to_phys_mapping mpt_table
+#define INVALID_M2P_ENTRY (~0UL)
+#define VALID_M2P(_e) (!((_e) & (1UL<<63)))
+#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
/* If pmt table is provided by control panel later, we need __get_user
* here. However if it's allocated by HV, we should access it directly
*/
-#define phys_to_machine_mapping(d, gpfn) \
- ((d) == dom0 ? gpfn : (d)->arch.pmt[(gpfn)])
+#define phys_to_machine_mapping(d, gpfn) \
+ ((d) == dom0 ? (gpfn) : \
+ ((gpfn) <= (d)->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \
+ INVALID_MFN))
#define __mfn_to_gpfn(_d, mfn) \
machine_to_phys_mapping[(mfn)]
#define __gpfn_to_mfn(_d, gpfn) \
phys_to_machine_mapping((_d), (gpfn))
+
+#define __gpfn_invalid(_d, gpfn) \
+ (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
+
+#define __gpfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn)
+
+/* Return the I/O type if true */
+#define __gpfn_is_io(_d, gpfn) \
+ (__gpfn_valid(_d, gpfn) ? \
+ (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0)
+
+#define __gpfn_is_mem(_d, gpfn) \
+ (__gpfn_valid(_d, gpfn) ? \
+ ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0)
+
+
+#define __gpa_to_mpa(_d, gpa) \
+ ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
#endif // CONFIG_VTI
#endif /* __ASM_IA64_MM_H__ */
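Putting the new macros together: a guest physical address resolves through the per-domain pmt, whose entries carry the mfn in the low bits and a GPFN type in bits 56 and up. A worked sketch (d and gpa are illustrative):

    /* Illustrative translation of a guest physical address. */
    unsigned long gpfn = gpa >> PAGE_SHIFT;
    unsigned long mfn  = __gpfn_to_mfn(d, gpfn);  /* INVALID_MFN if gpfn > max_pfn */
    unsigned long mpa  = (mfn << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
    /* __gpa_to_mpa(d, gpa) expands to exactly this computation. */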
diff --git a/xen/include/asm-ia64/tlb.h b/xen/include/asm-ia64/tlb.h
index 7947bf3dcc..049f7b5f21 100644
--- a/xen/include/asm-ia64/tlb.h
+++ b/xen/include/asm-ia64/tlb.h
@@ -39,11 +39,11 @@ typedef struct {
typedef union {
unsigned long value;
struct {
- uint64_t ve : 1;
- uint64_t rv1 : 1;
- uint64_t ps : 6;
- uint64_t rid : 24;
- uint64_t rv2 : 32;
+ unsigned long ve : 1;
+ unsigned long rv1 : 1;
+ unsigned long ps : 6;
+ unsigned long rid : 24;
+ unsigned long rv2 : 32;
};
} rr_t;
#endif // CONFIG_VTI
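The bitfields switch from uint64_t to unsigned long so all members share one base type and pack into a single 64-bit word alongside 'value'. Decoding a region register through the union then looks like this (value source illustrative):

    /* Illustrative decode of a region register value. */
    rr_t rr = vmmu_get_rr(vcpu, va);  /* any raw rr value works too */
    /* rr.rid: region id; rr.ps: preferred page-size exponent; rr.ve: VHPT enable */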
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
index d3ef4c229b..f4ca7de36f 100644
--- a/xen/include/asm-ia64/vcpu.h
+++ b/xen/include/asm-ia64/vcpu.h
@@ -23,8 +23,8 @@ typedef struct pt_regs REGS;
#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT
-#define _RSM 0
-#define _SSM 1
+#define _GET_IFA 0
+#define _THASH 1
#define PRIVOP_COUNT_NINSTS 2
#define PRIVOP_COUNT_NADDRS 30
diff --git a/xen/include/asm-ia64/vhpt.h b/xen/include/asm-ia64/vhpt.h
index e4268f0947..2ef29b32af 100644
--- a/xen/include/asm-ia64/vhpt.h
+++ b/xen/include/asm-ia64/vhpt.h
@@ -140,12 +140,20 @@ CC_##Name:; \
mov r16 = cr.ifa; \
movl r30 = int_counts; \
;; \
+ extr.u r17=r16,59,5; \
+ ;; \
+ cmp.eq p6,p0=0x1e,r17; \
+(p6) br.cond.spnt .Alt_##Name; \
+ ;; \
+ cmp.eq p6,p0=0x1d,r17; \
+(p6) br.cond.spnt .Alt_##Name; \
+ ;; \
thash r28 = r16; \
adds r30 = CAUSE_VHPT_CC_HANDLED << 3, r30; \
;; \
ttag r19 = r16; \
- ld8 r27 = [r30]; \
- adds r17 = VLE_CCHAIN_OFFSET, r28; \
+	ld8 r27 = [r30]; \
+	adds r17 = VLE_CCHAIN_OFFSET, r28; \
;; \
ld8 r17 = [r17]; \
;; \
@@ -192,6 +200,11 @@ CC_##Name:; \
rfi; \
;; \
\
+.Alt_##Name:; \
+ mov pr = r31, 0x1ffff; \
+ ;; \
+ br.cond.sptk late_alt_##Name \
+ ;; \
.Out_##Name:; \
mov pr = r31, 0x1ffff; \
;; \
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
index cee7d89a90..8464c929ac 100644
--- a/xen/include/asm-ia64/vmmu.h
+++ b/xen/include/asm-ia64/vmmu.h
@@ -28,13 +28,13 @@
#include "public/xen.h"
#include "asm/tlb.h"
-#define THASH_TLB_TR 0
-#define THASH_TLB_TC 1
-#define THASH_TLB_FM 2 // foreign map
+//#define THASH_TLB_TR 0
+//#define THASH_TLB_TC 1
-#define THASH_SECTION_TR (1<<0)
-#define THASH_SECTION_TC (1<<1)
-#define THASH_SECTION_FM (1<<2)
+
+// bit definitions of the TR/TC search combination
+//#define THASH_SECTION_TR (1<<0)
+//#define THASH_SECTION_TC (1<<1)
/*
* Next bit definition must be same with THASH_TLB_XX
@@ -43,8 +43,7 @@ typedef union search_section {
struct {
u32 tr : 1;
u32 tc : 1;
- u32 fm : 1;
- u32 rsv: 29;
+ u32 rsv: 30;
};
u32 v;
} search_section_t;
@@ -80,12 +79,10 @@ typedef struct thash_data {
u64 ig1 : 11; //53-63
};
struct {
- u64 __rv1 : 12;
- // sizeof(domid_t) must be less than 38!!! Refer to its definition
- u64 fm_dom : 38; // 12-49 foreign map domain ID
- u64 __rv2 : 3; // 50-52
+ u64 __rv1 : 53; // 0-52
// next extension to ig1, only for TLB instance
- u64 section : 2; // 53-54 TR, TC or FM (thash_TLB_XX)
+ u64 tc : 1; // 53: 1 = TC, 0 = TR
+ u64 locked : 1; // 54: entry is locked
CACHE_LINE_TYPE cl : 1; // I side or D side cache line
u64 nomap : 1; // entry can't be inserted into machine TLB.
u64 __ig1 : 5; // 56-61
@@ -227,8 +224,8 @@ typedef struct thash_cb {
INVALID_ENTRY(hcb, hash) = 1; \
hash->next = NULL; }
-#define PURGABLE_ENTRY(hcb,en) \
- ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC)
+#define PURGABLE_ENTRY(hcb,en) \
+ ((hcb)->ht == THASH_VHPT || ((en)->tc && !((en)->locked)))
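With the section field gone, purgability now keys off the tc and locked bits: VHPT entries are always purgable, vTLB entries only if they are unlocked TCs. A sketch of a sweep honouring this (illustrative):

    /* Illustrative: skip locked TCs and TRs when sweeping a vTLB. */
    if (PURGABLE_ENTRY(hcb, en))
        (hcb->rem_hash)(hcb, en);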
/*
@@ -306,7 +303,7 @@ extern void thash_purge_entries_ex(thash_cb_t *hcb,
u64 rid, u64 va, u64 sz,
search_section_t p_sect,
CACHE_LINE_TYPE cl);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
/*
* Purge all TCs or VHPT entries including those in Hash table.
@@ -323,6 +320,7 @@ extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,
thash_data_t *in);
extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
u64 rid, u64 va,CACHE_LINE_TYPE cl);
+extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
@@ -332,6 +330,7 @@ extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
extern void purge_machine_tc_by_domid(domid_t domid);
extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
+extern thash_cb_t *init_domain_tlb(struct vcpu *d);
#define VTLB_DEBUG
#ifdef VTLB_DEBUG
diff --git a/xen/include/asm-ia64/vmx_platform.h b/xen/include/asm-ia64/vmx_platform.h
index bf59e61fec..37560863fa 100644
--- a/xen/include/asm-ia64/vmx_platform.h
+++ b/xen/include/asm-ia64/vmx_platform.h
@@ -25,7 +25,7 @@
struct mmio_list;
typedef struct virutal_platform_def {
//unsigned long *real_mode_data; /* E820, etc. */
- //unsigned long shared_page_va;
+ unsigned long shared_page_va;
//struct vmx_virpit_t vmx_pit;
//struct vmx_handler_t vmx_handler;
//struct mi_per_cpu_info mpci; /* MMIO */
diff --git a/xen/include/asm-ia64/vmx_ptrace.h b/xen/include/asm-ia64/vmx_ptrace.h
deleted file mode 100644
index 4065c097f4..0000000000
--- a/xen/include/asm-ia64/vmx_ptrace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2003 Intel Co
- * Suresh Siddha <suresh.b.siddha@intel.com>
- * Fenghua Yu <fenghua.yu@intel.com>
- * Arun Sharma <arun.sharma@intel.com>
- *
- * 12/07/98 S. Eranian added pt_regs & switch_stack
- * 12/21/98 D. Mosberger updated to match latest code
- * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
- * 4/28/05 Anthony Xu ported to Xen
- *
- */
-
-struct pt_regs {
- /* The following registers are saved by SAVE_MIN: */
- unsigned long b6; /* scratch */
- unsigned long b7; /* scratch */
-
- unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
- unsigned long ar_ssd; /* reserved for future use (scratch) */
-
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
-
- unsigned long cr_ipsr; /* interrupted task's psr */
- unsigned long cr_iip; /* interrupted task's instruction pointer */
- unsigned long cr_ifs; /* interrupted task's function state */
-
- unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
- unsigned long ar_pfs; /* prev function state */
- unsigned long ar_rsc; /* RSE configuration */
- /* The following two are valid only if cr_ipsr.cpl > 0: */
- unsigned long ar_rnat; /* RSE NaT */
- unsigned long ar_bspstore; /* RSE bspstore */
-
- unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long loadrs; /* size of dirty partition << 16 */
-
- unsigned long r1; /* the gp pointer */
- unsigned long r12; /* interrupted task's memory stack pointer */
- unsigned long r13; /* thread pointer */
-
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long r15; /* scratch */
-
- /* The remaining registers are NOT saved for system calls. */
-
- unsigned long r14; /* scratch */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
-
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
-
- unsigned long ar_ccv; /* compare/exchange value (scratch) */
-
- /*
- * Floating point registers that the kernel considers scratch:
- */
- struct ia64_fpreg f6; /* scratch */
- struct ia64_fpreg f7; /* scratch */
- struct ia64_fpreg f8; /* scratch */
- struct ia64_fpreg f9; /* scratch */
- struct ia64_fpreg f10; /* scratch */
- struct ia64_fpreg f11; /* scratch */
-};
-
-
diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h
index 78149ba31f..be29ed1d47 100644
--- a/xen/include/asm-ia64/vmx_vpd.h
+++ b/xen/include/asm-ia64/vmx_vpd.h
@@ -26,6 +26,7 @@
#include <asm/vtm.h>
#include <asm/vmx_platform.h>
+#include <public/arch-ia64.h>
#define VPD_SHIFT 17 /* 128K requirement */
#define VPD_SIZE (1 << VPD_SHIFT)
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
new file mode 100644
index 0000000000..e7b5cda8b1
--- /dev/null
+++ b/xen/include/asm-x86/event.h
@@ -0,0 +1,16 @@
+/******************************************************************************
+ * event.h
+ *
+ * A nice interface for passing asynchronous events to guest OSes.
+ * (architecture-dependent part)
+ *
+ */
+
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+static inline void evtchn_notify(struct vcpu *v)
+{
+}
+
+#endif
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index ec00554959..cd259c2e04 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -14,11 +14,41 @@
#define _MEMORY_PADDING(_X)
#define MEMORY_PADDING
+/* Maximum number of virtual CPUs in multi-processor guests. */
+/* WARNING: before changing this, check that shared_info fits on a page */
+#define MAX_VIRT_CPUS 1
+
#ifndef __ASSEMBLY__
/* NB. Both the following are 64 bits each. */
typedef unsigned long memory_t; /* Full-sized pointer/address/memory-size. */
+#define MAX_NR_SECTION 32 // at most 32 memory holes
+typedef struct {
+ unsigned long start; /* start of memory hole */
+ unsigned long end; /* end of memory hole */
+} mm_section_t;
+
+typedef struct {
+ unsigned long mfn : 56;
+ unsigned long type: 8;
+} pmt_entry_t;
+
+#define GPFN_MEM (0UL << 56) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
+#define GPFN_LOW_MMIO (2UL << 56) /* Low MMIO range */
+#define GPFN_PIB (3UL << 56) /* PIB base */
+#define GPFN_IOSAPIC (4UL << 56) /* IOSAPIC base */
+#define GPFN_LEGACY_IO (5UL << 56) /* Legacy I/O base */
+#define GPFN_GFW (6UL << 56) /* Guest Firmware */
+#define GPFN_HIGH_MMIO (7UL << 56) /* High MMIO range */
+
+#define GPFN_IO_MASK (7UL << 56) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK (31UL << 59) /* Guest pfn is invalid */
+
+#define INVALID_MFN (~0UL)
+
+
typedef struct
{
} PACKED cpu_user_regs;
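The GPFN type constants above partition pmt entries by what the guest frame maps to. A classification sketch (entry is an illustrative raw pmt value):

    /* Illustrative classification of a pmt entry. */
    if (entry & GPFN_INV_MASK)
        ;  /* invalid guest pfn */
    else if ((entry & GPFN_IO_MASK) == GPFN_MEM)
        ;  /* ordinary memory: the low bits hold the mfn */
    else
        ;  /* an I/O type: framebuffer, MMIO, PIB, IOSAPIC, legacy I/O, GFW */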
@@ -28,11 +58,99 @@ typedef struct
* structure size will still be 8 bytes, so no other alignments will change.
*/
typedef struct {
- u32 tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
- u32 tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
+ unsigned int tsc_bits; /* 0: 32 bits read from the CPU's TSC. */
+ unsigned int tsc_bitshift; /* 4: 'tsc_bits' uses N:N+31 of TSC. */
} PACKED tsc_timestamp_t; /* 8 bytes */
-#include <asm/tlb.h> /* TR_ENTRY */
+struct pt_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+struct pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+
+#ifdef CONFIG_VTI
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long rfi_pfs; /* used for emulating rfi */
+#endif
+
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct pt_fpreg f6; /* scratch */
+ struct pt_fpreg f7; /* scratch */
+ struct pt_fpreg f8; /* scratch */
+ struct pt_fpreg f9; /* scratch */
+ struct pt_fpreg f10; /* scratch */
+ struct pt_fpreg f11; /* scratch */
+};
typedef struct {
unsigned long ipsr;
@@ -64,18 +182,20 @@ typedef struct {
unsigned long krs[8]; // kernel registers
unsigned long pkrs[8]; // protection key registers
unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-//} PACKED arch_shared_info_t;
+//} PACKED arch_vcpu_info_t;
} arch_vcpu_info_t; // DON'T PACK
typedef struct {
+ int evtchn_vector;
+ int domain_controller_evtchn;
+ unsigned int flags;
+//} PACKED arch_shared_info_t;
} arch_shared_info_t; // DON'T PACK
-/*
- * The following is all CPU context. Note that the i387_ctxt block is filled
- * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
- */
typedef struct vcpu_guest_context {
- //unsigned long flags;
+ struct pt_regs regs;
+ arch_vcpu_info_t vcpu;
+ arch_shared_info_t shared;
} PACKED vcpu_guest_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h
index 21f97669d5..1a11a3be86 100644
--- a/xen/include/public/arch-x86_32.h
+++ b/xen/include/public/arch-x86_32.h
@@ -73,6 +73,9 @@
#define machine_to_phys_mapping ((u32 *)HYPERVISOR_VIRT_START)
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
#ifndef __ASSEMBLY__
/* NB. Both the following are 32 bits each. */
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index a4f4ac2fcf..634c53a34e 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -73,6 +73,9 @@
#define HYPERVISOR_VIRT_END (0xFFFF880000000000UL)
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
#ifndef __ASSEMBLY__
/* The machine->physical mapping table starts at this address, read-only. */
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index d46472c16c..2fdd400e92 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -287,9 +287,6 @@ typedef struct
/* Event channel endpoints per domain. */
#define NR_EVENT_CHANNELS 1024
-/* Support for multi-processor guests. */
-#define MAX_VIRT_CPUS 32
-
/*
* Per-VCPU information goes here. This will be cleaned up more when Xen
* actually supports multi-VCPU guests.
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 734427266b..05683344ca 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -13,6 +13,7 @@
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/bitops.h>
+#include <asm/event.h>
/*
* EVENT-CHANNEL NOTIFICATIONS
@@ -34,6 +35,7 @@ static inline void evtchn_set_pending(struct vcpu *v, int port)
{
/* The VCPU pending flag must be set /after/ update to evtchn-pend. */
set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
+ evtchn_notify(v);
/*
* NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of