aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordjm@sportsman.spdomain <djm@sportsman.spdomain>2005-06-09 22:10:19 +0000
committerdjm@sportsman.spdomain <djm@sportsman.spdomain>2005-06-09 22:10:19 +0000
commit03ab2e2693f31d8679ec91c599f622536162f8f4 (patch)
treee9dac32f1675e9e9aa4c76e091b626b204cba5d7
parentbd031b9bbc9f16a3ee3e00d84772605825369d96 (diff)
parent800a239fd39bf9c400be06ef43bfcbf7a5dc94fb (diff)
downloadxen-03ab2e2693f31d8679ec91c599f622536162f8f4.tar.gz
xen-03ab2e2693f31d8679ec91c599f622536162f8f4.tar.bz2
xen-03ab2e2693f31d8679ec91c599f622536162f8f4.zip
bitkeeper revision 1.1706 (42a8be4bKG9EZTToo_Pa4wDcO7VpRw)
Merge http://xen-ia64.bkbits.net/xeno-unstable-ia64.bk into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
-rw-r--r--.rootkeys2
-rw-r--r--xen/arch/ia64/Makefile3
-rw-r--r--xen/arch/ia64/acpi.c4
-rw-r--r--xen/arch/ia64/asm-offsets.c12
-rw-r--r--xen/arch/ia64/dom_fw.c4
-rw-r--r--xen/arch/ia64/domain.c15
-rw-r--r--xen/arch/ia64/hyperprivop.S418
-rw-r--r--xen/arch/ia64/ivt.S25
-rw-r--r--xen/arch/ia64/patch/linux-2.6.11/efi.c12
-rw-r--r--xen/arch/ia64/patch/linux-2.6.11/pgalloc.h16
-rw-r--r--xen/arch/ia64/patch/linux-2.6.11/sn_sal.h33
-rw-r--r--xen/arch/ia64/pcdp.c6
-rw-r--r--xen/arch/ia64/privop.c34
-rw-r--r--xen/arch/ia64/process.c57
-rw-r--r--xen/arch/ia64/regionreg.c23
-rw-r--r--xen/arch/ia64/sn_console.c84
-rw-r--r--xen/arch/ia64/tools/mkbuildtree7
-rw-r--r--xen/arch/ia64/vcpu.c26
-rw-r--r--xen/arch/ia64/xensetup.c3
-rw-r--r--xen/include/asm-ia64/config.h1
-rw-r--r--xen/include/asm-ia64/domain.h7
-rw-r--r--xen/include/asm-ia64/xensystem.h2
-rw-r--r--xen/include/public/arch-ia64.h5
23 files changed, 724 insertions, 75 deletions
diff --git a/.rootkeys b/.rootkeys
index a7745827e3..b64aa43ac2 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -1146,6 +1146,7 @@
428bb037KSxe7_UyqseK5bWhGe3KwA xen/arch/ia64/patch/linux-2.6.11/ptrace.h
425ae516LecDyXlwh3NLBtHZKXmMcA xen/arch/ia64/patch/linux-2.6.11/series
425ae516RFiPn2CGkpJ21LM-1lJcQg xen/arch/ia64/patch/linux-2.6.11/setup.c
+42a8bcc8E6zmTKC5xgOcFLcnzbhVEw xen/arch/ia64/patch/linux-2.6.11/sn_sal.h
425ae516p4ICTkjqNYEfYFxqULj4dw xen/arch/ia64/patch/linux-2.6.11/system.h
425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
@@ -1201,6 +1202,7 @@
41a26ebcJ30TFl1v2kR8rqpEBvOtVw xen/arch/ia64/regionreg.c
421098b69pUiIJrqu_w0JMUnZ2uc2A xen/arch/ia64/smp.c
421098b6_ToSGrf6Pk1Uwg5aMAIBxg xen/arch/ia64/smpboot.c
+42a8bd43dIEIsS-EoQqt5Df1RTr5Hg xen/arch/ia64/sn_console.c
428b9f38JJDW35iDn5DlfXTu700rkQ xen/arch/ia64/tools/README.RunVT
421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
42376c6dfyY0eq8MS2dK3BW2rFuEGg xen/arch/ia64/tools/README.xenia64linux
diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
index d323f407c5..2e59a7d19d 100644
--- a/xen/arch/ia64/Makefile
+++ b/xen/arch/ia64/Makefile
@@ -9,7 +9,8 @@ OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
xenmem.o sal.o cmdline.o mm_init.o tlb.o smpboot.o \
extable.o linuxextable.o xenirq.o xentime.o \
regionreg.o entry.o unaligned.o privop.o vcpu.o \
- irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o
+ irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \
+ sn_console.o
ifeq ($(CONFIG_VTI),y)
OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
diff --git a/xen/arch/ia64/acpi.c b/xen/arch/ia64/acpi.c
index 39ae5fe986..6dbc687b8b 100644
--- a/xen/arch/ia64/acpi.c
+++ b/xen/arch/ia64/acpi.c
@@ -68,7 +68,7 @@ unsigned char acpi_legacy_devices;
const char *
acpi_get_sysname (void)
{
-#ifdef CONFIG_IA64_GENERIC
+/* #ifdef CONFIG_IA64_GENERIC */
unsigned long rsdp_phys;
struct acpi20_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
@@ -101,6 +101,7 @@ acpi_get_sysname (void)
}
return "dig";
+/*
#else
# if defined (CONFIG_IA64_HP_SIM)
return "hpsim";
@@ -114,6 +115,7 @@ acpi_get_sysname (void)
# error Unknown platform. Fix acpi.c.
# endif
#endif
+*/
}
#ifdef CONFIG_ACPI_BOOT
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 56847d15dc..41bbbc7d5b 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -46,12 +46,19 @@ void foo(void)
DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
+ DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
+ DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
+ DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
+ DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
+ DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
+ DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
+ DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
@@ -64,6 +71,11 @@ void foo(void)
DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
+ DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
+ DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
+ DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
+ DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
+
BLANK();
//DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
diff --git a/xen/arch/ia64/dom_fw.c b/xen/arch/ia64/dom_fw.c
index 65f245d870..b57a727a50 100644
--- a/xen/arch/ia64/dom_fw.c
+++ b/xen/arch/ia64/dom_fw.c
@@ -50,7 +50,7 @@ void dom_efi_hypercall_patch(struct domain *d, unsigned long paddr, unsigned lon
if (d == dom0) paddr += dom0_start;
imva = domain_mpa_to_imva(d,paddr);
- build_hypercall_bundle(imva,d->breakimm,hypercall,1);
+ build_hypercall_bundle(imva,d->arch.breakimm,hypercall,1);
}
@@ -61,7 +61,7 @@ void dom_fw_hypercall_patch(struct domain *d, unsigned long paddr, unsigned long
if (d == dom0) paddr += dom0_start;
imva = domain_mpa_to_imva(d,paddr);
- build_hypercall_bundle(imva,d->breakimm,hypercall,ret);
+ build_hypercall_bundle(imva,d->arch.breakimm,hypercall,ret);
}
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index 4ddb1db5e5..869396ed06 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -210,7 +210,7 @@ void arch_do_createdomain(struct vcpu *v)
*/
d->xen_vastart = 0xf000000000000000;
d->xen_vaend = 0xf300000000000000;
- d->breakimm = 0x1000;
+ d->arch.breakimm = 0x1000;
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
@@ -244,9 +244,11 @@ void arch_do_createdomain(struct vcpu *v)
}
#endif
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
- if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
+ if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
v->vcpu_info->arch.metaphysical_mode = 1;
+ v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
+ v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
#define DOMAIN_RID_BITS_DEFAULT 18
if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
BUG();
@@ -254,7 +256,8 @@ void arch_do_createdomain(struct vcpu *v)
d->xen_vastart = 0xf000000000000000;
d->xen_vaend = 0xf300000000000000;
d->shared_info_va = 0xf100000000000000;
- d->breakimm = 0x1000;
+ d->arch.breakimm = 0x1000;
+ v->arch.breakimm = d->arch.breakimm;
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
// to user stack
@@ -403,6 +406,7 @@ printk("new_thread, about to call dom_fw_setup\n");
printk("new_thread, done with dom_fw_setup\n");
// don't forget to set this!
v->vcpu_info->arch.banknum = 1;
+ memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
}
#endif // CONFIG_VTI
@@ -450,7 +454,11 @@ extern unsigned long vhpt_paddr, vhpt_pend;
if (d == dom0) p = map_new_domain0_page(mpaddr);
else
#endif
+ {
p = alloc_domheap_page(d);
+ // zero out pages for security reasons
+ memset(__va(page_to_phys(p)),0,PAGE_SIZE);
+ }
if (unlikely(!p)) {
printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
@@ -509,7 +517,6 @@ tryagain:
}
/* if lookup fails and mpaddr is "legal", "create" the page */
if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
- // FIXME: should zero out pages for security reasons
if (map_new_domain_page(d,mpaddr)) goto tryagain;
}
printk("lookup_domain_mpa: bad mpa %p (> %p\n",
diff --git a/xen/arch/ia64/hyperprivop.S b/xen/arch/ia64/hyperprivop.S
index 4c8ebb09fc..6903c66782 100644
--- a/xen/arch/ia64/hyperprivop.S
+++ b/xen/arch/ia64/hyperprivop.S
@@ -14,6 +14,25 @@
#include <asm/system.h>
#include <public/arch-ia64.h>
+#define FAST_HYPERPRIVOP_CNT
+#define FAST_REFLECT_CNT
+
+// Should be included from common header file (also in process.c)
+// NO PSR_CLR IS DIFFERENT! (CPL)
+#define IA64_PSR_CPL1 (__IA64_UL(1) << IA64_PSR_CPL1_BIT)
+#define IA64_PSR_CPL0 (__IA64_UL(1) << IA64_PSR_CPL0_BIT)
+// note IA64_PSR_PK removed from following, why is this necessary?
+#define DELIVER_PSR_SET (IA64_PSR_IC | IA64_PSR_I | \
+ IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
+ IA64_PSR_IT | IA64_PSR_BN)
+
+#define DELIVER_PSR_CLR (IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
+ IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
+ IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
+ IA64_PSR_MC | IA64_PSR_IS | \
+ IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
+ IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
+
// Note: not hand-scheduled for now
// Registers at entry
// r16 == cr.isr
@@ -22,7 +41,13 @@
// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
- //cover;;
+#if 1
+ // HYPERPRIVOP_SSM_I?
+ // assumes domain interrupts pending, so just do it
+ cmp.eq p7,p6=XEN_HYPER_SSM_I,r17
+(p7) br.sptk.many hyper_ssm_i;;
+#endif
+#if 1
// if domain interrupts pending, give up for now and do it the slow way
adds r20=XSI_PEND_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r20=[r20] ;;
@@ -32,11 +57,291 @@ GLOBAL_ENTRY(fast_hyperprivop)
// HYPERPRIVOP_RFI?
cmp.eq p7,p6=XEN_HYPER_RFI,r17
(p7) br.sptk.many hyper_rfi;;
- // if not rfi, give up for now and do it the slow way
+
+// hard to test, because only called from rbs_switch
+ // HYPERPRIVOP_COVER?
+ cmp.eq p7,p6=XEN_HYPER_COVER,r17
+(p7) br.sptk.many hyper_cover;;
+#endif
+
+#if 1
+ // HYPERPRIVOP_SSM_DT?
+ cmp.eq p7,p6=XEN_HYPER_SSM_DT,r17
+(p7) br.sptk.many hyper_ssm_dt;;
+#endif
+
+#if 1
+ // HYPERPRIVOP_RSM_DT?
+ cmp.eq p7,p6=XEN_HYPER_RSM_DT,r17
+(p7) br.sptk.many hyper_rsm_dt;;
+#endif
+
+ // if not one of the above, give up for now and do it the slow way
+ br.sptk.many dispatch_break_fault ;;
+
+
+// give up for now if: ipsr.be==1, ipsr.pp==1
+// from reflect_interruption, don't need to:
+// - printf first extint (debug only)
+// - check for interrupt collection enabled (routine will force on)
+// - set ifa (not valid for extint)
+// - set iha (not valid for extint)
+// - set itir (not valid for extint)
+// DO need to
+// - increment the HYPER_SSM_I fast_hyperprivop counter
+// - set shared_mem iip to instruction after HYPER_SSM_I
+// - set cr.iip to guest iva+0x3000
+// - set shared_mem ipsr to [vcpu_get_ipsr_int_state]
+// be = pp = bn = 0; dt = it = rt = 1; cpl = 3 or 0;
+// i = shared_mem interrupt_delivery_enabled
+// ic = shared_mem interrupt_collection_enabled
+// ri = instruction after HYPER_SSM_I
+// all other bits unchanged from real cr.ipsr
+// - set cr.ipsr (DELIVER_PSR_SET/CLEAR, don't forget cpl!)
+// - set shared_mem isr: isr.ei to instr following HYPER_SSM_I
+// and isr.ri to cr.isr.ri (all other bits zero)
+// - cover and set shared_mem precover_ifs to cr.ifs
+// ^^^ MISSED THIS FOR fast_break??
+// - set shared_mem ifs and incomplete_regframe to 0
+// - set shared_mem interrupt_delivery_enabled to 0
+// - set shared_mem interrupt_collection_enabled to 0
+// - set r31 to SHAREDINFO_ADDR
+// - virtual bank switch 0
+// maybe implement later
+// - verify that there really IS a deliverable interrupt pending
+// - set shared_mem iva
+// needs to be done but not implemented (in reflect_interruption)
+// - set shared_mem iipa
+// don't know for sure
+// - set shared_mem unat
+// r16 == cr.isr
+// r17 == cr.iim
+// r18 == XSI_PSR_IC
+// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r31 == pr
+ENTRY(hyper_ssm_i)
+ // give up for now if: ipsr.be==1, ipsr.pp==1
+ mov r30=cr.ipsr;;
+ mov r29=cr.iip;;
+ extr.u r21=r30,IA64_PSR_BE_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0
+(p7) br.sptk.many dispatch_break_fault ;;
+ extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0
+(p7) br.sptk.many dispatch_break_fault ;;
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SSM_I);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ // set shared_mem iip to instruction after HYPER_SSM_I
+ extr.u r20=r30,41,2 ;;
+ cmp.eq p6,p7=2,r20 ;;
+(p6) mov r20=0
+(p6) adds r29=16,r29
+(p7) adds r20=1,r20 ;;
+ dep r30=r20,r30,41,2;; // adjust cr.ipsr.ri but don't save yet
+ adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r29 ;;
+ // set shared_mem isr
+ extr.u r16=r16,38,1;; // grab cr.isr.ir bit
+ dep r16=r16,r0,38,1 ;; // insert into cr.isr (rest of bits zero)
+ dep r16=r20,r16,41,2 ;; // deposit cr.isr.ri
+ adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r16 ;;
+ // set cr.ipsr
+ mov r29=r30 ;;
+ movl r28=DELIVER_PSR_SET;;
+ movl r27=~DELIVER_PSR_CLR;;
+ or r29=r29,r28;;
+ and r29=r29,r27;;
+ mov cr.ipsr=r29;;
+ // set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
+ extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
+ cmp.eq p6,p7=3,r29;;
+(p6) dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
+(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
+ ;;
+ // FOR SSM_I ONLY, also turn on psr.i and psr.ic
+ movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT|IA64_PSR_I|IA64_PSR_IC);;
+ movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN);;
+ or r30=r30,r28;;
+ and r30=r30,r27;;
+ adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r30 ;;
+ // set shared_mem interrupt_delivery_enabled to 0
+ // set shared_mem interrupt_collection_enabled to 0
+ st8 [r18]=r0;;
+ // cover and set shared_mem precover_ifs to cr.ifs
+ // set shared_mem ifs and incomplete_regframe to 0
+ cover ;;
+ mov r20=cr.ifs;;
+ adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r21]=r0 ;;
+ adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r0 ;;
+ adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r20 ;;
+ // leave cr.ifs alone for later rfi
+ // set iip to go to domain IVA break instruction vector
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_IVA_OFFSET,r22;;
+ ld8 r23=[r22];;
+ movl r24=0x3000;;
+ add r24=r24,r23;;
+ mov cr.iip=r24;;
+ // OK, now all set to go except for switch to virtual bank0
+ mov r30=r2; mov r29=r3;;
+ adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
+ adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ bsw.1;;
+ st8 [r2]=r16,16; st8 [r3]=r17,16 ;;
+ st8 [r2]=r18,16; st8 [r3]=r19,16 ;;
+ st8 [r2]=r20,16; st8 [r3]=r21,16 ;;
+ st8 [r2]=r22,16; st8 [r3]=r23,16 ;;
+ st8 [r2]=r24,16; st8 [r3]=r25,16 ;;
+ st8 [r2]=r26,16; st8 [r3]=r27,16 ;;
+ st8 [r2]=r28,16; st8 [r3]=r29,16 ;;
+ st8 [r2]=r30,16; st8 [r3]=r31,16 ;;
+ movl r31=XSI_IPSR;;
+ bsw.0 ;;
+ mov r2=r30; mov r3=r29;;
+ adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r20]=r0 ;;
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+
+// reflect domain breaks directly to domain
+// FIXME: DOES NOT WORK YET
+// r16 == cr.isr
+// r17 == cr.iim
+// r18 == XSI_PSR_IC
+// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r31 == pr
+GLOBAL_ENTRY(fast_break_reflect)
+#define FAST_BREAK
+#ifndef FAST_BREAK
br.sptk.many dispatch_break_fault ;;
+#endif
+ mov r30=cr.ipsr;;
+ mov r29=cr.iip;;
+ extr.u r21=r30,IA64_PSR_BE_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0 ;;
+(p7) br.sptk.many dispatch_break_fault ;;
+ extr.u r21=r30,IA64_PSR_PP_BIT,1 ;;
+ cmp.ne p7,p0=r21,r0 ;;
+(p7) br.sptk.many dispatch_break_fault ;;
+#if 1 /* special handling in case running on simulator */
+ movl r20=first_break;;
+ ld4 r23=[r20];;
+ movl r21=0x80001;
+ movl r22=0x80002;;
+ cmp.ne p7,p0=r23,r0;;
+(p7) br.sptk.many dispatch_break_fault ;;
+ cmp.eq p7,p0=r21,r17;
+(p7) br.sptk.many dispatch_break_fault ;;
+ cmp.eq p7,p0=r22,r17;
+(p7) br.sptk.many dispatch_break_fault ;;
+#endif
+#ifdef FAST_REFLECT_CNT
+ movl r20=fast_reflect_count+((0x2c00>>8)*8);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ // save iim in shared_info
+ adds r21=XSI_IIM_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r17;;
+ // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
+ adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r29;;
+ // set shared_mem isr
+ adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r16 ;;
+ // set cr.ipsr
+ mov r29=r30 ;;
+ movl r28=DELIVER_PSR_SET;;
+ movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
+ or r29=r29,r28;;
+ and r29=r29,r27;;
+ mov cr.ipsr=r29;;
+ // set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
+ extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
+ cmp.eq p6,p7=3,r29;;
+(p6) dep r30=-1,r30,IA64_PSR_CPL0_BIT,2
+(p7) dep r30=0,r30,IA64_PSR_CPL0_BIT,2
+ ;;
+ movl r28=(IA64_PSR_DT|IA64_PSR_IT|IA64_PSR_RT);;
+ movl r27=~(IA64_PSR_BE|IA64_PSR_PP|IA64_PSR_BN);;
+ or r30=r30,r28;;
+ and r30=r30,r27;;
+ // also set shared_mem ipsr.i and ipsr.ic appropriately
+ ld8 r20=[r18];;
+ extr.u r22=r20,32,32
+ cmp4.eq p6,p7=r20,r0;;
+(p6) dep r30=0,r30,IA64_PSR_IC_BIT,1
+(p7) dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;;
+ cmp4.eq p6,p7=r22,r0;;
+(p6) dep r30=0,r30,IA64_PSR_I_BIT,1
+(p7) dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
+ adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r30 ;;
+ // set shared_mem interrupt_delivery_enabled to 0
+ // set shared_mem interrupt_collection_enabled to 0
+ st8 [r18]=r0;;
+ // cover and set shared_mem precover_ifs to cr.ifs
+ // set shared_mem ifs and incomplete_regframe to 0
+ cover ;;
+ mov r20=cr.ifs;;
+ adds r21=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r21]=r0 ;;
+ adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r0 ;;
+ adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st8 [r21]=r20 ;;
+ // vpsr.i = vpsr.ic = 0 on delivery of interruption
+ st8 [r18]=r0;;
+ // FIXME: need to save iipa and isr to be arch-compliant
+ // set iip to go to domain IVA break instruction vector
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_IVA_OFFSET,r22;;
+ ld8 r23=[r22];;
+ movl r24=0x2c00;;
+ add r24=r24,r23;;
+ mov cr.iip=r24;;
+ // OK, now all set to go except for switch to virtual bank0
+ mov r30=r2; mov r29=r3;;
+ adds r2=XSI_BANK1_OFS-XSI_PSR_IC_OFS,r18;
+ adds r3=(XSI_BANK1_OFS+8)-XSI_PSR_IC_OFS,r18;;
+ bsw.1;;
+ st8 [r2]=r16,16; st8 [r3]=r17,16 ;;
+ st8 [r2]=r18,16; st8 [r3]=r19,16 ;;
+ st8 [r2]=r20,16; st8 [r3]=r21,16 ;;
+ st8 [r2]=r22,16; st8 [r3]=r23,16 ;;
+ st8 [r2]=r24,16; st8 [r3]=r25,16 ;;
+ st8 [r2]=r26,16; st8 [r3]=r27,16 ;;
+ st8 [r2]=r28,16; st8 [r3]=r29,16 ;;
+ st8 [r2]=r30,16; st8 [r3]=r31,16 ;;
+ movl r31=XSI_IPSR;;
+ bsw.0 ;;
+ mov r2=r30; mov r3=r29;;
+ adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+ st4 [r20]=r0 ;;
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RFI);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r21=[r20];; // r21 = vcr.ipsr
extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
@@ -78,8 +383,6 @@ ENTRY(hyper_rfi)
ld8 r20=[r20];;
dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
mov cr.ifs=r20 ;;
-// TODO: increment a counter so we can count how many rfi's go the fast way
-// but where? counter must be pinned
// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
@@ -101,3 +404,110 @@ ENTRY(hyper_rfi)
;;
rfi
;;
+
+ENTRY(hyper_cover)
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_COVER);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ mov r24=cr.ipsr
+ mov r25=cr.iip;;
+ // skip test for vpsr.ic.. it's a prerequisite for hyperprivops
+ cover ;;
+ adds r20=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
+ mov r30=cr.ifs;;
+ adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18
+ ld4 r21=[r20] ;;
+ cmp.eq p6,p7=r21,r0 ;;
+(p6) st8 [r22]=r30;;
+(p7) st4 [r20]=r0;;
+ mov cr.ifs=r0;;
+ // adjust return address to skip over break instruction
+ extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+
+#if 1
+// return from metaphysical mode (meta=1) to virtual mode (meta=0)
+ENTRY(hyper_ssm_dt)
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SSM_DT);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ mov r24=cr.ipsr
+ mov r25=cr.iip;;
+ adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld4 r21=[r20];;
+ cmp.eq p7,p0=r21,r0 // meta==0?
+(p7) br.spnt.many 1f ;; // already in virtual mode
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_META_SAVED_RR0_OFFSET,r22;;
+ ld4 r23=[r22];;
+ mov rr[r0]=r23;;
+ srlz.i;;
+ st4 [r20]=r0 ;;
+ // adjust return address to skip over break instruction
+1: extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+
+// go to metaphysical mode (meta=1) from virtual mode (meta=0)
+ENTRY(hyper_rsm_dt)
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_RSM_DT);;
+ ld8 r21=[r20];;
+ adds r21=1,r21;;
+ st8 [r20]=r21;;
+#endif
+ mov r24=cr.ipsr
+ mov r25=cr.iip;;
+ adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld4 r21=[r20];;
+ cmp.ne p7,p0=r21,r0 // meta==0?
+(p7) br.spnt.many 1f ;; // already in metaphysical mode
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
+ ld4 r23=[r22];;
+ mov rr[r0]=r23;;
+ srlz.i;;
+ adds r21=1,r0 ;;
+ st4 [r20]=r21 ;;
+ // adjust return address to skip over break instruction
+1: extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+#endif
diff --git a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
index 0938f6075e..4d6785c310 100644
--- a/xen/arch/ia64/ivt.S
+++ b/xen/arch/ia64/ivt.S
@@ -783,21 +783,22 @@ ENTRY(break_fault)
ld8 r19=[r18]
;;
cmp.eq p7,p0=r0,r17 // is this a psuedo-cover?
-(p7) br.sptk.many dispatch_privop_fault
+(p7) br.spnt.many dispatch_privop_fault
;;
- cmp4.ne p7,p0=r0,r19
-(p7) br.sptk.many dispatch_break_fault
- // If we get to here, we have a hyperprivop
- // For now, hyperprivops are handled through the break mechanism
- // Later, they will be fast hand-coded assembly with psr.ic off
+ // if vpsr.ic is off, we have a hyperprivop
+ // A hyperprivop is hand-coded assembly with psr.ic off
// which means no calls, no use of r1-r15 and no memory accesses
// except to pinned addresses!
-#define FAST_HYPERPRIVOPS
-#ifdef FAST_HYPERPRIVOPS
- br.sptk.many fast_hyperprivop
-#else
- br.sptk.many dispatch_break_fault
-#endif
+ cmp4.eq p7,p0=r0,r19
+(p7) br.sptk.many fast_hyperprivop
+ ;;
+ mov r22=IA64_KR(CURRENT);;
+ adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22;;
+ ld4 r23=[r22];;
+ cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
+(p6) br.spnt.many dispatch_break_fault
+ ;;
+ br.sptk.many fast_break_reflect
;;
#endif
mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
diff --git a/xen/arch/ia64/patch/linux-2.6.11/efi.c b/xen/arch/ia64/patch/linux-2.6.11/efi.c
index 11e65f091d..e79d178edc 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/efi.c
+++ b/xen/arch/ia64/patch/linux-2.6.11/efi.c
@@ -1,17 +1,23 @@
--- ../../linux-2.6.11/arch/ia64/kernel/efi.c 2005-03-02 00:37:47.000000000 -0700
-+++ arch/ia64/efi.c 2005-04-29 14:09:24.000000000 -0600
-@@ -320,6 +320,10 @@
++++ arch/ia64/efi.c 2005-06-09 06:15:36.000000000 -0600
+@@ -320,6 +320,16 @@
if (!(md->attribute & EFI_MEMORY_WB))
continue;
+#ifdef XEN
++// this works around a problem in the ski bootloader
++{
++ extern long running_on_sim;
++ if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
++ continue;
++}
+// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
+ if (md->phys_addr >= 0x100000000) continue;
+#endif
/*
* granule_addr is the base of md's first granule.
* [granule_addr - first_non_wb_addr) is guaranteed to
-@@ -719,6 +723,30 @@
+@@ -719,6 +729,30 @@
return 0;
}
diff --git a/xen/arch/ia64/patch/linux-2.6.11/pgalloc.h b/xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
index be8e0bdd42..64ea618a6d 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
+++ b/xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
@@ -1,54 +1,58 @@
--- ../../linux-2.6.11/include/asm-ia64/pgalloc.h 2005-03-02 00:37:31.000000000 -0700
-+++ include/asm-ia64/pgalloc.h 2005-04-29 17:09:20.000000000 -0600
-@@ -61,7 +61,11 @@
++++ include/asm-ia64/pgalloc.h 2005-06-09 13:40:48.000000000 -0600
+@@ -61,7 +61,12 @@
pgd_t *pgd = pgd_alloc_one_fast(mm);
if (unlikely(pgd == NULL)) {
+#ifdef XEN
+ pgd = (pgd_t *)alloc_xenheap_page();
++ memset(pgd,0,PAGE_SIZE);
+#else
pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+#endif
}
return pgd;
}
-@@ -104,7 +108,11 @@
+@@ -104,7 +109,12 @@
static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
+#ifdef XEN
+ pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
++ memset(pmd,0,PAGE_SIZE);
+#else
pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
return pmd;
}
-@@ -136,7 +144,11 @@
+@@ -136,7 +146,12 @@
static inline struct page *
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
+#ifdef XEN
+ struct page *pte = alloc_xenheap_page();
++ memset(pte,0,PAGE_SIZE);
+#else
struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+#endif
return pte;
}
-@@ -144,7 +156,11 @@
+@@ -144,7 +159,12 @@
static inline pte_t *
pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
{
+#ifdef XEN
+ pte_t *pte = (pte_t *)alloc_xenheap_page();
++ memset(pte,0,PAGE_SIZE);
+#else
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+#endif
return pte;
}
-@@ -152,13 +168,21 @@
+@@ -152,13 +172,21 @@
static inline void
pte_free (struct page *pte)
{
diff --git a/xen/arch/ia64/patch/linux-2.6.11/sn_sal.h b/xen/arch/ia64/patch/linux-2.6.11/sn_sal.h
new file mode 100644
index 0000000000..b38c1300f1
--- /dev/null
+++ b/xen/arch/ia64/patch/linux-2.6.11/sn_sal.h
@@ -0,0 +1,33 @@
+--- /data/lwork/attica1/edwardsg/linux-2.6.11/include/asm-ia64/sn/sn_sal.h 2005-03-02 01:38:33 -06:00
++++ include/asm-ia64/sn/sn_sal.h 2005-06-01 14:31:47 -05:00
+@@ -123,6 +123,7 @@
+ #define SALRET_ERROR (-3)
+
+
++#ifndef XEN
+ /**
+ * sn_sal_rev_major - get the major SGI SAL revision number
+ *
+@@ -226,6 +227,7 @@ ia64_sn_get_klconfig_addr(nasid_t nasid)
+ }
+ return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
+ }
++#endif /* !XEN */
+
+ /*
+ * Returns the next console character.
+@@ -304,6 +306,7 @@ ia64_sn_console_putb(const char *buf, in
+ return (u64)0;
+ }
+
++#ifndef XEN
+ /*
+ * Print a platform error record
+ */
+@@ -987,5 +990,5 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opc
+ *v0 = (int) rv.v0;
+ return (int) rv.status;
+ }
+-
++#endif /* !XEN */
+ #endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/xen/arch/ia64/pcdp.c b/xen/arch/ia64/pcdp.c
index e2ab84a8c7..469047a69d 100644
--- a/xen/arch/ia64/pcdp.c
+++ b/xen/arch/ia64/pcdp.c
@@ -26,9 +26,9 @@ setup_serial_console(struct pcdp_uart *uart)
#ifdef XEN
extern char opt_com1[1];
if (opt_com1[0]) return 0;
- sprintf(&opt_com1[0], "0x%lx,%lu,%dn1",
- uart->addr.address, uart->baud,
- uart->bits ? uart->bits : 8);
+ sprintf(&opt_com1[0], "%lu,%dn1,0x%lx,9",
+ uart->baud, uart->bits ? uart->bits : 8,
+ uart->addr.address);
return 0;
#else
#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
index 19ff98fc8d..1f50ea2448 100644
--- a/xen/arch/ia64/privop.c
+++ b/xen/arch/ia64/privop.c
@@ -747,14 +747,16 @@ priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
#define HYPERPRIVOP_COVER 0x4
#define HYPERPRIVOP_ITC_D 0x5
#define HYPERPRIVOP_ITC_I 0x6
-#define HYPERPRIVOP_MAX 0x6
+#define HYPERPRIVOP_SSM_I 0x7
+#define HYPERPRIVOP_MAX 0x7
char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
- 0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i",
+ 0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
0
};
-unsigned long hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
+unsigned long slow_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
+unsigned long fast_hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
/* hyperprivops are generally executed in assembly (with physical psr.ic off)
* so this code is primarily used for debugging them */
@@ -765,13 +767,12 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
INST64 inst;
UINT64 val;
-// FIXME: Add instrumentation for these
// FIXME: Handle faults appropriately for these
if (!iim || iim > HYPERPRIVOP_MAX) {
printf("bad hyperprivop; ignored\n");
return 1;
}
- hyperpriv_cnt[iim]++;
+ slow_hyperpriv_cnt[iim]++;
switch(iim) {
case HYPERPRIVOP_RFI:
(void)vcpu_rfi(v);
@@ -793,6 +794,9 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
inst.inst = 0;
(void)priv_itc_i(v,inst);
return 1;
+ case HYPERPRIVOP_SSM_I:
+ (void)vcpu_set_psr_i(v);
+ return 1;
}
return 0;
}
@@ -981,18 +985,28 @@ int dump_hyperprivop_counts(char *buf)
{
int i;
char *s = buf;
- s += sprintf(s,"Slow hyperprivops:\n");
+ unsigned long total = 0;
+ for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += slow_hyperpriv_cnt[i];
+ s += sprintf(s,"Slow hyperprivops (total %d):\n",total);
+ for (i = 1; i <= HYPERPRIVOP_MAX; i++)
+ if (slow_hyperpriv_cnt[i])
+ s += sprintf(s,"%10d %s\n",
+ slow_hyperpriv_cnt[i], hyperpriv_str[i]);
+ total = 0;
+ for (i = 1; i <= HYPERPRIVOP_MAX; i++) total += fast_hyperpriv_cnt[i];
+ s += sprintf(s,"Fast hyperprivops (total %d):\n",total);
for (i = 1; i <= HYPERPRIVOP_MAX; i++)
- if (hyperpriv_cnt[i])
+ if (fast_hyperpriv_cnt[i])
s += sprintf(s,"%10d %s\n",
- hyperpriv_cnt[i], hyperpriv_str[i]);
+ fast_hyperpriv_cnt[i], hyperpriv_str[i]);
return s - buf;
}
void zero_hyperprivop_counts(void)
{
int i;
- for (i = 0; i <= HYPERPRIVOP_MAX; i++) hyperpriv_cnt[i] = 0;
+ for (i = 0; i <= HYPERPRIVOP_MAX; i++) slow_hyperpriv_cnt[i] = 0;
+ for (i = 0; i <= HYPERPRIVOP_MAX; i++) fast_hyperpriv_cnt[i] = 0;
}
#define TMPBUFLEN 8*1024
@@ -1002,6 +1016,7 @@ int dump_privop_counts_to_user(char __user *ubuf, int len)
int n = dump_privop_counts(buf);
n += dump_hyperprivop_counts(buf + n);
+ n += dump_reflect_counts(buf + n);
#ifdef PRIVOP_ADDR_COUNT
n += dump_privop_addrs(buf + n);
#endif
@@ -1019,6 +1034,7 @@ int zero_privop_counts_to_user(char __user *ubuf, int len)
#ifdef PRIVOP_ADDR_COUNT
zero_privop_addrs();
#endif
+ zero_reflect_counts();
if (len < TMPBUFLEN) return -1;
if (__copy_to_user(ubuf,buf,n)) return -1;
return n;
diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index 414880882a..f664b74a42 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -130,6 +130,42 @@ unsigned long translate_domain_mpaddr(unsigned long mpaddr)
return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}
+unsigned long slow_reflect_count[0x80] = { 0 };
+unsigned long fast_reflect_count[0x80] = { 0 };
+
+#define inc_slow_reflect_count(vec) slow_reflect_count[vec>>8]++;
+
+void zero_reflect_counts(void)
+{
+ int i;
+ for (i=0; i<0x80; i++) slow_reflect_count[i] = 0;
+ for (i=0; i<0x80; i++) fast_reflect_count[i] = 0;
+}
+
+int dump_reflect_counts(char *buf)
+{
+ int i,j,cnt;
+ char *s = buf;
+
+ s += sprintf(s,"Slow reflections by vector:\n");
+ for (i = 0, j = 0; i < 0x80; i++) {
+ if (cnt = slow_reflect_count[i]) {
+ s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
+ if ((j++ & 3) == 3) s += sprintf(s,"\n");
+ }
+ }
+ if (j & 3) s += sprintf(s,"\n");
+ s += sprintf(s,"Fast reflections by vector:\n");
+ for (i = 0, j = 0; i < 0x80; i++) {
+ if (cnt = fast_reflect_count[i]) {
+ s += sprintf(s,"0x%02x00:%10d, ",i,cnt);
+ if ((j++ & 3) == 3) s += sprintf(s,"\n");
+ }
+ }
+ if (j & 3) s += sprintf(s,"\n");
+ return s - buf;
+}
+
void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
{
unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
@@ -165,6 +201,7 @@ panic_domain(regs,"psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.ii
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
// NOTE: nested trap must NOT pass PSCB address
//regs->r31 = (unsigned long) &PSCB(v);
+ inc_slow_reflect_count(vector);
return;
}
@@ -195,10 +232,14 @@ panic_domain(regs,"psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.ii
PSCB(v,interrupt_delivery_enabled) = 0;
PSCB(v,interrupt_collection_enabled) = 0;
+
+ inc_slow_reflect_count(vector);
}
void foodpi(void) {}
+unsigned long pending_false_positive = 0;
+
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
@@ -215,6 +256,8 @@ void deliver_pending_interrupt(struct pt_regs *regs)
printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
}
+ else if (PSCB(v,pending_interruption))
+ ++pending_false_positive;
}
}
@@ -725,30 +768,31 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
vcpu_set_gr(current,8,-1L);
break;
default:
- printf("ia64_handle_break: bad ssc code %lx\n",ssc);
+ printf("ia64_handle_break: bad ssc code %lx, iip=%p\n",ssc,regs->cr_iip);
break;
}
vcpu_increment_iip(current);
}
+int first_break = 1;
+
void
ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
- static int first_time = 1;
struct domain *d = (struct domain *) current->domain;
struct vcpu *v = (struct domain *) current;
extern unsigned long running_on_sim;
- if (first_time) {
+ if (first_break) {
if (platform_is_hp_ski()) running_on_sim = 1;
else running_on_sim = 0;
- first_time = 0;
+ first_break = 0;
}
if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
else do_ssc(vcpu_get_gr(current,36), regs);
}
- else if (iim == d->breakimm) {
+ else if (iim == d->arch.breakimm) {
if (ia64_hypercall(regs))
vcpu_increment_iip(current);
}
@@ -811,7 +855,8 @@ ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long i
check_lazy_cover = 1;
vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break;
case 25:
- vector = IA64_DISABLED_FPREG_VECTOR; break;
+ vector = IA64_DISABLED_FPREG_VECTOR;
+ break;
case 26:
printf("*** NaT fault... attempting to handle as privop\n");
vector = priv_emulate(v,regs,isr);
diff --git a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
index 6742d4f602..6653d4b6a8 100644
--- a/xen/arch/ia64/regionreg.c
+++ b/xen/arch/ia64/regionreg.c
@@ -63,9 +63,14 @@ unsigned long allocate_reserved_rid(void)
// returns -1 if none available
-unsigned long allocate_metaphysical_rid(void)
+unsigned long allocate_metaphysical_rr0(void)
{
- unsigned long rid = allocate_reserved_rid();
+ ia64_rr rrv;
+
+ rrv.rid = allocate_reserved_rid();
+ rrv.ps = PAGE_SHIFT;
+ rrv.ve = 0;
+ return rrv.rrval;
}
int deallocate_metaphysical_rid(unsigned long rid)
@@ -282,22 +287,20 @@ int set_one_rr(unsigned long rr, unsigned long val)
if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
newrrv.ps = PAGE_SHIFT;
+ if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
set_rr(rr,newrrv.rrval);
}
return 1;
}
// set rr0 to the passed rid (for metaphysical mode so don't use domain offset
-int set_metaphysical_rr(unsigned long rr, unsigned long rid)
+int set_metaphysical_rr0(void)
{
+ struct vcpu *v = current;
ia64_rr rrv;
- rrv.rrval = 0;
- rrv.rid = rid;
- rrv.ps = PAGE_SHIFT;
// rrv.ve = 1; FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
- rrv.ve = 0;
- set_rr(rr,rrv.rrval);
+ set_rr(0,v->arch.metaphysical_rr0);
}
// validates/changes region registers 0-6 in the currently executing domain
@@ -322,7 +325,7 @@ void init_all_rr(struct vcpu *v)
ia64_rr rrv;
rrv.rrval = 0;
- rrv.rid = v->domain->metaphysical_rid;
+ rrv.rrval = v->domain->arch.metaphysical_rr0;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
@@ -376,7 +379,7 @@ unsigned long load_region_regs(struct vcpu *v)
ia64_rr rrv;
rrv.rrval = 0;
- rrv.rid = v->domain->metaphysical_rid;
+ rrv.rid = v->domain->arch.metaphysical_rr0;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
rr0 = rrv.rrval;
diff --git a/xen/arch/ia64/sn_console.c b/xen/arch/ia64/sn_console.c
new file mode 100644
index 0000000000..d29a82935c
--- /dev/null
+++ b/xen/arch/ia64/sn_console.c
@@ -0,0 +1,84 @@
+/*
+ * C-Brick Serial Port (and console) driver for SGI Altix machines.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <asm/acpi.h>
+#include <asm/sn/sn_sal.h>
+#include <xen/serial.h>
+
+void sn_putc(struct serial_port *, char);
+
+static struct uart_driver sn_sal_console = {
+ .putc = sn_putc,
+};
+
+/**
+ * early_sn_setup - early setup routine for SN platforms
+ *
+ * pulled from arch/ia64/sn/kernel/setup.c
+ */
+static void __init early_sn_setup(void)
+{
+ efi_system_table_t *efi_systab;
+ efi_config_table_t *config_tables;
+ struct ia64_sal_systab *sal_systab;
+ struct ia64_sal_desc_entry_point *ep;
+ char *p;
+ int i, j;
+
+ /*
+ * Parse enough of the SAL tables to locate the SAL entry point. Since console
+ * IO on SN2 is done via SAL calls, early_printk won't work without this.
+ *
+ * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
+ * Any changes to those files may have to be made here as well.
+ */
+ efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
+ config_tables = __va(efi_systab->tables);
+ for (i = 0; i < efi_systab->nr_tables; i++) {
+ if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
+ 0) {
+ sal_systab = __va(config_tables[i].table);
+ p = (char *)(sal_systab + 1);
+ for (j = 0; j < sal_systab->entry_count; j++) {
+ if (*p == SAL_DESC_ENTRY_POINT) {
+ ep = (struct ia64_sal_desc_entry_point
+ *)p;
+ ia64_sal_handler_init(__va
+ (ep->sal_proc),
+ __va(ep->gp));
+ return;
+ }
+ p += SAL_DESC_SIZE(*p);
+ }
+ }
+ }
+ /* Uh-oh, SAL not available?? */
+ printk(KERN_ERR "failed to find SAL entry point\n");
+}
+
+/**
+ * sn_serial_console_early_setup - Sets up early console output support
+ *
+ * pulled from drivers/serial/sn_console.c
+ */
+int __init sn_serial_console_early_setup(void)
+{
+ if (strcmp("sn2",acpi_get_sysname()))
+ return -1;
+
+ early_sn_setup(); /* Find SAL entry points */
+ serial_register_uart(0, &sn_sal_console, NULL);
+
+ return 0;
+}
+
+/*
+ * sn_putc - Send a character to the console, polled or interrupt mode
+ */
+void sn_putc(struct serial_port *port, char c)
+{
+ return ia64_sn_console_putc(c);
+}
diff --git a/xen/arch/ia64/tools/mkbuildtree b/xen/arch/ia64/tools/mkbuildtree
index e1d373f545..5964c836c8 100644
--- a/xen/arch/ia64/tools/mkbuildtree
+++ b/xen/arch/ia64/tools/mkbuildtree
@@ -45,6 +45,7 @@ fi
mkdir include/asm-generic
mkdir include/asm-ia64/linux
mkdir include/asm-ia64/linux/byteorder
+mkdir include/asm-ia64/sn
# use "gcc -Iinclude/asm-ia64" to find these linux includes
#ln -s $XEN/include/xen $XEN/include/linux
#ln -s $XEN/include/asm-ia64/linux $XEN/include/asm-ia64/xen
@@ -198,6 +199,12 @@ null include/asm-ia64/module.h
null include/asm-ia64/ia32.h
null include/asm-ia64/tlbflush.h
+null include/asm-ia64/sn/arch.h
+null include/asm-ia64/sn/geo.h
+null include/asm-ia64/sn/nodepda.h
+null include/asm-ia64/sn/sn_cpuid.h
+cp_patch include/asm-ia64/sn/sn_sal.h include/asm-ia64/sn/sn_sal.h sn_sal.h
+
softlink include/asm-ia64/acpi.h include/asm-ia64/acpi.h
softlink include/asm-ia64/asmmacro.h include/asm-ia64/asmmacro.h
softlink include/asm-ia64/atomic.h include/asm-ia64/atomic.h
diff --git a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
index fb9db8a35f..b55e5b6bd7 100644
--- a/xen/arch/ia64/vcpu.c
+++ b/xen/arch/ia64/vcpu.c
@@ -117,7 +117,7 @@ void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
{
/* only do something if mode changes */
if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) {
- if (newmode) set_metaphysical_rr(0,vcpu->domain->metaphysical_rid);
+ if (newmode) set_metaphysical_rr0();
else if (PSCB(vcpu,rrs[0]) != -1)
set_one_rr(0, PSCB(vcpu,rrs[0]));
PSCB(vcpu,metaphysical_mode) = newmode;
@@ -170,6 +170,13 @@ IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
return IA64_NO_FAULT;
}
+IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
+{
+ PSCB(vcpu,interrupt_delivery_enabled) = 1;
+ PSCB(vcpu,interrupt_collection_enabled) = 1;
+ return IA64_NO_FAULT;
+}
+
IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
@@ -643,6 +650,7 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
#ifdef HEARTBEAT_FREQ
#define N_DOMS 16 // period in seconds
static long count[N_DOMS] = { 0 };
+ static long nonclockcount[N_DOMS] = { 0 };
REGS *regs = vcpu_regs(vcpu);
unsigned domid = vcpu->domain->domain_id;
#endif
@@ -664,15 +672,15 @@ IA64FAULT vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
}
#ifdef HEARTBEAT_FREQ
if (domid >= N_DOMS) domid = N_DOMS-1;
- if (vector == (PSCB(vcpu,itv) & 0xff) &&
- !(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
- printf("Dom%d heartbeat... iip=%p,psr.i=%d,pend=%d\n",
- domid, regs->cr_iip,
- current->vcpu_info->arch.interrupt_delivery_enabled,
- current->vcpu_info->arch.pending_interruption);
- count[domid] = 0;
- dump_runq();
+ if (vector == (PSCB(vcpu,itv) & 0xff)) {
+ if (!(++count[domid] & ((HEARTBEAT_FREQ*1024)-1))) {
+ printf("Dom%d heartbeat... ticks=%lx,nonticks=%lx\n",
+ domid, count[domid], nonclockcount[domid]);
+ //count[domid] = 0;
+ //dump_runq();
+ }
}
+ else nonclockcount[domid]++;
#endif
// now have an unmasked, pending, deliverable vector!
// getting ivr has "side effects"
diff --git a/xen/arch/ia64/xensetup.c b/xen/arch/ia64/xensetup.c
index e669ae51e7..d95eebfc9f 100644
--- a/xen/arch/ia64/xensetup.c
+++ b/xen/arch/ia64/xensetup.c
@@ -214,6 +214,9 @@ void start_kernel(void)
init_frametable();
+ ia64_fph_enable();
+ __ia64_init_fpu();
+
alloc_dom0();
#ifdef DOMU_BUILD_STAGING
alloc_domU_staging();
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index fbff17469f..1052e595ac 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -19,6 +19,7 @@
#define CONFIG_IA64_GRANULE_16MB
#define CONFIG_EFI_PCDP
+#define CONFIG_SERIAL_SGI_L1_CONSOLE
#ifndef __ASSEMBLY__
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index 6481257eb4..0f0e37895b 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -27,7 +27,7 @@ struct trap_bounce {
struct arch_domain {
struct mm_struct *active_mm;
struct mm_struct *mm;
- int metaphysical_rid;
+ int metaphysical_rr0;
int starting_rid; /* first RID assigned to domain */
int ending_rid; /* one beyond highest RID assigned to domain */
int rid_bits; /* number of virtual rid bits (default: 18) */
@@ -47,11 +47,9 @@ struct arch_domain {
u64 entry;
#endif
};
-#define metaphysical_rid arch.metaphysical_rid
#define starting_rid arch.starting_rid
#define ending_rid arch.ending_rid
#define rid_bits arch.rid_bits
-#define breakimm arch.breakimm
#define xen_vastart arch.xen_vastart
#define xen_vaend arch.xen_vaend
#define shared_info_va arch.shared_info_va
@@ -75,6 +73,9 @@ struct arch_vcpu {
unsigned long xen_timer_interval;
#endif
void *regs; /* temporary until find a better way to do privops */
+ int metaphysical_rr0; // from arch_domain (so is pinned)
+ int metaphysical_saved_rr0; // from arch_domain (so is pinned)
+ int breakimm; // from arch_domain (so is pinned)
struct mm_struct *active_mm;
struct thread_struct _thread; // this must be last
#ifdef CONFIG_VTI
diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
index 7c63a738b0..c7c9771fee 100644
--- a/xen/include/asm-ia64/xensystem.h
+++ b/xen/include/asm-ia64/xensystem.h
@@ -50,6 +50,8 @@ extern struct task_struct *vmx_ia64_switch_to (void *next_task);
} while (0)
#else // CONFIG_VTI
#define __switch_to(prev,next,last) do { \
+ ia64_save_fpu(prev->arch._thread.fph); \
+ ia64_load_fpu(next->arch._thread.fph); \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index d2cc7c07ea..ec00554959 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -81,10 +81,11 @@ typedef struct vcpu_guest_context {
#endif /* !__ASSEMBLY__ */
#define XEN_HYPER_RFI 1
-#define XEN_HYPER_RSM_PSR_DT 2
-#define XEN_HYPER_SSM_PSR_DT 3
+#define XEN_HYPER_RSM_DT 2
+#define XEN_HYPER_SSM_DT 3
#define XEN_HYPER_COVER 4
#define XEN_HYPER_ITC_D 5
#define XEN_HYPER_ITC_I 6
+#define XEN_HYPER_SSM_I 7
#endif /* __HYPERVISOR_IF_IA64_H__ */