author    djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>  2005-09-19 11:08:20 -0600
committer djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>  2005-09-19 11:08:20 -0600
commit    286f47e0ac672c039328947caf9eaf1b236a5dbd (patch)
tree      a8844b171dfde44d5360529f285175fb39f31685
parent    abe0d573b20fb28174704ab1848f98a564e59bb3 (diff)
Merge vcpu phase 2
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>

1. Add six members (r4, r5, r6, r7, eml_unat, rfi_pfs) at the end of pt_regs, so the
   VMM for a non-VT domain no longer needs to save/restore SWITCH_STACK just in case it
   has to modify guest r4-r7. eml_unat is saved in case the VMM needs to modify the
   guest unat; rfi_pfs is used to emulate the guest rfi instruction for a VT domain and
   serves as a dummy member keeping f6, f7, etc. aligned in pt_regs for a non-VT domain.
   Add code to save/restore r4-r7 and eml_unat in minstate.h and entry.S for the non-VT
   domain, and remove the code that saved/restored SWITCH_STACK in ivt.S.
2. Guest banked registers of a VT domain were previously saved directly in the VPD.
   They are now saved to pt_regs on entry to the VMM, for consistency with the non-VT
   domain; vmx_vcpu_bsw0 and vmx_vcpu_bsw1 are rewritten to accommodate this change.
3. After the above changes, all guest registers and NaT bits are saved in the same
   place for both VTI and non-VTI domains, so vcpu_get/set_gr and rse_get/set_gr are
   merged, and all of these functions can handle guest NaTs.
4. Merge vcpu_bsw0 and vcpu_bsw1. The VMM now uses vgr[16], vbgr[16], vnat and vbnat in
   the mapped_regs_t struct (which is also the VPD) to emulate the guest bank-switch
   operation.
5. Remove some CONFIG_VTIs and clean up unused structure members and code.

Note: credit accidentally omitted from "merge vcpu phase 1" is included here. That
patch was based on ver 6723, and dom0 definitely boots with it. It did the following:

1. Merge struct pt_regs.
2. Although the vcpu_info structure had been merged, a non-VT domain used the pointer
   vcpu->vcpu_info->arch.privregs while a VT domain used vcpu->arch.arch_vmx.vpd. The
   two pointers had different values, which means VT and non-VT domains still used
   different privileged-register pages, so vcpu.c could not be merged. The two pointers
   are therefore merged into vcpu->arch.privregs; vcpu->vcpu_info->arch.privregs and
   vcpu->arch.arch_vmx.vpd no longer exist. Why vcpu->arch.privregs?
   a. One less pointer dereference when accessing the privileged-register page.
   b. vcpu->vcpu_info can be accessed by the guest, but the guest cannot reach the
      privileged-register page through that address; it can only reach the page through
      a separate special mapping, so there is no need to expose the pointer to the
      guest by putting it in vcpu->vcpu_info. All accesses to this page go through the
      VCPU(vcpu, y) macro.
3. Merge vcpu_set/get_* for the interruption control registers (cr16 to cr25); the
   corresponding vmx_vcpu_set/get_* functions no longer exist.
   vcpu->arch.arch_vmx.in_service[4] no longer exists; vcpu->arch.insvc[4] is used
   everywhere instead.
4. Clean up unused structure members and code.

Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
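For orientation, an illustrative sketch of the pt_regs tail described in item 1 above.
This is not the actual declaration (that lives in xen/include/public/arch-ia64.h, which
the patch modifies but which is not excerpted below); it only shows the six new members
and the roles the commit message assigns to them.

/* Illustrative sketch only; see xen/include/public/arch-ia64.h for the real layout. */
struct pt_regs {
	/* ... existing scratch-register, branch-register and FP spill area ... */
	unsigned long r4;        /* guest preserved r4-r7, spilled on entry so the  */
	unsigned long r5;        /* VMM can modify them without a full SWITCH_STACK */
	unsigned long r6;
	unsigned long r7;
	unsigned long eml_unat;  /* ar.unat bits covering the spills above */
	unsigned long rfi_pfs;   /* rfi emulation for VT domains; alignment dummy
	                          * (for f6, f7, ...) on non-VT domains */
};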
-rw-r--r--  xen/arch/ia64/asm-offsets.c          |  24
-rw-r--r--  xen/arch/ia64/linux-xen/entry.S      |  37
-rw-r--r--  xen/arch/ia64/linux-xen/entry.h      |  13
-rw-r--r--  xen/arch/ia64/linux-xen/minstate.h   |  17
-rw-r--r--  xen/arch/ia64/linux-xen/unaligned.c  |  18
-rw-r--r--  xen/arch/ia64/vmx/mmio.c             |  26
-rw-r--r--  xen/arch/ia64/vmx/pal_emul.c         |  18
-rw-r--r--  xen/arch/ia64/vmx/vmmu.c             |   2
-rw-r--r--  xen/arch/ia64/vmx/vmx_entry.S        | 182
-rw-r--r--  xen/arch/ia64/vmx/vmx_hypercall.c    |  70
-rw-r--r--  xen/arch/ia64/vmx/vmx_interrupt.c    |   8
-rw-r--r--  xen/arch/ia64/vmx/vmx_ivt.S          |  66
-rw-r--r--  xen/arch/ia64/vmx/vmx_minstate.h     | 138
-rw-r--r--  xen/arch/ia64/vmx/vmx_process.c      | 125
-rw-r--r--  xen/arch/ia64/vmx/vmx_vcpu.c         |  12
-rw-r--r--  xen/arch/ia64/vmx/vmx_virt.c         | 108
-rw-r--r--  xen/arch/ia64/vmx/vtlb.c             |   5
-rw-r--r--  xen/arch/ia64/xen/domain.c           |   3
-rw-r--r--  xen/arch/ia64/xen/ivt.S              |  12
-rw-r--r--  xen/arch/ia64/xen/privop.c           |  26
-rw-r--r--  xen/arch/ia64/xen/process.c          |  22
-rw-r--r--  xen/arch/ia64/xen/vcpu.c             | 106
-rw-r--r--  xen/include/asm-ia64/ia64_int.h      |  10
-rw-r--r--  xen/include/asm-ia64/privop.h        |   8
-rw-r--r--  xen/include/asm-ia64/vcpu.h          |   3
-rw-r--r--  xen/include/asm-ia64/vmx_vcpu.h      |   4
-rw-r--r--  xen/include/asm-ia64/vmx_vpd.h       |   5
-rw-r--r--  xen/include/asm-ia64/xenkregs.h      |   7
-rw-r--r--  xen/include/asm-ia64/xensystem.h     |   4
-rw-r--r--  xen/include/public/arch-ia64.h       |  55
30 files changed, 614 insertions, 520 deletions
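Before the per-file hunks, a hedged sketch of the bank-switch emulation summarized in
item 4 of the commit message. The merged vcpu_bsw0/vcpu_bsw1 live in
xen/arch/ia64/xen/vcpu.c, which this patch changes but which is not excerpted here; the
fragment below only illustrates the idea of swapping r16-r31 between pt_regs and the
vgr[]/vbgr[] arrays in mapped_regs_t. The function name is hypothetical, and the real
code goes through the VCPU(vcpu, field) accessor macro rather than a raw pointer.

/* Hypothetical sketch of guest bsw.0 emulation (bank 1 -> bank 0); see the real
 * merged vcpu_bsw0() in xen/arch/ia64/xen/vcpu.c. */
static void sketch_vcpu_bsw0(struct vcpu *v, struct pt_regs *regs)
{
	mapped_regs_t *vpd = v->arch.privregs; /* pointer merged in phase 1 */
	unsigned long *bank = &regs->r16;      /* r16..r31 are contiguous in pt_regs */
	int i;

	/* r16-r31 currently hold the guest's bank-1 values: park them in vgr[]
	 * and expose the bank-0 values kept in vbgr[] instead. */
	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = bank[i];
		bank[i] = vpd->vbgr[i];
	}
	/* The corresponding NaT bits move between eml_unat and vnat/vbnat in the
	 * same fashion (omitted here; compare save_banked_regs_to_vpd() below). */
}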
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 6242811296..ad0fbcf9b0 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -138,17 +138,6 @@ void foo(void)
DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
-#ifdef CONFIG_VTI
- DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
- DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
- DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
- DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
- DEFINE(IA64_PT_REGS_CR_IIPA_OFFSET, offsetof (struct pt_regs, cr_iipa));
- DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct pt_regs, cr_isr));
- DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
- DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
-#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
@@ -172,6 +161,19 @@ void foo(void)
DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
+ DEFINE(IA64_PT_REGS_R4_OFFSET, offsetof (struct pt_regs, r4));
+ DEFINE(IA64_PT_REGS_R5_OFFSET, offsetof (struct pt_regs, r5));
+ DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
+ DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
+ DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
+ DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
+ DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
+ DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
+ DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
+ DEFINE(IA64_VCPU_OPCODE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.opcode));
+ DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
+ DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
+ DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
BLANK();
diff --git a/xen/arch/ia64/linux-xen/entry.S b/xen/arch/ia64/linux-xen/entry.S
index 53d228cec8..22f68b8950 100644
--- a/xen/arch/ia64/linux-xen/entry.S
+++ b/xen/arch/ia64/linux-xen/entry.S
@@ -633,10 +633,19 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
// new domains are cloned but not exec'ed so switch to user mode here
cmp.ne pKStk,pUStk=r0,r0
#ifdef CONFIG_VTI
- br.cond.spnt ia64_leave_hypervisor
+ br.cond.spnt ia64_leave_hypervisor
#else // CONFIG_VTI
- br.cond.spnt ia64_leave_kernel
+ br.cond.spnt ia64_leave_kernel
#endif // CONFIG_VTI
+
+// adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
+// ;;
+// ld8 r16 = [r16]
+// ;;
+// cmp.ne p6,p7 = r16, r0
+// (p6) br.cond.spnt ia64_leave_hypervisor
+// (p7) br.cond.spnt ia64_leave_kernel
+// ;;
#else
.ret8:
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -875,10 +884,16 @@ GLOBAL_ENTRY(ia64_leave_kernel)
#ifdef XEN
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
+ adds r7 = PT(EML_UNAT)+16,r12
;;
+ ld8 r7 = [r7]
(p6) br.call.sptk.many b0=deliver_pending_interrupt
+ ;;
mov ar.pfs=loc0
+ mov ar.unat=r7 /* load eml_unat */
mov r31=r0
+
+
#else
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
@@ -957,6 +972,23 @@ GLOBAL_ENTRY(ia64_leave_kernel)
ldf.fill f6=[r2],PT(F7)-PT(F6)
;;
ldf.fill f7=[r2],PT(F11)-PT(F7)
+#ifdef XEN
+ ldf.fill f8=[r3],PT(R5)-PT(F8)
+ ;;
+ ldf.fill f11=[r2],PT(R4)-PT(F11)
+ mov ar.ccv=r15
+ ;;
+ ld8.fill r4=[r2],16
+ ld8.fill r5=[r3],16
+ ;;
+ ld8.fill r6=[r2]
+ ld8.fill r7=[r3]
+ ;;
+ srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
+ ;;
+ bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
+ ;;
+#else
ldf.fill f8=[r3],32
;;
srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
@@ -965,6 +997,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
ldf.fill f11=[r2]
bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
;;
+#endif
#ifdef XEN
(pUStk) movl r18=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
(pUStk) ld8 r18=[r18]
diff --git a/xen/arch/ia64/linux-xen/entry.h b/xen/arch/ia64/linux-xen/entry.h
index 48b9470039..6dbe978f73 100644
--- a/xen/arch/ia64/linux-xen/entry.h
+++ b/xen/arch/ia64/linux-xen/entry.h
@@ -23,19 +23,8 @@
#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
-
-#ifdef XEN
-#ifdef CONFIG_VTI
-#define PRED_EMUL 2 /* Need to save r4-r7 for inst emulation */
-#define PRED_NON_EMUL 3 /* No need to save r4-r7 for normal path */
-#define PRED_BN0 6 /* Guest is in bank 0 */
-#define PRED_BN1 7 /* Guest is in bank 1 */
-# define pEml PASTE(p,PRED_EMUL)
-# define pNonEml PASTE(p,PRED_NON_EMUL)
-# define pBN0 PASTE(p,PRED_BN0)
-# define pBN1 PASTE(p,PRED_BN1)
+#ifdef XEN
#define VPD(f) (VPD_##f##_START_OFFSET)
-#endif // CONFIG_VTI
#endif
#define PT_REGS_SAVES(off) \
diff --git a/xen/arch/ia64/linux-xen/minstate.h b/xen/arch/ia64/linux-xen/minstate.h
index 5ab898906e..56e4cb3d3f 100644
--- a/xen/arch/ia64/linux-xen/minstate.h
+++ b/xen/arch/ia64/linux-xen/minstate.h
@@ -241,16 +241,25 @@
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
- stf.spill [r2]=f10; \
- stf.spill [r3]=f11; \
- adds r25=PT(B7)-PT(F11),r3; \
+ stf.spill [r2]=f10,32; \
+ stf.spill [r3]=f11,24; \
;; \
+.mem.offset 0,0; st8.spill [r2]=r4,16; \
+.mem.offset 8,0; st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r6,16; \
+.mem.offset 8,0; st8.spill [r3]=r7; \
+ adds r25=PT(B7)-PT(R7),r3; \
+ ;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
+ mov r26=ar.unat; \
+ ;; \
st8 [r25]=r10; /* ar.ssd */ \
- ;;
+ st8 [r2]=r26; /* eml_unat */ \
+ ;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
diff --git a/xen/arch/ia64/linux-xen/unaligned.c b/xen/arch/ia64/linux-xen/unaligned.c
index ace9c4fa05..29f412ce64 100644
--- a/xen/arch/ia64/linux-xen/unaligned.c
+++ b/xen/arch/ia64/linux-xen/unaligned.c
@@ -201,7 +201,8 @@ static u16 gr_info[32]={
RPT(r1), RPT(r2), RPT(r3),
-#if defined(XEN) && defined(CONFIG_VTI)
+//#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
RPT(r4), RPT(r5), RPT(r6), RPT(r7),
#else //CONFIG_VTI
RSW(r4), RSW(r5), RSW(r6), RSW(r7),
@@ -295,7 +296,8 @@ rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
return reg;
}
-#if defined(XEN) && defined(CONFIG_VTI)
+//#if defined(XEN) && defined(CONFIG_VTI)
+#if defined(XEN)
void
set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat)
{
@@ -402,12 +404,14 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigne
bspstore = ia64_get_bspstore();
}
*val=*addr;
- if(bspstore < rnat_addr){
- *nat=!!(ia64_get_rnat()&nat_mask);
- }else{
- *nat = !!((*rnat_addr)&nat_mask);
+ if(nat){
+ if(bspstore < rnat_addr){
+ *nat=!!(ia64_get_rnat()&nat_mask);
+ }else{
+ *nat = !!((*rnat_addr)&nat_mask);
+ }
+ ia64_set_rsc(old_rsc);
}
- ia64_set_rsc(old_rsc);
}
#else // CONFIG_VTI
diff --git a/xen/arch/ia64/vmx/mmio.c b/xen/arch/ia64/vmx/mmio.c
index 09ff5b9374..f603fb7351 100644
--- a/xen/arch/ia64/vmx/mmio.c
+++ b/xen/arch/ia64/vmx/mmio.c
@@ -419,10 +419,10 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
size=(inst.M1.x6&0x3);
if((inst.M1.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- vmx_vcpu_get_gr(vcpu,inst.M4.r2,&data);
+ vcpu_get_gr_nat(vcpu,inst.M4.r2,&data);
}else if((inst.M1.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- vmx_vcpu_get_gr(vcpu,inst.M1.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M1.r1,&value);
}
}
// Integer Load + Reg update
@@ -430,11 +430,11 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
inst_type = SL_INTEGER;
dir = IOREQ_READ; //write
size = (inst.M2.x6&0x3);
- vmx_vcpu_get_gr(vcpu,inst.M2.r1,&value);
- vmx_vcpu_get_gr(vcpu,inst.M2.r3,&temp);
- vmx_vcpu_get_gr(vcpu,inst.M2.r2,&post_update);
+ vcpu_get_gr_nat(vcpu,inst.M2.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M2.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M2.r2,&post_update);
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
}
// Integer Load/Store + Imm update
else if(inst.M3.major==5){
@@ -442,25 +442,25 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
size=(inst.M3.x6&0x3);
if((inst.M5.x6>>2)>0xb){ // write
dir=IOREQ_WRITE; //write
- vmx_vcpu_get_gr(vcpu,inst.M5.r2,&data);
- vmx_vcpu_get_gr(vcpu,inst.M5.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M5.r2,&data);
+ vcpu_get_gr_nat(vcpu,inst.M5.r3,&temp);
post_update = (inst.M5.i<<7)+inst.M5.imm7;
if(inst.M5.s)
temp -= post_update;
else
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
}else if((inst.M3.x6>>2)<0xb){ // read
dir=IOREQ_READ;
- vmx_vcpu_get_gr(vcpu,inst.M3.r1,&value);
- vmx_vcpu_get_gr(vcpu,inst.M3.r3,&temp);
+ vcpu_get_gr_nat(vcpu,inst.M3.r1,&value);
+ vcpu_get_gr_nat(vcpu,inst.M3.r3,&temp);
post_update = (inst.M3.i<<7)+inst.M3.imm7;
if(inst.M3.s)
temp -= post_update;
else
temp += post_update;
- vmx_vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
+ vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
}
}
@@ -488,7 +488,7 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
data = (value & 0xffffffff00000000U) | (data & 0xffffffffU);
if(inst_type==SL_INTEGER){ //gp
- vmx_vcpu_set_gr(vcpu,inst.M1.r1,data,0);
+ vcpu_set_gr(vcpu,inst.M1.r1,data,0);
}else{
panic("Don't support ldfd now !");
/* switch(inst.M6.f1){
diff --git a/xen/arch/ia64/vmx/pal_emul.c b/xen/arch/ia64/vmx/pal_emul.c
index ab5096eb69..878df476c8 100644
--- a/xen/arch/ia64/vmx/pal_emul.c
+++ b/xen/arch/ia64/vmx/pal_emul.c
@@ -24,18 +24,18 @@ static void
get_pal_parameters (VCPU *vcpu, UINT64 *gr29,
UINT64 *gr30, UINT64 *gr31) {
- vmx_vcpu_get_gr(vcpu,29,gr29);
- vmx_vcpu_get_gr(vcpu,30,gr30);
- vmx_vcpu_get_gr(vcpu,31,gr31);
+ vcpu_get_gr_nat(vcpu,29,gr29);
+ vcpu_get_gr_nat(vcpu,30,gr30);
+ vcpu_get_gr_nat(vcpu,31,gr31);
}
static void
set_pal_result (VCPU *vcpu,struct ia64_pal_retval result) {
- vmx_vcpu_set_gr(vcpu,8, result.status,0);
- vmx_vcpu_set_gr(vcpu,9, result.v0,0);
- vmx_vcpu_set_gr(vcpu,10, result.v1,0);
- vmx_vcpu_set_gr(vcpu,11, result.v2,0);
+ vcpu_set_gr(vcpu,8, result.status,0);
+ vcpu_set_gr(vcpu,9, result.v0,0);
+ vcpu_set_gr(vcpu,10, result.v1,0);
+ vcpu_set_gr(vcpu,11, result.v2,0);
}
@@ -45,7 +45,7 @@ pal_cache_flush (VCPU *vcpu) {
struct ia64_pal_retval result;
get_pal_parameters (vcpu, &gr29, &gr30, &gr31);
- vmx_vcpu_get_gr(vcpu,28,&gr28);
+ vcpu_get_gr_nat(vcpu,28,&gr28);
/* Always call Host Pal in int=1 */
gr30 = gr30 &(~(0x2UL));
@@ -236,7 +236,7 @@ pal_emul( VCPU *vcpu) {
struct ia64_pal_retval result;
- vmx_vcpu_get_gr(vcpu,28,&gr28); //bank1
+ vcpu_get_gr_nat(vcpu,28,&gr28); //bank1
switch (gr28) {
case PAL_CACHE_FLUSH:
diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index 5b8dd8b596..9a00754366 100644
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -713,7 +713,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr)
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
regs=vcpu_regs(vcpu);
- pt_isr.val=regs->cr_isr;
+ pt_isr.val=VMX(vcpu,cr_isr);
visr.val=0;
visr.ei=pt_isr.ei;
visr.ir=pt_isr.ir;
diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S
index bf8e232014..4eda79cc9d 100644
--- a/xen/arch/ia64/vmx/vmx_entry.S
+++ b/xen/arch/ia64/vmx/vmx_entry.S
@@ -83,7 +83,6 @@ GLOBAL_ENTRY(ia64_leave_nested)
;;
adds r21=PT(PR)+16,r12
;;
-
lfetch [r21],PT(CR_IPSR)-PT(PR)
adds r2=PT(B6)+16,r12
adds r3=PT(R16)+16,r12
@@ -216,138 +215,90 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
;;
alloc loc0=ar.pfs,0,1,1,0
adds out0=16,r12
+ adds r7 = PT(EML_UNAT)+16,r12
;;
+ ld8 r7 = [r7]
br.call.sptk.many b0=leave_hypervisor_tail
;;
mov ar.pfs=loc0
- adds r8=IA64_VPD_BASE_OFFSET,r13
- ;;
- ld8 r8=[r8]
- ;;
- adds r9=VPD(VPSR),r8
- ;;
- ld8 r9=[r9]
- ;;
- tbit.z pBN0,pBN1=r9,IA64_PSR_BN_BIT
- ;;
-(pBN0) add r7=VPD(VBNAT),r8;
-(pBN1) add r7=VPD(VNAT),r8;
- ;;
- ld8 r7=[r7]
- ;;
mov ar.unat=r7
-(pBN0) add r4=VPD(VBGR),r8;
-(pBN1) add r4=VPD(VGR),r8;
-(pBN0) add r5=VPD(VBGR)+0x8,r8;
-(pBN1) add r5=VPD(VGR)+0x8,r8;
- ;;
- ld8.fill r16=[r4],16
- ld8.fill r17=[r5],16
- ;;
- ld8.fill r18=[r4],16
- ld8.fill r19=[r5],16
- ;;
- ld8.fill r20=[r4],16
- ld8.fill r21=[r5],16
- ;;
- ld8.fill r22=[r4],16
- ld8.fill r23=[r5],16
- ;;
- ld8.fill r24=[r4],16
- ld8.fill r25=[r5],16
- ;;
- ld8.fill r26=[r4],16
- ld8.fill r27=[r5],16
- ;;
- ld8.fill r28=[r4],16
- ld8.fill r29=[r5],16
- ;;
- ld8.fill r30=[r4],16
- ld8.fill r31=[r5],16
- ;;
- bsw.0
- ;;
- mov r18=r8 //vpd
- mov r19=r9 //vpsr
adds r20=PT(PR)+16,r12
;;
lfetch [r20],PT(CR_IPSR)-PT(PR)
- adds r16=PT(B6)+16,r12
- adds r17=PT(B7)+16,r12
+ adds r2 = PT(B6)+16,r12
+ adds r3 = PT(B7)+16,r12
;;
lfetch [r20]
- mov r21=r13 // get current
;;
- ld8 r30=[r16],16 // load b6
- ld8 r31=[r17],16 // load b7
- add r20=PT(EML_UNAT)+16,r12
+ ld8 r24=[r2],16 /* B6 */
+ ld8 r25=[r3],16 /* B7 */
;;
- ld8 r29=[r20] //load ar_unat
- mov b6=r30
- mov b7=r31
- ld8 r30=[r16],16 //load ar_csd
- ld8 r31=[r17],16 //load ar_ssd
+ ld8 r26=[r2],16 /* ar_csd */
+ ld8 r27=[r3],16 /* ar_ssd */
+ mov b6 = r24
;;
- mov ar.unat=r29
- mov ar.csd=r30
- mov ar.ssd=r31
+ ld8.fill r8=[r2],16
+ ld8.fill r9=[r3],16
+ mov b7 = r25
;;
- ld8.fill r8=[r16],16 //load r8
- ld8.fill r9=[r17],16 //load r9
+ mov ar.csd = r26
+ mov ar.ssd = r27
;;
- ld8.fill r10=[r16],PT(R1)-PT(R10) //load r10
- ld8.fill r11=[r17],PT(R12)-PT(R11) //load r11
+ ld8.fill r10=[r2],PT(R15)-PT(R10)
+ ld8.fill r11=[r3],PT(R14)-PT(R11)
;;
- ld8.fill r1=[r16],16 //load r1
- ld8.fill r12=[r17],16 //load r12
+ ld8.fill r15=[r2],PT(R16)-PT(R15)
+ ld8.fill r14=[r3],PT(R17)-PT(R14)
;;
- ld8.fill r13=[r16],16 //load r13
- ld8 r30=[r17],16 //load ar_fpsr
+ ld8.fill r16=[r2],16
+ ld8.fill r17=[r3],16
;;
- ld8.fill r15=[r16],16 //load r15
- ld8.fill r14=[r17],16 //load r14
- mov ar.fpsr=r30
+ ld8.fill r18=[r2],16
+ ld8.fill r19=[r3],16
;;
- ld8.fill r2=[r16],16 //load r2
- ld8.fill r3=[r17],16 //load r3
+ ld8.fill r20=[r2],16
+ ld8.fill r21=[r3],16
;;
-/*
-(pEml) ld8.fill r4=[r16],16 //load r4
-(pEml) ld8.fill r5=[r17],16 //load r5
+ ld8.fill r22=[r2],16
+ ld8.fill r23=[r3],16
;;
-(pEml) ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6
-(pEml) ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7
+ ld8.fill r24=[r2],16
+ ld8.fill r25=[r3],16
;;
-(pNonEml) adds r16=PT(AR_CCV)-PT(R4),r16
-(pNonEml) adds r17=PT(F7)-PT(R5),r17
+ ld8.fill r26=[r2],16
+ ld8.fill r27=[r3],16
;;
-*/
- ld8.fill r4=[r16],16 //load r4
- ld8.fill r5=[r17],16 //load r5
- ;;
- ld8.fill r6=[r16],PT(AR_CCV)-PT(R6) //load r6
- ld8.fill r7=[r17],PT(F7)-PT(R7) //load r7
+ ld8.fill r28=[r2],16
+ ld8.fill r29=[r3],16
+ ;;
+ ld8.fill r30=[r2],PT(F6)-PT(R30)
+ ld8.fill r31=[r3],PT(F7)-PT(R31)
;;
-
- ld8 r30=[r16],PT(F6)-PT(AR_CCV)
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
;;
- srlz.i // ensure interruption collection is off
+ ldf.fill f6=[r2],32
+ ldf.fill f7=[r3],32
;;
- invala // invalidate ALAT
+ ldf.fill f8=[r2],32
+ ldf.fill f9=[r3],32
;;
- ldf.fill f6=[r16],32
- ldf.fill f7=[r17],32
+ ldf.fill f10=[r2],32
+ ldf.fill f11=[r3],24
;;
- ldf.fill f8=[r16],32
- ldf.fill f9=[r17],32
+ ld8.fill r4=[r2],16 //load r4
+ ld8.fill r5=[r3],16 //load r5
;;
- ldf.fill f10=[r16]
- ldf.fill f11=[r17]
+ ld8.fill r6=[r2] //load r6
+ ld8.fill r7=[r3] //load r7
;;
- mov ar.ccv=r30
- adds r16=PT(CR_IPSR)-PT(F10),r16
- adds r17=PT(CR_IIP)-PT(F11),r17
+ srlz.i // ensure interruption collection is off
+ ;;
+ bsw.0
+ ;;
+ adds r16 = PT(CR_IPSR)+16,r12
+ adds r17 = PT(CR_IIP)+16,r12
+ mov r21=r13 // get current
;;
ld8 r31=[r16],16 // load cr.ipsr
ld8 r30=[r17],16 // load cr.iip
@@ -358,13 +309,26 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
ld8 r27=[r16],16 // load ar.pfs
ld8 r26=[r17],16 // load ar.rsc
;;
- ld8 r25=[r16],16 // load ar.rnat (may be garbage)
- ld8 r24=[r17],16// load ar.bspstore (may be garbage)
+ ld8 r25=[r16],16 // load ar.rnat
+ ld8 r24=[r17],16 // load ar.bspstore
;;
ld8 r23=[r16],16 // load predicates
- ld8 r22=[r17],PT(RFI_PFS)-PT(B0) // load b0
+ ld8 r22=[r17],16 // load b0
;;
ld8 r20=[r16],16 // load ar.rsc value for "loadrs"
+ ld8.fill r1=[r17],16 //load r1
+ ;;
+ ld8.fill r12=[r16],16 //load r12
+ ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13
+ ;;
+ ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
+ ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
+ ;;
+ ld8.fill r3=[r16] //load r3
+ ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) //load ar_ccv
+ ;;
+ mov ar.fpsr=r19
+ mov ar.ccv=r18
;;
//rbs_switch
// loadrs has already been shifted
@@ -389,7 +353,13 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
;;
vmx_dorfirfi_back:
mov ar.pfs=r27
-
+ adds r18=IA64_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18] //vpd
+ ;;
+ adds r19=VPD(VPSR),r18
+ ;;
+ ld8 r19=[r19] //vpsr
//vsa_sync_write_start
movl r20=__vsa_base
;;
diff --git a/xen/arch/ia64/vmx/vmx_hypercall.c b/xen/arch/ia64/vmx/vmx_hypercall.c
index 193ba410fe..6414b01a1d 100644
--- a/xen/arch/ia64/vmx/vmx_hypercall.c
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c
@@ -35,7 +35,7 @@
void hyper_not_support(void)
{
VCPU *vcpu=current;
- vmx_vcpu_set_gr(vcpu, 8, -1, 0);
+ vcpu_set_gr(vcpu, 8, -1, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -43,12 +43,12 @@ void hyper_mmu_update(void)
{
VCPU *vcpu=current;
u64 r32,r33,r34,r35,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
- vmx_vcpu_get_gr(vcpu,19,&r35);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,19,&r35);
ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -65,18 +65,18 @@ unsigned long __hypercall_create_continuation(
if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
panic("PREEMPT happen in multicall\n"); // Not support yet
} else {
- vmx_vcpu_set_gr(vcpu, 15, op, 0);
+ vcpu_set_gr(vcpu, 15, op, 0);
for ( i = 0; i < nr_args; i++) {
switch (i) {
- case 0: vmx_vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
+ case 0: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
break;
- case 1: vmx_vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
+ case 1: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
break;
- case 2: vmx_vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
+ case 2: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
break;
- case 3: vmx_vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
+ case 3: vcpu_set_gr(vcpu, 19, va_arg(args, unsigned long), 0);
break;
- case 4: vmx_vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
+ case 4: vcpu_set_gr(vcpu, 20, va_arg(args, unsigned long), 0);
break;
default: panic("Too many args for hypercall continuation\n");
break;
@@ -93,15 +93,15 @@ void hyper_dom_mem_op(void)
VCPU *vcpu=current;
u64 r32,r33,r34,r35,r36;
u64 ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
- vmx_vcpu_get_gr(vcpu,19,&r35);
- vmx_vcpu_get_gr(vcpu,20,&r36);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,19,&r35);
+ vcpu_get_gr_nat(vcpu,20,&r36);
// ret=do_dom_mem_op(r32,(u64 *)r33,r34,r35,r36);
ret = 0;
printf("do_dom_mem return value: %lx\n", ret);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
/* Hard to define a special return value to indicate hypercall restart.
* So just add a new mark, which is SMP safe
@@ -117,9 +117,9 @@ void hyper_sched_op(void)
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_sched_op(r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -128,9 +128,9 @@ void hyper_dom0_op(void)
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_dom0_op((dom0_op_t *)r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -139,9 +139,9 @@ void hyper_event_channel_op(void)
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_event_channel_op((evtchn_op_t *)r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -149,9 +149,9 @@ void hyper_xen_version(void)
{
VCPU *vcpu=current;
u64 r32,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,16,&r32);
ret=do_xen_version((int )r32);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -174,10 +174,10 @@ void hyper_lock_page(void)
//TODO:
VCPU *vcpu=current;
u64 va,lock, ret;
- vmx_vcpu_get_gr(vcpu,16,&va);
- vmx_vcpu_get_gr(vcpu,17,&lock);
+ vcpu_get_gr_nat(vcpu,16,&va);
+ vcpu_get_gr_nat(vcpu,17,&lock);
ret=do_lock_page(vcpu, va, lock);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -213,10 +213,10 @@ void hyper_set_shared_page(void)
{
VCPU *vcpu=current;
u64 gpa,ret;
- vmx_vcpu_get_gr(vcpu,16,&gpa);
+ vcpu_get_gr_nat(vcpu,16,&gpa);
ret=do_set_shared_page(vcpu, gpa);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
@@ -226,11 +226,11 @@ void hyper_grant_table_op(void)
{
VCPU *vcpu=current;
u64 r32,r33,r34,ret;
- vmx_vcpu_get_gr(vcpu,16,&r32);
- vmx_vcpu_get_gr(vcpu,17,&r33);
- vmx_vcpu_get_gr(vcpu,18,&r34);
+ vcpu_get_gr_nat(vcpu,16,&r32);
+ vcpu_get_gr_nat(vcpu,17,&r33);
+ vcpu_get_gr_nat(vcpu,18,&r34);
ret=do_grant_table_op((unsigned int)r32, (void *)r33, (unsigned int)r34);
- vmx_vcpu_set_gr(vcpu, 8, ret, 0);
+ vcpu_set_gr(vcpu, 8, ret, 0);
}
*/
diff --git a/xen/arch/ia64/vmx/vmx_interrupt.c b/xen/arch/ia64/vmx/vmx_interrupt.c
index 7c5b9815f8..c517cb41ed 100644
--- a/xen/arch/ia64/vmx/vmx_interrupt.c
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c
@@ -37,11 +37,8 @@ collect_interruption(VCPU *vcpu)
IA64_PSR vpsr;
REGS * regs = vcpu_regs(vcpu);
vpsr.val = vmx_vcpu_get_psr(vcpu);
-
+ vcpu_bsw0(vcpu);
if(vpsr.ic){
- extern void vmx_dorfirfi(void);
- if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
- panic("COLLECT interruption for vmx_dorfirfi\n");
/* Sync mpsr id/da/dd/ss/ed bits to vipsr
* since after guest do rfi, we still want these bits on in
@@ -65,7 +62,7 @@ collect_interruption(VCPU *vcpu)
vifs &= ~IA64_IFS_V;
vcpu_set_ifs(vcpu, vifs);
- vcpu_set_iipa(vcpu, regs->cr_iipa);
+ vcpu_set_iipa(vcpu, VMX(vcpu,cr_iipa));
}
vdcr = VCPU(vcpu,dcr);
@@ -88,6 +85,7 @@ collect_interruption(VCPU *vcpu)
vmx_vcpu_set_psr(vcpu, vpsr.val);
}
+
int
inject_guest_interruption(VCPU *vcpu, u64 vec)
{
diff --git a/xen/arch/ia64/vmx/vmx_ivt.S b/xen/arch/ia64/vmx/vmx_ivt.S
index b78489925b..9dc711968a 100644
--- a/xen/arch/ia64/vmx/vmx_ivt.S
+++ b/xen/arch/ia64/vmx/vmx_ivt.S
@@ -690,9 +690,14 @@ END(vmx_single_step_trap)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(vmx_virtualization_fault)
- VMX_DBG_FAULT(37)
mov r31=pr
mov r19=37
+ adds r16 = IA64_VCPU_CAUSE_OFFSET,r21
+ adds r17 = IA64_VCPU_OPCODE_OFFSET,r21
+ ;;
+ st8 [r16] = r24
+ st8 [r17] = r25
+ ;;
br.sptk vmx_dispatch_virtualization_fault
END(vmx_virtualization_fault)
@@ -897,44 +902,45 @@ ENTRY(vmx_dispatch_reflection)
* r31: contains saved predicates (pr)
*/
VMX_SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,4,0
+ alloc r14=ar.pfs,0,0,5,0
mov out0=cr.ifa
mov out1=cr.isr
mov out2=cr.iim
mov out3=r15
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out4=16,r12
br.call.sptk.many b6=vmx_reflect_interruption
END(vmx_dispatch_reflection)
ENTRY(vmx_dispatch_virtualization_fault)
VMX_SAVE_MIN_WITH_COVER_R19
;;
- alloc r14=ar.pfs,0,0,3,0 // now it's safe (must be first in insn group!)
+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
mov out0=r13 //vcpu
- mov out1=r4 //cause
- mov out2=r5 //opcode
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out1=16,sp //regs
br.call.sptk.many b6=vmx_emulate
END(vmx_dispatch_virtualization_fault)
@@ -949,7 +955,7 @@ ENTRY(vmx_dispatch_vexirq)
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
+ adds r3=8,r2 // set up second base pointer
;;
VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
@@ -961,21 +967,21 @@ END(vmx_dispatch_vexirq)
ENTRY(vmx_dispatch_tlb_miss)
VMX_SAVE_MIN_WITH_COVER_R19
alloc r14=ar.pfs,0,0,3,0
- mov out0=r13
+ mov out0=cr.ifa
mov out1=r15
- mov out2=cr.ifa
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out2=16,r12
br.call.sptk.many b6=vmx_hpw_miss
END(vmx_dispatch_tlb_miss)
@@ -986,21 +992,21 @@ ENTRY(vmx_dispatch_break_fault)
;;
alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
mov out0=cr.ifa
- adds out1=16,sp
mov out2=cr.isr // FIXME: pity to make this slow access twice
mov out3=cr.iim // FIXME: pity to make this slow access twice
-
+ adds r3=8,r2 // set up second base pointer
+ ;;
ssm psr.ic
;;
srlz.i // guarantee that interruption collection is on
;;
(p15)ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ adds out1=16,sp
br.call.sptk.many b6=vmx_ia64_handle_break
;;
END(vmx_dispatch_break_fault)
@@ -1013,7 +1019,7 @@ ENTRY(vmx_hypercall_dispatch)
srlz.i // guarantee that interruption collection is on
;;
(p15) ssm psr.i // restore psr.i
- adds r3=16,r2 // set up second base pointer
+ adds r3=8,r2 // set up second base pointer
;;
VMX_SAVE_REST
;;
@@ -1038,19 +1044,19 @@ ENTRY(vmx_dispatch_interrupt)
;;
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
mov out0=cr.ivr // pass cr.ivr as first arg
- add out1=16,sp // pass pointer to pt_regs as second arg
-
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
ssm psr.ic
;;
srlz.i
;;
(p15) ssm psr.i
- adds r3=16,r2 // set up second base pointer for SAVE_REST
- ;;
- VMX_SAVE_REST
movl r14=ia64_leave_hypervisor
;;
+ VMX_SAVE_REST
mov rp=r14
+ ;;
+ add out1=16,sp // pass pointer to pt_regs as second arg
br.call.sptk.many b6=vmx_ia64_handle_irq
END(vmx_dispatch_interrupt)
diff --git a/xen/arch/ia64/vmx/vmx_minstate.h b/xen/arch/ia64/vmx/vmx_minstate.h
index e82e61dee3..2d0dd77e00 100644
--- a/xen/arch/ia64/vmx/vmx_minstate.h
+++ b/xen/arch/ia64/vmx/vmx_minstate.h
@@ -65,7 +65,6 @@
ld8 r25=[r25]; /* read vpd base */ \
ld8 r20=[r20]; /* read entry point */ \
;; \
- mov r6=r25; \
add r20=PAL_VPS_SYNC_READ,r20; \
;; \
{ .mii; \
@@ -80,21 +79,19 @@
br.cond.sptk b0; /* call the service */ \
;; \
}; \
- ld8 r7=[r22]; \
+ ld8 r17=[r22]; \
/* deposite ipsr bit cpl into vpd.vpsr, since epc will change */ \
extr.u r30=r16, IA64_PSR_CPL0_BIT, 2; \
;; \
- dep r7=r30, r7, IA64_PSR_CPL0_BIT, 2; \
- ;; \
+ dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2; \
extr.u r30=r16, IA64_PSR_BE_BIT, 5; \
;; \
- dep r7=r30, r7, IA64_PSR_BE_BIT, 5; \
- ;; \
+ dep r17=r30, r17, IA64_PSR_BE_BIT, 5; \
extr.u r30=r16, IA64_PSR_RI_BIT, 2; \
;; \
- dep r7=r30, r7, IA64_PSR_RI_BIT, 2; \
+ dep r17=r30, r17, IA64_PSR_RI_BIT, 2; \
;; \
- st8 [r22]=r7; \
+ st8 [r22]=r17; \
;;
@@ -156,12 +153,14 @@
VMX_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
- mov r26=ar.unat; /* M */ \
+ mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
+ mov r26=ar.pfs; /* I */ \
mov r18=cr.isr; \
COVER; /* B;; (or nothing) */ \
;; \
tbit.z p6,p0=r29,IA64_PSR_VM_BIT; \
+ ;; \
tbit.nz.or p6,p0 = r18,39; \
;; \
(p6) br.sptk.few vmx_panic; \
@@ -193,7 +192,6 @@
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
- mov r8=ar.pfs; /* I */ \
mov r9=cr.iip; /* M */ \
mov r10=ar.fpsr; /* M */ \
;; \
@@ -201,8 +199,8 @@
st8 [r17]=r30,16; /* save cr.ifs */ \
sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
;; \
- st8 [r16]=r26,16; /* save ar.unat */ \
- st8 [r17]=r8,16; /* save ar.pfs */ \
+ st8 [r16]=r25,16; /* save ar.unat */ \
+ st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
@@ -227,32 +225,18 @@
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
- adds r2=PT(F6),r1; \
- ;; \
- .mem.offset 0,0; st8.spill [r16]=r4,16; \
- .mem.offset 8,0; st8.spill [r17]=r5,16; \
- ;; \
- .mem.offset 0,0; st8.spill [r16]=r6,16; \
- .mem.offset 8,0; st8.spill [r17]=r7,16; \
- mov r20=ar.ccv; \
- ;; \
- mov r18=cr.iipa; \
- mov r4=cr.isr; \
- mov r22=ar.unat; \
- ;; \
- st8 [r16]=r18,16; \
- st8 [r17]=r4; \
- ;; \
- adds r16=PT(EML_UNAT),r1; \
- adds r17=PT(AR_CCV),r1; \
- ;; \
- st8 [r16]=r22,8; \
- st8 [r17]=r20; \
- mov r4=r24; \
- mov r5=r25; \
+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
- st8 [r16]=r0; \
+ adds r16=IA64_VCPU_IIPA_OFFSET,r13; \
+ adds r17=IA64_VCPU_ISR_OFFSET,r13; \
+ mov r26=cr.iipa; \
+ mov r27=cr.isr; \
+ ;; \
+ st8 [r16]=r26; \
+ st8 [r17]=r27; \
+ ;; \
EXTRA; \
+ mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
@@ -268,9 +252,7 @@
* psr.ic: on
* r2: points to &pt_regs.f6
* r3: points to &pt_regs.f7
- * r4,r5,scrach
- * r6: points to vpd
- * r7: vpsr
+ * r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
@@ -278,46 +260,35 @@
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define VMX_SAVE_REST \
- tbit.z pBN0,pBN1=r7,IA64_PSR_BN_BIT; /* guest bank0 or bank1 ? */ \
- ;; \
-(pBN0) add r4=VPD(VBGR),r6; \
-(pBN0) add r5=VPD(VBGR)+0x8,r6; \
-(pBN0) add r7=VPD(VBNAT),r6; \
- ;; \
-(pBN1) add r5=VPD(VGR)+0x8,r6; \
-(pBN1) add r4=VPD(VGR),r6; \
-(pBN1) add r7=VPD(VNAT),r6; \
- ;; \
-.mem.offset 0,0; st8.spill [r4]=r16,16; \
-.mem.offset 8,0; st8.spill [r5]=r17,16; \
+.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r18,16; \
-.mem.offset 8,0; st8.spill [r5]=r19,16; \
+.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r20,16; \
-.mem.offset 8,0; st8.spill [r5]=r21,16; \
+.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 8,0; st8.spill [r3]=r21,16; \
+ mov r18=b6; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r22,16; \
-.mem.offset 8,0; st8.spill [r5]=r23,16; \
+.mem.offset 0,0; st8.spill [r2]=r22,16; \
+.mem.offset 8,0; st8.spill [r3]=r23,16; \
+ mov r19=b7; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r24,16; \
-.mem.offset 8,0; st8.spill [r5]=r25,16; \
+.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r26,16; \
-.mem.offset 8,0; st8.spill [r5]=r27,16; \
+.mem.offset 0,0; st8.spill [r2]=r26,16; \
+.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r28,16; \
-.mem.offset 8,0; st8.spill [r5]=r29,16; \
- mov r26=b6; \
+.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
-.mem.offset 0,0; st8.spill [r4]=r30,16; \
-.mem.offset 8,0; st8.spill [r5]=r31,16; \
- mov r27=b7; \
+.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
- mov r30=ar.unat; \
- ;; \
- st8 [r7]=r30; \
- mov ar.fpsr=r11; /* M-unit */ \
+ mov ar.fpsr=r11; \
+ st8 [r2]=r8,8; \
+ adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
@@ -325,17 +296,24 @@
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
- stf.spill [r2]=f10; \
- stf.spill [r3]=f11; \
+ stf.spill [r2]=f10,32; \
+ stf.spill [r3]=f11,24; \
;; \
- adds r2=PT(B6)-PT(F10),r2; \
- adds r3=PT(B7)-PT(F11),r3; \
- ;; \
- st8 [r2]=r26,16; /* b6 */ \
- st8 [r3]=r27,16; /* b7 */ \
+.mem.offset 0,0; st8.spill [r2]=r4,16; \
+.mem.offset 8,0; st8.spill [r3]=r5,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r6,16; \
+.mem.offset 8,0; st8.spill [r3]=r7; \
+ adds r25=PT(B7)-PT(R7),r3; \
;; \
- st8 [r2]=r9; /* ar.csd */ \
- st8 [r3]=r10; /* ar.ssd */ \
+ st8 [r24]=r18,16; /* b6 */ \
+ st8 [r25]=r19,16; /* b7 */ \
+ ;; \
+ st8 [r24]=r9; /* ar.csd */ \
+ mov r26=ar.unat; \
+ ;; \
+ st8 [r25]=r10; /* ar.ssd */ \
+ st8 [r2]=r26; /* eml_unat */ \
;;
#define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
diff --git a/xen/arch/ia64/vmx/vmx_process.c b/xen/arch/ia64/vmx/vmx_process.c
index 2e53fb6a21..824ea618b1 100644
--- a/xen/arch/ia64/vmx/vmx_process.c
+++ b/xen/arch/ia64/vmx/vmx_process.c
@@ -72,8 +72,8 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
first_time = 0;
}
if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
- if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
- else do_ssc(vcpu_get_gr(current,36), regs);
+ if (running_on_sim) do_ssc(vcpu_get_gr_nat(current,36), regs);
+ else do_ssc(vcpu_get_gr_nat(current,36), regs);
}
#endif
if (iim == d->arch.breakimm) {
@@ -93,7 +93,7 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
break;
case FW_HYPERCALL_SAL_CALL:
for (i = 0; i < 8; i++)
- vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
+ vcpu_get_gr_nat(v, 32+i, &sal_param[i]);
x = sal_emulator(sal_param[0], sal_param[1],
sal_param[2], sal_param[3],
sal_param[4], sal_param[5],
@@ -118,8 +118,8 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
case FW_HYPERCALL_EFI_GET_TIME:
{
unsigned long *tv, *tc;
- vmx_vcpu_get_gr(v, 32, &tv);
- vmx_vcpu_get_gr(v, 33, &tc);
+ vcpu_get_gr_nat(v, 32, &tv);
+ vcpu_get_gr_nat(v, 33, &tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
tv = __va(translate_domain_mpaddr(tv));
if (tc) tc = __va(translate_domain_mpaddr(tc));
@@ -154,7 +154,7 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
pal_emul(current);
vmx_vcpu_increment_iip(current);
} else
- vmx_reflect_interruption(ifa,isr,iim,11);
+ vmx_reflect_interruption(ifa,isr,iim,11,regs);
}
static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
@@ -170,10 +170,9 @@ static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
- UINT64 vector)
+ UINT64 vector,REGS *regs)
{
VCPU *vcpu = current;
- REGS *regs=vcpu_regs(vcpu);
UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
panic("Guest nested fault!");
@@ -189,6 +188,36 @@ void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
inject_guest_interruption(vcpu, vector);
}
+
+void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
+{
+ unsigned long i, * src,* dst, *sunat, *dunat;
+ IA64_PSR vpsr;
+ src=&regs->r16;
+ sunat=&regs->eml_unat;
+ vpsr.val = vmx_vcpu_get_psr(v);
+ if(vpsr.bn){
+ dst = &VCPU(v, vgr[0]);
+ dunat =&VCPU(v, vnat);
+ __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
+ dep %2 = %0, %2, 0, 16;;
+ st8 [%3] = %2;;"
+ ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
+
+ }else{
+ dst = &VCPU(v, vbgr[0]);
+// dunat =&VCPU(v, vbnat);
+// __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
+// dep %2 = %0, %2, 16, 16;;
+// st8 [%3] = %2;;"
+// ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
+
+ }
+ for(i=0; i<16; i++)
+ *dst++ = *src++;
+}
+
+
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
@@ -200,7 +229,6 @@ void leave_hypervisor_tail(struct pt_regs *regs)
if (!is_idle_task(d) ) { // always comes from guest
extern void vmx_dorfirfi(void);
struct pt_regs *user_regs = vcpu_regs(current);
-
if (local_softirq_pending())
do_softirq();
local_irq_disable();
@@ -224,18 +252,22 @@ void leave_hypervisor_tail(struct pt_regs *regs)
VCPU(v, irr[0]) |= 1UL << 0x10;
v->arch.irq_new_pending = 1;
}
-
+
if ( v->arch.irq_new_pending ) {
v->arch.irq_new_pending = 0;
vmx_check_pending_irq(v);
}
+// if (VCPU(v,vac).a_bsw){
+// save_banked_regs_to_vpd(v,regs);
+// }
+
}
}
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
/* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
+void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
{
IA64_PSR vpsr;
CACHE_LINE_TYPE type;
@@ -245,16 +277,17 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
REGS *regs;
thash_cb_t *vtlb, *vhpt;
thash_data_t *data, me;
- vtlb=vmx_vcpu_get_vtlb(vcpu);
+ VCPU *v = current;
+ vtlb=vmx_vcpu_get_vtlb(v);
#ifdef VTLB_DEBUG
check_vtlb_sanity(vtlb);
dump_vtlb(vtlb);
#endif
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- regs = vcpu_regs(vcpu);
- misr.val=regs->cr_isr;
+ vpsr.val = vmx_vcpu_get_psr(v);
+ misr.val=VMX(v,cr_isr);
+
/* TODO
- if(vcpu->domain->id && vec == 2 &&
+ if(v->domain->id && vec == 2 &&
vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
emulate_ins(&v);
return;
@@ -262,110 +295,110 @@ void vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
*/
if((vec==1)&&(!vpsr.it)){
- physical_itlb_miss(vcpu, vadr);
+ physical_itlb_miss(v, vadr);
return;
}
if((vec==2)&&(!vpsr.dt)){
- if(vcpu->domain!=dom0&&__gpfn_is_io(vcpu->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
- emulate_io_inst(vcpu,((vadr<<1)>>1),4); // UC
+ if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
+ emulate_io_inst(v,((vadr<<1)>>1),4); // UC
}else{
- physical_dtlb_miss(vcpu, vadr);
+ physical_dtlb_miss(v, vadr);
}
return;
}
- vrr = vmx_vcpu_rr(vcpu,vadr);
+ vrr = vmx_vcpu_rr(v, vadr);
if(vec == 1) type = ISIDE_TLB;
else if(vec == 2) type = DSIDE_TLB;
else panic("wrong vec\n");
-// prepare_if_physical_mode(vcpu);
+// prepare_if_physical_mode(v);
if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
- if(vcpu->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(vcpu->domain, data->ppn>>(PAGE_SHIFT-12))){
+ if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){
vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
- emulate_io_inst(vcpu, vadr, data->ma);
+ emulate_io_inst(v, vadr, data->ma);
return IA64_FAULT;
}
if ( data->ps != vrr.ps ) {
- machine_tlb_insert(vcpu, data);
+ machine_tlb_insert(v, data);
}
else {
thash_insert(vtlb->ts->vhpt,data,vadr);
}
}else if(type == DSIDE_TLB){
- if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
+ if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- alt_dtlb(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ alt_dtlb(v, vadr);
return IA64_FAULT;
} else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
} else{
- vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
- vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
+ vmx_vcpu_thash(v, vadr, &vhpt_adr);
+ vrr=vmx_vcpu_rr(v,vhpt_adr);
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- dtlb_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ dtlb_fault(v, vadr);
return IA64_FAULT;
}else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
}else{
if(vpsr.ic){
- vcpu_set_isr(vcpu, misr.val);
- dvhpt_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ dvhpt_fault(v, vadr);
return IA64_FAULT;
}else{
if(misr.sp){
//TODO lds emulation
panic("Don't support speculation load");
}else{
- nested_dtlb(vcpu);
+ nested_dtlb(v);
return IA64_FAULT;
}
}
}
}
}else if(type == ISIDE_TLB){
- if(!vhpt_enabled(vcpu, vadr, misr.rs?RSE_REF:DATA_REF)){
+ if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- alt_itlb(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ alt_itlb(v, vadr);
return IA64_FAULT;
} else{
- vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
- vrr=vmx_vcpu_rr(vcpu,vhpt_adr);
+ vmx_vcpu_thash(v, vadr, &vhpt_adr);
+ vrr=vmx_vcpu_rr(v,vhpt_adr);
data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
if(data){
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- itlb_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ itlb_fault(v, vadr);
return IA64_FAULT;
}else{
if(!vpsr.ic){
misr.ni=1;
}
- vcpu_set_isr(vcpu, misr.val);
- ivhpt_fault(vcpu, vadr);
+ vcpu_set_isr(v, misr.val);
+ ivhpt_fault(v, vadr);
return IA64_FAULT;
}
}
diff --git a/xen/arch/ia64/vmx/vmx_vcpu.c b/xen/arch/ia64/vmx/vmx_vcpu.c
index ae59bc2559..c97e593db0 100644
--- a/xen/arch/ia64/vmx/vmx_vcpu.c
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c
@@ -308,6 +308,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
UINT64 ifs, psr;
REGS *regs = vcpu_regs(vcpu);
psr = VCPU(vcpu,ipsr);
+ vcpu_bsw1(vcpu);
vmx_vcpu_set_psr(vcpu,psr);
ifs=VCPU(vcpu,ifs);
if((ifs>>63)&&(ifs<<1)){
@@ -326,7 +327,7 @@ vmx_vcpu_get_psr(VCPU *vcpu)
return VCPU(vcpu,vpsr);
}
-
+#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
@@ -378,8 +379,8 @@ vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
return IA64_NO_FAULT;
}
-
-
+#endif
+#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
{
@@ -387,9 +388,11 @@ vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
int nat;
//TODO, Eddie
if (!regs) return 0;
+#if 0
if (reg >= 16 && reg < 32) {
return vmx_vcpu_get_bgr(vcpu,reg,val);
}
+#endif
getreg(reg,val,&nat,regs); // FIXME: handle NATs later
if(nat){
return IA64_FAULT;
@@ -410,13 +413,16 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
if (!regs) return IA64_ILLOP_FAULT;
if (reg >= sof + 32) return IA64_ILLOP_FAULT;
+#if 0
if ( reg >= 16 && reg < 32 ) {
return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
}
+#endif
setreg(reg,value,nat,regs);
return IA64_NO_FAULT;
}
+#endif
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
diff --git a/xen/arch/ia64/vmx/vmx_virt.c b/xen/arch/ia64/vmx/vmx_virt.c
index 98bd2fa143..b118ac03cd 100644
--- a/xen/arch/ia64/vmx/vmx_virt.c
+++ b/xen/arch/ia64/vmx/vmx_virt.c
@@ -161,13 +161,13 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
/*
if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
- return vmx_vcpu_set_gr(vcpu, tgt, val);
+ return vcpu_set_gr(vcpu, tgt, val);
else return fault;
*/
val = vmx_vcpu_get_psr(vcpu);
val = (val & MASK(0, 32)) | (val & MASK(35, 2));
last_guest_psr = val;
- return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
+ return vcpu_set_gr(vcpu, tgt, val, 0);
}
/**
@@ -177,7 +177,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
UINT64 val;
IA64FAULT fault;
- if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
+ if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic(" get_psr nat bit fault\n");
val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
@@ -229,7 +229,7 @@ IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- return vmx_vcpu_bsw0(vcpu);
+ return vcpu_bsw0(vcpu);
}
IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
@@ -244,7 +244,7 @@ IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- return vmx_vcpu_bsw1(vcpu);
+ return vcpu_bsw1(vcpu);
}
IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
@@ -265,7 +265,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
privilege_op (vcpu);
return IA64_FAULT;
}
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -299,7 +299,7 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -334,8 +334,8 @@ IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
- ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
+ ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
+ ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
#ifdef VMAL_NO_FAULT_CHECK
if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
set_isr_reg_nat_consumption(vcpu,0,0);
@@ -382,20 +382,20 @@ IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
#endif //CHECK_FAULT
}
#ifdef CHECK_FAULT
if(unimplemented_gva(vcpu, r3)){
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
vmx_vcpu_thash(vcpu, r3, &r1);
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
@@ -412,20 +412,20 @@ IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
#endif //CHECK_FAULT
}
#ifdef CHECK_FAULT
if(unimplemented_gva(vcpu, r3)){
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
+ vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
vmx_vcpu_ttag(vcpu, r3, &r1);
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
@@ -448,7 +448,7 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,1);
rnat_comsumption(vcpu);
@@ -470,7 +470,7 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
if(vmx_vcpu_tpa(vcpu, r3, &r1)){
return IA64_FAULT;
}
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
@@ -493,7 +493,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif
- if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
+ if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,1);
rnat_comsumption(vcpu);
@@ -503,7 +503,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
if(vmx_vcpu_tak(vcpu, r3, &r1)){
return IA64_FAULT;
}
- vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+ vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
@@ -531,7 +531,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -588,7 +588,7 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
+ if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -648,7 +648,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
return IA64_FAULT;
}
#endif // VMAL_NO_FAULT_CHECK
- ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pte);
+ ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
#ifdef VMAL_NO_FAULT_CHECK
if( ret1 != IA64_NO_FAULT ){
set_isr_reg_nat_consumption(vcpu,0,0);
@@ -734,7 +734,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
if(inst.M29.ar3!=44){
panic("Can't support ar register other than itc");
}
- if(vmx_vcpu_get_gr(vcpu,inst.M29.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -778,7 +778,7 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
#endif // CHECK_FAULT
u64 r1;
vmx_vcpu_get_itc(vcpu,&r1);
- vmx_vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
+ vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
return IA64_NO_FAULT;
}
@@ -800,7 +800,7 @@ IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -823,7 +823,7 @@ IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -846,7 +846,7 @@ IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -869,7 +869,7 @@ IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -892,7 +892,7 @@ IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -915,7 +915,7 @@ IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
+ if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -949,7 +949,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -963,7 +963,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_rr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
@@ -985,7 +985,7 @@ IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1000,7 +1000,7 @@ IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_pkr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
@@ -1022,7 +1022,7 @@ IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1037,7 +1037,7 @@ IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_dbr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
@@ -1059,7 +1059,7 @@ IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1074,7 +1074,7 @@ IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_ibr(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
@@ -1096,7 +1096,7 @@ IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1111,7 +1111,7 @@ IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_pmc(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
@@ -1124,7 +1124,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif //CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
+ if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1139,7 +1139,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
}
#endif //CHECK_FAULT
vmx_vcpu_get_cpuid(vcpu,r3,&r1);
- return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
+ return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
@@ -1160,7 +1160,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
return IA64_FAULT;
}
#endif // CHECK_FAULT
- if(vmx_vcpu_get_gr(vcpu, inst.M32.r2, &r2)){
+ if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
#ifdef CHECK_FAULT
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
@@ -1214,11 +1214,11 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
#define cr_get(cr) \
((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+ vcpu_set_gr(vcpu, tgt, val,0):fault;
#define vmx_cr_get(cr) \
((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
- vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
+ vcpu_set_gr(vcpu, tgt, val,0):fault;
IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
@@ -1260,9 +1260,9 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
case 64:return vmx_cr_get(lid);
case 65:
vmx_vcpu_get_ivr(vcpu,&val);
- return vmx_vcpu_set_gr(vcpu,tgt,val,0);
+ return vcpu_set_gr(vcpu,tgt,val,0);
case 66:return vmx_cr_get(tpr);
- case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
+ case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
case 68:return vmx_cr_get(irr0);
case 69:return vmx_cr_get(irr1);
case 70:return vmx_cr_get(irr2);
@@ -1306,18 +1306,19 @@ IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
*/
void
-vmx_emulate(VCPU *vcpu, UINT64 cause, UINT64 opcode)
+vmx_emulate(VCPU *vcpu, REGS *regs)
{
IA64_BUNDLE bundle;
int slot;
IA64_SLOT_TYPE slot_type;
IA64FAULT status;
INST64 inst;
- REGS * regs;
- UINT64 iip;
- regs = vcpu_regs(vcpu);
+ UINT64 iip, cause, opcode;
iip = regs->cr_iip;
IA64_PSR vpsr;
+ cause = VMX(vcpu,cause);
+ opcode = VMX(vcpu,opcode);
+
/*
if (privop_trace) {
static long i = 400;
@@ -1356,7 +1357,6 @@ if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
#else
inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
- vcpu_set_regs(vcpu, regs);
/*
* Switch to actual virtual rid in rr0 and rr4,
* which is required by some tlb related instructions.
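Taken together, the vmx_virt.c hunks above simply retarget every guest general-register access to the merged accessors. For reference, a minimal sketch of the resulting idiom, assuming only the prototypes added to vcpu.h further down in this patch (the helper name is invented for illustration):

    /* Illustrative only: mirrors the pattern used throughout the hunks above.
     * vcpu_get_gr_nat() returns IA64_NO_FAULT (0) on success and a non-zero
     * fault code when the source register's NaT bit is set; vcpu_set_gr()
     * takes an explicit NaT bit for the destination. */
    static IA64FAULT emul_copy_guest_gr(VCPU *vcpu, unsigned src, unsigned dst)
    {
        UINT64 val;
        IA64FAULT ret = vcpu_get_gr_nat(vcpu, src, &val);

        if (ret)                                /* non-zero => NaT consumption */
            return ret;
        return vcpu_set_gr(vcpu, dst, val, 0);  /* write with NaT bit clear */
    }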
diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c
index 3307297d2e..78e0b897cb 100644
--- a/xen/arch/ia64/vmx/vtlb.c
+++ b/xen/arch/ia64/vmx/vtlb.c
@@ -421,10 +421,11 @@ static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
*cch = *hash_table;
*hash_table = vhpt_entry;
hash_table->next = cch;
+ if(hash_table->tag==hash_table->next->tag)
+ while(1);
+
}
- if(hash_table->tag==hash_table->next->tag)
- while(1);
}
return /*hash_table*/;
}
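The vtlb.c change narrows a debug trap: the duplicate-tag busy-hang now runs only in the branch where a colliding entry was actually chained in, i.e. where hash_table->next is known to be a freshly linked node. The invariant it guards could be stated as an assertion, roughly as below (BUG_ON is used here for clarity in place of the while(1) hang):

    /* Sketch of the invariant the relocated check enforces after chaining:
     * the new head entry must not carry the same tag as the entry that was
     * just pushed onto its collision chain. */
    static inline void vhpt_check_chain(thash_data_t *head)
    {
        BUG_ON(head->next && head->tag == head->next->tag);
    }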
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 76914bdc0c..f1bf1f3791 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -364,7 +364,8 @@ void new_thread(struct vcpu *v,
#ifdef CONFIG_VTI
vmx_init_all_rr(v);
if (d == dom0)
- VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+// VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L);
+ regs->r28 = dom_fw_setup(d,saved_command_line,256L);
/* Virtual processor context setup */
VCPU(v, vpsr) = IA64_PSR_BN;
VCPU(v, dcr) = 0;
diff --git a/xen/arch/ia64/xen/ivt.S b/xen/arch/ia64/xen/ivt.S
index 82866a2d67..5f7b4db205 100644
--- a/xen/arch/ia64/xen/ivt.S
+++ b/xen/arch/ia64/xen/ivt.S
@@ -1012,7 +1012,8 @@ dispatch_break_fault_post_save:
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_break
+// br.sptk.many ia64_prepare_handle_break
+ br.call.sptk.many b6=ia64_handle_break
END(dispatch_break_fault)
#endif
@@ -1239,7 +1240,8 @@ ENTRY(dispatch_privop_fault)
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_privop
+// br.sptk.many ia64_prepare_handle_privop
+ br.call.sptk.many b6=ia64_handle_privop
END(dispatch_privop_fault)
#endif
@@ -1307,7 +1309,8 @@ ENTRY(dispatch_unaligned_handler)
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_unaligned
+// br.sptk.many ia64_prepare_handle_unaligned
+ br.call.sptk.many b6=ia64_handle_unaligned
END(dispatch_unaligned_handler)
.org ia64_ivt+0x4c00
@@ -1874,7 +1877,8 @@ GLOBAL_ENTRY(dispatch_reflection)
movl r14=ia64_leave_kernel
;;
mov rp=r14
- br.sptk.many ia64_prepare_handle_reflection
+// br.sptk.many ia64_prepare_handle_reflection
+ br.call.sptk.many b6=ia64_handle_reflection
END(dispatch_reflection)
#define SAVE_MIN_COVER_DONE DO_SAVE_MIN(,mov r30=cr.ifs,)
diff --git a/xen/arch/ia64/xen/privop.c b/xen/arch/ia64/xen/privop.c
index 1c13823d4f..36fa02831b 100644
--- a/xen/arch/ia64/xen/privop.c
+++ b/xen/arch/ia64/xen/privop.c
@@ -143,7 +143,7 @@ IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, padr);
+ return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
else return fault;
}
@@ -158,7 +158,7 @@ IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M46.r1, key);
+ return vcpu_set_gr(vcpu, inst.M46.r1, key,0);
else return fault;
}
@@ -244,7 +244,7 @@ IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
UINT64 val;
if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
- return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
+ return vcpu_set_gr(vcpu, inst.M29.r2-64, val,0);
else return IA64_ILLOP_FAULT;
}
else {
@@ -369,12 +369,12 @@ IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
if (inst.M43.r1 > 63) { // privified mov from cpuid
fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
}
else {
fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
}
return fault;
}
@@ -386,7 +386,7 @@ IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
@@ -397,7 +397,7 @@ IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
@@ -408,7 +408,7 @@ IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
else return fault;
}
@@ -420,12 +420,12 @@ IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
if (inst.M43.r1 > 63) { // privified mov from pmd
fault = vcpu_get_pmd(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1-64, val, 0);
}
else {
fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
if (fault == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, inst.M43.r1, val);
+ return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
}
return fault;
}
@@ -434,7 +434,7 @@ unsigned long from_cr_cnt[128] = { 0 };
#define cr_get(cr) \
((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
- vcpu_set_gr(vcpu, tgt, val) : fault;
+ vcpu_set_gr(vcpu, tgt, val, 0) : fault;
IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
{
@@ -460,7 +460,7 @@ IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
case 64:return cr_get(lid);
case 65:return cr_get(ivr);
case 66:return cr_get(tpr);
- case 67:return vcpu_set_gr(vcpu,tgt,0L);
+ case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
case 68:return cr_get(irr0);
case 69:return cr_get(irr1);
case 70:return cr_get(irr2);
@@ -482,7 +482,7 @@ IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
IA64FAULT fault;
if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
- return vcpu_set_gr(vcpu, tgt, val);
+ return vcpu_set_gr(vcpu, tgt, val, 0);
else return fault;
}
diff --git a/xen/arch/ia64/xen/process.c b/xen/arch/ia64/xen/process.c
index 73cae3f323..a58a937947 100644
--- a/xen/arch/ia64/xen/process.c
+++ b/xen/arch/ia64/xen/process.c
@@ -548,7 +548,7 @@ do_ssc(unsigned long ssc, struct pt_regs *regs)
break;
case SSC_GETCHAR:
retval = ia64_ssc(0,0,0,0,ssc);
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_WAIT_COMPLETION:
if (arg0) { // metaphysical address
@@ -562,7 +562,7 @@ do_ssc(unsigned long ssc, struct pt_regs *regs)
/**/ retval = 0;
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_OPEN:
arg1 = vcpu_get_gr(current,33); // access rights
@@ -572,7 +572,7 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
retval = ia64_ssc(arg0,arg1,0,0,ssc);
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
break;
case SSC_WRITE:
case SSC_READ:
@@ -620,7 +620,7 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
req->len = last_count;
}
else retval = -1L;
- vcpu_set_gr(current,8,retval);
+ vcpu_set_gr(current,8,retval,0);
//if (last_count >= PAGE_SIZE) printf("retval=%x\n",retval);
break;
case SSC_CONNECT_INTERRUPT:
@@ -631,7 +631,7 @@ if (!running_on_sim) { printf("SSC_OPEN, not implemented on hardware. (ignoring
(void)ia64_ssc(arg0,arg1,arg2,arg3,ssc);
break;
case SSC_NETDEV_PROBE:
- vcpu_set_gr(current,8,-1L);
+ vcpu_set_gr(current,8,-1L,0);
break;
default:
printf("ia64_handle_break: bad ssc code %lx, iip=%p, b0=%p... spinning\n",ssc,regs->cr_iip,regs->b0);
@@ -763,18 +763,18 @@ unsigned long __hypercall_create_continuation(
if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
panic("PREEMPT happen in multicall\n"); // Not support yet
} else {
- vcpu_set_gr(vcpu, 2, op);
+ vcpu_set_gr(vcpu, 2, op, 0);
for ( i = 0; i < nr_args; i++) {
switch (i) {
- case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long));
+ case 0: vcpu_set_gr(vcpu, 14, va_arg(args, unsigned long), 0);
break;
- case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long));
+ case 1: vcpu_set_gr(vcpu, 15, va_arg(args, unsigned long), 0);
break;
- case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long));
+ case 2: vcpu_set_gr(vcpu, 16, va_arg(args, unsigned long), 0);
break;
- case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long));
+ case 3: vcpu_set_gr(vcpu, 17, va_arg(args, unsigned long), 0);
break;
- case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long));
+ case 4: vcpu_set_gr(vcpu, 18, va_arg(args, unsigned long), 0);
break;
default: panic("Too many args for hypercall continuation\n");
break;
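The hypercall-continuation hunk only threads the new NaT argument through; the register convention itself (hypercall number in r2, up to five arguments in r14-r18, all with NaT clear) is unchanged. A compact restatement, with the va_list unrolled into an array for clarity (the helper is hypothetical):

    /* Illustrative restatement of the argument placement done above. */
    static void place_continuation_args(VCPU *vcpu, unsigned long op,
                                        const unsigned long *arg, unsigned nr)
    {
        static const unsigned arg_reg[5] = { 14, 15, 16, 17, 18 };
        unsigned i;

        vcpu_set_gr(vcpu, 2, op, 0);            /* hypercall number */
        for (i = 0; i < nr && i < 5; i++)
            vcpu_set_gr(vcpu, arg_reg[i], arg[i], 0);
    }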
diff --git a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c
index 0764c16c33..ec3c86e483 100644
--- a/xen/arch/ia64/xen/vcpu.c
+++ b/xen/arch/ia64/xen/vcpu.c
@@ -31,7 +31,8 @@ typedef union {
//typedef struct domain VCPU;
// this def for vcpu_regs won't work if kernel stack is present
-#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
+//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
+#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
#define PSCB(x,y) VCPU(x,y)
#define PSCBX(x,y) x->arch.y
@@ -70,22 +71,45 @@ extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa);
/**************************************************************************
VCPU general register access routines
**************************************************************************/
-
+#ifdef XEN
UINT64
vcpu_get_gr(VCPU *vcpu, unsigned reg)
{
REGS *regs = vcpu_regs(vcpu);
UINT64 val;
-
if (!reg) return 0;
getreg(reg,&val,0,regs); // FIXME: handle NATs later
return val;
}
+IA64FAULT
+vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ int nat;
+ getreg(reg,val,&nat,regs); // FIXME: handle NATs later
+ if(nat)
+ return IA64_NAT_CONSUMPTION_VECTOR;
+ return 0;
+}
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
IA64FAULT
+vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ if (!reg) return IA64_ILLOP_FAULT;
+ long sof = (regs->cr_ifs) & 0x7f;
+ if (reg >= sof + 32) return IA64_ILLOP_FAULT;
+ setreg(reg,value,nat,regs); // FIXME: handle NATs later
+ return IA64_NO_FAULT;
+}
+#else
+// returns:
+// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
+// IA64_NO_FAULT otherwise
+IA64FAULT
vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
{
REGS *regs = vcpu_regs(vcpu);
@@ -97,6 +121,7 @@ vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value)
return IA64_NO_FAULT;
}
+#endif
/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/
@@ -586,11 +611,9 @@ void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
printf("vcpu_pend_interrupt: bad vector\n");
return;
}
-//#ifdef CONFIG_VTI
if ( VMX_DOMAIN(vcpu) ) {
set_bit(vector,VCPU(vcpu,irr));
} else
-//#endif // CONFIG_VTI
{
/* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */
if (test_bit(vector,PSCBX(vcpu,irr))) {
@@ -1360,7 +1383,7 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, UINT64 *pt
vcpu_thash(vcpu, address, &iha);
if (__copy_from_user(&pte, (void *)iha, sizeof(pte)) != 0)
- return IA64_VHPT_TRANS_VECTOR;
+ return IA64_VHPT_FAULT;
/*
* Optimisation: this VHPT walker aborts on not-present pages
@@ -1496,6 +1519,18 @@ printf("%lx=vcpu_get_pmd(%x)\n",val,reg);
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
+#define vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
+do{ \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 0, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 16, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i),"r"(*b1unat),"r"(*b0unat),"r"(*runat),"r"(b1unat), \
+ "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
+}while(0)
IA64FAULT vcpu_bsw0(VCPU *vcpu)
{
@@ -1504,15 +1539,41 @@ IA64FAULT vcpu_bsw0(VCPU *vcpu)
unsigned long *r = &regs->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
- int i;
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &PSCB(vcpu,vbnat);
+ unsigned long *b1unat = &PSCB(vcpu,vnat);
- if (PSCB(vcpu,banknum)) {
- for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
- PSCB(vcpu,banknum) = 0;
- }
+ unsigned long i;
+
+ if(VMX_DOMAIN(vcpu)){
+ if(VCPU(vcpu,vpsr)&IA64_PSR_BN){
+ for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
+ vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
+ }
+ }else{
+ if (PSCB(vcpu,banknum)) {
+ for (i = 0; i < 16; i++) { *b1++ = *r; *r++ = *b0++; }
+ vcpu_bsw0_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ PSCB(vcpu,banknum) = 0;
+ }
+ }
return (IA64_NO_FAULT);
}
+#define vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT) \
+do{ \
+ __asm__ __volatile__ ( \
+ ";;extr.u %0 = %3,%6,16;;\n" \
+ "dep %1 = %0, %1, 16, 16;;\n" \
+ "st8 [%4] = %1\n" \
+ "extr.u %0 = %2, 0, 16;;\n" \
+ "dep %3 = %0, %3, %6, 16;;\n" \
+ "st8 [%5] = %3\n" \
+ ::"r"(i),"r"(*b0unat),"r"(*b1unat),"r"(*runat),"r"(b0unat), \
+ "r"(runat),"i"(IA64_PT_REGS_R16_SLOT):"memory"); \
+}while(0)
+
IA64FAULT vcpu_bsw1(VCPU *vcpu)
{
// TODO: Only allowed for current vcpu
@@ -1520,12 +1581,25 @@ IA64FAULT vcpu_bsw1(VCPU *vcpu)
unsigned long *r = &regs->r16;
unsigned long *b0 = &PSCB(vcpu,bank0_regs[0]);
unsigned long *b1 = &PSCB(vcpu,bank1_regs[0]);
- int i;
+ unsigned long *runat = &regs->eml_unat;
+ unsigned long *b0unat = &PSCB(vcpu,vbnat);
+ unsigned long *b1unat = &PSCB(vcpu,vnat);
- if (!PSCB(vcpu,banknum)) {
- for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
- PSCB(vcpu,banknum) = 1;
- }
+ unsigned long i;
+
+ if(VMX_DOMAIN(vcpu)){
+ if(!(VCPU(vcpu,vpsr)&IA64_PSR_BN)){
+ for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
+ vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ VCPU(vcpu,vpsr) |= IA64_PSR_BN;
+ }
+ }else{
+ if (!PSCB(vcpu,banknum)) {
+ for (i = 0; i < 16; i++) { *b0++ = *r; *r++ = *b1++; }
+ vcpu_bsw1_unat(i,b0unat,b1unat,runat,IA64_PT_REGS_R16_SLOT);
+ PSCB(vcpu,banknum) = 1;
+ }
+ }
return (IA64_NO_FAULT);
}
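The merged vcpu_bsw0/vcpu_bsw1 swap NaT bits as well as register values: the inline-asm macros exchange the 16-bit field of regs->eml_unat covering r16-r31 (starting at bit IA64_PT_REGS_R16_SLOT) with the shadow NaT words in the privileged-register page. A plain-C restatement of what vcpu_bsw0_unat does, keeping the asm's layout of the shadow words (bank 1 NaTs in bits 0-15, bank 0 NaTs in bits 16-31); this is only a sketch, the real code stays in asm so the stores pair with the register copies:

    /* C sketch of the NaT swap performed by the vcpu_bsw0_unat() macro above
     * when the guest switches from bank 1 to bank 0.  'slot' stands for
     * IA64_PT_REGS_R16_SLOT, the bit position of r16's NaT bit in eml_unat. */
    static void bsw0_swap_nats(unsigned long *runat, unsigned long *b0unat,
                               unsigned long *b1unat, unsigned slot)
    {
        unsigned long live  = (*runat  >> slot) & 0xffffUL;  /* live bank 1 NaTs    */
        unsigned long bank0 = (*b0unat >> 16)   & 0xffffUL;  /* stashed bank 0 NaTs */

        *b1unat = (*b1unat & ~0xffffUL) | live;              /* save bank 1 NaTs  */
        *runat  = (*runat & ~(0xffffUL << slot)) | (bank0 << slot); /* load bank 0 */
    }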
diff --git a/xen/include/asm-ia64/ia64_int.h b/xen/include/asm-ia64/ia64_int.h
index 90478f5f6c..5808a99be5 100644
--- a/xen/include/asm-ia64/ia64_int.h
+++ b/xen/include/asm-ia64/ia64_int.h
@@ -33,14 +33,10 @@
#define IA64_TAKEN_BRANCH_TRAP_VECTOR 0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR 0x6000
-#define IA64_NO_FAULT 0x0001
+#define IA64_NO_FAULT 0x0000
+#define IA64_FAULT 0x0001
#define IA64_RFI_IN_PROGRESS 0x0002
#define IA64_RETRY 0x0003
-#ifdef CONFIG_VTI
-#undef IA64_NO_FAULT
-#define IA64_NO_FAULT 0x0000
-#define IA64_FAULT 0x0001
-#endif //CONFIG_VTI
#define IA64_FORCED_IFA 0x0004
#define IA64_ILLOP_FAULT (IA64_GENEX_VECTOR | 0x00)
#define IA64_PRIVOP_FAULT (IA64_GENEX_VECTOR | 0x10)
@@ -49,7 +45,7 @@
#define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40)
#define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80)
#define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR)
-
+#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x10)
#if !defined(__ASSEMBLY__)
typedef unsigned long IA64FAULT;
typedef unsigned long IA64INTVECTOR;
diff --git a/xen/include/asm-ia64/privop.h b/xen/include/asm-ia64/privop.h
index 49c8bd9f11..552ae82f8c 100644
--- a/xen/include/asm-ia64/privop.h
+++ b/xen/include/asm-ia64/privop.h
@@ -2,11 +2,11 @@
#define _XEN_IA64_PRIVOP_H
#include <asm/ia64_int.h>
-#ifdef CONFIG_VTI
+//#ifdef CONFIG_VTI
#include <asm/vmx_vcpu.h>
-#else //CONFIG_VTI
+//#else //CONFIG_VTI
#include <asm/vcpu.h>
-#endif //CONFIG_VTI
+//#endif //CONFIG_VTI
typedef unsigned long IA64_INST;
@@ -95,7 +95,7 @@ typedef union U_INST64_M33 {
typedef union U_INST64_M35 {
IA64_INST inst;
struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
-
+
} INST64_M35;
typedef union U_INST64_M36 {
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
index 105249c8c3..f18bf651ac 100644
--- a/xen/include/asm-ia64/vcpu.h
+++ b/xen/include/asm-ia64/vcpu.h
@@ -35,7 +35,8 @@ struct privop_addr_count {
/* general registers */
extern UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg);
-extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value);
+extern IA64FAULT vcpu_get_gr_nat(VCPU *vcpu, unsigned reg, UINT64 *val);
+extern IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value, int nat);
/* application registers */
extern IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val);
/* psr */
diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h
index 2d351200b7..6f99a392a6 100644
--- a/xen/include/asm-ia64/vmx_vcpu.h
+++ b/xen/include/asm-ia64/vmx_vcpu.h
@@ -93,8 +93,10 @@ extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
+#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
+#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
@@ -453,6 +455,7 @@ IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
+#if 0
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
@@ -467,6 +470,7 @@ IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
VCPU(vcpu,vpsr) |= IA64_PSR_BN;
return (IA64_NO_FAULT);
}
+#endif
#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h
index 5dccaf3f30..04fd57c151 100644
--- a/xen/include/asm-ia64/vmx_vpd.h
+++ b/xen/include/asm-ia64/vmx_vpd.h
@@ -68,6 +68,11 @@ struct arch_vmx_struct {
vtime_t vtm;
unsigned long vrr[8];
unsigned long vkr[8];
+ unsigned long cr_iipa; /* for emulation */
+ unsigned long cr_isr; /* for emulation */
+ unsigned long cause;
+ unsigned long opcode;
+
// unsigned long mrr5;
// unsigned long mrr6;
// unsigned long mrr7;
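With cause and opcode stored in arch_vmx_struct, vmx_emulate() no longer needs them as parameters and fetches them through VMX(vcpu, cause) / VMX(vcpu, opcode), as the vmx_virt.c hunk above shows. A hedged caller-side sketch, assuming the structure is embedded at vcpu->arch.arch_vmx and spelling the field access out directly (the real code would use the VMX() accessor; the function name is invented):

    /* Hypothetical trap-path sketch: record why the guest exited, then hand
     * off to the emulator through its new two-argument signature. */
    void reflect_virtualization_fault(VCPU *vcpu, REGS *regs,
                                      unsigned long cause, unsigned long opcode)
    {
        vcpu->arch.arch_vmx.cause  = cause;   /* new field added above */
        vcpu->arch.arch_vmx.opcode = opcode;  /* new field added above */
        vmx_emulate(vcpu, regs);
    }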
diff --git a/xen/include/asm-ia64/xenkregs.h b/xen/include/asm-ia64/xenkregs.h
index c2eb14e37c..5a0c5c0bd6 100644
--- a/xen/include/asm-ia64/xenkregs.h
+++ b/xen/include/asm-ia64/xenkregs.h
@@ -8,13 +8,6 @@
#define IA64_TR_VHPT 4 /* dtr4: vhpt */
#define IA64_TR_ARCH_INFO 5
-#ifdef CONFIG_VTI
-#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table in domain space */
-#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen image in domain space */
-#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch stub */
-#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest physical memory 256M */
-#endif // CONFIG_VTI
-
/* Processor status register bits: */
#define IA64_PSR_VM_BIT 46
#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
index d5082ac7a8..95284f8a77 100644
--- a/xen/include/asm-ia64/xensystem.h
+++ b/xen/include/asm-ia64/xensystem.h
@@ -16,10 +16,6 @@
/* Define HV space hierarchy */
#define XEN_VIRT_SPACE_LOW 0xe800000000000000
#define XEN_VIRT_SPACE_HIGH 0xf800000000000000
-/* This is address to mapping rr7 switch stub, in region 5 */
-#ifdef CONFIG_VTI
-#define XEN_RR7_SWITCH_STUB 0xb700000000000000
-#endif // CONFIG_VTI
#define XEN_START_ADDR 0xf000000000000000
#define HYPERVISOR_VIRT_START 0xf000000000000000
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index ae738a3d8d..05900bfa0b 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -94,38 +94,22 @@ typedef struct cpu_user_regs{
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
-
- union {
- struct {
- /* The following registers are saved by SAVE_REST: */
- unsigned long r16; /* scratch */
- unsigned long r17; /* scratch */
- unsigned long r18; /* scratch */
- unsigned long r19; /* scratch */
- unsigned long r20; /* scratch */
- unsigned long r21; /* scratch */
- unsigned long r22; /* scratch */
- unsigned long r23; /* scratch */
- unsigned long r24; /* scratch */
- unsigned long r25; /* scratch */
- unsigned long r26; /* scratch */
- unsigned long r27; /* scratch */
- unsigned long r28; /* scratch */
- unsigned long r29; /* scratch */
- unsigned long r30; /* scratch */
- unsigned long r31; /* scratch */
- };
- struct {
- unsigned long r4; /* preserved */
- unsigned long r5; /* preserved */
- unsigned long r6; /* preserved */
- unsigned long r7; /* preserved */
- unsigned long cr_iipa; /* for emulation */
- unsigned long cr_isr; /* for emulation */
- unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
- };
- };
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
@@ -137,6 +121,13 @@ typedef struct cpu_user_regs{
struct pt_fpreg f9; /* scratch */
struct pt_fpreg f10; /* scratch */
struct pt_fpreg f11; /* scratch */
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long rfi_pfs; /* used for elulating rfi */
+
}cpu_user_regs_t;
typedef union {
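The reworked public cpu_user_regs drops the union that overlaid r4-r7, cr_iipa, cr_isr, eml_unat and rfi_pfs on top of r16-r31, and instead appends the preserved registers and emulation state after f11, so the scratch bank and the preserved registers can both be live in pt_regs at once. A tiny layout check, assuming the public header is reachable as <public/arch-ia64.h>:

    /* Illustrative only: with the union gone, r4 must sit after the FP
     * scratch area rather than sharing storage with the r16..r31 bank. */
    #include <stddef.h>
    #include <public/arch-ia64.h>

    int preserved_regs_follow_scratch(void)
    {
        return offsetof(cpu_user_regs_t, r4) > offsetof(cpu_user_regs_t, f11);
    }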