diff options
author:    Keir Fraser <keir@xen.org>  2012-09-12 13:29:30 +0100
committer: Keir Fraser <keir@xen.org>  2012-09-12 13:29:30 +0100
commit:    5d1181a5ea5e0f11d481a94b16ed00d883f9726e (patch)
tree:      4b43be5829873f2ec1a1b2d0f7e26b15dffb11c6 /xen/arch/x86/hvm/svm
parent:    b314cd733413babc5978b819793ad5c77f094adf (diff)
download:  xen-5d1181a5ea5e0f11d481a94b16ed00d883f9726e.tar.gz xen-5d1181a5ea5e0f11d481a94b16ed00d883f9726e.tar.bz2 xen-5d1181a5ea5e0f11d481a94b16ed00d883f9726e.zip
xen: Remove x86_32 build target.
Signed-off-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/arch/x86/hvm/svm')
-rw-r--r--  xen/arch/x86/hvm/svm/emulate.c |  2
-rw-r--r--  xen/arch/x86/hvm/svm/entry.S   | 31
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c     |  6
3 files changed, 0 insertions, 39 deletions
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c index 0c72f00798..9d09a462a4 100644 --- a/xen/arch/x86/hvm/svm/emulate.c +++ b/xen/arch/x86/hvm/svm/emulate.c @@ -44,9 +44,7 @@ static unsigned int is_prefix(u8 opc) case 0xF0: case 0xF3: case 0xF2: -#if __x86_64__ case 0x40 ... 0x4f: -#endif /* __x86_64__ */ return 1; } return 0; diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S index 7458664fd0..e70b499e4e 100644 --- a/xen/arch/x86/hvm/svm/entry.S +++ b/xen/arch/x86/hvm/svm/entry.S @@ -33,24 +33,11 @@ #define get_current(reg) GET_CURRENT(r(reg)) -#if defined(__x86_64__) #define r(reg) %r##reg #define addr_of(lbl) lbl(%rip) #define call_with_regs(fn) \ mov %rsp,%rdi; \ call fn; -#else /* defined(__i386__) */ -#define r(reg) %e##reg -#define addr_of(lbl) lbl -#define UREGS_rax UREGS_eax -#define UREGS_rip UREGS_eip -#define UREGS_rsp UREGS_esp -#define call_with_regs(fn) \ - mov %esp,%eax; \ - push %eax; \ - call fn; \ - add $4,%esp; -#endif ENTRY(svm_asm_do_resume) call svm_intr_assist @@ -93,7 +80,6 @@ UNLIKELY_END(svm_trace) mov VCPU_svm_vmcb_pa(r(bx)),r(ax) -#if defined(__x86_64__) pop %r15 pop %r14 pop %r13 @@ -109,18 +95,9 @@ UNLIKELY_END(svm_trace) pop %rdx pop %rsi pop %rdi -#else /* defined(__i386__) */ - pop %ebx - pop %ecx - pop %edx - pop %esi - pop %edi - pop %ebp -#endif VMRUN -#if defined(__x86_64__) push %rdi push %rsi push %rdx @@ -136,14 +113,6 @@ UNLIKELY_END(svm_trace) push %r13 push %r14 push %r15 -#else /* defined(__i386__) */ - push %ebp - push %edi - push %esi - push %edx - push %ecx - push %ebx -#endif get_current(bx) movb $0,VCPU_svm_vmcb_in_sync(r(bx)) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 7533bf302d..505d800c8e 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -871,12 +871,10 @@ static void svm_ctxt_switch_from(struct vcpu *v) svm_sync_vmcb(v); svm_vmload(per_cpu(root_vmcb, cpu)); -#ifdef __x86_64__ /* Resume use of ISTs now 
that the host TR is reinstated. */ idt_tables[cpu][TRAP_double_fault].a |= IST_DF << 32; idt_tables[cpu][TRAP_nmi].a |= IST_NMI << 32; idt_tables[cpu][TRAP_machine_check].a |= IST_MCE << 32; -#endif } static void svm_ctxt_switch_to(struct vcpu *v) @@ -884,7 +882,6 @@ static void svm_ctxt_switch_to(struct vcpu *v) struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; int cpu = smp_processor_id(); -#ifdef __x86_64__ /* * This is required, because VMRUN does consistency check * and some of the DOM0 selectors are pointing to @@ -902,7 +899,6 @@ static void svm_ctxt_switch_to(struct vcpu *v) idt_tables[cpu][TRAP_double_fault].a &= ~(7UL << 32); idt_tables[cpu][TRAP_nmi].a &= ~(7UL << 32); idt_tables[cpu][TRAP_machine_check].a &= ~(7UL << 32); -#endif svm_restore_dr(v); @@ -1222,7 +1218,6 @@ static int svm_cpu_up(void) /* Initialize core's ASID handling. */ svm_asid_init(c); -#ifdef __x86_64__ /* * Check whether EFER.LMSLE can be written. * Unfortunately there's no feature bit defined for this. @@ -1242,7 +1237,6 @@ static int svm_cpu_up(void) printk(XENLOG_WARNING "Inconsistent LMSLE support across CPUs!\n"); cpu_has_lmsl = 0; } -#endif /* Initialize OSVW bits to be used by guests */ svm_host_osvw_init(); |