author     Keir Fraser <keir@xensource.com>  2007-07-07 11:21:34 +0100
committer  Keir Fraser <keir@xensource.com>  2007-07-07 11:21:34 +0100
commit     38e46984b66243cca9dd98338b5e09ef4d1899ca (patch)
tree       97eb778584ef1805c5e6a04fefb1bd91ec6ba115
parent     2f9d88f61640676302638f28183cca674db3e99d (diff)
SVM: Sync with VMX code changes.
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  xen/arch/x86/hvm/svm/emulate.c     |  4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c         | 86
-rw-r--r--  xen/include/asm-x86/hvm/svm/svm.h  | 40
3 files changed, 66 insertions(+), 64 deletions(-)
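
The bulk of the change below moves the SVM mode-query helpers (svm_long_mode_enabled(), svm_lme_is_set(), svm_paging_enabled(), and friends) out of svm.h into svm.c as plain static functions, and adds i386 stubs that return 0 so call sites no longer need #ifdef __x86_64__ guards. A minimal sketch of that pattern follows; the caller example_is_64bit_cs() is made up for illustration and is not part of the patch.

    #ifdef __x86_64__
    static int svm_long_mode_enabled(struct vcpu *v)
    {
        u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
        return guest_efer & EFER_LMA;
    }
    #else /* __i386__ */
    /* On 32-bit builds long mode can never be active, so the query folds
     * to a constant and the compiler discards the 64-bit-only branches. */
    static int svm_long_mode_enabled(struct vcpu *v) { return 0; }
    #endif

    /* Hypothetical caller, written without any #ifdef of its own: */
    static int example_is_64bit_cs(struct vcpu *v, struct vmcb_struct *vmcb)
    {
        return vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
    }
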
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 8c175d23de..9ef5a0b7bd 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -201,7 +201,7 @@ unsigned long get_effective_addr_modrm64(struct cpu_user_regs *regs,
#if __x86_64__
/* 64-bit mode */
- if (vmcb->cs.attr.fields.l && svm_long_mode_enabled(v))
+ if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
return vmcb->rip + inst_len + *size + disp;
#endif
return disp;
@@ -327,7 +327,7 @@ unsigned long svm_rip2pointer(struct vcpu *v)
*/
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
unsigned long p = vmcb->cs.base + vmcb->rip;
- if (!(vmcb->cs.attr.fields.l && svm_long_mode_enabled(v)))
+ if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
return p;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c53ff69ec0..5f61dbf6c3 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -69,7 +69,7 @@ static void *root_vmcb[NR_CPUS] __read_mostly;
/* hardware assisted paging bits */
extern int opt_hap_enabled;
-static inline void svm_inject_exception(struct vcpu *v, int trap,
+static void svm_inject_exception(struct vcpu *v, int trap,
int ev, int error_code)
{
eventinj_t event;
@@ -98,6 +98,57 @@ static void stop_svm(void)
write_efer(read_efer() & ~EFER_SVME);
}
+#ifdef __x86_64__
+
+static int svm_lme_is_set(struct vcpu *v)
+{
+ u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ return guest_efer & EFER_LME;
+}
+
+static int svm_long_mode_enabled(struct vcpu *v)
+{
+ u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
+ return guest_efer & EFER_LMA;
+}
+
+#else /* __i386__ */
+
+static int svm_lme_is_set(struct vcpu *v)
+{ return 0; }
+static int svm_long_mode_enabled(struct vcpu *v)
+{ return 0; }
+
+#endif
+
+static int svm_cr4_pae_is_set(struct vcpu *v)
+{
+ unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+ return guest_cr4 & X86_CR4_PAE;
+}
+
+static int svm_paging_enabled(struct vcpu *v)
+{
+ unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+ return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
+}
+
+static int svm_pae_enabled(struct vcpu *v)
+{
+ unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+ return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
+}
+
+static int svm_nx_enabled(struct vcpu *v)
+{
+ return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
+}
+
+static int svm_pgbit_test(struct vcpu *v)
+{
+ return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
+}
+
static void svm_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
@@ -122,7 +173,7 @@ static void svm_store_cpu_guest_regs(
}
}
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
+static int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
u32 ecx = regs->ecx;
@@ -149,7 +200,6 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
goto gp_fault;
}
-#ifdef __x86_64__
if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
{
/* EFER.LME transition from 0 to 1. */
@@ -170,7 +220,6 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
goto gp_fault;
}
}
-#endif /* __x86_64__ */
v->arch.hvm_svm.cpu_shadow_efer = msr_content;
vmcb->efer = msr_content | EFER_SVME;
@@ -204,7 +253,7 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
#define savedebug(_v,_reg) \
asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
-static inline void svm_save_dr(struct vcpu *v)
+static void svm_save_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -224,7 +273,7 @@ static inline void svm_save_dr(struct vcpu *v)
}
-static inline void __restore_debug_registers(struct vcpu *v)
+static void __restore_debug_registers(struct vcpu *v)
{
loaddebug(&v->arch.guest_context, 0);
loaddebug(&v->arch.guest_context, 1);
@@ -546,7 +595,7 @@ static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
return 0;
}
-static inline void svm_restore_dr(struct vcpu *v)
+static void svm_restore_dr(struct vcpu *v)
{
if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
__restore_debug_registers(v);
@@ -636,11 +685,8 @@ static void svm_sync_vmcb(struct vcpu *v)
static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int long_mode = 0;
+ int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#ifdef __x86_64__
- long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
switch ( seg )
{
case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
@@ -1123,7 +1169,7 @@ static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb,
__update_guest_eip(vmcb, inst_len);
}
-static inline unsigned long *get_reg_p(
+static unsigned long *get_reg_p(
unsigned int gpreg,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
@@ -1188,7 +1234,7 @@ static inline unsigned long *get_reg_p(
}
-static inline unsigned long get_reg(
+static unsigned long get_reg(
unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
unsigned long *gp;
@@ -1197,7 +1243,7 @@ static inline unsigned long get_reg(
}
-static inline void set_reg(
+static void set_reg(
unsigned int gpreg, unsigned long value,
struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
@@ -1300,7 +1346,7 @@ static void svm_get_prefix_info(struct vcpu *v, unsigned int dir,
/* Get the address of INS/OUTS instruction */
-static inline int svm_get_io_address(
+static int svm_get_io_address(
struct vcpu *v, struct cpu_user_regs *regs,
unsigned int size, ioio_info_t info,
unsigned long *count, unsigned long *addr)
@@ -1311,10 +1357,8 @@ static inline int svm_get_io_address(
svm_segment_register_t *seg = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-#ifdef __x86_64__
- /* If we're in long mode, we shouldn't check the segment presence & limit */
+ /* If we're in long mode, don't check the segment presence & limit */
long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
-#endif
/* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit.
* l field combined with EFER_LMA says whether it's 16 or 64 bit.
@@ -1641,7 +1685,6 @@ static int svm_set_cr0(unsigned long value)
if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
{
-#if defined(__x86_64__)
if ( svm_lme_is_set(v) )
{
if ( !svm_cr4_pae_is_set(v) )
@@ -1654,7 +1697,6 @@ static int svm_set_cr0(unsigned long value)
v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
vmcb->efer |= EFER_LMA | EFER_LME;
}
-#endif /* __x86_64__ */
if ( !paging_mode_hap(v->domain) )
{
@@ -2067,7 +2109,7 @@ static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
return result;
}
-static inline void svm_do_msr_access(
+static void svm_do_msr_access(
struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -2170,7 +2212,7 @@ static inline void svm_do_msr_access(
__update_guest_eip(vmcb, inst_len);
}
-static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 341ccf0963..9125d0a27f 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -30,46 +30,6 @@
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
-static inline int svm_long_mode_enabled(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LMA;
-}
-
-static inline int svm_lme_is_set(struct vcpu *v)
-{
- u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
- return guest_efer & EFER_LME;
-}
-
-static inline int svm_cr4_pae_is_set(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return guest_cr4 & X86_CR4_PAE;
-}
-
-static inline int svm_paging_enabled(struct vcpu *v)
-{
- unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
- return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
-}
-
-static inline int svm_pae_enabled(struct vcpu *v)
-{
- unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
- return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
-}
-
-static inline int svm_nx_enabled(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
-}
-
-static inline int svm_pgbit_test(struct vcpu *v)
-{
- return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
-}
-
#define SVM_REG_EAX (0)
#define SVM_REG_ECX (1)
#define SVM_REG_EDX (2)
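
For reference, the svm_set_cr0() hunk above applies the usual long-mode activation rule when the guest turns on paging: with EFER.LME set, CR4.PAE must also be set, and EFER.LMA becomes active. The sketch below is illustrative only; the helper name is invented and does not appear in the patch, though svm_lme_is_set() and svm_cr4_pae_is_set() do.

    /* Illustrative only: outcome of a CR0.PG 0 -> 1 transition. */
    static int example_cr0_pg_enables_long_mode(struct vcpu *v)
    {
        if ( !svm_lme_is_set(v) )
            return 0;      /* stays in legacy (non-long-mode) paging */
        if ( !svm_cr4_pae_is_set(v) )
            return -1;     /* invalid: the guest gets #GP */
        return 1;          /* long mode is activated (EFER.LMA set) */
    }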