about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-09-17 13:33:09 +0100
committerkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-09-17 13:33:09 +0100
commit15b866d372aacaeda4ece10ccdda9633db04676f (patch)
tree48793e881f14d9e07d5aa320e35f4b7e3e3eaf78
parentd105814c00447dd2412c53a2ae5773316e8220bf (diff)
downloadxen-15b866d372aacaeda4ece10ccdda9633db04676f.tar.gz
xen-15b866d372aacaeda4ece10ccdda9633db04676f.tar.bz2
xen-15b866d372aacaeda4ece10ccdda9633db04676f.zip
hvm: New HVM function hvm_set_segment_register().
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--xen/arch/x86/hvm/svm/svm.c72
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c74
-rw-r--r--xen/include/asm-x86/hvm/hvm.h9
-rw-r--r--xen/include/asm-x86/hvm/svm/svm.h16
4 files changed, 156 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index fe038c2aac..891be1a5de 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -618,9 +618,7 @@ static void svm_sync_vmcb(struct vcpu *v)
arch_svm->vmcb_in_sync = 1;
- asm volatile (
- ".byte 0x0f,0x01,0xdb" /* vmsave */
- : : "a" (__pa(arch_svm->vmcb)) );
+ svm_vmsave(arch_svm->vmcb);
}
static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
@@ -649,6 +647,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
switch ( seg )
{
case x86_seg_cs:
@@ -685,7 +684,58 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
svm_sync_vmcb(v);
memcpy(reg, &vmcb->ldtr, sizeof(*reg));
break;
- default: BUG();
+ default:
+ BUG();
+ }
+}
+
+static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ switch ( seg )
+ {
+ case x86_seg_cs:
+ memcpy(&vmcb->cs, reg, sizeof(*reg));
+ break;
+ case x86_seg_ds:
+ memcpy(&vmcb->ds, reg, sizeof(*reg));
+ break;
+ case x86_seg_es:
+ memcpy(&vmcb->es, reg, sizeof(*reg));
+ break;
+ case x86_seg_fs:
+ svm_sync_vmcb(v);
+ memcpy(&vmcb->fs, reg, sizeof(*reg));
+ svm_vmload(vmcb);
+ break;
+ case x86_seg_gs:
+ svm_sync_vmcb(v);
+ memcpy(&vmcb->gs, reg, sizeof(*reg));
+ svm_vmload(vmcb);
+ break;
+ case x86_seg_ss:
+ memcpy(&vmcb->ss, reg, sizeof(*reg));
+ break;
+ case x86_seg_tr:
+ svm_sync_vmcb(v);
+ memcpy(&vmcb->tr, reg, sizeof(*reg));
+ svm_vmload(vmcb);
+ break;
+ case x86_seg_gdtr:
+ memcpy(&vmcb->gdtr, reg, sizeof(*reg));
+ break;
+ case x86_seg_idtr:
+ memcpy(&vmcb->idtr, reg, sizeof(*reg));
+ break;
+ case x86_seg_ldtr:
+ svm_sync_vmcb(v);
+ memcpy(&vmcb->ldtr, reg, sizeof(*reg));
+ svm_vmload(vmcb);
+ break;
+ default:
+ BUG();
}
}
@@ -787,10 +837,7 @@ static void svm_ctxt_switch_from(struct vcpu *v)
svm_save_dr(v);
svm_sync_vmcb(v);
-
- asm volatile (
- ".byte 0x0f,0x01,0xda" /* vmload */
- : : "a" (__pa(root_vmcb[cpu])) );
+ svm_vmload(root_vmcb[cpu]);
#ifdef __x86_64__
/* Resume use of ISTs now that the host TR is reinstated. */
@@ -826,12 +873,8 @@ static void svm_ctxt_switch_to(struct vcpu *v)
svm_restore_dr(v);
- asm volatile (
- ".byte 0x0f,0x01,0xdb" /* vmsave */
- : : "a" (__pa(root_vmcb[cpu])) );
- asm volatile (
- ".byte 0x0f,0x01,0xda" /* vmload */
- : : "a" (__pa(v->arch.hvm_svm.vmcb)) );
+ svm_vmsave(root_vmcb[cpu]);
+ svm_vmload(v->arch.hvm_svm.vmcb);
}
static void svm_do_resume(struct vcpu *v)
@@ -926,6 +969,7 @@ static struct hvm_function_table svm_function_table = {
.guest_x86_mode = svm_guest_x86_mode,
.get_segment_base = svm_get_segment_base,
.get_segment_register = svm_get_segment_register,
+ .set_segment_register = svm_set_segment_register,
.update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 52d1b0793f..093e1878f4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -957,6 +957,79 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
reg->attr.fields.p = 0;
}
+static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ u16 attr;
+
+ ASSERT(v == current);
+
+ attr = reg->attr.bytes;
+ attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+
+ switch ( seg )
+ {
+ case x86_seg_cs:
+ __vmwrite(GUEST_CS_SELECTOR, reg->sel);
+ __vmwrite(GUEST_CS_LIMIT, reg->limit);
+ __vmwrite(GUEST_CS_BASE, reg->base);
+ __vmwrite(GUEST_CS_AR_BYTES, attr);
+ break;
+ case x86_seg_ds:
+ __vmwrite(GUEST_DS_SELECTOR, reg->sel);
+ __vmwrite(GUEST_DS_LIMIT, reg->limit);
+ __vmwrite(GUEST_DS_BASE, reg->base);
+ __vmwrite(GUEST_DS_AR_BYTES, attr);
+ break;
+ case x86_seg_es:
+ __vmwrite(GUEST_ES_SELECTOR, reg->sel);
+ __vmwrite(GUEST_ES_LIMIT, reg->limit);
+ __vmwrite(GUEST_ES_BASE, reg->base);
+ __vmwrite(GUEST_ES_AR_BYTES, attr);
+ break;
+ case x86_seg_fs:
+ __vmwrite(GUEST_FS_SELECTOR, reg->sel);
+ __vmwrite(GUEST_FS_LIMIT, reg->limit);
+ __vmwrite(GUEST_FS_BASE, reg->base);
+ __vmwrite(GUEST_FS_AR_BYTES, attr);
+ break;
+ case x86_seg_gs:
+ __vmwrite(GUEST_GS_SELECTOR, reg->sel);
+ __vmwrite(GUEST_GS_LIMIT, reg->limit);
+ __vmwrite(GUEST_GS_BASE, reg->base);
+ __vmwrite(GUEST_GS_AR_BYTES, attr);
+ break;
+ case x86_seg_ss:
+ __vmwrite(GUEST_SS_SELECTOR, reg->sel);
+ __vmwrite(GUEST_SS_LIMIT, reg->limit);
+ __vmwrite(GUEST_SS_BASE, reg->base);
+ __vmwrite(GUEST_SS_AR_BYTES, attr);
+ break;
+ case x86_seg_tr:
+ __vmwrite(GUEST_TR_SELECTOR, reg->sel);
+ __vmwrite(GUEST_TR_LIMIT, reg->limit);
+ __vmwrite(GUEST_TR_BASE, reg->base);
+ __vmwrite(GUEST_TR_AR_BYTES, attr);
+ break;
+ case x86_seg_gdtr:
+ __vmwrite(GUEST_GDTR_LIMIT, reg->limit);
+ __vmwrite(GUEST_GDTR_BASE, reg->base);
+ break;
+ case x86_seg_idtr:
+ __vmwrite(GUEST_IDTR_LIMIT, reg->limit);
+ __vmwrite(GUEST_IDTR_BASE, reg->base);
+ break;
+ case x86_seg_ldtr:
+ __vmwrite(GUEST_LDTR_SELECTOR, reg->sel);
+ __vmwrite(GUEST_LDTR_LIMIT, reg->limit);
+ __vmwrite(GUEST_LDTR_BASE, reg->base);
+ __vmwrite(GUEST_LDTR_AR_BYTES, attr);
+ break;
+ default:
+ BUG();
+ }
+}
+
/* Make sure that xen intercepts any FP accesses from current */
static void vmx_stts(struct vcpu *v)
{
@@ -1160,6 +1233,7 @@ static struct hvm_function_table vmx_function_table = {
.guest_x86_mode = vmx_guest_x86_mode,
.get_segment_base = vmx_get_segment_base,
.get_segment_register = vmx_get_segment_register,
+ .set_segment_register = vmx_set_segment_register,
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index f9b95681b6..b9dcf2bd18 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -105,6 +105,8 @@ struct hvm_function_table {
unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg);
+ void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg);
/*
* Re-set the value of CR3 that Xen runs on when handling VM exits.
@@ -254,6 +256,13 @@ hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
hvm_funcs.get_segment_register(v, seg, reg);
}
+static inline void
+hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
+{
+ hvm_funcs.set_segment_register(v, seg, reg);
+}
+
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index 9125d0a27f..649afe9082 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -28,7 +28,7 @@
#include <asm/hvm/svm/vmcb.h>
#include <asm/i387.h>
-extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
+void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
#define SVM_REG_EAX (0)
#define SVM_REG_ECX (1)
@@ -47,4 +47,18 @@ extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
#define SVM_REG_R14 (14)
#define SVM_REG_R15 (15)
+static inline void svm_vmload(void *vmcb)
+{
+ asm volatile (
+ ".byte 0x0f,0x01,0xda" /* vmload */
+ : : "a" (__pa(vmcb)) : "memory" );
+}
+
+static inline void svm_vmsave(void *vmcb)
+{
+ asm volatile (
+ ".byte 0x0f,0x01,0xdb" /* vmsave */
+ : : "a" (__pa(vmcb)) : "memory" );
+}
+
#endif /* __ASM_X86_HVM_SVM_H__ */