Diffstat (limited to 'xen/arch/x86/hvm/hvm.c')
-rw-r--r--  xen/arch/x86/hvm/hvm.c  146
1 file changed, 137 insertions, 9 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 14c965cef9..92a63bf325 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -232,6 +232,8 @@ int hvm_domain_initialise(struct domain *d)
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
+ d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
+
hvm_init_cacheattr_region_list(d);
rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
@@ -285,9 +287,10 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
struct vcpu *v;
struct hvm_hw_cpu ctxt;
+ struct segment_register seg;
struct vcpu_guest_context *vc;
- for_each_vcpu(d, v)
+ for_each_vcpu ( d, v )
{
/* We don't need to save state for a vcpu that is down; the restore
* code will leave it down if there is nothing saved. */
@@ -297,12 +300,69 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(v, &ctxt);
- /* Other vcpu register state */
+ hvm_get_segment_register(v, x86_seg_idtr, &seg);
+ ctxt.idtr_limit = seg.limit;
+ ctxt.idtr_base = seg.base;
+
+ hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+ ctxt.gdtr_limit = seg.limit;
+ ctxt.gdtr_base = seg.base;
+
+ hvm_get_segment_register(v, x86_seg_cs, &seg);
+ ctxt.cs_sel = seg.sel;
+ ctxt.cs_limit = seg.limit;
+ ctxt.cs_base = seg.base;
+ ctxt.cs_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_ds, &seg);
+ ctxt.ds_sel = seg.sel;
+ ctxt.ds_limit = seg.limit;
+ ctxt.ds_base = seg.base;
+ ctxt.ds_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_es, &seg);
+ ctxt.es_sel = seg.sel;
+ ctxt.es_limit = seg.limit;
+ ctxt.es_base = seg.base;
+ ctxt.es_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_ss, &seg);
+ ctxt.ss_sel = seg.sel;
+ ctxt.ss_limit = seg.limit;
+ ctxt.ss_base = seg.base;
+ ctxt.ss_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_fs, &seg);
+ ctxt.fs_sel = seg.sel;
+ ctxt.fs_limit = seg.limit;
+ ctxt.fs_base = seg.base;
+ ctxt.fs_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_gs, &seg);
+ ctxt.gs_sel = seg.sel;
+ ctxt.gs_limit = seg.limit;
+ ctxt.gs_base = seg.base;
+ ctxt.gs_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_tr, &seg);
+ ctxt.tr_sel = seg.sel;
+ ctxt.tr_limit = seg.limit;
+ ctxt.tr_base = seg.base;
+ ctxt.tr_arbytes = seg.attr.bytes;
+
+ hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+ ctxt.ldtr_sel = seg.sel;
+ ctxt.ldtr_limit = seg.limit;
+ ctxt.ldtr_base = seg.base;
+ ctxt.ldtr_arbytes = seg.attr.bytes;
+
vc = &v->arch.guest_context;
+
if ( v->fpu_initialised )
memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
else
memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+
ctxt.rax = vc->user_regs.eax;
ctxt.rbx = vc->user_regs.ebx;
ctxt.rcx = vc->user_regs.ecx;
@@ -341,6 +401,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
int vcpuid, rc;
struct vcpu *v;
struct hvm_hw_cpu ctxt;
+ struct segment_register seg;
struct vcpu_guest_context *vc;
/* Which vcpu is this? */
@@ -396,8 +457,64 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
return -EINVAL;
- /* Other vcpu register state */
+ seg.limit = ctxt.idtr_limit;
+ seg.base = ctxt.idtr_base;
+ hvm_set_segment_register(v, x86_seg_idtr, &seg);
+
+ seg.limit = ctxt.gdtr_limit;
+ seg.base = ctxt.gdtr_base;
+ hvm_set_segment_register(v, x86_seg_gdtr, &seg);
+
+ seg.sel = ctxt.cs_sel;
+ seg.limit = ctxt.cs_limit;
+ seg.base = ctxt.cs_base;
+ seg.attr.bytes = ctxt.cs_arbytes;
+ hvm_set_segment_register(v, x86_seg_cs, &seg);
+
+ seg.sel = ctxt.ds_sel;
+ seg.limit = ctxt.ds_limit;
+ seg.base = ctxt.ds_base;
+ seg.attr.bytes = ctxt.ds_arbytes;
+ hvm_set_segment_register(v, x86_seg_ds, &seg);
+
+ seg.sel = ctxt.es_sel;
+ seg.limit = ctxt.es_limit;
+ seg.base = ctxt.es_base;
+ seg.attr.bytes = ctxt.es_arbytes;
+ hvm_set_segment_register(v, x86_seg_es, &seg);
+
+ seg.sel = ctxt.ss_sel;
+ seg.limit = ctxt.ss_limit;
+ seg.base = ctxt.ss_base;
+ seg.attr.bytes = ctxt.ss_arbytes;
+ hvm_set_segment_register(v, x86_seg_ss, &seg);
+
+ seg.sel = ctxt.fs_sel;
+ seg.limit = ctxt.fs_limit;
+ seg.base = ctxt.fs_base;
+ seg.attr.bytes = ctxt.fs_arbytes;
+ hvm_set_segment_register(v, x86_seg_fs, &seg);
+
+ seg.sel = ctxt.gs_sel;
+ seg.limit = ctxt.gs_limit;
+ seg.base = ctxt.gs_base;
+ seg.attr.bytes = ctxt.gs_arbytes;
+ hvm_set_segment_register(v, x86_seg_gs, &seg);
+
+ seg.sel = ctxt.tr_sel;
+ seg.limit = ctxt.tr_limit;
+ seg.base = ctxt.tr_base;
+ seg.attr.bytes = ctxt.tr_arbytes;
+ hvm_set_segment_register(v, x86_seg_tr, &seg);
+
+ seg.sel = ctxt.ldtr_sel;
+ seg.limit = ctxt.ldtr_limit;
+ seg.base = ctxt.ldtr_base;
+ seg.attr.bytes = ctxt.ldtr_arbytes;
+ hvm_set_segment_register(v, x86_seg_ldtr, &seg);
+
memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+
vc->user_regs.eax = ctxt.rax;
vc->user_regs.ebx = ctxt.rbx;
vc->user_regs.ecx = ctxt.rcx;
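
For readers following the new save/load pairs above, here is a minimal, self-contained model of the round trip. The types and helpers below (seg_state, saved_seg, save_seg, load_seg) are illustrative stand-ins for this sketch only, not Xen interfaces such as struct segment_register or hvm_get/set_segment_register(); the point is simply that each segment is serialised as selector/limit/base/attribute bytes and restored verbatim on the load side.

/*
 * Stand-alone sketch (not Xen code): seg_state models struct
 * segment_register, saved_seg models the per-segment fields added to
 * struct hvm_hw_cpu, and save_seg()/load_seg() model the copy done by
 * hvm_save_cpu_ctxt()/hvm_load_cpu_ctxt() for one segment.
 */
#include <assert.h>
#include <stdint.h>

struct seg_state {       /* simplified segment register */
    uint16_t sel;
    uint32_t limit;
    uint64_t base;
    uint32_t attr;       /* raw attribute bytes, cf. seg.attr.bytes */
};

struct saved_seg {       /* simplified save-record fields */
    uint16_t sel;
    uint32_t limit;
    uint64_t base;
    uint32_t arbytes;
};

static void save_seg(const struct seg_state *s, struct saved_seg *out)
{
    out->sel     = s->sel;      /* cf. ctxt.cs_sel = seg.sel;     */
    out->limit   = s->limit;    /* cf. ctxt.cs_limit = seg.limit; */
    out->base    = s->base;     /* cf. ctxt.cs_base = seg.base;   */
    out->arbytes = s->attr;     /* cf. ctxt.cs_arbytes = ...      */
}

static void load_seg(const struct saved_seg *in, struct seg_state *s)
{
    s->sel   = in->sel;
    s->limit = in->limit;
    s->base  = in->base;
    s->attr  = in->arbytes;
}

int main(void)
{
    /* A flat 32-bit code segment, as a worked example. */
    struct seg_state cs = { .sel = 0x0008, .limit = 0xffffffff,
                            .base = 0, .attr = 0xc09b };
    struct saved_seg rec;
    struct seg_state restored;

    save_seg(&cs, &rec);        /* save side of the migration record */
    load_seg(&rec, &restored);  /* load side on the receiving host   */

    assert(restored.sel == cs.sel && restored.limit == cs.limit);
    assert(restored.base == cs.base && restored.attr == cs.attr);
    return 0;
}
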
@@ -1269,6 +1386,7 @@ void hvm_task_switch(
static enum hvm_copy_result __hvm_copy(
void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
{
+ struct vcpu *curr = current;
unsigned long gfn, mfn;
p2m_type_t p2mt;
char *p;
@@ -1277,12 +1395,22 @@ static enum hvm_copy_result __hvm_copy(
if ( virt )
{
- struct segment_register sreg;
- hvm_get_segment_register(current, x86_seg_ss, &sreg);
- if ( sreg.attr.fields.dpl == 3 )
- pfec |= PFEC_user_mode;
+ /*
+ * We cannot use hvm_get_segment_register() while executing in
+ * vmx_realmode() as segment register state is cached. Furthermore,
+ * VMREADs on every data access hurts emulation performance.
+ */
+ if ( !curr->arch.hvm_vmx.vmxemul )
+ {
+ struct segment_register sreg;
+ hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+ if ( sreg.attr.fields.dpl == 3 )
+ pfec |= PFEC_user_mode;
+ }
+
if ( dir )
pfec |= PFEC_write_access;
+
if ( fetch )
pfec |= PFEC_insn_fetch;
}
@@ -1294,7 +1422,7 @@ static enum hvm_copy_result __hvm_copy(
if ( virt )
{
- gfn = paging_gva_to_gfn(current, addr, &pfec);
+ gfn = paging_gva_to_gfn(curr, addr, &pfec);
if ( gfn == INVALID_GFN )
{
if ( virt == 2 ) /* 2 means generate a fault */
@@ -1318,7 +1446,7 @@ static enum hvm_copy_result __hvm_copy(
if ( dir )
{
memcpy(p, buf, count); /* dir == TRUE: *to* guest */
- paging_mark_dirty(current->domain, mfn);
+ paging_mark_dirty(curr->domain, mfn);
}
else
memcpy(buf, p, count); /* dir == FALSE: *from guest */
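
And, for the __hvm_copy() change, a self-contained sketch of how the page-fault error code is assembled before the paging_gva_to_gfn() walk. The PFEC_* values below follow the x86 #PF error-code bit layout; build_pfec() and the PFEC_page_present seed are assumptions of this sketch, not code taken from the patch.

/*
 * Stand-alone illustration (not Xen code) of the pfec bits combined
 * in __hvm_copy() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFEC_page_present  (1u << 0)   /* fault on a present page         */
#define PFEC_write_access  (1u << 1)   /* access was a write              */
#define PFEC_user_mode     (1u << 2)   /* access came from CPL 3          */
#define PFEC_reserved_bit  (1u << 3)   /* reserved bit set in an entry    */
#define PFEC_insn_fetch    (1u << 4)   /* access was an instruction fetch */

static uint32_t build_pfec(bool user, bool write, bool fetch)
{
    uint32_t pfec = PFEC_page_present;   /* assumed seed for this sketch */

    if ( user )                          /* SS.DPL == 3 in __hvm_copy()  */
        pfec |= PFEC_user_mode;
    if ( write )                         /* dir != 0: copy *to* guest    */
        pfec |= PFEC_write_access;
    if ( fetch )                         /* instruction fetch            */
        pfec |= PFEC_insn_fetch;

    return pfec;
}

int main(void)
{
    /* A user-mode write that is not an instruction fetch: 0x7. */
    printf("pfec = %#x\n", (unsigned int)build_pfec(true, true, false));
    return 0;
}
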