about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2007-12-07 14:28:07 +0000
committerKeir Fraser <keir.fraser@citrix.com>2007-12-07 14:28:07 +0000
commit34117867609c6e98a9e975c4ab6c4f2454408315 (patch)
tree0779e9bc99bffc27d13770a74bcda840b5599a5b
parenta3024bb60156eb668d3c8326a9fcb8ba5ce97ba4 (diff)
downloadxen-34117867609c6e98a9e975c4ab6c4f2454408315.tar.gz
xen-34117867609c6e98a9e975c4ab6c4f2454408315.tar.bz2
xen-34117867609c6e98a9e975c4ab6c4f2454408315.zip
hvm: Clean up AP initialisation. This allows AP bringup into emulated
real mode when running on VMX, as well as removing 100 LOC. Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--xen/arch/x86/hvm/hvm.c93
-rw-r--r--xen/arch/x86/hvm/svm/svm.c152
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c18
-rw-r--r--xen/include/asm-x86/hvm/hvm.h10
4 files changed, 88 insertions, 185 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c42ff62aa5..ff49865b99 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1590,57 +1590,90 @@ void hvm_hypercall_page_initialise(struct domain *d,
hvm_funcs.init_hypercall_page(d, hypercall_page);
}
-
-/*
- * only called in HVM domain BSP context
- * when booting, vcpuid is always equal to apic_id
- */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
- struct vcpu *v;
struct domain *d = current->domain;
+ struct vcpu *v;
struct vcpu_guest_context *ctxt;
- int rc = 0;
+ struct segment_register reg;
- BUG_ON(!is_hvm_domain(d));
+ ASSERT(is_hvm_domain(d));
if ( (v = d->vcpu[vcpuid]) == NULL )
return -ENOENT;
- if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
+ v->fpu_initialised = 0;
+ v->arch.flags |= TF_kernel_mode;
+ v->is_initialised = 1;
+
+ ctxt = &v->arch.guest_context;
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->flags = VGCF_online;
+ ctxt->user_regs.eflags = 2;
+
+#ifdef VMXASSIST
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
- gdprintk(XENLOG_ERR,
- "Failed to allocate memory in hvm_bringup_ap.\n");
- return -ENOMEM;
+ ctxt->user_regs.eip = VMXASSIST_BASE;
+ ctxt->user_regs.edx = vcpuid;
+ ctxt->user_regs.ebx = trampoline_vector;
+ goto done;
}
+#endif
+
+ v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+ hvm_update_guest_cr(v, 0);
+
+ v->arch.hvm_vcpu.guest_cr[2] = 0;
+ hvm_update_guest_cr(v, 2);
+
+ v->arch.hvm_vcpu.guest_cr[3] = 0;
+ hvm_update_guest_cr(v, 3);
+
+ v->arch.hvm_vcpu.guest_cr[4] = 0;
+ hvm_update_guest_cr(v, 4);
- hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);
+ v->arch.hvm_vcpu.guest_efer = 0;
+ hvm_update_guest_efer(v);
+ reg.sel = trampoline_vector << 8;
+ reg.base = (uint32_t)reg.sel << 4;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x89b;
+ hvm_set_segment_register(v, x86_seg_cs, &reg);
+
+ reg.sel = reg.base = 0;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x893;
+ hvm_set_segment_register(v, x86_seg_ds, &reg);
+ hvm_set_segment_register(v, x86_seg_es, &reg);
+ hvm_set_segment_register(v, x86_seg_fs, &reg);
+ hvm_set_segment_register(v, x86_seg_gs, &reg);
+ hvm_set_segment_register(v, x86_seg_ss, &reg);
+
+ reg.attr.bytes = 0x82; /* LDT */
+ hvm_set_segment_register(v, x86_seg_ldtr, &reg);
+
+ reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+ hvm_set_segment_register(v, x86_seg_tr, &reg);
+
+ reg.attr.bytes = 0;
+ hvm_set_segment_register(v, x86_seg_gdtr, &reg);
+ hvm_set_segment_register(v, x86_seg_idtr, &reg);
+
+#ifdef VMXASSIST
+ done:
+#endif
/* Sync AP's TSC with BSP's. */
v->arch.hvm_vcpu.cache_tsc_offset =
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
- LOCK_BIGLOCK(d);
- rc = -EEXIST;
- if ( !v->is_initialised )
- rc = boot_vcpu(d, vcpuid, ctxt);
- UNLOCK_BIGLOCK(d);
-
- if ( rc != 0 )
- {
- gdprintk(XENLOG_ERR,
- "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
- goto out;
- }
-
if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
vcpu_wake(v);
- gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
- out:
- xfree(ctxt);
- return rc;
+ gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);
+ return 0;
}
static int hvmop_set_pci_intx_level(
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b33b20af77..f471474b55 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -62,8 +62,6 @@ int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
int inst_len);
asmlinkage void do_IRQ(struct cpu_user_regs *);
-static int svm_reset_to_realmode(
- struct vcpu *v, struct cpu_user_regs *regs);
static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
static void svm_update_guest_efer(struct vcpu *v);
static void svm_inject_exception(
@@ -617,8 +615,24 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
struct segment_register *reg)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ int sync = 0;
- ASSERT(v == current);
+ ASSERT((v == current) || !vcpu_runnable(v));
+
+ switch ( seg )
+ {
+ case x86_seg_fs:
+ case x86_seg_gs:
+ case x86_seg_tr:
+ case x86_seg_ldtr:
+ sync = (v == current);
+ break;
+ default:
+ break;
+ }
+
+ if ( sync )
+ svm_sync_vmcb(v);
switch ( seg )
{
@@ -632,23 +646,17 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
memcpy(&vmcb->es, reg, sizeof(*reg));
break;
case x86_seg_fs:
- svm_sync_vmcb(v);
memcpy(&vmcb->fs, reg, sizeof(*reg));
- svm_vmload(vmcb);
break;
case x86_seg_gs:
- svm_sync_vmcb(v);
memcpy(&vmcb->gs, reg, sizeof(*reg));
- svm_vmload(vmcb);
break;
case x86_seg_ss:
memcpy(&vmcb->ss, reg, sizeof(*reg));
vmcb->cpl = vmcb->ss.attr.fields.dpl;
break;
case x86_seg_tr:
- svm_sync_vmcb(v);
memcpy(&vmcb->tr, reg, sizeof(*reg));
- svm_vmload(vmcb);
break;
case x86_seg_gdtr:
memcpy(&vmcb->gdtr, reg, sizeof(*reg));
@@ -657,13 +665,14 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
memcpy(&vmcb->idtr, reg, sizeof(*reg));
break;
case x86_seg_ldtr:
- svm_sync_vmcb(v);
memcpy(&vmcb->ldtr, reg, sizeof(*reg));
- svm_vmload(vmcb);
break;
default:
BUG();
}
+
+ if ( sync )
+ svm_vmload(vmcb);
}
/* Make sure that xen intercepts any FP accesses from current */
@@ -684,47 +693,11 @@ static void svm_stts(struct vcpu *v)
}
}
-
static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
{
v->arch.hvm_svm.vmcb->tsc_offset = offset;
}
-
-static void svm_init_ap_context(
- struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
- struct vcpu *v;
- struct vmcb_struct *vmcb;
- cpu_user_regs_t *regs;
- u16 cs_sel;
-
- /* We know this is safe because hvm_bringup_ap() does it */
- v = current->domain->vcpu[vcpuid];
- vmcb = v->arch.hvm_svm.vmcb;
- regs = &v->arch.guest_context.user_regs;
-
- memset(ctxt, 0, sizeof(*ctxt));
-
- /*
- * We execute the trampoline code in real mode. The trampoline vector
- * passed to us is page alligned and is the physical frame number for
- * the code. We will execute this code in real mode.
- */
- cs_sel = trampoline_vector << 8;
- ctxt->user_regs.eip = 0x0;
- ctxt->user_regs.cs = cs_sel;
-
- /*
- * This is the launch of an AP; set state so that we begin executing
- * the trampoline code in real-mode.
- */
- svm_reset_to_realmode(v, regs);
- /* Adjust the vmcb's hidden register state. */
- vmcb->cs.sel = cs_sel;
- vmcb->cs.base = (cs_sel << 4);
-}
-
static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
{
char *p;
@@ -916,7 +889,6 @@ static struct hvm_function_table svm_function_table = {
.stts = svm_stts,
.set_tsc_offset = svm_set_tsc_offset,
.inject_exception = svm_inject_exception,
- .init_ap_context = svm_init_ap_context,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending
};
@@ -2037,90 +2009,6 @@ void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
domain_crash(v->domain);
}
-
-/*
- * Reset to realmode causes execution to start at 0xF000:0xFFF0 in
- * 16-bit realmode. Basically, this mimics a processor reset.
- *
- * returns 0 on success, non-zero otherwise
- */
-static int svm_reset_to_realmode(struct vcpu *v,
- struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- memset(regs, 0, sizeof(struct cpu_user_regs));
-
- regs->eflags = 2;
-
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
- svm_update_guest_cr(v, 0);
-
- v->arch.hvm_vcpu.guest_cr[2] = 0;
- svm_update_guest_cr(v, 2);
-
- v->arch.hvm_vcpu.guest_cr[4] = 0;
- svm_update_guest_cr(v, 4);
-
- vmcb->efer = EFER_SVME;
-
- /* This will jump to ROMBIOS */
- regs->eip = 0xFFF0;
-
- /* Set up the segment registers and all their hidden states. */
- vmcb->cs.sel = 0xF000;
- vmcb->cs.attr.bytes = 0x089b;
- vmcb->cs.limit = 0xffff;
- vmcb->cs.base = 0x000F0000;
-
- vmcb->ss.sel = 0x00;
- vmcb->ss.attr.bytes = 0x0893;
- vmcb->ss.limit = 0xffff;
- vmcb->ss.base = 0x00;
-
- vmcb->ds.sel = 0x00;
- vmcb->ds.attr.bytes = 0x0893;
- vmcb->ds.limit = 0xffff;
- vmcb->ds.base = 0x00;
-
- vmcb->es.sel = 0x00;
- vmcb->es.attr.bytes = 0x0893;
- vmcb->es.limit = 0xffff;
- vmcb->es.base = 0x00;
-
- vmcb->fs.sel = 0x00;
- vmcb->fs.attr.bytes = 0x0893;
- vmcb->fs.limit = 0xffff;
- vmcb->fs.base = 0x00;
-
- vmcb->gs.sel = 0x00;
- vmcb->gs.attr.bytes = 0x0893;
- vmcb->gs.limit = 0xffff;
- vmcb->gs.base = 0x00;
-
- vmcb->ldtr.sel = 0x00;
- vmcb->ldtr.attr.bytes = 0x0000;
- vmcb->ldtr.limit = 0x0;
- vmcb->ldtr.base = 0x00;
-
- vmcb->gdtr.sel = 0x00;
- vmcb->gdtr.attr.bytes = 0x0000;
- vmcb->gdtr.limit = 0x0;
- vmcb->gdtr.base = 0x00;
-
- vmcb->tr.sel = 0;
- vmcb->tr.attr.bytes = 0;
- vmcb->tr.limit = 0x0;
- vmcb->tr.base = 0;
-
- vmcb->idtr.sel = 0x00;
- vmcb->idtr.attr.bytes = 0x0000;
- vmcb->idtr.limit = 0x3ff;
- vmcb->idtr.base = 0x00;
-
- return 0;
-}
-
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned int exit_reason;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 667b59f964..ff2d8f3cbc 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -863,7 +863,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
{
uint32_t attr;
- ASSERT(v == current);
+ ASSERT((v == current) || !vcpu_runnable(v));
attr = reg->attr.bytes;
attr = ((attr & 0xf00) << 4) | (attr & 0xff);
@@ -872,6 +872,8 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
if ( !reg->attr.fields.p )
attr |= (1u << 16);
+ vmx_vmcs_enter(v);
+
switch ( seg )
{
case x86_seg_cs:
@@ -933,6 +935,8 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
default:
BUG();
}
+
+ vmx_vmcs_exit(v);
}
/* Make sure that xen intercepts any FP accesses from current */
@@ -965,17 +969,6 @@ static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
vmx_vmcs_exit(v);
}
-static void vmx_init_ap_context(
- struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
- memset(ctxt, 0, sizeof(*ctxt));
-#ifdef VMXASSIST
- ctxt->user_regs.eip = VMXASSIST_BASE;
- ctxt->user_regs.edx = vcpuid;
- ctxt->user_regs.ebx = trampoline_vector;
-#endif
-}
-
void do_nmi(struct cpu_user_regs *);
static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -1159,7 +1152,6 @@ static struct hvm_function_table vmx_function_table = {
.stts = vmx_stts,
.set_tsc_offset = vmx_set_tsc_offset,
.inject_exception = vmx_inject_exception,
- .init_ap_context = vmx_init_ap_context,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
.cpu_up = vmx_cpu_up,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 96ad350eb6..079e3c5da2 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -121,9 +121,6 @@ struct hvm_function_table {
void (*inject_exception)(unsigned int trapnr, int errcode,
unsigned long cr2);
- void (*init_ap_context)(struct vcpu_guest_context *ctxt,
- int vcpuid, int trampoline_vector);
-
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
@@ -239,13 +236,6 @@ void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
static inline void
-hvm_init_ap_context(struct vcpu_guest_context *ctxt,
- int vcpuid, int trampoline_vector)
-{
- return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
-}
-
-static inline void
hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)
{
hvm_funcs.inject_exception(trapnr, errcode, cr2);