about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2006-03-23 15:53:52 +0100
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2006-03-23 15:53:52 +0100
commit3b214d9b38158d1cd18790b67b5a27fd57cb9955 (patch)
tree5706e9113b7a6689cd7b331b20c0335c401de6ad
parentc671da80d229f26c86ee96edccfb6678fcc1d948 (diff)
downloadxen-3b214d9b38158d1cd18790b67b5a27fd57cb9955.tar.gz
xen-3b214d9b38158d1cd18790b67b5a27fd57cb9955.tar.bz2
xen-3b214d9b38158d1cd18790b67b5a27fd57cb9955.zip
Merge hvm_store_cpu_guest_regs() and hvm_store_cpu_guest_ctrl_regs()
into a single function. On VMX, make the function work even when the passed
VCPU is not the currently-executing VCPU. This allows gdbserver-xen to get
correct cpu register context for VMX guests.

Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
-rw-r--r--xen/arch/x86/dom0_ops.c3
-rw-r--r--xen/arch/x86/hvm/platform.c2
-rw-r--r--xen/arch/x86/hvm/svm/svm.c58
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c100
-rw-r--r--xen/arch/x86/x86_32/traps.c3
-rw-r--r--xen/arch/x86/x86_64/traps.c3
-rw-r--r--xen/include/asm-x86/hvm/hvm.h21
7 files changed, 112 insertions, 78 deletions
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index cfb8eba348..ab76b4e9f5 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -460,8 +460,7 @@ void arch_getdomaininfo_ctxt(
if ( hvm_guest(v) )
{
- hvm_store_cpu_guest_regs(v, &c->user_regs);
- hvm_store_cpu_guest_ctrl_regs(v, c->ctrlreg);
+ hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
}
else
{
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index 4ae448ba39..94eb1fb13f 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -773,7 +773,7 @@ void handle_mmio(unsigned long va, unsigned long gpa)
mmio_opp = &v->arch.hvm_vcpu.mmio_op;
regs = mmio_opp->inst_decoder_regs;
- hvm_store_cpu_guest_regs(v, regs);
+ hvm_store_cpu_guest_regs(v, regs, NULL);
if ((inst_len = hvm_instruction_length(v)) <= 0) {
printf("handle_mmio: failed to get instruction length\n");
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 42bda15b38..22a5bcb418 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -201,31 +201,41 @@ int svm_initialize_guest_resources(struct vcpu *v)
}
static void svm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
+ struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ if ( regs != NULL )
+ {
#if defined (__x86_64__)
- regs->rip = vmcb->rip;
- regs->rsp = vmcb->rsp;
- regs->rflags = vmcb->rflags;
- regs->cs = vmcb->cs.sel;
- regs->ds = vmcb->ds.sel;
- regs->es = vmcb->es.sel;
- regs->ss = vmcb->ss.sel;
- regs->gs = vmcb->gs.sel;
- regs->fs = vmcb->fs.sel;
+ regs->rip = vmcb->rip;
+ regs->rsp = vmcb->rsp;
+ regs->rflags = vmcb->rflags;
+ regs->cs = vmcb->cs.sel;
+ regs->ds = vmcb->ds.sel;
+ regs->es = vmcb->es.sel;
+ regs->ss = vmcb->ss.sel;
+ regs->gs = vmcb->gs.sel;
+ regs->fs = vmcb->fs.sel;
#elif defined (__i386__)
- regs->eip = vmcb->rip;
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
- regs->cs = vmcb->cs.sel;
- regs->ds = vmcb->ds.sel;
- regs->es = vmcb->es.sel;
- regs->ss = vmcb->ss.sel;
- regs->gs = vmcb->gs.sel;
- regs->fs = vmcb->fs.sel;
+ regs->eip = vmcb->rip;
+ regs->esp = vmcb->rsp;
+ regs->eflags = vmcb->rflags;
+ regs->cs = vmcb->cs.sel;
+ regs->ds = vmcb->ds.sel;
+ regs->es = vmcb->es.sel;
+ regs->ss = vmcb->ss.sel;
+ regs->gs = vmcb->gs.sel;
+ regs->fs = vmcb->fs.sel;
#endif
+ }
+
+ if ( crs != NULL )
+ {
+ crs[0] = vmcb->cr0;
+ crs[3] = vmcb->cr3;
+ crs[4] = vmcb->cr4;
+ }
}
static void svm_load_cpu_guest_regs(
@@ -372,15 +382,6 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
return 1;
}
-void svm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- crs[0] = vmcb->cr0;
- crs[3] = vmcb->cr3;
- crs[4] = vmcb->cr4;
-}
-
void svm_modify_guest_state(struct vcpu *v)
{
svm_modify_vmcb(v, &v->arch.guest_context.user_regs);
@@ -448,7 +449,6 @@ int start_svm(void)
hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
- hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
hvm_funcs.modify_guest_state = svm_modify_guest_state;
hvm_funcs.realmode = svm_realmode;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fcca96efdb..69af5901dc 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -398,31 +398,81 @@ void vmx_migrate_timers(struct vcpu *v)
migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
}
-void vmx_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+struct vmx_store_cpu_guest_regs_callback_info {
+ struct vcpu *v;
+ struct cpu_user_regs *regs;
+ unsigned long *crs;
+};
+
+static void vmx_store_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
+
+static void vmx_store_cpu_guest_regs_callback(void *data)
{
+ struct vmx_store_cpu_guest_regs_callback_info *info = data;
+ vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
+}
+
+static void vmx_store_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
+{
+ if ( v != current )
+ {
+ /* Non-current VCPUs must be paused to get a register snapshot. */
+ ASSERT(atomic_read(&v->pausecnt) != 0);
+
+ if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
+ {
+ /* Get register details from remote CPU. */
+ struct vmx_store_cpu_guest_regs_callback_info info = {
+ .v = v, .regs = regs, .crs = crs };
+ cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
+ on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
+ &info, 1, 1);
+ return;
+ }
+
+ /* Register details are on this CPU. Load the correct VMCS. */
+ __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+ }
+
+ ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+
+ if ( regs != NULL )
+ {
#if defined (__x86_64__)
- __vmread(GUEST_RFLAGS, &regs->rflags);
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_GS_SELECTOR, &regs->gs);
- __vmread(GUEST_FS_SELECTOR, &regs->fs);
- __vmread(GUEST_RIP, &regs->rip);
- __vmread(GUEST_RSP, &regs->rsp);
+ __vmread(GUEST_RFLAGS, &regs->rflags);
+ __vmread(GUEST_SS_SELECTOR, &regs->ss);
+ __vmread(GUEST_CS_SELECTOR, &regs->cs);
+ __vmread(GUEST_DS_SELECTOR, &regs->ds);
+ __vmread(GUEST_ES_SELECTOR, &regs->es);
+ __vmread(GUEST_GS_SELECTOR, &regs->gs);
+ __vmread(GUEST_FS_SELECTOR, &regs->fs);
+ __vmread(GUEST_RIP, &regs->rip);
+ __vmread(GUEST_RSP, &regs->rsp);
#elif defined (__i386__)
- __vmread(GUEST_RFLAGS, &regs->eflags);
- __vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_CS_SELECTOR, &regs->cs);
- __vmread(GUEST_DS_SELECTOR, &regs->ds);
- __vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_GS_SELECTOR, &regs->gs);
- __vmread(GUEST_FS_SELECTOR, &regs->fs);
- __vmread(GUEST_RIP, &regs->eip);
- __vmread(GUEST_RSP, &regs->esp);
-#else
-#error Unsupported architecture
+ __vmread(GUEST_RFLAGS, &regs->eflags);
+ __vmread(GUEST_SS_SELECTOR, &regs->ss);
+ __vmread(GUEST_CS_SELECTOR, &regs->cs);
+ __vmread(GUEST_DS_SELECTOR, &regs->ds);
+ __vmread(GUEST_ES_SELECTOR, &regs->es);
+ __vmread(GUEST_GS_SELECTOR, &regs->gs);
+ __vmread(GUEST_FS_SELECTOR, &regs->fs);
+ __vmread(GUEST_RIP, &regs->eip);
+ __vmread(GUEST_RSP, &regs->esp);
#endif
+ }
+
+ if ( crs != NULL )
+ {
+ __vmread(CR0_READ_SHADOW, &crs[0]);
+ __vmread(GUEST_CR3, &crs[3]);
+ __vmread(CR4_READ_SHADOW, &crs[4]);
+ }
+
+ /* Reload current VCPU's VMCS if it was temporarily unloaded. */
+ if ( (v != current) && hvm_guest(current) )
+ __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
}
void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
@@ -456,13 +506,6 @@ void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
#endif
}
-void vmx_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
- __vmread(CR0_READ_SHADOW, &crs[0]);
- __vmread(GUEST_CR3, &crs[3]);
- __vmread(CR4_READ_SHADOW, &crs[4]);
-}
-
void vmx_modify_guest_state(struct vcpu *v)
{
modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
@@ -616,7 +659,6 @@ int start_vmx(void)
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
- hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
hvm_funcs.modify_guest_state = vmx_modify_guest_state;
hvm_funcs.realmode = vmx_realmode;
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 6fc271867c..1c570dad29 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs *regs)
if ( hvm_guest(current) && guest_mode(regs) )
{
context = "hvm";
- hvm_store_cpu_guest_regs(current, &fault_regs);
- hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
+ hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
}
else
{
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index ef1f21cb09..e78c8f9453 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -27,8 +27,7 @@ void show_registers(struct cpu_user_regs *regs)
if ( hvm_guest(current) && guest_mode(regs) )
{
context = "hvm";
- hvm_store_cpu_guest_regs(current, &fault_regs);
- hvm_store_cpu_guest_ctrl_regs(current, fault_crs);
+ hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
}
else
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 70af33035b..eff373996c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -41,12 +41,12 @@ struct hvm_function_table {
/*
* Store and load guest state:
* 1) load/store guest register state,
- * 2) store guest control register state (used for panic dumps),
- * 3) modify guest state (e.g., set debug flags).
+ * 2) modify guest state (e.g., set debug flags).
*/
- void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
- void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
- void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
+ void (*store_cpu_guest_regs)(
+ struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
+ void (*load_cpu_guest_regs)(
+ struct vcpu *v, struct cpu_user_regs *r);
void (*modify_guest_state)(struct vcpu *v);
/*
@@ -93,9 +93,10 @@ hvm_relinquish_guest_resources(struct domain *d)
}
static inline void
-hvm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
+hvm_store_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
{
- hvm_funcs.store_cpu_guest_regs(v, r);
+ hvm_funcs.store_cpu_guest_regs(v, r, crs);
}
static inline void
@@ -105,12 +106,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
}
static inline void
-hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
-{
- hvm_funcs.store_cpu_guest_ctrl_regs(v, crs);
-}
-
-static inline void
hvm_modify_guest_state(struct vcpu *v)
{
hvm_funcs.modify_guest_state(v);