-rw-r--r--  xen/arch/x86/hvm/io.c            |   4
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c       |  12
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c       |  16
-rw-r--r--  xen/include/asm-x86/hvm/trace.h  | 158
-rw-r--r--  xen/include/public/trace.h       |   6
5 files changed, 173 insertions, 23 deletions
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 26961642ca..2592039e85 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -40,6 +40,7 @@
#include <asm/hvm/vpt.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
+#include <asm/hvm/trace.h>
#include <public/sched.h>
#include <xen/iocap.h>
@@ -476,6 +477,7 @@ static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
printk("Error: %s unknown port size\n", __FUNCTION__);
domain_crash_synchronous();
}
+ HVMTRACE_1D(IO_ASSIST, current, p->data);
}
}
@@ -491,6 +493,8 @@ static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
dst = mmio_opp->operand[1];
size = operand_size(src);
+ HVMTRACE_1D(MMIO_ASSIST, current, p->data);
+
switch (mmio_opp->instr) {
case INSTR_MOV:
if (dst & REGISTER) {
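
Note: the HVMTRACE_1D(IO_ASSIST, current, p->data) call added above expands, under the HVMTRACE_ND macro introduced later in this patch, to roughly the following. This is an illustrative sketch of the expansion, not verbatim preprocessor output:

    /* Rough expansion of HVMTRACE_1D(IO_ASSIST, current, p->data). */
    if (DO_TRC_HVM_IO_ASSIST)
    {
        struct {
            unsigned did:16, vid:16;   /* domain id, vcpu id */
            u32 d[4];                  /* payload words */
        } _d;
        _d.did = current->domain->domain_id;
        _d.vid = current->vcpu_id;
        _d.d[0] = p->data;             /* value completed by the device model */
        _d.d[1] = _d.d[2] = _d.d[3] = 0;
        trace_var(TRC_HVM_IO_ASSIST, 0 /* !cycles */,
                  sizeof(u32) * (1 + 1), /* id word plus one payload word */
                  (unsigned char *)&_d);
    }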
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 7f6b1c66b4..d0cc5f21cd 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1482,7 +1482,7 @@ static void svm_io_instruction(struct vcpu *v)
if (dir==IOREQ_READ)
HVMTRACE_2D(IO_READ, v, port, size);
else
- HVMTRACE_2D(IO_WRITE, v, port, size);
+ HVMTRACE_3D(IO_WRITE, v, port, size, regs->eax);
HVM_DBG_LOG(DBG_LEVEL_IO,
"svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
@@ -1759,6 +1759,7 @@ static void svm_cr_access(
vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
+ HVMTRACE_0D(CLTS, current);
break;
case INSTR_LMSW:
@@ -1766,6 +1767,7 @@ static void svm_cr_access(
value = get_reg(gpreg, regs, vmcb) & 0xF;
value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
result = svm_set_cr0(value);
+ HVMTRACE_1D(LMSW, current, value);
break;
case INSTR_SMSW:
@@ -1912,7 +1914,7 @@ static void svm_do_msr_access(
regs->edx = msr_content >> 32;
done:
- HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
+ hvmtrace_msr_read(v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
@@ -1922,7 +1924,7 @@ static void svm_do_msr_access(
{
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
+ hvmtrace_msr_write(v, ecx, msr_content);
switch (ecx)
{
@@ -2158,7 +2160,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
exit_reason = vmcb->exitcode;
- HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
+ hvmtrace_vmexit(v, regs->eip, exit_reason);
if ( unlikely(exit_reason == VMEXIT_INVALID) )
{
@@ -2378,7 +2380,7 @@ asmlinkage void svm_trace_vmentry(void)
struct vcpu *v = current;
/* This is the last C code before the VMRUN instruction. */
- HVMTRACE_0D(VMENTRY, v);
+ hvmtrace_vmentry(v);
}
/*
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cb88ab5c6c..647cabfa77 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1825,7 +1825,7 @@ static void vmx_io_instruction(unsigned long exit_qualification,
if ( dir == IOREQ_READ )
HVMTRACE_2D(IO_READ, current, port, size);
else
- HVMTRACE_2D(IO_WRITE, current, port, size);
+ HVMTRACE_3D(IO_WRITE, current, port, size, regs->eax);
if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
hvm_print_line(current, regs->eax); /* guest debug output */
@@ -2249,11 +2249,13 @@ static int vmx_cr_access(unsigned long exit_qualification,
v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
__vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+ HVMTRACE_0D(CLTS, current);
break;
case TYPE_LMSW:
value = v->arch.hvm_vcpu.guest_cr[0];
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
+ HVMTRACE_1D(LMSW, current, value);
return vmx_set_cr0(value);
default:
BUG();
@@ -2326,7 +2328,7 @@ static int vmx_do_msr_read(struct cpu_user_regs *regs)
regs->edx = msr_content >> 32;
done:
- HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
+ hvmtrace_msr_read(v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax,
(unsigned long)regs->edx);
@@ -2407,7 +2409,8 @@ static int vmx_do_msr_write(struct cpu_user_regs *regs)
ecx, (u32)regs->eax, (u32)regs->edx);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
+
+ hvmtrace_msr_write(v, ecx, msr_content);
switch ( ecx )
{
@@ -2550,7 +2553,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
exit_reason = __vmread(VM_EXIT_REASON);
- HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
+ hvmtrace_vmexit(v, regs->eip, exit_reason);
perfc_incra(vmexits, exit_reason);
@@ -2637,7 +2640,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
if ( paging_fault(exit_qualification, regs) )
{
- HVMTRACE_2D(PF_XEN, v, exit_qualification, regs->error_code);
+ hvmtrace_pf_xen(v, exit_qualification, regs->error_code);
break;
}
@@ -2791,7 +2794,8 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
asmlinkage void vmx_trace_vmentry(void)
{
struct vcpu *v = current;
- HVMTRACE_0D(VMENTRY, v);
+
+ hvmtrace_vmentry(v);
}
/*
diff --git a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h
index 740d1d1108..54c578f912 100644
--- a/xen/include/asm-x86/hvm/trace.h
+++ b/xen/include/asm-x86/hvm/trace.h
@@ -26,20 +26,154 @@
#define DO_TRC_HVM_VMMCALL 1
#define DO_TRC_HVM_HLT 1
#define DO_TRC_HVM_INVLPG 1
+#define DO_TRC_HVM_IO_ASSIST 1
+#define DO_TRC_HVM_MMIO_ASSIST 1
+#define DO_TRC_HVM_CLTS 1
+#define DO_TRC_HVM_LMSW 1
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) \
- do { \
- if (DO_TRC_HVM_ ## evt) \
- TRACE_5D( \
- TRC_HVM_ ## evt, \
- ((vcpu)->domain->domain_id<<16) + (vcpu)->vcpu_id, \
- d1, d2, d3, d4 \
- ); \
+
+
+static inline void hvmtrace_vmexit(struct vcpu *v,
+ unsigned long rip,
+ unsigned long exit_reason)
+{
+#ifdef __x86_64__
+ if(hvm_long_mode_enabled(v))
+ {
+ struct {
+ unsigned did:16, vid:16;
+ unsigned exit_reason:32;
+ u64 rip;
+ } d;
+
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.exit_reason = exit_reason;
+ d.rip = rip;
+ trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+ } else {
+#endif
+ struct {
+ unsigned did:16, vid:16;
+ unsigned exit_reason:32;
+ u32 eip;
+ } d;
+
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.exit_reason = exit_reason;
+ d.eip = rip;
+ trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+#ifdef __x86_64__
+ }
+#endif
+}
+
+
+static inline void hvmtrace_vmentry(struct vcpu *v)
+{
+ struct {
+ unsigned did:16, vid:16;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content)
+{
+ struct {
+ unsigned did:16, vid:16;
+ u32 ecx;
+ u64 msr_content;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.ecx = ecx;
+ d.msr_content = msr_content;
+ trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content)
+{
+ struct {
+ unsigned did:16, vid:16;
+ u32 ecx;
+ u64 msr_content;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.ecx = ecx;
+ d.msr_content = msr_content;
+ trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va,
+ u32 error_code)
+{
+#ifdef __x86_64__
+ if(hvm_long_mode_enabled(v))
+ {
+ struct {
+ unsigned did:16, vid:16;
+ u32 error_code;
+ u64 va;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.error_code = error_code;
+ d.va = va;
+ trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
+ (unsigned char *)&d);
+ } else {
+#endif
+ struct {
+ unsigned did:16, vid:16;
+ u32 error_code;
+ u32 va;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.error_code = error_code;
+ d.va = va;
+ trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
+#ifdef __x86_64__
+ }
+#endif
+}
+
+#define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4) \
+ do { \
+ if (DO_TRC_HVM_ ## evt) \
+ { \
+ struct { \
+ unsigned did:16, vid:16; \
+ u32 d[4]; \
+ } _d; \
+ _d.did=(vcpu)->domain->domain_id; \
+ _d.vid=(vcpu)->vcpu_id; \
+ _d.d[0]=(d1); \
+ _d.d[1]=(d2); \
+ _d.d[2]=(d3); \
+ _d.d[3]=(d4); \
+ trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \
+ sizeof(u32)*(count+1), (unsigned char *)&_d); \
+ } \
} while(0)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) HVMTRACE_4D(evt, vcpu, d1, d2, d3, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2) HVMTRACE_4D(evt, vcpu, d1, d2, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1) HVMTRACE_4D(evt, vcpu, d1, 0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu) HVMTRACE_4D(evt, vcpu, 0, 0, 0, 0)
+#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) HVMTRACE_ND(evt, vcpu, 4, d1, d2, d3, d4)
+#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) HVMTRACE_ND(evt, vcpu, 3, d1, d2, d3, 0)
+#define HVMTRACE_2D(evt, vcpu, d1, d2) HVMTRACE_ND(evt, vcpu, 2, d1, d2, 0, 0)
+#define HVMTRACE_1D(evt, vcpu, d1) HVMTRACE_ND(evt, vcpu, 1, d1, 0, 0, 0)
+#define HVMTRACE_0D(evt, vcpu) HVMTRACE_ND(evt, vcpu, 0, 0, 0, 0, 0)
#endif //__ASM_X86_HVM_TRACE_H__
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
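
On the consumer side, a tool reading the trace buffer interprets these fixed-layout records by event ID. A minimal sketch of decoding the 64-bit vmexit record defined above; the struct layout mirrors hvmtrace_vmexit(), while decode_hvm_vmexit64() is a hypothetical helper and not part of this patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the record written by hvmtrace_vmexit() when the guest is
     * in long mode (TRC_HVM_VMEXIT64). */
    struct hvm_vmexit64_rec {
        uint32_t did:16, vid:16;   /* domain id, vcpu id */
        uint32_t exit_reason;
        uint64_t rip;              /* guest RIP at the time of the exit */
    };

    /* Hypothetical consumer-side helper: print one TRC_HVM_VMEXIT64 payload. */
    static void decode_hvm_vmexit64(const void *payload, size_t len)
    {
        struct hvm_vmexit64_rec rec;

        if (len < sizeof(rec))
            return;                /* truncated record */
        memcpy(&rec, payload, sizeof(rec));
        printf("d%uv%u vmexit reason %#x rip %#llx\n",
               (unsigned)rec.did, (unsigned)rec.vid, rec.exit_reason,
               (unsigned long long)rec.rip);
    }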
diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
index a99c782d00..ecdce0c43d 100644
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -77,6 +77,7 @@
/* trace events per subclass */
#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + 0x03)
#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
@@ -98,6 +99,11 @@
#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
+#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
+#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
+#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
+#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
+#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + 0x20)
/* This structure represents a single trace buffer record. */
struct t_rec {
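
The new event IDs above are plain additions to the TRC_HVM_ENTRYEXIT and TRC_HVM_HANDLER subclasses, so a trace consumer only needs a name table to report them. An illustrative lookup, assuming the definitions from this header are in scope; hvm_evt_name() is a hypothetical helper, not part of the patch:

    /* Hypothetical consumer-side name lookup for the newly added events. */
    static const char *hvm_evt_name(uint32_t event)
    {
        switch (event) {
        case TRC_HVM_VMEXIT64:    return "VMEXIT64";
        case TRC_HVM_IO_ASSIST:   return "IO_ASSIST";
        case TRC_HVM_MMIO_ASSIST: return "MMIO_ASSIST";
        case TRC_HVM_CLTS:        return "CLTS";
        case TRC_HVM_LMSW:        return "LMSW";
        case TRC_HVM_PF_XEN64:    return "PF_XEN64";
        default:                  return "(unknown HVM event)";
        }
    }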