author    George Dunlap <gdunlap@xensource.com>  2007-09-21 15:33:37 +0100
committer George Dunlap <gdunlap@xensource.com>  2007-09-21 15:33:37 +0100
commit    a92facd683d73389cda898d5061f6283bd41d43c
tree      d1866967fca6455f0c0e646db33cdf89a272ed13
parent    2ec9f061dc68fc474238150bb1a21c5a882407b0
[xen][hvm][tracing] Refine hvm tracing
This patch does two things:

 * Allows hvm traces to take advantage of the variable-size traces
 * Adds some hvm tracing functionality

This includes tracing actions like clts and lmsw, values of {p,mm}io
reads and writes, and making different trace records for hvm domains
running in 64-bit mode if the trace record includes a virtual address.
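As a usage sketch, the new interface is meant to be driven from the HVM
exit paths. The handler names and the rip/exit_reason plumbing below are
hypothetical and not part of this patch; only the hvmtrace_*() helpers
and the HVMTRACE_nD() macro family come from the header introduced here:

/* Hypothetical call sites, for illustration only. */
static void example_exit_handler(struct vcpu *v, unsigned long rip,
                                 unsigned long exit_reason)
{
    /* Fixed-layout record; hvmtrace_vmexit() picks the 64-bit record
     * (TRC_HVM_VMEXIT64) when the guest is in long mode, so rip is
     * not truncated to 32 bits. */
    hvmtrace_vmexit(v, rip, exit_reason);
}

static void example_clts_handler(struct vcpu *v)
{
    /* Variable-size record via the new macro family; with zero data
     * words only the domain/vcpu ID word is logged. */
    HVMTRACE_0D(CLTS, v);
}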
Diffstat (limited to 'xen/include/asm-x86/hvm/trace.h')
-rw-r--r--  xen/include/asm-x86/hvm/trace.h  158
1 file changed, 146 insertions(+), 12 deletions(-)
diff --git a/xen/include/asm-x86/hvm/trace.h b/xen/include/asm-x86/hvm/trace.h
index 740d1d1108..54c578f912 100644
--- a/xen/include/asm-x86/hvm/trace.h
+++ b/xen/include/asm-x86/hvm/trace.h
@@ -26,20 +26,154 @@
#define DO_TRC_HVM_VMMCALL 1
#define DO_TRC_HVM_HLT 1
#define DO_TRC_HVM_INVLPG 1
+#define DO_TRC_HVM_IO_ASSIST 1
+#define DO_TRC_HVM_MMIO_ASSIST 1
+#define DO_TRC_HVM_CLTS 1
+#define DO_TRC_HVM_LMSW 1
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) \
- do { \
- if (DO_TRC_HVM_ ## evt) \
- TRACE_5D( \
- TRC_HVM_ ## evt, \
- ((vcpu)->domain->domain_id<<16) + (vcpu)->vcpu_id, \
- d1, d2, d3, d4 \
- ); \
+
+
+static inline void hvmtrace_vmexit(struct vcpu *v,
+ unsigned long rip,
+ unsigned long exit_reason)
+{
+#ifdef __x86_64__
+ if(hvm_long_mode_enabled(v))
+ {
+ struct {
+ unsigned did:16, vid:16;
+ unsigned exit_reason:32;
+ u64 rip;
+ } d;
+
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.exit_reason = exit_reason;
+ d.rip = rip;
+ trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+ } else {
+#endif
+ struct {
+ unsigned did:16, vid:16;
+ unsigned exit_reason:32;
+ u32 eip;
+ } d;
+
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.exit_reason = exit_reason;
+ d.eip = rip;
+ trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+#ifdef __x86_64__
+ }
+#endif
+}
+
+
+static inline void hvmtrace_vmentry(struct vcpu *v)
+{
+ struct {
+ unsigned did:16, vid:16;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content)
+{
+ struct {
+ unsigned did:16, vid:16;
+ u32 ecx;
+ u64 msr_content;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.ecx = ecx;
+ d.msr_content = msr_content;
+ trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content)
+{
+ struct {
+ unsigned did:16, vid:16;
+ u32 ecx;
+ u64 msr_content;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.ecx = ecx;
+ d.msr_content = msr_content;
+ trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
+}
+
+static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va,
+ u32 error_code)
+{
+#ifdef __x86_64__
+ if(hvm_long_mode_enabled(v))
+ {
+ struct {
+ unsigned did:16, vid:16;
+ u32 error_code;
+ u64 va;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.error_code = error_code;
+ d.va = va;
+ trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
+ (unsigned char *)&d);
+ } else {
+#endif
+ struct {
+ unsigned did:16, vid:16;
+ u32 error_code;
+ u32 va;
+ } d;
+ d.did = v->domain->domain_id;
+ d.vid = v->vcpu_id;
+ d.error_code = error_code;
+ d.va = va;
+ trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d), (unsigned char *)&d);
+#ifdef __x86_64__
+ }
+#endif
+}
+
+#define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4) \
+ do { \
+ if (DO_TRC_HVM_ ## evt) \
+ { \
+ struct { \
+ unsigned did:16, vid:16; \
+ u32 d[4]; \
+ } _d; \
+ _d.did=(vcpu)->domain->domain_id; \
+ _d.vid=(vcpu)->vcpu_id; \
+ _d.d[0]=(d1); \
+ _d.d[1]=(d2); \
+ _d.d[2]=(d3); \
+ _d.d[3]=(d4); \
+ trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \
+ sizeof(u32)*count+1, (unsigned char *)&_d); \
+ } \
} while(0)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) HVMTRACE_4D(evt, vcpu, d1, d2, d3, 0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2) HVMTRACE_4D(evt, vcpu, d1, d2, 0, 0)
-#define HVMTRACE_1D(evt, vcpu, d1) HVMTRACE_4D(evt, vcpu, d1, 0, 0, 0)
-#define HVMTRACE_0D(evt, vcpu) HVMTRACE_4D(evt, vcpu, 0, 0, 0, 0)
+#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4) HVMTRACE_ND(evt, vcpu, 4, d1, d2, d3, d4)
+#define HVMTRACE_3D(evt, vcpu, d1, d2, d3) HVMTRACE_ND(evt, vcpu, 3, d1, d2, d3, 0)
+#define HVMTRACE_2D(evt, vcpu, d1, d2) HVMTRACE_ND(evt, vcpu, 2, d1, d2, 0, 0)
+#define HVMTRACE_1D(evt, vcpu, d1) HVMTRACE_ND(evt, vcpu, 1, d1, 0, 0, 0)
+#define HVMTRACE_0D(evt, vcpu) HVMTRACE_ND(evt, vcpu, 0, 0, 0, 0, 0)
#endif //__ASM_X86_HVM_TRACE_H__
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */