aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-02-26 17:04:39 +0000
committerkfraser@localhost.localdomain <kfraser@localhost.localdomain>2007-02-26 17:04:39 +0000
commit7998832e812ab453e5373d8e63c0afa164fcddfe (patch)
tree1f105b06000962c19187324a661b2491185d41f5
parent05741dbcafce2672294d685f1dd099556693b8b7 (diff)
downloadxen-7998832e812ab453e5373d8e63c0afa164fcddfe.tar.gz
xen-7998832e812ab453e5373d8e63c0afa164fcddfe.tar.bz2
xen-7998832e812ab453e5373d8e63c0afa164fcddfe.zip
[xentrace][HVM] introduce HVM tracing to unify SVM and VMX tracing
* new tracing class TRC_HVM replacing TRC_VMX
* 20 new trace events to differentiate the information carried by them
** added corresponding trace points in SVM and VMX code
** updated formats file: descriptive formatting of trace event data
** completely replacing previous simple tracing in VMX
* possibility to toggle single events on and off at compile time in include/asm-x86/hvm/trace.h
Signed-off-by: Thomas Friebel <thomas.friebel@amd.com>
-rw-r--r--xen/arch/x86/hvm/io.c1
-rw-r--r--xen/arch/x86/hvm/svm/intr.c7
-rw-r--r--xen/arch/x86/hvm/svm/svm.c70
-rw-r--r--xen/arch/x86/hvm/vmx/intr.c3
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c83
-rw-r--r--xen/common/trace.c2
-rw-r--r--xen/include/asm-x86/hvm/support.h3
-rw-r--r--xen/include/asm-x86/hvm/vcpu.h2
-rw-r--r--xen/include/asm-x86/hvm/vmx/vmx.h6
-rw-r--r--xen/include/public/trace.h35
10 files changed, 128 insertions, 84 deletions
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 3a7a875bcc..f82c28e82a 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -416,7 +416,6 @@ static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
printk("Error: %s unknown port size\n", __FUNCTION__);
domain_crash_synchronous();
}
- TRACE_VMEXIT(3, regs->eax);
}
}
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index a4a458c72d..ff7ea26e58 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -37,6 +37,7 @@
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
+#include <asm/hvm/trace.h>
/*
* Most of this code is copied from vmx_io.c and modified
@@ -108,6 +109,7 @@ asmlinkage void svm_intr_assist(void)
if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow )
{
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+ HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
svm_inject_extint(v, 0x0); /* actual vector doesn't really matter */
return;
}
@@ -128,7 +130,10 @@ asmlinkage void svm_intr_assist(void)
if ( re_injecting && (pt = is_pt_irq(v, intr_vector, intr_type)) )
++pt->pending_intr_nr;
/* let's inject this interrupt */
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, intr_vector, 0);
+ if (re_injecting)
+ HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
+ else
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
svm_inject_extint(v, intr_vector);
break;
case APIC_DM_SMI:
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 441ad1c9a1..fcff16a67c 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -48,6 +48,7 @@
#include <asm/x86_emulate.h>
#include <public/sched.h>
#include <asm/hvm/vpt.h>
+#include <asm/hvm/trace.h>
#define SVM_EXTRA_DEBUG
@@ -81,6 +82,11 @@ static inline void svm_inject_exception(struct vcpu *v, int trap,
eventinj_t event;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ if ( trap == TRAP_page_fault )
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
+ else
+ HVMTRACE_2D(INJ_EXC, v, trap, error_code);
+
event.bytes = 0;
event.fields.v = 1;
event.fields.type = EVENTTYPE_EXCEPTION;
@@ -977,9 +983,9 @@ static void svm_hvm_inject_exception(
unsigned int trapnr, int errcode, unsigned long cr2)
{
struct vcpu *v = current;
- svm_inject_exception(v, trapnr, (errcode != -1), errcode);
if ( trapnr == TRAP_page_fault )
v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
+ svm_inject_exception(v, trapnr, (errcode != -1), errcode);
}
static int svm_event_injection_faulted(struct vcpu *v)
@@ -1209,13 +1215,17 @@ static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb,
regs->ecx = (unsigned long)ecx;
regs->edx = (unsigned long)edx;
+ HVMTRACE_3D(CPUID, v, input,
+ ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
+
inst_len = __get_instruction_length(vmcb, INSTR_CPUID, NULL);
ASSERT(inst_len > 0);
__update_guest_eip(vmcb, inst_len);
}
-static inline unsigned long *get_reg_p(unsigned int gpreg,
- struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
+static inline unsigned long *get_reg_p(
+ unsigned int gpreg,
+ struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
{
unsigned long *reg_p = NULL;
switch (gpreg)
@@ -1300,6 +1310,8 @@ static void svm_dr_access(struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ HVMTRACE_0D(DR_WRITE, v);
+
v->arch.hvm_vcpu.flag_dr_dirty = 1;
__restore_debug_registers(v);
@@ -1579,6 +1591,11 @@ static void svm_io_instruction(struct vcpu *v)
else
size = 1;
+ if (dir==IOREQ_READ)
+ HVMTRACE_2D(IO_READ, v, port, size);
+ else
+ HVMTRACE_2D(IO_WRITE, v, port, size);
+
HVM_DBG_LOG(DBG_LEVEL_IO,
"svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
"exit_qualification = %"PRIx64,
@@ -1835,6 +1852,8 @@ static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
return;
}
+ HVMTRACE_2D(CR_READ, v, cr, value);
+
set_reg(gp, value, regs, vmcb);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
@@ -1859,6 +1878,8 @@ static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
value = get_reg(gpreg, regs, vmcb);
+ HVMTRACE_2D(CR_WRITE, v, cr, value);
+
HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
HVM_DBG_LOG(DBG_LEVEL_1, "current = %lx,", (unsigned long) current);
@@ -2152,6 +2173,7 @@ static inline void svm_do_msr_access(
regs->edx = msr_content >> 32;
done:
+ HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
@@ -2161,6 +2183,8 @@ static inline void svm_do_msr_access(
{
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
+
switch (ecx)
{
case MSR_IA32_TIME_STAMP_COUNTER:
@@ -2198,9 +2222,12 @@ static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
/* Check for interrupt not handled or new interrupt. */
if ( (vmcb->rflags & X86_EFLAGS_IF) &&
- (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
+ (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) ) {
+ HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
return;
+ }
+ HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
hvm_hlt(vmcb->rflags);
}
@@ -2312,6 +2339,8 @@ void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
__update_guest_eip (vmcb, inst_len);
}
+ HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
+
paging_invlpg(v, g_vaddr);
}
@@ -2428,6 +2457,8 @@ static int svm_do_vmmcall(struct vcpu *v, struct cpu_user_regs *regs)
inst_len = __get_instruction_length(vmcb, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
+ HVMTRACE_1D(VMMCALL, v, regs->eax);
+
if ( regs->eax & 0x80000000 )
{
/* VMMCALL sanity check */
@@ -2723,7 +2754,6 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
unsigned int exit_reason;
unsigned long eip;
struct vcpu *v = current;
- int error;
int do_debug = 0;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -2732,6 +2762,8 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
exit_reason = vmcb->exitcode;
save_svm_cpu_user_regs(v, regs);
+ HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
+
if (exit_reason == VMEXIT_INVALID)
{
svm_dump_vmcb(__func__, vmcb);
@@ -2854,8 +2886,6 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
}
#endif /* SVM_EXTRA_DEBUG */
- TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
-
switch (exit_reason)
{
case VMEXIT_EXCEPTION_DB:
@@ -2872,9 +2902,16 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
break;
case VMEXIT_INTR:
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(INTR, v);
+ break;
case VMEXIT_NMI:
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(NMI, v);
+ break;
case VMEXIT_SMI:
- /* Asynchronous events, handled when we STGI'd after the VMEXIT. */
+ /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
+ HVMTRACE_0D(SMI, v);
break;
case VMEXIT_INIT:
@@ -2914,16 +2951,14 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
- if (!(error = svm_do_page_fault(va, regs)))
+ if ( svm_do_page_fault(va, regs) )
{
- /* Inject #PG using Interruption-Information Fields */
- svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
-
- v->arch.hvm_svm.cpu_cr2 = va;
- vmcb->cr2 = va;
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
- VMEXIT_EXCEPTION_PF, va);
+ HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
+ break;
}
+
+ v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
+ svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
break;
}
@@ -3061,6 +3096,9 @@ asmlinkage void svm_load_cr2(void)
{
struct vcpu *v = current;
+ // this is the last C code before the VMRUN instruction
+ HVMTRACE_0D(VMENTRY, v);
+
local_irq_disable();
asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2));
}
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index 5be7aaeb02..8c0dc81a1c 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -37,6 +37,7 @@
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <public/hvm/ioreq.h>
+#include <asm/hvm/trace.h>
static inline void
@@ -162,8 +163,8 @@ asmlinkage void vmx_intr_assist(void)
case APIC_DM_EXTINT:
case APIC_DM_FIXED:
case APIC_DM_LOWEST:
+ HVMTRACE_2D(INJ_VIRQ, v, highest_vector, /*fake=*/ 0);
vmx_inject_extint(v, highest_vector, VMX_DELIVER_NO_ERROR_CODE);
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id, highest_vector, 0);
break;
case APIC_DM_SMI:
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index aabe5b789b..e15b6eca54 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -49,6 +49,7 @@
#include <asm/x86_emulate.h>
#include <asm/hvm/vpt.h>
#include <public/hvm/save.h>
+#include <asm/hvm/trace.h>
static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);
@@ -1127,7 +1128,6 @@ static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
result = paging_fault(va, regs);
- TRACE_VMEXIT(2, result);
#if 0
if ( !result )
{
@@ -1223,6 +1223,9 @@ static void vmx_do_cpuid(struct cpu_user_regs *regs)
regs->ebx = (unsigned long)ebx;
regs->ecx = (unsigned long)ecx;
regs->edx = (unsigned long)edx;
+
+ HVMTRACE_3D(CPUID, current, input,
+ ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx);
}
#define CASE_GET_REG_P(REG, reg) \
@@ -1247,6 +1250,8 @@ static void vmx_dr_access(unsigned long exit_qualification,
{
struct vcpu *v = current;
+ HVMTRACE_0D(DR_WRITE, v);
+
v->arch.hvm_vcpu.flag_dr_dirty = 1;
/* We could probably be smarter about this */
@@ -1267,6 +1272,8 @@ static void vmx_do_invlpg(unsigned long va)
unsigned long eip;
struct vcpu *v = current;
+ HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
+
eip = __vmread(GUEST_RIP);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
@@ -1416,11 +1423,14 @@ static void vmx_io_instruction(unsigned long exit_qualification,
else
port = regs->edx & 0xffff;
- TRACE_VMEXIT(1, port);
-
size = (exit_qualification & 7) + 1;
dir = test_bit(3, &exit_qualification); /* direction */
+ if (dir==IOREQ_READ)
+ HVMTRACE_2D(IO_READ, current, port, size);
+ else
+ HVMTRACE_2D(IO_WRITE, current, port, size);
+
if ( test_bit(4, &exit_qualification) ) { /* string instruction */
unsigned long addr, count = 1, base;
paddr_t paddr;
@@ -1614,9 +1624,6 @@ static void vmx_io_instruction(unsigned long exit_qualification,
if ( port == 0xe9 && dir == IOREQ_WRITE && size == 1 )
hvm_print_line(current, regs->eax); /* guest debug output */
- if ( dir == IOREQ_WRITE )
- TRACE_VMEXIT(2, regs->eax);
-
regs->eip += inst_len;
send_pio_req(port, 1, size, regs->eax, dir, df, 0);
}
@@ -2062,9 +2069,7 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
goto exit_and_crash;
}
- TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
- TRACE_VMEXIT(2, cr);
- TRACE_VMEXIT(3, value);
+ HVMTRACE_2D(CR_WRITE, v, cr, value);
HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
@@ -2231,9 +2236,7 @@ static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
break;
}
- TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
- TRACE_VMEXIT(2, cr);
- TRACE_VMEXIT(3, value);
+ HVMTRACE_2D(CR_READ, v, cr, value);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
}
@@ -2256,7 +2259,7 @@ static int vmx_cr_access(unsigned long exit_qualification,
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
- TRACE_VMEXIT(1, TYPE_CLTS);
+// TRACE_VMEXIT(1, TYPE_CLTS);
/* We initialise the FPU now, to avoid needing another vmexit. */
setup_fpu(v);
@@ -2272,8 +2275,8 @@ static int vmx_cr_access(unsigned long exit_qualification,
value = v->arch.hvm_vmx.cpu_shadow_cr0;
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
- TRACE_VMEXIT(1, TYPE_LMSW);
- TRACE_VMEXIT(2, value);
+// TRACE_VMEXIT(1, TYPE_LMSW);
+// TRACE_VMEXIT(2, value);
return vmx_set_cr0(value);
break;
default:
@@ -2327,6 +2330,7 @@ static inline int vmx_do_msr_read(struct cpu_user_regs *regs)
regs->edx = msr_content >> 32;
done:
+ HVMTRACE_2D(MSR_READ, v, ecx, msr_content);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax,
(unsigned long)regs->edx);
@@ -2343,6 +2347,7 @@ static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
ecx, (u32)regs->eax, (u32)regs->edx);
msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_2D(MSR_WRITE, v, ecx, msr_content);
switch (ecx) {
case MSR_IA32_TIME_STAMP_COUNTER:
@@ -2373,6 +2378,7 @@ static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
static void vmx_do_hlt(void)
{
unsigned long rflags;
+ HVMTRACE_0D(HLT, current);
rflags = __vmread(GUEST_RFLAGS);
hvm_hlt(rflags);
}
@@ -2396,7 +2402,7 @@ static inline void vmx_do_extint(struct cpu_user_regs *regs)
BUG_ON(!(vector & INTR_INFO_VALID_MASK));
vector &= INTR_INFO_VECTOR_MASK;
- TRACE_VMEXIT(1, vector);
+ HVMTRACE_1D(INTR, current, vector);
switch(vector) {
case LOCAL_TIMER_VECTOR:
@@ -2560,12 +2566,11 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
unsigned long exit_qualification, inst_len = 0;
struct vcpu *v = current;
- TRACE_3D(TRC_VMX_VMEXIT + v->vcpu_id, 0, 0, 0);
-
exit_reason = __vmread(VM_EXIT_REASON);
+ HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
+
perfc_incra(vmexits, exit_reason);
- TRACE_VMEXIT(0, exit_reason);
if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
local_irq_enable();
@@ -2589,7 +2594,6 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
vector = intr_info & INTR_INFO_VECTOR_MASK;
- TRACE_VMEXIT(1, vector);
perfc_incra(cause_vector, vector);
switch ( vector )
@@ -2647,26 +2651,24 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
exit_qualification = __vmread(EXIT_QUALIFICATION);
regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
- TRACE_VMEXIT(3, regs->error_code);
- TRACE_VMEXIT(4, exit_qualification);
-
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
(unsigned long)regs->eax, (unsigned long)regs->ebx,
(unsigned long)regs->ecx, (unsigned long)regs->edx,
(unsigned long)regs->esi, (unsigned long)regs->edi);
- if ( !vmx_do_page_fault(exit_qualification, regs) )
+ if ( vmx_do_page_fault(exit_qualification, regs) )
{
- /* Inject #PG using Interruption-Information Fields. */
- vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
- v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
- TRACE_3D(TRC_VMX_INTR, v->domain->domain_id,
- TRAP_page_fault, exit_qualification);
+ HVMTRACE_2D(PF_XEN, v, exit_qualification, regs->error_code);
+ break;
}
+
+ v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
+ vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
break;
}
case TRAP_nmi:
+ HVMTRACE_0D(NMI, v);
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
else
@@ -2708,11 +2710,11 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
__update_guest_eip(inst_len);
exit_qualification = __vmread(EXIT_QUALIFICATION);
vmx_do_invlpg(exit_qualification);
- TRACE_VMEXIT(4, exit_qualification);
break;
}
case EXIT_REASON_VMCALL:
{
+ HVMTRACE_1D(VMMCALL, v, regs->eax);
inst_len = __get_instruction_length(); /* Safe: VMCALL */
__update_guest_eip(inst_len);
hvm_do_hypercall(regs);
@@ -2724,7 +2726,6 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
if ( vmx_cr_access(exit_qualification, regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(4, exit_qualification);
break;
}
case EXIT_REASON_DR_ACCESS:
@@ -2735,23 +2736,16 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
exit_qualification = __vmread(EXIT_QUALIFICATION);
inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
vmx_io_instruction(exit_qualification, inst_len);
- TRACE_VMEXIT(4, exit_qualification);
break;
case EXIT_REASON_MSR_READ:
inst_len = __get_instruction_length(); /* Safe: RDMSR */
if ( vmx_do_msr_read(regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(1, regs->ecx);
- TRACE_VMEXIT(2, regs->eax);
- TRACE_VMEXIT(3, regs->edx);
break;
case EXIT_REASON_MSR_WRITE:
inst_len = __get_instruction_length(); /* Safe: WRMSR */
if ( vmx_do_msr_write(regs) )
__update_guest_eip(inst_len);
- TRACE_VMEXIT(1, regs->ecx);
- TRACE_VMEXIT(2, regs->eax);
- TRACE_VMEXIT(3, regs->edx);
break;
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
@@ -2785,18 +2779,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
asmlinkage void vmx_trace_vmentry(void)
{
struct vcpu *v = current;
- TRACE_5D(TRC_VMX_VMENTRY + current->vcpu_id,
- v->arch.hvm_vcpu.hvm_trace_values[0],
- v->arch.hvm_vcpu.hvm_trace_values[1],
- v->arch.hvm_vcpu.hvm_trace_values[2],
- v->arch.hvm_vcpu.hvm_trace_values[3],
- v->arch.hvm_vcpu.hvm_trace_values[4]);
-
- TRACE_VMEXIT(0, 0);
- TRACE_VMEXIT(1, 0);
- TRACE_VMEXIT(2, 0);
- TRACE_VMEXIT(3, 0);
- TRACE_VMEXIT(4, 0);
+ HVMTRACE_0D(VMENTRY, v);
}
/*
diff --git a/xen/common/trace.c b/xen/common/trace.c
index eb791c1f8f..6fc4abe312 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned long, lost_records);
/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
-int tb_init_done;
+int tb_init_done __read_mostly;
/* which CPUs tracing is enabled on */
static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 4a3b14afe5..6e43a0301c 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -116,9 +116,6 @@ extern unsigned int opt_hvm_debug_level;
#define HVM_DBG_LOG(level, _f, _a...)
#endif
-#define TRACE_VMEXIT(index, value) \
- current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
-
/*
* Save/restore support
*/
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 0d4c95930c..4da4c8a9c6 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -45,8 +45,6 @@ struct hvm_vcpu {
/* Flags */
int flag_dr_dirty;
- unsigned long hvm_trace_values[5];
-
union {
struct arch_vmx_struct vmx;
struct arch_svm_struct svm;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 701edf0e63..43be711bd2 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/i387.h>
+#include <asm/hvm/trace.h>
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_vmentry(void);
@@ -294,6 +295,11 @@ static inline void __vmx_inject_exception(struct vcpu *v, int trap, int type,
__vmwrite(VM_ENTRY_INSTRUCTION_LEN, ilen);
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
+
+ if (trap == TRAP_page_fault)
+ HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
+ else
+ HVMTRACE_2D(INJ_EXC, v, trap, error_code);
}
static inline void vmx_inject_hw_exception(
diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h
index ad567eda0c..bc34abe3ce 100644
--- a/xen/include/public/trace.h
+++ b/xen/include/public/trace.h
@@ -31,17 +31,16 @@
#define TRC_GEN 0x0001f000 /* General trace */
#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
-#define TRC_VMX 0x0008f000 /* Xen VMX trace */
+#define TRC_HVM 0x0008f000 /* Xen HVM trace */
#define TRC_MEM 0x0010f000 /* Xen memory trace */
#define TRC_ALL 0xfffff000
/* Trace subclasses */
#define TRC_SUBCLS_SHIFT 12
-/* trace subclasses for VMX */
-#define TRC_VMXEXIT 0x00081000 /* VMX exit trace */
-#define TRC_VMXENTRY 0x00082000 /* VMX exit trace */
-#define TRC_VMXINTR 0x00084000 /* VMX interrupt trace */
+/* trace subclasses for HVM */
+#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
+#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
/* Trace events per class */
#define TRC_LOST_RECORDS (TRC_GEN + 1)
@@ -67,10 +66,28 @@
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
/* trace events per subclass */
-#define TRC_VMX_VMEXIT (TRC_VMXEXIT + 1)
-#define TRC_VMX_VMENTRY (TRC_VMXENTRY + 1)
-#define TRC_VMX_INTR (TRC_VMXINTR + 1)
-
+#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
+#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
+#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
+#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
+#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
+#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
+#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
+#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
+#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
+#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
+#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
+#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
+#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
+#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
+#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
+#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
+#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
+#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
/* This structure represents a single trace buffer record. */
struct t_rec {