author     Joe Epstein <jepstein98@gmail.com>  2011-01-07 11:54:48 +0000
committer  Joe Epstein <jepstein98@gmail.com>  2011-01-07 11:54:48 +0000
commit     df402bb9f0dcf49f03098ac3bca64b01dd4decce (patch)
tree       396a4308d1c4664834a505a296e22376177eff10 /xen
parent     81f8af44baecf94a72d08cde1619f5af63c1b3a7 (diff)
mem_access: added INT3/CRx capture
* Allows a memory event listener to register for events on changes to CR0, CR3, and CR4, as well as INT3 instructions, as part of the mem_access mechanism. These events can be either synchronous or asynchronous.
* For INT3, the logic works independently of a debugger, so both can be supported.
* The presence and type of listener are stored in and accessed through HVM params.
* Changed the event-mask handling to ensure that the right events are captured based on the listeners.
* Added the ability to inject HW/SW traps into a VCPU when it next resumes (rather than trying to modify the existing IRQ-injection code paths). Only one trap to inject can be outstanding at a time.

Signed-off-by: Joe Epstein <jepstein98@gmail.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
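For context, a minimal toolstack-side sketch (not part of this patch) of how a listener might enable synchronous, change-only CR3 events, assuming libxc's xc_set_hvm_param(); the target domain ID and error handling are illustrative. Since the patch rejects self-registration with -EPERM, this must run in a separate privileged domain such as dom0:

    /* Usage sketch (not part of this patch): enable synchronous,
     * change-only CR3 events for an HVM guest from dom0.  Assumes
     * libxc's xc_set_hvm_param(); domid 1 is illustrative. */
    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/hvm/params.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t domid = 1;   /* illustrative target domain */

        if ( !xch )
            return 1;

        /* Setting the param from inside the target domain itself
         * would return -EPERM. */
        if ( xc_set_hvm_param(xch, domid, HVM_PARAM_MEMORY_EVENT_CR3,
                              HVMPME_mode_sync | HVMPME_onchangeonly) )
            fprintf(stderr, "enabling CR3 events failed\n");

        xc_interface_close(xch);
        return 0;
    }

As the hunks below arrange, the new value is latched by forcing a guest CR0 update (for CR3 exiting) or by a pause/unpause cycle (for INT3).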
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/hvm.c           164
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c        4
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c        72
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h      8
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h     5
-rw-r--r--  xen/include/public/hvm/hvm_op.h   22
-rw-r--r--  xen/include/public/hvm/params.h   15
-rw-r--r--  xen/include/public/mem_event.h     4
8 files changed, 278 insertions, 16 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d2107db3d3..7ae6f9a2b0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -309,6 +309,15 @@ void hvm_do_resume(struct vcpu *v)
return; /* bail */
}
}
+
+ /* Inject pending hw/sw trap */
+ if (v->arch.hvm_vcpu.inject_trap != -1)
+ {
+ hvm_inject_exception(v->arch.hvm_vcpu.inject_trap,
+ v->arch.hvm_vcpu.inject_error_code,
+ v->arch.hvm_vcpu.inject_cr2);
+ v->arch.hvm_vcpu.inject_trap = -1;
+ }
}
static void hvm_init_ioreq_page(
@@ -949,6 +958,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
+ v->arch.hvm_vcpu.inject_trap = -1;
+
#ifdef CONFIG_COMPAT
rc = setup_compat_arg_xlat(v);
if ( rc != 0 )
@@ -3236,10 +3247,45 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
case HVM_PARAM_ACPI_IOPORTS_LOCATION:
rc = pmtimer_change_ioport(d, a.value);
break;
+ case HVM_PARAM_MEMORY_EVENT_CR0:
+ case HVM_PARAM_MEMORY_EVENT_CR3:
+ case HVM_PARAM_MEMORY_EVENT_CR4:
+ if ( d->domain_id == current->domain->domain_id )
+ rc = -EPERM;
+ break;
+ case HVM_PARAM_MEMORY_EVENT_INT3:
+ if ( d->domain_id == current->domain->domain_id )
+ {
+ rc = -EPERM;
+ break;
+ }
+ if ( a.value & HVMPME_onchangeonly )
+ rc = -EINVAL;
+ break;
}
- if ( rc == 0 )
+ if ( rc == 0 )
+ {
d->arch.hvm_domain.params[a.index] = a.value;
+
+ switch( a.index )
+ {
+ case HVM_PARAM_MEMORY_EVENT_INT3:
+ {
+ domain_pause(d);
+ domain_unpause(d); /* Causes guest to latch new status */
+ break;
+ }
+ case HVM_PARAM_MEMORY_EVENT_CR3:
+ {
+ for_each_vcpu ( d, v )
+ hvm_funcs.update_guest_cr(v, 0); /* Latches new CR3 mask through CR0 code */
+ break;
+ }
+ }
+
+ }
+
}
else
{
@@ -3657,6 +3703,44 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
break;
}
+ case HVMOP_inject_trap:
+ {
+ xen_hvm_inject_trap_t tr;
+ struct domain *d;
+ struct vcpu *v;
+
+ if ( copy_from_guest(&tr, arg, 1 ) )
+ return -EFAULT;
+
+ if ( current->domain->domain_id == tr.domid )
+ return -EPERM;
+
+ rc = rcu_lock_target_domain_by_id(tr.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail8;
+
+ rc = -ENOENT;
+ if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
+ goto param_fail8;
+
+ if ( v->arch.hvm_vcpu.inject_trap != -1 )
+ rc = -EBUSY;
+ else
+ {
+ v->arch.hvm_vcpu.inject_trap = tr.trap;
+ v->arch.hvm_vcpu.inject_error_code = tr.error_code;
+ v->arch.hvm_vcpu.inject_cr2 = tr.cr2;
+ }
+
+ param_fail8:
+ rcu_unlock_domain(d);
+ break;
+ }
+
default:
{
gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
@@ -3697,6 +3781,84 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
+static int hvm_memory_event_traps(long p, uint32_t reason,
+ unsigned long value, unsigned long old,
+ bool_t gla_valid, unsigned long gla)
+{
+ struct vcpu* v = current;
+ struct domain *d = v->domain;
+ mem_event_request_t req;
+ int rc;
+
+ if ( !(p & HVMPME_MODE_MASK) )
+ return 0;
+
+ if ( (p & HVMPME_onchangeonly) && (value == old) )
+ return 1;
+
+ rc = mem_event_check_ring(d);
+ if ( rc )
+ return rc;
+
+ memset(&req, 0, sizeof(req));
+ req.type = MEM_EVENT_TYPE_ACCESS;
+ req.reason = reason;
+
+ if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
+ {
+ req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ vcpu_pause_nosync(v);
+ }
+
+ req.gfn = value;
+ req.vcpu_id = v->vcpu_id;
+ if ( gla_valid )
+ {
+ req.offset = gla & ((1 << PAGE_SHIFT) - 1);
+ req.gla = gla;
+ req.gla_valid = 1;
+ }
+
+ mem_event_put_request(d, &req);
+
+ return 1;
+}
+
+void hvm_memory_event_cr0(unsigned long value, unsigned long old)
+{
+ hvm_memory_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR0],
+ MEM_EVENT_REASON_CR0,
+ value, old, 0, 0);
+}
+
+void hvm_memory_event_cr3(unsigned long value, unsigned long old)
+{
+ hvm_memory_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR3],
+ MEM_EVENT_REASON_CR3,
+ value, old, 0, 0);
+}
+
+void hvm_memory_event_cr4(unsigned long value, unsigned long old)
+{
+ hvm_memory_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_CR4],
+ MEM_EVENT_REASON_CR4,
+ value, old, 0, 0);
+}
+
+int hvm_memory_event_int3(unsigned long gla)
+{
+ uint32_t pfec = PFEC_page_present;
+ unsigned long gfn;
+ gfn = paging_gva_to_gfn(current, gla, &pfec);
+
+ return hvm_memory_event_traps(current->domain->arch.hvm_domain
+ .params[HVM_PARAM_MEMORY_EVENT_INT3],
+ MEM_EVENT_REASON_INT3,
+ gfn, 0, 1, gla);
+}
/*
* Local variables:
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 77b275a9c6..6f7fc1a8cc 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1082,7 +1082,9 @@ void vmx_do_resume(struct vcpu *v)
hvm_asid_flush_vcpu(v);
}
- debug_state = v->domain->debugger_attached;
+ debug_state = v->domain->debugger_attached
+ || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+
if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
{
v->arch.hvm_vcpu.debug_state_latch = debug_state;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0301c7eeb2..cd5545b368 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1064,12 +1064,16 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
if ( paging_mode_hap(v->domain) )
{
- /* We manage GUEST_CR3 when guest CR0.PE is zero. */
+ /* We manage GUEST_CR3 when guest CR0.PE is zero or when cr3 memevents are on */
uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING);
v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
if ( !hvm_paging_enabled(v) )
v->arch.hvm_vmx.exec_control |= cr3_ctls;
+
+ if ( v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_CR3] )
+ v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+
vmx_update_cpu_exec_control(v);
/* Changing CR0.PE can change some bits in real CR4. */
@@ -1252,9 +1256,12 @@ void vmx_inject_hw_exception(int trap, int error_code)
unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
struct vcpu *curr = current;
+ int type = X86_EVENTTYPE_HW_EXCEPTION;
+
switch ( trap )
{
case TRAP_debug:
+ type = X86_EVENTTYPE_SW_EXCEPTION;
if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
{
__restore_debug_registers(curr);
@@ -1269,6 +1276,9 @@ void vmx_inject_hw_exception(int trap, int error_code)
domain_pause_for_debugger();
return;
}
+
+ type = X86_EVENTTYPE_SW_EXCEPTION;
+ __vmwrite(VM_ENTRY_INSTRUCTION_LEN, 1); /* int3 */
}
if ( unlikely(intr_info & INTR_INFO_VALID_MASK) &&
@@ -1279,7 +1289,7 @@ void vmx_inject_hw_exception(int trap, int error_code)
error_code = 0;
}
- __vmx_inject_exception(trap, X86_EVENTTYPE_HW_EXCEPTION, error_code);
+ __vmx_inject_exception(trap, type, error_code);
if ( trap == TRAP_page_fault )
HVMTRACE_LONG_2D(PF_INJECT, error_code,
@@ -1565,6 +1575,8 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
unsigned long value;
struct vcpu *v = current;
struct vlapic *vlapic = vcpu_vlapic(v);
+ int rc = 0;
+ unsigned long old;
switch ( gp )
{
@@ -1589,13 +1601,25 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
switch ( cr )
{
case 0:
- return !hvm_set_cr0(value);
+ old = v->arch.hvm_vcpu.guest_cr[0];
+ rc = !hvm_set_cr0(value);
+ if (rc)
+ hvm_memory_event_cr0(value, old);
+ return rc;
case 3:
- return !hvm_set_cr3(value);
+ old = v->arch.hvm_vcpu.guest_cr[3];
+ rc = !hvm_set_cr3(value);
+ if (rc)
+ hvm_memory_event_cr3(value, old);
+ return rc;
case 4:
- return !hvm_set_cr4(value);
+ old = v->arch.hvm_vcpu.guest_cr[4];
+ rc = !hvm_set_cr4(value);
+ if (rc)
+ hvm_memory_event_cr4(value, old);
+ return rc;
case 8:
vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1676,11 +1700,17 @@ static int vmx_cr_access(unsigned long exit_qualification,
cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
mov_from_cr(cr, gp, regs);
break;
- case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+ case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
+ {
+ unsigned long old = v->arch.hvm_vcpu.guest_cr[0];
v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
vmx_update_guest_cr(v, 0);
+
+ hvm_memory_event_cr0(v->arch.hvm_vcpu.guest_cr[0], old);
+
HVMTRACE_0D(CLTS);
break;
+ }
case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
value = v->arch.hvm_vcpu.guest_cr[0];
/* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
@@ -2351,13 +2381,29 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
goto exit_and_crash;
domain_pause_for_debugger();
break;
- case TRAP_int3:
- if ( !v->domain->debugger_attached )
- goto exit_and_crash;
- update_guest_eip(); /* Safe: INT3 */
- current->arch.gdbsx_vcpu_event = TRAP_int3;
- domain_pause_for_debugger();
- break;
+ case TRAP_int3:
+ {
+ if ( v->domain->debugger_attached )
+ {
+ update_guest_eip(); /* Safe: INT3 */
+ current->arch.gdbsx_vcpu_event = TRAP_int3;
+ domain_pause_for_debugger();
+ break;
+ }
+ else {
+ int handled = hvm_memory_event_int3(regs->eip);
+
+ if ( handled < 0 )
+ {
+ vmx_inject_exception(TRAP_int3, HVM_DELIVER_NO_ERROR_CODE, 0);
+ break;
+ }
+ else if ( handled )
+ break;
+ }
+
+ goto exit_and_crash;
+ }
case TRAP_no_device:
vmx_fpu_dirty_intercept();
break;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 53e4f97932..dc8e238a95 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -372,4 +372,12 @@ bool_t hvm_hap_nested_page_fault(unsigned long gpa,
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content);
+/* Called for current VCPU on crX changes by guest */
+void hvm_memory_event_cr0(unsigned long value, unsigned long old);
+void hvm_memory_event_cr3(unsigned long value, unsigned long old);
+void hvm_memory_event_cr4(unsigned long value, unsigned long old);
+
+/* Called for current VCPU on int3: returns -1 if no listener */
+int hvm_memory_event_int3(unsigned long gla);
+
#endif /* __ASM_X86_HVM_HVM_H__ */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 682027fffa..f5a04a80bf 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -114,6 +114,11 @@ struct hvm_vcpu {
/* We may write up to m128 as a number of device-model transactions. */
paddr_t mmio_large_write_pa;
unsigned int mmio_large_write_bytes;
+
+ /* Pending hw/sw interrupt */
+ int inject_trap; /* -1 for nothing to inject */
+ int inject_error_code;
+ unsigned long inject_cr2;
};
#endif /* __ASM_X86_HVM_VCPU_H__ */
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 3ea1f5ce5b..427ffb70bc 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -200,4 +200,26 @@ struct xen_hvm_get_mem_access {
};
typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
+
+#define HVMOP_inject_trap 14
+/* Inject a trap into a VCPU, which will get taken up on the next
+ * scheduling of it. Note that the caller should know enough of the
+ * state of the CPU before injecting, to know what the effect of
+ * injecting the trap will be.
+ */
+struct xen_hvm_inject_trap {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* VCPU */
+ uint32_t vcpuid;
+ /* Trap number */
+ uint32_t trap;
+ /* Error code, or -1 to skip */
+ uint32_t error_code;
+ /* CR2 for page faults */
+ uint64_t cr2;
+};
+typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
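A hedged illustration of filling in this structure — queuing a page fault against vCPU 0 of an arbitrary domain. All values are illustrative, and issuing the actual HVMOP_inject_trap hypercall requires a toolstack binding that is not part of this xen/ diff:

    /* Sketch only: build the HVMOP_inject_trap argument.  Values are
     * illustrative; the hypervisor returns -EBUSY if a previously
     * queued trap has not yet been delivered, and -EPERM for
     * self-injection. */
    #include <string.h>
    #include <xen/hvm/hvm_op.h>

    static xen_hvm_inject_trap_t make_page_fault_request(void)
    {
        xen_hvm_inject_trap_t tr;

        memset(&tr, 0, sizeof(tr));
        tr.domid      = 1;          /* target domain (not the caller) */
        tr.vcpuid     = 0;
        tr.trap       = 14;         /* TRAP_page_fault */
        tr.error_code = 2;          /* write to a not-present page */
        tr.cr2        = 0xdeadb000; /* faulting linear address */

        return tr;
    }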
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 2359f33fe9..45b9a63980 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -124,6 +124,19 @@
*/
#define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
-#define HVM_NR_PARAMS 20
+/* Enable blocking memory events, async or sync (pause vcpu until response)
+ * onchangeonly indicates messages only on a change of value */
+#define HVM_PARAM_MEMORY_EVENT_CR0 20
+#define HVM_PARAM_MEMORY_EVENT_CR3 21
+#define HVM_PARAM_MEMORY_EVENT_CR4 22
+#define HVM_PARAM_MEMORY_EVENT_INT3 23
+
+#define HVMPME_MODE_MASK (3 << 0)
+#define HVMPME_mode_disabled 0
+#define HVMPME_mode_async 1
+#define HVMPME_mode_sync 2
+#define HVMPME_onchangeonly (1 << 2)
+
+#define HVM_NR_PARAMS 24
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
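A small sketch of decoding one of these param values, mirroring the checks hvm_memory_event_traps() performs in the hvm.c hunk above; p stands for whatever was stored in the param:

    /* Sketch: interpret an HVM_PARAM_MEMORY_EVENT_* value. */
    #include <xen/hvm/params.h>

    static int listener_enabled(unsigned long p)
    {
        return (p & HVMPME_MODE_MASK) != HVMPME_mode_disabled;
    }

    static int listener_is_sync(unsigned long p)
    {
        return (p & HVMPME_MODE_MASK) == HVMPME_mode_sync;
    }

    static int report_only_on_change(unsigned long p)
    {
        return (p & HVMPME_onchangeonly) != 0;
    }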
diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h
index 75c65a20aa..e3271d94e0 100644
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -37,6 +37,10 @@
/* Reasons for the memory event request */
#define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */
#define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */
+#define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */
+#define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */
+#define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */
+#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
typedef struct mem_event_shared_page {
uint32_t port;