author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>    2007-06-20 11:50:16 +0100
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>    2007-06-20 11:50:16 +0100
commit    19636447b80747cd9da57cd540f09f475b13cfce
tree      be76ad344ed33741fd7f70b231b4563202791ee0 /xen
parent    eed63189dabd90abe422b0e94ab8854783329bed
hvm: Support injection of virtual NMIs and clean up ExtInt handling in general.
Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/irq.c            |  61
-rw-r--r--  xen/arch/x86/hvm/svm/intr.c       | 146
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c        |  40
-rw-r--r--  xen/arch/x86/hvm/vioapic.c        |  34
-rw-r--r--  xen/arch/x86/hvm/vlapic.c         |   7
-rw-r--r--  xen/arch/x86/hvm/vmx/intr.c       | 106
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c        |  22
-rw-r--r--  xen/arch/x86/hvm/vpic.c           |   3
-rw-r--r--  xen/arch/x86/hvm/vpt.c            |   9
-rw-r--r--  xen/include/asm-x86/event.h       |   1
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h     |  16
-rw-r--r--  xen/include/asm-x86/hvm/irq.h     |  12
-rw-r--r--  xen/include/asm-x86/hvm/vcpu.h    |   4
-rw-r--r--  xen/include/asm-x86/hvm/vlapic.h  |   2
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmx.h |  11
-rw-r--r--  xen/include/asm-x86/hvm/vpic.h    |   2
-rw-r--r--  xen/include/asm-x86/hvm/vpt.h     |   3
17 files changed, 264 insertions(+), 215 deletions(-)
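
The heart of the cleanup is a split of interrupt delivery into a side-effect-free "check" step (hvm_vcpu_has_pending_irq) and a separate "ack" step (hvm_vcpu_ack_pending_irq), keyed by the new enum hvm_intack. Below is a minimal, self-contained model of the protocol; the struct and helper names are stand-ins for illustration, not the actual Xen types.

#include <stdbool.h>
#include <stdio.h>

/* Pending-interrupt sources, mirroring the patch's enum hvm_intack. */
enum hvm_intack { hvm_intack_none, hvm_intack_pic, hvm_intack_lapic, hvm_intack_nmi };

/* Stub per-vCPU state standing in for struct vcpu / struct hvm_vcpu. */
struct vcpu_model {
    bool nmi_pending;    /* latched NMI */
    int  lapic_irr;      /* highest pending LAPIC vector, or -1 */
    bool pic_int_output; /* PIC INT line asserted? */
    int  pic_vector;
};

/* Check step: side-effect free; NMI beats LAPIC beats PIC. */
static enum hvm_intack has_pending_irq(const struct vcpu_model *v)
{
    if (v->nmi_pending)
        return hvm_intack_nmi;
    if (v->lapic_irr != -1)
        return hvm_intack_lapic;
    return v->pic_int_output ? hvm_intack_pic : hvm_intack_none;
}

/* Ack step: consume the event; may fail if state changed since the check,
 * which is why the real intr_assist callers retry in a do/while loop. */
static bool ack_pending_irq(struct vcpu_model *v, enum hvm_intack t, int *vec)
{
    switch (t) {
    case hvm_intack_nmi:
        if (!v->nmi_pending) return false;
        v->nmi_pending = false;
        return true;                 /* NMI is architecturally vector 2 */
    case hvm_intack_lapic:
        if (v->lapic_irr == -1) return false;
        *vec = v->lapic_irr;
        v->lapic_irr = -1;           /* IRR -> ISR in the real vlapic */
        return true;
    case hvm_intack_pic:
        *vec = v->pic_vector;
        v->pic_int_output = false;
        return true;
    default:
        return false;
    }
}

int main(void)
{
    struct vcpu_model v = { .nmi_pending = true, .lapic_irr = 0x31,
                            .pic_int_output = true, .pic_vector = 0x20 };
    enum hvm_intack t;
    int vec = 2;
    while ((t = has_pending_irq(&v)) != hvm_intack_none)
        if (ack_pending_irq(&v, t, &vec))
            printf("deliver: source=%d vector=%#x\n", (int)t,
                   t == hvm_intack_nmi ? 2 : vec);
    return 0;
}

Because the check has no side effects, a caller can apply the per-source hvm_interrupts_enabled() predicate before dequeuing anything; the old cpu_get_interrupt() interface forced the vector out of the IRR before deliverability was known.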
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index c6c3e7c906..6494ea8567 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -285,43 +285,49 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
}
}
-int cpu_has_pending_irq(struct vcpu *v)
+enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
struct hvm_domain *plat = &v->domain->arch.hvm_domain;
- /* APIC */
+ if ( unlikely(v->arch.hvm_vcpu.nmi_pending) )
+ return hvm_intack_nmi;
+
if ( vlapic_has_interrupt(v) != -1 )
- return 1;
+ return hvm_intack_lapic;
- /* PIC */
if ( !vlapic_accept_pic_intr(v) )
- return 0;
+ return hvm_intack_none;
- return plat->vpic[0].int_output;
+ return plat->vpic[0].int_output ? hvm_intack_pic : hvm_intack_none;
}
-int cpu_get_interrupt(struct vcpu *v, int *type)
+int hvm_vcpu_ack_pending_irq(struct vcpu *v, enum hvm_intack type, int *vector)
{
- int vector;
-
- if ( (vector = cpu_get_apic_interrupt(v, type)) != -1 )
- return vector;
-
- if ( (v->vcpu_id == 0) &&
- ((vector = cpu_get_pic_interrupt(v, type)) != -1) )
- return vector;
+ switch ( type )
+ {
+ case hvm_intack_nmi:
+ return test_and_clear_bool(v->arch.hvm_vcpu.nmi_pending);
+ case hvm_intack_lapic:
+ return ((*vector = cpu_get_apic_interrupt(v)) != -1);
+ case hvm_intack_pic:
+ ASSERT(v->vcpu_id == 0);
+ return ((*vector = cpu_get_pic_interrupt(v)) != -1);
+ default:
+ break;
+ }
- return -1;
+ return 0;
}
-int get_isa_irq_vector(struct vcpu *v, int isa_irq, int type)
+int get_isa_irq_vector(struct vcpu *v, int isa_irq, enum hvm_intack src)
{
unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
- if ( type == APIC_DM_EXTINT )
+ if ( src == hvm_intack_pic )
return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
+ (isa_irq & 7));
+ ASSERT(src == hvm_intack_lapic);
return domain_vioapic(v->domain)->redirtbl[gsi].fields.vector;
}
@@ -337,19 +343,20 @@ int is_isa_irq_masked(struct vcpu *v, int isa_irq)
domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
}
-/*
- * TODO: 1. Should not need special treatment of event-channel events.
- * 2. Should take notice of interrupt shadows (or clear them).
- */
int hvm_local_events_need_delivery(struct vcpu *v)
{
- int pending;
+ enum hvm_intack type;
+
+ /* TODO: Get rid of event-channel special case. */
+ if ( vcpu_info(v, evtchn_upcall_pending) )
+ type = hvm_intack_pic;
+ else
+ type = hvm_vcpu_has_pending_irq(v);
- pending = (vcpu_info(v, evtchn_upcall_pending) || cpu_has_pending_irq(v));
- if ( unlikely(pending) )
- pending = hvm_interrupts_enabled(v);
+ if ( likely(type == hvm_intack_none) )
+ return 0;
- return pending;
+ return hvm_interrupts_enabled(v, type);
}
#if 0 /* Keep for debugging */
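
The reworked hvm_local_events_need_delivery() above classifies an event-channel upcall as a PIC-class (maskable) source purely so the same per-source hvm_interrupts_enabled() predicate can be applied to it uniformly. A small sketch of that logic, with a stub predicate standing in for the per-vendor interrupts_enabled hook:

#include <stdbool.h>
#include <stdio.h>

enum hvm_intack { hvm_intack_none, hvm_intack_pic, hvm_intack_lapic, hvm_intack_nmi };

/* Stand-in for the SVM/VMX interrupts_enabled hook. */
static bool enabled_stub(enum hvm_intack t)
{
    return t != hvm_intack_none;   /* pretend nothing is masked */
}

/* Model of hvm_local_events_need_delivery(): an evtchn upcall is folded
 * into the PIC class so it inherits normal maskable-interrupt gating. */
static bool local_events_need_delivery(bool evtchn_upcall_pending,
                                       enum hvm_intack pending)
{
    enum hvm_intack t = evtchn_upcall_pending ? hvm_intack_pic : pending;
    return (t != hvm_intack_none) && enabled_stub(t);
}

int main(void)
{
    printf("%d\n", local_events_need_delivery(true,  hvm_intack_none)); /* 1 */
    printf("%d\n", local_events_need_delivery(false, hvm_intack_nmi));  /* 1 */
    printf("%d\n", local_events_need_delivery(false, hvm_intack_none)); /* 0 */
    return 0;
}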
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 0730586308..c3a30b78e8 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -15,7 +15,6 @@
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
- *
*/
#include <xen/config.h>
@@ -39,100 +38,119 @@
#include <xen/domain_page.h>
#include <asm/hvm/trace.h>
-/*
- * Most of this code is copied from vmx_io.c and modified
- * to be suitable for SVM.
- */
-
-static inline int svm_inject_extint(struct vcpu *v, int trap)
+static void svm_inject_dummy_vintr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
vintr_t intr = vmcb->vintr;
- /* Update only relevant fields */
intr.fields.irq = 1;
intr.fields.intr_masking = 1;
- intr.fields.vector = trap;
+ intr.fields.vector = 0;
intr.fields.prio = 0xF;
intr.fields.ign_tpr = 1;
vmcb->vintr = intr;
+}
+
+static void svm_inject_nmi(struct vcpu *v)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ eventinj_t event;
- return 0;
+ event.bytes = 0;
+ event.fields.v = 1;
+ event.fields.type = EVENTTYPE_NMI;
+ event.fields.vector = 2;
+
+ ASSERT(vmcb->eventinj.fields.v == 0);
+ vmcb->eventinj = event;
+}
+
+static void svm_inject_extint(struct vcpu *v, int vector)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ eventinj_t event;
+
+ event.bytes = 0;
+ event.fields.v = 1;
+ event.fields.type = EVENTTYPE_INTR;
+ event.fields.vector = vector;
+
+ ASSERT(vmcb->eventinj.fields.v == 0);
+ vmcb->eventinj = event;
}
asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int intr_type = APIC_DM_EXTINT;
- int intr_vector = -1;
+ enum hvm_intack intr_source;
+ int intr_vector;
/*
- * Previous Interrupt delivery caused this intercept?
+ * Previous event delivery caused this intercept?
* This will happen if the injection is latched by the processor (hence
- * clearing vintr.fields.irq) but then subsequently a fault occurs (e.g.,
- * due to lack of shadow mapping of guest IDT or guest-kernel stack).
- *
- * NB. Exceptions that fault during delivery are lost. This needs to be
- * fixed but we'll usually get away with it since faults are usually
- * idempotent. But this isn't the case for e.g. software interrupts!
+ * clearing vintr.fields.irq or eventinj.v) but then subsequently a fault
+ * occurs (e.g., due to lack of shadow mapping of guest IDT or guest-kernel
+ * stack).
*/
- if ( vmcb->exitintinfo.fields.v && (vmcb->exitintinfo.fields.type == 0) )
+ if ( vmcb->exitintinfo.fields.v )
{
- intr_vector = vmcb->exitintinfo.fields.vector;
+ vmcb->eventinj = vmcb->exitintinfo;
vmcb->exitintinfo.bytes = 0;
HVMTRACE_1D(REINJ_VIRQ, v, intr_vector);
- svm_inject_extint(v, intr_vector);
return;
}
- /*
- * Previous interrupt still pending? This occurs if we return from VMRUN
- * very early in the entry-to-guest process. Usually this is because an
- * external physical interrupt was pending when we executed VMRUN.
- */
- if ( vmcb->vintr.fields.irq )
- return;
-
- /* Crank the handle on interrupt state and check for new interrupts. */
+ /* Crank the handle on interrupt state. */
pt_update_irq(v);
hvm_set_callback_irq_level();
- if ( !cpu_has_pending_irq(v) )
- return;
-
- /*
- * If the guest can't take an interrupt right now, create a 'fake'
- * virtual interrupt on to intercept as soon as the guest _can_ take
- * interrupts. Do not obtain the next interrupt from the vlapic/pic
- * if unable to inject.
- *
- * Also do this if there is an exception pending. This is because
- * the delivery of the exception can arbitrarily delay the injection
- * of the vintr (for example, if the exception is handled via an
- * interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
- * - the vTPR could be modified upwards, so we need to wait until the
- * exception is delivered before we can safely decide that an
- * interrupt is deliverable; and
- * - the guest might look at the APIC/PIC state, so we ought not to have
- * cleared the interrupt out of the IRR.
- */
- if ( irq_masked(vmcb->rflags) || vmcb->interrupt_shadow
- || vmcb->eventinj.fields.v )
- {
- vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
- HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
- svm_inject_extint(v, 0x0); /* actual vector doesn't matter */
- return;
- }
- /* Okay, we can deliver the interrupt: grab it and update PIC state. */
- intr_vector = cpu_get_interrupt(v, &intr_type);
- BUG_ON(intr_vector < 0);
+ do {
+ intr_source = hvm_vcpu_has_pending_irq(v);
+ if ( likely(intr_source == hvm_intack_none) )
+ return;
- HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
- svm_inject_extint(v, intr_vector);
+ /*
+ * If the guest can't take an interrupt right now, create a 'fake'
+ * virtual interrupt on to intercept as soon as the guest _can_ take
+ * interrupts. Do not obtain the next interrupt from the vlapic/pic
+ * if unable to inject.
+ *
+ * Also do this if there is an injection already pending. This is
+ * because the event delivery can arbitrarily delay the injection
+ * of the vintr (for example, if the exception is handled via an
+ * interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
+ * - the vTPR could be modified upwards, so we need to wait until the
+ * exception is delivered before we can safely decide that an
+ * interrupt is deliverable; and
+ * - the guest might look at the APIC/PIC state, so we ought not to
+ * have cleared the interrupt out of the IRR.
+ *
+ * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
+ * shadow. This is hard to do without hardware support. We should also
+ * track 'NMI blocking' from NMI injection until IRET. This can be done
+ * quite easily in software by intercepting the unblocking IRET.
+ */
+ if ( !hvm_interrupts_enabled(v, intr_source) ||
+ vmcb->eventinj.fields.v )
+ {
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
+ HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);
+ svm_inject_dummy_vintr(v);
+ return;
+ }
+ } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
- pt_intr_post(v, intr_vector, intr_type);
+ if ( intr_source == hvm_intack_nmi )
+ {
+ svm_inject_nmi(v);
+ }
+ else
+ {
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+ svm_inject_extint(v, intr_vector);
+ pt_intr_post(v, intr_vector, intr_source);
+ }
}
/*
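
svm_inject_nmi() and svm_inject_extint() above differ only in the type field of the VMCB EVENTINJ word (plus NMI's fixed vector 2). A self-contained model of that encoding follows; the bit layout is per my reading of the AMD64 APM vol. 2 and should be treated as an assumption, not a restatement of the patch:

#include <stdint.h>
#include <stdio.h>

/* Model of the VMCB EVENTINJ field. Assumed layout: vector in bits 7:0,
 * type in bits 10:8 (0 = ext interrupt, 2 = NMI, 3 = exception),
 * error-code-valid in bit 11, valid in bit 31, error code in bits 63:32. */
typedef union {
    uint64_t bytes;
    struct {
        uint64_t vector    : 8;
        uint64_t type      : 3;
        uint64_t ev        : 1;
        uint64_t resvd     : 19;
        uint64_t v         : 1;
        uint64_t errorcode : 32;
    } fields;
} eventinj_model_t;

enum { EVENTTYPE_INTR = 0, EVENTTYPE_NMI = 2 };

int main(void)
{
    eventinj_model_t nmi = { .bytes = 0 };
    nmi.fields.v = 1;
    nmi.fields.type = EVENTTYPE_NMI;
    nmi.fields.vector = 2;          /* NMI is architecturally vector 2 */
    printf("EVENTINJ(NMI)    = %#018llx\n", (unsigned long long)nmi.bytes);

    eventinj_model_t ext = { .bytes = 0 };
    ext.fields.v = 1;
    ext.fields.type = EVENTTYPE_INTR;
    ext.fields.vector = 0x30;       /* illustrative guest vector */
    printf("EVENTINJ(ExtInt) = %#018llx\n", (unsigned long long)ext.bytes);
    return 0;
}

The ASSERTs in the patch encode the invariant that at most one event is ever in flight: a new injection is refused while eventinj.v is still set, which is also why svm_intr_assist() backs off to a dummy VINTR intercept rather than injecting on top of a pending event.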
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 981932cc16..968e3b5fc9 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -312,26 +312,8 @@ int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
c->sysenter_esp = vmcb->sysenter_esp;
c->sysenter_eip = vmcb->sysenter_eip;
- /* Save any event/interrupt that was being injected when we last
- * exited. Although there are three(!) VMCB fields that can contain
- * active events, we only need to save at most one: because the
- * intr_assist logic never delivers an IRQ when any other event is
- * active, we know that the only possible collision is if we inject
- * a fault while exitintinfo contains a valid event (the delivery of
- * which caused the last exit). In that case replaying just the
- * first event should cause the same behaviour when we restore. */
- if ( vmcb->vintr.fields.irq
- && /* Check it's not a fake interrupt (see svm_intr_assist()) */
- !(vmcb->general1_intercepts & GENERAL1_INTERCEPT_VINTR) )
- {
- c->pending_vector = vmcb->vintr.fields.vector;
- c->pending_type = 0; /* External interrupt */
- c->pending_error_valid = 0;
- c->pending_reserved = 0;
- c->pending_valid = 1;
- c->error_code = 0;
- }
- else if ( vmcb->exitintinfo.fields.v )
+ /* Save any event/interrupt that was being injected when we last exited. */
+ if ( vmcb->exitintinfo.fields.v )
{
c->pending_event = vmcb->exitintinfo.bytes & 0xffffffff;
c->error_code = vmcb->exitintinfo.fields.errorcode;
@@ -569,10 +551,15 @@ static inline void svm_restore_dr(struct vcpu *v)
__restore_debug_registers(v);
}
-static int svm_interrupts_enabled(struct vcpu *v)
+static int svm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
- unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
- return !irq_masked(eflags);
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ if ( type == hvm_intack_nmi )
+ return !vmcb->interrupt_shadow;
+
+ ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
+ return !irq_masked(vmcb->rflags) && !vmcb->interrupt_shadow;
}
static int svm_guest_x86_mode(struct vcpu *v)
@@ -2160,11 +2147,14 @@ static inline void svm_do_msr_access(
static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
+ enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
+
__update_guest_eip(vmcb, 1);
/* Check for interrupt not handled or new interrupt. */
- if ( (vmcb->rflags & X86_EFLAGS_IF) &&
- (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) ) {
+ if ( vmcb->eventinj.fields.v ||
+ ((type != hvm_intack_none) && hvm_interrupts_enabled(current, type)) )
+ {
HVMTRACE_1D(HLT, current, /*int pending=*/ 1);
return;
}
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index d755caad5f..016d91c3e4 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -254,17 +254,11 @@ static void ioapic_inj_irq(
HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %d trig %d deliv %d",
vector, trig_mode, delivery_mode);
- switch ( delivery_mode )
- {
- case dest_Fixed:
- case dest_LowestPrio:
- if ( vlapic_set_irq(target, vector, trig_mode) )
- vcpu_kick(vlapic_vcpu(target));
- break;
- default:
- gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
- break;
- }
+ ASSERT((delivery_mode == dest_Fixed) ||
+ (delivery_mode == dest_LowestPrio));
+
+ if ( vlapic_set_irq(target, vector, trig_mode) )
+ vcpu_kick(vlapic_vcpu(target));
}
static uint32_t ioapic_get_delivery_bitmask(
@@ -368,7 +362,6 @@ static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
}
case dest_Fixed:
- case dest_ExtINT:
{
uint8_t bit;
for ( bit = 0; deliver_bitmask != 0; bit++ )
@@ -393,10 +386,21 @@ static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
break;
}
- case dest_SMI:
case dest_NMI:
- case dest_INIT:
- case dest__reserved_2:
+ {
+ uint8_t bit;
+ for ( bit = 0; deliver_bitmask != 0; bit++ )
+ {
+ if ( !(deliver_bitmask & (1 << bit)) )
+ continue;
+ deliver_bitmask &= ~(1 << bit);
+ if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
+ !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
+ vcpu_kick(v);
+ }
+ break;
+ }
+
default:
gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
delivery_mode);
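
Both the vIOAPIC (here, dest_NMI) and the vLAPIC (APIC_DM_NMI, below) deliver an NMI by latching a single per-vCPU bit with test_and_set_bool() and kicking the vCPU only on the 0-to-1 transition. Since real NMIs are not queued, one bit is exactly the right amount of state. A runnable model using C11 atomics as a stand-in for Xen's test_and_set_bool()/vcpu_kick():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the per-vCPU nmi_pending latch. */
static atomic_bool nmi_pending;

static void deliver_nmi_model(int cpu)
{
    /* atomic_exchange() returns the old value, like test_and_set_bool(). */
    if (!atomic_exchange(&nmi_pending, true))
        printf("kick vcpu %d\n", cpu);          /* stand-in for vcpu_kick() */
    else
        printf("NMI already latched; coalesced\n");
}

int main(void)
{
    deliver_nmi_model(0);  /* latches the bit and kicks the vCPU */
    deliver_nmi_model(0);  /* coalesced with the first, no second kick */
    return 0;
}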
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 4e9e598bea..99ca287400 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -294,7 +294,8 @@ static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
break;
case APIC_DM_NMI:
- gdprintk(XENLOG_WARNING, "Ignoring guest NMI\n");
+ if ( !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
+ vcpu_kick(v);
break;
case APIC_DM_INIT:
@@ -747,7 +748,7 @@ int vlapic_has_interrupt(struct vcpu *v)
return highest_irr;
}
-int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
+int cpu_get_apic_interrupt(struct vcpu *v)
{
int vector = vlapic_has_interrupt(v);
struct vlapic *vlapic = vcpu_vlapic(v);
@@ -757,8 +758,6 @@ int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
vlapic_clear_irr(vector, vlapic);
-
- *mode = APIC_DM_FIXED;
return vector;
}
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index eb16e5e7d8..359960d350 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -102,8 +102,8 @@ static void update_tpr_threshold(struct vlapic *vlapic)
asmlinkage void vmx_intr_assist(void)
{
- int has_ext_irq, intr_vector, intr_type = 0;
- unsigned long eflags, intr_shadow;
+ int intr_vector;
+ enum hvm_intack intr_source;
struct vcpu *v = current;
unsigned int idtv_info_field;
unsigned long inst_len;
@@ -114,65 +114,67 @@ asmlinkage void vmx_intr_assist(void)
update_tpr_threshold(vcpu_vlapic(v));
- has_ext_irq = cpu_has_pending_irq(v);
-
- if ( unlikely(v->arch.hvm_vmx.vector_injected) )
- {
- v->arch.hvm_vmx.vector_injected = 0;
- if ( unlikely(has_ext_irq) )
- enable_irq_window(v);
- return;
- }
-
- /* This could be moved earlier in the VMX resume sequence. */
- idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
- if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
- {
- __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+ do {
+ intr_source = hvm_vcpu_has_pending_irq(v);
+
+ if ( unlikely(v->arch.hvm_vmx.vector_injected) )
+ {
+ v->arch.hvm_vmx.vector_injected = 0;
+ if ( unlikely(intr_source != hvm_intack_none) )
+ enable_irq_window(v);
+ return;
+ }
+
+ /* This could be moved earlier in the VMX resume sequence. */
+ idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
+ if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
+ {
+ __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+
+ /*
+ * Safe: the length will only be interpreted for software
+ * exceptions and interrupts. If we get here then delivery of some
+ * event caused a fault, and this always results in defined
+ * VM_EXIT_INSTRUCTION_LEN.
+ */
+ inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
+ __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
+
+ if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
+ __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ __vmread(IDT_VECTORING_ERROR_CODE));
+ if ( unlikely(intr_source != hvm_intack_none) )
+ enable_irq_window(v);
+
+ HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
+ return;
+ }
+
+ if ( likely(intr_source == hvm_intack_none) )
+ return;
/*
- * Safe: the length will only be interpreted for software exceptions
- * and interrupts. If we get here then delivery of some event caused a
- * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
+ * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
+ * should wait for exit from 'NMI blocking' window (NMI injection to
+ * next IRET). This requires us to use the new 'virtual NMI' support.
*/
- inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
- __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
-
- if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
- __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
- __vmread(IDT_VECTORING_ERROR_CODE));
- if ( unlikely(has_ext_irq) )
+ if ( !hvm_interrupts_enabled(v, intr_source) )
+ {
enable_irq_window(v);
+ return;
+ }
+ } while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
- HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
- return;
- }
-
- if ( likely(!has_ext_irq) )
- return;
-
- intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
- if ( unlikely(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)) )
+ if ( intr_source == hvm_intack_nmi )
{
- enable_irq_window(v);
- HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
- return;
+ vmx_inject_nmi(v);
}
-
- eflags = __vmread(GUEST_RFLAGS);
- if ( irq_masked(eflags) )
+ else
{
- enable_irq_window(v);
- return;
+ HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
+ vmx_inject_extint(v, intr_vector);
+ pt_intr_post(v, intr_vector, intr_source);
}
-
- intr_vector = cpu_get_interrupt(v, &intr_type);
- BUG_ON(intr_vector < 0);
-
- HVMTRACE_2D(INJ_VIRQ, v, intr_vector, /*fake=*/ 0);
- vmx_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
-
- pt_intr_post(v, intr_vector, intr_type);
}
/*
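
The restructured vmx_intr_assist() above keeps the pre-existing IDT-vectoring fix-up: if the last VM exit interrupted an event delivery, the aborted event is copied from IDT_VECTORING_INFO_FIELD back into VM_ENTRY_INTR_INFO_FIELD and retried before any fresh interrupt is considered. A shadow-register sketch of that data flow; taking bit 31 as the valid bit is my reading of the Intel SDM:

#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VALID_MASK (1u << 31)   /* assumed: valid bit is bit 31 */

static uint32_t idt_vectoring_info;   /* written by "hardware" on VM exit  */
static uint32_t vm_entry_intr_info;   /* consumed by "hardware" on VM entry */

/* Model of the reinjection step: returns 1 if this entry is consumed by
 * retrying the interrupted event, so no new injection may happen now. */
static int reinject_pending_event(void)
{
    if (!(idt_vectoring_info & INTR_INFO_VALID_MASK))
        return 0;                             /* nothing was interrupted */
    vm_entry_intr_info = idt_vectoring_info;  /* retry the same event */
    return 1;
}

int main(void)
{
    idt_vectoring_info = INTR_INFO_VALID_MASK | 0x30; /* ext int, vec 0x30 */
    printf("reinject=%d info=%#x\n", reinject_pending_event(),
           vm_entry_intr_info);
    return 0;
}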
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b1a11ead65..af0612173f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1115,16 +1115,26 @@ static int vmx_nx_enabled(struct vcpu *v)
return v->arch.hvm_vmx.efer & EFER_NX;
}
-static int vmx_interrupts_enabled(struct vcpu *v)
+static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
- unsigned long eflags = __vmread(GUEST_RFLAGS);
- return !irq_masked(eflags);
-}
+ unsigned long intr_shadow, eflags;
+
+ ASSERT(v == current);
+ intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ intr_shadow &= VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS;
+
+ if ( type == hvm_intack_nmi )
+ return !intr_shadow;
+
+ ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
+ eflags = __vmread(GUEST_RFLAGS);
+ return !irq_masked(eflags) && !intr_shadow;
+}
static void vmx_update_host_cr3(struct vcpu *v)
{
- ASSERT( (v == current) || !vcpu_runnable(v) );
+ ASSERT((v == current) || !vcpu_runnable(v));
vmx_vmcs_enter(v);
__vmwrite(HOST_CR3, v->arch.cr3);
vmx_vmcs_exit(v);
@@ -1132,7 +1142,7 @@ static void vmx_update_host_cr3(struct vcpu *v)
static void vmx_update_guest_cr3(struct vcpu *v)
{
- ASSERT( (v == current) || !vcpu_runnable(v) );
+ ASSERT((v == current) || !vcpu_runnable(v));
vmx_vmcs_enter(v);
__vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
vmx_vmcs_exit(v);
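
vmx_interrupts_enabled() above (and its SVM counterpart earlier) now answer a per-source question: NMIs ignore EFLAGS.IF but still honour the STI/MOV-SS interrupt shadow, while PIC/LAPIC interrupts require both IF=1 and no shadow. A compact, runnable model of the predicate:

#include <stdbool.h>
#include <stdio.h>

enum hvm_intack { hvm_intack_none, hvm_intack_pic, hvm_intack_lapic, hvm_intack_nmi };

/* Deliverability predicate modelled on svm/vmx_interrupts_enabled(). */
static bool can_deliver(enum hvm_intack t, bool eflags_if, bool intr_shadow)
{
    if (t == hvm_intack_nmi)
        return !intr_shadow;           /* IF does not mask NMIs */
    return eflags_if && !intr_shadow;  /* maskable: need IF=1, no shadow */
}

int main(void)
{
    printf("NMI, IF=0, no shadow: %d\n", can_deliver(hvm_intack_nmi, false, false)); /* 1 */
    printf("IRQ, IF=0, no shadow: %d\n", can_deliver(hvm_intack_pic, false, false)); /* 0 */
    printf("NMI, IF=1, shadow:    %d\n", can_deliver(hvm_intack_nmi, true, true));   /* 0 */
    return 0;
}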
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 19ef5d0756..46ab18fc46 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -499,7 +499,7 @@ void vpic_irq_negative_edge(struct domain *d, int irq)
vpic_update_int_output(vpic);
}
-int cpu_get_pic_interrupt(struct vcpu *v, int *type)
+int cpu_get_pic_interrupt(struct vcpu *v)
{
int irq, vector;
struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
@@ -512,6 +512,5 @@ int cpu_get_pic_interrupt(struct vcpu *v, int *type)
return -1;
vector = vpic[irq >> 3].irq_base + (irq & 7);
- *type = APIC_DM_EXTINT;
return vector;
}
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index cbd1ffa8b7..f7d9b91e04 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -155,7 +155,8 @@ void pt_update_irq(struct vcpu *v)
}
}
-static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
+static struct periodic_time *is_pt_irq(
+ struct vcpu *v, int vector, enum hvm_intack src)
{
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
@@ -174,7 +175,7 @@ static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
return pt;
}
- vec = get_isa_irq_vector(v, pt->irq, type);
+ vec = get_isa_irq_vector(v, pt->irq, src);
/* RTC irq need special care */
if ( (vector != vec) || (pt->irq == 8 && !is_rtc_periodic_irq(rtc)) )
@@ -186,7 +187,7 @@ static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
return NULL;
}
-void pt_intr_post(struct vcpu *v, int vector, int type)
+void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src)
{
struct periodic_time *pt;
time_cb *cb;
@@ -194,7 +195,7 @@ void pt_intr_post(struct vcpu *v, int vector, int type)
spin_lock(&v->arch.hvm_vcpu.tm_lock);
- pt = is_pt_irq(v, vector, type);
+ pt = is_pt_irq(v, vector, src);
if ( pt == NULL )
{
spin_unlock(&v->arch.hvm_vcpu.tm_lock);
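
pt_intr_post()/is_pt_irq() now carry the enum hvm_intack source because the same ISA IRQ resolves to different vectors depending on which controller delivered it, per get_isa_irq_vector() earlier in the patch. An illustrative model; the vector values are hypothetical, and the real IOAPIC path goes through hvm_isa_irq_to_gsi() and the redirection table:

#include <stdio.h>

enum hvm_intack { hvm_intack_none, hvm_intack_pic, hvm_intack_lapic, hvm_intack_nmi };

/* Model of get_isa_irq_vector(): PIC vectors come from the PIC's
 * programmed irq_base; IOAPIC vectors come from the redirection entry. */
static int isa_irq_to_vector(enum hvm_intack src, int isa_irq)
{
    static const int pic_irq_base[2] = { 0x08, 0x70 };  /* master, slave */
    static const int ioapic_redir_vector[16] = {        /* hypothetical */
        0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
        0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f };

    if (src == hvm_intack_pic)
        return pic_irq_base[isa_irq >> 3] + (isa_irq & 7);
    return ioapic_redir_vector[isa_irq];   /* hvm_intack_lapic path */
}

int main(void)
{
    printf("IRQ0 via PIC:    %#x\n", isa_irq_to_vector(hvm_intack_pic, 0));
    printf("IRQ0 via IOAPIC: %#x\n", isa_irq_to_vector(hvm_intack_lapic, 0));
    return 0;
}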
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
index 32b157b8db..05671f7ef3 100644
--- a/xen/include/asm-x86/event.h
+++ b/xen/include/asm-x86/event.h
@@ -10,7 +10,6 @@
#define __ASM_EVENT_H__
#include <xen/shared.h>
-#include <asm/hvm/irq.h> /* cpu_has_pending_irq() */
static inline void vcpu_kick(struct vcpu *v)
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index dd93c36e2a..203427593e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -55,6 +55,14 @@ typedef struct segment_register {
u64 base;
} __attribute__ ((packed)) segment_register_t;
+/* Interrupt acknowledgement sources. */
+enum hvm_intack {
+ hvm_intack_none,
+ hvm_intack_pic,
+ hvm_intack_lapic,
+ hvm_intack_nmi
+};
+
/*
* The hardware virtual machine (HVM) interface abstracts away from the
* x86/x86_64 CPU virtualization assist specifics. Currently this interface
@@ -106,7 +114,7 @@ struct hvm_function_table {
int (*long_mode_enabled)(struct vcpu *v);
int (*pae_enabled)(struct vcpu *v);
int (*nx_enabled)(struct vcpu *v);
- int (*interrupts_enabled)(struct vcpu *v);
+ int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
int (*guest_x86_mode)(struct vcpu *v);
unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
@@ -199,16 +207,16 @@ hvm_long_mode_enabled(struct vcpu *v)
#define hvm_long_mode_enabled(v) (v,0)
#endif
- static inline int
+static inline int
hvm_pae_enabled(struct vcpu *v)
{
return hvm_funcs.pae_enabled(v);
}
static inline int
-hvm_interrupts_enabled(struct vcpu *v)
+hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
- return hvm_funcs.interrupts_enabled(v);
+ return hvm_funcs.interrupts_enabled(v, type);
}
static inline int
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 1095e86fd0..96f509b632 100644
--- a/xen/include/asm-x86/hvm/irq.h
+++ b/xen/include/asm-x86/hvm/irq.h
@@ -24,11 +24,11 @@
#include <xen/types.h>
#include <xen/spinlock.h>
+#include <asm/hvm/hvm.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vioapic.h>
#include <public/hvm/save.h>
-
struct hvm_irq {
/*
* Virtual interrupt wires for a single PCI bus.
@@ -58,7 +58,6 @@ struct hvm_irq {
HVMIRQ_callback_gsi,
HVMIRQ_callback_pci_intx
} callback_via_type;
- uint32_t pad; /* So the next field will be aligned */
};
union {
uint32_t gsi;
@@ -115,9 +114,12 @@ void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
void hvm_set_callback_irq_level(void);
void hvm_set_callback_via(struct domain *d, uint64_t via);
-int cpu_get_interrupt(struct vcpu *v, int *type);
-int cpu_has_pending_irq(struct vcpu *v);
-int get_isa_irq_vector(struct vcpu *vcpu, int irq, int type);
+/* Check/Acknowledge next pending interrupt. */
+enum hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
+int hvm_vcpu_ack_pending_irq(
+ struct vcpu *v, enum hvm_intack type, int *vector);
+
+int get_isa_irq_vector(struct vcpu *vcpu, int irq, enum hvm_intack src);
int is_isa_irq_masked(struct vcpu *v, int isa_irq);
#endif /* __ASM_X86_HVM_IRQ_H__ */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index b0a6956120..a6a762ef36 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -30,12 +30,14 @@
struct hvm_vcpu {
unsigned long hw_cr3; /* value we give to HW to use */
- unsigned long ioflags;
struct hvm_io_op io_op;
struct vlapic vlapic;
s64 cache_tsc_offset;
u64 guest_time;
+ /* Is an NMI pending for delivery to this VCPU core? */
+ bool_t nmi_pending; /* NB. integrate flag with save/restore */
+
/* Lock and list for virtual platform timers. */
spinlock_t tm_lock;
struct list_head tm_list;
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index ed4fe172b7..ca09df2bcb 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -76,7 +76,7 @@ int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
int vlapic_find_highest_irr(struct vlapic *vlapic);
int vlapic_has_interrupt(struct vcpu *v);
-int cpu_get_apic_interrupt(struct vcpu *v, int *mode);
+int cpu_get_apic_interrupt(struct vcpu *v);
int vlapic_init(struct vcpu *v);
void vlapic_destroy(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 104e203c3e..f42d6f34a4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -336,9 +336,16 @@ static inline void vmx_inject_sw_exception(
instruction_len);
}
-static inline void vmx_inject_extint(struct vcpu *v, int trap, int error_code)
+static inline void vmx_inject_extint(struct vcpu *v, int trap)
{
- __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR, error_code, 0);
+ __vmx_inject_exception(v, trap, INTR_TYPE_EXT_INTR,
+ VMX_DELIVER_NO_ERROR_CODE, 0);
+}
+
+static inline void vmx_inject_nmi(struct vcpu *v)
+{
+ __vmx_inject_exception(v, 2, INTR_TYPE_NMI,
+ VMX_DELIVER_NO_ERROR_CODE, 0);
}
#endif /* __ASM_X86_HVM_VMX_VMX_H__ */
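
vmx_inject_extint() and vmx_inject_nmi() above both funnel into __vmx_inject_exception() with different type codes and no error code. A sketch of the VM-entry interruption-information word they build; the field positions are per my reading of the Intel SDM, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: vector in bits 7:0, type in bits 10:8
 * (0 = external interrupt, 2 = NMI), valid in bit 31. */
#define INTR_TYPE_EXT_INTR  (0u << 8)
#define INTR_TYPE_NMI       (2u << 8)
#define INTR_INFO_VALID     (1u << 31)

static uint32_t make_intr_info(uint32_t type, uint8_t vector)
{
    return INTR_INFO_VALID | type | vector;
}

int main(void)
{
    /* NMI is always vector 2; neither type delivers an error code. */
    printf("extint 0x30: %#x\n", make_intr_info(INTR_TYPE_EXT_INTR, 0x30));
    printf("nmi:         %#x\n", make_intr_info(INTR_TYPE_NMI, 2));
    return 0;
}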
diff --git a/xen/include/asm-x86/hvm/vpic.h b/xen/include/asm-x86/hvm/vpic.h
index 6bb9bb35ea..59dad65c24 100644
--- a/xen/include/asm-x86/hvm/vpic.h
+++ b/xen/include/asm-x86/hvm/vpic.h
@@ -32,7 +32,7 @@
void vpic_irq_positive_edge(struct domain *d, int irq);
void vpic_irq_negative_edge(struct domain *d, int irq);
void vpic_init(struct domain *d);
-int cpu_get_pic_interrupt(struct vcpu *v, int *type);
+int cpu_get_pic_interrupt(struct vcpu *v);
int is_periodic_irq(struct vcpu *v, int irq, int type);
#endif /* __ASM_X86_HVM_VPIC_H__ */
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index acd245a93d..929fd4b306 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -29,6 +29,7 @@
#include <xen/timer.h>
#include <xen/list.h>
#include <asm/hvm/vpic.h>
+#include <asm/hvm/irq.h>
#include <public/hvm/save.h>
struct HPETState;
@@ -119,7 +120,7 @@ struct pl_time { /* platform time */
void pt_freeze_time(struct vcpu *v);
void pt_thaw_time(struct vcpu *v);
void pt_update_irq(struct vcpu *v);
-void pt_intr_post(struct vcpu *v, int vector, int type);
+void pt_intr_post(struct vcpu *v, int vector, enum hvm_intack src);
void pt_reset(struct vcpu *v);
void pt_migrate(struct vcpu *v);
void create_periodic_time(