diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2009-09-16 09:30:41 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2009-09-16 09:30:41 +0100 |
commit | 71708b1966462eeac3a3a2bc1a74086b2b25b62c (patch) | |
tree | 8a87d5408a4492abd3cb55694fe20ef9c622f630 /xen/arch/x86/hvm/vpt.c | |
parent | 057958b0ab83fe767397a7ef896b4e6ed63529d5 (diff) | |
download | xen-71708b1966462eeac3a3a2bc1a74086b2b25b62c.tar.gz xen-71708b1966462eeac3a3a2bc1a74086b2b25b62c.tar.bz2 xen-71708b1966462eeac3a3a2bc1a74086b2b25b62c.zip |
x86 hvm: suspend platform timer emulation while its IRQ is masked
This patch removes a timer whose IRQ is masked from the vcpu's timer
list. It reduces the overhead of VM exits and VM context switches.
It also fixes a potential bug:
(1) VCPU#0: mask the IRQ of a timer. (ex. vioapic.redir[2].mask=1)
(2) VCPU#1: pt_timer_fn() is invoked by expiration of the timer.
(3) VCPU#1: pt_update_irq() is called but does nothing because
pt_irq_masked() returns 1.
(4) VCPU#1: goes to sleep via halt.
(5) VCPU#0: unmask the IRQ of the timer.
After that, no one wakes up the VCPU#1.
IRQ of ISA is masked by:
- PIC's IMR
- IOAPIC's redir[0]
- IOAPIC's redir[N].mask
- LAPIC's LVT0
- LAPIC enabled/disabled
IRQ of LAPIC timer is masked by:
- LAPIC's LVTT
- LAPIC disabled
When any of the above are changed, the corresponding vcpu is kicked
and its suspended timer emulation is resumed.
In addition, a small bug fix in pt_adjust_global_vcpu_target().
Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Diffstat (limited to 'xen/arch/x86/hvm/vpt.c')
-rw-r--r-- | xen/arch/x86/hvm/vpt.c | 60 |
1 files changed, 53 insertions, 7 deletions
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c index f586d242d1..1a5804a356 100644 --- a/xen/arch/x86/hvm/vpt.c +++ b/xen/arch/x86/hvm/vpt.c @@ -216,19 +216,30 @@ static void pt_timer_fn(void *data) void pt_update_irq(struct vcpu *v) { struct list_head *head = &v->arch.hvm_vcpu.tm_list; - struct periodic_time *pt, *earliest_pt = NULL; + struct periodic_time *pt, *temp, *earliest_pt = NULL; uint64_t max_lag = -1ULL; int irq, is_lapic; spin_lock(&v->arch.hvm_vcpu.tm_lock); - list_for_each_entry ( pt, head, list ) + list_for_each_entry_safe ( pt, temp, head, list ) { - if ( pt->pending_intr_nr && !pt_irq_masked(pt) && - ((pt->last_plt_gtime + pt->period) < max_lag) ) + if ( pt->pending_intr_nr ) { - max_lag = pt->last_plt_gtime + pt->period; - earliest_pt = pt; + if ( pt_irq_masked(pt) ) + { + /* suspend timer emulation */ + list_del(&pt->list); + pt->on_list = 0; + } + else + { + if ( (pt->last_plt_gtime + pt->period) < max_lag ) + { + max_lag = pt->last_plt_gtime + pt->period; + earliest_pt = pt; + } + } } } @@ -412,6 +423,7 @@ void destroy_periodic_time(struct periodic_time *pt) if ( pt->on_list ) list_del(&pt->list); pt->on_list = 0; + pt->pending_intr_nr = 0; pt_unlock(pt); /* @@ -451,12 +463,14 @@ static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v) void pt_adjust_global_vcpu_target(struct vcpu *v) { - struct pl_time *pl_time = &v->domain->arch.hvm_domain.pl_time; + struct pl_time *pl_time; int i; if ( v == NULL ) return; + pl_time = &v->domain->arch.hvm_domain.pl_time; + spin_lock(&pl_time->vpit.lock); pt_adjust_vcpu(&pl_time->vpit.pt0, v); spin_unlock(&pl_time->vpit.lock); @@ -470,3 +484,35 @@ void pt_adjust_global_vcpu_target(struct vcpu *v) pt_adjust_vcpu(&pl_time->vhpet.pt[i], v); spin_unlock(&pl_time->vhpet.lock); } + + +static void pt_resume(struct periodic_time *pt) +{ + if ( pt->vcpu == NULL ) + return; + + pt_lock(pt); + if ( pt->pending_intr_nr && !pt->on_list ) + { + pt->on_list = 1; + list_add(&pt->list, 
&pt->vcpu->arch.hvm_vcpu.tm_list); + vcpu_kick(pt->vcpu); + } + pt_unlock(pt); +} + +void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt) +{ + int i; + + if ( d ) + { + pt_resume(&d->arch.hvm_domain.pl_time.vpit.pt0); + pt_resume(&d->arch.hvm_domain.pl_time.vrtc.pt); + for ( i = 0; i < HPET_TIMER_NUM; i++ ) + pt_resume(&d->arch.hvm_domain.pl_time.vhpet.pt[i]); + } + + if ( vlapic_pt ) + pt_resume(vlapic_pt); +} |