diff options
author | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2007-04-03 11:44:10 +0100 |
---|---|---|
committer | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2007-04-03 11:44:10 +0100 |
commit | a9344ed5826043957f11d480e0cf48ec4bdc73b8 (patch) | |
tree | 0626f6f329ff0cba3be28a1ffd8fcbd57b5a2ca7 /xen/arch/x86 | |
parent | 9123bf1da67844083f2fc514f09b27eb61fda716 (diff) | |
download | xen-a9344ed5826043957f11d480e0cf48ec4bdc73b8.tar.gz xen-a9344ed5826043957f11d480e0cf48ec4bdc73b8.tar.bz2 xen-a9344ed5826043957f11d480e0cf48ec4bdc73b8.zip |
xen: Allow vcpus to defer a shutdown request across critical
asynchronous operations (in particular, hvm ioreq requests).
Signed-off-by: Keir Fraser <keir@xensource.com>
Diffstat (limited to 'xen/arch/x86')
-rw-r--r-- | xen/arch/x86/hvm/hvm.c | 3 |
-rw-r--r-- | xen/arch/x86/hvm/io.c | 9 |
-rw-r--r-- | xen/arch/x86/mm.c | 3 |
-rw-r--r-- | xen/arch/x86/mm/shadow/multi.c | 4 |
4 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 552caca822..888fe818d0 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -373,6 +373,9 @@ void hvm_send_assist_req(struct vcpu *v) { ioreq_t *p; + if ( unlikely(!vcpu_start_shutdown_deferral(v)) ) + return; /* implicitly bins the i/o operation */ + p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq; if ( unlikely(p->state != STATE_IOREQ_NONE) ) { diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index 605a42978b..4bff30b2b9 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -771,10 +771,11 @@ void hvm_io_assist(struct vcpu *v) struct cpu_user_regs *regs; struct hvm_io_op *io_opp; unsigned long gmfn; + struct domain *d = v->domain; io_opp = &v->arch.hvm_vcpu.io_op; regs = &io_opp->io_context; - vio = get_vio(v->domain, v->vcpu_id); + vio = get_vio(d, v->vcpu_id); p = &vio->vp_ioreq; if ( p->state != STATE_IORESP_READY ) @@ -797,11 +798,13 @@ void hvm_io_assist(struct vcpu *v) memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES); /* Has memory been dirtied? */ - if ( p->dir == IOREQ_READ && p->data_is_ptr ) + if ( (p->dir == IOREQ_READ) && p->data_is_ptr ) { gmfn = get_mfn_from_gpfn(paging_gva_to_gfn(v, p->data)); - mark_dirty(v->domain, gmfn); + mark_dirty(d, gmfn); } + + vcpu_end_shutdown_deferral(v); } /* diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index b559b1f567..fac87bcf02 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -806,7 +806,8 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d) * (Note that the undestroyable active grants are not a security hole in * Xen. All active grants can safely be cleaned up when the domain dies.) 
*/ - if ( (l1e_get_flags(l1e) & _PAGE_GNTTAB) && !d->is_shutdown && !d->is_dying ) + if ( (l1e_get_flags(l1e) & _PAGE_GNTTAB) && + !d->is_shutting_down && !d->is_dying ) { MEM_LOG("Attempt to implicitly unmap a granted PTE %" PRIpte, l1e_get_intpte(l1e)); diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index b1419a7b27..0a49f07add 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -2823,8 +2823,8 @@ static int sh_page_fault(struct vcpu *v, * are OK, this can only have been caused by a failed * shadow_set_l*e(), which will have crashed the guest. * Get out of the fault handler immediately. */ - ASSERT(d->is_shutdown); - unmap_walk(v, &gw); + ASSERT(d->is_shutting_down); + unmap_walk(v, &gw); shadow_unlock(d); return 0; } |