diff options
author | Andres Lagar-Cavilla <andres@lagarcavilla.org> | 2012-02-10 16:07:07 +0000 |
---|---|---|
committer | Andres Lagar-Cavilla <andres@lagarcavilla.org> | 2012-02-10 16:07:07 +0000 |
commit | bb9e2139bca549fb8fd52c6fef4054b3ebf9ff3e (patch) | |
tree | 5dfb86f77180ff6a36b22a975702619e10becd7f /xen/arch/x86/hvm/emulate.c | |
parent | 4fbe3f3e71ed8f619adfe66302d636923ff0cb04 (diff) | |
download | xen-bb9e2139bca549fb8fd52c6fef4054b3ebf9ff3e.tar.gz xen-bb9e2139bca549fb8fd52c6fef4054b3ebf9ff3e.tar.bz2 xen-bb9e2139bca549fb8fd52c6fef4054b3ebf9ff3e.zip |
x86/mm: Refactor possibly deadlocking get_gfn calls
When calling get_gfn multiple times on different gfns in the same function, we
can easily deadlock if p2m lookups are locked. Thus, refactor these calls to
enforce simple deadlock-avoidance rules:
- Lowest-numbered domain first
- Lowest-numbered gfn first
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/arch/x86/hvm/emulate.c')
-rw-r--r-- | xen/arch/x86/hvm/emulate.c | 33 |
1 file changed, 14 insertions, 19 deletions
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index c40c3d61a4..c2d089df56 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -660,12 +660,13 @@ static int hvmemul_rep_movs( { struct hvm_emulate_ctxt *hvmemul_ctxt = container_of(ctxt, struct hvm_emulate_ctxt, ctxt); - unsigned long saddr, daddr, bytes, sgfn, dgfn; + unsigned long saddr, daddr, bytes; paddr_t sgpa, dgpa; uint32_t pfec = PFEC_page_present; - p2m_type_t p2mt; + p2m_type_t sp2mt, dp2mt; int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF); char *buf; + struct two_gfns tg; rc = hvmemul_virtual_to_linear( src_seg, src_offset, bytes_per_rep, reps, hvm_access_read, @@ -693,26 +694,23 @@ static int hvmemul_rep_movs( if ( rc != X86EMUL_OKAY ) return rc; - /* XXX In a fine-grained p2m locking scenario, we need to sort this - * get_gfn's, or else we might deadlock */ - sgfn = sgpa >> PAGE_SHIFT; - (void)get_gfn(current->domain, sgfn, &p2mt); - if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) ) + get_two_gfns(current->domain, sgpa >> PAGE_SHIFT, &sp2mt, NULL, NULL, + current->domain, dgpa >> PAGE_SHIFT, &dp2mt, NULL, NULL, + p2m_guest, &tg); + + if ( !p2m_is_ram(sp2mt) && !p2m_is_grant(sp2mt) ) { rc = hvmemul_do_mmio( sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL); - put_gfn(current->domain, sgfn); + put_two_gfns(&tg); return rc; } - dgfn = dgpa >> PAGE_SHIFT; - (void)get_gfn(current->domain, dgfn, &p2mt); - if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) ) + if ( !p2m_is_ram(dp2mt) && !p2m_is_grant(dp2mt) ) { rc = hvmemul_do_mmio( dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL); - put_gfn(current->domain, sgfn); - put_gfn(current->domain, dgfn); + put_two_gfns(&tg); return rc; } @@ -730,8 +728,7 @@ static int hvmemul_rep_movs( */ if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) ) { - put_gfn(current->domain, sgfn); - put_gfn(current->domain, dgfn); + put_two_gfns(&tg); return X86EMUL_UNHANDLEABLE; } @@ -743,8 +740,7 @@ static int 
hvmemul_rep_movs( buf = xmalloc_bytes(bytes); if ( buf == NULL ) { - put_gfn(current->domain, sgfn); - put_gfn(current->domain, dgfn); + put_two_gfns(&tg); return X86EMUL_UNHANDLEABLE; } @@ -757,8 +753,7 @@ static int hvmemul_rep_movs( rc = hvm_copy_to_guest_phys(dgpa, buf, bytes); xfree(buf); - put_gfn(current->domain, sgfn); - put_gfn(current->domain, dgfn); + put_two_gfns(&tg); if ( rc == HVMCOPY_gfn_paged_out ) return X86EMUL_RETRY; |