diff options
author | Tim Deegan <Tim.Deegan@citrix.com> | 2011-06-02 13:16:52 +0100 |
---|---|---|
committer | Tim Deegan <Tim.Deegan@citrix.com> | 2011-06-02 13:16:52 +0100 |
commit | 4ce6c4a45536f7e8094ef3f1cebfa048f23028f0 (patch) | |
tree | 93a99593392617e3cd4a608bf414ea15569e564a /xen/arch/x86/hvm/emulate.c | |
parent | 8c7d0b26207932a07061b03e1873dc3c8987f568 (diff) | |
download | xen-4ce6c4a45536f7e8094ef3f1cebfa048f23028f0.tar.gz xen-4ce6c4a45536f7e8094ef3f1cebfa048f23028f0.tar.bz2 xen-4ce6c4a45536f7e8094ef3f1cebfa048f23028f0.zip |
x86/mm/p2m: Make p2m interfaces take struct domain arguments.
As part of the nested HVM patch series, many p2m functions were changed
to take pointers to p2m tables rather than to domains. This patch
reverses that for almost all of them, which:
- gets rid of a lot of "p2m_get_hostp2m(d)" in code which really
shouldn't have to know anything about how gfns become mfns.
- ties sharing and paging interfaces to a domain, which is
what they actually act on, rather than a particular p2m table.
In developing this patch it became clear that memory-sharing and nested
HVM are unlikely to work well together. I haven't tried to fix that
here beyond adding some assertions around suspect paths (as this patch
is big enough with just the interface changes).
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
Diffstat (limited to 'xen/arch/x86/hvm/emulate.c')
-rw-r--r-- | xen/arch/x86/hvm/emulate.c | 10 |
1 file changed, 4 insertions, 6 deletions
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c index ce51354324..4d225856a2 100644 --- a/xen/arch/x86/hvm/emulate.c +++ b/xen/arch/x86/hvm/emulate.c @@ -55,7 +55,6 @@ static int hvmemul_do_io( paddr_t value = ram_gpa; int value_is_ptr = (p_data == NULL); struct vcpu *curr = current; - struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain); ioreq_t *p = get_ioreq(curr); unsigned long ram_gfn = paddr_to_pfn(ram_gpa); p2m_type_t p2mt; @@ -63,10 +62,10 @@ static int hvmemul_do_io( int rc; /* Check for paged out page */ - ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt); + ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt); if ( p2m_is_paging(p2mt) ) { - p2m_mem_paging_populate(p2m, ram_gfn); + p2m_mem_paging_populate(curr->domain, ram_gfn); return X86EMUL_RETRY; } if ( p2m_is_shared(p2mt) ) @@ -640,7 +639,6 @@ static int hvmemul_rep_movs( unsigned long saddr, daddr, bytes; paddr_t sgpa, dgpa; uint32_t pfec = PFEC_page_present; - struct p2m_domain *p2m = p2m_get_hostp2m(current->domain); p2m_type_t p2mt; int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF); char *buf; @@ -671,12 +669,12 @@ static int hvmemul_rep_movs( if ( rc != X86EMUL_OKAY ) return rc; - (void)gfn_to_mfn(p2m, sgpa >> PAGE_SHIFT, &p2mt); + (void)gfn_to_mfn(current->domain, sgpa >> PAGE_SHIFT, &p2mt); if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) ) return hvmemul_do_mmio( sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL); - (void)gfn_to_mfn(p2m, dgpa >> PAGE_SHIFT, &p2mt); + (void)gfn_to_mfn(current->domain, dgpa >> PAGE_SHIFT, &p2mt); if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) ) return hvmemul_do_mmio( dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL); |