author    Jan Beulich <jbeulich@suse.com>    2013-01-23 14:12:54 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-01-23 14:12:54 +0100
commit    bcf557675d858e3b2e92788f5424ed4bfdac413d (patch)
tree      facf1e523888c80e963541a203cc890160ef7adc /xen
parent    bad20150553601964143dff43d932accceba3e5e (diff)
x86: properly use map_domain_page() in nested HVM code
This eliminates a couple of incorrect/inconsistent uses of
map_domain_page() from VT-x code.

Note that this does _not_ add error handling where none was present
before, even though I think NULL returns from any of the mapping
operations touched here need to properly be handled. I just don't know
this code well enough to figure out what the right action in each case
would be.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
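[Editor's note] For context, the permanent flag introduced below selects
between two mapping lifetimes: transient mappings come from the per-CPU
domain-page slots and must be torn down on the same code path, while
permanent ones come from the global map and may be cached across VM
entries (e.g. nv_vvmcx and the MSR/IO bitmaps). A minimal sketch of the
resulting helper logic, mirroring the patched __hvm_map_guest_frame() and
hvm_unmap_guest_frame() in the diff (the names map_guest_page and
unmap_guest_page are illustrative only, not part of the patch):

    #include <xen/domain_page.h>   /* map_domain_page() machinery */
    #include <xen/mm.h>            /* struct page_info, put_page() */

    static void *map_guest_page(struct page_info *page, bool_t permanent)
    {
        void *map;

        if ( !permanent )
            return __map_domain_page(page);    /* transient per-CPU mapping */

        map = __map_domain_page_global(page);  /* long-lived global mapping */
        if ( !map )
            put_page(page);                    /* drop the page ref on failure */

        return map;
    }

    static void unmap_guest_page(void *p, bool_t permanent)
    {
        unsigned long mfn;

        if ( !p )
            return;

        /* Recover the MFN before the mapping disappears. */
        mfn = domain_page_map_to_mfn(p);

        if ( !permanent )
            unmap_domain_page(p);
        else
            unmap_domain_page_global(p);

        /* Release the reference taken when the page was mapped. */
        put_page(mfn_to_page(mfn));
    }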
Diffstat (limited to 'xen')
-rw-r--r--  xen/arch/x86/hvm/hvm.c           | 42
-rw-r--r--  xen/arch/x86/hvm/nestedhvm.c     |  3
-rw-r--r--  xen/arch/x86/hvm/svm/nestedsvm.c | 15
-rw-r--r--  xen/arch/x86/hvm/vmx/vvmx.c      | 48
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h    |  6
5 files changed, 66 insertions(+), 48 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d702de4e86..34b86f4f39 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1966,7 +1966,8 @@ int hvm_virtual_to_linear_addr(
/* On non-NULL return, we leave this function holding an additional
* ref on the underlying mfn, if any */
-static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
+static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable,
+ bool_t permanent)
{
void *map;
p2m_type_t p2mt;
@@ -1991,28 +1992,41 @@ static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
if ( writable )
paging_mark_dirty(d, page_to_mfn(page));
- map = __map_domain_page(page);
+ if ( !permanent )
+ return __map_domain_page(page);
+
+ map = __map_domain_page_global(page);
+ if ( !map )
+ put_page(page);
+
return map;
}
-void *hvm_map_guest_frame_rw(unsigned long gfn)
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent)
{
- return __hvm_map_guest_frame(gfn, 1);
+ return __hvm_map_guest_frame(gfn, 1, permanent);
}
-void *hvm_map_guest_frame_ro(unsigned long gfn)
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
{
- return __hvm_map_guest_frame(gfn, 0);
+ return __hvm_map_guest_frame(gfn, 0, permanent);
}
-void hvm_unmap_guest_frame(void *p)
+void hvm_unmap_guest_frame(void *p, bool_t permanent)
{
- if ( p )
- {
- unsigned long mfn = domain_page_map_to_mfn(p);
+ unsigned long mfn;
+
+ if ( !p )
+ return;
+
+ mfn = domain_page_map_to_mfn(p);
+
+ if ( !permanent )
unmap_domain_page(p);
- put_page(mfn_to_page(mfn));
- }
+ else
+ unmap_domain_page_global(p);
+
+ put_page(mfn_to_page(mfn));
}
static void *hvm_map_entry(unsigned long va)
@@ -2038,7 +2052,7 @@ static void *hvm_map_entry(unsigned long va)
if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
goto fail;
- v = hvm_map_guest_frame_rw(gfn);
+ v = hvm_map_guest_frame_rw(gfn, 0);
if ( v == NULL )
goto fail;
@@ -2051,7 +2065,7 @@ static void *hvm_map_entry(unsigned long va)
static void hvm_unmap_entry(void *p)
{
- hvm_unmap_guest_frame(p);
+ hvm_unmap_guest_frame(p, 0);
}
static int hvm_load_segment_selector(
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index 6047cb5ee0..b3cf07dda3 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -53,8 +53,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
nv->nv_ioport80 = 0;
nv->nv_ioportED = 0;
- if (nv->nv_vvmcx)
- hvm_unmap_guest_frame(nv->nv_vvmcx);
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
nv->nv_vvmcxaddr = VMCX_EADDR;
nv->nv_flushp2m = 0;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index dc8d382deb..e32c8724d2 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -69,15 +69,14 @@ int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr)
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
- ASSERT(nv->nv_vvmcx != NULL);
ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
- hvm_unmap_guest_frame(nv->nv_vvmcx);
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
nv->nv_vvmcxaddr = VMCX_EADDR;
}
if (nv->nv_vvmcx == NULL) {
- nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT);
+ nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT, 1);
if (nv->nv_vvmcx == NULL)
return 0;
nv->nv_vvmcxaddr = vmcbaddr;
@@ -141,6 +140,8 @@ void nsvm_vcpu_destroy(struct vcpu *v)
get_order_from_bytes(MSRPM_SIZE));
svm->ns_merged_msrpm = NULL;
}
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
+ nv->nv_vvmcx = NULL;
if (nv->nv_n2vmcx) {
free_vmcb(nv->nv_n2vmcx);
nv->nv_n2vmcx = NULL;
@@ -358,11 +359,11 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
svm->ns_oiomap_pa = svm->ns_iomap_pa;
svm->ns_iomap_pa = ns_vmcb->_iopm_base_pa;
- ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT);
+ ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT, 0);
ASSERT(ns_viomap != NULL);
ioport_80 = test_bit(0x80, ns_viomap);
ioport_ed = test_bit(0xed, ns_viomap);
- hvm_unmap_guest_frame(ns_viomap);
+ hvm_unmap_guest_frame(ns_viomap, 0);
svm->ns_iomap = nestedhvm_vcpu_iomap_get(ioport_80, ioport_ed);
@@ -888,7 +889,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1)
break;
}
- io_bitmap = hvm_map_guest_frame_ro(gfn);
+ io_bitmap = hvm_map_guest_frame_ro(gfn, 0);
if (io_bitmap == NULL) {
gdprintk(XENLOG_ERR,
"IOIO intercept: mapping of permission map failed\n");
@@ -896,7 +897,7 @@ nsvm_vmcb_guest_intercepts_ioio(paddr_t iopm_pa, uint64_t exitinfo1)
}
enabled = test_bit(port, io_bitmap);
- hvm_unmap_guest_frame(io_bitmap);
+ hvm_unmap_guest_frame(io_bitmap, 0);
if (!enabled)
return NESTEDHVM_VMEXIT_HOST;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index d4e9b02df9..90b4da9b04 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -569,18 +569,20 @@ void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value)
static void nvmx_update_apic_access_address(struct vcpu *v)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- u64 apic_gpfn, apic_mfn;
u32 ctrl;
- void *apic_va;
ctrl = __n2_secondary_exec_control(v);
if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
{
+ p2m_type_t p2mt;
+ unsigned long apic_gpfn;
+ struct page_info *apic_pg;
+
apic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
- apic_va = hvm_map_guest_frame_ro(apic_gpfn);
- apic_mfn = virt_to_mfn(apic_va);
- __vmwrite(APIC_ACCESS_ADDR, (apic_mfn << PAGE_SHIFT));
- hvm_unmap_guest_frame(apic_va);
+ apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+ ASSERT(apic_pg && !p2m_is_paging(p2mt));
+ __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
+ put_page(apic_pg);
}
else
__vmwrite(APIC_ACCESS_ADDR, 0);
@@ -589,18 +591,20 @@ static void nvmx_update_apic_access_address(struct vcpu *v)
static void nvmx_update_virtual_apic_address(struct vcpu *v)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- u64 vapic_gpfn, vapic_mfn;
u32 ctrl;
- void *vapic_va;
ctrl = __n2_exec_control(v);
if ( ctrl & CPU_BASED_TPR_SHADOW )
{
+ p2m_type_t p2mt;
+ unsigned long vapic_gpfn;
+ struct page_info *vapic_pg;
+
vapic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
- vapic_va = hvm_map_guest_frame_ro(vapic_gpfn);
- vapic_mfn = virt_to_mfn(vapic_va);
- __vmwrite(VIRTUAL_APIC_PAGE_ADDR, (vapic_mfn << PAGE_SHIFT));
- hvm_unmap_guest_frame(vapic_va);
+ vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+ ASSERT(vapic_pg && !p2m_is_paging(p2mt));
+ __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
+ put_page(vapic_pg);
}
else
__vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
@@ -641,9 +645,9 @@ static void __map_msr_bitmap(struct vcpu *v)
unsigned long gpa;
if ( nvmx->msrbitmap )
- hvm_unmap_guest_frame (nvmx->msrbitmap);
+ hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
- nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+ nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
}
static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
@@ -654,9 +658,9 @@ static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
index = vmcs_reg == IO_BITMAP_A ? 0 : 1;
if (nvmx->iobitmap[index])
- hvm_unmap_guest_frame (nvmx->iobitmap[index]);
+ hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
- nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+ nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
}
static inline void map_io_bitmap_all(struct vcpu *v)
@@ -673,17 +677,17 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
__clear_current_vvmcs(v);
if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
- hvm_unmap_guest_frame(nvcpu->nv_vvmcx);
+ hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = VMCX_EADDR;
for (i=0; i<2; i++) {
if ( nvmx->iobitmap[i] ) {
- hvm_unmap_guest_frame(nvmx->iobitmap[i]);
+ hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
nvmx->iobitmap[i] = NULL;
}
}
if ( nvmx->msrbitmap ) {
- hvm_unmap_guest_frame(nvmx->msrbitmap);
+ hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
nvmx->msrbitmap = NULL;
}
}
@@ -1289,7 +1293,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
{
- nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+ nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
nvcpu->nv_vvmcxaddr = gpa;
map_io_bitmap_all (v);
__map_msr_bitmap(v);
@@ -1350,10 +1354,10 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
else
{
/* Even if this VMCS isn't the current one, we must clear it. */
- vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+ vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
if ( vvmcs )
__set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
- hvm_unmap_guest_frame(vvmcs);
+ hvm_unmap_guest_frame(vvmcs, 0);
}
vmreturn(regs, VMSUCCEED);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 76e9cc8fbe..35a4b3eac2 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -423,9 +423,9 @@ int hvm_virtual_to_linear_addr(
unsigned int addr_size,
unsigned long *linear_addr);
-void *hvm_map_guest_frame_rw(unsigned long gfn);
-void *hvm_map_guest_frame_ro(unsigned long gfn);
-void hvm_unmap_guest_frame(void *p);
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent);
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
+void hvm_unmap_guest_frame(void *p, bool_t permanent);
static inline void hvm_set_info_guest(struct vcpu *v)
{
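[Editor's note] For reference, a caller-side sketch of the reworked API,
patterned on the transient uses in the diff above
(nsvm_vmcb_guest_intercepts_ioio(), nvmx_handle_vmclear());
guest_bitmap_test() is a hypothetical helper, not part of the patch:

    /* Map a guest frame transiently, test one bit, and unmap again. */
    static int guest_bitmap_test(unsigned long gfn, unsigned int bit)
    {
        int set;
        void *map = hvm_map_guest_frame_ro(gfn, 0);  /* 0: transient mapping */

        if ( map == NULL )
            return -1;                               /* propagate the failure */

        set = test_bit(bit, map);
        hvm_unmap_guest_frame(map, 0);               /* matching transient unmap */

        return set;
    }

Permanent mappings (permanent=1), by contrast, stay live until explicitly
unmapped, as done for nv_vvmcx in nsvm_vcpu_destroy() and
nvmx_purge_vvmcs() above.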