aboutsummaryrefslogtreecommitdiffstats
path: root/xen
diff options
context:
space:
mode:
authorTim Deegan <tim@xen.org>2012-03-08 16:40:05 +0000
committerTim Deegan <tim@xen.org>2012-03-08 16:40:05 +0000
commit770d1e858de56ba8f2e0d7e45c08f48d599528e5 (patch)
treefcae677231c44b4c20c0816f41cbd51074a5412e /xen
parent7a3de767373545388a9fded238b3450c5b21066d (diff)
downloadxen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.tar.gz
xen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.tar.bz2
xen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.zip
Use a reserved pfn in the guest address space to store mem event rings
This solves a long-standing issue in which the pages backing these rings were pages belonging to dom0 user-space processes. Thus, if the process would die unexpectedly, Xen would keep posting events to a page now belonging to some other process. We update all API-consumers in tree (xenpaging and xen-access). This is an API/ABI change, so please speak up if it breaks your assumptions. The patch touches tools, hypervisor x86/hvm bits, and hypervisor x86/mm bits. Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org> Acked-by: Tim Deegan <tim@xen.org> Acked-by: Ian Campbell <ian.campbell@citrix.com> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen')
-rw-r--r--xen/arch/x86/mm/mem_event.c50
-rw-r--r--xen/include/public/domctl.h1
-rw-r--r--xen/include/public/hvm/params.h7
-rw-r--r--xen/include/xen/sched.h1
4 files changed, 30 insertions, 29 deletions
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index 9a8b26b07d..b6dde8d0de 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -44,16 +44,11 @@ static int mem_event_enable(
xen_domctl_mem_event_op_t *mec,
struct mem_event_domain *med,
int pause_flag,
+ int param,
xen_event_channel_notification_t notification_fn)
{
int rc;
- struct domain *dom_mem_event = current->domain;
- struct vcpu *v = current;
- unsigned long ring_addr = mec->ring_addr;
- l1_pgentry_t l1e;
- unsigned long ring_gfn = 0; /* gcc ... */
- p2m_type_t p2mt;
- mfn_t ring_mfn;
+ unsigned long ring_gfn = d->arch.hvm_domain.params[param];
/* Only one helper at a time. If the helper crashed,
* the ring is in an undefined state and so is the guest.
@@ -61,22 +56,18 @@ static int mem_event_enable(
if ( med->ring_page )
return -EBUSY;
- /* Get MFN of ring page */
- guest_get_eff_l1e(v, ring_addr, &l1e);
- ring_gfn = l1e_get_pfn(l1e);
- ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt);
-
- if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
- {
- put_gfn(dom_mem_event, ring_gfn);
- return -EINVAL;
- }
+ /* The parameter defaults to zero, and it should be
+ * set to something */
+ if ( ring_gfn == 0 )
+ return -ENOSYS;
mem_event_ring_lock_init(med);
+ mem_event_ring_lock(med);
- /* Map ring page */
- med->ring_page = map_domain_page(mfn_x(ring_mfn));
- put_gfn(dom_mem_event, ring_gfn);
+ rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
+ &med->ring_page);
+ if ( rc < 0 )
+ goto err;
/* Set the number of currently blocked vCPUs to 0. */
med->blocked = 0;
@@ -101,11 +92,13 @@ static int mem_event_enable(
/* Initialize the last-chance wait queue. */
init_waitqueue_head(&med->wq);
+ mem_event_ring_unlock(med);
return 0;
err:
- unmap_domain_page(med->ring_page);
- med->ring_page = NULL;
+ destroy_ring_for_helper(&med->ring_page,
+ med->ring_pg_struct);
+ mem_event_ring_unlock(med);
return rc;
}
@@ -221,9 +214,6 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
/* Free domU's event channel and leave the other one unbound */
free_xen_event_channel(d->vcpu[0], med->xen_port);
-
- unmap_domain_page(med->ring_page);
- med->ring_page = NULL;
/* Unblock all vCPUs */
for_each_vcpu ( d, v )
@@ -235,6 +225,8 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
}
}
+ destroy_ring_for_helper(&med->ring_page,
+ med->ring_pg_struct);
mem_event_ring_unlock(med);
}
@@ -548,7 +540,9 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
if ( p2m->pod.entry_count )
break;
- rc = mem_event_enable(d, mec, med, _VPF_mem_paging, mem_paging_notification);
+ rc = mem_event_enable(d, mec, med, _VPF_mem_paging,
+ HVM_PARAM_PAGING_RING_PFN,
+ mem_paging_notification);
}
break;
@@ -584,7 +578,9 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
break;
- rc = mem_event_enable(d, mec, med, _VPF_mem_access, mem_access_notification);
+ rc = mem_event_enable(d, mec, med, _VPF_mem_access,
+ HVM_PARAM_ACCESS_RING_PFN,
+ mem_access_notification);
}
break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 941acc3590..a3ed8d76be 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -748,7 +748,6 @@ struct xen_domctl_mem_event_op {
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
uint32_t port; /* OUT: event channel for ring */
- uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */
};
typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 6699788eae..55c1b578f7 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -142,6 +142,11 @@
/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM 24
-#define HVM_NR_PARAMS 27
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN 27
+#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_SHARING_RING_PFN 29
+
+#define HVM_NR_PARAMS 30
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 8c5931368d..65219cf988 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -192,6 +192,7 @@ struct mem_event_domain
unsigned char target_producers;
/* shared ring page */
void *ring_page;
+ struct page_info *ring_pg_struct;
/* front-end ring */
mem_event_front_ring_t front_ring;
/* event channel port (vcpu0 only) */