aboutsummaryrefslogtreecommitdiffstats
path: root/xen
diff options
context:
space:
mode:
authorOlaf Hering <olaf@aepfle.de>2011-09-16 12:19:26 +0100
committerOlaf Hering <olaf@aepfle.de>2011-09-16 12:19:26 +0100
commit2c36185d4407f6932ecf3e8d5244049c816a8f91 (patch)
tree997b84c388238b9f9b12be38b704616d69ccb396 /xen
parentade1da50bcd9f686f32b5f92dc376260d650b396 (diff)
downloadxen-2c36185d4407f6932ecf3e8d5244049c816a8f91.tar.gz
xen-2c36185d4407f6932ecf3e8d5244049c816a8f91.tar.bz2
xen-2c36185d4407f6932ecf3e8d5244049c816a8f91.zip
mem_event: use different ringbuffers for share, paging and access
Up to now a single ring buffer was used for mem_share, xenpaging and xen-access. Each helper would have to cooperate and pull only its own requests from the ring. Unfortunately this was not implemented. And even if it was, it would make the whole concept fragile because a crash or early exit of one helper would stall the others. What happened up to now is that active xenpaging + memory_sharing would push memsharing requests in the buffer. xenpaging is not prepared for such requests. This patch creates an independent ring buffer for mem_share, xenpaging and xen-access and adds also new functions to enable xenpaging and xen-access. The xc_mem_event_enable/xc_mem_event_disable functions will be removed. The various XEN_DOMCTL_MEM_EVENT_* macros were cleaned up. Due to the removal the API changed, so the SONAME will be changed too. Signed-off-by: Olaf Hering <olaf@aepfle.de> Acked-by: Tim Deegan <tim@xen.org> Acked-by: Ian Jackson <ian.jackson@eu.citrix.com> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen')
-rw-r--r--xen/arch/ia64/xen/dom0_ops.c2
-rw-r--r--xen/arch/x86/hvm/hvm.c4
-rw-r--r--xen/arch/x86/mm/mem_event.c161
-rw-r--r--xen/arch/x86/mm/mem_paging.c4
-rw-r--r--xen/arch/x86/mm/mem_sharing.c22
-rw-r--r--xen/arch/x86/mm/p2m.c18
-rw-r--r--xen/include/public/domctl.h43
-rw-r--r--xen/include/xen/sched.h6
8 files changed, 142 insertions, 118 deletions
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index 7eb446623a..f6ba0e3e03 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -688,7 +688,7 @@ long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
switch(mec->op)
{
- case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL:
{
if (mec->u.enable) {
ret = -EINVAL; /* not implemented */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7f1372fde7..bce2df8d0a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4025,7 +4025,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
if ( (p & HVMPME_onchangeonly) && (value == old) )
return 1;
- rc = mem_event_check_ring(d, &d->mem_event);
+ rc = mem_event_check_ring(d, &d->mem_access);
if ( rc )
return rc;
@@ -4048,7 +4048,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
req.gla_valid = 1;
}
- mem_event_put_request(d, &d->mem_event, &req);
+ mem_event_put_request(d, &d->mem_access, &req);
return 1;
}
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index 05d5e0658d..c6c864dbd5 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -37,24 +37,52 @@
#define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock)
#define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock)
-static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d,
+ xen_domctl_mem_event_op_t *mec,
+ struct mem_event_domain *med)
{
int rc;
+ struct domain *dom_mem_event = current->domain;
+ struct vcpu *v = current;
+ unsigned long ring_addr = mec->ring_addr;
+ unsigned long shared_addr = mec->shared_addr;
+ l1_pgentry_t l1e;
+ unsigned long gfn;
+ p2m_type_t p2mt;
+ mfn_t ring_mfn;
+ mfn_t shared_mfn;
+
+ /* Only one helper at a time. If the helper crashed,
+ * the ring is in an undefined state and so is the guest.
+ */
+ if ( med->ring_page )
+ return -EBUSY;
+
+ /* Get MFN of ring page */
+ guest_get_eff_l1e(v, ring_addr, &l1e);
+ gfn = l1e_get_pfn(l1e);
+ ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+
+ if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
+ return -EINVAL;
+
+ /* Get MFN of shared page */
+ guest_get_eff_l1e(v, shared_addr, &l1e);
+ gfn = l1e_get_pfn(l1e);
+ shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+
+ if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
+ return -EINVAL;
/* Map ring and shared pages */
med->ring_page = map_domain_page(mfn_x(ring_mfn));
- if ( med->ring_page == NULL )
- goto err;
-
med->shared_page = map_domain_page(mfn_x(shared_mfn));
- if ( med->shared_page == NULL )
- goto err_ring;
/* Allocate event channel */
rc = alloc_unbound_xen_event_channel(d->vcpu[0],
current->domain->domain_id);
if ( rc < 0 )
- goto err_shared;
+ goto err;
((mem_event_shared_page_t *)med->shared_page)->port = rc;
med->xen_port = rc;
@@ -71,14 +99,14 @@ static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_
return 0;
- err_shared:
+ err:
unmap_domain_page(med->shared_page);
med->shared_page = NULL;
- err_ring:
+
unmap_domain_page(med->ring_page);
med->ring_page = NULL;
- err:
- return 1;
+
+ return rc;
}
static int mem_event_disable(struct mem_event_domain *med)
@@ -220,86 +248,79 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
rc = -ENOSYS;
- switch ( mec-> mode )
+ switch ( mec->mode )
{
- case 0:
+ case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
{
+ struct mem_event_domain *med = &d->mem_paging;
+ rc = -ENODEV;
+ /* Only HAP is supported */
+ if ( !hap_enabled(d) )
+ break;
+
+ /* Currently only EPT is supported */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ break;
+
switch( mec->op )
{
- case XEN_DOMCTL_MEM_EVENT_OP_ENABLE:
+ case XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE:
{
- struct domain *dom_mem_event = current->domain;
- struct vcpu *v = current;
- struct mem_event_domain *med = &d->mem_event;
- unsigned long ring_addr = mec->ring_addr;
- unsigned long shared_addr = mec->shared_addr;
- l1_pgentry_t l1e;
- unsigned long gfn;
- p2m_type_t p2mt;
- mfn_t ring_mfn;
- mfn_t shared_mfn;
-
- /* Only one xenpaging at a time. If xenpaging crashed,
- * the cache is in an undefined state and so is the guest
- */
- rc = -EBUSY;
- if ( med->ring_page )
- break;
-
- /* Currently only EPT is supported */
- rc = -ENODEV;
- if ( !(hap_enabled(d) &&
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) )
- break;
-
- /* Get MFN of ring page */
- guest_get_eff_l1e(v, ring_addr, &l1e);
- gfn = l1e_get_pfn(l1e);
- ring_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+ rc = mem_event_enable(d, mec, med);
+ }
+ break;
- rc = -EINVAL;
- if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
- break;
+ case XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE:
+ {
+ rc = mem_event_disable(med);
+ }
+ break;
- /* Get MFN of shared page */
- guest_get_eff_l1e(v, shared_addr, &l1e);
- gfn = l1e_get_pfn(l1e);
- shared_mfn = gfn_to_mfn(dom_mem_event, gfn, &p2mt);
+ default:
+ {
+ if ( med->ring_page )
+ rc = mem_paging_domctl(d, mec, u_domctl);
+ }
+ break;
+ }
+ }
+ break;
- rc = -EINVAL;
- if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
- break;
+ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
+ {
+ struct mem_event_domain *med = &d->mem_access;
+ rc = -ENODEV;
+ /* Only HAP is supported */
+ if ( !hap_enabled(d) )
+ break;
- rc = -EINVAL;
- if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 )
- break;
+ /* Currently only EPT is supported */
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+ break;
- rc = 0;
+ switch( mec->op )
+ {
+ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
+ {
+ rc = mem_event_enable(d, mec, med);
}
break;
- case XEN_DOMCTL_MEM_EVENT_OP_DISABLE:
+ case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE:
{
- rc = mem_event_disable(&d->mem_event);
+ rc = mem_event_disable(&d->mem_access);
}
break;
default:
- rc = -ENOSYS;
- break;
+ {
+ if ( med->ring_page )
+ rc = mem_access_domctl(d, mec, u_domctl);
}
break;
+ }
}
- case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
- {
- rc = mem_paging_domctl(d, mec, u_domctl);
- break;
- }
- case XEN_DOMCTL_MEM_EVENT_OP_ACCESS:
- {
- rc = mem_access_domctl(d, mec, u_domctl);
- break;
- }
+ break;
}
return rc;
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 55b36f9155..5c02095163 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -28,10 +28,6 @@
int mem_paging_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
XEN_GUEST_HANDLE(void) u_domctl)
{
- /* Only HAP is supported */
- if ( !hap_enabled(d) )
- return -ENODEV;
-
switch( mec->op )
{
case XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE:
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index dd4825471e..c33795dc37 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -281,12 +281,12 @@ static struct page_info* mem_sharing_alloc_page(struct domain *d,
vcpu_pause_nosync(v);
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
- if(mem_event_check_ring(d, &d->mem_event)) return page;
+ if(mem_event_check_ring(d, &d->mem_share)) return page;
req.gfn = gfn;
req.p2mt = p2m_ram_shared;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event, &req);
+ mem_event_put_request(d, &d->mem_share, &req);
return page;
}
@@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct domain *d)
mem_event_response_t rsp;
/* Get request off the ring */
- mem_event_get_response(&d->mem_event, &rsp);
+ mem_event_get_response(&d->mem_share, &rsp);
/* Unpause domain/vcpu */
if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
@@ -697,14 +697,14 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
switch(mec->op)
{
- case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL:
{
d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
rc = 0;
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN:
{
unsigned long gfn = mec->u.nominate.u.gfn;
shr_handle_t handle;
@@ -715,7 +715,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF:
{
grant_ref_t gref = mec->u.nominate.u.grant_ref;
unsigned long gfn;
@@ -730,7 +730,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_SHARE:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE:
{
shr_handle_t sh = mec->u.share.source_handle;
shr_handle_t ch = mec->u.share.client_handle;
@@ -738,7 +738,7 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_RESUME:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME:
{
if(!mem_sharing_enabled(d))
return -EINVAL;
@@ -746,21 +746,21 @@ int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN:
{
unsigned long gfn = mec->u.debug.u.gfn;
rc = mem_sharing_debug_gfn(d, gfn);
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN:
{
unsigned long mfn = mec->u.debug.u.mfn;
rc = mem_sharing_debug_mfn(mfn);
}
break;
- case XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF:
+ case XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF:
{
grant_ref_t gref = mec->u.debug.u.gref;
rc = mem_sharing_debug_gref(d, gref);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index d8f622ffa3..c62aa12da7 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -755,7 +755,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
mem_event_request_t req;
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_event) == 0)
+ if ( mem_event_check_ring(d, &d->mem_paging) == 0)
{
/* Send release notification to pager */
memset(&req, 0, sizeof(req));
@@ -763,7 +763,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
req.gfn = gfn;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event, &req);
+ mem_event_put_request(d, &d->mem_paging, &req);
}
}
@@ -775,7 +775,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d, &d->mem_event) )
+ if ( mem_event_check_ring(d, &d->mem_paging) )
return;
memset(&req, 0, sizeof(req));
@@ -803,7 +803,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
{
/* gfn is already on its way back and vcpu is not paused */
- mem_event_put_req_producers(&d->mem_event);
+ mem_event_put_req_producers(&d->mem_paging);
return;
}
@@ -812,7 +812,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
req.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event, &req);
+ mem_event_put_request(d, &d->mem_paging, &req);
}
int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
@@ -842,7 +842,7 @@ void p2m_mem_paging_resume(struct domain *d)
mfn_t mfn;
/* Pull the response off the ring */
- mem_event_get_response(&d->mem_event, &rsp);
+ mem_event_get_response(&d->mem_paging, &rsp);
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -889,7 +889,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
p2m_unlock(p2m);
/* Otherwise, check if there is a memory event listener, and send the message along */
- res = mem_event_check_ring(d, &d->mem_event);
+ res = mem_event_check_ring(d, &d->mem_access);
if ( res < 0 )
{
/* No listener */
@@ -933,7 +933,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &d->mem_event, &req);
+ mem_event_put_request(d, &d->mem_access, &req);
/* VCPU paused, mem event request sent */
}
@@ -943,7 +943,7 @@ void p2m_mem_access_resume(struct p2m_domain *p2m)
struct domain *d = p2m->domain;
mem_event_response_t rsp;
- mem_event_get_response(&d->mem_event, &rsp);
+ mem_event_get_response(&d->mem_access, &rsp);
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index c0dbc0234e..84f236d563 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -708,20 +708,18 @@ struct xen_domctl_gdbsx_domstatus {
/* XEN_DOMCTL_mem_event_op */
-/* Add and remove memory handlers */
-#define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1
-
/*
+* Domain memory paging
* Page memory in and out.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
-/* Domain memory paging */
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 2
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 3
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 4
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 5
/*
* Access permissions.
@@ -734,11 +732,14 @@ struct xen_domctl_gdbsx_domstatus {
* ACCESS_RESUME mode for the following domctl.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 0
+
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 2
struct xen_domctl_mem_event_op {
- uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */
- uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */
+ uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
+ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
/* OP_ENABLE */
uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */
@@ -755,14 +756,16 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
*/
/* XEN_DOMCTL_mem_sharing_op */
-#define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0
-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1
-#define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2
-#define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3
-#define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6
-#define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
+
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL 0
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN 1
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF 2
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE 3
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME 4
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN 5
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN 6
+#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF 7
#define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10)
#define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2bda1edd7b..c6b3a5916f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -317,8 +317,12 @@ struct domain
/* Non-migratable and non-restoreable? */
bool_t disable_migrate;
+ /* Memory sharing support */
+ struct mem_event_domain mem_share;
/* Memory paging support */
- struct mem_event_domain mem_event;
+ struct mem_event_domain mem_paging;
+ /* Memory access support */
+ struct mem_event_domain mem_access;
/* Currently computed from union of all vcpu cpu-affinity masks. */
nodemask_t node_affinity;