author    Olaf Hering <olaf@aepfle.de>    2011-09-16 12:13:31 +0100
committer Olaf Hering <olaf@aepfle.de>    2011-09-16 12:13:31 +0100
commit    ade1da50bcd9f686f32b5f92dc376260d650b396 (patch)
tree      56b573d9a1381cfab33f1ac96bc2d81b49a3606c
parent    2411017e28d64b5d8a37f1b911776ae4353c01ac (diff)
mem_event: pass mem_event_domain pointer to mem_event functions
Pass a struct mem_event_domain pointer to the various mem_event functions.
This will be used in a subsequent patch which creates different ring
buffers for the memshare, xenpaging and memaccess functionality.

Remove the struct domain argument from some functions.

Signed-off-by: Olaf Hering <olaf@aepfle.de>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
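For context, a minimal sketch of the state this patch threads through the
API. The field list is inferred from the accessors visible in this diff
(ring_lock, req_producers, ring_page, shared_page, front_ring, xen_port);
the exact declaration in the Xen headers may order or type them differently,
and the per-subsystem instance names in the trailing comment are an
assumption about the follow-up patch, not part of this one.

/* Sketch only: fields inferred from the accessors used in this diff;
 * the real declaration lives in the Xen headers and may differ. */
struct mem_event_domain
{
    spinlock_t             ring_lock;     /* taken by mem_event_ring_lock() */
    unsigned int           req_producers; /* slots reserved by mem_event_check_ring() */
    void                   *ring_page;    /* mapped via map_domain_page() */
    void                   *shared_page;  /* holds the event channel port */
    mem_event_front_ring_t front_ring;    /* FRONT_RING_INIT()ed over ring_page */
    int                    xen_port;      /* event channel bound in mem_event_enable() */
};

/* Callers now name the ring explicitly instead of relying on the single
 * hard-coded d->mem_event:
 *
 *     if ( mem_event_check_ring(d, &d->mem_event) )
 *         return;
 *     ...
 *     mem_event_put_request(d, &d->mem_event, &req);
 *
 * which lets a later patch embed several instances in struct domain
 * (hypothetically: one each for paging, access and sharing) without
 * changing these functions again. */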
-rw-r--r--  xen/arch/x86/hvm/hvm.c          |  4
-rw-r--r--  xen/arch/x86/mm/mem_event.c     | 95
-rw-r--r--  xen/arch/x86/mm/mem_sharing.c   |  6
-rw-r--r--  xen/arch/x86/mm/p2m.c           | 18
-rw-r--r--  xen/include/asm-x86/mem_event.h |  8
5 files changed, 66 insertions(+), 65 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6df24ea87b..7f1372fde7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4025,7 +4025,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
if ( (p & HVMPME_onchangeonly) && (value == old) )
return 1;
- rc = mem_event_check_ring(d);
+ rc = mem_event_check_ring(d, &d->mem_event);
if ( rc )
return rc;
@@ -4048,7 +4048,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason,
req.gla_valid = 1;
}
- mem_event_put_request(d, &req);
+ mem_event_put_request(d, &d->mem_event, &req);
return 1;
}
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index d4a58d53c8..05d5e0658d 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -33,21 +33,21 @@
#define xen_rmb() rmb()
#define xen_wmb() wmb()
-#define mem_event_ring_lock_init(_d) spin_lock_init(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock)
-#define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock)
+#define mem_event_ring_lock_init(_med) spin_lock_init(&(_med)->ring_lock)
+#define mem_event_ring_lock(_med) spin_lock(&(_med)->ring_lock)
+#define mem_event_ring_unlock(_med) spin_unlock(&(_med)->ring_lock)
-static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
+static int mem_event_enable(struct domain *d, struct mem_event_domain *med, mfn_t ring_mfn, mfn_t shared_mfn)
{
int rc;
/* Map ring and shared pages */
- d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
- if ( d->mem_event.ring_page == NULL )
+ med->ring_page = map_domain_page(mfn_x(ring_mfn));
+ if ( med->ring_page == NULL )
goto err;
- d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
- if ( d->mem_event.shared_page == NULL )
+ med->shared_page = map_domain_page(mfn_x(shared_mfn));
+ if ( med->shared_page == NULL )
goto err_ring;
/* Allocate event channel */
@@ -56,15 +56,15 @@ static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
if ( rc < 0 )
goto err_shared;
- ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
- d->mem_event.xen_port = rc;
+ ((mem_event_shared_page_t *)med->shared_page)->port = rc;
+ med->xen_port = rc;
/* Prepare ring buffer */
- FRONT_RING_INIT(&d->mem_event.front_ring,
- (mem_event_sring_t *)d->mem_event.ring_page,
+ FRONT_RING_INIT(&med->front_ring,
+ (mem_event_sring_t *)med->ring_page,
PAGE_SIZE);
- mem_event_ring_lock_init(d);
+ mem_event_ring_lock_init(med);
/* Wake any VCPUs paused for memory events */
mem_event_unpause_vcpus(d);
@@ -72,34 +72,34 @@ static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
return 0;
err_shared:
- unmap_domain_page(d->mem_event.shared_page);
- d->mem_event.shared_page = NULL;
+ unmap_domain_page(med->shared_page);
+ med->shared_page = NULL;
err_ring:
- unmap_domain_page(d->mem_event.ring_page);
- d->mem_event.ring_page = NULL;
+ unmap_domain_page(med->ring_page);
+ med->ring_page = NULL;
err:
return 1;
}
-static int mem_event_disable(struct domain *d)
+static int mem_event_disable(struct mem_event_domain *med)
{
- unmap_domain_page(d->mem_event.ring_page);
- d->mem_event.ring_page = NULL;
+ unmap_domain_page(med->ring_page);
+ med->ring_page = NULL;
- unmap_domain_page(d->mem_event.shared_page);
- d->mem_event.shared_page = NULL;
+ unmap_domain_page(med->shared_page);
+ med->shared_page = NULL;
return 0;
}
-void mem_event_put_request(struct domain *d, mem_event_request_t *req)
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req)
{
mem_event_front_ring_t *front_ring;
RING_IDX req_prod;
- mem_event_ring_lock(d);
+ mem_event_ring_lock(med);
- front_ring = &d->mem_event.front_ring;
+ front_ring = &med->front_ring;
req_prod = front_ring->req_prod_pvt;
/* Copy request */
@@ -107,23 +107,23 @@ void mem_event_put_request(struct domain *d, mem_event_request_t *req)
req_prod++;
/* Update ring */
- d->mem_event.req_producers--;
+ med->req_producers--;
front_ring->req_prod_pvt = req_prod;
RING_PUSH_REQUESTS(front_ring);
- mem_event_ring_unlock(d);
+ mem_event_ring_unlock(med);
- notify_via_xen_event_channel(d, d->mem_event.xen_port);
+ notify_via_xen_event_channel(d, med->xen_port);
}
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp)
{
mem_event_front_ring_t *front_ring;
RING_IDX rsp_cons;
- mem_event_ring_lock(d);
+ mem_event_ring_lock(med);
- front_ring = &d->mem_event.front_ring;
+ front_ring = &med->front_ring;
rsp_cons = front_ring->rsp_cons;
/* Copy response */
@@ -134,7 +134,7 @@ void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
front_ring->rsp_cons = rsp_cons;
front_ring->sring->rsp_event = rsp_cons + 1;
- mem_event_ring_unlock(d);
+ mem_event_ring_unlock(med);
}
void mem_event_unpause_vcpus(struct domain *d)
@@ -152,35 +152,35 @@ void mem_event_mark_and_pause(struct vcpu *v)
vcpu_sleep_nosync(v);
}
-void mem_event_put_req_producers(struct domain *d)
+void mem_event_put_req_producers(struct mem_event_domain *med)
{
- mem_event_ring_lock(d);
- d->mem_event.req_producers--;
- mem_event_ring_unlock(d);
+ mem_event_ring_lock(med);
+ med->req_producers--;
+ mem_event_ring_unlock(med);
}
-int mem_event_check_ring(struct domain *d)
+int mem_event_check_ring(struct domain *d, struct mem_event_domain *med)
{
struct vcpu *curr = current;
int free_requests;
int ring_full = 1;
- if ( !d->mem_event.ring_page )
+ if ( !med->ring_page )
return -1;
- mem_event_ring_lock(d);
+ mem_event_ring_lock(med);
- free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
- if ( d->mem_event.req_producers < free_requests )
+ free_requests = RING_FREE_REQUESTS(&med->front_ring);
+ if ( med->req_producers < free_requests )
{
- d->mem_event.req_producers++;
+ med->req_producers++;
ring_full = 0;
}
- if ( (curr->domain->domain_id == d->domain_id) && ring_full )
+ if ( ring_full && (curr->domain == d) )
mem_event_mark_and_pause(curr);
- mem_event_ring_unlock(d);
+ mem_event_ring_unlock(med);
return ring_full;
}
@@ -230,6 +230,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
{
struct domain *dom_mem_event = current->domain;
struct vcpu *v = current;
+ struct mem_event_domain *med = &d->mem_event;
unsigned long ring_addr = mec->ring_addr;
unsigned long shared_addr = mec->shared_addr;
l1_pgentry_t l1e;
@@ -242,7 +243,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
* the cache is in an undefined state and so is the guest
*/
rc = -EBUSY;
- if ( d->mem_event.ring_page )
+ if ( med->ring_page )
break;
/* Currently only EPT is supported */
@@ -270,7 +271,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
break;
rc = -EINVAL;
- if ( mem_event_enable(d, ring_mfn, shared_mfn) != 0 )
+ if ( mem_event_enable(d, med, ring_mfn, shared_mfn) != 0 )
break;
rc = 0;
@@ -279,7 +280,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
case XEN_DOMCTL_MEM_EVENT_OP_DISABLE:
{
- rc = mem_event_disable(d);
+ rc = mem_event_disable(&d->mem_event);
}
break;
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 4b71a002c9..dd4825471e 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -281,12 +281,12 @@ static struct page_info* mem_sharing_alloc_page(struct domain *d,
vcpu_pause_nosync(v);
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
- if(mem_event_check_ring(d)) return page;
+ if(mem_event_check_ring(d, &d->mem_event)) return page;
req.gfn = gfn;
req.p2mt = p2m_ram_shared;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &req);
+ mem_event_put_request(d, &d->mem_event, &req);
return page;
}
@@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct domain *d)
mem_event_response_t rsp;
/* Get request off the ring */
- mem_event_get_response(d, &rsp);
+ mem_event_get_response(&d->mem_event, &rsp);
/* Unpause domain/vcpu */
if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c6cb98b2e5..d8f622ffa3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -755,7 +755,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
mem_event_request_t req;
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d) == 0)
+ if ( mem_event_check_ring(d, &d->mem_event) == 0)
{
/* Send release notification to pager */
memset(&req, 0, sizeof(req));
@@ -763,7 +763,7 @@ void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn)
req.gfn = gfn;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &req);
+ mem_event_put_request(d, &d->mem_event, &req);
}
}
@@ -775,7 +775,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
- if ( mem_event_check_ring(d) )
+ if ( mem_event_check_ring(d, &d->mem_event) )
return;
memset(&req, 0, sizeof(req));
@@ -803,7 +803,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
{
/* gfn is already on its way back and vcpu is not paused */
- mem_event_put_req_producers(d);
+ mem_event_put_req_producers(&d->mem_event);
return;
}
@@ -812,7 +812,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
req.p2mt = p2mt;
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &req);
+ mem_event_put_request(d, &d->mem_event, &req);
}
int p2m_mem_paging_prep(struct domain *d, unsigned long gfn)
@@ -842,7 +842,7 @@ void p2m_mem_paging_resume(struct domain *d)
mfn_t mfn;
/* Pull the response off the ring */
- mem_event_get_response(d, &rsp);
+ mem_event_get_response(&d->mem_event, &rsp);
/* Fix p2m entry if the page was not dropped */
if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) )
@@ -889,7 +889,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
p2m_unlock(p2m);
/* Otherwise, check if there is a memory event listener, and send the message along */
- res = mem_event_check_ring(d);
+ res = mem_event_check_ring(d, &d->mem_event);
if ( res < 0 )
{
/* No listener */
@@ -933,7 +933,7 @@ void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla
req.vcpu_id = v->vcpu_id;
- mem_event_put_request(d, &req);
+ mem_event_put_request(d, &d->mem_event, &req);
/* VCPU paused, mem event request sent */
}
@@ -943,7 +943,7 @@ void p2m_mem_access_resume(struct p2m_domain *p2m)
struct domain *d = p2m->domain;
mem_event_response_t rsp;
- mem_event_get_response(d, &rsp);
+ mem_event_get_response(&d->mem_event, &rsp);
/* Unpause domain */
if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
diff --git a/xen/include/asm-x86/mem_event.h b/xen/include/asm-x86/mem_event.h
index a1bd14921c..7c2c19350a 100644
--- a/xen/include/asm-x86/mem_event.h
+++ b/xen/include/asm-x86/mem_event.h
@@ -26,10 +26,10 @@
/* Pauses VCPU while marking pause flag for mem event */
void mem_event_mark_and_pause(struct vcpu *v);
-int mem_event_check_ring(struct domain *d);
-void mem_event_put_req_producers(struct domain *d);
-void mem_event_put_request(struct domain *d, mem_event_request_t *req);
-void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
+int mem_event_check_ring(struct domain *d, struct mem_event_domain *med);
+void mem_event_put_req_producers(struct mem_event_domain *med);
+void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req);
+void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp);
void mem_event_unpause_vcpus(struct domain *d);
int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,