aboutsummaryrefslogtreecommitdiffstats
path: root/xen/include/public/domctl.h
diff options
context:
space:
mode:
authorAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-02-10 16:07:07 +0000
committerAndres Lagar-Cavilla <andres@lagarcavilla.org>2012-02-10 16:07:07 +0000
commit59a36d66a5d50a66f8a629b334a0cbd7af360f80 (patch)
treeeb7a565b5e32cc6542dbf49205e26d9772dc46b9 /xen/include/public/domctl.h
parent2da4c17b3b76d190da3dda35aa24910ff69984e5 (diff)
downloadxen-59a36d66a5d50a66f8a629b334a0cbd7af360f80.tar.gz
xen-59a36d66a5d50a66f8a629b334a0cbd7af360f80.tar.bz2
xen-59a36d66a5d50a66f8a629b334a0cbd7af360f80.zip
Use memops for mem paging, sharing, and access, instead of domctls
Per page operations in the paging, sharing, and access tracking subsystems are all implemented with domctls (e.g. a domctl to evict one page, or to share one page). Under heavy load, the domctl path reveals a lack of scalability. The domctl lock serializes dom0's vcpus in the hypervisor. When performing thousands of per-page operations on dozens of domains, these vcpus will spin in the hypervisor. Beyond the aggressive locking, an added inefficiency of blocking vcpus in the domctl lock is that dom0 is prevented from re-scheduling any of its other work-starved processes. We retain the domctl interface for setting up and tearing down paging/sharing/mem access for a domain. But we migrate all the per page operations to use the memory_op hypercalls (e.g. XENMEM_*). Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla> Signed-off-by: Adin Scannell <adin@scannell.ca> Acked-by: Tim Deegan <tim@xen.org> Acked-by: Ian Jackson <ian.jackson@eu.citrix.com> Committed-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/include/public/domctl.h')
-rw-r--r--xen/include/public/domctl.h90
1 file changed, 19 insertions, 71 deletions
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 16f9fd2390..546b22fc45 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -711,47 +711,43 @@ struct xen_domctl_gdbsx_domstatus {
/*
* Domain memory paging
- * Page memory in and out.
+ * Page memory in and out.
+ * Domctl interface to set up and tear down the
+ * pager<->hypervisor interface. Use XENMEM_paging_op*
+ * to perform per-page operations.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 2
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 3
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 4
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 5
/*
* Access permissions.
*
+ * As with paging, use the domctl for teardown/setup of the
+ * helper<->hypervisor interface.
+ *
* There are HVM hypercalls to set the per-page access permissions of every
* page in a domain. When one of these permissions--independent, read,
* write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. (See public/mem_event.h) The memory event
- * handler can then resume the VCPU and redo the access with an
- * ACCESS_RESUME mode for the following domctl.
+ * is sent with what happened. (See public/mem_event.h) .
+ *
+ * The memory event handler can then resume the VCPU and redo the access
+ * with a XENMEM_access_op_resume hypercall.
*/
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0
#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1
-#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME 2
+/* Use for teardown/setup of helper<->hypervisor interface for paging,
+ * access and sharing.*/
struct xen_domctl_mem_event_op {
uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
- union {
- /* OP_ENABLE IN: Virtual address of shared page */
- uint64_aligned_t shared_addr;
- /* PAGING_PREP IN: buffer to immediately fill page in */
- uint64_aligned_t buffer;
- } u;
+ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */
uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */
-
- /* Other OPs */
- uint64_aligned_t gfn; /* IN: gfn of page being operated on */
};
typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
@@ -759,63 +755,15 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
/*
* Memory sharing operations
*/
-/* XEN_DOMCTL_mem_sharing_op */
-
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3
-
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_CONTROL 0
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GFN 1
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_NOMINATE_GREF 2
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_SHARE 3
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_RESUME 4
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GFN 5
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_MFN 6
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DEBUG_GREF 7
-#define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ADD_PHYSMAP 8
-
-#define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10)
-#define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9)
-
-/* The following allows sharing of grant refs. This is useful
- * for sharing utilities sitting as "filters" in IO backends
- * (e.g. memshr + blktap(2)). The IO backend is only exposed
- * to grant references, and this allows sharing of the grefs */
-#define XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF_FLAG (1ULL << 62)
-
-#define XEN_DOMCTL_MEM_SHARING_FIELD_MAKE_GREF(field, val) \
- (field) = (XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF_FLAG | val)
-#define XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF(field) \
- ((field) & XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF_FLAG)
-#define XEN_DOMCTL_MEM_SHARING_FIELD_GET_GREF(field) \
- ((field) & (~XEN_DOMCTL_MEM_SHARING_FIELD_IS_GREF_FLAG))
+/* XEN_DOMCTL_mem_sharing_op.
+ * The CONTROL sub-domctl is used for bringup/teardown. */
+#define XEN_DOMCTL_MEM_SHARING_CONTROL 0
struct xen_domctl_mem_sharing_op {
- uint8_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */
+ uint8_t op; /* XEN_DOMCTL_MEM_SHARING_* */
union {
- uint8_t enable; /* OP_CONTROL */
-
- struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
- union {
- uint64_aligned_t gfn; /* IN: gfn to nominate */
- uint32_t grant_ref; /* IN: grant ref to nominate */
- } u;
- uint64_aligned_t handle; /* OUT: the handle */
- } nominate;
- struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
- uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
- uint64_aligned_t source_handle; /* IN: handle to the source page */
- domid_t client_domain; /* IN: the client domain id */
- uint64_aligned_t client_gfn; /* IN: the client gfn */
- uint64_aligned_t client_handle; /* IN: handle to the client page */
- } share;
- struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
- union {
- uint64_aligned_t gfn; /* IN: gfn to debug */
- uint64_aligned_t mfn; /* IN: mfn to debug */
- grant_ref_t gref; /* IN: gref to debug */
- } u;
- } debug;
+ uint8_t enable; /* CONTROL */
} u;
};
typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;