author     Dario Faggioli <dario.faggioli@citrix.com>    2013-04-17 10:57:32 +0000
committer  Ian Campbell <ian.campbell@citrix.com>        2013-04-17 12:11:14 +0100
commit     b5b79a12c41b5e76af9d47551027b56f210d9029
tree       05125acfb1348b09f63830577687a55f73d0b324
parent     cfcc144ff1ce59a9f93a44bbc89d1e20f5011c3d
xen: allow for explicitly specifying node-affinity
Make it possible to pass the node-affinity of a domain to the hypervisor
from the upper layers, instead of having it always computed automatically.

Note that this also required generalizing the Flask hooks for setting and
getting the affinity, so that they now deal with both vcpu and node
affinity.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Keir Fraser <keir@xen.org>
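Concretely, a caller builds a struct xen_domctl carrying the new sub-op and a
xenctl_bitmap describing the nodes. A hypothetical caller-side sketch (no
toolstack code is part of this diff; issue_domctl() stands in for whatever
mechanism the caller uses to perform the hypercall):

    /* Hypothetical sketch of the caller side of the new domctl. */
    static int set_node_affinity(domid_t domid, struct xenctl_bitmap *nodemap)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_setnodeaffinity,   /* new sub-op, defined below */
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain = domid,                     /* target domain */
        };

        domctl.u.nodeaffinity.nodemap = *nodemap;  /* IN: nodes to bind to */

        return issue_domctl(&domctl);            /* hypothetical helper */
    }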
Diffstat (limited to 'xen/include')
-rw-r--r--xen/include/public/domctl.h13
-rw-r--r--xen/include/xen/nodemask.h14
-rw-r--r--xen/include/xen/sched-if.h2
-rw-r--r--xen/include/xen/sched.h9
4 files changed, 35 insertions, 3 deletions
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 9f5ed48a0e..4c5b2bbbbd 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -280,6 +280,16 @@ typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
+/* Get/set the NUMA node(s) with which the guest has affinity. */
+/* XEN_DOMCTL_setnodeaffinity */
+/* XEN_DOMCTL_getnodeaffinity */
+struct xen_domctl_nodeaffinity {
+ struct xenctl_bitmap nodemap; /* IN */
+};
+typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
+
+
/* Get/set which physical cpus a vcpu can execute on. */
/* XEN_DOMCTL_setvcpuaffinity */
/* XEN_DOMCTL_getvcpuaffinity */
@@ -908,6 +918,8 @@ struct xen_domctl {
#define XEN_DOMCTL_audit_p2m 65
#define XEN_DOMCTL_set_virq_handler 66
#define XEN_DOMCTL_set_broken_page_p2m 67
+#define XEN_DOMCTL_setnodeaffinity 68
+#define XEN_DOMCTL_getnodeaffinity 69
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -921,6 +933,7 @@ struct xen_domctl {
struct xen_domctl_getpageframeinfo getpageframeinfo;
struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
+ struct xen_domctl_nodeaffinity nodeaffinity;
struct xen_domctl_vcpuaffinity vcpuaffinity;
struct xen_domctl_shadow_op shadow_op;
struct xen_domctl_max_mem max_mem;
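The handler for the two new sub-ops lives in xen/common/domctl.c, outside this
diff. A plausible sketch of the set path, assuming the
xenctl_bitmap_to_nodemask() conversion helper that accompanies the
xenctl_bitmap generalization, and with d being the target domain already
looked up from op->domain:

    case XEN_DOMCTL_setnodeaffinity:
    {
        nodemask_t new_affinity;

        /* Copy the caller-supplied bitmap into a hypervisor nodemask_t. */
        ret = xenctl_bitmap_to_nodemask(&new_affinity,
                                        &op->u.nodeaffinity.nodemap);
        if ( ret == 0 )
            ret = domain_set_node_affinity(d, &new_affinity);
    }
    break;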
diff --git a/xen/include/xen/nodemask.h b/xen/include/xen/nodemask.h
index 1b860165ce..2a90dc1aef 100644
--- a/xen/include/xen/nodemask.h
+++ b/xen/include/xen/nodemask.h
@@ -8,8 +8,9 @@
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these nodemasks are based.
*
- * For details of nodemask_scnprintf() and nodemask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of nodemask_scnprintf(), nodelist_scnprintf() and
+ * nodemask_parse(), see bitmap_scnprintf() and bitmap_parse()
+ * in lib/bitmap.c.
*
* The available nodemask operations are:
*
@@ -50,6 +51,7 @@
* unsigned long *nodes_addr(mask) Array of unsigned long's in mask
*
* int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
+ * int nodelist_scnprintf(buf, len, mask) Format nodemask as a list for printing
* int nodemask_parse(ubuf, ulen, mask) Parse ascii string as nodemask
*
* for_each_node_mask(node, mask) for-loop node over mask
@@ -292,6 +294,14 @@ static inline int __cycle_node(int n, const nodemask_t *maskp, int nbits)
#define nodes_addr(src) ((src).bits)
+#define nodelist_scnprintf(buf, len, src) \
+ __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES)
+static inline int __nodelist_scnprintf(char *buf, int len,
+ const nodemask_t *srcp, int nbits)
+{
+ return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+}
+
#if 0
#define nodemask_scnprintf(buf, len, src) \
__nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES)
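The new helper formats a nodemask as a ranged list rather than a hex mask. A
small usage sketch, relying only on helpers this header already provides
(node_set(), NODE_MASK_NONE); like nodemask_scnprintf(), the macro takes the
mask itself and passes its address down:

    char buf[32];
    nodemask_t mask = NODE_MASK_NONE;

    node_set(0, mask);
    node_set(1, mask);
    node_set(3, mask);

    /* Writes the list form "0-1,3" into buf, where the (currently
     * compiled-out) nodemask_scnprintf() would emit a hexadecimal mask. */
    nodelist_scnprintf(buf, sizeof(buf), mask);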
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22cb66..2023ea393b 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -184,6 +184,8 @@ struct scheduler {
struct xen_domctl_scheduler_op *);
int (*adjust_global) (const struct scheduler *,
struct xen_sysctl_scheduler_op *);
+ void (*set_node_affinity) (const struct scheduler *,
+ struct domain *, nodemask_t *);
void (*dump_settings) (const struct scheduler *);
void (*dump_cpu_state) (const struct scheduler *, int);
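set_node_affinity is an optional per-scheduler hook, so a scheduler that wants
to react to node-affinity changes (credit1, in this series) wires it up in its
ops table. A hypothetical skeleton; the real implementation lives in
xen/common/sched_credit.c, which is outside this diff:

    static void
    example_set_node_affinity(const struct scheduler *ops,
                              struct domain *d, nodemask_t *mask)
    {
        /* Refresh whatever per-domain NUMA bookkeeping this scheduler keeps. */
    }

    /* ... and, in its struct scheduler definition:
     *     .set_node_affinity = example_set_node_affinity,
     */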
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index d15d567c9b..ad971d22fe 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -288,6 +288,8 @@ struct domain
/* Does this guest need iommu mappings? */
bool_t need_iommu;
#endif
+ /* Is node-affinity automatically computed? */
+ bool_t auto_node_affinity;
/* Is this guest fully privileged (aka dom0)? */
bool_t is_privileged;
/* Which guest this guest has privileges on */
@@ -365,7 +367,10 @@ struct domain
/* Various mem_events */
struct mem_event_per_domain *mem_event;
- /* Currently computed from union of all vcpu cpu-affinity masks. */
+ /*
+ * Can be specified by the user. If that is not the case, it is
+ * computed from the union of all the vcpu cpu-affinity masks.
+ */
nodemask_t node_affinity;
unsigned int last_alloc_node;
spinlock_t node_affinity_lock;
@@ -435,6 +440,7 @@ static inline void get_knownalive_domain(struct domain *d)
ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}
+int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity);
void domain_update_node_affinity(struct domain *d);
struct domain *domain_create(
@@ -555,6 +561,7 @@ void sched_destroy_domain(struct domain *d);
int sched_move_domain(struct domain *d, struct cpupool *c);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
+void sched_set_node_affinity(struct domain *, nodemask_t *);
int sched_id(void);
void sched_tick_suspend(void);
void sched_tick_resume(void);
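Taken together, the new declarations imply this flow: the domctl handler calls
domain_set_node_affinity(), which either records the explicit mask or falls
back to automatic mode, then notifies the scheduler via
sched_set_node_affinity() and recomputes derived state through
domain_update_node_affinity(). A sketch of a plausible body, using only the
fields this diff adds (the real one is in xen/common/domain.c):

    int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity)
    {
        /* Being affine with no nodes makes no sense. */
        if ( nodes_empty(*affinity) )
            return -EINVAL;

        spin_lock(&d->node_affinity_lock);

        if ( nodes_full(*affinity) )
        {
            /* Affinity with all nodes == "go back to automatic mode". */
            d->auto_node_affinity = 1;
        }
        else
        {
            d->auto_node_affinity = 0;
            d->node_affinity = *affinity;
        }

        spin_unlock(&d->node_affinity_lock);

        /* Let the scheduler know, then recompute dependent state. */
        sched_set_node_affinity(d, &d->node_affinity);
        domain_update_node_affinity(d);

        return 0;
    }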