diff options
author | Dario Faggioli <dario.faggioli@citrix.com> | 2013-04-17 10:57:32 +0000 |
---|---|---|
committer | Ian Campbell <ian.campbell@citrix.com> | 2013-04-17 12:11:14 +0100 |
commit | b5b79a12c41b5e76af9d47551027b56f210d9029 (patch) | |
tree | 05125acfb1348b09f63830577687a55f73d0b324 /xen/include/xen/sched.h | |
parent | cfcc144ff1ce59a9f93a44bbc89d1e20f5011c3d (diff) | |
download | xen-b5b79a12c41b5e76af9d47551027b56f210d9029.tar.gz xen-b5b79a12c41b5e76af9d47551027b56f210d9029.tar.bz2 xen-b5b79a12c41b5e76af9d47551027b56f210d9029.zip |
xen: allow for explicitly specifying node-affinity
Make it possible to pass the node-affinity of a domain to the hypervisor
from the upper layers, instead of always being computed automatically.
Note that this also required generalizing the Flask hooks for setting
and getting the affinity, so that they now deal with both vcpu and
node affinity.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/include/xen/sched.h')
-rw-r--r-- | xen/include/xen/sched.h | 9 |
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index d15d567c9b..ad971d22fe 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -288,6 +288,8 @@ struct domain
     /* Does this guest need iommu mappings? */
     bool_t need_iommu;
 #endif
+    /* is node-affinity automatically computed? */
+    bool_t auto_node_affinity;
     /* Is this guest fully privileged (aka dom0)? */
     bool_t is_privileged;
     /* Which guest this guest has privileges on */
@@ -365,7 +367,10 @@ struct domain
     /* Various mem_events */
     struct mem_event_per_domain *mem_event;
 
-    /* Currently computed from union of all vcpu cpu-affinity masks. */
+    /*
+     * Can be specified by the user. If that is not the case, it is
+     * computed from the union of all the vcpu cpu-affinity masks.
+     */
     nodemask_t node_affinity;
     unsigned int last_alloc_node;
     spinlock_t node_affinity_lock;
@@ -435,6 +440,7 @@ static inline void get_knownalive_domain(struct domain *d)
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
 }
 
+int domain_set_node_affinity(struct domain *d, const nodemask_t *affinity);
 void domain_update_node_affinity(struct domain *d);
 
 struct domain *domain_create(
@@ -555,6 +561,7 @@ void sched_destroy_domain(struct domain *d);
 int sched_move_domain(struct domain *d, struct cpupool *c);
 long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
 long sched_adjust_global(struct xen_sysctl_scheduler_op *);
+void sched_set_node_affinity(struct domain *, nodemask_t *);
 int sched_id(void);
 void sched_tick_suspend(void);
 void sched_tick_resume(void);