diff options
author | Dario Faggioli <dario.faggioli@citrix.com> | 2013-04-17 10:57:32 +0000 |
---|---|---|
committer | Ian Campbell <ian.campbell@citrix.com> | 2013-04-17 12:11:14 +0100 |
commit | b5b79a12c41b5e76af9d47551027b56f210d9029 (patch) | |
tree | 05125acfb1348b09f63830577687a55f73d0b324 /xen/common/sched_credit.c | |
parent | cfcc144ff1ce59a9f93a44bbc89d1e20f5011c3d (diff) | |
download | xen-b5b79a12c41b5e76af9d47551027b56f210d9029.tar.gz xen-b5b79a12c41b5e76af9d47551027b56f210d9029.tar.bz2 xen-b5b79a12c41b5e76af9d47551027b56f210d9029.zip |
xen: allow for explicitly specifying node-affinity
Make it possible to pass the node-affinity of a domain to the hypervisor
from the upper layers, instead of always being computed automatically.
Note that this also required generalizing the Flask hooks for setting
and getting the affinity, so that they now deal with both vcpu and
node affinity.
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Acked-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'xen/common/sched_credit.c')
-rw-r--r-- | xen/common/sched_credit.c | 48 |
1 file changed, 44 insertions, 4 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index be0b4b0c6c..318831054d 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -261,17 +261,50 @@ __runq_remove(struct csched_vcpu *svc) list_del_init(&svc->runq_elem); } +/* + * Translates node-affinity mask into a cpumask, so that we can use it during + * actual scheduling. That of course will contain all the cpus from all the + * set nodes in the original node-affinity mask. + * + * Note that any serialization needed to access mask safely is complete + * responsibility of the caller of this function/hook. + */ +static void csched_set_node_affinity( + const struct scheduler *ops, + struct domain *d, + nodemask_t *mask) +{ + struct csched_dom *sdom; + int node; + + /* Skip idle domain since it doesn't even have a node_affinity_cpumask */ + if ( unlikely(is_idle_domain(d)) ) + return; + + sdom = CSCHED_DOM(d); + cpumask_clear(sdom->node_affinity_cpumask); + for_each_node_mask( node, *mask ) + cpumask_or(sdom->node_affinity_cpumask, sdom->node_affinity_cpumask, + &node_to_cpumask(node)); +} + #define for_each_csched_balance_step(step) \ for ( (step) = 0; (step) <= CSCHED_BALANCE_CPU_AFFINITY; (step)++ ) /* * vcpu-affinity balancing is always necessary and must never be skipped. - * OTOH, if a domain's node-affinity spans all the nodes, we can safely - * avoid dealing with node-affinity entirely. + * OTOH, if a domain's node-affinity is said to be automatically computed + * (or if it just spans all the nodes), we can safely avoid dealing with + * node-affinity entirely. Ah, node-affinity is also deemed meaningless + * in case it has empty intersection with the vcpu's vcpu-affinity, as it + * would mean trying to schedule it on _no_ pcpu! 
*/ -#define __vcpu_has_node_affinity(vc) \ - ( !cpumask_full(CSCHED_DOM(vc->domain)->node_affinity_cpumask) ) +#define __vcpu_has_node_affinity(vc) \ + ( !(cpumask_full(CSCHED_DOM(vc->domain)->node_affinity_cpumask) \ + || !cpumask_intersects(vc->cpu_affinity, \ + CSCHED_DOM(vc->domain)->node_affinity_cpumask) \ + || vc->domain->auto_node_affinity == 1) ) /* * Each csched-balance step uses its own cpumask. This function determines @@ -284,8 +317,13 @@ static void csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask) { if ( step == CSCHED_BALANCE_NODE_AFFINITY ) + { cpumask_and(mask, CSCHED_DOM(vc->domain)->node_affinity_cpumask, vc->cpu_affinity); + + if ( unlikely(cpumask_empty(mask)) ) + cpumask_copy(mask, vc->cpu_affinity); + } else /* step == CSCHED_BALANCE_CPU_AFFINITY */ cpumask_copy(mask, vc->cpu_affinity); } @@ -1898,6 +1936,8 @@ const struct scheduler sched_credit_def = { .adjust = csched_dom_cntl, .adjust_global = csched_sys_cntl, + .set_node_affinity = csched_set_node_affinity, + .pick_cpu = csched_cpu_pick, .do_schedule = csched_schedule, |