author    Jan Beulich <jbeulich@novell.com>  2011-04-05 13:00:54 +0100
committer Jan Beulich <jbeulich@novell.com>  2011-04-05 13:00:54 +0100
commit    4551775df58d42e2dcfd2a8ac4bcc713709e8b81 (patch)
tree      5e10216631c44c2ec3102f85ca58646d766f46e5 /xen/include
parent    2bbb3d0fe9149b302b42c98cffe3fbf1aa286548 (diff)
Remove direct cpumask_t members from struct vcpu and struct domain
The CPU masks embedded in these structures prevent NR_CPUS-independent sizing of these structures. Basic concept (in xen/include/xen/cpumask.h) taken from recent Linux.

For scalability purposes, many other uses of cpumask_t should be replaced by cpumask_var_t, particularly local variables of functions. This implies that no functions should have by-value cpumask_t parameters, and that the whole old cpumask interface (cpus_...()) should go away in favor of the new (cpumask_...()) one.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
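A minimal sketch of the conversion pattern this commit establishes (the struct and function names below are hypothetical, for illustration only): a mask that used to be embedded in a structure, and hence sized by NR_CPUS at compile time, becomes a cpumask_var_t that the owning object must allocate and free explicitly.

    #include <xen/cpumask.h>
    #include <xen/errno.h>

    /* Hypothetical example structure -- not part of this commit. */
    struct widget {
        /* Before: cpumask_t online;  (NR_CPUS bits embedded in the struct) */
        cpumask_var_t online;  /* after: pointer, or 1-element array when NR_CPUS is small */
    };

    static int widget_init(struct widget *w)
    {
        /* zalloc_cpumask_var() allocates (in the pointer case) and clears. */
        if ( !zalloc_cpumask_var(&w->online) )
            return -ENOMEM;
        return 0;
    }

    static void widget_destroy(struct widget *w)
    {
        free_cpumask_var(w->online);  /* no-op in the array case */
    }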
Diffstat (limited to 'xen/include')
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmcs.h |  2
-rw-r--r--  xen/include/xen/cpumask.h          | 81
-rw-r--r--  xen/include/xen/sched.h            | 10
3 files changed, 77 insertions(+), 16 deletions(-)
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index be9ca8ffd4..eb3a149dbb 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -68,7 +68,7 @@ struct vmx_domain {
};
u64 eptp;
} ept_control;
- cpumask_t ept_synced;
+ cpumask_var_t ept_synced;
};
#define ept_get_wl(d) \
diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
index f5ff64ad3a..031c83752b 100644
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -81,24 +81,26 @@
typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
-static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
+#define cpu_set(cpu, dst) cpumask_set_cpu(cpu, &(dst))
+static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
-#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
-static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+#define cpu_clear(cpu, dst) cpumask_clear_cpu(cpu, &(dst))
+static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
+#define cpumask_setall(dst) __cpus_setall(dst, NR_CPUS)
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
+#define cpumask_clear(dst) __cpus_clear(dst, NR_CPUS)
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
@@ -109,18 +111,21 @@ static inline void __cpus_clear(cpumask_t *dstp, int nbits)
#define cpumask_test_cpu(cpu, cpumask) test_bit(cpu, (cpumask)->bits)
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
-static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+#define cpu_test_and_set(cpu, cpumask) \
+ cpumask_test_and_set_cpu(cpu, &(cpumask))
+static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
-#define cpu_test_and_clear(cpu, cpumask) __cpu_test_and_clear((cpu), &(cpumask))
-static inline int __cpu_test_and_clear(int cpu, cpumask_t *addr)
+#define cpu_test_and_clear(cpu, cpumask) \
+ cpumask_test_and_clear_cpu(cpu, &(cpumask))
+static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
{
return test_and_clear_bit(cpu, addr->bits);
}
+#define cpumask_and(dst, src1, src2) __cpus_and(dst, src1, src2, NR_CPUS)
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
@@ -128,6 +133,7 @@ static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define cpumask_or(dst, src1, src2) __cpus_or(dst, src1, src2, NR_CPUS)
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
@@ -135,6 +141,7 @@ static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define cpumask_xor(dst, src1, src2) __cpus_xor(dst, src1, src2, NR_CPUS)
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
@@ -142,6 +149,7 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define cpumask_andnot(dst, src1, src2) __cpus_andnot(dst, src1, src2, NR_CPUS)
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
@@ -150,6 +158,7 @@ static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define cpumask_complement(dst, src) __cpus_complement(dst, src, NR_CPUS)
#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
const cpumask_t *srcp, int nbits)
@@ -186,6 +195,7 @@ static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
return bitmap_empty(srcp->bits, nbits);
}
+#define cpumask_full(cpumask) __cpus_full(cpumask, NR_CPUS)
#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
@@ -199,8 +209,8 @@ static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
return bitmap_weight(srcp->bits, nbits);
}
-#define cpus_copy(dest, src) __cpus_copy(&(dest), &(src))
-static inline void __cpus_copy(cpumask_t *dstp, const cpumask_t *srcp)
+#define cpus_copy(dest, src) cpumask_copy(&(dest), &(src))
+static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
{
bitmap_copy(dstp->bits, srcp->bits, NR_CPUS);
}
@@ -322,6 +332,57 @@ static inline int __cpulist_scnprintf(char *buf, int len,
return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ */
+#if NR_CPUS > 2 * BITS_PER_LONG
+#include <xen/xmalloc.h>
+
+typedef cpumask_t *cpumask_var_t;
+
+static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
+{
+ return (*mask = xmalloc(cpumask_t)) != NULL;
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+ xfree(mask);
+}
+#else
+typedef cpumask_t cpumask_var_t[1];
+
+static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
+{
+ return 1;
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif
+
+static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
+{
+ if (!alloc_cpumask_var(mask))
+ return 0;
+ cpumask_clear(*mask);
+ return 1;
+}
+
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
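As a caller-side illustration of the pattern the cpumask_var_t comment above describes (this helper is hypothetical, and note that, unlike the Linux example quoted in that comment, Xen's alloc_cpumask_var() takes no GFP argument; cpumask_weight() is assumed to be among the cpumask_...() wrappers this patch introduces):

    #include <xen/cpumask.h>
    #include <xen/errno.h>
    #include <xen/sched.h>

    /* Hypothetical helper: count the online CPUs a vCPU may run on. */
    static int count_runnable_cpus(const struct vcpu *v)
    {
        cpumask_var_t tmpmask;  /* heap pointer if NR_CPUS is large, else an on-stack array */
        int n;

        if ( !alloc_cpumask_var(&tmpmask) )
            return -ENOMEM;

        cpumask_and(tmpmask, v->cpu_affinity, &cpu_online_map);
        n = cpumask_weight(tmpmask);

        free_cpumask_var(tmpmask);
        return n;
    }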
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c86759596f..17fac57dfd 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -161,12 +161,12 @@ struct vcpu
spinlock_t virq_lock;
/* Bitmask of CPUs on which this VCPU may run. */
- cpumask_t cpu_affinity;
+ cpumask_var_t cpu_affinity;
/* Used to change affinity temporarily. */
- cpumask_t cpu_affinity_tmp;
+ cpumask_var_t cpu_affinity_tmp;
/* Bitmask of CPUs which are holding onto this VCPU's state. */
- cpumask_t vcpu_dirty_cpumask;
+ cpumask_var_t vcpu_dirty_cpumask;
/* Tasklet for continue_hypercall_on_cpu(). */
struct tasklet continue_hypercall_tasklet;
@@ -289,7 +289,7 @@ struct domain
struct vcpu **vcpu;
/* Bitmask of CPUs which are holding onto this domain's state. */
- cpumask_t domain_dirty_cpumask;
+ cpumask_var_t domain_dirty_cpumask;
struct arch_domain arch;
@@ -641,7 +641,7 @@ void watchdog_domain_destroy(struct domain *d);
#define is_hvm_domain(d) ((d)->is_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
- cpus_weight((v)->cpu_affinity) == 1)
+ cpumask_weight((v)->cpu_affinity) == 1)
#define need_iommu(d) ((d)->need_iommu)
void set_vcpu_migration_delay(unsigned int delay);
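The sched.h hunk above also shows the calling-convention shift the commit message asks for: the old cpus_...() macros took masks by value, while the cpumask_...() replacements take pointers, which a cpumask_var_t member supplies directly. A before/after sketch (the wrapper function is hypothetical; the field and accessor names come from the diff):

    /* Hypothetical wrapper around the test used by is_pinned_vcpu() above. */
    static inline int vcpu_has_single_cpu(const struct vcpu *v)
    {
        /* Old interface (being phased out): by-value macro on an embedded mask.
         *     return cpus_weight(v->cpu_affinity) == 1;
         * New interface: pointer-based; a cpumask_var_t already is a pointer
         * (or an array that decays to one when NR_CPUS is small). */
        return cpumask_weight(v->cpu_affinity) == 1;
    }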