author    David Vrabel <david.vrabel@citrix.com>  2012-09-11 12:26:25 +0200
committer David Vrabel <david.vrabel@citrix.com>  2012-09-11 12:26:25 +0200
commit    e626a8c7f06c85584f4285c51bea1a7ed76a25f7
tree      7be7b808b41f877b3749085c5958bba2d62bd517  /xen/arch/x86/domain_build.c
parent    c1fc9e2ecb9111c305e459ddba0ff831ff032ad1
x86: make the dom0_max_vcpus option more flexible
The dom0_max_vcpus command line option only allows the exact number of
VCPUs for dom0 to be set.  It is not possible to say "up to N VCPUs but
no more than the number physically present."

Allow a range for the option to set a minimum number of VCPUs, and a
maximum which does not exceed the number of PCPUs.  For example, with
"dom0_max_vcpus=4-8":

    PCPUs  Dom0 VCPUs
      2        4
      4        4
      6        6
      8        8
     10        8

Existing command lines with "dom0_max_vcpus=N" still work as before
(and are equivalent to dom0_max_vcpus=N-N).

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
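In effect the chosen VCPU count is a simple clamp: start from the number of
online PCPUs, raise it to the configured minimum, then cap it at the
configured maximum (the patch additionally caps it at MAX_VIRT_CPUS).  Below
is a minimal standalone sketch of that rule which reproduces the table above;
the helper effective_dom0_vcpus() and the hard-coded 4-8 range are
illustrative only, not part of the patch:

    #include <stdio.h>

    /* Illustrative stand-ins for the patch's opt_dom0_max_vcpus_{min,max},
     * fixed here to the "dom0_max_vcpus=4-8" example. */
    static unsigned int opt_min = 4;
    static unsigned int opt_max = 8;

    /* Hypothetical helper mirroring the clamp done in alloc_dom0_vcpu0(). */
    static unsigned int effective_dom0_vcpus(unsigned int pcpus)
    {
        unsigned int vcpus = pcpus;

        if ( opt_min > vcpus )
            vcpus = opt_min;   /* raise to the configured minimum */
        if ( opt_max < vcpus )
            vcpus = opt_max;   /* cap at the configured maximum */
        return vcpus;          /* the patch also caps at MAX_VIRT_CPUS */
    }

    int main(void)
    {
        static const unsigned int pcpus[] = { 2, 4, 6, 8, 10 };
        unsigned int i;

        printf("PCPUs  Dom0 VCPUs\n");
        for ( i = 0; i < sizeof(pcpus) / sizeof(pcpus[0]); i++ )
            printf("%5u  %10u\n", pcpus[i], effective_dom0_vcpus(pcpus[i]));
        return 0;
    }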
Diffstat (limited to 'xen/arch/x86/domain_build.c')
-rw-r--r--  xen/arch/x86/domain_build.c  |  44
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index d4c7bd6750..129099c456 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -82,20 +82,40 @@ static void __init parse_dom0_mem(const char *s)
 }
 custom_param("dom0_mem", parse_dom0_mem);
 
-static unsigned int __initdata opt_dom0_max_vcpus;
-integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
+static unsigned int __initdata opt_dom0_max_vcpus_min = 1;
+static unsigned int __initdata opt_dom0_max_vcpus_max = UINT_MAX;
+
+static void __init parse_dom0_max_vcpus(const char *s)
+{
+    if ( *s == '-' )                   /* -M */
+        opt_dom0_max_vcpus_max = simple_strtoul(s + 1, &s, 0);
+    else                               /* N, N-, or N-M */
+    {
+        opt_dom0_max_vcpus_min = simple_strtoul(s, &s, 0);
+        if ( *s++ == '\0' )            /* N */
+            opt_dom0_max_vcpus_max = opt_dom0_max_vcpus_min;
+        else if ( *s != '\0' )         /* N-M */
+            opt_dom0_max_vcpus_max = simple_strtoul(s, &s, 0);
+    }
+}
+custom_param("dom0_max_vcpus", parse_dom0_max_vcpus);
 
 struct vcpu *__init alloc_dom0_vcpu0(void)
 {
-    if ( opt_dom0_max_vcpus == 0 )
-        opt_dom0_max_vcpus = num_cpupool_cpus(cpupool0);
-    if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
-        opt_dom0_max_vcpus = MAX_VIRT_CPUS;
+    unsigned max_vcpus;
+
+    max_vcpus = num_cpupool_cpus(cpupool0);
+    if ( opt_dom0_max_vcpus_min > max_vcpus )
+        max_vcpus = opt_dom0_max_vcpus_min;
+    if ( opt_dom0_max_vcpus_max < max_vcpus )
+        max_vcpus = opt_dom0_max_vcpus_max;
+    if ( max_vcpus > MAX_VIRT_CPUS )
+        max_vcpus = MAX_VIRT_CPUS;
 
-    dom0->vcpu = xzalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+    dom0->vcpu = xzalloc_array(struct vcpu *, max_vcpus);
     if ( !dom0->vcpu )
         return NULL;
-    dom0->max_vcpus = opt_dom0_max_vcpus;
+    dom0->max_vcpus = max_vcpus;
 
     return alloc_vcpu(dom0, 0, 0);
 }
@@ -185,11 +205,11 @@ static unsigned long __init compute_dom0_nr_pages(
     unsigned long max_pages = dom0_max_nrpages;
 
     /* Reserve memory for further dom0 vcpu-struct allocations... */
-    avail -= (opt_dom0_max_vcpus - 1UL)
+    avail -= (d->max_vcpus - 1UL)
              << get_order_from_bytes(sizeof(struct vcpu));
     /* ...and compat_l4's, if needed. */
     if ( is_pv_32on64_domain(d) )
-        avail -= opt_dom0_max_vcpus - 1;
+        avail -= d->max_vcpus - 1;
 
     /* Reserve memory for iommu_dom0_init() (rough estimate). */
     if ( iommu_enabled )
@@ -889,10 +909,10 @@ int __init construct_dom0(
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
         shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
 
-    printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
+    printk("Dom0 has maximum %u VCPUs\n", d->max_vcpus);
 
     cpu = cpumask_first(cpupool0->cpu_valid);
-    for ( i = 1; i < opt_dom0_max_vcpus; i++ )
+    for ( i = 1; i < d->max_vcpus; i++ )
     {
         cpu = cpumask_cycle(cpu, cpupool0->cpu_valid);
         (void)alloc_vcpu(d, i, cpu);
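For a rough feel of the ranges the new parser produces, here is an
approximate standalone sketch of its logic; it substitutes the C library's
strtoul() for Xen's simple_strtoul(), and the names parse(), vcpus_min and
vcpus_max are illustrative stand-ins, not Xen identifiers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    static unsigned int vcpus_min;
    static unsigned int vcpus_max;

    /* Rough reimplementation of parse_dom0_max_vcpus() using strtoul(). */
    static void parse(const char *s)
    {
        char *e;

        vcpus_min = 1;
        vcpus_max = UINT_MAX;          /* "no explicit upper bound" */
        if ( *s == '-' )               /* -M */
            vcpus_max = strtoul(s + 1, &e, 0);
        else                           /* N, N-, or N-M */
        {
            vcpus_min = strtoul(s, &e, 0);
            s = e;
            if ( *s++ == '\0' )        /* N */
                vcpus_max = vcpus_min;
            else if ( *s != '\0' )     /* N-M */
                vcpus_max = strtoul(s, &e, 0);
        }
    }

    int main(void)
    {
        static const char *const cases[] = { "4", "4-8", "4-", "-8" };
        unsigned int i;

        for ( i = 0; i < sizeof(cases) / sizeof(cases[0]); i++ )
        {
            parse(cases[i]);
            printf("dom0_max_vcpus=%-3s -> min %u, max %u\n",
                   cases[i], vcpus_min, vcpus_max);
        }
        return 0;
    }

A resulting max of UINT_MAX (the "4-" case) just means no explicit upper
bound was given; alloc_dom0_vcpu0() subsequently clamps the count to the
number of PCPUs in cpupool0 and to MAX_VIRT_CPUS.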