diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2009-11-10 13:03:42 +0000 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2009-11-10 13:03:42 +0000 |
commit | fee63693e0ea23d24f239a05c0607021de02dd7f (patch) | |
tree | a51815bff41fce05290d8fb31c613e561ec4cce2 /xen/arch/x86/platform_hypercall.c | |
parent | 66cc31ec352b38777396bd8689139e932c396e94 (diff) | |
download | xen-fee63693e0ea23d24f239a05c0607021de02dd7f.tar.gz xen-fee63693e0ea23d24f239a05c0607021de02dd7f.tar.bz2 xen-fee63693e0ea23d24f239a05c0607021de02dd7f.zip |
Hypercall to expose physical CPU information.
It also makes some changes to the current cpu online/offline logic:
1) Firstly, cpu online/offline will trigger a vIRQ to dom0 for status
change notifications.
2) It also adds an interface to the platform operation to online/offline a
physical CPU. Currently the cpu online/offline interface is in sysctl,
which can't be triggered from the kernel. With this change, it is possible
to trigger cpu online/offline in dom0 through sysfs interface.
Signed-off-by: Jiang, Yunhong <yunhong.jiang@intel.com>
Diffstat (limited to 'xen/arch/x86/platform_hypercall.c')
-rw-r--r-- | xen/arch/x86/platform_hypercall.c | 98 |
1 files changed, 97 insertions, 1 deletions
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c index 1dc0017a23..5e4adb0b37 100644 --- a/xen/arch/x86/platform_hypercall.c +++ b/xen/arch/x86/platform_hypercall.c @@ -53,6 +53,12 @@ static long cpu_frequency_change_helper(void *data) return cpu_frequency_change(this_cpu(freq)); } +static long cpu_down_helper(void *data) +{ + int cpu = (unsigned long)data; + return cpu_down(cpu); +} + ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op) { ret_t ret = 0; @@ -385,7 +391,97 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op) break; } break; - + + case XENPF_get_cpuinfo: + { + int i; + struct xenpf_pcpu_info *g_info; + struct xen_physical_cpuinfo pcpu; + XEN_GUEST_HANDLE(xen_physical_cpuinfo_t) g_cpus; + + g_info = &op->u.pcpu_info; + if (g_info->info_num <= 0 ) + { + op->u.pcpu_info.max_present = last_cpu(cpu_present_map); + op->u.pcpu_info.max_possible = last_cpu(cpu_possible_map); + goto done; + } + + guest_from_compat_handle(g_cpus, g_info->info); + + spin_lock(&cpu_add_remove_lock); + + ret = -EFAULT; + for (i = 0; i < g_info->info_num; i++) + { + if (copy_from_guest_offset(&pcpu, g_cpus, i, 1) ) + goto out; + + if ( (pcpu.xen_cpuid >= NR_CPUS) || + (pcpu.xen_cpuid < 0) || + !cpu_present(pcpu.xen_cpuid) ) + { + pcpu.flags |= XEN_PCPU_FLAGS_INVALID; + } + else + { + pcpu.apic_id = x86_cpu_to_apicid[pcpu.xen_cpuid]; + pcpu.acpi_id = acpi_get_processor_id(pcpu.xen_cpuid); + ASSERT(pcpu.apic_id != BAD_APICID); + if (cpu_online(pcpu.xen_cpuid)) + pcpu.flags |= XEN_PCPU_FLAGS_ONLINE; + } + + if ( copy_to_guest_offset(g_cpus, i, &pcpu, 1) ) + goto out; + } + op->u.pcpu_info.max_present = last_cpu(cpu_present_map); + op->u.pcpu_info.max_possible = last_cpu(cpu_possible_map); + spin_unlock(&cpu_add_remove_lock); +done: + ret = copy_to_guest(u_xenpf_op, op, 1) ? 
-EFAULT : 0; + } + break; + + case XENPF_resource_hotplug: + { + int cpu; + + switch ( op->u.resource.sub_cmd) + { + case XEN_CPU_online: + cpu = op->u.resource.u.cpu_ol.cpuid; + if (!cpu_present(cpu)) + { + ret = -EINVAL; + break; + } + else if (cpu_online(cpu)) + { + ret = 0; + break; + } + + ret = cpu_up(cpu); + break; + case XEN_CPU_offline: + cpu = op->u.resource.u.cpu_ol.cpuid; + if (!cpu_present(cpu)) + { + ret = -EINVAL; + break; + } else if (!cpu_online(cpu)) + { + ret = 0; + break; + } + ret = continue_hypercall_on_cpu( + 0, cpu_down_helper, (void *)(unsigned long)cpu); + break; + } + } + break; + default: ret = -ENOSYS; break; |