diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-03-17 09:17:27 +0000 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-03-17 09:17:27 +0000 |
commit | 5a4d4554c3014ceaab8f3b21f3357ce7200956e3 (patch) | |
tree | d2954276e80ac9af1fff0ea6ff1a976c6759814a /tools/libxc/xc_domain.c | |
parent | c4b4b6ac3baec64680ab3cb761d6691293ae0b1c (diff) | |
download | xen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.tar.gz xen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.tar.bz2 xen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.zip |
libxc: Support set affinity for more than 64 CPUs.
There are more than 64 CPUs on new Intel platforms, especially on NUMA
systems, so we need to break the previous pcpu limit (64) when setting
the affinity of a VCPU.
Signed-off-by: James (song wei) <jsong@novell.com>
Diffstat (limited to 'tools/libxc/xc_domain.c')
-rw-r--r-- | tools/libxc/xc_domain.c | 37 |
1 files changed, 26 insertions, 11 deletions
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c index bba147bad3..0329fdbf68 100644 --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -98,23 +98,28 @@ int xc_domain_shutdown(int xc_handle, int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t cpumap) + uint64_t *cpumap, int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t *local = malloc(cpusize); + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_setvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; - bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); + bitmap_64_to_byte(local, cpumap, cpusize * 8); set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; - if ( lock_pages(local, sizeof(local)) != 0 ) + if ( lock_pages(local, cpusize) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -122,9 +127,10 @@ int xc_vcpu_setaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); - unlock_pages(local, sizeof(local)); + unlock_pages(local, cpusize); out: + free(local); return ret; } @@ -132,18 +138,26 @@ int xc_vcpu_setaffinity(int xc_handle, int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t *cpumap) + uint64_t *cpumap, + int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t * local = malloc(cpusize); + + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_getvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; + set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; if ( lock_pages(local, sizeof(local)) != 0 ) { @@ 
-154,8 +168,9 @@ int xc_vcpu_getaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); unlock_pages(local, sizeof (local)); - bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); - out: + bitmap_byte_to_64(cpumap, local, cpusize * 8); +out: + free(local); return ret; } |