aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2010-03-17 09:17:27 +0000
committerKeir Fraser <keir.fraser@citrix.com>2010-03-17 09:17:27 +0000
commit5a4d4554c3014ceaab8f3b21f3357ce7200956e3 (patch)
treed2954276e80ac9af1fff0ea6ff1a976c6759814a
parentc4b4b6ac3baec64680ab3cb761d6691293ae0b1c (diff)
downloadxen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.tar.gz
xen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.tar.bz2
xen-5a4d4554c3014ceaab8f3b21f3357ce7200956e3.zip
libxc: Support set affinity for more than 64 CPUs.
There can be more than 64 CPUs on new Intel platforms, especially on NUMA systems, so we need to break the pCPU limit (which is 64) when setting the affinity of a VCPU. Signed-off-by: James (song wei) <jsong@novell.com>
-rw-r--r--tools/libxc/xc_domain.c37
-rw-r--r--tools/libxc/xenctrl.h6
-rw-r--r--tools/python/xen/lowlevel/xc/xc.c69
3 files changed, 83 insertions, 29 deletions
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index bba147bad3..0329fdbf68 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -98,23 +98,28 @@ int xc_domain_shutdown(int xc_handle,
int xc_vcpu_setaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t cpumap)
+ uint64_t *cpumap, int cpusize)
{
DECLARE_DOMCTL;
int ret = -1;
- uint8_t local[sizeof (cpumap)];
+ uint8_t *local = malloc(cpusize);
+ if(local == NULL)
+ {
+ PERROR("Could not alloc memory for Xen hypercall");
+ goto out;
+ }
domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8);
+ bitmap_64_to_byte(local, cpumap, cpusize * 8);
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
- if ( lock_pages(local, sizeof(local)) != 0 )
+ if ( lock_pages(local, cpusize) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
goto out;
@@ -122,9 +127,10 @@ int xc_vcpu_setaffinity(int xc_handle,
ret = do_domctl(xc_handle, &domctl);
- unlock_pages(local, sizeof(local));
+ unlock_pages(local, cpusize);
out:
+ free(local);
return ret;
}
@@ -132,18 +138,26 @@ int xc_vcpu_setaffinity(int xc_handle,
int xc_vcpu_getaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap)
+ uint64_t *cpumap,
+ int cpusize)
{
DECLARE_DOMCTL;
int ret = -1;
- uint8_t local[sizeof (cpumap)];
+ uint8_t * local = malloc(cpusize);
+
+ if(local == NULL)
+ {
+ PERROR("Could not alloc memory for Xen hypercall");
+ goto out;
+ }
domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
+
set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8;
+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
if ( lock_pages(local, sizeof(local)) != 0 )
{
@@ -154,8 +168,9 @@ int xc_vcpu_getaffinity(int xc_handle,
ret = do_domctl(xc_handle, &domctl);
unlock_pages(local, sizeof (local));
- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
- out:
+ bitmap_byte_to_64(cpumap, local, cpusize * 8);
+out:
+ free(local);
return ret;
}
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 77566e27ff..1cb3ca1a01 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -309,11 +309,13 @@ int xc_domain_shutdown(int xc_handle,
int xc_vcpu_setaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t cpumap);
+ uint64_t *cpumap,
+ int cpusize);
int xc_vcpu_getaffinity(int xc_handle,
uint32_t domid,
int vcpu,
- uint64_t *cpumap);
+ uint64_t *cpumap,
+ int cpusize);
/**
* This function will return information about one or more domains. It is
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 09bb260800..9b1d4e253a 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -215,35 +215,54 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
{
uint32_t dom;
int vcpu = 0, i;
- uint64_t cpumap = ~0ULL;
+ uint64_t *cpumap;
PyObject *cpulist = NULL;
+ int nr_cpus, size;
+ xc_physinfo_t info;
+ xc_cpu_to_node_t map[1];
+ uint64_t cpumap_size = sizeof(cpumap);
static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
+
if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list,
&dom, &vcpu, &cpulist) )
return NULL;
+ set_xen_guest_handle(info.cpu_to_node, map);
+ info.max_cpu_id = 1;
+ if ( xc_physinfo(self->xc_handle, &info) != 0 )
+ return pyxc_error_to_exception();
+
+ nr_cpus = info.nr_cpus;
+
+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
+ cpumap = malloc(cpumap_size * size);
+ if(cpumap == NULL)
+ return pyxc_error_to_exception();
+
+
if ( (cpulist != NULL) && PyList_Check(cpulist) )
{
- cpumap = 0ULL;
+ for ( i = 0; i < size; i++)
+ {
+ cpumap[i] = 0ULL;
+ }
for ( i = 0; i < PyList_Size(cpulist); i++ )
{
long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i));
- if ( cpu >= 64 )
- {
- errno = EINVAL;
- PyErr_SetFromErrno(xc_error_obj);
- return NULL;
- }
- cpumap |= (uint64_t)1 << cpu;
+ *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
}
}
- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 )
+ {
+ free(cpumap);
return pyxc_error_to_exception();
-
+ }
+
Py_INCREF(zero);
+ free(cpumap);
return zero;
}
@@ -362,7 +381,11 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
uint32_t dom, vcpu = 0;
xc_vcpuinfo_t info;
int rc, i;
- uint64_t cpumap;
+ uint64_t *cpumap;
+ int nr_cpus, size;
+ xc_physinfo_t pinfo = { 0 };
+ xc_cpu_to_node_t map[1];
+ uint64_t cpumap_size = sizeof(cpumap);
static char *kwd_list[] = { "domid", "vcpu", NULL };
@@ -370,12 +393,25 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
&dom, &vcpu) )
return NULL;
+ set_xen_guest_handle(pinfo.cpu_to_node, map);
+ pinfo.max_cpu_id = 1;
+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
+ return pyxc_error_to_exception();
+ nr_cpus = pinfo.nr_cpus;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
return pyxc_error_to_exception();
- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap);
+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
+
+ if((cpumap = malloc(cpumap_size * size)) == NULL)
+ return pyxc_error_to_exception();
+
+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size);
if ( rc < 0 )
+ {
+ free(cpumap);
return pyxc_error_to_exception();
+ }
info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}",
"online", info.online,
@@ -385,17 +421,18 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
"cpu", info.cpu);
cpulist = PyList_New(0);
- for ( i = 0; cpumap != 0; i++ )
+ for ( i = 0; i < size * cpumap_size * 8; i++ )
{
- if ( cpumap & 1 ) {
+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) {
PyObject *pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap >>= 1;
+ *(cpumap + i / (cpumap_size * 8)) >>= 1;
}
PyDict_SetItemString(info_dict, "cpumap", cpulist);
Py_DECREF(cpulist);
+ free(cpumap);
return info_dict;
}