author     Hollis Blanchard <hollisb@us.ibm.com>   2006-12-12 09:56:06 -0600
committer  Hollis Blanchard <hollisb@us.ibm.com>   2006-12-12 09:56:06 -0600
commit     bdd61e705208609fee635ddf81a6920ec4f794b1 (patch)
tree       eb54be64d14e267ad958b6f5dac969dc485d1409
parent     ce35473930b792f989bb576fd7c42cd7ecafd778 (diff)
download   xen-bdd61e705208609fee635ddf81a6920ec4f794b1.tar.gz
           xen-bdd61e705208609fee635ddf81a6920ec4f794b1.tar.bz2
           xen-bdd61e705208609fee635ddf81a6920ec4f794b1.zip
[POWERPC] Merge in outstanding changes from xenppc-unstable.hg.
It's a long story, but basically a small divergence in xenppc-unstable meant a large number of changesets couldn't be directly imported to xen-unstable, so this changeset includes all of them.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
-rw-r--r--              config/powerpc64.mk                       |   2
-rwxr-xr-x [-rw-r--r--] extras/mini-os/arch/x86/setup.c           |   0
-rw-r--r-- [-rwxr-xr-x] tools/libaio/src/syscall-ppc.h            |   0
-rw-r--r--              tools/libxc/powerpc64/flatdevtree.c       |  23
-rw-r--r--              tools/libxc/powerpc64/flatdevtree.h       |   1
-rw-r--r--              tools/libxc/powerpc64/utils.c             |   2
-rw-r--r--              tools/libxc/powerpc64/xc_linux_build.c    |  50
-rw-r--r--              tools/libxc/powerpc64/xc_prose_build.c    |   4
-rw-r--r--              tools/libxc/xenctrl.h                     |   4
-rw-r--r--              tools/libxc/xenguest.h                    |   1
-rw-r--r--              tools/python/xen/lowlevel/xc/xc.c         |  32
-rw-r--r--              tools/python/xen/xend/FlatDeviceTree.py   |  94
-rw-r--r--              tools/python/xen/xend/image.py            |  10
-rw-r--r--              xen/arch/powerpc/Makefile                 |   3
-rw-r--r--              xen/arch/powerpc/domain.c                 |   9
-rw-r--r--              xen/arch/powerpc/memory.c                 |   4
-rw-r--r--              xen/arch/powerpc/mm.c                     |  33
-rw-r--r--              xen/arch/powerpc/numa.c                   |   1
-rw-r--r--              xen/arch/powerpc/ofd_fixup_memory.c       |  18
-rw-r--r--              xen/arch/powerpc/papr/xlate.c             |   2
-rw-r--r--              xen/arch/powerpc/powerpc64/ppc970.c       |   4
-rw-r--r--              xen/arch/powerpc/setup.c                  |  19
-rw-r--r--              xen/arch/powerpc/shadow.c                 |   3
-rw-r--r--              xen/arch/powerpc/smpboot.c                |  29
-rw-r--r--              xen/arch/powerpc/usercopy.c               | 221
-rw-r--r--              xen/common/Makefile                       |   2
-rw-r--r--              xen/common/xencomm.c                      | 316
-rw-r--r--              xen/include/asm-powerpc/acpi.h            |   2
-rw-r--r--              xen/include/asm-powerpc/config.h          |   1
-rw-r--r--              xen/include/asm-powerpc/domain.h          |   2
-rw-r--r--              xen/include/asm-powerpc/guest_access.h    |  78
-rw-r--r--              xen/include/asm-powerpc/mm.h              |  67
-rw-r--r--              xen/include/asm-powerpc/numa.h            |   2
-rw-r--r--              xen/include/asm-powerpc/page.h            |   3
-rw-r--r--              xen/include/asm-powerpc/smp.h             |   6
-rw-r--r--              xen/include/asm-powerpc/spinlock.h        |   6
-rw-r--r--              xen/include/xen/xencomm.h                 | 115
37 files changed, 734 insertions(+), 435 deletions(-)
diff --git a/config/powerpc64.mk b/config/powerpc64.mk
index 9773b24a6c..cba68fec19 100644
--- a/config/powerpc64.mk
+++ b/config/powerpc64.mk
@@ -1,5 +1,7 @@
CONFIG_POWERPC := y
CONFIG_POWERPC_$(XEN_OS) := y
+CONFIG_XENCOMM := y
+
CFLAGS += -DELFSIZE=64
LIBDIR := lib
diff --git a/extras/mini-os/arch/x86/setup.c b/extras/mini-os/arch/x86/setup.c
index db24b41ebc..db24b41ebc 100644..100755
--- a/extras/mini-os/arch/x86/setup.c
+++ b/extras/mini-os/arch/x86/setup.c
diff --git a/tools/libaio/src/syscall-ppc.h b/tools/libaio/src/syscall-ppc.h
index 435513e799..435513e799 100755..100644
--- a/tools/libaio/src/syscall-ppc.h
+++ b/tools/libaio/src/syscall-ppc.h
diff --git a/tools/libxc/powerpc64/flatdevtree.c b/tools/libxc/powerpc64/flatdevtree.c
index 4e9fa2fcde..f7a44bad7f 100644
--- a/tools/libxc/powerpc64/flatdevtree.c
+++ b/tools/libxc/powerpc64/flatdevtree.c
@@ -220,6 +220,29 @@ void ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
cxt->p_anchor = cxt->pres + 16; /* over the terminator */
}
+int ft_set_rsvmap(void *bphp, int m, u64 physaddr, u64 size)
+{
+ const struct boot_param_header *bph = bphp;
+ u64 *p_rsvmap = (u64 *)
+ ((char *)bph + be32_to_cpu(bph->off_mem_rsvmap));
+ u32 i;
+
+ for (i = 0;; i++) {
+ u64 addr, sz;
+
+ addr = be64_to_cpu(p_rsvmap[i * 2]);
+ sz = be64_to_cpu(p_rsvmap[i * 2 + 1]);
+ if (addr == 0 && sz == 0)
+ break;
+ if (m == i) {
+ p_rsvmap[i * 2] = cpu_to_be64(physaddr);
+ p_rsvmap[i * 2 + 1] = cpu_to_be64(size);
+ return 0;
+ }
+ }
+ return -1;
+}
+
void ft_begin_tree(struct ft_cxt *cxt)
{
cxt->p_begin = cxt->p_anchor;
diff --git a/tools/libxc/powerpc64/flatdevtree.h b/tools/libxc/powerpc64/flatdevtree.h
index d8432dbb6f..794bcd4f78 100644
--- a/tools/libxc/powerpc64/flatdevtree.h
+++ b/tools/libxc/powerpc64/flatdevtree.h
@@ -66,6 +66,7 @@ void ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str);
void ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val);
void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size);
void ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size);
+int ft_set_rsvmap(void *bphp, int m, u64 physaddr, u64 size);
void ft_dump_blob(const void *bphp);
void ft_backtrack_node(struct ft_cxt *cxt);
diff --git a/tools/libxc/powerpc64/utils.c b/tools/libxc/powerpc64/utils.c
index 9835f10891..c9d2a31d11 100644
--- a/tools/libxc/powerpc64/utils.c
+++ b/tools/libxc/powerpc64/utils.c
@@ -77,7 +77,7 @@ int get_rma_page_array(int xc_handle, int domid, xen_pfn_t **page_array,
/* We know that the RMA is machine contiguous so lets just get the
* first MFN and fill the rest in ourselves */
rc = xc_get_pfn_list(xc_handle, domid, *page_array, 1);
- if (rc != 1) {
+ if (rc == -1) {
perror("Could not get the page frame list");
return -1;
}
diff --git a/tools/libxc/powerpc64/xc_linux_build.c b/tools/libxc/powerpc64/xc_linux_build.c
index b2cebb5eef..dc15336556 100644
--- a/tools/libxc/powerpc64/xc_linux_build.c
+++ b/tools/libxc/powerpc64/xc_linux_build.c
@@ -89,7 +89,7 @@ static int load_devtree(
start_info_t *start_info __attribute__((unused)),
unsigned long start_info_addr)
{
- uint32_t start_info[4] = {0, start_info_addr, 0, 0x1000};
+ uint32_t si[4] = {0, start_info_addr, 0, 0x1000};
struct boot_param_header *header;
void *chosen;
void *xen;
@@ -127,8 +127,14 @@ static int load_devtree(
return rc;
}
+ rc = ft_set_rsvmap(devtree, 1, initrd_base, initrd_len);
+ if (rc < 0) {
+ DPRINTF("couldn't set initrd reservation\n");
+ return ~0UL;
+ }
+
/* start-info (XXX being removed soon) */
- rc = ft_set_prop(&devtree, xen, "start-info", start_info, sizeof(start_info));
+ rc = ft_set_prop(&devtree, xen, "start-info", si, sizeof(si));
if (rc < 0) {
DPRINTF("couldn't set /xen/start-info\n");
return rc;
@@ -136,6 +142,15 @@ static int load_devtree(
header = devtree;
devtree_size = header->totalsize;
+ {
+ static const char dtb[] = "/tmp/xc_domU.dtb";
+ int dfd = creat(dtb, 0666);
+ if (dfd != -1) {
+ write(dfd, devtree, devtree_size);
+ close(dfd);
+ } else
+ DPRINTF("could not open(\"%s\")\n", dtb);
+ }
DPRINTF("copying device tree to 0x%lx[0x%x]\n", DEVTREE_ADDR, devtree_size);
return install_image(xc_handle, domid, page_array, devtree, DEVTREE_ADDR,
@@ -172,22 +187,35 @@ out:
}
static unsigned long create_start_info(
- start_info_t *start_info,
+ void *devtree, start_info_t *start_info,
unsigned int console_evtchn, unsigned int store_evtchn,
unsigned long nr_pages, unsigned long rma_pages)
{
unsigned long start_info_addr;
+ uint64_t rma_top;
+ int rc;
memset(start_info, 0, sizeof(*start_info));
- snprintf(start_info->magic, sizeof(start_info->magic), "xen-%d.%d-powerpc64HV", 3, 0);
+ snprintf(start_info->magic, sizeof(start_info->magic),
+ "xen-%d.%d-powerpc64HV", 3, 0);
+
+ rma_top = rma_pages << PAGE_SHIFT;
+ DPRINTF("RMA top = 0x%"PRIX64"\n", rma_top);
start_info->nr_pages = nr_pages;
- start_info->shared_info = (nr_pages - 1) << PAGE_SHIFT;
- start_info->store_mfn = start_info->nr_pages - 2;
+ start_info->shared_info = rma_top - PAGE_SIZE;
+ start_info->store_mfn = (rma_top >> PAGE_SHIFT) - 2;
start_info->store_evtchn = store_evtchn;
- start_info->console.domU.mfn = start_info->nr_pages - 3;
+ start_info->console.domU.mfn = (rma_top >> PAGE_SHIFT) - 3;
start_info->console.domU.evtchn = console_evtchn;
- start_info_addr = (start_info->nr_pages - 4) << PAGE_SHIFT;
+ start_info_addr = rma_top - 4*PAGE_SIZE;
+
+ rc = ft_set_rsvmap(devtree, 0, start_info_addr, 4*PAGE_SIZE);
+ if (rc < 0) {
+ DPRINTF("couldn't set start_info reservation\n");
+ return ~0UL;
+ }
+
return start_info_addr;
}
@@ -201,6 +229,7 @@ static void free_page_array(xen_pfn_t *page_array)
int xc_linux_build(int xc_handle,
uint32_t domid,
+ unsigned int mem_mb,
const char *image_name,
const char *initrd_name,
const char *cmdline,
@@ -226,8 +255,7 @@ int xc_linux_build(int xc_handle,
DPRINTF("%s\n", __func__);
- DPRINTF("xc_get_tot_pages\n");
- nr_pages = xc_get_tot_pages(xc_handle, domid);
+ nr_pages = mem_mb << (20 - PAGE_SHIFT);
DPRINTF("nr_pages 0x%lx\n", nr_pages);
rma_pages = get_rma_pages(devtree);
@@ -258,7 +286,7 @@ int xc_linux_build(int xc_handle,
}
/* start_info stuff: about to be removed */
- start_info_addr = create_start_info(&start_info, console_evtchn,
+ start_info_addr = create_start_info(devtree, &start_info, console_evtchn,
store_evtchn, nr_pages, rma_pages);
*console_mfn = page_array[start_info.console.domU.mfn];
*store_mfn = page_array[start_info.store_mfn];
diff --git a/tools/libxc/powerpc64/xc_prose_build.c b/tools/libxc/powerpc64/xc_prose_build.c
index 1d5bc7e053..c26b520670 100644
--- a/tools/libxc/powerpc64/xc_prose_build.c
+++ b/tools/libxc/powerpc64/xc_prose_build.c
@@ -230,6 +230,7 @@ static void free_page_array(xen_pfn_t *page_array)
int xc_prose_build(int xc_handle,
uint32_t domid,
+ unsigned int mem_mb,
const char *image_name,
const char *initrd_name,
const char *cmdline,
@@ -257,8 +258,7 @@ int xc_prose_build(int xc_handle,
DPRINTF("cmdline=%s\n", cmdline);
- DPRINTF("xc_get_tot_pages\n");
- nr_pages = xc_get_tot_pages(xc_handle, domid);
+ nr_pages = mem_mb << (20 - PAGE_SHIFT);
DPRINTF("nr_pages 0x%lx\n", nr_pages);
rma_pages = get_rma_pages(devtree);
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index c646d18ea8..c347aeebf1 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -439,6 +439,10 @@ int xc_domain_memory_populate_physmap(int xc_handle,
unsigned int address_bits,
xen_pfn_t *extent_start);
+int xc_alloc_real_mode_area(int xc_handle,
+ uint32_t domid,
+ unsigned int log);
+
int xc_domain_ioport_permission(int xc_handle,
uint32_t domid,
uint32_t first_port,
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 4637eefb86..cf5f370cc8 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -124,6 +124,7 @@ int xc_get_hvm_param(
int xc_prose_build(int xc_handle,
uint32_t domid,
+ unsigned int mem_mb,
const char *image_name,
const char *ramdisk_name,
const char *cmdline,
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 5b348a6ee1..59bdcdc4a5 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -809,6 +809,28 @@ static PyObject *pyxc_domain_memory_increase_reservation(XcObject *self,
return zero;
}
+#ifdef __powerpc__
+static PyObject *pyxc_alloc_real_mode_area(XcObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ uint32_t dom;
+ unsigned int log;
+
+ static char *kwd_list[] = { "dom", "log", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
+ &dom, &log) )
+ return NULL;
+
+ if ( xc_alloc_real_mode_area(self->xc_handle, dom, log) )
+ return PyErr_SetFromErrno(xc_error);
+
+ Py_INCREF(zero);
+ return zero;
+}
+#endif
+
static PyObject *pyxc_domain_ioport_permission(XcObject *self,
PyObject *args,
PyObject *kwds)
@@ -1185,6 +1207,16 @@ static PyMethodDef pyxc_methods[] = {
" mem_kb [long]: .\n"
"Returns: [int] 0 on success; -1 on error.\n" },
+#ifdef __powerpc__
+ { "arch_alloc_real_mode_area",
+ (PyCFunction)pyxc_alloc_real_mode_area,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Allocate a domain's real mode area.\n"
+ " dom [int]: Identifier of domain.\n"
+ " log [int]: Specifies the area's size.\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+#endif
+
{ "domain_ioport_permission",
(PyCFunction)pyxc_domain_ioport_permission,
METH_VARARGS | METH_KEYWORDS, "\n"
diff --git a/tools/python/xen/xend/FlatDeviceTree.py b/tools/python/xen/xend/FlatDeviceTree.py
index 37e457513d..deddfcb0a3 100644
--- a/tools/python/xen/xend/FlatDeviceTree.py
+++ b/tools/python/xen/xend/FlatDeviceTree.py
@@ -22,6 +22,10 @@ import sys
import struct
import stat
import re
+import glob
+import math
+
+_host_devtree_root = '/proc/device-tree'
_OF_DT_HEADER = int("d00dfeed", 16) # avoid signed/unsigned FutureWarning
_OF_DT_BEGIN_NODE = 0x1
@@ -33,8 +37,10 @@ def _bincat(seq, separator=''):
'''Concatenate the contents of seq into a bytestream.'''
strs = []
for item in seq:
- if type(item) == type(0):
+ if isinstance(item, int):
strs.append(struct.pack(">I", item))
+ elif isinstance(item, long):
+ strs.append(struct.pack(">Q", item))
else:
try:
strs.append(item.to_bin())
@@ -231,38 +237,51 @@ class Tree(_Node):
header.totalsize = len(payload) + _alignup(len(header.to_bin()), 8)
return _pad(header.to_bin(), 8) + payload
-_host_devtree_root = '/proc/device-tree'
-def _getprop(propname):
- '''Extract a property from the system's device tree.'''
- f = file(os.path.join(_host_devtree_root, propname), 'r')
+def _readfile(fullpath):
+ '''Return full contents of a file.'''
+ f = file(fullpath, 'r')
data = f.read()
f.close()
return data
+def _find_first_cpu(dirpath):
+ '''Find the first node of type 'cpu' in a directory tree.'''
+ cpulist = glob.glob(os.path.join(dirpath, 'cpus', '*'))
+ for node in cpulist:
+ try:
+ data = _readfile(os.path.join(node, 'device_type'))
+ except IOError:
+ continue
+ if 'cpu' in data:
+ return node
+ raise IOError("couldn't find any CPU nodes under " + dirpath)
+
def _copynode(node, dirpath, propfilter):
- '''Extract all properties from a node in the system's device tree.'''
+ '''Copy all properties and children nodes from a directory tree.'''
dirents = os.listdir(dirpath)
for dirent in dirents:
fullpath = os.path.join(dirpath, dirent)
st = os.lstat(fullpath)
if stat.S_ISDIR(st.st_mode):
child = node.addnode(dirent)
- _copytree(child, fullpath, propfilter)
+ _copynode(child, fullpath, propfilter)
elif stat.S_ISREG(st.st_mode) and propfilter(fullpath):
- node.addprop(dirent, _getprop(fullpath))
-
-def _copytree(node, dirpath, propfilter):
- path = os.path.join(_host_devtree_root, dirpath)
- _copynode(node, path, propfilter)
+ node.addprop(dirent, _readfile(fullpath))
def build(imghandler):
'''Construct a device tree by combining the domain's configuration and
the host's device tree.'''
root = Tree()
- # 4 pages: start_info, console, store, shared_info
+ # 1st reservation entry used for start_info, console, store, shared_info
root.reserve(0x3ffc000, 0x4000)
+ # 2nd reservation entry is used for the initrd; later, when we load the
+ # initrd, we may fill this entry with zeroes, which would signify the end
+ # of the reservation map. To avoid adding a zero entry now, we put some
+ # bogus yet sensible numbers here.
+ root.reserve(0x1000000, 0x1000)
+
root.addprop('device_type', 'chrp-but-not-really\0')
root.addprop('#size-cells', 2)
root.addprop('#address-cells', 2)
@@ -270,35 +289,52 @@ def build(imghandler):
root.addprop('compatible', 'Momentum,Maple\0')
xen = root.addnode('xen')
- xen.addprop('start-info', 0, 0x3ffc000, 0, 0x1000)
+ xen.addprop('start-info', long(0x3ffc000), long(0x1000))
xen.addprop('version', 'Xen-3.0-unstable\0')
- xen.addprop('reg', 0, imghandler.vm.domid, 0, 0)
+ xen.addprop('reg', long(imghandler.vm.domid), long(0))
xen.addprop('domain-name', imghandler.vm.getName() + '\0')
xencons = xen.addnode('console')
xencons.addprop('interrupts', 1, 0)
- # XXX split out RMA node
- mem = root.addnode('memory@0')
+ # add memory nodes
totalmem = imghandler.vm.getMemoryTarget() * 1024
- mem.addprop('reg', 0, 0, 0, totalmem)
- mem.addprop('device_type', 'memory\0')
-
+ rma_log = 26 ### imghandler.vm.info.get('powerpc_rma_log')
+ rma_bytes = 1 << rma_log
+
+ # RMA node
+ rma = root.addnode('memory@0')
+ rma.addprop('reg', long(0), long(rma_bytes))
+ rma.addprop('device_type', 'memory\0')
+
+ # all the rest in a single node
+ remaining = totalmem - rma_bytes
+ if remaining > 0:
+ mem = root.addnode('memory@1')
+ mem.addprop('reg', long(rma_bytes), long(remaining))
+ mem.addprop('device_type', 'memory\0')
+
+ # add CPU nodes
cpus = root.addnode('cpus')
cpus.addprop('smp-enabled')
cpus.addprop('#size-cells', 0)
cpus.addprop('#address-cells', 1)
# Copy all properties the system firmware gave us, except for 'linux,'
- # properties, from 'cpus/@0', once for every vcpu. Hopefully all cpus are
- # identical...
+ # properties, from the first CPU node in the device tree. Do this once for
+ # every vcpu. Hopefully all cpus are identical...
cpu0 = None
+ cpu0path = _find_first_cpu(_host_devtree_root)
def _nolinuxprops(fullpath):
return not os.path.basename(fullpath).startswith('linux,')
for i in range(imghandler.vm.getVCpuCount()):
- cpu = cpus.addnode('PowerPC,970@0')
- _copytree(cpu, 'cpus/PowerPC,970@0', _nolinuxprops)
- # and then overwrite what we need to
- pft_size = imghandler.vm.info.get('pft-size', 0x14)
+ # create new node and copy all properties
+ cpu = cpus.addnode('PowerPC,970@%d' % i)
+ _copynode(cpu, cpu0path, _nolinuxprops)
+
+ # overwrite what we need to
+ shadow_mb = imghandler.vm.info.get('shadow_memory', 1)
+ shadow_mb_log = int(math.log(shadow_mb, 2))
+ pft_size = shadow_mb_log + 20
cpu.setprop('ibm,pft-size', 0, pft_size)
# set default CPU
@@ -307,13 +343,13 @@ def build(imghandler):
chosen = root.addnode('chosen')
chosen.addprop('cpu', cpu0.get_phandle())
- chosen.addprop('memory', mem.get_phandle())
+ chosen.addprop('memory', rma.get_phandle())
chosen.addprop('linux,stdout-path', '/xen/console\0')
chosen.addprop('interrupt-controller', xen.get_phandle())
chosen.addprop('bootargs', imghandler.cmdline + '\0')
# xc_linux_load.c will overwrite these 64-bit properties later
- chosen.addprop('linux,initrd-start', 0, 0)
- chosen.addprop('linux,initrd-end', 0, 0)
+ chosen.addprop('linux,initrd-start', long(0))
+ chosen.addprop('linux,initrd-end', long(0))
if 1:
f = file('/tmp/domU.dtb', 'w')
diff --git a/tools/python/xen/xend/image.py b/tools/python/xen/xend/image.py
index 8f59f4b4b1..12621a8dfb 100644
--- a/tools/python/xen/xend/image.py
+++ b/tools/python/xen/xend/image.py
@@ -246,6 +246,15 @@ class PPC_LinuxImageHandler(LinuxImageHandler):
features = self.vm.getFeatures(),
arch_args = devtree.to_bin())
+ def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
+ """@param shadow_mem_kb The configured shadow memory, in KiB.
+ @param maxmem_kb The configured maxmem, in KiB.
+ @return The corresponding required amount of shadow memory, also in
+ KiB.
+ PowerPC currently uses "shadow memory" to refer to the hash table."""
+ return max(maxmem_kb / 64, shadow_mem_kb)
+
+
class PPC_ProseImageHandler(LinuxImageHandler):
ostype = "prose"
@@ -273,6 +282,7 @@ class PPC_ProseImageHandler(LinuxImageHandler):
devtree = FlatDeviceTree.build(self)
return xc.prose_build(dom = self.vm.getDomid(),
+ memsize = mem_mb,
image = self.kernel,
store_evtchn = store_evtchn,
console_evtchn = console_evtchn,
diff --git a/xen/arch/powerpc/Makefile b/xen/arch/powerpc/Makefile
index d828f0a4b4..65ee7c2aec 100644
--- a/xen/arch/powerpc/Makefile
+++ b/xen/arch/powerpc/Makefile
@@ -27,6 +27,7 @@ obj-y += memory.o
obj-y += mm.o
obj-y += mpic.o
obj-y += mpic_init.o
+obj-y += numa.o
obj-y += of-devtree.o
obj-y += of-devwalk.o
obj-y += ofd_fixup.o
@@ -36,6 +37,7 @@ obj-y += rtas.o
obj-y += setup.o
obj-y += shadow.o
obj-y += smp.o
+obj-y += smpboot.o
obj-y += smp-tbsync.o
obj-y += sysctl.o
obj-y += time.o
@@ -73,6 +75,7 @@ firmware_image.bin: firmware
#
irq.o: ../x86/irq.c
physdev.o: ../x86/physdev.c
+numa.o: ../x86/numa.c
HDRS += $(wildcard *.h)
diff --git a/xen/arch/powerpc/domain.c b/xen/arch/powerpc/domain.c
index 31a594d03c..45ed959945 100644
--- a/xen/arch/powerpc/domain.c
+++ b/xen/arch/powerpc/domain.c
@@ -105,15 +105,15 @@ void arch_domain_destroy(struct domain *d)
static void machine_fail(const char *s)
{
- printf("%s failed, manual powercycle required!\n", s);
+ printk("%s failed, manual powercycle required!\n", s);
for (;;)
sleep();
}
-
void machine_halt(void)
{
+ printk("machine_halt called: spinning....\n");
console_start_sync();
- printf("%s called\n", __func__);
+ printk("%s called\n", __func__);
rtas_halt();
machine_fail(__func__);
@@ -121,8 +121,9 @@ void machine_halt(void)
void machine_restart(char * __unused)
{
+ printk("machine_restart called: spinning....\n");
console_start_sync();
- printf("%s called\n", __func__);
+ printk("%s called\n", __func__);
rtas_reboot();
machine_fail(__func__);
}
diff --git a/xen/arch/powerpc/memory.c b/xen/arch/powerpc/memory.c
index 44739acdae..58b168c4fc 100644
--- a/xen/arch/powerpc/memory.c
+++ b/xen/arch/powerpc/memory.c
@@ -20,6 +20,7 @@
*/
#include <xen/sched.h>
#include <xen/mm.h>
+#include <xen/numa.h>
#include "of-devtree.h"
#include "oftree.h"
#include "rtas.h"
@@ -238,6 +239,9 @@ void memory_init(module_t *mod, int mcount)
DBG("total_pages: 0x%016lx\n", total_pages);
init_frametable();
+
+ numa_initmem_init(0, max_page);
+
end_boot_allocator();
/* Add memory between the beginning of the heap and the beginning
diff --git a/xen/arch/powerpc/mm.c b/xen/arch/powerpc/mm.c
index 4a0493978d..2a9499825a 100644
--- a/xen/arch/powerpc/mm.c
+++ b/xen/arch/powerpc/mm.c
@@ -284,21 +284,21 @@ extern void copy_page(void *dp, void *sp)
}
}
+/* XXX should probably replace with faster data structure */
static uint add_extent(struct domain *d, struct page_info *pg, uint order)
{
struct page_extents *pe;
pe = xmalloc(struct page_extents);
if (pe == NULL)
- return 0;
+ return -ENOMEM;
pe->pg = pg;
pe->order = order;
- pe->pfn = page_to_mfn(pg);
list_add_tail(&pe->pe_list, &d->arch.extent_list);
- return pe->pfn;
+ return 0;
}
void free_extents(struct domain *d)
@@ -337,7 +337,7 @@ uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
if (pg == NULL)
return total_nrpages;
- if (add_extent(d, pg, ext_order) == 0) {
+ if (add_extent(d, pg, ext_order) < 0) {
free_domheap_pages(pg, ext_order);
return total_nrpages;
}
@@ -390,6 +390,7 @@ int allocate_rma(struct domain *d, unsigned int order)
return 0;
}
+
void free_rma_check(struct page_info *page)
{
if (test_bit(_PGC_page_RMA, &page->count_info) &&
@@ -397,7 +398,6 @@ void free_rma_check(struct page_info *page)
panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
}
-
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
@@ -415,8 +415,7 @@ ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
/* Its a grant table access */
t = PFN_TYPE_GNTTAB;
mfn = gnttab_shared_mfn(d, d->grant_table, (pfn - max_page));
- } else if (test_bit(_DOMF_privileged, &d->domain_flags) &&
- cpu_io_mfn(pfn)) {
+ } else if (d->is_privileged && cpu_io_mfn(pfn)) {
t = PFN_TYPE_IO;
mfn = pfn;
} else {
@@ -424,14 +423,18 @@ ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
t = PFN_TYPE_RMA;
mfn = pfn + rma_base_mfn;
} else {
+ ulong cur_pfn = rma_size_mfn;
+
list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- uint end_pfn = pe->pfn + (1 << pe->order);
+ uint pe_pages = 1UL << pe->order;
+ uint end_pfn = cur_pfn + pe_pages;
- if (pfn >= pe->pfn && pfn < end_pfn) {
+ if (pfn >= cur_pfn && pfn < end_pfn) {
t = PFN_TYPE_LOGICAL;
- mfn = page_to_mfn(pe->pg) + (pfn - pe->pfn);
+ mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
break;
}
+ cur_pfn += pe_pages;
}
}
#ifdef DEBUG
@@ -492,8 +495,7 @@ unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
return max_page + (mfn - gnttab_mfn);
/* IO? */
- if (test_bit(_DOMF_privileged, &d->domain_flags) &&
- cpu_io_mfn(mfn))
+ if (d->is_privileged && cpu_io_mfn(mfn))
return mfn;
rma_mfn = page_to_mfn(d->arch.rma_page);
@@ -530,3 +532,10 @@ void shadow_drop_references(
struct domain *d, struct page_info *page)
{
}
+
+int arch_domain_add_extent(struct domain *d, struct page_info *page, int order)
+{
+ if (add_extent(d, page, order) < 0)
+ return -ENOMEM;
+ return 0;
+}
diff --git a/xen/arch/powerpc/numa.c b/xen/arch/powerpc/numa.c
new file mode 100644
index 0000000000..623b0c1536
--- /dev/null
+++ b/xen/arch/powerpc/numa.c
@@ -0,0 +1 @@
+#include "../x86/numa.c"
diff --git a/xen/arch/powerpc/ofd_fixup_memory.c b/xen/arch/powerpc/ofd_fixup_memory.c
index 6582938178..e2a77a6398 100644
--- a/xen/arch/powerpc/ofd_fixup_memory.c
+++ b/xen/arch/powerpc/ofd_fixup_memory.c
@@ -68,6 +68,8 @@ static ofdn_t ofd_memory_node_create(
reg.sz = size;
ofd_prop_add(m, n, "reg", &reg, sizeof (reg));
+ printk("Dom0: %s: %016lx, %016lx\n", path, start, size);
+
return n;
}
@@ -86,17 +88,19 @@ static void ofd_memory_extent_nodes(void *m, struct domain *d)
ulong size;
ofdn_t n;
struct page_extents *pe;
+ ulong cur_pfn = 1UL << d->arch.rma_order;
+ start = cur_pfn << PAGE_SHIFT;
+ size = 0;
list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- start = pe->pfn << PAGE_SHIFT;
- size = 1UL << (pe->order + PAGE_SHIFT);
-
- n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
- start, size);
-
- BUG_ON(n <= 0);
+ size += 1UL << (pe->order + PAGE_SHIFT);
+ if (pe->order != cpu_extent_order())
+ panic("we don't handle this yet\n");
}
+ n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+ start, size);
+ BUG_ON(n <= 0);
}
void ofd_memory_props(void *m, struct domain *d)
diff --git a/xen/arch/powerpc/papr/xlate.c b/xen/arch/powerpc/papr/xlate.c
index 2b7390d252..1e95d4c879 100644
--- a/xen/arch/powerpc/papr/xlate.c
+++ b/xen/arch/powerpc/papr/xlate.c
@@ -178,7 +178,7 @@ long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn)
return H_Parameter;
}
- if (mtype == PFN_TYPE_IO &&!test_bit(_DOMF_privileged, &d->domain_flags)) {
+ if (mtype == PFN_TYPE_IO && !d->is_privileged) {
/* only a privilaged dom can access outside IO space */
DBG("%s: unprivileged access to physical page: 0x%lx\n",
__func__, pfn);
diff --git a/xen/arch/powerpc/powerpc64/ppc970.c b/xen/arch/powerpc/powerpc64/ppc970.c
index 5adefae616..9b8ad111ba 100644
--- a/xen/arch/powerpc/powerpc64/ppc970.c
+++ b/xen/arch/powerpc/powerpc64/ppc970.c
@@ -193,8 +193,8 @@ void cpu_initialize(int cpuid)
mtdec(timebase_freq);
mthdec(timebase_freq);
- /* FIXME Do not set the NAP and DPM bits in HID0 until we have had a
- * chance to audit the safe halt and idle loop code. */
+ /* FIXME Do not set the NAP bit in HID0 until we have had a chance
+ * to audit the safe halt and idle loop code. */
hid0.bits.nap = 0; /* NAP */
hid0.bits.dpm = 1; /* Dynamic Power Management */
diff --git a/xen/arch/powerpc/setup.c b/xen/arch/powerpc/setup.c
index aaceab164b..c22e6712bb 100644
--- a/xen/arch/powerpc/setup.c
+++ b/xen/arch/powerpc/setup.c
@@ -35,6 +35,7 @@
#include <xen/gdbstub.h>
#include <xen/symbols.h>
#include <xen/keyhandler.h>
+#include <xen/numa.h>
#include <acm/acm_hooks.h>
#include <public/version.h>
#include <asm/mpic.h>
@@ -77,10 +78,7 @@ ulong oftree_len;
ulong oftree_end;
uint cpu_hard_id[NR_CPUS] __initdata;
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-cpumask_t cpu_online_map; /* missing ifdef in schedule.c */
cpumask_t cpu_present_map;
-cpumask_t cpu_possible_map;
/* XXX get this from ISA node in device tree */
char *vgabase;
@@ -210,6 +208,15 @@ void startup_cpu_idle_loop(void)
reset_stack_and_jump(idle_loop);
}
+/* The boot_pa is enough "parea" for the boot CPU to get through
+ * initialization; it will ultimately be replaced later. */
+static __init void init_boot_cpu(void)
+{
+ static struct processor_area boot_pa;
+ boot_pa.whoami = 0;
+ parea = &boot_pa;
+}
+
static void init_parea(int cpuid)
{
/* Be careful not to shadow the global variable. */
@@ -250,6 +257,9 @@ static int kick_secondary_cpus(int maxcpus)
/* wait for it */
while (!cpu_online(cpuid))
cpu_relax();
+
+ numa_set_node(cpuid, 0);
+ numa_add_cpu(cpuid);
}
return 0;
@@ -293,6 +303,9 @@ static void __init __start_xen(multiboot_info_t *mbi)
if ((mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0))
cmdline_parse(__va((ulong)mbi->cmdline));
+ /* we need to be able to identify this CPU early on */
+ init_boot_cpu();
+
/* We initialise the serial devices very early so we can get debugging. */
ns16550.io_base = 0x3f8;
ns16550_init(0, &ns16550);
diff --git a/xen/arch/powerpc/shadow.c b/xen/arch/powerpc/shadow.c
index ed09b18461..64bc1c686c 100644
--- a/xen/arch/powerpc/shadow.c
+++ b/xen/arch/powerpc/shadow.c
@@ -101,9 +101,6 @@ unsigned int shadow_set_allocation(struct domain *d,
addr = htab_alloc(d, order);
- printk("%s: ibm,fpt-size should be: 0x%x\n", __func__,
- d->arch.htab.log_num_ptes + LOG_PTE_SIZE);
-
if (addr == 0)
return -ENOMEM;
diff --git a/xen/arch/powerpc/smpboot.c b/xen/arch/powerpc/smpboot.c
new file mode 100644
index 0000000000..90dff38d22
--- /dev/null
+++ b/xen/arch/powerpc/smpboot.c
@@ -0,0 +1,29 @@
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/types.h>
+#include <xen/cpumask.h>
+#include <asm/cache.h>
+
+/* representing HT siblings of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing HT and core siblings of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+/* bitmap of online cpus */
+cpumask_t cpu_online_map __read_mostly;
+EXPORT_SYMBOL(cpu_online_map);
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
+#else
+cpumask_t cpu_possible_map;
+#endif
+EXPORT_SYMBOL(cpu_possible_map);
+
+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0xff };
+EXPORT_SYMBOL(x86_cpu_to_apicid);
diff --git a/xen/arch/powerpc/usercopy.c b/xen/arch/powerpc/usercopy.c
index d7e33bcff0..4fb2546dcb 100644
--- a/xen/arch/powerpc/usercopy.c
+++ b/xen/arch/powerpc/usercopy.c
@@ -18,24 +18,14 @@
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
-#include <xen/config.h>
-#include <xen/mm.h>
#include <xen/sched.h>
+#include <xen/lib.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
+#include <asm/page.h>
#include <asm/debugger.h>
-#include <public/xen.h>
-#include <public/xencomm.h>
-
-#undef DEBUG
-#ifdef DEBUG
-static int xencomm_debug = 1; /* extremely verbose */
-#else
-#define xencomm_debug 0
-#endif
/* XXX need to return error, not panic, if domain passed a bad pointer */
-static unsigned long paddr_to_maddr(unsigned long paddr)
+unsigned long paddr_to_maddr(unsigned long paddr)
{
struct vcpu *v = get_current();
struct domain *d = v->domain;
@@ -58,208 +48,3 @@ static unsigned long paddr_to_maddr(unsigned long paddr)
return pa;
}
-
-/**
- * xencomm_copy_from_guest: Copy a block of data from domain space.
- * @to: Machine address.
- * @from: Physical address to a xencomm buffer descriptor.
- * @n: Number of bytes to copy.
- * @skip: Number of bytes from the start to skip.
- *
- * Copy data from domain to hypervisor.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long
-xencomm_copy_from_guest(void *to, const void *from, unsigned int n,
- unsigned int skip)
-{
- struct xencomm_desc *desc;
- unsigned int from_pos = 0;
- unsigned int to_pos = 0;
- unsigned int i = 0;
-
- /* first we need to access the descriptor */
- desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from);
- if (desc == NULL)
- return n;
-
- if (desc->magic != XENCOMM_MAGIC) {
- printk("%s: error: %p magic was 0x%x\n",
- __func__, desc, desc->magic);
- return n;
- }
-
- /* iterate through the descriptor, copying up to a page at a time */
- while ((to_pos < n) && (i < desc->nr_addrs)) {
- unsigned long src_paddr = desc->address[i];
- unsigned int pgoffset;
- unsigned int chunksz;
- unsigned int chunk_skip;
-
- if (src_paddr == XENCOMM_INVALID) {
- i++;
- continue;
- }
-
- pgoffset = src_paddr % PAGE_SIZE;
- chunksz = PAGE_SIZE - pgoffset;
-
- chunk_skip = min(chunksz, skip);
- from_pos += chunk_skip;
- chunksz -= chunk_skip;
- skip -= chunk_skip;
-
- if (skip == 0) {
- unsigned long src_maddr;
- unsigned long dest = (unsigned long)to + to_pos;
- unsigned int bytes = min(chunksz, n - to_pos);
-
- src_maddr = paddr_to_maddr(src_paddr + chunk_skip);
- if (src_maddr == 0)
- return n - to_pos;
-
- if (xencomm_debug)
- printk("%lx[%d] -> %lx\n", src_maddr, bytes, dest);
- memcpy((void *)dest, (void *)src_maddr, bytes);
- from_pos += bytes;
- to_pos += bytes;
- }
-
- i++;
- }
-
- return n - to_pos;
-}
-
-/**
- * xencomm_copy_to_guest: Copy a block of data to domain space.
- * @to: Physical address to xencomm buffer descriptor.
- * @from: Machine address.
- * @n: Number of bytes to copy.
- * @skip: Number of bytes from the start to skip.
- *
- * Copy data from hypervisor to domain.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long
-xencomm_copy_to_guest(void *to, const void *from, unsigned int n,
- unsigned int skip)
-{
- struct xencomm_desc *desc;
- unsigned int from_pos = 0;
- unsigned int to_pos = 0;
- unsigned int i = 0;
-
- /* first we need to access the descriptor */
- desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to);
- if (desc == NULL)
- return n;
-
- if (desc->magic != XENCOMM_MAGIC) {
- printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
- return n;
- }
-
- /* iterate through the descriptor, copying up to a page at a time */
- while ((from_pos < n) && (i < desc->nr_addrs)) {
- unsigned long dest_paddr = desc->address[i];
- unsigned int pgoffset;
- unsigned int chunksz;
- unsigned int chunk_skip;
-
- if (dest_paddr == XENCOMM_INVALID) {
- i++;
- continue;
- }
-
- pgoffset = dest_paddr % PAGE_SIZE;
- chunksz = PAGE_SIZE - pgoffset;
-
- chunk_skip = min(chunksz, skip);
- to_pos += chunk_skip;
- chunksz -= chunk_skip;
- skip -= chunk_skip;
-
- if (skip == 0) {
- unsigned long dest_maddr;
- unsigned long source = (unsigned long)from + from_pos;
- unsigned int bytes = min(chunksz, n - from_pos);
-
- dest_maddr = paddr_to_maddr(dest_paddr + chunk_skip);
- if (dest_maddr == 0)
- return -1;
-
- if (xencomm_debug)
- printk("%lx[%d] -> %lx\n", source, bytes, dest_maddr);
- memcpy((void *)dest_maddr, (void *)source, bytes);
- from_pos += bytes;
- to_pos += bytes;
- }
-
- i++;
- }
-
- return n - from_pos;
-}
-
-/* Offset page addresses in 'handle' to skip 'bytes' bytes. Set completely
- * exhausted pages to XENCOMM_INVALID. */
-int xencomm_add_offset(void *handle, unsigned int bytes)
-{
- struct xencomm_desc *desc;
- int i = 0;
-
- /* first we need to access the descriptor */
- desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle);
- if (desc == NULL)
- return -1;
-
- if (desc->magic != XENCOMM_MAGIC) {
- printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
- return -1;
- }
-
- /* iterate through the descriptor incrementing addresses */
- while ((bytes > 0) && (i < desc->nr_addrs)) {
- unsigned long dest_paddr = desc->address[i];
- unsigned int pgoffset;
- unsigned int chunksz;
- unsigned int chunk_skip;
-
- if (dest_paddr == XENCOMM_INVALID) {
- i++;
- continue;
- }
-
- pgoffset = dest_paddr % PAGE_SIZE;
- chunksz = PAGE_SIZE - pgoffset;
-
- chunk_skip = min(chunksz, bytes);
- if (chunk_skip == chunksz) {
- /* exhausted this page */
- desc->address[i] = XENCOMM_INVALID;
- } else {
- desc->address[i] += chunk_skip;
- }
- bytes -= chunk_skip;
-
- i++;
- }
- return 0;
-}
-
-int xencomm_handle_is_null(void *ptr)
-{
- struct xencomm_desc *desc;
-
- desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)ptr);
- if (desc == NULL)
- return 1;
-
- return (desc->nr_addrs == 0);
-}
-
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 770c4b376f..3cb9eb7855 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -31,5 +31,7 @@ obj-$(perfc) += perfc.o
obj-$(crash_debug) += gdbstub.o
obj-$(xenoprof) += xenoprof.o
+obj-$(CONFIG_XENCOMM) += xencomm.o
+
# Object file contains changeset and compiler information.
version.o: $(BASEDIR)/include/xen/compile.h
diff --git a/xen/common/xencomm.c b/xen/common/xencomm.c
new file mode 100644
index 0000000000..f33c753934
--- /dev/null
+++ b/xen/common/xencomm.c
@@ -0,0 +1,316 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Tristan Gingold <tristan.gingold@bull.net>
+ */
+
+#include <xen/config.h>
+#include <xen/mm.h>
+#include <xen/sched.h>
+#include <xen/xencomm.h>
+#include <public/xen.h>
+#include <public/xencomm.h>
+
+
+#undef DEBUG
+#ifdef DEBUG
+static int xencomm_debug = 1; /* extremely verbose */
+#else
+#define xencomm_debug 0
+#endif
+
+static unsigned long
+xencomm_inline_from_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ unsigned long src_paddr = xencomm_inline_addr(from);
+
+ src_paddr += skip;
+
+ while (n > 0) {
+ unsigned int chunksz;
+ unsigned long src_maddr;
+ unsigned int bytes;
+
+ chunksz = PAGE_SIZE - (src_paddr % PAGE_SIZE);
+
+ bytes = min(chunksz, n);
+
+ src_maddr = paddr_to_maddr(src_paddr);
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", src_maddr, bytes, (unsigned long)to);
+ memcpy(to, (void *)src_maddr, bytes);
+ src_paddr += bytes;
+ to += bytes;
+ n -= bytes;
+ }
+
+ /* Always successful. */
+ return 0;
+}
+
+/**
+ * xencomm_copy_from_guest: Copy a block of data from domain space.
+ * @to: Machine address.
+ * @from: Physical address to a xencomm buffer descriptor.
+ * @n: Number of bytes to copy.
+ * @skip: Number of bytes from the start to skip.
+ *
+ * Copy data from domain to hypervisor.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+xencomm_copy_from_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ struct xencomm_desc *desc;
+ unsigned int from_pos = 0;
+ unsigned int to_pos = 0;
+ unsigned int i = 0;
+
+ if (xencomm_is_inline(from))
+ return xencomm_inline_from_guest(to, from, n, skip);
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from);
+ if (desc == NULL)
+ return n;
+
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("%s: error: %p magic was 0x%x\n",
+ __func__, desc, desc->magic);
+ return n;
+ }
+
+ /* iterate through the descriptor, copying up to a page at a time */
+ while ((to_pos < n) && (i < desc->nr_addrs)) {
+ unsigned long src_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ if (src_paddr == XENCOMM_INVALID) {
+ i++;
+ continue;
+ }
+
+ pgoffset = src_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, skip);
+ from_pos += chunk_skip;
+ chunksz -= chunk_skip;
+ skip -= chunk_skip;
+
+ if (skip == 0) {
+ unsigned long src_maddr;
+ unsigned long dest = (unsigned long)to + to_pos;
+ unsigned int bytes = min(chunksz, n - to_pos);
+
+ src_maddr = paddr_to_maddr(src_paddr + chunk_skip);
+ if (src_maddr == 0)
+ return n - to_pos;
+
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", src_maddr, bytes, dest);
+ memcpy((void *)dest, (void *)src_maddr, bytes);
+ from_pos += bytes;
+ to_pos += bytes;
+ }
+
+ i++;
+ }
+
+ return n - to_pos;
+}
+
+static unsigned long
+xencomm_inline_to_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ unsigned long dest_paddr = xencomm_inline_addr(to);
+
+ dest_paddr += skip;
+
+ while (n > 0) {
+ unsigned int chunksz;
+ unsigned long dest_maddr;
+ unsigned int bytes;
+
+ chunksz = PAGE_SIZE - (dest_paddr % PAGE_SIZE);
+
+ bytes = min(chunksz, n);
+
+ dest_maddr = paddr_to_maddr(dest_paddr);
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", (unsigned long)from, bytes, dest_maddr);
+ memcpy((void *)dest_maddr, (void *)from, bytes);
+ dest_paddr += bytes;
+ from += bytes;
+ n -= bytes;
+ }
+
+ /* Always successful. */
+ return 0;
+}
+
+/**
+ * xencomm_copy_to_guest: Copy a block of data to domain space.
+ * @to: Physical address to xencomm buffer descriptor.
+ * @from: Machine address.
+ * @n: Number of bytes to copy.
+ * @skip: Number of bytes from the start to skip.
+ *
+ * Copy data from hypervisor to domain.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+xencomm_copy_to_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ struct xencomm_desc *desc;
+ unsigned int from_pos = 0;
+ unsigned int to_pos = 0;
+ unsigned int i = 0;
+
+ if (xencomm_is_inline(to))
+ return xencomm_inline_to_guest(to, from, n, skip);
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to);
+ if (desc == NULL)
+ return n;
+
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
+ return n;
+ }
+
+ /* iterate through the descriptor, copying up to a page at a time */
+ while ((from_pos < n) && (i < desc->nr_addrs)) {
+ unsigned long dest_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ if (dest_paddr == XENCOMM_INVALID) {
+ i++;
+ continue;
+ }
+
+ pgoffset = dest_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, skip);
+ to_pos += chunk_skip;
+ chunksz -= chunk_skip;
+ skip -= chunk_skip;
+
+ if (skip == 0) {
+ unsigned long dest_maddr;
+ unsigned long source = (unsigned long)from + from_pos;
+ unsigned int bytes = min(chunksz, n - from_pos);
+
+ dest_maddr = paddr_to_maddr(dest_paddr + chunk_skip);
+ if (dest_maddr == 0)
+ return -1;
+
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", source, bytes, dest_maddr);
+ memcpy((void *)dest_maddr, (void *)source, bytes);
+ from_pos += bytes;
+ to_pos += bytes;
+ }
+
+ i++;
+ }
+
+ return n - from_pos;
+}
+
+static int xencomm_inline_add_offset(void **handle, unsigned int bytes)
+{
+ *handle += bytes;
+ return 0;
+}
+
+/* Offset page addresses in 'handle' to skip 'bytes' bytes. Set completely
+ * exhausted pages to XENCOMM_INVALID. */
+int xencomm_add_offset(void **handle, unsigned int bytes)
+{
+ struct xencomm_desc *desc;
+ int i = 0;
+
+ if (xencomm_is_inline(*handle))
+ return xencomm_inline_add_offset(handle, bytes);
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)*handle);
+ if (desc == NULL)
+ return -1;
+
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
+ return -1;
+ }
+
+ /* iterate through the descriptor incrementing addresses */
+ while ((bytes > 0) && (i < desc->nr_addrs)) {
+ unsigned long dest_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ pgoffset = dest_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, bytes);
+ if (chunk_skip == chunksz) {
+ /* exhausted this page */
+ desc->address[i] = XENCOMM_INVALID;
+ } else {
+ desc->address[i] += chunk_skip;
+ }
+ bytes -= chunk_skip;
+
+ i++;
+ }
+ return 0;
+}
+
+int xencomm_handle_is_null(void *handle)
+{
+ struct xencomm_desc *desc;
+ int i;
+
+ if (xencomm_is_inline(handle))
+ return xencomm_inline_addr(handle) == 0;
+
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle);
+ if (desc == NULL)
+ return 1;
+
+ for (i = 0; i < desc->nr_addrs; i++)
+ if (desc->address[i] != XENCOMM_INVALID)
+ return 0;
+
+ return 1;
+}
+
diff --git a/xen/include/asm-powerpc/acpi.h b/xen/include/asm-powerpc/acpi.h
new file mode 100644
index 0000000000..a5cbef475b
--- /dev/null
+++ b/xen/include/asm-powerpc/acpi.h
@@ -0,0 +1,2 @@
+#define MAX_LOCAL_APIC 256
+
diff --git a/xen/include/asm-powerpc/config.h b/xen/include/asm-powerpc/config.h
index f2df6d83cc..82d1138b6f 100644
--- a/xen/include/asm-powerpc/config.h
+++ b/xen/include/asm-powerpc/config.h
@@ -50,6 +50,7 @@ extern char __bss_start[];
#define CONFIG_GDB 1
#define CONFIG_SMP 1
#define CONFIG_PCI 1
+#define CONFIG_NUMA 1
#define CONFIG_CMDLINE_SIZE 512
#define NR_CPUS 16
diff --git a/xen/include/asm-powerpc/domain.h b/xen/include/asm-powerpc/domain.h
index 4c7693e437..50f999c281 100644
--- a/xen/include/asm-powerpc/domain.h
+++ b/xen/include/asm-powerpc/domain.h
@@ -89,7 +89,7 @@ struct arch_vcpu {
struct slb_entry slb_entries[NUM_SLB_ENTRIES];
/* I/O-port access bitmap. */
- u8 *iobmp; /* Guest kernel virtual address of the bitmap. */
+ XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel virtual address of the bitmap. */
int iobmp_limit; /* Number of ports represented in the bitmap. */
int iopl; /* Current IOPL for this VCPU. */
diff --git a/xen/include/asm-powerpc/guest_access.h b/xen/include/asm-powerpc/guest_access.h
index fcae9d5d31..948d4507a1 100644
--- a/xen/include/asm-powerpc/guest_access.h
+++ b/xen/include/asm-powerpc/guest_access.h
@@ -21,82 +21,6 @@
#ifndef __PPC_GUEST_ACCESS_H__
#define __PPC_GUEST_ACCESS_H__
-extern unsigned long xencomm_copy_to_guest(void *to, const void *from,
- unsigned int len, unsigned int skip);
-extern unsigned long xencomm_copy_from_guest(void *to, const void *from,
- unsigned int len, unsigned int skip);
-extern int xencomm_add_offset(void *handle, unsigned int bytes);
-extern int xencomm_handle_is_null(void *ptr);
-
-
-/* Is the guest handle a NULL reference? */
-#define guest_handle_is_null(hnd) \
- ((hnd).p == NULL || xencomm_handle_is_null((hnd).p))
-
-/* Offset the given guest handle into the array it refers to. */
-#define guest_handle_add_offset(hnd, nr) ({ \
- const typeof((hnd).p) _ptr = (hnd).p; \
- xencomm_add_offset(_ptr, nr * sizeof(*_ptr)); \
-})
-
-/* Cast a guest handle to the specified type of handle. */
-#define guest_handle_cast(hnd, type) ({ \
- type *_x = (hnd).p; \
- XEN_GUEST_HANDLE(type) _y; \
- set_xen_guest_handle(_y, _x); \
- _y; \
-})
-
-/* Since we run in real mode, we can safely access all addresses. That also
- * means our __routines are identical to our "normal" routines. */
-#define guest_handle_okay(hnd, nr) 1
-
-/*
- * Copy an array of objects to guest context via a guest handle.
- * Optionally specify an offset into the guest array.
- */
-#define copy_to_guest_offset(hnd, idx, ptr, nr) \
- __copy_to_guest_offset(hnd, idx, ptr, nr)
-
-/* Copy sub-field of a structure to guest context via a guest handle. */
-#define copy_field_to_guest(hnd, ptr, field) \
- __copy_field_to_guest(hnd, ptr, field)
-
-/*
- * Copy an array of objects from guest context via a guest handle.
- * Optionally specify an offset into the guest array.
- */
-#define copy_from_guest_offset(ptr, hnd, idx, nr) \
- __copy_from_guest_offset(ptr, hnd, idx, nr)
-
-/* Copy sub-field of a structure from guest context via a guest handle. */
-#define copy_field_from_guest(ptr, hnd, field) \
- __copy_field_from_guest(ptr, hnd, field)
-
-#define __copy_to_guest_offset(hnd, idx, ptr, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- xencomm_copy_to_guest(_x, _y, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
-})
-
-#define __copy_field_to_guest(hnd, ptr, field) ({ \
- const int _off = offsetof(typeof(*ptr), field); \
- const typeof(&(ptr)->field) _x = &(hnd).p->field; \
- const typeof(&(ptr)->field) _y = &(ptr)->field; \
- xencomm_copy_to_guest(_x, _y, sizeof(*_x), sizeof(*_x)*(_off)); \
-})
-
-#define __copy_from_guest_offset(ptr, hnd, idx, nr) ({ \
- const typeof(ptr) _x = (hnd).p; \
- const typeof(ptr) _y = (ptr); \
- xencomm_copy_from_guest(_y, _x, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
-})
-
-#define __copy_field_from_guest(ptr, hnd, field) ({ \
- const int _off = offsetof(typeof(*ptr), field); \
- const typeof(&(ptr)->field) _x = &(hnd).p->field; \
- const typeof(&(ptr)->field) _y = &(ptr)->field; \
- xencomm_copy_to_guest(_y, _x, sizeof(*_x), sizeof(*_x)*(_off)); \
-})
+#include <xen/xencomm.h>
#endif /* __PPC_GUEST_ACCESS_H__ */
diff --git a/xen/include/asm-powerpc/mm.h b/xen/include/asm-powerpc/mm.h
index 78886bcc38..1cb77b658f 100644
--- a/xen/include/asm-powerpc/mm.h
+++ b/xen/include/asm-powerpc/mm.h
@@ -28,7 +28,7 @@
#include <xen/mm.h>
#include <asm/system.h>
#include <asm/flushtlb.h>
-#include <asm/uaccess.h>
+#include <asm/page.h>
#include <asm/debugger.h>
#define memguard_guard_range(_p,_l) ((void)0)
@@ -87,7 +87,6 @@ struct page_extents {
/* page extent */
struct page_info *pg;
uint order;
- ulong pfn;
};
/* The following page types are MUTUALLY EXCLUSIVE. */
@@ -273,72 +272,20 @@ extern int update_grant_va_mapping(unsigned long va,
struct domain *,
struct vcpu *);
-#define INVALID_MFN (~0UL)
-#define PFN_TYPE_NONE 0
-#define PFN_TYPE_RMA 1
-#define PFN_TYPE_LOGICAL 2
-#define PFN_TYPE_IO 3
-#define PFN_TYPE_FOREIGN 4
-
-extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
-
/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
-/* XXX implement me? */
-#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
-/* XXX only used for debug print right now... */
-#define get_gpfn_from_mfn(mfn) (mfn)
-
-static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
-{
- int mtype;
- ulong mfn;
-
- mfn = pfn2mfn(d, gmfn, &mtype);
- if (mfn != INVALID_MFN) {
- switch (mtype) {
- case PFN_TYPE_RMA:
- case PFN_TYPE_LOGICAL:
- break;
- default:
- WARN();
- mfn = INVALID_MFN;
- break;
- }
- }
- return mfn;
-}
-
-#define mfn_to_gmfn(_d, mfn) (mfn)
-
extern int allocate_rma(struct domain *d, unsigned int order_pages);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
extern void free_extents(struct domain *d);
+extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
+ int order);
+
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);
-static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
-{
- int mtype;
- ulong mfn;
-
- mfn = pfn2mfn(d, gmfn, &mtype);
- if (mfn != INVALID_MFN) {
- switch (mtype) {
- case PFN_TYPE_RMA:
- case PFN_TYPE_LOGICAL:
- break;
- default:
- WARN();
- mfn = INVALID_MFN;
- break;
- }
- }
- return mfn;
-}
-
-#define mfn_to_gmfn(_d, mfn) (mfn)
-
+/* XXX these just exist until we can stop #including x86 code */
+#define access_ok(addr,size) 1
+#define array_access_ok(addr,count,size) 1
#endif
diff --git a/xen/include/asm-powerpc/numa.h b/xen/include/asm-powerpc/numa.h
new file mode 100644
index 0000000000..1fa9f6c99e
--- /dev/null
+++ b/xen/include/asm-powerpc/numa.h
@@ -0,0 +1,2 @@
+#include "../asm-x86/numa.h"
+extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
diff --git a/xen/include/asm-powerpc/page.h b/xen/include/asm-powerpc/page.h
index 61a757fb79..f650849858 100644
--- a/xen/include/asm-powerpc/page.h
+++ b/xen/include/asm-powerpc/page.h
@@ -13,7 +13,7 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005, 2006
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Jimi Xenidis <jimix@watson.ibm.com>
@@ -129,5 +129,6 @@ static inline int get_order_from_pages(unsigned long nr_pages)
#define _PAGE_PAT 0x080UL
#define _PAGE_PSE 0x080UL
#define _PAGE_GLOBAL 0x100UL
+
#endif /* ! __ASSEMBLY__ */
#endif
diff --git a/xen/include/asm-powerpc/smp.h b/xen/include/asm-powerpc/smp.h
index 51b55f44fb..648501fbd1 100644
--- a/xen/include/asm-powerpc/smp.h
+++ b/xen/include/asm-powerpc/smp.h
@@ -25,6 +25,12 @@
#include <xen/cpumask.h>
#include <xen/init.h>
#include <asm/current.h>
+
+/* crap to make x86 "common code" happy */
+#define BAD_APICID 0xFFu
+extern u8 x86_cpu_to_apicid[];
+
+
extern int smp_num_siblings;
/* revisit when we support SMP */
diff --git a/xen/include/asm-powerpc/spinlock.h b/xen/include/asm-powerpc/spinlock.h
index 8302c92d25..a6dc16c82e 100644
--- a/xen/include/asm-powerpc/spinlock.h
+++ b/xen/include/asm-powerpc/spinlock.h
@@ -72,13 +72,13 @@ cas_u32(volatile u32 *ptr, u32 oval, u32 nval)
typedef struct {
volatile u32 lock;
- u16 recurse_cpu;
- u16 recurse_cnt;
+ s16 recurse_cpu;
+ u16 recurse_cnt;
} spinlock_t;
#define __UNLOCKED (0U)
#define __LOCKED (~__UNLOCKED)
-#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { __UNLOCKED }
+#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { __UNLOCKED, -1, 0 }
static inline void spin_lock_init(spinlock_t *lock)
{
*lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
diff --git a/xen/include/xen/xencomm.h b/xen/include/xen/xencomm.h
new file mode 100644
index 0000000000..e961cc6681
--- /dev/null
+++ b/xen/include/xen/xencomm.h
@@ -0,0 +1,115 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __XENCOMM_H__
+#define __XENCOMM_H__
+
+#include <public/xen.h>
+
+extern unsigned long xencomm_copy_to_guest(void *to, const void *from,
+ unsigned int len, unsigned int skip);
+extern unsigned long xencomm_copy_from_guest(void *to, const void *from,
+ unsigned int len, unsigned int skip);
+extern int xencomm_add_offset(void **handle, unsigned int bytes);
+extern int xencomm_handle_is_null(void *ptr);
+
+
+static inline int xencomm_is_inline(const void *handle)
+{
+ unsigned long addr = (unsigned long)handle;
+ return (addr & XENCOMM_INLINE_FLAG) == XENCOMM_INLINE_FLAG;
+}
+
+static inline unsigned long xencomm_inline_addr(const void *handle)
+{
+ return (unsigned long)handle & ~XENCOMM_INLINE_FLAG;
+}
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd) \
+ ((hnd).p == NULL || xencomm_handle_is_null((hnd).p))
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ({ \
+ const typeof((hnd).p) _ptr; \
+ xencomm_add_offset((void **)&((hnd).p), nr * sizeof(*_ptr)); \
+})
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type) ({ \
+ type *_x = (hnd).p; \
+ XEN_GUEST_HANDLE(type) _y; \
+ set_xen_guest_handle(_y, _x); \
+ _y; \
+})
+
+/* Since we run in real mode, we can safely access all addresses. That also
+ * means our __routines are identical to our "normal" routines. */
+#define guest_handle_okay(hnd, nr) 1
+
+/*
+ * Copy an array of objects to guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, idx, ptr, nr) \
+ __copy_to_guest_offset(hnd, idx, ptr, nr)
+
+/* Copy sub-field of a structure to guest context via a guest handle. */
+#define copy_field_to_guest(hnd, ptr, field) \
+ __copy_field_to_guest(hnd, ptr, field)
+
+/*
+ * Copy an array of objects from guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, idx, nr) \
+ __copy_from_guest_offset(ptr, hnd, idx, nr)
+
+/* Copy sub-field of a structure from guest context via a guest handle. */
+#define copy_field_from_guest(ptr, hnd, field) \
+ __copy_field_from_guest(ptr, hnd, field)
+
+#define __copy_to_guest_offset(hnd, idx, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ xencomm_copy_to_guest(_x, _y, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
+})
+
+#define __copy_field_to_guest(hnd, ptr, field) ({ \
+ const int _off = offsetof(typeof(*ptr), field); \
+ const typeof(&(ptr)->field) _x = &(hnd).p->field; \
+ const typeof(&(ptr)->field) _y = &(ptr)->field; \
+ xencomm_copy_to_guest(_x, _y, sizeof(*_x), sizeof(*_x)*(_off)); \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, idx, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ xencomm_copy_from_guest(_y, _x, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
+})
+
+#define __copy_field_from_guest(ptr, hnd, field) ({ \
+ const int _off = offsetof(typeof(*ptr), field); \
+ const typeof(&(ptr)->field) _x = &(hnd).p->field; \
+ const typeof(&(ptr)->field) _y = &(ptr)->field; \
+ xencomm_copy_to_guest(_y, _x, sizeof(*_x), sizeof(*_x)*(_off)); \
+})
+
+#endif /* __XENCOMM_H__ */