author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-07-14 10:47:50 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-07-14 10:47:50 +0100
commit     9e07a06f9b166b6f4f605d6f60ebc6ad24663bbd (patch)
tree       96c9e326a785939b95e23efe825822ebbafdfdff
parent     9e5acb051f4e9b326fcbe93faa7eacb3394887cc (diff)
[powerpc] Initial checkin of new powerpc files.
From: Hollis Blanchard et al (IBM)
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  config/powerpc64.mk | 4
-rw-r--r--  tools/libxc/xc_ppc_linux_build.c | 408
-rw-r--r--  xen/arch/powerpc/0opt.c | 28
-rw-r--r--  xen/arch/powerpc/Makefile | 117
-rw-r--r--  xen/arch/powerpc/Rules.mk | 51
-rw-r--r--  xen/arch/powerpc/audit.c | 45
-rw-r--r--  xen/arch/powerpc/bitops.c | 94
-rw-r--r--  xen/arch/powerpc/boot/boot32.S | 75
-rw-r--r--  xen/arch/powerpc/boot/start.S | 51
-rw-r--r--  xen/arch/powerpc/boot_of.c | 1016
-rw-r--r--  xen/arch/powerpc/dart.c | 299
-rw-r--r--  xen/arch/powerpc/dart.h | 36
-rw-r--r--  xen/arch/powerpc/dart_u3.c | 108
-rw-r--r--  xen/arch/powerpc/dart_u4.c | 177
-rw-r--r--  xen/arch/powerpc/delay.c | 37
-rw-r--r--  xen/arch/powerpc/dom0_ops.c | 87
-rw-r--r--  xen/arch/powerpc/domain.c | 251
-rw-r--r--  xen/arch/powerpc/domain_build.c | 285
-rw-r--r--  xen/arch/powerpc/elf32.c | 5
-rw-r--r--  xen/arch/powerpc/exceptions.c | 87
-rw-r--r--  xen/arch/powerpc/exceptions.h | 57
-rw-r--r--  xen/arch/powerpc/external.c | 247
-rw-r--r--  xen/arch/powerpc/float.S | 243
-rw-r--r--  xen/arch/powerpc/gdbstub.c | 207
-rw-r--r--  xen/arch/powerpc/hcalls.c | 172
-rw-r--r--  xen/arch/powerpc/htab.c | 69
-rw-r--r--  xen/arch/powerpc/iommu.c | 79
-rw-r--r--  xen/arch/powerpc/iommu.h | 28
-rw-r--r--  xen/arch/powerpc/irq.c | 22
-rw-r--r--  xen/arch/powerpc/mambo.S | 64
-rw-r--r--  xen/arch/powerpc/mm.c | 141
-rw-r--r--  xen/arch/powerpc/mpic.c | 1109
-rw-r--r--  xen/arch/powerpc/mpic_init.c | 390
-rw-r--r--  xen/arch/powerpc/mpic_init.h | 29
-rw-r--r--  xen/arch/powerpc/of-devtree.c | 1088
-rw-r--r--  xen/arch/powerpc/of-devtree.h | 139
-rw-r--r--  xen/arch/powerpc/of-devwalk.c | 135
-rw-r--r--  xen/arch/powerpc/of_handler/Makefile | 31
-rw-r--r--  xen/arch/powerpc/of_handler/console.c | 233
-rw-r--r--  xen/arch/powerpc/of_handler/control.c | 90
-rw-r--r--  xen/arch/powerpc/of_handler/cpu.c | 82
-rw-r--r--  xen/arch/powerpc/of_handler/devtree.c | 266
-rw-r--r--  xen/arch/powerpc/of_handler/head.S | 152
-rw-r--r--  xen/arch/powerpc/of_handler/io.c | 160
-rw-r--r--  xen/arch/powerpc/of_handler/leap.S | 38
-rw-r--r--  xen/arch/powerpc/of_handler/memcmp.c | 39
-rw-r--r--  xen/arch/powerpc/of_handler/memory.c | 129
-rw-r--r--  xen/arch/powerpc/of_handler/memset.c | 67
-rw-r--r--  xen/arch/powerpc/of_handler/ofh.c | 454
-rw-r--r--  xen/arch/powerpc/of_handler/ofh.h | 164
-rw-r--r--  xen/arch/powerpc/of_handler/papr.S | 97
-rw-r--r--  xen/arch/powerpc/of_handler/papr.h | 69
-rw-r--r--  xen/arch/powerpc/of_handler/services.c | 96
-rw-r--r--  xen/arch/powerpc/of_handler/snprintf.c | 332
-rw-r--r--  xen/arch/powerpc/of_handler/strcmp.c | 36
-rw-r--r--  xen/arch/powerpc/of_handler/strlen.c | 30
-rw-r--r--  xen/arch/powerpc/of_handler/strncmp.c | 39
-rw-r--r--  xen/arch/powerpc/of_handler/strncpy.c | 54
-rw-r--r--  xen/arch/powerpc/of_handler/strnlen.c | 30
-rw-r--r--  xen/arch/powerpc/of_handler/vdevice.c | 74
-rw-r--r--  xen/arch/powerpc/of_handler/xen_hvcall.S | 28
-rw-r--r--  xen/arch/powerpc/of_handler/xencomm.c | 84
-rw-r--r--  xen/arch/powerpc/ofd_fixup.c | 509
-rw-r--r--  xen/arch/powerpc/oftree.h | 33
-rw-r--r--  xen/arch/powerpc/papr/Makefile | 10
-rw-r--r--  xen/arch/powerpc/papr/debug.c | 84
-rw-r--r--  xen/arch/powerpc/papr/tce.c | 84
-rw-r--r--  xen/arch/powerpc/papr/vtce.c | 158
-rw-r--r--  xen/arch/powerpc/papr/vterm.c | 70
-rw-r--r--  xen/arch/powerpc/papr/xlate.c | 499
-rw-r--r--  xen/arch/powerpc/physdev.c | 24
-rw-r--r--  xen/arch/powerpc/powerpc64/Makefile | 11
-rw-r--r--  xen/arch/powerpc/powerpc64/asm-offsets.c | 65
-rw-r--r--  xen/arch/powerpc/powerpc64/domain.c | 143
-rw-r--r--  xen/arch/powerpc/powerpc64/exceptions.S | 519
-rw-r--r--  xen/arch/powerpc/powerpc64/hypercall_table.S | 83
-rw-r--r--  xen/arch/powerpc/powerpc64/io.S | 142
-rw-r--r--  xen/arch/powerpc/powerpc64/memcpy.S | 171
-rw-r--r--  xen/arch/powerpc/powerpc64/ppc970.c | 164
-rw-r--r--  xen/arch/powerpc/powerpc64/prom_call.S | 116
-rw-r--r--  xen/arch/powerpc/powerpc64/string.S | 286
-rw-r--r--  xen/arch/powerpc/powerpc64/traps.c | 50
-rw-r--r--  xen/arch/powerpc/ppc32/prom_call.c | 41
-rw-r--r--  xen/arch/powerpc/rtas.c | 24
-rw-r--r--  xen/arch/powerpc/setup.c | 370
-rw-r--r--  xen/arch/powerpc/smp.c | 60
-rw-r--r--  xen/arch/powerpc/tce.h | 71
-rw-r--r--  xen/arch/powerpc/time.c | 131
-rw-r--r--  xen/arch/powerpc/usercopy.c | 232
-rw-r--r--  xen/arch/powerpc/xen.lds | 226
-rw-r--r--  xen/include/asm-powerpc/asm_defns.h | 28
-rw-r--r--  xen/include/asm-powerpc/atomic.h | 211
-rw-r--r--  xen/include/asm-powerpc/bitops.h | 309
-rw-r--r--  xen/include/asm-powerpc/cache.h | 60
-rw-r--r--  xen/include/asm-powerpc/config.h | 77
-rw-r--r--  xen/include/asm-powerpc/current.h | 79
-rw-r--r--  xen/include/asm-powerpc/debugger.h | 44
-rw-r--r--  xen/include/asm-powerpc/delay.h | 28
-rw-r--r--  xen/include/asm-powerpc/desc.h | 25
-rw-r--r--  xen/include/asm-powerpc/div64.h | 33
-rw-r--r--  xen/include/asm-powerpc/domain.h | 114
-rw-r--r--  xen/include/asm-powerpc/event.h | 99
-rw-r--r--  xen/include/asm-powerpc/flushtlb.h | 108
-rw-r--r--  xen/include/asm-powerpc/grant_table.h | 64
-rw-r--r--  xen/include/asm-powerpc/guest_access.h | 99
-rw-r--r--  xen/include/asm-powerpc/hardirq.h | 21
-rw-r--r--  xen/include/asm-powerpc/hcalls.h | 34
-rw-r--r--  xen/include/asm-powerpc/htab.h | 142
-rw-r--r--  xen/include/asm-powerpc/hypercall.h | 26
-rw-r--r--  xen/include/asm-powerpc/init.h | 59
-rw-r--r--  xen/include/asm-powerpc/io.h | 67
-rw-r--r--  xen/include/asm-powerpc/iocap.h | 26
-rw-r--r--  xen/include/asm-powerpc/irq.h | 31
-rw-r--r--  xen/include/asm-powerpc/mach-default/irq_vectors.h | 105
-rw-r--r--  xen/include/asm-powerpc/memory.h | 39
-rw-r--r--  xen/include/asm-powerpc/misc.h | 33
-rw-r--r--  xen/include/asm-powerpc/mm.h | 224
-rw-r--r--  xen/include/asm-powerpc/mpic.h | 294
-rw-r--r--  xen/include/asm-powerpc/msr.h | 66
-rw-r--r--  xen/include/asm-powerpc/multicall.h | 27
-rw-r--r--  xen/include/asm-powerpc/page.h | 116
-rw-r--r--  xen/include/asm-powerpc/papr.h | 218
-rw-r--r--  xen/include/asm-powerpc/pci.h | 35
-rw-r--r--  xen/include/asm-powerpc/powerpc64/config.h | 45
-rw-r--r--  xen/include/asm-powerpc/powerpc64/ppc970-hid.h | 107
-rw-r--r--  xen/include/asm-powerpc/powerpc64/ppc970.h | 31
-rw-r--r--  xen/include/asm-powerpc/powerpc64/procarea.h | 36
-rw-r--r--  xen/include/asm-powerpc/powerpc64/processor.h | 193
-rw-r--r--  xen/include/asm-powerpc/powerpc64/string.h | 40
-rw-r--r--  xen/include/asm-powerpc/processor.h | 202
-rw-r--r--  xen/include/asm-powerpc/reg_defs.h | 180
-rw-r--r--  xen/include/asm-powerpc/regs.h | 25
-rw-r--r--  xen/include/asm-powerpc/shadow.h | 45
-rw-r--r--  xen/include/asm-powerpc/smp.h | 36
-rw-r--r--  xen/include/asm-powerpc/smpboot.h | 21
-rw-r--r--  xen/include/asm-powerpc/spinlock.h | 221
-rw-r--r--  xen/include/asm-powerpc/string.h | 26
-rw-r--r--  xen/include/asm-powerpc/system.h | 243
-rw-r--r--  xen/include/asm-powerpc/time.h | 42
-rw-r--r--  xen/include/asm-powerpc/types.h | 69
-rw-r--r--  xen/include/asm-powerpc/uaccess.h | 38
-rw-r--r--  xen/include/public/arch-powerpc.h | 119
-rw-r--r--  xen/include/public/xencomm.h | 37
143 files changed, 19427 insertions(+), 0 deletions(-)
diff --git a/config/powerpc64.mk b/config/powerpc64.mk
new file mode 100644
index 0000000000..ee717ecd64
--- /dev/null
+++ b/config/powerpc64.mk
@@ -0,0 +1,4 @@
+CONFIG_POWERPC := y
+
+CFLAGS += -DELFSIZE=64
+LIBDIR := lib
diff --git a/tools/libxc/xc_ppc_linux_build.c b/tools/libxc/xc_ppc_linux_build.c
new file mode 100644
index 0000000000..7230867fcc
--- /dev/null
+++ b/tools/libxc/xc_ppc_linux_build.c
@@ -0,0 +1,408 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <inttypes.h>
+
+#include <xen/dom0_ops.h>
+#include <xen/memory.h>
+#include <xc_private.h>
+#include <xg_private.h>
+#include <xenctrl.h>
+
+/* XXX 64M hack */
+#define MEMSIZE (64UL << 20)
+#define INITRD_ADDR (24UL << 20)
+
+int verbose;
+#define VERBOSE(stuff, ...) \
+ if (verbose) \
+ stuff __VA_ARGS__;
+
+#define ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+
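+/* type-checking max(): the (void)(&_x == &_y) comparison makes the compiler
+ * warn when x and y have different types */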
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+static void *load_file(const char *path, unsigned long *filesize)
+{
+ void *img;
+ ssize_t size;
+ int fd;
+
+ VERBOSE(printf("load_file(%s)\n", path));
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ perror(path);
+ return NULL;
+ }
+
+ size = lseek(fd, 0, SEEK_END);
+ if (size < 0) {
+ perror(path);
+ close(fd);
+ return NULL;
+ }
+ lseek(fd, 0, SEEK_SET);
+
+ img = malloc(size);
+ if (img == NULL) {
+ perror(path);
+ close(fd);
+ return NULL;
+ }
+
+ size = read(fd, img, size);
+ if (size <= 0) {
+ perror(path);
+ close(fd);
+ free(img);
+ return NULL;
+ }
+
+ if (filesize)
+ *filesize = size;
+ close(fd);
+ return img;
+}
+
+static int init_boot_vcpu(
+ int xc_handle,
+ int domid,
+ struct domain_setup_info *dsi,
+ unsigned long dtb,
+ unsigned long kaddr)
+{
+ vcpu_guest_context_t ctxt;
+ int rc;
+
+ memset(&ctxt.user_regs, 0x55, sizeof(ctxt.user_regs));
+ ctxt.user_regs.pc = dsi->v_kernentry;
+ ctxt.user_regs.msr = 0;
+ ctxt.user_regs.gprs[1] = 32<<20; /* XXX arbitrary stack address */
+ ctxt.user_regs.gprs[3] = dtb;
+ ctxt.user_regs.gprs[4] = kaddr;
+ ctxt.user_regs.gprs[5] = 0;
+
+ VERBOSE(printf("xc_vcpu_setvcpucontext:\n"
+ " pc 0x%"PRIx64", msr 0x016%"PRIx64"\n"
+ " r1-5 %016"PRIx64" %016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64"\n",
+ ctxt.user_regs.pc, ctxt.user_regs.msr,
+ ctxt.user_regs.gprs[1],
+ ctxt.user_regs.gprs[2],
+ ctxt.user_regs.gprs[3],
+ ctxt.user_regs.gprs[4],
+ ctxt.user_regs.gprs[5]));
+ rc = xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt);
+ if (rc < 0)
+ perror("setdomaininfo");
+
+ return rc;
+}
+
+static int install_image(
+ int xc_handle,
+ int domid,
+ xen_pfn_t *page_array,
+ void *image,
+ unsigned long paddr,
+ unsigned long size)
+{
+ uint8_t *img = image;
+ int i;
+ int rc = 0;
+
+ if (paddr & ~PAGE_MASK) {
+ printf("*** unaligned address\n");
+ return -1;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ void *page = img + i;
+ xen_pfn_t pfn = (paddr + i) >> PAGE_SHIFT;
+ xen_pfn_t mfn = page_array[pfn];
+
+ rc = xc_copy_to_domain_page(xc_handle, domid, mfn, page);
+ if (rc < 0) {
+ perror("xc_copy_to_domain_page");
+ break;
+ }
+ }
+ return rc;
+}
+
+/* XXX be more flexible about placement in memory */
+static int load_dtb(
+ int xc_handle,
+ int domid,
+ const char *dtb_path,
+ unsigned long dtb_addr,
+ struct domain_setup_info *dsi,
+ xen_pfn_t *page_array)
+{
+ uint8_t *img;
+ unsigned long dtb_size;
+ int rc = 0;
+
+ img = load_file(dtb_path, &dtb_size);
+ if (img == NULL) {
+ rc = -1;
+ goto out;
+ }
+
+ VERBOSE(printf("copying device tree to 0x%lx[0x%lx]\n",
+ dtb_addr, dtb_size));
+ rc = install_image(xc_handle, domid, page_array, img, dtb_addr, dtb_size);
+
+out:
+ free(img);
+ return rc;
+}
+
+unsigned long spin_list[] = {
+#if 0
+ 0x100,
+ 0x200,
+ 0x300,
+ 0x380,
+ 0x400,
+ 0x480,
+ 0x500,
+ 0x700,
+ 0x900,
+ 0xc00,
+#endif
+ 0
+};
+
+/* XXX yes, this is a hack */
+static void hack_kernel_img(char *img)
+{
+ const off_t file_offset = 0x10000;
+ unsigned long *addr = spin_list;
+
+ while (*addr) {
+ uint32_t *instruction = (uint32_t *)(img + *addr + file_offset);
+ printf("installing spin loop at %lx (%x)\n", *addr, *instruction);
+ *instruction = 0x48000000;
+ addr++;
+ }
+}
+
+static int load_kernel(
+ int xc_handle,
+ int domid,
+ const char *kernel_path,
+ struct domain_setup_info *dsi,
+ xen_pfn_t *page_array)
+{
+ struct load_funcs load_funcs;
+ char *kernel_img;
+ unsigned long kernel_size;
+ int rc;
+
+ /* load the kernel ELF file */
+ kernel_img = load_file(kernel_path, &kernel_size);
+ if (kernel_img == NULL) {
+ rc = -1;
+ goto out;
+ }
+
+ hack_kernel_img(kernel_img);
+
+ VERBOSE(printf("probe_elf\n"));
+ rc = probe_elf(kernel_img, kernel_size, &load_funcs);
+ if (rc < 0) {
+ rc = -1;
+ printf("%s is not an ELF file\n", kernel_path);
+ goto out;
+ }
+
+ VERBOSE(printf("parseimage\n"));
+ rc = (load_funcs.parseimage)(kernel_img, kernel_size, dsi);
+ if (rc < 0) {
+ rc = -1;
+ goto out;
+ }
+
+ VERBOSE(printf("loadimage\n"));
+ (load_funcs.loadimage)(kernel_img, kernel_size, xc_handle, domid,
+ page_array, dsi);
+
+ VERBOSE(printf(" v_start %016"PRIx64"\n", dsi->v_start));
+ VERBOSE(printf(" v_end %016"PRIx64"\n", dsi->v_end));
+ VERBOSE(printf(" v_kernstart %016"PRIx64"\n", dsi->v_kernstart));
+ VERBOSE(printf(" v_kernend %016"PRIx64"\n", dsi->v_kernend));
+ VERBOSE(printf(" v_kernentry %016"PRIx64"\n", dsi->v_kernentry));
+
+out:
+ free(kernel_img);
+ return rc;
+}
+
+static int load_initrd(
+ int xc_handle,
+ int domid,
+ xen_pfn_t *page_array,
+ const char *initrd_path,
+ unsigned long *base,
+ unsigned long *len)
+{
+ uint8_t *initrd_img;
+ int rc = -1;
+
+ /* load the initrd file */
+ initrd_img = load_file(initrd_path, len);
+ if (initrd_img == NULL)
+ return -1;
+
+ VERBOSE(printf("copying initrd to 0x%lx[0x%lx]\n", INITRD_ADDR, *len));
+ if (install_image(xc_handle, domid, page_array, initrd_img, INITRD_ADDR,
+ *len))
+ goto out;
+
+ *base = INITRD_ADDR;
+ rc = 0;
+
+out:
+ free(initrd_img);
+ return rc;
+}
+
+static unsigned long create_start_info(start_info_t *si,
+ unsigned int console_evtchn, unsigned int store_evtchn)
+{
+ unsigned long eomem;
+ unsigned long si_addr;
+
+ memset(si, 0, sizeof(*si));
+ snprintf(si->magic, sizeof(si->magic), "xen-%d.%d-powerpc64HV", 3, 0);
+
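+ /* lay out the magic pages at the top of the 64MB space: shared_info in the
+ * last page, the xenstore and console frames just below it, and start_info
+ * four pages from the end */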
+ eomem = MEMSIZE;
+ si->nr_pages = eomem >> PAGE_SHIFT;
+ si->shared_info = eomem - (PAGE_SIZE * 1);
+ si->store_mfn = si->nr_pages - 2;
+ si->store_evtchn = store_evtchn;
+ si->console_mfn = si->nr_pages - 3;
+ si->console_evtchn = console_evtchn;
+ si_addr = eomem - (PAGE_SIZE * 4);
+
+ return si_addr;
+}
+
+static int get_page_array(int xc_handle, int domid, xen_pfn_t **page_array)
+{
+ int nr_pages;
+ int rc;
+
+ VERBOSE(printf("xc_get_tot_pages\n"));
+ nr_pages = xc_get_tot_pages(xc_handle, domid);
+ VERBOSE(printf(" 0x%x\n", nr_pages));
+
+ *page_array = malloc(nr_pages * sizeof(xen_pfn_t));
+ if (*page_array == NULL) {
+ perror("malloc");
+ return -1;
+ }
+
+ VERBOSE(printf("xc_get_pfn_list\n"));
+ rc = xc_get_pfn_list(xc_handle, domid, *page_array, nr_pages);
+ if (rc != nr_pages) {
+ perror("Could not get the page frame list");
+ return -1;
+ }
+
+ return 0;
+}
+
+int xc_linux_build(int xc_handle,
+ uint32_t domid,
+ const char *image_name,
+ const char *initrd_name,
+ const char *cmdline,
+ const char *features,
+ unsigned long flags,
+ unsigned int store_evtchn,
+ unsigned long *store_mfn,
+ unsigned int console_evtchn,
+ unsigned long *console_mfn)
+{
+ struct domain_setup_info dsi;
+ xen_pfn_t *page_array = NULL;
+ unsigned long kern_addr;
+ unsigned long dtb_addr;
+ unsigned long si_addr;
+ unsigned long initrd_base = 0;
+ unsigned long initrd_len = 0;
+ start_info_t si;
+ int rc = 0;
+
+ if (get_page_array(xc_handle, domid, &page_array)) {
+ rc = -1;
+ goto out;
+ }
+
+ if (load_kernel(xc_handle, domid, image_name, &dsi, page_array)) {
+ rc = -1;
+ goto out;
+ }
+ kern_addr = 0;
+
+ if (initrd_name && initrd_name[0] != '\0' &&
+ load_initrd(xc_handle, domid, page_array, initrd_name, &initrd_base,
+ &initrd_len)) {
+ rc = -1;
+ goto out;
+ }
+ /* XXX install initrd addr/len into device tree */
+
+ dtb_addr = (16 << 20);
+ if (load_dtb(xc_handle, domid, "DomU.dtb", dtb_addr, &dsi, page_array)) {
+ dtb_addr = 0;
+ }
+
+ si_addr = create_start_info(&si, console_evtchn, store_evtchn);
+ *console_mfn = si.console_mfn;
+ *store_mfn = si.store_mfn;
+ if (install_image(xc_handle, domid, page_array, &si, si_addr,
+ sizeof(start_info_t))) {
+ rc = -1;
+ goto out;
+ }
+
+ if (init_boot_vcpu(xc_handle, domid, &dsi, dtb_addr, kern_addr)) {
+ rc = -1;
+ goto out;
+ }
+
+out:
+ return rc;
+}
diff --git a/xen/arch/powerpc/0opt.c b/xen/arch/powerpc/0opt.c
new file mode 100644
index 0000000000..4247941f58
--- /dev/null
+++ b/xen/arch/powerpc/0opt.c
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <asm/misc.h>
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void)
+{
+ trap();
+}
diff --git a/xen/arch/powerpc/Makefile b/xen/arch/powerpc/Makefile
new file mode 100644
index 0000000000..050d4d82ca
--- /dev/null
+++ b/xen/arch/powerpc/Makefile
@@ -0,0 +1,117 @@
+ifneq ($(DOM0_IMAGE),)
+builtin_dom0 := y
+endif
+
+subdir-$(HAS_PPC64) += powerpc64
+subdir-y += papr
+
+obj-y += audit.o
+obj-y += bitops.o
+obj-y += boot_of.o
+obj-y += dart.o
+obj-y += dart_u3.o
+obj-y += dart_u4.o
+obj-y += delay.o
+obj-y += dom0_ops.o
+obj-y += domain_build.o
+obj-y += domain.o
+obj-y += exceptions.o
+obj-y += external.o
+obj-y += float.o
+obj-y += hcalls.o
+obj-y += htab.o
+obj-y += iommu.o
+obj-y += irq.o
+obj-y += mambo.o
+obj-y += mm.o
+obj-y += mpic.o
+obj-y += mpic_init.o
+obj-y += of-devtree.o
+obj-y += of-devwalk.o
+obj-y += ofd_fixup.o
+obj-y += physdev.o
+obj-y += rtas.o
+obj-y += setup.o
+obj-y += smp.o
+obj-y += time.o
+obj-y += usercopy.o
+
+obj-$(debug) += 0opt.o
+obj-$(crash_debug) += gdbstub.o
+obj-$(builtin_dom0) += dom0.o
+
+obj-y += firmware_image.o
+
+obj-y += elf32.o
+
+# These are extra warnings, like those used for the arch/ppc directory, but
+# they may not allow the rest of the tree to build.
+PPC_C_WARNINGS += -Wundef -Wmissing-prototypes -Wmissing-declarations
+CFLAGS += $(PPC_C_WARNINGS)
+
+LINK=0x3000000
+boot32_link_base = $(LINK)
+xen_link_offset = 100
+xen_link_base = $(patsubst %000,%$(xen_link_offset),$(LINK))
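+# with LINK=0x3000000 and offset 100 this yields 0x3000100, placing the 64-bit Xen image just above the 32-bit boot shim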
+
+#
+# The following flags are fed to gcc in order to link several
+# objects into a single ELF segment and to not link in any additional
+# objects that gcc would normally like to
+#
+OMAGIC = -N -nodefaultlibs -nostartfiles
+
+firmware: of_handler/built_in.o $(TARGET_SUBARCH)/memcpy.o of-devtree.o
+ $(CC) $(CFLAGS) $(OMAGIC) -e __ofh_start -Wl,-Ttext,0x0 $^ -o $@
+
+firmware_image: firmware
+ $(CROSS_COMPILE)objcopy --output-target=binary $< $@
+
+firmware_image.o: firmware_image
+ $(CROSS_COMPILE)objcopy --input-target=binary \
+ --output-target=elf64-powerpc \
+ --binary-architecture=powerpc \
+ --redefine-sym _binary_$<_start=$(@:%.o=%)_start \
+ --redefine-sym _binary_$<_end=$(@:%.o=%)_end \
+ --redefine-sym _binary_$<_size=$(@:%.o=%)_size $< $@
+
+#
+# Hacks for included C files
+#
+irq.o: ../x86/irq.c
+physdev.o: ../x86/physdev.c
+
+HDRS += $(wildcard *.h)
+
+start.o: boot/start.S
+ $(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
+
+$(TARGET)-syms: start.o $(ALL_OBJS) xen.lds
+ $(CC) $(CFLAGS) $(OMAGIC) -Wl,-Ttext,$(xen_link_base),-T,xen.lds start.o $(ALL_OBJS) -o $@
+
+$(TARGET).bin: $(TARGET)-syms
+ $(CROSS_COMPILE)objcopy --output-target=binary $< $@
+
+$(TARGET).bin.o: $(TARGET).bin
+ $(CROSS_COMPILE)objcopy --input-target=binary \
+ --output-target=elf32-powerpc \
+ --binary-architecture=powerpc $< $@
+
+boot32.o: boot/boot32.S
+ $(CC) -m32 -Wa,-a32,-mppc64bridge \
+ -D__ASSEMBLY__ -D__BRIDGE64__ $(CFLAGS) -c $< -o $@
+
+$(TARGET): boot32.o $(TARGET).bin.o
+ $(CC) -m32 -N -Wl,-melf32ppclinux -static -nostdlib \
+ -Wl,-Ttext,$(boot32_link_base) -Wl,-Tdata,$(xen_link_base) \
+ $(CFLAGS) $^ -o $@
+
+asm-offsets.s: $(TARGET_SUBARCH)/asm-offsets.c $(HDRS)
+ $(CC) $(CFLAGS) -S -o $@ $<
+
+dom0.bin: $(DOM0_IMAGE)
+ cp $< $@
+
+clean::
+ $(MAKE) -f $(BASEDIR)/Rules.mk -C of_handler clean
+ rm -f firmware firmware_image dom0.bin
diff --git a/xen/arch/powerpc/Rules.mk b/xen/arch/powerpc/Rules.mk
new file mode 100644
index 0000000000..592ebec003
--- /dev/null
+++ b/xen/arch/powerpc/Rules.mk
@@ -0,0 +1,51 @@
+HAS_PPC64 := y
+
+CC := $(CROSS_COMPILE)gcc
+LD := $(CROSS_COMPILE)ld
+
+# These warning flags are goodness that applies to all source files.
+C_WARNINGS := -Wpointer-arith -Wredundant-decls
+
+# _no_ common code can have packed data structures or we are in trouble.
+C_WARNINGS += -Wpacked
+
+CFLAGS := -m64 -ffreestanding -fno-builtin -fno-common -fno-strict-aliasing
+CFLAGS += -iwithprefix include -Wall -Werror -pipe
+CFLAGS += -I$(BASEDIR)/include
+CFLAGS += -I$(BASEDIR)/include/asm-powerpc/mach-generic
+CFLAGS += -I$(BASEDIR)/include/asm-powerpc/mach-default
+CFLAGS += $(C_WARNINGS)
+CFLAGS += -msoft-float -O2
+CFLAGS-$(debug) += -O0 # last one wins
+CFLAGS-$(papr_vterm) += -DPAPR_VDEVICE -DPAPR_VTERM
+
+LDFLAGS += -m elf64ppc
+
+#
+# command to embed a binary inside a .o
+#
+%.o: %.bin
+ $(CROSS_COMPILE)objcopy --input-target=binary \
+ --output-target=elf64-powerpc \
+ --binary-architecture=powerpc \
+ --redefine-sym _binary_$*_bin_start=$*_start \
+ --redefine-sym _binary_$*_bin_end=$*_end \
+ --redefine-sym _binary_$*_bin_size=$*_size \
+ $< $@
+
+# Test for at least GCC v3.2.x.
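+# gcc-ver extracts the $(1)'th dotted component of "$(CC) -dumpversion" (1 = major, 2 = minor)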
+gcc-ver = $(shell $(CC) -dumpversion | sed -e 's/^\(.\)\.\(.\)\.\(.\)/\$(1)/')
+ifeq ($(call gcc-ver,1),1)
+$(error gcc-1.x.x unsupported - upgrade to at least gcc-3.2.x)
+endif
+ifeq ($(call gcc-ver,1),2)
+$(error gcc-2.x.x unsupported - upgrade to at least gcc-3.2.x)
+endif
+ifeq ($(call gcc-ver,1),3)
+ifeq ($(call gcc-ver,2),0)
+$(error gcc-3.0.x unsupported - upgrade to at least gcc-3.2.x)
+endif
+ifeq ($(call gcc-ver,2),1)
+$(error gcc-3.1.x unsupported - upgrade to at least gcc-3.2.x)
+endif
+endif
diff --git a/xen/arch/powerpc/audit.c b/xen/arch/powerpc/audit.c
new file mode 100644
index 0000000000..2c6c12b598
--- /dev/null
+++ b/xen/arch/powerpc/audit.c
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef NDEBUG
+#include <xen/lib.h>
+#include <xen/sched.h>
+
+extern void audit_domain(struct domain *d);
+extern void audit_domains(void);
+extern void audit_domains_key(unsigned char key);
+
+void audit_domain(struct domain *d)
+{
+ panic("%s unimplemented\n", __func__);
+}
+
+void audit_domains(void)
+{
+ struct domain *d;
+ for_each_domain ( d )
+ audit_domain(d);
+}
+
+void audit_domains_key(unsigned char key)
+{
+ audit_domains();
+}
+#endif
diff --git a/xen/arch/powerpc/bitops.c b/xen/arch/powerpc/bitops.c
new file mode 100644
index 0000000000..1b731a9b66
--- /dev/null
+++ b/xen/arch/powerpc/bitops.c
@@ -0,0 +1,94 @@
+/* from linux/arch/powerpc/lib/bitops.c */
+
+#include <asm/types.h>
+#include <asm/bitops.h>
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
diff --git a/xen/arch/powerpc/boot/boot32.S b/xen/arch/powerpc/boot/boot32.S
new file mode 100644
index 0000000000..1b594b5bc2
--- /dev/null
+++ b/xen/arch/powerpc/boot/boot32.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+### 32-bit bootstrapping code so OF will like us
+ .section ".text"
+ .align 3
+ .globl _start
+
+_start:
+ ## Double word align the MSR value below
+ nop
+ bl _real_start
+ ## static value for MSR
+ .llong 0x9000000000001000
+
+ ## see also docs/reference/ppc/msr.txt
+##bit C Hex Name Desc
+## 0 63 80000000 00000000 SF 64-bit Mode
+## 3 60 10000000 00000000 HV Hypervisor State (in hypervisor state iff PR = 0)
+## 51 12 00000000 00001000 ME Machine Check Enable
+
+_real_start:
+ # pass the original msr as argument to hype_init
+ mfmsr 8
+
+ ## Set PC
+ li 21, 0
+ oris 21, 21, _hype64@h
+ ori 21, 21, _hype64@l
+#ifdef __BRIDGE64__
+ ## In 64bit we use rfid to switch from 32bit to 64 bit
+ mtsrr0 21
+
+ ## Set MSR
+ mflr 21
+ ld 22, 0(21)
+ mtsrr1 22
+ bl __leap
+ /* should never return */
+ trap
+__leap:
+ rfid
+#else
+ mtctr 21
+ bctrl
+ /* should never return */
+ trap
+#endif
+
+
+_real_end:
+ .data
+ .align 3
+ ## Hypervisor starts here, at the first data address
+ ## linker magic positions _hype64 0x100 after _start
+ ## hype/ppc64/Makefile.isa
+_hype64:
+
+
diff --git a/xen/arch/powerpc/boot/start.S b/xen/arch/powerpc/boot/start.S
new file mode 100644
index 0000000000..aec4a48289
--- /dev/null
+++ b/xen/arch/powerpc/boot/start.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <asm/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+ .globl _start
+_start:
+ /* load up the stack */
+ SET_REG_TO_LABEL(r1, cpu0_stack)
+
+ /* call the init function */
+ LOADADDR(r21,__start_xen_ppc)
+
+#ifdef __PPC64__
+ ld r2, 8(r21)
+ ld r21, 0(r21)
+#endif
+ mtctr r21
+ bctrl
+ /* should never return */
+ trap
+
+ /* Note! GDB 6.3 makes the very stupid assumption that PC > SP means we are
+ * in a Linux signal trampoline, and it begins groping for a struct
+ * rt_sigframe on the stack. Naturally, this fails miserably for our
+ * backtrace. To work around this behavior, we must make certain that our
+ * stack is always above our text, e.g. in the data section. */
+ .data /* DO NOT REMOVE; see GDB note above */
+ .align 4
+cpu0_stack_bottom:
+ .space STACK_SIZE
+cpu0_stack:
+ .space STACK_FRAME_OVERHEAD
diff --git a/xen/arch/powerpc/boot_of.c b/xen/arch/powerpc/boot_of.c
new file mode 100644
index 0000000000..1b22c2a7fb
--- /dev/null
+++ b/xen/arch/powerpc/boot_of.c
@@ -0,0 +1,1016 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/multiboot.h>
+#include <xen/compile.h>
+#include <xen/spinlock.h>
+#include <xen/serial.h>
+#include <xen/time.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include "exceptions.h"
+#include "of-devtree.h"
+
+static ulong of_vec;
+static ulong of_msr;
+static int of_out;
+static ofdn_t boot_cpu;
+static char bootargs[256];
+
+extern struct ns16550_defaults ns16550;
+
+#undef OF_DEBUG
+
+#ifdef OF_DEBUG
+#define DBG(args...) of_printf(args)
+#else
+#define DBG(args...)
+#endif
+
+#define of_panic(MSG...) \
+ do { of_printf(MSG); of_printf("\nHANG\n"); for (;;); } while (0)
+
+struct of_service {
+ u32 ofs_service;
+ u32 ofs_nargs;
+ u32 ofs_nrets;
+ u32 ofs_args[10];
+};
+
+static int bof_chosen;
+
+static struct of_service s;
+extern s32 prom_call(void *arg, ulong rtas_base, ulong func, ulong msr);
+
+static int __init of_call(
+ const char *service, u32 nargs, u32 nrets, s32 rets[], ...)
+{
+ int rc;
+
+ if (of_vec != 0) {
+ va_list args;
+ int i;
+
+ memset(&s, 0, sizeof (s));
+ s.ofs_service = (ulong)service;
+ s.ofs_nargs = nargs;
+ s.ofs_nrets = nrets;
+ s.ofs_nargs = nargs;
+
+ /* copy all the params into the args array */
+ va_start(args, rets);
+
+ for (i = 0; i < nargs; i++) {
+ s.ofs_args[i] = va_arg(args, u32);
+ }
+
+ va_end(args);
+
+ rc = prom_call(&s, 0, of_vec, of_msr);
+
+ /* always copy the return values back, just in case */
+ for (i = 0; i < nrets; i++) {
+ rets[i] = s.ofs_args[i + nargs];
+ }
+ } else {
+ rc = OF_FAILURE;
+ }
+ return rc;
+}
+
+/* popular OF methods */
+static int __init _of_write(int ih, const char *addr, u32 len)
+{
+ int rets[1] = { OF_FAILURE };
+ if (of_call("write", 3, 1, rets, ih, addr, len) == OF_FAILURE) {
+ return OF_FAILURE;
+ }
+ return rets[0];
+}
+
+/* console write wrapper: converts bare LF to CRLF for the OF console */
+static int __init of_write(int ih, const char *addr, u32 len)
+{
+ int rc;
+ int i = 0;
+ int sum = 0;
+
+ while (i < len) {
+ if (addr[i] == '\n') {
+ if (i > 0) {
+ rc = _of_write(ih, addr, i);
+ if (rc == OF_FAILURE)
+ return rc;
+ sum += rc;
+ }
+ rc = _of_write(ih, "\r\n", 2);
+ if (rc == OF_FAILURE)
+ return rc;
+ sum += rc;
+ i++;
+ addr += i;
+ len -= i;
+ i = 0;
+ continue;
+ }
+ i++;
+ }
+ if (len > 0) {
+ rc = _of_write(ih, addr, len);
+ if (rc == OF_FAILURE)
+ return rc;
+ sum += rc;
+ }
+
+ return sum;
+}
+
+static int of_printf(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+static int __init of_printf(const char *fmt, ...)
+{
+ static char buf[1024];
+ va_list args;
+ int sz;
+
+ if (of_out == 0) {
+ return OF_FAILURE;
+ }
+
+ va_start(args, fmt);
+
+ sz = vsnprintf(buf, sizeof (buf), fmt, args);
+ if (sz <= sizeof (buf)) {
+ of_write(of_out, buf, sz);
+ } else {
+ static const char trunc[] = "\n(TRUNCATED)\n";
+
+ sz = sizeof (buf);
+ of_write(of_out, buf, sz);
+ of_write(of_out, trunc, sizeof (trunc));
+ }
+ return sz;
+}
+
+static int __init of_finddevice(const char *devspec)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("finddevice", 1, 1, rets, devspec);
+ if (rets[0] == OF_FAILURE) {
+ DBG("finddevice %s -> FAILURE %d\n",devspec,rets[0]);
+ return OF_FAILURE;
+ }
+ DBG("finddevice %s -> %d\n",devspec, rets[0]);
+ return rets[0];
+}
+
+static int __init of_getprop(int ph, const char *name, void *buf, u32 buflen)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("getprop", 4, 1, rets, ph, name, buf, buflen);
+
+ if (rets[0] == OF_FAILURE) {
+ DBG("getprop 0x%x %s -> FAILURE\n", ph, name);
+ return OF_FAILURE;
+ }
+
+ DBG("getprop 0x%x %s -> 0x%x (%s)\n", ph, name, rets[0], (char *)buf);
+ return rets[0];
+}
+
+static int __init of_setprop(
+ int ph, const char *name, const void *buf, u32 buflen)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("setprop", 4, 1, rets, ph, name, buf, buflen);
+
+ if (rets[0] == OF_FAILURE) {
+ DBG("setprop 0x%x %s -> FAILURE\n", ph, name);
+ return OF_FAILURE;
+ }
+
+ DBG("setprop 0x%x %s -> %s\n", ph, name, (char *)buf);
+ return rets[0];
+}
+
+/*
+ * returns 0 if there are no children (of spec)
+ */
+static int __init of_getchild(int ph)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("child", 1, 1, rets, ph);
+ DBG("getchild 0x%x -> 0x%x\n", ph, rets[0]);
+
+ return rets[0];
+}
+
+/*
+ * returns 0 if there are no peers
+ */
+static int __init of_getpeer(int ph)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("peer", 1, 1, rets, ph);
+ DBG("getpeer 0x%x -> 0x%x\n", ph, rets[0]);
+
+ return rets[0];
+}
+
+static int __init of_getproplen(int ph, const char *name)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("getproplen", 2, 1, rets, ph, name);
+ if (rets[0] == OF_FAILURE) {
+ DBG("getproplen 0x%x %s -> FAILURE\n", ph, name);
+ return OF_FAILURE;
+ }
+ DBG("getproplen 0x%x %s -> 0x%x\n", ph, name, rets[0]);
+ return rets[0];
+}
+
+static int __init of_package_to_path(int ph, char *buffer, u32 buflen)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("package-to-path", 3, 1, rets, ph, buffer, buflen);
+ if (rets[0] == OF_FAILURE) {
+ DBG("%s 0x%x -> FAILURE\n", __func__, ph);
+ return OF_FAILURE;
+ }
+ DBG("%s 0x%x %s -> 0x%x\n", __func__, ph, buffer, rets[0]);
+ if (rets[0] <= buflen)
+ buffer[rets[0]] = '\0';
+ return rets[0];
+}
+
+static int __init of_nextprop(int ph, const char *name, void *buf)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("nextprop", 3, 1, rets, ph, name, buf);
+
+ if (rets[0] == OF_FAILURE) {
+ DBG("nextprop 0x%x %s -> FAILURE\n", ph, name);
+ return OF_FAILURE;
+ }
+
+ DBG("nextprop 0x%x %s -> %s\n", ph, name, (char *)buf);
+ return rets[0];
+}
+
+static int __init of_instance_to_path(int ih, char *buffer, u32 buflen)
+{
+ int rets[1] = { OF_FAILURE };
+
+ if (of_call("instance-to-path", 3, 1, rets, ih, buffer, buflen)
+ == OF_FAILURE)
+ return OF_FAILURE;
+
+ if (rets[0] <= buflen)
+ buffer[rets[0]] = '\0';
+ return rets[0];
+}
+
+static int __init of_start_cpu(int cpu, u32 pc, u32 reg)
+{
+ int rets[1] = { OF_FAILURE };
+
+ if ( of_call("start-cpu", 3, 0, rets, cpu, pc, reg) == OF_FAILURE )
+ return OF_FAILURE;
+
+ return rets[0];
+}
+
+static void __init of_test(const char *of_method_name)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("test", 1, 1, rets, of_method_name);
+ if (rets[0] == OF_FAILURE ) {
+ of_printf("Warning: possibly no OF method %s.\n"
+ "(Ignore this warning on PIBS.)\n", of_method_name);
+ }
+}
+
+static int __init of_claim(void * virt, u32 size)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("claim", 3, 1, rets, virt, size, 0/*align*/);
+ if (rets[0] == OF_FAILURE) {
+ DBG("%s 0x%p 0x%08x -> FAIL\n", __func__, virt, size);
+ return OF_FAILURE;
+ }
+
+ DBG("%s 0x%p 0x%08x -> 0x%x\n", __func__, virt, size, rets[0]);
+ return rets[0];
+}
+
+static int __init of_instance_to_package(int ih)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("instance-to-package", 1, 1, rets, ih);
+ if (rets[0] == OF_FAILURE)
+ return OF_FAILURE;
+
+ return rets[0];
+}
+
+static int __init of_getparent(int ph)
+{
+ int rets[1] = { OF_FAILURE };
+
+ of_call("parent", 1, 1, rets, ph);
+
+ DBG("getparent 0x%x -> 0x%x\n", ph, rets[0]);
+ return rets[0];
+}
+
+static void boot_of_probemem(multiboot_info_t *mbi)
+{
+ int root;
+ int p;
+ u32 addr_cells = 1;
+ u32 size_cells = 1;
+ int rc;
+ int mcount = 0;
+ static memory_map_t mmap[16];
+
+ root = of_finddevice("/");
+ p = of_getchild(root);
+
+ /* code is written to assume cell sizes of 1 */
+ of_getprop(root, "#address-cells", &addr_cells, sizeof (addr_cells));
+ of_getprop(root, "#size-cells", &size_cells, sizeof (size_cells));
+ DBG("%s: address_cells=%d size_cells=%d\n",
+ __func__, addr_cells, size_cells);
+
+ do {
+ const char memory[] = "memory";
+ char type[32];
+
+ type[0] = '\0';
+
+ of_getprop(p, "device_type", type, sizeof (type));
+ if (strncmp(type, memory, sizeof (memory)) == 0) {
+ u32 reg[48];
+ u32 al, ah, ll, lh;
+ int r;
+
+ rc = of_getprop(p, "reg", reg, sizeof (reg));
+ if (rc == OF_FAILURE) {
+ of_panic("no reg property for memory node: 0x%x.\n", p);
+ }
+ int l = rc / sizeof(u32); /* number of reg elements */
+ DBG("%s: number of bytes in property 'reg' %d\n",
+ __func__, rc);
+
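+ /* walk the reg property: each (address, size) pair spans #address-cells and #size-cells 32-bit words respectively */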
+ r = 0;
+ while (r < l) {
+ al = ah = ll = lh = 0;
+ if (addr_cells == 2) {
+ ah = reg[r++];
+ if (r >= l)
+ break; /* partial line. Skip */
+ al = reg[r++];
+ if (r >= l)
+ break; /* partial line. Skip */
+ } else {
+ al = reg[r++];
+ if (r >= l)
+ break; /* partial line. Skip */
+ }
+ if (size_cells == 2) {
+ lh = reg[r++];
+ if (r >= l)
+ break; /* partial line. Skip */
+ ll = reg[r++];
+ } else {
+ ll = reg[r++];
+ }
+
+ if ((ll != 0) || (lh != 0)) {
+ mmap[mcount].size = 20; /* entry size, not counting this size field */
+ mmap[mcount].type = 1; /* Regular ram */
+ mmap[mcount].length_high = lh;
+ mmap[mcount].length_low = ll;
+ mmap[mcount].base_addr_high = ah;
+ mmap[mcount].base_addr_low = al;
+ of_printf("%s: memory 0x%016lx[0x%08lx]\n",
+ __func__,
+ (u64)(((u64)mmap[mcount].base_addr_high << 32)
+ | mmap[mcount].base_addr_low),
+ (u64)(((u64)mmap[mcount].length_high << 32)
+ | mmap[mcount].length_low));
+ ++mcount;
+ }
+ }
+ }
+ p = of_getpeer(p);
+ } while (p != OF_FAILURE && p != 0);
+
+ if (mcount > 0) {
+ mbi->flags |= MBI_MEMMAP;
+ mbi->mmap_length = sizeof (mmap[0]) * mcount;
+ mbi->mmap_addr = (ulong)mmap;
+ }
+}
+
+static void boot_of_bootargs(multiboot_info_t *mbi)
+{
+ int rc;
+
+ rc = of_getprop(bof_chosen, "bootargs", &bootargs, sizeof (bootargs));
+ if (rc == OF_FAILURE) {
+ strcpy(bootargs, "xen");
+ }
+
+ mbi->flags |= MBI_CMDLINE;
+ mbi->cmdline = (u32)bootargs;
+
+ of_printf("bootargs = %s\n", bootargs);
+}
+
+static int save_props(void *m, ofdn_t n, int pkg)
+{
+ int ret;
+ char name[128];
+ int result = 1;
+ int found_name = 0;
+ int found_device_type = 0;
+ const char name_str[] = "name";
+ const char devtype_str[] = "device_type";
+
+ /* get first */
+ result = of_nextprop(pkg, 0, name);
+
+ while (result > 0) {
+ int sz;
+ u64 obj[1024];
+
+ sz = of_getproplen(pkg, name);
+ if (sz >= 0) {
+ ret = OF_SUCCESS;
+ } else {
+ ret = OF_FAILURE;
+ }
+
+ if (ret == OF_SUCCESS) {
+ int actual = 0;
+ ofdn_t pos;
+
+ if (sz > 0) {
+ if (sz > sizeof (obj)) {
+ of_panic("obj array not big enough for 0x%x\n", sz);
+ }
+ actual = of_getprop(pkg, name, obj, sz);
+ if (actual > sz) of_panic("obj too small");
+ }
+
+ if (strncmp(name, name_str, sizeof(name_str)) == 0) {
+ found_name = 1;
+ }
+
+ if (strncmp(name, devtype_str, sizeof(devtype_str)) == 0) {
+ found_device_type = 1;
+ }
+
+ pos = ofd_prop_add(m, n, name, obj, actual);
+ if (pos == 0) of_panic("prop_create");
+ }
+
+ result = of_nextprop(pkg, name, name);
+ }
+
+ return 1;
+}
+
+
+static void do_pkg(void *m, ofdn_t n, int p, char *path, size_t psz)
+{
+ int pnext;
+ ofdn_t nnext;
+ int sz;
+
+retry:
+ save_props(m, n, p);
+
+ /* do children first */
+ pnext = of_getchild(p);
+
+ if (pnext != 0) {
+ sz = of_package_to_path(pnext, path, psz);
+ if (sz == OF_FAILURE) of_panic("bad path\n");
+
+ nnext = ofd_node_child_create(m, n, path, sz);
+ if (nnext == 0) of_panic("out of mem\n");
+
+ do_pkg(m, nnext, pnext, path, psz);
+ }
+
+ /* do peer */
+ pnext = of_getpeer(p);
+
+ if (pnext != 0) {
+ sz = of_package_to_path(pnext, path, psz);
+
+ nnext = ofd_node_peer_create(m, n, path, sz);
+ if (nnext <= 0) of_panic("out of space in OFD tree.\n");
+
+ n = nnext;
+ p = pnext;
+ goto retry;
+ }
+}
+
+static int pkg_save(void *mem)
+{
+ int root;
+ char path[256];
+ int r;
+
+ path[0]='/';
+ path[1]='\0';
+
+ /* get root */
+ root = of_getpeer(0);
+ if (root == OF_FAILURE) of_panic("no root package\n");
+
+ do_pkg(mem, OFD_ROOT, root, path, sizeof(path));
+
+ r = (((ofdn_t *)mem)[1] + 1) * sizeof (u64);
+
+ of_printf("%s: saved device tree in 0x%x bytes\n", __func__, r);
+
+ return r;
+}
+
+static int boot_of_fixup_refs(void *mem)
+{
+ static const char *fixup_props[] = {
+ "interrupt-parent",
+ };
+ int i;
+ int count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fixup_props); i++) {
+ ofdn_t c;
+ const char *name = fixup_props[i];
+
+ c = ofd_node_find_by_prop(mem, OFD_ROOT, name, NULL, 0);
+ while (c > 0) {
+ const char *path;
+ int rp;
+ int ref;
+ ofdn_t dp;
+ int rc;
+ ofdn_t upd;
+ char ofpath[256];
+
+ path = ofd_node_path(mem, c);
+ if (path == NULL) of_panic("no path to found prop: %s\n", name);
+
+ rp = of_finddevice(path);
+ if (rp == OF_FAILURE)
+ of_panic("no real device for: name %s, path %s\n",
+ name, path);
+ /* Note: In theory 0 is a valid node handle but it is highly
+ * unlikely.
+ */
+ if (rp == 0) {
+ of_panic("%s: of_finddevice returns 0 for path %s\n",
+ __func__, path);
+ }
+
+ rc = of_getprop(rp, name, &ref, sizeof(ref));
+ if ((rc == OF_FAILURE) || (rc == 0))
+ of_panic("no prop: name %s, path %s, device 0x%x\n",
+ name, path, rp);
+
+ rc = of_package_to_path(ref, ofpath, sizeof (ofpath));
+ if (rc == OF_FAILURE)
+ of_panic("no package: name %s, path %s, device 0x%x,\n"
+ "ref 0x%x\n", name, path, rp, ref);
+
+ dp = ofd_node_find(mem, ofpath);
+ if (dp <= 0) of_panic("no ofd node for OF node[0x%x]: %s\n",
+ ref, ofpath);
+
+ ref = dp;
+
+ upd = ofd_prop_add(mem, c, name, &ref, sizeof(ref));
+ if (upd <= 0) of_panic("update failed: %s\n", name);
+
+#ifdef DEBUG
+ of_printf("%s: %s/%s -> %s\n", __func__,
+ path, name, ofpath);
+#endif
+ ++count;
+ c = ofd_node_find_next(mem, c);
+ }
+ }
+ return count;
+}
+
+static int boot_of_fixup_chosen(void *mem)
+{
+ int ch;
+ ofdn_t dn;
+ ofdn_t dc;
+ int val;
+ int rc;
+ char ofpath[256];
+
+ ch = of_finddevice("/chosen");
+ if (ch == OF_FAILURE) of_panic("/chosen not found\n");
+
+ rc = of_getprop(ch, "cpu", &val, sizeof (val));
+
+ if (rc != OF_FAILURE) {
+ rc = of_instance_to_path(val, ofpath, sizeof (ofpath));
+
+ if (rc > 0) {
+ dn = ofd_node_find(mem, ofpath);
+ if (dn <= 0) of_panic("no node for: %s\n", ofpath);
+
+ boot_cpu = dn;
+ val = dn;
+
+ dn = ofd_node_find(mem, "/chosen");
+ if (dn <= 0) of_panic("no /chosen node\n");
+
+ dc = ofd_prop_add(mem, dn, "cpu", &val, sizeof (val));
+ if (dc <= 0) of_panic("could not fix /chosen/cpu\n");
+ rc = 1;
+ } else {
+ of_printf("*** can't find path to booting cpu, "
+ "SMP is disabled\n");
+ boot_cpu = -1;
+ }
+ }
+ return rc;
+}
+
+static ulong space_base;
+static ulong find_space(u32 size, ulong align, multiboot_info_t *mbi)
+{
+ memory_map_t *map = (memory_map_t *)((ulong)mbi->mmap_addr);
+ ulong eomem = ((u64)map->length_high << 32) | (u64)map->length_low;
+ ulong base;
+
+ of_printf("%s base=0x%016lx eomem=0x%016lx size=0x%08x align=0x%lx\n",
+ __func__, space_base, eomem, size, align);
+ base = ALIGN_UP(space_base, PAGE_SIZE);
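+ /* only claim space within the first 64MB (0x4000000); beyond that we give up */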
+ if ((base + size) >= 0x4000000) return 0;
+ if (base + size > eomem) of_panic("not enough RAM\n");
+
+ if (size == 0) return base;
+ if (of_claim((void*)base, size) != OF_FAILURE) {
+ space_base = base + size;
+ return base;
+ } else {
+ for(base += 0x100000; (base+size) < 0x4000000; base += 0x100000) {
+ of_printf("Trying 0x%016lx\n", base);
+ if (of_claim((void*)base, size) != OF_FAILURE) {
+ space_base = base + size;
+ return base;
+ }
+ }
+ return 0;
+ }
+}
+
+/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges
+ * property. The values are bad, and it doesn't even have the
+ * right number of cells. */
+
+static void __init boot_of_fix_maple(void)
+{
+ int isa;
+ const char *ranges = "ranges";
+ u32 isa_ranges[3];
+ const u32 isa_test[] = { 0x00000001, 0xf4000000, 0x00010000 };
+ const u32 isa_fixed[] = {
+ 0x00000001,
+ 0x00000000,
+ 0x00000000, /* 0xf4000000, matt says this */
+ 0x00000000,
+ 0x00000000,
+ 0x00010000
+ };
+
+ isa = of_finddevice("/ht@0/isa@4");
+ if (isa != OF_FAILURE) {
+ if (of_getproplen(isa, ranges) == sizeof (isa_test)) {
+ of_getprop(isa, ranges, isa_ranges, sizeof (isa_ranges));
+ if (memcmp(isa_ranges, isa_test, sizeof (isa_test)) == 0) {
+ int rc;
+
+ of_printf("OF: fixing bogus ISA range on maple\n");
+ rc = of_setprop(isa, ranges, isa_fixed, sizeof (isa_fixed));
+ if (rc == OF_FAILURE) {
+ of_panic("of_setprop() failed\n");
+ }
+ }
+ }
+ }
+}
+
+static int __init boot_of_serial(void *oftree)
+{
+ int n;
+ int p;
+ int rc;
+ u32 val[3];
+ char buf[128];
+
+ n = of_instance_to_package(of_out);
+ if (n == OF_FAILURE) {
+ of_panic("instance-to-package of /chosen/stdout: failed\n");
+ }
+
+ /* prune this from the oftree */
+ rc = of_package_to_path(n, buf, sizeof(buf));
+ if (rc == OF_FAILURE) {
+ of_panic("package-to-path of /chosen/stdout: failed\n");
+ }
+ of_printf("Pruning from devtree: %s\n"
+ " since Xen will be using it for console\n", buf);
+ rc = ofd_prune_path(oftree, buf);
+ if (rc < 0) {
+ of_panic("prune path \"%s\" failed\n", buf);
+ }
+
+
+ p = of_getparent(n);
+ if (p == OF_FAILURE) {
+ of_panic("no parent for: 0x%x\n", n);
+ }
+
+ buf[0] = '\0';
+ of_getprop(p, "device_type", buf, sizeof (buf));
+ if (strstr(buf, "isa") == NULL) {
+ of_panic("only ISA UARTS supported\n");
+ }
+
+ /* should get this from devtree */
+ isa_io_base = 0xf4000000;
+ of_printf("%s: ISA base: 0x%lx\n", __func__, isa_io_base);
+
+ buf[0] = '\0';
+ of_getprop(n, "device_type", buf, sizeof (buf));
+ if (strstr(buf, "serial") == NULL) {
+ of_panic("only UARTS supported\n");
+ }
+
+ rc = of_getprop(n, "reg", val, sizeof (val));
+ if (rc == OF_FAILURE) {
+ of_panic("%s: no location for serial port\n", __func__);
+ }
+ ns16550.io_base = val[1];
+
+ ns16550.baud = BAUD_AUTO;
+ ns16550.data_bits = 8;
+ ns16550.parity = 'n';
+ ns16550.stop_bits = 1;
+
+ rc = of_getprop(n, "interrupts", val, sizeof (val));
+ if (rc == OF_FAILURE) {
+ of_printf("%s: no ISRC, forcing poll mode\n", __func__);
+ ns16550.irq = 0;
+ } else {
+ ns16550.irq = val[0];
+ of_printf("%s: ISRC=0x%x, but forcing poll mode\n",
+ __func__, ns16550.irq);
+ ns16550.irq = 0;
+ }
+
+ return 1;
+}
+
+static void boot_of_module(ulong r3, ulong r4, multiboot_info_t *mbi)
+{
+ static module_t mods[3];
+ void *oftree;
+ ulong oftree_sz = 48 * PAGE_SIZE;
+ char *mod0_start;
+ ulong mod0_size;
+ ulong mod0;
+ static const char sepr[] = " -- ";
+ extern char dom0_start[] __attribute__ ((weak));
+ extern char dom0_size[] __attribute__ ((weak));
+ const char *p;
+
+ if ((r3 > 0) && (r4 > 0)) {
+ /* was it handed to us in registers ? */
+ mod0_start = (void *)r3;
+ mod0_size = r4;
+ } else {
+ /* see if it is in the boot params */
+ p = strstr((char *)((ulong)mbi->cmdline), "dom0_start=");
+ if ( p != NULL) {
+ p += 11;
+ mod0_start = (char *)simple_strtoul(p, NULL, 0);
+
+ p = strstr((char *)((ulong)mbi->cmdline), "dom0_size=");
+ p += 10;
+ mod0_size = simple_strtoul(p, NULL, 0);
+
+ of_printf("mod0: %o %c %c %c\n",
+ mod0_start[0],
+ mod0_start[1],
+ mod0_start[2],
+ mod0_start[3]);
+
+ } else if ( ((ulong)dom0_start != 0) && ((ulong)dom0_size != 0) ) {
+ /* was it linked in ? */
+
+ mod0_start = dom0_start;
+ mod0_size = (ulong)dom0_size;
+ of_printf("%s: linked in module copied after _end "
+ "(start 0x%p size 0x%lx)\n",
+ __func__, mod0_start, mod0_size);
+ } else {
+ mod0_start = _end;
+ mod0_size = 0;
+ }
+ }
+
+ space_base = (ulong)_end;
+ mod0 = find_space(mod0_size, PAGE_SIZE, mbi);
+
+ /* three cases
+ * 1) mod0_size is not 0 and the image can be copied
+ * 2) mod0_size is not 0 and the image cannot be copied
+ * 3) mod0_size is 0
+ */
+ if (mod0_size > 0) {
+ if (mod0 != 0) {
+ memcpy((void *)mod0, mod0_start, mod0_size);
+ mods[0].mod_start = mod0;
+ mods[0].mod_end = mod0 + mod0_size;
+ } else {
+ of_panic("No space to copy mod0\n");
+ }
+ } else {
+ mods[0].mod_start = mod0;
+ mods[0].mod_end = mod0;
+ }
+
+ of_printf("%s: mod[0] @ 0x%016x[0x%x]\n", __func__,
+ mods[0].mod_start, mods[0].mod_end);
+ p = strstr((char *)(ulong)mbi->cmdline, sepr);
+ if (p != NULL) {
+ p += sizeof (sepr) - 1;
+ mods[0].string = (u32)(ulong)p;
+ of_printf("%s: mod[0].string: %s\n", __func__, p);
+ }
+
+ /* snapshot the tree */
+ oftree = (void*)find_space(oftree_sz, PAGE_SIZE, mbi);
+ if (oftree == 0) of_panic("Could not allocate OFD tree\n");
+
+ of_printf("creating oftree\n");
+ of_test("package-to-path");
+ ofd_create(oftree, oftree_sz);
+ pkg_save(oftree);
+
+ boot_of_fixup_refs(oftree);
+ boot_of_fixup_chosen(oftree);
+
+ ofd_walk(oftree, OFD_ROOT, /* add_hype_props */ NULL, 2);
+
+ mods[1].mod_start = (ulong)oftree;
+ mods[1].mod_end = mods[1].mod_start + oftree_sz;
+ of_printf("%s: mod[1] @ 0x%016x[0x%x]\n", __func__,
+ mods[1].mod_start, mods[1].mod_end);
+
+
+ mbi->flags |= MBI_MODULES;
+ mbi->mods_count = 2;
+ mbi->mods_addr = (u32)mods;
+
+ boot_of_serial(oftree);
+}
+
+static int __init boot_of_cpus(void)
+{
+ int cpus;
+ int cpu;
+ int result;
+ u32 cpu_clock[2];
+
+ cpus = of_finddevice("/cpus");
+ cpu = of_getchild(cpus);
+ result = of_getprop(cpu, "timebase-frequency", &timebase_freq,
+ sizeof(timebase_freq));
+ if (result == OF_FAILURE) {
+ of_panic("Couldn't get timebase frequency!\n");
+ }
+ of_printf("OF: timebase-frequency = %d Hz\n", timebase_freq);
+
+ result = of_getprop(cpu, "clock-frequency", &cpu_clock, sizeof(cpu_clock));
+ if (result == OF_FAILURE || (result !=4 && result != 8)) {
+ of_panic("Couldn't get clock frequency!\n");
+ }
+ cpu_khz = cpu_clock[0];
+ if (result == 8) {
+ cpu_khz <<= 32;
+ cpu_khz |= cpu_clock[1];
+ }
+ cpu_khz /= 1000;
+ of_printf("OF: clock-frequency = %ld KHz\n", cpu_khz);
+
+ /* FIXME: should not depend on the boot CPU being the first child */
+ cpu = of_getpeer(cpu);
+ while (cpu > 0) {
+ of_start_cpu(cpu, (ulong)spin_start, 0);
+ cpu = of_getpeer(cpu);
+ }
+ return 1;
+}
+
+static int __init boot_of_rtas(void)
+{
+ return 1;
+}
+
+multiboot_info_t __init *boot_of_init(
+ ulong r3, ulong r4, ulong vec, ulong r6, ulong r7, ulong orig_msr)
+{
+ static multiboot_info_t mbi;
+
+ of_vec = vec;
+ of_msr = orig_msr;
+
+ bof_chosen = of_finddevice("/chosen");
+ of_getprop(bof_chosen, "stdout", &of_out, sizeof (of_out));
+
+ of_printf("%s\n", "---------------------------------------------------");
+ of_printf("OF: Xen/PPC version %d.%d%s (%s@%s) (%s) %s\n",
+ XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION,
+ XEN_COMPILE_BY, XEN_COMPILE_DOMAIN,
+ XEN_COMPILER, XEN_COMPILE_DATE);
+
+ of_printf("%s args: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n"
+ "boot msr: 0x%lx\n",
+ __func__,
+ r3, r4, vec, r6, r7, orig_msr);
+
+ if ((vec >= (ulong)_start) && (vec <= (ulong)_end)) {
+ of_printf("Hmm.. OF[0x%lx] seems to have stepped on our image "
+ "that ranges: %p .. %p.\n HANG!\n",
+ vec, _start, _end);
+ }
+ of_printf("%s: _start %p _end %p 0x%lx\n", __func__, _start, _end, r6);
+
+ boot_of_fix_maple();
+ boot_of_probemem(&mbi);
+ boot_of_bootargs(&mbi);
+ boot_of_module(r3, r4, &mbi);
+ boot_of_cpus();
+ boot_of_rtas();
+
+ /* end of OF */
+ of_call("quiesce", 0, 0, NULL);
+
+ return &mbi;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/powerpc/dart.c b/xen/arch/powerpc/dart.c
new file mode 100644
index 0000000000..50d644122f
--- /dev/null
+++ b/xen/arch/powerpc/dart.c
@@ -0,0 +1,299 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <asm/cache.h>
+#include <xen/init.h>
+#include "tce.h"
+#include "iommu.h"
+#include "dart.h"
+#include "oftree.h"
+#include "of-devtree.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+static int dbg_after;
+#define DBG_SET_AFTER dbg_after = 1;
+#define DBG_AFTER(fmt...) if (dbg_after) DBG(fmt)
+#else
+#define DBG(fmt...)
+#define DBG_SET_AFTER
+#define DBG_AFTER(fmt...)
+#endif
+
+/* Max size of 512 pages */
+#define U3_LOG_MAX_PAGES 9
+
+#define DART_DEF_BASE 0xf8033000UL
+#define DART_NONE 0
+#define DART_U3 3
+#define DART_U4 4
+#define DART_WRITE 0x1
+#define DART_READ 0x2
+
+static ulong dummy_page;
+static ulong dart_entries;
+static struct dart_ops *dops;
+static u32 *dart_table;
+
+union dart_entry {
+ u32 de_word;
+ struct {
+ u32 de_v:1; /* valid */
+ u32 de_rp:1; /* read protected*/
+ u32 de_wp:1; /* write protected*/
+ u32 _de_res:5;
+ u32 de_ppn:24; /* 24 bit Physical Page Number
+ * representing address [28:51] */
+ } de_bits;
+};
+
+struct dma_window {
+ u32 dw_liobn;
+ u32 dw_base_hi;
+ u64 dw_base;
+ u64 dw_size;
+};
+
+struct dart_info {
+ struct dma_window di_window;
+ ulong di_base;
+ int di_model;
+};
+
+static u32 dart_encode(int perm, ulong rpn)
+{
+ union dart_entry e;
+
+ e.de_word = 0;
+ e.de_bits.de_v = 1;
+ e.de_bits.de_ppn = rpn;
+
+ /* protect the page */
+ e.de_bits.de_rp = 1;
+ e.de_bits.de_wp = 1;
+ if (perm & DART_READ) {
+ e.de_bits.de_rp = 0;
+ }
+ if (perm & DART_WRITE) {
+ e.de_bits.de_wp = 0;
+ }
+
+ return e.de_word;
+}
+
+static void dart_fill(ulong index, int perm, ulong rpg, ulong num_pg)
+{
+ u32 volatile *entry = dart_table + index;
+ ulong i = 0;
+ ulong last_flush = 0;
+
+ while (1) {
+ entry[i] = dart_encode(perm, rpg);
+ ++i;
+ ++rpg;
+ if (i == num_pg) break;
+
+ if (((ulong)&entry[i]) % CACHE_LINE_SIZE == 0) {
+ last_flush = (ulong)&entry[i - 1];
+ dcbst(last_flush);
+ }
+ }
+ dcbst((ulong) &entry[i - 1]);
+}
+
+static void dart_clear(ulong index, ulong num_pg)
+{
+ u32 *entry = dart_table + index;
+ ulong i = 0;
+ ulong rpg = dummy_page;
+ ulong last_flush = 0;
+
+ while (1) {
+ entry[i] = dart_encode(DART_READ | DART_WRITE, rpg);
+ ++i;
+ if (i == num_pg) break;
+
+ if (((ulong)&entry[i]) % CACHE_LINE_SIZE == 0) {
+ last_flush = (ulong)&entry[i - 1];
+ dcbst(last_flush);
+ }
+ }
+ dcbst((ulong)&entry[i - 1]);
+}
+
+static int dart_put(ulong ioba, union tce tce)
+{
+ ulong index = ioba >> PAGE_SHIFT;
+
+ if (index >= dart_entries) {
+ return -1;
+ }
+
+ if (tce.tce_bits.tce_vlps != 0 || tce.tce_bits.tce_lpx != 0) {
+ panic("no support for large TCEs\n");
+ }
+
+ if (tce.tce_bits.tce_read == 0 &&
+ tce.tce_bits.tce_write == 0) {
+ /* the TCE table is initialized by the domain with a bunch of
+ * zero-permission puts. We are only interested in debugging the
+ * ones after the first put */
+ DBG_AFTER(">DART[0x%lx] clear\n", index);
+ dart_clear(index, 1);
+ } else {
+ unsigned perm = 0;
+
+ if (tce.tce_bits.tce_read)
+ perm |= DART_READ;
+ if (tce.tce_bits.tce_write)
+ perm |= DART_WRITE;
+
+ DBG("<DART[0x%lx]: ioba: 0x%lx perm:%x[%c%c] rpn:0x%lx\n",
+ index, ioba, perm,
+ (perm & DART_READ) ? 'R' : '-',
+ (perm & DART_WRITE) ? 'W' : '-',
+ (ulong)tce.tce_bits.tce_rpn);
+ DBG_SET_AFTER;
+
+ dart_fill(index, perm, tce.tce_bits.tce_rpn, 1);
+ }
+ dops->do_inv_entry(tce.tce_bits.tce_rpn);
+
+ return 0;
+}
+
+static int find_dart(struct dart_info *di)
+{
+ int rc;
+ void *ofd_p;
+ ofdn_t n;
+ char compat[128];
+
+
+ if (on_mambo()) {
+ /* mambo has no dart */
+ DBG("%s: Mambo does not support a dart\n", __func__);
+ return -1;
+ }
+
+ ofd_p = (void *)oftree;
+ n = ofd_node_find(ofd_p, "/ht");
+ if (n <= 0)
+ return -1;
+
+ /* get the defaults from the HT node model */
+ rc = ofd_getprop(ofd_p, n, "compatible", compat, sizeof (compat));
+ if (rc <= 0)
+ return -1;
+
+ di->di_base = DART_DEF_BASE;
+
+ if (strstr(compat, "u3")) {
+ di->di_model = DART_U3;
+ } else if (strstr(compat, "u4")) {
+ di->di_model = DART_U4;
+ } else {
+ DBG("%s: not a U3 or U4\n", __func__);
+ return -1;
+ }
+ /* FIXME: this should actually be the HT reg value */
+ di->di_window.dw_liobn = 0;
+ di->di_window.dw_base_hi = 0;
+ di->di_window.dw_base = 0;
+
+ /* let's see if the devtree has more info */
+ n = ofd_node_find(ofd_p, "/dart");
+ if (n > 0) {
+ ulong base;
+
+ rc = ofd_getprop(ofd_p, n, "compatible", compat, sizeof (compat));
+ if (rc > 0) {
+ if (strstr(compat, "u4")) {
+ di->di_model = DART_U4;
+ }
+ }
+
+ rc = ofd_getprop(ofd_p, n, "reg", &base, sizeof (base));
+ if (rc > 0) {
+ di->di_base = base;
+ }
+ }
+ return 0;
+}
+
+static int init_dart(void)
+{
+ ulong log_pgs;
+ void *ofd_p;
+ ofdn_t n;
+ struct dart_info di;
+
+ if (find_dart(&di))
+ return 0;
+
+ /* Max size of 512 pages == 2MB == 1<<21. That size is good enough for U4 */
+ log_pgs = U3_LOG_MAX_PAGES;
+ dart_table = alloc_xenheap_pages(log_pgs);
+ BUG_ON(dart_table == NULL);
+
+ dart_entries = (1UL << (log_pgs + PAGE_SHIFT)) / sizeof (union dart_entry);
+ di.di_window.dw_size = dart_entries << PAGE_SHIFT;
+
+ /* Linux uses a dummy page, filling "empty" DART entries with a
+ reference to this page to capture stray DMAs */
+ dummy_page = (ulong)alloc_xenheap_pages(1);
+ memset((void *)dummy_page, 0, PAGE_SIZE);
+ dummy_page >>= PAGE_SHIFT;
+
+ printk("Initializing DART 0x%lx: tbl: %p[0x%lx] entries: 0x%lx\n",
+ di.di_base, dart_table, 1UL << log_pgs, dart_entries);
+
+ /* register this iommu */
+ iommu_register(di.di_window.dw_liobn, dart_put);
+
+ switch (di.di_model) {
+ case DART_U3:
+ dops = u3_init(di.di_base, (ulong)dart_table, 1UL << log_pgs);
+ break;
+ case DART_U4:
+ dops = u4_init(di.di_base, (ulong)dart_table, 1UL << log_pgs);
+ break;
+ }
+
+ dart_clear(0, dart_entries);
+ dops->do_inv_all();
+
+ /* fix up the devtree */
+ ofd_p = (void *)oftree;
+ n = ofd_node_find(ofd_p, "/ht");
+ if (n > 0) {
+ di.di_window.dw_size = dart_entries << PAGE_SHIFT;
+ ofd_prop_add(ofd_p, n, "ibm,dma-window", &di.di_window,
+ sizeof (di.di_window));
+ } else {
+ panic("%s: no /ht node\n", __func__);
+ }
+ return 0;
+}
+__initcall(init_dart);
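For reference, the sizing in init_dart() works out as follows, assuming 4 KiB pages and 4-byte entries (a quick standalone check, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long log_pgs   = 9;                  /* U3_LOG_MAX_PAGES */
        unsigned long page_size = 4096;               /* 1 << PAGE_SHIFT */
        unsigned long table     = (1UL << log_pgs) * page_size;  /* bytes */
        unsigned long entries   = table / 4;          /* sizeof(union dart_entry) */
        unsigned long window_mb = (entries * page_size) >> 20;

        /* prints: table 2097152 bytes, 524288 entries, 2048 MiB DMA window */
        printf("table %lu bytes, %lu entries, %lu MiB DMA window\n",
               table, entries, window_mb);
        return 0;
    }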
diff --git a/xen/arch/powerpc/dart.h b/xen/arch/powerpc/dart.h
new file mode 100644
index 0000000000..a023dabbfb
--- /dev/null
+++ b/xen/arch/powerpc/dart.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _DART_H
+#define _DART_H
+
+#include <xen/config.h>
+#include <xen/types.h>
+
+struct dart_ops {
+ void (*do_inv_all)(void);
+ void (*do_inv_entry)(ulong pg);
+};
+
+extern struct dart_ops *u3_init(ulong base, ulong table, ulong dart_pages);
+extern struct dart_ops *u4_init(ulong base, ulong table, ulong dart_pages);
+
+#endif /* _DART_H */
+
diff --git a/xen/arch/powerpc/dart_u3.c b/xen/arch/powerpc/dart_u3.c
new file mode 100644
index 0000000000..d1b38665b3
--- /dev/null
+++ b/xen/arch/powerpc/dart_u3.c
@@ -0,0 +1,108 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <public/xen.h>
+#include <asm/io.h>
+#include <asm/current.h>
+#include "tce.h"
+#include "iommu.h"
+#include "dart.h"
+
+union dart_ctl {
+ u32 dc_word;
+ struct {
+ u32 dc_base:20;
+ u32 dc_stop_access:1;
+ u32 dc_invtlb:1;
+ u32 dc_enable:1;
+ u32 dc_size:9;
+ } reg;
+};
+
+static u32 volatile *dart_ctl_reg;
+
+static void u3_inv_all(void)
+{
+ union dart_ctl dc;
+ ulong r = 0;
+ int l = 0;
+
+ for (;;) {
+ dc.dc_word = in_32(dart_ctl_reg);
+ dc.reg.dc_invtlb = 1;
+ out_32(dart_ctl_reg, dc.dc_word);
+
+ do {
+ dc.dc_word = in_32(dart_ctl_reg);
+ r++;
+ } while ((dc.reg.dc_invtlb == 1) && (r < (1 << l)));
+
+ if (r == (1 << l)) {
+ if (l < 4) {
+ l++;
+ dc.dc_word = in_32(dart_ctl_reg);
+ dc.reg.dc_invtlb = 0;
+ out_32(dart_ctl_reg, dc.dc_word);
+ continue;
+ } else {
+ panic(" broken U3???\n");
+ }
+ }
+ return;
+ }
+}
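The loop above polls the invalidate bit with a budget that doubles on each pass, giving up once the budget reaches 1 << 4. A compact standalone sketch of that retry pattern; busy() stands in for reading the control register, and the real code additionally rewrites the register on each pass:

    #include <stdio.h>

    /* Stand-in for "the invalidate bit is still set in the control register". */
    static int busy(void)
    {
        static int polls_left = 5;       /* pretend the flush takes 5 polls */
        return polls_left-- > 0;
    }

    int main(void)
    {
        unsigned long r = 0;             /* total polls so far */
        int l = 0;                       /* log2 of the current poll budget */

        for (;;) {
            while (busy() && r < (1UL << l))
                r++;

            if (r == (1UL << l)) {       /* budget exhausted, still busy */
                if (l < 4) {
                    l++;                 /* double the budget and retry */
                    continue;
                }
                printf("giving up\n");   /* the real code panics here */
                return 1;
            }
            printf("flushed after %lu polls\n", r);
            return 0;
        }
    }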
+
+static void u3_inv_entry(ulong pg)
+{
+ /* sadly single entry invalidation has been reported not to work */
+ u3_inv_all();
+}
+
+static struct dart_ops u3_ops = {
+ .do_inv_all = u3_inv_all,
+ .do_inv_entry = u3_inv_entry,
+};
+
+struct dart_ops *u3_init(ulong base, ulong table, ulong dart_pages)
+{
+ union dart_ctl dc;
+
+ dart_ctl_reg = (u32 *)base;
+
+ dc.dc_word = 0;
+
+ dc.reg.dc_base = table >> PAGE_SHIFT;
+ dc.reg.dc_size = dart_pages;
+ dc.reg.dc_enable = 1;
+
+
+ printk("Initializing DART Model U3: reg: %p word: %x\n",
+ dart_ctl_reg, dc.dc_word);
+
+ out_32(dart_ctl_reg, dc.dc_word);
+
+ return &u3_ops;
+}
diff --git a/xen/arch/powerpc/dart_u4.c b/xen/arch/powerpc/dart_u4.c
new file mode 100644
index 0000000000..c3495cfce3
--- /dev/null
+++ b/xen/arch/powerpc/dart_u4.c
@@ -0,0 +1,177 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <public/xen.h>
+#include <asm/io.h>
+#include <asm/current.h>
+#include "tce.h"
+#include "iommu.h"
+#include "dart.h"
+
+#define TOO_MANY_RETRIES ~0
+
+union dart_ctl {
+ u32 dc_word;
+ struct {
+ u32 dc_darten:1; /* DART Enable (0:disabled) */
+ u32 dc_ione:1; /* Invalidate one DART TLB entry (using ILPN) */
+ u32 dc_iall:1; /* Invalidate all DART TLB entries */
+ u32 dc_idle:1; /* DART is idle */
+ u32 dc_peen:1; /* Parity Checking is enabled */
+ u32 dc_ilpn:27; /* 27-bit Logical Page Address for
+ * invalidating one TLB entry */
+ } dc_bits;
+};
+
+union dart_base {
+ u32 db_word;
+ struct {
+ u32 _db_resv:8;
+ u32 db_dartbase:24; /* Base Address of DART (4K byte Alignment) */
+ } db_bits;
+};
+
+union dart_size {
+ u32 ds_word;
+ struct {
+ u32 _ds_resv:15;
+ u32 ds_dartsize:17; /* Size of Dart in 4K-Byte Pages */
+ } ds_bits;
+};
+
+union dart_excp {
+ u32 de_word;
+ struct {
+ u32 de_rqsrc:1; /* Request Source. [0:PCIE, 1:HT] */
+ u32 de_lpn:27; /* 27-bit Logical Address of Exception [25:51] */
+ u32 de_rqop:1; /* Request operation. [0:Read, 1:Write] */
+ u32 de_xcd:3; /* Exception code */
+ } de_bits;
+};
+
+struct dart {
+ /* 0x00 */
+ union dart_ctl d_dartcntl;
+ u32 _pad0x04_0x10[3];
+ /* 0x10 */
+ union dart_base d_dartbase;
+ u32 _pad0x14_0x20[3];
+ /* 0x20 */
+ union dart_size d_dartsize;
+ u32 _pad0x24_0x30[3];
+ /* 0x30 */
+ union dart_excp d_dartexcp;
+ u32 _pad0x34_0x40[3];
+};
+
+static volatile struct dart *dart;
+
+static void u4_inv_all(void)
+{
+ union dart_ctl dc;
+ ulong r = 0;
+ int l = 0;
+
+ for (;;) {
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ dc.dc_bits.dc_iall = 1;
+ out_32(&dart->d_dartcntl.dc_word, dc.dc_word);
+
+ do {
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ r++;
+ } while ((dc.dc_bits.dc_iall == 1) && (r < (1 << l)));
+
+ if (r == (1 << l)) {
+ if (l < 4) {
+ l++;
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ dc.dc_bits.dc_iall = 0;
+ out_32(&dart->d_dartcntl.dc_word, dc.dc_word);
+ continue;
+ } else {
+ panic(" broken U4???\n");
+ }
+ }
+ return;
+ }
+}
+
+static void u4_inv_entry(ulong pgn)
+{
+ union dart_ctl dc;
+ ulong retries = 0;
+
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ dc.dc_bits.dc_ilpn = pgn;
+ dc.dc_bits.dc_ione = 1;
+ out_32(&dart->d_dartcntl.dc_word, dc.dc_word);
+
+ /* wait for completion */
+ /* FIXME: since we do this from the HV do we need to wait?! */
+ do {
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ retries++;
+ if (retries > 1000000)
+ panic("WAY! too long\n");
+ } while (dc.dc_bits.dc_ione != 0);
+}
+
+static struct dart_ops u4_ops = {
+ .do_inv_all = u4_inv_all,
+ .do_inv_entry = u4_inv_entry,
+};
+
+struct dart_ops *u4_init(ulong base, ulong table, ulong dart_pages)
+{
+ union dart_base db;
+ union dart_size ds;
+ union dart_ctl dc;
+
+ dart = (struct dart *)base;
+
+ db.db_word = 0;
+ db.db_bits.db_dartbase = table >> PAGE_SHIFT;
+
+ ds.ds_word = 0;
+ ds.ds_bits.ds_dartsize = dart_pages;
+
+ dc.dc_word = in_32(&dart->d_dartcntl.dc_word);
+ if (dc.dc_bits.dc_darten == 1) {
+ panic("%s: dart is already enabled: 0x%x\n", __func__, dc.dc_word);
+ }
+ dc.dc_bits.dc_darten = 1; /* enable it */
+
+ printk("Initializing DART Model U4: ctl: 0x%x base: 0x%x size: 0x%x\n",
+ dc.dc_word, db.db_word, ds.ds_word);
+
+ out_32(&dart->d_dartbase.db_word, db.db_word);
+ out_32(&dart->d_dartsize.ds_word, ds.ds_word);
+ out_32(&dart->d_dartcntl.dc_word, dc.dc_word);
+
+ return &u4_ops;
+}
diff --git a/xen/arch/powerpc/delay.c b/xen/arch/powerpc/delay.c
new file mode 100644
index 0000000000..e83b687dce
--- /dev/null
+++ b/xen/arch/powerpc/delay.c
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/delay.h>
+#include <xen/time.h>
+#include <asm/processor.h>
+
+void udelay(unsigned long usecs)
+{
+ ulong ticks = usecs * ticks_per_usec;
+ ulong s;
+ ulong e;
+
+ s = get_timebase();
+ do {
+ asm volatile("or 1,1,1"); /* also drops the thread to low priority */
+ e = get_timebase();
+ } while ((e-s) < ticks);
+}
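A rough user-space analogue of this busy-wait, with clock_gettime() standing in for the timebase register; there is no portable equivalent of the "or 1,1,1" low-priority hint, so it is simply omitted:

    #include <stdio.h>
    #include <time.h>

    /* Busy-wait for roughly 'usecs' microseconds: same structure as udelay()
     * above, but reading a monotonic clock instead of the timebase. */
    static void busy_udelay(unsigned long usecs)
    {
        struct timespec s, e;

        clock_gettime(CLOCK_MONOTONIC, &s);
        do {
            clock_gettime(CLOCK_MONOTONIC, &e);
        } while ((unsigned long)((e.tv_sec - s.tv_sec) * 1000000L +
                                 (e.tv_nsec - s.tv_nsec) / 1000L) < usecs);
    }

    int main(void)
    {
        busy_udelay(100000);            /* spin for ~100 ms */
        printf("done\n");
        return 0;
    }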
diff --git a/xen/arch/powerpc/dom0_ops.c b/xen/arch/powerpc/dom0_ops.c
new file mode 100644
index 0000000000..c83ed3c338
--- /dev/null
+++ b/xen/arch/powerpc/dom0_ops.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/guest_access.h>
+#include <public/xen.h>
+#include <public/dom0_ops.h>
+
+extern void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c);
+extern long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op);
+
+void arch_getdomaininfo_ctxt(struct vcpu *v, vcpu_guest_context_t *c)
+{
+ memcpy(&c->user_regs, &v->arch.ctxt, sizeof(struct cpu_user_regs));
+ /* XXX fill in rest of vcpu_guest_context_t */
+}
+
+long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
+{
+ long ret = 0;
+
+ switch (op->cmd) {
+ case DOM0_GETMEMLIST: {
+ /* XXX 64M hackage */
+ const int memsize = (64UL<<20);
+ int domain_pfns = memsize>>12;
+ int max_pfns = op->u.getmemlist.max_pfns;
+ int domid = op->u.getmemlist.domain;
+ int i;
+
+ for (i = 0; (i < max_pfns) && (i < domain_pfns); i++) {
+ xen_pfn_t mfn = (((domid + 1) * memsize) >> 12) + i;
+ if (copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1)) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+ op->u.getmemlist.num_pfns = i;
+ copy_to_guest(u_dom0_op, op, 1);
+ }
+ break;
+
+ case DOM0_PHYSINFO:
+ {
+ dom0_physinfo_t *pi = &op->u.physinfo;
+
+ pi->threads_per_core = 1;
+ pi->cores_per_socket = 1;
+ pi->sockets_per_node = 1;
+ pi->nr_nodes = 1;
+ pi->total_pages = total_pages;
+ pi->free_pages = avail_domheap_pages();
+ pi->cpu_khz = cpu_khz;
+ memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
+ ret = 0;
+ if ( copy_to_guest(u_dom0_op, op, 1) )
+ ret = -EFAULT;
+ }
+ break;
+
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
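Behind the "64M hackage": each domain is given the 64 MB chunk starting at (domid + 1) * 64 MB, so the machine frames returned are consecutive starting at ((domid + 1) * 64 MB) >> 12. A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long memsize = 64UL << 20;          /* 64 MB per domain */
        unsigned int domid = 1;

        unsigned long first_mfn = ((domid + 1) * memsize) >> 12;
        unsigned long base_addr = first_mfn << 12;

        /* domain 1 -> first mfn 0x8000, i.e. physical 128 MB */
        printf("first mfn 0x%lx, base %lu MB\n", first_mfn, base_addr >> 20);
        return 0;
    }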
diff --git a/xen/arch/powerpc/domain.c b/xen/arch/powerpc/domain.c
new file mode 100644
index 0000000000..8e4041982f
--- /dev/null
+++ b/xen/arch/powerpc/domain.c
@@ -0,0 +1,251 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <stdarg.h>
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <xen/serial.h>
+#include <xen/domain.h>
+#include <xen/console.h>
+#include <xen/reboot.h>
+#include <asm/htab.h>
+#include <asm/current.h>
+#include <asm/hcalls.h>
+
+extern void idle_loop(void);
+
+#define next_arg(fmt, args) ({ \
+ unsigned long __arg; \
+ switch ( *(fmt)++ ) \
+ { \
+ case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \
+ case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \
+ case 'p': __arg = (unsigned long)va_arg(args, void *); break; \
+ case 'h': __arg = (unsigned long)va_arg(args, void *); break; \
+ default: __arg = 0; BUG(); \
+ } \
+ __arg; \
+})
+
+unsigned long hypercall_create_continuation(unsigned int op,
+ const char *format, ...)
+{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const char *p = format;
+ va_list args;
+ int gprnum = 4;
+ int i;
+
+ va_start(args, format);
+
+ regs->pc -= 4; /* re-execute 'sc' */
+
+ for (i = 0; *p != '\0'; i++) {
+ regs->gprs[gprnum++] = next_arg(p, args);
+ }
+
+ va_end(args);
+
+ /* As luck would have it, we use the same register for hcall opcodes and
+ * for hcall return values. The return value from this function is placed
+ * in r3 on return, so modifying regs->gprs[3] would have no effect. */
+ return XEN_MARK(op);
+}
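A standalone mock of the argument marshalling above, showing how each format character pulls one variadic argument and deposits it in the next guest GPR starting at r4 (an illustration only, not the hypervisor routine itself):

    #include <stdarg.h>
    #include <stdio.h>

    /* Mimics the next_arg()/GPR-filling loop: 'i' -> unsigned int,
     * 'l' -> unsigned long, 'p'/'h' -> pointer or handle. */
    static void show_continuation(const char *format, ...)
    {
        va_list args;
        int gprnum = 4;

        va_start(args, format);
        for (const char *p = format; *p != '\0'; p++) {
            unsigned long arg;
            switch (*p) {
            case 'i': arg = va_arg(args, unsigned int); break;
            case 'l': arg = va_arg(args, unsigned long); break;
            case 'p':
            case 'h': arg = (unsigned long)va_arg(args, void *); break;
            default:  arg = 0; break;
            }
            printf("r%d <- 0x%lx ('%c')\n", gprnum++, arg, *p);
        }
        va_end(args);
    }

    int main(void)
    {
        int cmd = 7;
        void *handle = (void *)0x1000;
        show_continuation("ih", cmd, handle);   /* r4 <- 0x7, r5 <- 0x1000 */
        return 0;
    }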
+
+int arch_domain_create(struct domain *d)
+{
+
+ if (d->domain_id == IDLE_DOMAIN_ID) {
+ d->shared_info = (void *)alloc_xenheap_page();
+ clear_page(d->shared_info);
+
+ return 0;
+ }
+
+ /* XXX the hackage... hardcode 64M domains */
+ d->arch.rma_base = (64<<20) * (d->domain_id + 1);
+ d->arch.rma_size = (64<<20);
+
+ printk("clearing RMO: 0x%lx[0x%lx]\n", d->arch.rma_base, d->arch.rma_size);
+ memset((void*)d->arch.rma_base, 0, d->arch.rma_size);
+
+ htab_alloc(d, LOG_DEFAULT_HTAB_BYTES);
+
+ d->shared_info = (shared_info_t *)
+ (rma_addr(&d->arch, RMA_SHARED_INFO) + d->arch.rma_base);
+
+ d->arch.large_page_sizes = 1;
+ d->arch.large_page_shift[0] = 24; /* 16 M for 970s */
+
+ return 0;
+}
+
+void arch_domain_destroy(struct domain *d)
+{
+ unimplemented();
+}
+
+void machine_halt(void)
+{
+ printf("machine_halt called: spinning....\n");
+ console_start_sync();
+ while(1);
+}
+
+void machine_restart(char * __unused)
+{
+ printf("machine_restart called: spinning....\n");
+ console_start_sync();
+ while(1);
+}
+
+struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
+{
+ struct vcpu *v;
+
+ if ( (v = xmalloc(struct vcpu)) == NULL )
+ return NULL;
+
+ memset(v, 0, sizeof(*v));
+ v->vcpu_id = vcpu_id;
+
+ return v;
+}
+
+void free_vcpu_struct(struct vcpu *v)
+{
+ BUG_ON(v->next_in_list != NULL);
+ if ( v->vcpu_id != 0 )
+ v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
+ xfree(v);
+}
+
+int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
+{
+ memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));
+
+ set_bit(_VCPUF_initialised, &v->vcpu_flags);
+
+ cpu_init_vcpu(v);
+
+ return 0;
+}
+
+void dump_pageframe_info(struct domain *d)
+{
+ struct page_info *page;
+
+ printk("Memory pages belonging to domain %u:\n", d->domain_id);
+
+ if ( d->tot_pages >= 10 )
+ {
+ printk(" DomPage list too long to display\n");
+ }
+ else
+ {
+ list_for_each_entry ( page, &d->page_list, list )
+ {
+ printk(" DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
+ _p(page_to_maddr(page)), _p(page_to_mfn(page)),
+ page->count_info, page->u.inuse.type_info);
+ }
+ }
+
+ list_for_each_entry ( page, &d->xenpage_list, list )
+ {
+ printk(" XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
+ _p(page_to_maddr(page)), _p(page_to_mfn(page)),
+ page->count_info, page->u.inuse.type_info);
+ }
+}
+
+
+void context_switch(struct vcpu *prev, struct vcpu *next)
+{
+ struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
+ cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
+ unsigned int cpu = smp_processor_id();
+
+#if 0
+ printf("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
+ next->domain->domain_id);
+#endif
+
+ /* Allow at most one CPU at a time to be dirty. */
+ ASSERT(cpus_weight(dirty_mask) <= 1);
+ if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
+ {
+ /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+ if (!cpus_empty(next->vcpu_dirty_cpumask))
+ flush_tlb_mask(next->vcpu_dirty_cpumask);
+ }
+
+ /* copy prev guest state off the stack into its vcpu */
+ memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));
+
+ set_current(next);
+
+ /* copy next guest state onto the stack */
+ memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));
+
+ /* save old domain state */
+ save_sprs(prev);
+ save_float(prev);
+ save_segments(prev);
+
+ context_saved(prev);
+
+ /* load up new domain */
+ load_sprs(next);
+ load_float(next);
+ load_segments(next);
+
+ mtsdr1(next->domain->arch.htab.sdr1);
+ local_flush_tlb(); /* XXX maybe flush_tlb_mask? */
+
+ if (is_idle_vcpu(next)) {
+ reset_stack_and_jump(idle_loop);
+ }
+
+ reset_stack_and_jump(full_resume);
+ /* not reached */
+}
+
+void continue_running(struct vcpu *same)
+{
+ /* nothing to do */
+}
+
+void sync_vcpu_execstate(struct vcpu *v)
+{
+ /* XXX for now, for domain destruction, make this non-fatal */
+ printf("%s: called\n", __func__);
+}
+
+void domain_relinquish_resources(struct domain *d)
+{
+ /* nothing to do? */
+}
+
+void arch_dump_domain_info(struct domain *d)
+{
+}
diff --git a/xen/arch/powerpc/domain_build.c b/xen/arch/powerpc/domain_build.c
new file mode 100644
index 0000000000..dfe51f62cd
--- /dev/null
+++ b/xen/arch/powerpc/domain_build.c
@@ -0,0 +1,285 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/elf.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <xen/ctype.h>
+#include <xen/iocap.h>
+#include <xen/compile.h>
+#include <asm/processor.h>
+#include <asm/papr.h>
+#include "oftree.h"
+
+extern int parseelfimage_32(struct domain_setup_info *dsi);
+extern int loadelfimage_32(struct domain_setup_info *dsi);
+
+/* opt_dom0_mem: memory allocated to domain 0. */
+static unsigned int opt_dom0_mem;
+static void parse_dom0_mem(char *s)
+{
+ unsigned long long bytes = parse_size_and_unit(s);
+ /* If no unit is specified we default to kB units, not bytes. */
+ if (isdigit(s[strlen(s)-1]))
+ opt_dom0_mem = (unsigned int)bytes;
+ else
+ opt_dom0_mem = (unsigned int)(bytes >> 10);
+}
+custom_param("dom0_mem", parse_dom0_mem);
+
+int elf_sanity_check(Elf_Ehdr *ehdr)
+{
+ if (IS_ELF(*ehdr))
+ /* we are happy with either */
+ if ((ehdr->e_ident[EI_CLASS] == ELFCLASS32
+ && ehdr->e_machine == EM_PPC)
+ || (ehdr->e_ident[EI_CLASS] == ELFCLASS64
+ && ehdr->e_machine == EM_PPC64)) {
+ if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB
+ && ehdr->e_type == ET_EXEC)
+ return 1;
+ }
+ printk("DOM0 image is not a Xen-compatible Elf image.\n");
+ return 0;
+}
+
+/* adapted from common/elf.c */
+#define RM_MASK(a,l) ((a) & ((1UL << (l)) - 1))
+
+static int rm_loadelfimage_64(struct domain_setup_info *dsi, ulong rma)
+{
+ char *elfbase = (char *)dsi->image_addr;
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)dsi->image_addr;
+ Elf64_Phdr *phdr;
+ int h;
+
+ for (h = 0; h < ehdr->e_phnum; h++ )
+ {
+ phdr = (Elf64_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
+ if (!((phdr->p_type == PT_LOAD) &&
+ ((phdr->p_flags & (PF_W|PF_X)) != 0)))
+ continue;
+
+ if (phdr->p_filesz != 0)
+ memcpy((char *)(rma + RM_MASK(phdr->p_paddr, 42)),
+ elfbase + phdr->p_offset,
+ phdr->p_filesz);
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset((char *)(rma + RM_MASK(phdr->p_paddr, 42) + phdr->p_filesz),
+ 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+#ifdef NOT_YET
+ loadelfsymtab(dsi, 1);
+#endif
+
+ return 0;
+}
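RM_MASK() keeps only the low 42 bits of an address, which is what the kernel effectively uses while executing in real mode. A quick standalone check (the example address is made up):

    #include <stdio.h>

    #define RM_MASK(a,l) ((a) & ((1UL << (l)) - 1))

    int main(void)
    {
        unsigned long vaddr = 0xc000000000012345UL;  /* kernel-space upper bits set */
        /* keeps the low 42 bits: prints 0xc000000000012345 -> 0x12345 */
        printf("0x%lx -> 0x%lx\n", vaddr, RM_MASK(vaddr, 42));
        return 0;
    }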
+
+int construct_dom0(struct domain *d,
+ unsigned long image_start, unsigned long image_len,
+ unsigned long initrd_start, unsigned long initrd_len,
+ char *cmdline)
+{
+ int rc;
+ struct vcpu *v = d->vcpu[0];
+ struct domain_setup_info dsi;
+ ulong dst;
+ u64 *ofh_tree;
+ ulong rma_sz = d->arch.rma_size;
+ ulong rma = d->arch.rma_base;
+ start_info_t *si;
+ ulong eomem;
+ int am64 = 1;
+ ulong msr;
+ ulong pc;
+ ulong r2;
+
+ /* Sanity! */
+ BUG_ON(d->domain_id != 0);
+ BUG_ON(d->vcpu[0] == NULL);
+
+ cpu_init_vcpu(v);
+
+ memset(&dsi, 0, sizeof(struct domain_setup_info));
+ dsi.image_addr = image_start;
+ dsi.image_len = image_len;
+
+ if ((rc = parseelfimage(&dsi)) != 0) {
+ if ((rc = parseelfimage_32(&dsi)) != 0)
+ return rc;
+ am64 = 0;
+ }
+
+ /* the ELF contains virtual addresses whose upper bits can be
+ * masked while running in real mode, so we do the masking as
+ * well */
+ dsi.v_kernstart = RM_MASK(dsi.v_kernstart, 42);
+ dsi.v_kernend = RM_MASK(dsi.v_kernend, 42);
+ dsi.v_kernentry = RM_MASK(dsi.v_kernentry, 42);
+
+ if (dsi.xen_section_string == NULL) {
+ printk("Not a Xen-ELF image: '__xen_guest' section not found.\n");
+ return -EINVAL;
+ }
+ printk("*** LOADING DOMAIN 0 ***\n");
+
+ /* By default DOM0 is allocated all available memory. */
+ d->max_pages = ~0U;
+ d->tot_pages = (d->arch.rma_size >> PAGE_SHIFT);
+
+ ASSERT( image_len < rma_sz );
+
+ si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
+ printk("xen_start_info: %p\n", si);
+
+ sprintf(si->magic, "xen-%i.%i-powerpc%d%s",
+ XEN_VERSION, XEN_SUBVERSION, BITS_PER_LONG, "HV");
+ si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
+
+ si->shared_info = ((ulong)d->shared_info) - rma;
+ printk("shared_info: 0x%lx,%p\n", si->shared_info, d->shared_info);
+
+ eomem = si->shared_info;
+
+ /* allow dom0 to access all of system RAM */
+ d->arch.logical_base_pfn = 128 << (20 - PAGE_SHIFT); /* 128 MB */
+ d->arch.logical_end_pfn = max_page;
+
+ /* number of pages accessible */
+ si->nr_pages = rma_sz >> PAGE_SHIFT;
+
+ si->pt_base = 0;
+ si->nr_pt_frames = 0;
+ si->mfn_list = 0;
+
+ /* OF usually sits here:
+ * - Linux needs it to be loaded before the vmlinux or initrd
+ * - AIX demands it to be @ 32M.
+ */
+ dst = (32 << 20);
+
+ /* put stack below everything */
+ v->arch.ctxt.gprs[1] = dst - STACK_FRAME_OVERHEAD;
+
+ /* copy relative to Xen */
+ dst += rma;
+
+ ASSERT((dst - rma) + (ulong)firmware_image_size < eomem);
+ printk("loading OFH: 0x%lx, RMA: 0x%lx\n", dst, dst - rma);
+ memcpy((void *)dst, firmware_image_start, (ulong)firmware_image_size);
+
+ v->arch.ctxt.gprs[5] = (dst - rma);
+ ofh_tree = (u64 *)(dst + 0x10);
+ ASSERT(*ofh_tree == 0xdeadbeef00000000);
+
+ /* accommodate a modest bss section */
+ dst = ALIGN_UP(dst + (ulong)firmware_image_size + PAGE_SIZE, PAGE_SIZE);
+ ASSERT((dst - rma) + oftree_len < eomem);
+
+ *ofh_tree = dst - rma;
+ printk("loading OFD: 0x%lx RMA: 0x%lx, 0x%lx\n", dst, dst - rma,
+ oftree_len);
+ memcpy((void *)dst, (void *)oftree, oftree_len);
+
+ dst = ALIGN_UP(dst + oftree_len, PAGE_SIZE);
+
+ if (am64) {
+ ulong kbase;
+ ulong *fdesc;
+
+ printk("loading 64-bit Dom0: 0x%lx, in RMA:0x%lx\n", dst, dst - rma);
+ rm_loadelfimage_64(&dsi, dst);
+
+ kbase = dst;
+ /* move dst to end of bss */
+ dst = ALIGN_UP(dsi.v_kernend + dst, PAGE_SIZE);
+
+ if ( initrd_len > 0 ) {
+ ASSERT( (dst - rma) + initrd_len < eomem );
+
+ printk("loading initrd: 0x%lx, 0x%lx\n", dst, initrd_len);
+ memcpy((void *)dst, (void *)initrd_start, initrd_len);
+
+ si->mod_start = dst - rma;
+ si->mod_len = initrd_len;
+
+ dst = ALIGN_UP(dst + initrd_len, PAGE_SIZE);
+ } else {
+ printk("no initrd\n");
+ si->mod_start = 0;
+ si->mod_len = 0;
+ }
+ /* it may be a function descriptor */
+ fdesc = (ulong *)(dsi.v_kernstart + dsi.v_kernentry + kbase);
+
+ if (fdesc[2] == 0
+ && ((fdesc[0] >= dsi.v_kernstart)
+ && (fdesc[0] < dsi.v_kernend)) /* text entry is in range */
+ && ((fdesc[1] >= dsi.v_kernstart) /* toc can be > image */
+ && (fdesc[1] < (dsi.v_kernend + (0x7fff * sizeof (ulong)))))) {
+ /* it is almost certainly a function descriptor */
+ pc = RM_MASK(fdesc[0], 42) + kbase - rma;
+ r2 = RM_MASK(fdesc[1], 42) + kbase - rma;
+ } else {
+ pc = ((ulong)fdesc) - rma;
+ r2 = 0;
+ }
+ msr = MSR_SF;
+ } else {
+ printk("loading 32-bit Dom0: 0x%lx, in RMA:0x%lx\n",
+ dsi.v_kernstart + rma, dsi.v_kernstart);
+ dsi.v_start = rma;
+ loadelfimage_32(&dsi);
+
+ pc = dsi.v_kernentry;
+ r2 = 0;
+ msr = 0;
+ }
+
+ v->arch.ctxt.gprs[3] = si->mod_start;
+ v->arch.ctxt.gprs[4] = si->mod_len;
+
+ memset(si->cmd_line, 0, sizeof(si->cmd_line));
+ if ( cmdline != NULL )
+ strncpy((char *)si->cmd_line, cmdline, sizeof(si->cmd_line)-1);
+
+ v->arch.ctxt.msr = msr;
+ v->arch.ctxt.pc = pc;
+ v->arch.ctxt.gprs[2] = r2;
+
+ printk("DOM: pc = 0x%lx, r2 = 0x%lx\n", pc, r2);
+
+ ofd_dom0_fixup(d, *ofh_tree + rma, si, dst - rma);
+
+ set_bit(_VCPUF_initialised, &v->vcpu_flags);
+
+ rc = 0;
+
+ /* DOM0 is permitted full I/O capabilities. */
+ rc |= iomem_permit_access(dom0, 0UL, ~0UL);
+ rc |= irqs_permit_access(dom0, 0, NR_IRQS-1);
+
+ BUG_ON(rc != 0);
+
+ return 0;
+}
diff --git a/xen/arch/powerpc/elf32.c b/xen/arch/powerpc/elf32.c
new file mode 100644
index 0000000000..9a88fabc7e
--- /dev/null
+++ b/xen/arch/powerpc/elf32.c
@@ -0,0 +1,5 @@
+#define parseelfimage parseelfimage_32
+#define loadelfimage loadelfimage_32
+#define ELFSIZE 32
+#include "../../common/elf.c"
+
diff --git a/xen/arch/powerpc/exceptions.c b/xen/arch/powerpc/exceptions.c
new file mode 100644
index 0000000000..0ddfb19565
--- /dev/null
+++ b/xen/arch/powerpc/exceptions.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/softirq.h>
+#include <xen/sched.h>
+#include <xen/serial.h>
+#include <xen/gdbstub.h>
+#include <public/xen.h>
+#include <asm/time.h>
+
+#undef DEBUG
+#define HDEC_PREEMPT
+
+extern ulong ppc_do_softirq(ulong orig_msr);
+extern void do_timer(struct cpu_user_regs *regs);
+extern void do_dec(struct cpu_user_regs *regs);
+extern void program_exception(struct cpu_user_regs *regs, unsigned long cookie);
+
+int hdec_sample = 0;
+
+void do_timer(struct cpu_user_regs *regs)
+{
+ /* XXX this is just here to keep HDEC from firing until
+ * reprogram_ac_timer() sets the proper next-tick time */
+ mthdec(timebase_freq);
+
+#ifdef HDEC_PREEMPT
+ raise_softirq(TIMER_SOFTIRQ);
+#endif
+#ifdef DEBUG
+ {
+ int d;
+ if (regs->msr & MSR_HV) {
+ d = -1;
+ } else {
+ d = get_current()->domain->domain_id;
+ }
+ extern char serial_getc_nb(int handle);
+ if (0 && serial_getc_nb(0) > 0) {
+ printk("H: pc: 0x%lx lr: 0x%lx \n", regs->pc, regs->lr);
+ }
+ if (hdec_sample) {
+ printk("H: pc: 0x%lx lr: 0x%lx \n", regs->pc, regs->lr);
+ hdec_sample = 0;
+ }
+ }
+#endif
+}
+
+void do_dec(struct cpu_user_regs *regs)
+{
+ if (!(regs->msr & MSR_HV)) {
+ panic("HV dec from domain\n");
+ }
+ printk("DEC_HV: pc: 0x%lx lr: 0x%lx \n", regs->pc, regs->lr);
+ mtdec(INT_MAX);
+}
+
+void program_exception(struct cpu_user_regs *regs, unsigned long cookie)
+{
+#ifdef CRASH_DEBUG
+ __trap_to_gdb(regs, cookie);
+#else /* CRASH_DEBUG */
+ show_registers(regs);
+ printk("dar 0x%016lx, dsisr 0x%08x\n", mfdar(), mfdsisr());
+ printk("hid4 0x%016lx\n", regs->hid4);
+ panic("%s: 0x%lx\n", __func__, cookie);
+#endif /* CRASH_DEBUG */
+}
diff --git a/xen/arch/powerpc/exceptions.h b/xen/arch/powerpc/exceptions.h
new file mode 100644
index 0000000000..25ffc1ace7
--- /dev/null
+++ b/xen/arch/powerpc/exceptions.h
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ARCH_PPC_EXCEPTIONS_H_
+#define _ARCH_PPC_EXCEPTIONS_H_
+
+#include <xen/types.h>
+#include <public/xen.h>
+#include <xen/multiboot.h>
+
+extern void do_hcall(struct cpu_user_regs *regs);
+extern void do_IRQ(struct cpu_user_regs *regs);
+extern void deliver_ee(struct cpu_user_regs *regs);
+extern void do_external(struct cpu_user_regs *regs);
+extern void init_IRQ(void);
+extern void ack_APIC_irq(void);
+extern int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval);
+extern int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val);
+extern void __start_xen_ppc(
+ ulong r3, ulong r4, ulong r5, ulong r6, ulong r7, ulong orig_msr);
+extern multiboot_info_t *boot_of_init(ulong r3, ulong r4, ulong vec, ulong r6, ulong r7, ulong orig_msr);
+
+extern void do_timer(struct cpu_user_regs *regs);
+extern void do_dec(struct cpu_user_regs *regs);
+extern void program_exception(
+ struct cpu_user_regs *regs, unsigned long cookie);
+
+extern long xen_hvcall_jump(struct cpu_user_regs *regs, ulong address);
+extern void *mambo_memset(void *, int, ulong);
+extern void *mambo_memcpy(void *, const void *, ulong);
+
+extern ulong *__hypercall_table[];
+
+extern char exception_vectors[];
+extern char exception_vectors_end[];
+extern int spin_start[];
+extern int firmware_image_start[0];
+extern int firmware_image_size[0];
+
+#endif
diff --git a/xen/arch/powerpc/external.c b/xen/arch/powerpc/external.c
new file mode 100644
index 0000000000..8d60766b8d
--- /dev/null
+++ b/xen/arch/powerpc/external.c
@@ -0,0 +1,247 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/lib.h>
+#include <xen/event.h>
+#include <xen/irq.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/hardirq.h>
+#include <asm/mpic.h>
+#include "mpic_init.h"
+#include "exceptions.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
+
+unsigned long io_apic_irqs;
+int ioapic_ack_new = 1;
+
+static struct hw_interrupt_type *hc_irq;
+
+/* deliver_ee: called with interrupts off when resuming every vcpu */
+void deliver_ee(struct cpu_user_regs *regs)
+{
+ const ulong srr_mask = ~(MSR_IR | MSR_DR | MSR_FE0 | MSR_FE1 | MSR_EE |
+ MSR_RI |
+ MSR_BE | MSR_FP | MSR_PMM | MSR_PR | MSR_SE);
+
+ BUG_ON(mfmsr() & MSR_EE);
+ BUG_ON(regs->msr & MSR_HV);
+
+ if (!local_events_need_delivery())
+ return;
+
+ /* XXX OS error: EE was set but RI was not. We could trigger a machine
+ * check, or kill the domain... for now just crash Xen so we notice. */
+ BUG_ON(!(regs->msr & MSR_RI));
+
+ regs->srr0 = regs->pc;
+ /* zero SRR1[33:36] and SRR1[42:47] */
+ regs->srr1 = regs->msr & ~0x00000000783f0000;
+ regs->pc = 0x500;
+ regs->msr &= srr_mask;
+ regs->msr |= MSR_SF | MSR_ME;
+
+ DBG("<HV: pc=0x%lx, msr=0x%lx\n", regs->pc, regs->msr);
+}
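The constant in deliver_ee() uses IBM bit numbering, where bit 0 is the most significant bit of the 64-bit register. Rebuilding it from the ranges named in the comment is a useful sanity check (standalone sketch):

    #include <stdio.h>

    /* Build a mask covering IBM-numbered bits first..last of a 64-bit
     * register (bit 0 is the MSB, bit 63 the LSB). */
    static unsigned long ibm_bits(int first, int last)
    {
        unsigned long mask = 0;
        for (int b = first; b <= last; b++)
            mask |= 1UL << (63 - b);
        return mask;
    }

    int main(void)
    {
        /* SRR1[33:36] | SRR1[42:47] == 0x783f0000, matching the code above */
        printf("0x%lx\n", ibm_bits(33, 36) | ibm_bits(42, 47));
        return 0;
    }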
+
+void do_external(struct cpu_user_regs *regs)
+{
+ int vec;
+
+ BUG_ON(!(regs->msr & MSR_EE));
+ BUG_ON(mfmsr() & MSR_EE);
+
+ vec = xen_mpic_get_irq(regs);
+
+ if (vec != -1) {
+ DBG("EE:0x%lx isrc: %d\n", regs->msr, vec);
+ regs->entry_vector = vec;
+ do_IRQ(regs);
+
+ BUG_ON(mfmsr() & MSR_EE);
+ }
+}
+
+static int xen_local_irq(unsigned int irq)
+{
+ irq_desc_t *desc;
+ unsigned int vector;
+
+ vector = irq_to_vector(irq);
+ desc = &irq_desc[vector];
+
+ return !(desc->status & IRQ_GUEST);
+}
+
+static unsigned int xen_startup_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ return hc_irq->startup(irq);
+ }
+ return 0;
+}
+
+static void xen_shutdown_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ hc_irq->shutdown(irq);
+ }
+}
+
+static void xen_enable_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ hc_irq->enable(irq);
+ }
+}
+
+static void xen_disable_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ hc_irq->disable(irq);
+ }
+}
+
+static void xen_ack_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ if (hc_irq->ack) hc_irq->ack(irq);
+ }
+}
+
+static void xen_end_irq(unsigned int irq)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ hc_irq->end(irq);
+ }
+}
+
+static void xen_set_affinity(unsigned int irq, cpumask_t mask)
+{
+ DBG("%s(%d)\n", __func__, irq);
+ if (xen_local_irq(irq)) {
+ if (hc_irq->set_affinity) hc_irq->set_affinity(irq, mask);
+ }
+}
+
+static struct hw_interrupt_type xen_irq = {
+ .startup = xen_startup_irq,
+ .enable = xen_enable_irq,
+ .disable = xen_disable_irq,
+ .shutdown = xen_shutdown_irq,
+ .ack = xen_ack_irq,
+ .end = xen_end_irq,
+ .set_affinity = xen_set_affinity,
+};
+
+void init_IRQ(void)
+{
+ hc_irq = xen_mpic_init(&xen_irq);
+}
+
+void ack_APIC_irq(void)
+{
+ printk("%s: EOI the whole MPIC?\n", __func__);
+ for (;;);
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ trap at vector %02x\n", irq);
+ /*
+ * Currently unexpected vectors happen only on SMP and APIC.
+ * We _must_ ack these because every local APIC has only N
+ * irq slots per priority level, and a 'hanging, unacked' IRQ
+ * holds up an irq slot - in excessive cases (when multiple
+ * unexpected vectors occur) that might lock up the APIC
+ * completely.
+ */
+ ack_APIC_irq();
+}
+
+extern void dump_ioapic_irq_info(void);
+void dump_ioapic_irq_info(void)
+{
+ printk("%s: can't dump yet\n", __func__);
+}
+
+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
+int assign_irq_vector(int irq)
+{
+ static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
+
+ BUG_ON(irq >= NR_IRQ_VECTORS);
+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
+ return IO_APIC_VECTOR(irq);
+next:
+ current_vector += 8;
+
+ /* Skip the hypercall vector. */
+ if (current_vector == HYPERCALL_VECTOR)
+ goto next;
+
+ /* Skip the Linux/BSD fast-trap vector. */
+ if (current_vector == FAST_TRAP)
+ goto next;
+
+ if (current_vector >= FIRST_SYSTEM_VECTOR) {
+ offset++;
+ if (!(offset%8))
+ return -ENOSPC;
+ current_vector = FIRST_DEVICE_VECTOR + offset;
+ }
+
+ vector_irq[current_vector] = irq;
+ if (irq != AUTO_ASSIGN)
+ IO_APIC_VECTOR(irq) = current_vector;
+
+ return current_vector;
+}
+
+int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
+{
+ BUG_ON(pval != pval);
+
+ return 0;
+}
+
+int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
+{
+ BUG_ON(val != val);
+ return 0;
+}
diff --git a/xen/arch/powerpc/float.S b/xen/arch/powerpc/float.S
new file mode 100644
index 0000000000..21c70ff472
--- /dev/null
+++ b/xen/arch/powerpc/float.S
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/config.h>
+#include <asm/asm-offsets.h>
+#include <asm/reg_defs.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#ifdef HAS_FLOAT
+save_fp:
+ addi r4, r3, VCPU_fprs - FPR_WIDTH
+ stfdu fr0,FPR_WIDTH(r4)
+ stfdu fr1,FPR_WIDTH(r4)
+ stfdu fr2,FPR_WIDTH(r4)
+ stfdu fr3,FPR_WIDTH(r4)
+ stfdu fr4,FPR_WIDTH(r4)
+ stfdu fr5,FPR_WIDTH(r4)
+ stfdu fr6,FPR_WIDTH(r4)
+ stfdu fr7,FPR_WIDTH(r4)
+ stfdu fr8,FPR_WIDTH(r4)
+ stfdu fr9,FPR_WIDTH(r4)
+ stfdu fr10,FPR_WIDTH(r4)
+ stfdu fr11,FPR_WIDTH(r4)
+ stfdu fr12,FPR_WIDTH(r4)
+ stfdu fr13,FPR_WIDTH(r4)
+ stfdu fr14,FPR_WIDTH(r4)
+ stfdu fr15,FPR_WIDTH(r4)
+ stfdu fr16,FPR_WIDTH(r4)
+ stfdu fr17,FPR_WIDTH(r4)
+ stfdu fr18,FPR_WIDTH(r4)
+ stfdu fr19,FPR_WIDTH(r4)
+ stfdu fr20,FPR_WIDTH(r4)
+ stfdu fr21,FPR_WIDTH(r4)
+ stfdu fr22,FPR_WIDTH(r4)
+ stfdu fr23,FPR_WIDTH(r4)
+ stfdu fr24,FPR_WIDTH(r4)
+ stfdu fr25,FPR_WIDTH(r4)
+ stfdu fr26,FPR_WIDTH(r4)
+ stfdu fr27,FPR_WIDTH(r4)
+ stfdu fr28,FPR_WIDTH(r4)
+ stfdu fr29,FPR_WIDTH(r4)
+ stfdu fr30,FPR_WIDTH(r4)
+ stfdu fr31,FPR_WIDTH(r4)
+ mffs fr0
+ stfd fr0,VCPU_fpscr(r3)
+ blr
+
+load_fp:
+ lfd fr0,VCPU_fpscr(r3)
+ mtfsf 0xff,fr0
+
+ addi r4, r3, VCPU_fprs - FPR_WIDTH
+ lfdu fr0,FPR_WIDTH(r4)
+ lfdu fr1,FPR_WIDTH(r4)
+ lfdu fr2,FPR_WIDTH(r4)
+ lfdu fr3,FPR_WIDTH(r4)
+ lfdu fr4,FPR_WIDTH(r4)
+ lfdu fr5,FPR_WIDTH(r4)
+ lfdu fr6,FPR_WIDTH(r4)
+ lfdu fr7,FPR_WIDTH(r4)
+ lfdu fr8,FPR_WIDTH(r4)
+ lfdu fr9,FPR_WIDTH(r4)
+ lfdu fr10,FPR_WIDTH(r4)
+ lfdu fr11,FPR_WIDTH(r4)
+ lfdu fr12,FPR_WIDTH(r4)
+ lfdu fr13,FPR_WIDTH(r4)
+ lfdu fr14,FPR_WIDTH(r4)
+ lfdu fr15,FPR_WIDTH(r4)
+ lfdu fr16,FPR_WIDTH(r4)
+ lfdu fr17,FPR_WIDTH(r4)
+ lfdu fr18,FPR_WIDTH(r4)
+ lfdu fr19,FPR_WIDTH(r4)
+ lfdu fr20,FPR_WIDTH(r4)
+ lfdu fr21,FPR_WIDTH(r4)
+ lfdu fr22,FPR_WIDTH(r4)
+ lfdu fr23,FPR_WIDTH(r4)
+ lfdu fr24,FPR_WIDTH(r4)
+ lfdu fr25,FPR_WIDTH(r4)
+ lfdu fr26,FPR_WIDTH(r4)
+ lfdu fr27,FPR_WIDTH(r4)
+ lfdu fr28,FPR_WIDTH(r4)
+ lfdu fr29,FPR_WIDTH(r4)
+ lfdu fr30,FPR_WIDTH(r4)
+ lfdu fr31,FPR_WIDTH(r4)
+ blr
+#endif /* HAS_FLOAT */
+
+#ifdef HAS_VMX
+
+#define VCPU_vr(n) (VCPU_vrs + ((n) * 16))
+
+/*
+ * We cannot rely on the domain to correctly use VRSAVE
+ * so it is required that all VMX registers are saved and restored.
+ */
+save_vmx:
+ mfspr r0,SPRN_VRSAVE
+ stw r0,VCPU_vrsave(r3)
+
+ addi r0,r3,VCPU_vr(0); stvxl vr0,0,r0
+ addi r0,r3,VCPU_vr(1); stvxl vr1,0,r0
+ addi r0,r3,VCPU_vr(2); stvxl vr2,0,r0
+ addi r0,r3,VCPU_vr(3); stvxl vr3,0,r0
+ addi r0,r3,VCPU_vr(4); stvxl vr4,0,r0
+ addi r0,r3,VCPU_vr(5); stvxl vr5,0,r0
+ addi r0,r3,VCPU_vr(6); stvxl vr6,0,r0
+ addi r0,r3,VCPU_vr(7); stvxl vr7,0,r0
+ addi r0,r3,VCPU_vr(8); stvxl vr8,0,r0
+
+ /*
+ * By now vr0 should be pushed out, so now is a good time to
+ * get the VSCR, which can take a long time and has no dependencies
+ * on the following operations.
+ */
+ mfvscr vr0
+ addi r0,r3,VCPU_vscr ; stvxl vr0,0,r0
+
+ addi r0,r3,VCPU_vr(9); stvxl vr9,0,r0
+ addi r0,r3,VCPU_vr(10); stvxl vr10,0,r0
+ addi r0,r3,VCPU_vr(11); stvxl vr11,0,r0
+ addi r0,r3,VCPU_vr(12); stvxl vr12,0,r0
+ addi r0,r3,VCPU_vr(13); stvxl vr13,0,r0
+ addi r0,r3,VCPU_vr(14); stvxl vr14,0,r0
+ addi r0,r3,VCPU_vr(15); stvxl vr15,0,r0
+ addi r0,r3,VCPU_vr(16); stvxl vr16,0,r0
+ addi r0,r3,VCPU_vr(17); stvxl vr17,0,r0
+ addi r0,r3,VCPU_vr(18); stvxl vr18,0,r0
+ addi r0,r3,VCPU_vr(19); stvxl vr19,0,r0
+ addi r0,r3,VCPU_vr(20); stvxl vr20,0,r0
+ addi r0,r3,VCPU_vr(21); stvxl vr21,0,r0
+ addi r0,r3,VCPU_vr(22); stvxl vr22,0,r0
+ addi r0,r3,VCPU_vr(23); stvxl vr23,0,r0
+ addi r0,r3,VCPU_vr(24); stvxl vr24,0,r0
+ addi r0,r3,VCPU_vr(25); stvxl vr25,0,r0
+ addi r0,r3,VCPU_vr(26); stvxl vr26,0,r0
+ addi r0,r3,VCPU_vr(27); stvxl vr27,0,r0
+ addi r0,r3,VCPU_vr(28); stvxl vr28,0,r0
+ addi r0,r3,VCPU_vr(29); stvxl vr29,0,r0
+ addi r0,r3,VCPU_vr(30); stvxl vr30,0,r0
+ addi r0,r3,VCPU_vr(31); stvxl vr31,0,r0
+ blr
+
+load_vmx:
+ lwz r0,VCPU_vrsave(r3)
+ mtspr SPRN_VRSAVE,r0
+
+ /*
+ * This operation can take a long time so we use vr31 to
+ * eliminate the dependency on r0 for the next load
+ */
+ addi r0,r3,VCPU_vscr ; lvxl vr31,0,r0
+ mtvscr vr31
+
+ addi r0,r3,VCPU_vr(0); lvxl vr0,0,r0
+ addi r0,r3,VCPU_vr(1); lvxl vr1,0,r0
+ addi r0,r3,VCPU_vr(2); lvxl vr2,0,r0
+ addi r0,r3,VCPU_vr(3); lvxl vr3,0,r0
+ addi r0,r3,VCPU_vr(4); lvxl vr4,0,r0
+ addi r0,r3,VCPU_vr(5); lvxl vr5,0,r0
+ addi r0,r3,VCPU_vr(6); lvxl vr6,0,r0
+ addi r0,r3,VCPU_vr(7); lvxl vr7,0,r0
+ addi r0,r3,VCPU_vr(8); lvxl vr8,0,r0
+ addi r0,r3,VCPU_vr(9); lvxl vr9,0,r0
+ addi r0,r3,VCPU_vr(10); lvxl vr10,0,r0
+ addi r0,r3,VCPU_vr(11); lvxl vr11,0,r0
+ addi r0,r3,VCPU_vr(12); lvxl vr12,0,r0
+ addi r0,r3,VCPU_vr(13); lvxl vr13,0,r0
+ addi r0,r3,VCPU_vr(14); lvxl vr14,0,r0
+ addi r0,r3,VCPU_vr(15); lvxl vr15,0,r0
+ addi r0,r3,VCPU_vr(16); lvxl vr16,0,r0
+ addi r0,r3,VCPU_vr(17); lvxl vr17,0,r0
+ addi r0,r3,VCPU_vr(18); lvxl vr18,0,r0
+ addi r0,r3,VCPU_vr(19); lvxl vr19,0,r0
+ addi r0,r3,VCPU_vr(20); lvxl vr20,0,r0
+ addi r0,r3,VCPU_vr(21); lvxl vr21,0,r0
+ addi r0,r3,VCPU_vr(22); lvxl vr22,0,r0
+ addi r0,r3,VCPU_vr(23); lvxl vr23,0,r0
+ addi r0,r3,VCPU_vr(24); lvxl vr24,0,r0
+ addi r0,r3,VCPU_vr(25); lvxl vr25,0,r0
+ addi r0,r3,VCPU_vr(26); lvxl vr26,0,r0
+ addi r0,r3,VCPU_vr(27); lvxl vr27,0,r0
+ addi r0,r3,VCPU_vr(28); lvxl vr28,0,r0
+ addi r0,r3,VCPU_vr(29); lvxl vr29,0,r0
+ addi r0,r3,VCPU_vr(30); lvxl vr30,0,r0
+ addi r0,r3,VCPU_vr(31); lvxl vr31,0,r0
+ blr
+#endif /* HAS_VMX */
+
+/* void save_float(struct exec_domain *ed) */
+_GLOBAL(save_float)
+ mflr r8
+#ifdef HAS_FLOAT
+ mfmsr r9 # save msr
+ ori r0,r9,MSR_FP # turn on FPU
+ mtmsr r0
+ bl save_fp # uses r3, r4
+ mtmsr r9 # restore msr
+#endif /* HAS_FLOAT */
+#ifdef HAS_VMX
+ mfmsr r9 # save msr
+ oris r0,r9,MSR_VMX@h # turn on VMX
+ mtmsr r0
+ bl save_vmx # uses r3
+ mtmsr r9 # restore msr
+#endif /* HAS_VMX */
+ mtlr r8
+ blr
+
+/* void load_float(struct exec_domain *ed) */
+_GLOBAL(load_float)
+ mflr r8
+#ifdef HAS_FLOAT
+ mfmsr r9 # save msr
+ ori r0,r9,MSR_FP # turn on FPU
+ mtmsr r0
+ bl load_fp # uses r3, r4
+ mtmsr r9 # restore msr
+#endif /* HAS_FLOAT */
+#ifdef HAS_VMX
+ mfmsr r9 # save msr
+ oris r0,r9,MSR_VMX@h # turn on VMX
+ mtmsr r0
+ bl load_vmx # uses r3
+ mtmsr r9 # restore msr
+#endif /* HAS_VMX */
+ mtlr r8
+ blr
diff --git a/xen/arch/powerpc/gdbstub.c b/xen/arch/powerpc/gdbstub.c
new file mode 100644
index 0000000000..5bd60bf859
--- /dev/null
+++ b/xen/arch/powerpc/gdbstub.c
@@ -0,0 +1,207 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/types.h>
+#include <xen/lib.h>
+#include <xen/gdbstub.h>
+#include <public/xen.h>
+#include <asm/msr.h>
+#include <asm/bitops.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+
+asm(".globl trap_instruction\n"
+ "trap_instruction:\n"
+ "trap\n");
+extern u32 trap_instruction[];
+
+static unsigned int dec_entry;
+static unsigned int hdec_entry;
+
+static inline ulong
+gdb_ppc_0x700(struct cpu_user_regs *state)
+{
+ ulong instr;
+
+ switch (state->msr & MSR_TRAP_BITS) {
+ case MSR_TRAP_FE:
+ return SIGFPE;
+ case MSR_TRAP_IOP:
+ case MSR_TRAP_PRIV:
+ return SIGILL;
+ case MSR_TRAP:
+ instr = *((u32 *)state->pc);
+
+ /* if this was a hardcoded trap in the source, step past it */
+ if (instr == *trap_instruction) {
+ state->pc += sizeof (u32);
+ }
+ return SIGTRAP;
+ }
+ return SIGBUS;
+}
+
+u16 gdb_arch_signal_num(struct cpu_user_regs *regs, unsigned long cookie)
+{
+ /* exception type identifies, trap or bad address */
+ switch (cookie) {
+ case 0x200: /* Machine Check */
+ return SIGTERM;
+ case 0x300: /* DSI */
+ case 0x380: /* Data SLB */
+ case 0x400: /* ISI */
+ case 0x480: /* Instruction SLB */
+ return SIGSEGV;
+ case 0x600: /* Alignment SLB */
+ return SIGBUS;
+ case 0x700: /* Program */
+ return gdb_ppc_0x700(regs);
+ case 0x800: /* Float */
+ return SIGFPE;
+ case 0x900: /* Decrementer */
+ return SIGALRM; /* is this right? */
+ case 0xd00: /* TRAP */
+ return SIGTRAP;
+ case 0xe00: /* FP */
+ return SIGFPE;
+ }
+ return SIGBUS;
+}
+
+void
+gdb_arch_resume(struct cpu_user_regs *regs,
+ unsigned long addr, unsigned long type,
+ struct gdb_context *ctx)
+{
+ if (addr != ~((ulong)0)) {
+ regs->pc = addr;
+ }
+
+ if (type == GDB_CONTINUE) {
+ regs->msr &= ~MSR_SE;
+ } else {
+ regs->msr |= MSR_SE;
+ }
+}
+
+void
+gdb_arch_read_reg(unsigned long regnum, struct cpu_user_regs *regs,
+ struct gdb_context *ctx)
+{
+ unimplemented();
+ gdb_send_reply("", ctx);
+}
+
+void
+gdb_arch_read_reg_array(struct cpu_user_regs *state, struct gdb_context *ctx)
+{
+ ulong i = 0;
+
+ for (i = 0; i < 32; ++i) {
+ gdb_write_to_packet_hex(state->gprs[i], sizeof(state->gprs[i]), ctx);
+ }
+ /* Avoid floating point for now */
+ for (i = 0; i < 32; ++i) {
+ gdb_write_to_packet_hex(0, sizeof(u64), ctx);
+ }
+ gdb_write_to_packet_hex(state->pc, sizeof (state->pc), ctx);
+ gdb_write_to_packet_hex(state->msr, sizeof (state->msr), ctx);
+ gdb_write_to_packet_hex(state->cr, sizeof (state->cr), ctx);
+ gdb_write_to_packet_hex(state->lr, sizeof (state->lr), ctx);
+ gdb_write_to_packet_hex(state->ctr, sizeof (state->ctr), ctx);
+ gdb_write_to_packet_hex(state->xer, sizeof (u32), ctx);
+ gdb_write_to_packet_hex(0, sizeof(u32), ctx); /* fpscr */
+ gdb_send_packet(ctx);
+}
+
+void
+gdb_arch_write_reg_array(struct cpu_user_regs *regs, const char *buf,
+ struct gdb_context *ctx)
+{
+ ulong i;
+
+ for (i = 0; i < 32; ++i) {
+ regs->gprs[i] = str2ulong(buf, sizeof (ulong));
+ buf += sizeof (regs->gprs[0]) * 2;
+ }
+ /* Avoid floating point for now */
+ for (i = 0; i < 32; ++i) {
+ buf += sizeof (u64) * 2;
+ }
+
+ regs->pc = str2ulong(buf, sizeof (regs->pc));
+ buf += sizeof (regs->pc) * 2;
+ regs->msr = str2ulong(buf, sizeof (regs->msr));
+ buf += sizeof (regs->msr) * 2;
+ regs->cr = str2ulong(buf, sizeof (regs->cr));
+ buf += sizeof (regs->cr) * 2;
+ regs->lr = str2ulong(buf, sizeof (regs->lr));
+ buf += sizeof (regs->lr) * 2;
+ regs->ctr = str2ulong(buf, sizeof (regs->ctr));
+ buf += sizeof (regs->ctr) * 2;
+ regs->xer = str2ulong(buf, sizeof (u32));
+ buf += sizeof (u32) * 2;
+}
+
+unsigned int
+gdb_arch_copy_from_user(void *dest, const void *src, unsigned len)
+{
+ memcpy(dest, src, len);
+ return 0;
+}
+
+unsigned int
+gdb_arch_copy_to_user(void *dest, const void *src, unsigned len)
+{
+ memcpy(dest, src, len);
+ synchronize_caches((ulong)dest, len);
+ return 0;
+}
+
+void
+gdb_arch_print_state(struct cpu_user_regs *state)
+{
+ int i = 0;
+ printk("PC: 0x%016lx MSR: 0x%016lx\n", state->pc, state->msr);
+ printk("LR: 0x%016lx CTR: 0x%016lx\n", state->lr, state->ctr);
+ /* XXX
+ printk("DAR: 0x%016lx DSISR: 0x%016lx\n", state->dar, state->dsisr);
+ */
+ printk("CR: 0x%08x XER: 0x%016lx\n", state->cr, state->xer);
+ for (; i < 32; i+=4) {
+ printk("%02d: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+ i, state->gprs[i], state->gprs[i+1],
+ state->gprs[i+2], state->gprs[i+3]);
+ }
+}
+
+void
+gdb_arch_enter(struct cpu_user_regs *state)
+{
+ dec_entry = mfdec();
+ hdec_entry = mfhdec();
+}
+
+void
+gdb_arch_exit(struct cpu_user_regs *state)
+{
+ mtdec(dec_entry);
+ mthdec(hdec_entry);
+}
diff --git a/xen/arch/powerpc/hcalls.c b/xen/arch/powerpc/hcalls.c
new file mode 100644
index 0000000000..e48ed9601f
--- /dev/null
+++ b/xen/arch/powerpc/hcalls.c
@@ -0,0 +1,172 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/multicall.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+#include <asm/debugger.h>
+#include <asm/msr.h>
+#include "exceptions.h"
+
+u32 *papr_hcalls; /* PAPR Hypervisor Calls */
+u32 *hypercall_table; /* Xen Hypervisor Calls */
+
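+/* PAPR hcall numbers are multiples of 4, so the dispatch table is
+ * indexed by num/4; non-aligned numbers and anything beyond
+ * RPA_HCALL_END are rejected with H_Parameter. */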
+static void hcall_papr(ulong num, struct cpu_user_regs *regs)
+{
+ u32 address;
+
+ if (regs->msr & MSR_PR) {
+ regs->gprs[3] = H_Privilege;
+ return;
+ }
+
+ if ((num & 0x3) || (num > RPA_HCALL_END)) {
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+
+ address = papr_hcalls[num/4];
+ papr_hcall_jump(regs, address);
+}
+
+static void hcall_xen(ulong num, struct cpu_user_regs *regs)
+{
+ u32 address;
+
+ if (regs->msr & MSR_PR) {
+ regs->gprs[3] = -EPERM;
+ return;
+ }
+
+ if (num >= NR_hypercalls) {
+ regs->gprs[3] = -ENOSYS;
+ return;
+ }
+ address = hypercall_table[num];
+ if (address == 0) {
+ printk("unsupported Xen hypercall: 0x%lx\n", num);
+ regs->gprs[3] = -ENOSYS;
+ return;
+ }
+
+ regs->gprs[3] = xen_hvcall_jump(regs, address);
+}
+
+void do_multicall_call(multicall_entry_t *call)
+{
+ struct cpu_user_regs regs;
+
+ regs.gprs[3] = call->args[0];
+ regs.gprs[4] = call->args[1];
+ regs.gprs[5] = call->args[2];
+ regs.gprs[6] = call->args[3];
+ regs.gprs[7] = call->args[4];
+ regs.gprs[8] = call->args[5];
+
+ hcall_xen(call->op, &regs);
+
+ call->result = regs.gprs[3];
+}
+
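+/* All hypervisor calls arrive here with the call number in r3: numbers
+ * carrying the XEN_MARK bits are dispatched as Xen hypercalls, anything
+ * else is treated as a PAPR hcall. */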
+void do_hcall(struct cpu_user_regs *regs)
+{
+ ulong num = regs->gprs[3];
+
+ local_irq_enable();
+
+ if ((num & XEN_MARK(0)) == XEN_MARK(0)) {
+ /* it's a Xen call */
+ num &= ~XEN_MARK(0);
+ hcall_xen(num, regs);
+ } else {
+ /* it's a PAPR call */
+ hcall_papr(num, regs);
+ }
+}
+
+static void do_ni_papr_hypercall(struct cpu_user_regs *regs)
+{
+ struct vcpu *v = get_current();
+
+ printk("unsupported hcall 0x%lx was called by dom0x%x\n",
+ regs->gprs[3], v->domain->domain_id);
+ debugger_trap_immediate();
+
+ regs->gprs[3] = H_Parameter;
+}
+
+/* store low 32 bits of 64-bit address in hcall table (this is safe because we
+ * know we will not link above 4GB). We don't need to preserve the TOC
+ * because that only changes when calling dynamically linked objects. */
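+/* Note that 'handler' points at a PPC64 ELF function descriptor; the
+ * u64 dereference below extracts the actual entry address from it. */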
+static void register_papr_hcall(ulong num, hcall_handler_t handler)
+{
+ int index = num/4;
+
+ papr_hcalls[index] = (u32)(*(u64 *)handler);
+}
+
+static void init_papr_hcalls(void)
+{
+ inithcall_t *hcall;
+ int i;
+
+ /* initialize PAPR hcall table */
+ /* hcall numbers run 0 .. RPA_HCALL_END inclusive, in steps of 4 */
+ papr_hcalls = xmalloc_array(u32, RPA_HCALL_END/4 + 1);
+ ASSERT(papr_hcalls != NULL);
+ for (i = 0; i <= RPA_HCALL_END; i += 4)
+ register_papr_hcall(i, do_ni_papr_hypercall);
+
+ /* register the PAPR hcalls */
+ for (hcall = &__inithcall_start; hcall < &__inithcall_end; hcall++) {
+ register_papr_hcall(hcall->number, hcall->handler);
+ }
+}
+
+static void init_hypercall_table(void)
+{
+ int i;
+
+ hypercall_table = xmalloc_array(u32, NR_hypercalls);
+ ASSERT(hypercall_table != NULL);
+
+ for (i = 0; i < NR_hypercalls; i++) {
+ if (__hypercall_table[i] == NULL ) {
+ hypercall_table[i] = 0;
+ } else {
+ hypercall_table[i] = (u32)(*__hypercall_table[i]);
+ }
+ }
+}
+
+static int init_hcalls(void)
+{
+ init_papr_hcalls();
+ init_hypercall_table();
+
+ return 0;
+}
+__initcall(init_hcalls);
diff --git a/xen/arch/powerpc/htab.c b/xen/arch/powerpc/htab.c
new file mode 100644
index 0000000000..5d33e03288
--- /dev/null
+++ b/xen/arch/powerpc/htab.c
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+
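+/* SDR1 points the MMU at the hashed page table: it combines the
+ * table's real address with a size field derived from the log2 of the
+ * table size. */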
+static ulong htab_calc_sdr1(ulong htab_addr, ulong log_htab_size)
+{
+ ulong sdr1_htabsize;
+
+ ASSERT((htab_addr & ((1UL << log_htab_size) - 1)) == 0);
+ ASSERT(log_htab_size <= SDR1_HTABSIZE_MAX);
+ ASSERT(log_htab_size >= HTAB_MIN_LOG_SIZE);
+
+ sdr1_htabsize = log_htab_size - LOG_PTEG_SIZE - SDR1_HTABSIZE_BASEBITS;
+
+ return (htab_addr | (sdr1_htabsize & SDR1_HTABSIZE_MASK));
+}
+
+void htab_alloc(struct domain *d, int log_htab_bytes)
+{
+ ulong htab_raddr;
+ ulong htab_bytes = 1UL << log_htab_bytes;
+
+ /* XXX use alloc_domheap_pages instead? */
+ htab_raddr = (ulong)alloc_xenheap_pages(log_htab_bytes - PAGE_SHIFT);
+ ASSERT(htab_raddr != 0);
+ /* XXX check alignment guarantees */
+ ASSERT((htab_raddr & (htab_bytes-1)) == 0);
+
+ /* XXX slow. move memset out to service partition? */
+ memset((void *)htab_raddr, 0, htab_bytes);
+
+ d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
+ d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
+ d->arch.htab.map = (union pte *)htab_raddr;
+ d->arch.htab.shadow = xmalloc_array(ulong,
+ 1UL << d->arch.htab.log_num_ptes);
+ ASSERT(d->arch.htab.shadow != NULL);
+
+ printf("%s: dom%x sdr1: %lx\n", __func__, d->domain_id, d->arch.htab.sdr1);
+}
+
+void htab_free(struct domain *d)
+{
+ ulong htab_raddr = GET_HTAB(d);
+
+ free_xenheap_pages((void *)htab_raddr,
+ (1UL << d->arch.htab.log_num_ptes) << LOG_PTE_SIZE);
+ xfree(d->arch.htab.shadow);
+}
+
diff --git a/xen/arch/powerpc/iommu.c b/xen/arch/powerpc/iommu.c
new file mode 100644
index 0000000000..2c335371f9
--- /dev/null
+++ b/xen/arch/powerpc/iommu.c
@@ -0,0 +1,79 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+#include <public/xen.h>
+#include "tce.h"
+#include "iommu.h"
+
+struct iommu_funcs {
+ int (*iommu_put)(ulong, union tce);
+};
+
+/* individual host bridges */
+static struct iommu_funcs iommu_phbs[16];
+static u32 iommu_phbs_num = ARRAY_SIZE(iommu_phbs);
+
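+/* Translate a guest-supplied TCE before handing it to the PHB backend:
+ * the RPN field holds a domain pfn which must be converted to a
+ * machine frame number. Only RMA memory is handled so far. */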
+int iommu_put(u32 buid, ulong ioba, union tce tce)
+{
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+
+ if (buid < iommu_phbs_num && iommu_phbs[buid].iommu_put != NULL) {
+ ulong pfn;
+ ulong mfn;
+ int mtype;
+
+ pfn = tce.tce_bits.tce_rpn;
+ mfn = pfn2mfn(d, pfn, &mtype);
+ if (mtype != 0) {
+ panic("we don't do non-RMO memory yet\n");
+ }
+
+#ifdef DEBUG
+ printk("%s: ioba=0x%lx pfn=0x%lx mfn=0x%lx\n", __func__,
+ ioba, pfn, mfn);
+#endif
+ tce.tce_bits.tce_rpn = mfn;
+
+ return iommu_phbs[buid].iommu_put(ioba, tce);
+ }
+ return -1;
+}
+
+int iommu_register(u32 buid, int (*put)(ulong ioba, union tce ltce))
+{
+
+ if (buid < iommu_phbs_num && iommu_phbs[buid].iommu_put == NULL) {
+ iommu_phbs[0].iommu_put = put;
+ return 0;
+ }
+ panic("bad IOMMU registration\n");
+ return -1;
+}
diff --git a/xen/arch/powerpc/iommu.h b/xen/arch/powerpc/iommu.h
new file mode 100644
index 0000000000..727441bb07
--- /dev/null
+++ b/xen/arch/powerpc/iommu.h
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _IOMMU_H
+#define _IOMMU_H
+
+extern int iommu_put(u32 buid, ulong ioba, union tce tce);
+extern int iommu_register(u32 buid, int (*put)(ulong, union tce));
+
+#endif /* _IOMMU_H */
+
diff --git a/xen/arch/powerpc/irq.c b/xen/arch/powerpc/irq.c
new file mode 100644
index 0000000000..8ebd6f5b71
--- /dev/null
+++ b/xen/arch/powerpc/irq.c
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "exceptions.h"
+#include "../x86/irq.c"
diff --git a/xen/arch/powerpc/mambo.S b/xen/arch/powerpc/mambo.S
new file mode 100644
index 0000000000..d3259811ab
--- /dev/null
+++ b/xen/arch/powerpc/mambo.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <asm/config.h>
+#include <asm/processor.h>
+
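+/* The .long below is a magic encoding that the Mambo full-system
+ * simulator intercepts as a "callthru": r3 selects the simulator
+ * service and the remaining registers carry its arguments. */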
+_GLOBAL(mambo_callthru)
+ .long 0x000eaeb0
+ blr
+
+_GLOBAL(mambo_write)
+ mr r5, r4
+ mr r4, r3
+ li r3, 0 # Write console code
+
+ li r6, 0
+ /* need to fix return value */
+ mflr r7
+ bl _ENTRY(mambo_callthru)
+ mtlr r7
+ mr r3, r5
+ blr
+
+_GLOBAL(mambo_memset)
+ mr r6, r5
+ mr r5, r4
+ mr r4, r3
+ li r3, 0x47 # memset
+ /* need to fix return value */
+ mflr r7
+ bl _ENTRY(mambo_callthru)
+ mtlr r7
+ mr r3, r4
+ blr
+
+_GLOBAL(mambo_memcpy)
+ mr r6, r5
+ mr r5, r4
+ mr r4, r3
+ li r3, 0x45 # memcpy
+ /* need to fix return value */
+ mflr r7
+ bl _ENTRY(mambo_callthru)
+ mtlr r7
+ mr r3, r4
+ blr
+
+
diff --git a/xen/arch/powerpc/mm.c b/xen/arch/powerpc/mm.c
new file mode 100644
index 0000000000..00e072747b
--- /dev/null
+++ b/xen/arch/powerpc/mm.c
@@ -0,0 +1,141 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/mm.h>
+#include <xen/kernel.h>
+#include <xen/sched.h>
+#include <asm/misc.h>
+#include <asm/init.h>
+#include <asm/page.h>
+
+/* Frame table and its size in pages. */
+struct page_info *frame_table;
+unsigned long frame_table_size;
+unsigned long max_page;
+unsigned long total_pages;
+
+int create_grant_host_mapping(
+ unsigned long addr, unsigned long frame, unsigned int flags)
+{
+ panic("%s called\n", __func__);
+ return 1;
+}
+
+int destroy_grant_host_mapping(
+ unsigned long addr, unsigned long frame, unsigned int flags)
+{
+ panic("%s called\n", __func__);
+ return 1;
+}
+
+int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
+{
+ panic("%s called\n", __func__);
+ return 1;
+}
+
+
+int get_page_type(struct page_info *page, u32 type)
+{
+ panic("%s called\n", __func__);
+ return 1;
+}
+
+void put_page_type(struct page_info *page)
+{
+ panic("%s called\n", __func__);
+}
+
+void __init init_frametable(void)
+{
+ unsigned long p;
+
+ frame_table_size = PFN_UP(max_page * sizeof(struct page_info));
+
+ p = alloc_boot_pages(min(frame_table_size, 4UL << 20), 1);
+ if (p == 0)
+ panic("Not enough memory for frame table\n");
+
+ frame_table = (struct page_info *)(p << PAGE_SHIFT);
+ frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
+
+ memset(frame_table, 0, frame_table_size);
+}
+
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+ printk("%s: no PPC specific memory ops\n", __func__);
+ return -ENOSYS;
+}
+
+void clear_page(void *page)
+{
+ if (on_mambo()) {
+ extern void *mambo_memset(void *,int ,__kernel_size_t);
+ mambo_memset(page, 0, PAGE_SIZE);
+ } else {
+ memset(page, 0, PAGE_SIZE);
+ }
+}
+
+void copy_page(void *dp, void *sp)
+{
+ if (on_mambo()) {
+ extern void *mambo_memcpy(void *,const void *,__kernel_size_t);
+ mambo_memcpy(dp, sp, PAGE_SIZE);
+ } else {
+ memcpy(dp, sp, PAGE_SIZE);
+ }
+}
+
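+/* Map a domain pfn to a machine frame. Three ranges are recognized:
+ * the RMA (offset by rma_base), the "logical" range (currently only
+ * passed through 1:1 for the privileged domain), and everything else,
+ * which is assumed to be I/O space and passed through unchanged. */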
+ulong pfn2mfn(struct domain *d, long pfn, int *type)
+{
+ ulong rma_base_mfn = d->arch.rma_base >> PAGE_SHIFT;
+ ulong rma_size_mfn = d->arch.rma_size >> PAGE_SHIFT;
+ ulong mfn;
+ int t;
+
+ if (pfn < rma_size_mfn) {
+ mfn = pfn + rma_base_mfn;
+ t = PFN_TYPE_RMA;
+ } else if (pfn >= d->arch.logical_base_pfn &&
+ pfn < d->arch.logical_end_pfn) {
+ if (test_bit(_DOMF_privileged, &d->domain_flags)) {
+ /* This hack allows dom0 to map all memory, necessary to
+ * initialize domU state. */
+ mfn = pfn;
+ } else {
+ panic("we do not handle the logical area yet\n");
+ mfn = 0;
+ }
+
+ t = PFN_TYPE_LOGICAL;
+ } else {
+ /* don't know */
+ mfn = pfn;
+ t = PFN_TYPE_IO;
+ }
+
+ if (type != NULL)
+ *type = t;
+
+ return mfn;
+}
diff --git a/xen/arch/powerpc/mpic.c b/xen/arch/powerpc/mpic.c
new file mode 100644
index 0000000000..b2a9ca3a9a
--- /dev/null
+++ b/xen/arch/powerpc/mpic.c
@@ -0,0 +1,1109 @@
+/*
+ * arch/powerpc/kernel/mpic.c
+ *
+ * Driver for interrupt controllers following the OpenPIC standard, the
+ * common implementation being IBM's MPIC. This driver can also deal
+ * with various broken implementations of this HW.
+ *
+ * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/* XXX Xen hacks ... */
+/* make this generic */
+
+#define le32_to_cpu(x) \
+({ \
+ __u32 __x = (x); \
+ ((__u32)( \
+ (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
+})
+
+
+#define alloc_bootmem(x) xmalloc_bytes(x)
+#define request_irq(irq, handler, f, devname, dev_id) \
+ panic("IPI requested: %d: %p: %s: %p\n", irq, handler, devname, dev_id)
+
+typedef int irqreturn_t;
+
+#define IRQ_NONE (0)
+#define IRQ_HANDLED (1)
+#define IRQ_RETVAL(x) ((x) != 0)
+
+#define IRQ_SENSE_MASK 0x1
+#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
+#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
+
+#define IRQ_POLARITY_MASK 0x2
+#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
+#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
+
+#define CONFIG_IRQ_ALL_CPUS 0
+#define distribute_irqs CONFIG_IRQ_ALL_CPUS
+#define CONFIG_MPIC_BROKEN_U3
+
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
+#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_CAP_LIST_ID 0 /* Capability ID */
+#define PCI_CAP_ID_HT_IRQCONF 0x08 /* HyperTransport IRQ Configuration */
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define MSG_ALL 0x8001
+#define MSG_ALL_BUT_SELF 0x8000
+
+/* keeps file even closer to the original */
+#define pt_regs cpu_user_regs
+/* XXX ... Xen hacks */
+
+#undef DEBUG
+#undef DEBUG_IPI
+#undef DEBUG_IRQ
+#undef DEBUG_LOW
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/kernel.h>
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/smp.h>
+#ifndef __XEN__
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#endif
+#include <xen/spinlock.h>
+#ifndef __XEN__
+#include <asm/pci.h>
+
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+#endif
+#include <asm/io.h>
+#ifndef __XEN__
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#endif
+#include <asm/mpic.h>
+#include <asm/smp.h>
+
+static inline void smp_message_recv(int msg, struct pt_regs *regs)
+{
+ return;
+}
+
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static struct mpic *mpics;
+static struct mpic *mpic_primary;
+static DEFINE_SPINLOCK(mpic_lock);
+
+#ifdef CONFIG_PPC32 /* XXX for now */
+#ifdef CONFIG_IRQ_ALL_CPUS
+#define distribute_irqs (1)
+#else
+#define distribute_irqs (0)
+#endif
+#endif
+
+/*
+ * Register accessor functions
+ */
+
+
+static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
+ unsigned int reg)
+{
+ if (be)
+ return in_be32(base + (reg >> 2));
+ else
+ return in_le32(base + (reg >> 2));
+}
+
+static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
+ unsigned int reg, u32 value)
+{
+ if (be)
+ out_be32(base + (reg >> 2), value);
+ else
+ out_le32(base + (reg >> 2), value);
+}
+
+static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
+{
+ unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
+ unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
+
+ if (mpic->flags & MPIC_BROKEN_IPI)
+ be = !be;
+ return _mpic_read(be, mpic->gregs, offset);
+}
+
+static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
+{
+ unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
+
+ _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
+}
+
+static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
+{
+ unsigned int cpu = 0;
+
+ if (mpic->flags & MPIC_PRIMARY)
+ cpu = hard_smp_processor_id();
+
+ return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
+}
+
+static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
+{
+ unsigned int cpu = 0;
+
+ if (mpic->flags & MPIC_PRIMARY)
+ cpu = hard_smp_processor_id();
+
+ _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
+}
+
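+/* Interrupt sources are spread across ISUs: the high bits of the
+ * source number select the ISU, the low bits the register block
+ * within it.
+ */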
+static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
+{
+ unsigned int isu = src_no >> mpic->isu_shift;
+ unsigned int idx = src_no & mpic->isu_mask;
+
+ return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+ reg + (idx * MPIC_IRQ_STRIDE));
+}
+
+static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
+ unsigned int reg, u32 value)
+{
+ unsigned int isu = src_no >> mpic->isu_shift;
+ unsigned int idx = src_no & mpic->isu_mask;
+
+ _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+ reg + (idx * MPIC_IRQ_STRIDE), value);
+}
+
+#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
+#define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
+#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
+#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
+#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
+#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
+#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
+#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v))
+
+
+/*
+ * Low level utility functions
+ */
+
+
+
+/* Check if we have one of those nice broken MPICs with a flipped endian on
+ * reads from IPI registers
+ */
+static void __init mpic_test_broken_ipi(struct mpic *mpic)
+{
+ u32 r;
+
+ mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
+ r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
+
+ if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
+ printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
+ mpic->flags |= MPIC_BROKEN_IPI;
+ }
+}
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+
+/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
+ * to force the edge setting on the MPIC and do the ack workaround.
+ */
+static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
+{
+ if (source >= 128 || !mpic->fixups)
+ return 0;
+ return mpic->fixups[source].base != NULL;
+}
+
+
+static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+
+ if (fixup->applebase) {
+ unsigned int soff = (fixup->index >> 3) & ~3;
+ unsigned int mask = 1U << (fixup->index & 0x1f);
+ writel(mask, fixup->applebase + soff);
+ } else {
+ spin_lock(&mpic->fixup_lock);
+ writeb(0x11 + 2 * fixup->index, fixup->base + 2);
+ writel(fixup->data, fixup->base + 4);
+ spin_unlock(&mpic->fixup_lock);
+ }
+}
+
+static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
+ u32 tmp;
+
+ if (fixup->base == NULL)
+ return;
+
+ DBG("startup_ht_interrupt(%u, %u) index: %d\n",
+ source, irqflags, fixup->index);
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ /* Enable and configure */
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp &= ~(0x23U);
+ if (irqflags & IRQ_LEVEL)
+ tmp |= 0x22;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+}
+
+static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
+ u32 tmp;
+
+ if (fixup->base == NULL)
+ return;
+
+ DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);
+
+ /* Disable */
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp |= 1;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+}
+
+static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
+ unsigned int devfn, u32 vdid)
+{
+ int i, irq, n;
+ u8 __iomem *base;
+ u32 tmp;
+ u8 pos;
+
+ for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
+ pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
+ u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
+ if (id == PCI_CAP_ID_HT_IRQCONF) {
+ id = readb(devbase + pos + 3);
+ if (id == 0x80)
+ break;
+ }
+ }
+ if (pos == 0)
+ return;
+
+ base = devbase + pos;
+ writeb(0x01, base + 2);
+ n = (readl(base + 4) >> 16) & 0xff;
+
+ printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
+ " has %d irqs\n",
+ devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
+
+ for (i = 0; i <= n; i++) {
+ writeb(0x10 + 2 * i, base + 2);
+ tmp = readl(base + 4);
+ irq = (tmp >> 16) & 0xff;
+ DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
+ /* mask it, will be unmasked later */
+ tmp |= 0x1;
+ writel(tmp, base + 4);
+ mpic->fixups[irq].index = i;
+ mpic->fixups[irq].base = base;
+ /* Apple HT PIC has a non-standard way of doing EOIs */
+ if ((vdid & 0xffff) == 0x106b)
+ mpic->fixups[irq].applebase = devbase + 0x60;
+ else
+ mpic->fixups[irq].applebase = NULL;
+ writeb(0x11 + 2 * i, base + 2);
+ mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
+ }
+}
+
+
+static void __init mpic_scan_ht_pics(struct mpic *mpic)
+{
+ unsigned int devfn;
+ u8 __iomem *cfgspace;
+
+ printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
+
+ /* Allocate fixups array */
+ mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
+ BUG_ON(mpic->fixups == NULL);
+ memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
+
+ /* Init spinlock */
+ spin_lock_init(&mpic->fixup_lock);
+
+ /* Map U3 config space. We assume all IO-APICs are on the primary bus
+ * so we only need to map 64kB.
+ */
+ cfgspace = ioremap(0xf2000000, 0x10000);
+ BUG_ON(cfgspace == NULL);
+
+ /* Now we scan all slots. We do a very quick scan, we read the header
+ * type, vendor ID and device ID only, that's plenty enough
+ */
+ for (devfn = 0; devfn < 0x100; devfn++) {
+ u8 __iomem *devbase = cfgspace + (devfn << 8);
+ u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
+ u32 l = readl(devbase + PCI_VENDOR_ID);
+ u16 s;
+
+ DBG("devfn %x, l: %x\n", devfn, l);
+
+ /* If no device, skip */
+ if (l == 0xffffffff || l == 0x00000000 ||
+ l == 0x0000ffff || l == 0xffff0000)
+ goto next;
+ /* Check if it supports capability lists */
+ s = readw(devbase + PCI_STATUS);
+ if (!(s & PCI_STATUS_CAP_LIST))
+ goto next;
+
+ mpic_scan_ht_pic(mpic, devbase, devfn, l);
+
+ next:
+ /* next device, if function 0 */
+ if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
+ devfn += 7;
+ }
+}
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+
+/* Find an mpic associated with a given linux interrupt */
+static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
+{
+ struct mpic *mpic = mpics;
+
+ while(mpic) {
+ /* search IPIs first since they may override the main interrupts */
+ if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
+ if (is_ipi)
+ *is_ipi = 1;
+ return mpic;
+ }
+ if (irq >= mpic->irq_offset &&
+ irq < (mpic->irq_offset + mpic->irq_count)) {
+ if (is_ipi)
+ *is_ipi = 0;
+ return mpic;
+ }
+ mpic = mpic->next;
+ }
+ return NULL;
+}
+
+/* Convert a cpu mask from logical to physical cpu numbers. */
+static inline u32 mpic_physmask(u32 cpumask)
+{
+ int i;
+ u32 mask = 0;
+
+ for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+ mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
+ return mask;
+}
+
+#ifdef CONFIG_SMP
+/* Get the mpic structure from the IPI number */
+static inline struct mpic * mpic_from_ipi(unsigned int ipi)
+{
+ return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
+}
+#endif
+
+/* Get the mpic structure from the irq number */
+static inline struct mpic * mpic_from_irq(unsigned int irq)
+{
+ return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
+}
+
+/* Send an EOI */
+static inline void mpic_eoi(struct mpic *mpic)
+{
+ mpic_cpu_write(MPIC_CPU_EOI, 0);
+ (void)mpic_cpu_read(MPIC_CPU_WHOAMI);
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct mpic *mpic = dev_id;
+
+ smp_message_recv(irq - mpic->ipi_offset, regs);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Linux descriptor level callbacks
+ */
+
+
+static void mpic_enable_irq(unsigned int irq)
+{
+ unsigned int loops = 100000;
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
+
+ mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
+ mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
+ ~MPIC_VECPRI_MASK);
+
+ /* make sure mask gets to controller before we return to user */
+ do {
+ if (!loops--) {
+ printk(KERN_ERR "mpic_enable_irq timeout\n");
+ break;
+ }
+ } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic->flags & MPIC_BROKEN_U3) {
+ unsigned int src = irq - mpic->irq_offset;
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
+ }
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+}
+
+static unsigned int mpic_startup_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_enable_irq(irq);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ return 0;
+}
+
+static void mpic_disable_irq(unsigned int irq)
+{
+ unsigned int loops = 100000;
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
+
+ mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
+ mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
+ MPIC_VECPRI_MASK);
+
+ /* make sure mask gets to controller before we return to user */
+ do {
+ if (!loops--) {
+ printk(KERN_ERR "mpic_disable_irq timeout\n");
+ break;
+ }
+ } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
+}
+
+static void mpic_shutdown_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_disable_irq(irq);
+}
+
+static void mpic_end_irq(unsigned int irq)
+{
+ struct mpic *mpic = mpic_from_irq(irq);
+
+#ifdef DEBUG_IRQ
+ DBG("%s: end_irq: %d\n", mpic->name, irq);
+#endif
+ /* We always EOI on end_irq() even for edge interrupts since that
+ * should only lower the priority, the MPIC should have properly
+ * latched another edge interrupt coming in anyway
+ */
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic->flags & MPIC_BROKEN_U3) {
+ unsigned int src = irq - mpic->irq_offset;
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
+ }
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_eoi(mpic);
+}
+
+#ifdef CONFIG_SMP
+
+static void mpic_enable_ipi(unsigned int irq)
+{
+ struct mpic *mpic = mpic_from_ipi(irq);
+ unsigned int src = irq - mpic->ipi_offset;
+
+ DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
+ mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
+}
+
+static void mpic_disable_ipi(unsigned int irq)
+{
+ /* NEVER disable an IPI... that's just plain wrong! */
+}
+
+static void mpic_end_ipi(unsigned int irq)
+{
+ struct mpic *mpic = mpic_from_ipi(irq);
+
+ /*
+ * IPIs are marked IRQ_PER_CPU. This has the side effect of
+ * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
+ * applying to them. We EOI them late to avoid re-entering.
+ * We mark IPI's with SA_INTERRUPT as they must run with
+ * irqs disabled.
+ */
+ mpic_eoi(mpic);
+}
+
+#endif /* CONFIG_SMP */
+
+static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ struct mpic *mpic = mpic_from_irq(irq);
+
+ cpumask_t tmp;
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+
+ mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
+ mpic_physmask(cpus_addr(tmp)[0]));
+}
+
+
+/*
+ * Exported functions
+ */
+
+
+struct mpic * __init mpic_alloc(unsigned long phys_addr,
+ unsigned int flags,
+ unsigned int isu_size,
+ unsigned int irq_offset,
+ unsigned int irq_count,
+ unsigned int ipi_offset,
+ unsigned char *senses,
+ unsigned int senses_count,
+ const char *name)
+{
+ struct mpic *mpic;
+ u32 reg;
+ const char *vers;
+ int i;
+
+ mpic = alloc_bootmem(sizeof(struct mpic));
+ if (mpic == NULL)
+ return NULL;
+
+
+ memset(mpic, 0, sizeof(struct mpic));
+ mpic->name = name;
+
+ mpic->hc_irq.typename = name;
+ mpic->hc_irq.startup = mpic_startup_irq;
+ mpic->hc_irq.shutdown = mpic_shutdown_irq;
+ mpic->hc_irq.enable = mpic_enable_irq;
+ mpic->hc_irq.disable = mpic_disable_irq;
+ mpic->hc_irq.end = mpic_end_irq;
+ if (flags & MPIC_PRIMARY)
+ mpic->hc_irq.set_affinity = mpic_set_affinity;
+#ifdef CONFIG_SMP
+ mpic->hc_ipi.typename = name;
+ mpic->hc_ipi.enable = mpic_enable_ipi;
+ mpic->hc_ipi.disable = mpic_disable_ipi;
+ mpic->hc_ipi.end = mpic_end_ipi;
+#endif /* CONFIG_SMP */
+
+ mpic->flags = flags;
+ mpic->isu_size = isu_size;
+ mpic->irq_offset = irq_offset;
+ mpic->irq_count = irq_count;
+ mpic->ipi_offset = ipi_offset;
+ mpic->num_sources = 0; /* so far */
+ mpic->senses = senses;
+ mpic->senses_count = senses_count;
+
+ /* Map the global registers */
+ mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
+ mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
+ BUG_ON(mpic->gregs == NULL);
+
+ /* Reset */
+ if (flags & MPIC_WANTS_RESET) {
+ mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
+ mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+ | MPIC_GREG_GCONF_RESET);
+ while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+ & MPIC_GREG_GCONF_RESET)
+ mb();
+ }
+
+ /* Read feature register, calculate num CPUs and, for non-ISU
+ * MPICs, num sources as well. On ISU MPICs, sources are counted
+ * as ISUs are added
+ */
+ reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
+ mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
+ >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
+ if (isu_size == 0)
+ mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
+ >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
+
+ /* Map the per-CPU registers */
+ for (i = 0; i < mpic->num_cpus; i++) {
+ mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
+ i * MPIC_CPU_STRIDE, 0x1000);
+ BUG_ON(mpic->cpuregs[i] == NULL);
+ }
+
+ /* Initialize main ISU if none provided */
+ if (mpic->isu_size == 0) {
+ mpic->isu_size = mpic->num_sources;
+ mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
+ MPIC_IRQ_STRIDE * mpic->isu_size);
+ BUG_ON(mpic->isus[0] == NULL);
+ }
+ mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
+ mpic->isu_mask = (1 << mpic->isu_shift) - 1;
+
+ /* Display version */
+ switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
+ case 1:
+ vers = "1.0";
+ break;
+ case 2:
+ vers = "1.2";
+ break;
+ case 3:
+ vers = "1.3";
+ break;
+ default:
+ vers = "<unknown>";
+ break;
+ }
+ printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
+ name, vers, phys_addr, mpic->num_cpus);
+ printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
+ mpic->isu_shift, mpic->isu_mask);
+
+ mpic->next = mpics;
+ mpics = mpic;
+
+ if (flags & MPIC_PRIMARY)
+ mpic_primary = mpic;
+
+ return mpic;
+}
+
+void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+ unsigned long phys_addr)
+{
+ unsigned int isu_first = isu_num * mpic->isu_size;
+
+ BUG_ON(isu_num >= MPIC_MAX_ISU);
+
+ mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
+ if ((isu_first + mpic->isu_size) > mpic->num_sources)
+ mpic->num_sources = isu_first + mpic->isu_size;
+}
+
+void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
+ void *data)
+{
+ struct mpic *mpic = mpic_find(irq, NULL);
+ unsigned long flags;
+
+ /* Synchronization here is a bit dodgy, so don't try to replace cascade
+ * interrupts on the fly too often ... but normally it's set up at boot.
+ */
+ spin_lock_irqsave(&mpic_lock, flags);
+ if (mpic->cascade)
+ mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
+ mpic->cascade = NULL;
+ wmb();
+ mpic->cascade_vec = irq - mpic->irq_offset;
+ mpic->cascade_data = data;
+ wmb();
+ mpic->cascade = handler;
+ mpic_enable_irq(irq);
+ spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+void __init mpic_init(struct mpic *mpic)
+{
+ int i;
+
+ BUG_ON(mpic->num_sources == 0);
+
+ printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
+
+ /* Set current processor priority to max */
+ mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
+
+ /* Initialize timers: just disable them all */
+ for (i = 0; i < 4; i++) {
+ mpic_write(mpic->tmregs,
+ i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
+ mpic_write(mpic->tmregs,
+ i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
+ MPIC_VECPRI_MASK |
+ (MPIC_VEC_TIMER_0 + i));
+ }
+
+ /* Initialize IPIs to our reserved vectors and mark them disabled for now */
+ mpic_test_broken_ipi(mpic);
+ for (i = 0; i < 4; i++) {
+ mpic_ipi_write(i,
+ MPIC_VECPRI_MASK |
+ (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
+ (MPIC_VEC_IPI_0 + i));
+#ifdef CONFIG_SMP
+ if (!(mpic->flags & MPIC_PRIMARY))
+ continue;
+ irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
+ irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
+#endif /* CONFIG_SMP */
+ }
+
+ /* Initialize interrupt sources */
+ if (mpic->irq_count == 0)
+ mpic->irq_count = mpic->num_sources;
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ /* Do the HT PIC fixups on U3 broken mpic */
+ DBG("MPIC flags: %x\n", mpic->flags);
+ if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
+ mpic_scan_ht_pics(mpic);
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ for (i = 0; i < mpic->num_sources; i++) {
+ /* start with vector = source number, and masked */
+ u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
+ int level = 0;
+
+ /* if it's an IPI, we skip it */
+ if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
+ (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4))
+ continue;
+
+ /* do senses munging */
+ if (mpic->senses && i < mpic->senses_count) {
+ if (mpic->senses[i] & IRQ_SENSE_LEVEL)
+ vecpri |= MPIC_VECPRI_SENSE_LEVEL;
+ if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
+ vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
+ } else
+ vecpri |= MPIC_VECPRI_SENSE_LEVEL;
+
+ /* remember if it was a level interrupt */
+ level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
+
+ /* deal with broken U3 */
+ if (mpic->flags & MPIC_BROKEN_U3) {
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic_is_ht_interrupt(mpic, i)) {
+ vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
+ MPIC_VECPRI_POLARITY_MASK);
+ vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
+ }
+#else
+ printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
+#endif
+ }
+
+ DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
+ (level != 0));
+
+ /* init hw */
+ mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
+ mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+ 1 << hard_smp_processor_id());
+
+ /* init linux descriptors */
+ if (i < mpic->irq_count) {
+ irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
+ irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
+ }
+ }
+
+ /* Init spurious vector */
+ mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
+
+ /* Disable 8259 passthrough */
+ mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
+ mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
+ | MPIC_GREG_GCONF_8259_PTHROU_DIS);
+
+ /* Set current processor priority to 0 */
+ mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
+}
+
+
+
+void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
+{
+ unsigned is_ipi;
+ struct mpic *mpic = mpic_find(irq, &is_ipi);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpic_lock, flags);
+ if (is_ipi) {
+ reg = mpic_ipi_read(irq - mpic->ipi_offset) &
+ ~MPIC_VECPRI_PRIORITY_MASK;
+ mpic_ipi_write(irq - mpic->ipi_offset,
+ reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
+ } else {
+ reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI)
+ & ~MPIC_VECPRI_PRIORITY_MASK;
+ mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
+ reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
+ }
+ spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+unsigned int mpic_irq_get_priority(unsigned int irq)
+{
+ unsigned is_ipi;
+ struct mpic *mpic = mpic_find(irq, &is_ipi);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpic_lock, flags);
+ if (is_ipi)
+ reg = mpic_ipi_read(irq - mpic->ipi_offset);
+ else
+ reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
+ spin_unlock_irqrestore(&mpic_lock, flags);
+ return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
+}
+
+void mpic_setup_this_cpu(void)
+{
+#ifdef CONFIG_SMP
+ struct mpic *mpic = mpic_primary;
+ unsigned long flags;
+ u32 msk = 1 << hard_smp_processor_id();
+ unsigned int i;
+
+ BUG_ON(mpic == NULL);
+
+ DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
+
+ spin_lock_irqsave(&mpic_lock, flags);
+
+ /* let the mpic know we want intrs. default affinity is 0xffffffff
+ * until changed via /proc. That's how it's done on x86. If we want
+ * it differently, then we should make sure we also change the default
+ * values of irq_affinity in irq.c.
+ */
+ if (distribute_irqs) {
+ for (i = 0; i < mpic->num_sources ; i++)
+ mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+ mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
+ }
+
+ /* Set current processor priority to 0 */
+ mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
+
+ spin_unlock_irqrestore(&mpic_lock, flags);
+#endif /* CONFIG_SMP */
+}
+
+int mpic_cpu_get_priority(void)
+{
+ struct mpic *mpic = mpic_primary;
+
+ return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
+}
+
+void mpic_cpu_set_priority(int prio)
+{
+ struct mpic *mpic = mpic_primary;
+
+ prio &= MPIC_CPU_TASKPRI_MASK;
+ mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
+}
+
+/*
+ * XXX: someone who knows mpic should check this.
+ * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
+ * or can we reset the mpic in the new kernel?
+ */
+void mpic_teardown_this_cpu(int secondary)
+{
+ struct mpic *mpic = mpic_primary;
+ unsigned long flags;
+ u32 msk = 1 << hard_smp_processor_id();
+ unsigned int i;
+
+ BUG_ON(mpic == NULL);
+
+ DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
+ spin_lock_irqsave(&mpic_lock, flags);
+
+ /* let the mpic know we don't want intrs. */
+ for (i = 0; i < mpic->num_sources ; i++)
+ mpic_irq_write(i, MPIC_IRQ_DESTINATION,
+ mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
+
+ /* Set current processor priority to max */
+ mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
+
+ spin_unlock_irqrestore(&mpic_lock, flags);
+}
+
+
+void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
+{
+ struct mpic *mpic = mpic_primary;
+
+ BUG_ON(mpic == NULL);
+
+#ifdef DEBUG_IPI
+ DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
+
+ mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
+ mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
+}
+
+int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
+{
+ u32 irq;
+
+ irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
+#ifdef DEBUG_LOW
+ DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
+#endif
+ if (mpic->cascade && irq == mpic->cascade_vec) {
+#ifdef DEBUG_LOW
+ DBG("%s: cascading ...\n", mpic->name);
+#endif
+ irq = mpic->cascade(regs, mpic->cascade_data);
+ mpic_eoi(mpic);
+ return irq;
+ }
+ if (unlikely(irq == MPIC_VEC_SPURRIOUS))
+ return -1;
+ if (irq < MPIC_VEC_IPI_0) {
+#ifdef DEBUG_IRQ
+ DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
+#endif
+ return irq + mpic->irq_offset;
+ }
+#ifdef DEBUG_IPI
+ DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
+#endif
+ return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
+}
+
+int mpic_get_irq(struct pt_regs *regs)
+{
+ struct mpic *mpic = mpic_primary;
+
+ BUG_ON(mpic == NULL);
+
+ return mpic_get_one_irq(mpic, regs);
+}
+
+
+#ifdef CONFIG_SMP
+void mpic_request_ipis(void)
+{
+ struct mpic *mpic = mpic_primary;
+
+ BUG_ON(mpic == NULL);
+
+ printk("requesting IPIs ... \n");
+
+ /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
+ request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
+ "IPI0 (call function)", mpic);
+ request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
+ "IPI1 (reschedule)", mpic);
+ request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
+ "IPI2 (unused)", mpic);
+ request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
+ "IPI3 (debugger break)", mpic);
+
+ printk("IPIs requested... \n");
+}
+
+void smp_mpic_message_pass(int target, int msg)
+{
+ /* make sure we're sending something that translates to an IPI */
+ if ((unsigned int)msg > 3) {
+ printk("SMP %d: smp_message_pass: unknown msg %d\n",
+ smp_processor_id(), msg);
+ return;
+ }
+ switch (target) {
+ case MSG_ALL:
+ mpic_send_ipi(msg, 0xffffffff);
+ break;
+ case MSG_ALL_BUT_SELF:
+ mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+ break;
+ default:
+ mpic_send_ipi(msg, 1 << target);
+ break;
+ }
+}
+#endif /* CONFIG_SMP */
diff --git a/xen/arch/powerpc/mpic_init.c b/xen/arch/powerpc/mpic_init.c
new file mode 100644
index 0000000000..59cb8d80d2
--- /dev/null
+++ b/xen/arch/powerpc/mpic_init.c
@@ -0,0 +1,390 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/compile.h>
+#include <asm/mpic.h>
+#include "mpic_init.h"
+#include "oftree.h"
+#include "of-devtree.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#define PANIC(fmt...) DBG(fmt)
+
+static struct mpic *mpic;
+static unsigned long opic_addr;
+static unsigned int opic_flags;
+
+/*
+ * from OF_IEEE_1275
+ *
+ * pg 175, property "ranges"
+ *
+ * The number of integers in each size entry is determined by the
+ * value of the #size-cells property of this node (the node in which
+ * the ranges property appears) or 1 if the #size-cells property is
+ * absent.
+ *
+ *
+ * pg 177, property "reg"
+ *
+ * The number of integers in each size entry is determined by the
+ * value of the "#size-cells" property in the parent node. If the
+ * parent node has no such property, the value is one.
+ */
+static unsigned long reg2(void *oft_p, ofdn_t c)
+{
+ int rc;
+ /* the struct isa_reg_property is for a value of 2 for
+ * #address-cells and a value of 1 for #size-cells (of the
+ * parent).
+ */
+ struct isa_reg_property {
+ u32 space;
+ u32 address;
+ u32 size;
+ } isa_reg;
+
+ rc = ofd_getprop(oft_p, c, "reg", &isa_reg, sizeof(isa_reg));
+
+ DBG("%s: reg property address=0x%08x size=0x%08x\n", __func__,
+ isa_reg.address, isa_reg.size);
+ return isa_reg.address;
+}
+
+static unsigned long reg1(void *oft_p, ofdn_t c)
+{
+ int rc;
+ /* the struct reg_property32 is for a value of 1 for
+ * #address-cells and a value of 1 for #size-cells.
+ */
+ struct reg_property32 {
+ u32 address;
+ u32 size;
+ } reg;
+
+ rc = ofd_getprop(oft_p, c, "reg", &reg, sizeof(reg));
+
+ DBG("%s: reg property address=0x%08x size=0x%08x\n", __func__,
+ reg.address, reg.size);
+ return reg.address;
+}
+
+static unsigned long find_reg_addr_from_node(void *oft_p, ofdn_t c)
+{
+ int p_len;
+ unsigned long reg_addr = 0;
+ u32 size_c = 1;
+ u32 addr_c = 2;
+ ofdn_t parent;
+
+ if (c == OFD_ROOT) {
+ parent = c;
+ } else {
+ parent = ofd_node_parent(oft_p, c);
+ }
+
+ p_len = ofd_getprop(oft_p, parent, "#size-cells", &size_c, sizeof(size_c));
+ DBG("%s size is %d\n", __func__, size_c);
+
+ p_len = ofd_getprop(oft_p, parent, "#address-cells", &addr_c,
+ sizeof(addr_c));
+ DBG("%s address is %d\n", __func__, addr_c);
+
+ if ( 1 != size_c ) {
+ PANIC("Unsupported size for reg property\n");
+ }
+
+ if ( 1 == addr_c) {
+ reg_addr = reg1(oft_p, c);
+ } else if ( 2 == addr_c ) {
+ reg_addr = reg2(oft_p, c);
+ } else {
+ PANIC("Unsupported address size for reg property\n");
+ }
+ DBG("%s: address 0x%lx\n", __func__, reg_addr);
+ return reg_addr;
+}
+
+/*
+ * from OF_IEEE_1275
+ *
+ * pg 175, property "ranges"
+ *
+ * The ranges property value is a sequence of child-phys parent-phys
+ * size specifications. Child-phys is an address, encoded as with
+ * encode-phys, in the child address space. Parent-phys is an address
+ * (likewise encoded as with encode-phys) in the parent address
+ * space. Size is a list of integers, each encoded as with encode-int,
+ * denoting the length of the child's address range.
+ */
+static unsigned long find_ranges_addr_from_node(void *oft_p, ofdn_t c)
+{
+ unsigned long ranges_addr = 0;
+ int ranges_i;
+ ofdn_t parent;
+ u32 addr_c = 2;
+ u32 ranges[64];
+ int p_len;
+
+ parent = ofd_node_parent(oft_p, c);
+ parent = ofd_node_parent(oft_p, parent);
+
+ p_len = ofd_getprop(oft_p, parent, "ranges", &ranges, sizeof(ranges));
+ DBG("%s: ranges\n", __func__);
+ {
+ int i;
+ for (i = 0; i < p_len; i++)
+ DBG("%08x ", ranges[i]);
+ }
+ DBG("\n");
+
+ p_len = ofd_getprop(oft_p, parent, "#address-cells",
+ &addr_c, sizeof(addr_c));
+ DBG("%s address is %d\n", __func__, addr_c);
+ ranges_i = addr_c; /* skip over the child address */
+
+ DBG("%s address is %d\n", __func__, addr_c);
+ switch (addr_c) {
+ case 1:
+ ranges_addr = ranges[ranges_i];
+ break;
+ case 2:
+ ranges_addr = (((u64)ranges[ranges_i]) << 32) |
+ ranges[ranges_i + 1];
+ break;
+ case 3: /* the G5 case, how to squeeze 96 bits into 64 */
+ ranges_addr = (((u64)ranges[ranges_i+1]) << 32) |
+ ranges[ranges_i + 2];
+ break;
+ case 4:
+ ranges_addr = (((u64)ranges[ranges_i+2]) << 32) |
+ ranges[ranges_i + 4];
+ break;
+ default:
+ PANIC("#address-cells out of range\n");
+ break;
+ }
+
+ DBG("%s: address 0x%lx\n", __func__, ranges_addr);
+ return ranges_addr;
+}
+
+static unsigned long find_pic_address_from_node(void *oft_p, ofdn_t c)
+{
+ unsigned long reg_addr, range_addr, addr;
+
+ /*
+ * The address is the sum of the address in the reg property of this node
+ * and the ranges property of the granparent node.
+ */
+ reg_addr = find_reg_addr_from_node(oft_p, c);
+ range_addr = find_ranges_addr_from_node(oft_p, c);
+ addr = reg_addr + range_addr;
+ DBG("%s: address 0x%lx\n", __func__, addr);
+ return addr;
+}
+
+static unsigned int find_pic_flags_from_node(void *oft_p, ofdn_t c)
+{
+ int be_len;
+ unsigned int flags = 0;
+
+ /* does it have the property big endian? */
+ be_len = ofd_getprop(oft_p, c, "big_endian", NULL, 0);
+ if (be_len >= 0) {
+ DBG("%s: Big Endian found\n", __func__);
+ flags |= MPIC_BIG_ENDIAN;
+ }
+ DBG("%s: flags 0x%x\n", __func__, flags);
+ return flags;
+}
+
+static int find_mpic_simple_probe(void *oft_p)
+{
+ u32 addr_cells;
+ int rc;
+ u32 addr[2];
+
+ rc = ofd_getprop(oft_p, OFD_ROOT, "#address-cells",
+ &addr_cells, sizeof(addr_cells));
+ if ( rc < 0 ) {
+ /* if the property does not exist use its default value, 2 */
+ addr_cells = 2;
+ }
+
+ rc = ofd_getprop(oft_p, OFD_ROOT, "platform-open-pic", addr, sizeof(addr));
+ if (rc < 0) {
+ return rc;
+ }
+
+ opic_addr = addr[0];
+ if (addr_cells == 2) {
+ opic_addr <<= 32;
+ opic_addr |= addr[1];
+ }
+ DBG("%s: found OpenPIC at: 0x%lx\n", __func__, opic_addr);
+ /* we did not really find the pic device, only its address.
+ * We use big endian and broken u3 by default.
+ */
+ opic_flags |= MPIC_BIG_ENDIAN | MPIC_BROKEN_U3;
+ return 0;
+}
+
+static int find_mpic_canonical_probe(void *oft_p)
+{
+ ofdn_t c;
+ const char mpic_type[] = "open-pic";
+ /* some paths are special and we cannot find the address
+ * by the usual method */
+ const char *excluded_paths[] = { "/interrupt-controller" };
+
+ /*
+ * Search through the OFD tree for all devices of type 'open-pic'.
+ * We select the one without an 'interrupt' property.
+ */
+ c = ofd_node_find_by_prop(oft_p, OFD_ROOT, "device_type", mpic_type,
+ sizeof(mpic_type));
+ while (c > 0) {
+ int int_len;
+ int good_mpic;
+ const char * path = ofd_node_path(oft_p, c);
+
+ good_mpic = 0;
+ int_len = ofd_getprop(oft_p, c, "interrupts", NULL, 0);
+ if (int_len < 0) {
+ int i;
+
+ /* there is no "interrupts" property. This could be the pic */
+ DBG("%s: potential OpenPIC in: %s\n", __func__, path);
+ good_mpic = 1;
+
+ for (i = 0; i < ARRAY_SIZE(excluded_paths) && good_mpic; i++) {
+ const char *excluded_path = excluded_paths[i];
+ if (!strncmp(path, excluded_path, strlen(excluded_path)))
+ good_mpic = 0;
+ }
+ }
+
+ if (good_mpic) {
+ DBG("%s: found OpenPIC in: %s\n", __func__, path);
+ opic_addr = find_pic_address_from_node(oft_p, c);
+ opic_flags = find_pic_flags_from_node(oft_p, c);
+ return 0;
+ }
+
+ c = ofd_node_find_next(oft_p, c);
+ }
+
+ DBG("%s: Could not find a pic\n", __func__);
+ return -1;
+}
+
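+/* Try the simple probe first (the flattened "platform-open-pic"
+ * property in the root node); if that is absent, fall back to walking
+ * the device tree for a suitable "open-pic" node. */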
+static int find_mpic(void)
+{
+ void *oft_p;
+ int rc;
+
+ opic_addr = (unsigned long)-1;
+ opic_flags = 0;
+
+ oft_p = (void *)oftree;
+ rc = find_mpic_simple_probe(oft_p);
+
+ if (rc < 0) {
+ DBG("%s: Searching for pic ...\n", __func__);
+ rc = find_mpic_canonical_probe(oft_p);
+ }
+
+ return rc;
+}
+
+static struct hw_interrupt_type hc_irq;
+
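+/* Splice Xen's interrupt handlers into the mpic's hw_interrupt_type,
+ * saving the mpic's own operations in hc_irq so the caller can still
+ * chain to the real hardware ops. */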
+struct hw_interrupt_type *xen_mpic_init(struct hw_interrupt_type *xen_irq)
+{
+ unsigned int isu_size;
+ unsigned int irq_offset;
+ unsigned int irq_count;
+ unsigned int ipi_offset;
+ unsigned char *senses;
+ unsigned int senses_count;
+
+ printk("%s: start\n", __func__);
+
+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
+ irq_vector[0] = FIRST_DEVICE_VECTOR;
+ vector_irq[FIRST_DEVICE_VECTOR] = 0;
+
+ isu_size = 0;
+ irq_offset = 0;
+ irq_count = 128;
+ ipi_offset = 128;
+ senses = NULL;
+ senses_count = 0;
+
+ if (find_mpic()) {
+ printk("%s: ERROR: Could not find open pic.\n", __func__);
+ return NULL;
+ }
+
+ mpic = mpic_alloc(opic_addr,
+ opic_flags | MPIC_PRIMARY | MPIC_WANTS_RESET,
+ isu_size, irq_offset, irq_count,
+ ipi_offset, senses, senses_count, "Xen-U3-MPIC");
+
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+
+ hc_irq.startup = mpic->hc_irq.startup;
+ mpic->hc_irq.startup = xen_irq->startup;
+
+ hc_irq.enable = mpic->hc_irq.enable;
+ mpic->hc_irq.enable = xen_irq->enable;
+
+ hc_irq.disable = mpic->hc_irq.disable;
+ mpic->hc_irq.disable = xen_irq->disable;
+
+ hc_irq.shutdown = mpic->hc_irq.shutdown;
+ mpic->hc_irq.shutdown = xen_irq->shutdown;
+
+ hc_irq.ack = mpic->hc_irq.ack;
+ mpic->hc_irq.ack = xen_irq->ack;
+
+ hc_irq.end = mpic->hc_irq.end;
+ mpic->hc_irq.end = xen_irq->end;
+
+ hc_irq.set_affinity = mpic->hc_irq.set_affinity;
+ mpic->hc_irq.set_affinity = xen_irq->set_affinity;
+
+ printk("%s: success\n", __func__);
+ return &hc_irq;
+}
+
+int xen_mpic_get_irq(struct cpu_user_regs *regs)
+{
+ BUG_ON(mpic == NULL);
+
+ return mpic_get_one_irq(mpic, regs);
+}
diff --git a/xen/arch/powerpc/mpic_init.h b/xen/arch/powerpc/mpic_init.h
new file mode 100644
index 0000000000..8a16b88653
--- /dev/null
+++ b/xen/arch/powerpc/mpic_init.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _MPIC_INIT_H
+#define _MPIC_INIT_H
+
+extern struct hw_interrupt_type *xen_mpic_init(
+ struct hw_interrupt_type *xen_irq);
+
+extern int xen_mpic_get_irq(struct cpu_user_regs *regs);
+
+#endif /* #ifndef _MPIC_INIT_H */
diff --git a/xen/arch/powerpc/of-devtree.c b/xen/arch/powerpc/of-devtree.c
new file mode 100644
index 0000000000..03ac256738
--- /dev/null
+++ b/xen/arch/powerpc/of-devtree.c
@@ -0,0 +1,1088 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+/* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ * This code is intended to be used by relocatable routines, so PLEASE
+ * do not place any global data here, including const integrals or
+ * literals.
+ * The local assert() is OK for string literal usage, but that's it.
+ */
+
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include "of-devtree.h"
+
+static int (*ofd_write)(const char *, size_t len) = NULL;
+
+void ofd_init(int (*write)(const char *, size_t len))
+{
+ ofd_write = write;
+}
+
+
+static void ofd_stop(void)
+{
+ for ( ; ; ) ;
+}
+
+/* this is so it can be called from anywhere */
+static void ofd_assprint(int line)
+{
+ char a[13];
+ char num[20];
+ int i;
+
+ a[0] = '\n';
+ a[1] = '\n';
+ a[2] = 'O';
+ a[3] = 'F';
+ a[4] = 'D';
+ a[5] = ':';
+ a[6] = 'A';
+ a[7] = 'S';
+ a[8] = 'S';
+ a[9] = 'E';
+ a[10] = 'R';
+ a[11] = 'T';
+ a[12] = ':';
+
+
+ ofd_write(a, sizeof (a) - 1);
+
+ /* put the number in backwards */
+ i = 0;
+ while ( line > 0 ) {
+ num[i++] = '0' + (line % 10);
+ line /= 10;
+ }
+ /* print it */
+ /* number */
+ while (i-- > 0) {
+ ofd_write(&num[i], 1);
+ }
+ ofd_write("\n", 1);
+
+ ofd_stop();
+}
+
+#ifdef assert
+#undef assert
+#endif
+
+#define assert(EX) \
+ do { \
+ if ( !(EX) ) { \
+ ofd_assprint(__LINE__); \
+ } \
+ } while (0)
+
+/*
+ * We treat memory like an array of u64. For the sake of
+ * compactness we assume that an ofdn_t is big enough as an index.
+ */
+struct ofd_node {
+ ofdn_t on_ima;
+ ofdn_t on_parent;
+ ofdn_t on_child;
+ ofdn_t on_peer;
+ ofdn_t on_io;
+ ofdn_t on_next; /* for search lists */
+ ofdn_t on_prev;
+ ofdn_t on_prop;
+ u32 on_pathlen;
+ u32 on_last;
+ char on_path[0];
+};
+
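+/*
+ * A property record: op_data holds the op_objsz value bytes first,
+ * immediately followed by the NUL-terminated property name (op_namesz
+ * bytes); the whole record is rounded up to whole u64 cells.
+ */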
+struct ofd_prop {
+ ofdn_t op_ima;
+ ofdn_t op_next;
+ u32 op_objsz;
+ u32 op_namesz;
+ /* must have 64bit alignment */
+ char op_data[0] __attribute__ ((aligned(8)));
+};
+
+struct ofd_io {
+ ofdn_t oi_ima;
+ ofdn_t oi_node;
+ u64 oi_open __attribute__ ((aligned(8)));
+};
+
+struct ofd_free {
+ ofdn_t of_cells;
+ ofdn_t of_next;
+};
+
+struct ofd_mem {
+ ofdn_t om_num;
+ ofdn_t om_next;
+ ofdn_t om_free; /* Future site of a free list */
+ ofdn_t _om_pad;
+ u64 om_mem[0] __attribute__((aligned(8)));
+};
+
+#define NODE_PAT 0x0f01
+#define PROP_PAT 0x0f03
+#define IO_PAT 0x0f05
+
+
+size_t ofd_size(void *mem)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ size_t sz;
+
+ sz = m->om_next * sizeof (u64) + sizeof(*m);
+ return sz;
+}
+
+size_t ofd_space(void *mem)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ size_t sz;
+
+ sz = m->om_num * sizeof (u64);
+ return sz;
+}
+
+
+static int ofd_pathsplit_right(const char *s, int c, size_t max)
+{
+ int i = 0;
+
+ if ( max == 0 ) {
+ --max;
+ }
+
+ while ( *s != '\0' && *s != c && max != 0 ) {
+ ++i;
+ ++s;
+ --max;
+ }
+ return i;
+}
+
+static int ofd_pathsplit_left(const char *p, int c, size_t len)
+{
+ const char *s;
+
+ if ( len > 0 ) {
+ /* move s to the end */
+ s = p + len - 1;
+
+ /* len could include a null */
+ if ( *s == '\0' ) {
+ --s;
+ }
+ while ( s >= p ) {
+ if ( *s == c ) {
+ ++s;
+ break;
+ }
+ --s;
+ }
+ if ( s < p ) {
+ return 0;
+ }
+ return (s - p);
+ }
+ return 0;
+}
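+
+/*
+ * For example, with s = "cpus/PowerPC,970@0":
+ * ofd_pathsplit_right(s, '/', 0) returns 4, the length of the leading
+ * "cpus" component, and ofd_pathsplit_left(s, '/', strlen(s)) returns 5,
+ * the offset just past the last '/', i.e. where "PowerPC,970@0" starts.
+ */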
+
+void *ofd_create(void *mem, size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *n;
+ size_t sum;
+ ofdn_t cells;
+
+ if ( sz < (sizeof (*n) * 4) ) {
+ return NULL;
+ }
+
+ memset(mem, 0, sz);
+
+ m->om_num = (sz - sizeof(*m)) / sizeof (u64);
+
+ /* skip the first cell */
+ m->om_next = OFD_ROOT;
+ n = (struct ofd_node *)&m->om_mem[m->om_next];
+ n->on_ima = NODE_PAT;
+ n->on_pathlen = 2;
+ n->on_last = 1;
+ n->on_path[0] = '/';
+ n->on_path[1] = '\0';
+
+ sum = sizeof (*n) + 2; /* Don't forget the path */
+ cells = (sum + sizeof (m->om_mem[0]) - 1) / sizeof (m->om_mem[0]);
+ m->om_next += cells;
+
+ return m;
+}
+
+static struct ofd_node *ofd_node_get(struct ofd_mem *m, ofdn_t n)
+{
+ if ( n < m->om_next ) {
+ struct ofd_node *r;
+
+ r = (struct ofd_node *)&m->om_mem[n];
+ if ( r->on_ima == NODE_PAT ) {
+ return r;
+ }
+ }
+ return NULL;
+}
+
+ofdn_t ofd_node_parent(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, n);
+
+ if ( r == NULL) return 0;
+ return r->on_parent;
+}
+
+ofdn_t ofd_node_peer(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r;
+
+ if ( n == 0 ) {
+ return OFD_ROOT;
+ }
+
+ r = ofd_node_get(m, n);
+ if ( r == NULL) return 0;
+ return r->on_peer;
+}
+
+const char *ofd_node_path(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, n);
+
+ if ( r == NULL) return NULL;
+ return r->on_path;
+}
+
+static ofdn_t ofd_node_prop(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, n);
+
+ if ( r == NULL) return 0;
+ return r->on_prop;
+}
+
+ofdn_t ofd_node_child(void *mem, ofdn_t p)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, p);
+
+ if ( r == NULL) return 0;
+ return r->on_child;
+}
+
+int ofd_node_to_path(void *mem, ofdn_t p, void *buf, size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, p);
+
+ if ( r == NULL) return -1;
+
+ if ( sz > r->on_pathlen ) {
+ sz = r->on_pathlen;
+ }
+
+ memcpy(buf, r->on_path, sz);
+
+ return r->on_pathlen;
+}
+
+static int ofd_check(void *p, size_t l)
+{
+ int i;
+ u64 *v = (u64 *)p;
+
+ for ( i = 0; i < l; i++ ) {
+ if ( v[i] != 0ULL ) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+
+static ofdn_t ofd_node_create(
+ struct ofd_mem *m, const char *path, size_t pathlen)
+{
+ struct ofd_node *n;
+ ofdn_t pos;
+ size_t sum = pathlen + 1 + sizeof (*n); /* add trailing zero to path */
+ ofdn_t cells = (sum + sizeof(m->om_mem[0]) - 1) / sizeof(m->om_mem[0]);
+
+ if ( m->om_next + cells >= m->om_num ) {
+ return 0;
+ }
+
+ pos = m->om_next;
+
+ assert(ofd_check(&m->om_mem[pos], cells)); /* non-zero */
+ m->om_next += cells;
+
+ n = (struct ofd_node *)&m->om_mem[pos];
+ assert(n->on_ima == 0); /* new node not empty */
+
+ n->on_ima = NODE_PAT;
+ n->on_peer = 0;
+ n->on_child = 0;
+ n->on_io = 0;
+ n->on_pathlen = pathlen;
+ n->on_last = ofd_pathsplit_left(path, '/', pathlen);
+ strncpy(n->on_path, path, pathlen);
+ n->on_path[n->on_pathlen] = 0;
+
+ return pos;
+}
+
+/* prunes a node and all its children simply by wasting memory and
+ * unlinking it from the tree */
+int ofd_node_prune(void *mem, ofdn_t node)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *n;
+ struct ofd_node *p;
+
+ n = ofd_node_get(m, node);
+ if (n == NULL) return -1;
+
+ p = ofd_node_get(m, n->on_parent);
+ assert(p != NULL);
+
+ if ( p->on_child == node ) {
+ /* easy case */
+ p->on_child = n->on_peer;
+ } else {
+ struct ofd_node *s;
+
+ s = ofd_node_get(m, p->on_child);
+ assert(s != NULL);
+ while ( s->on_peer != node ) {
+ s = ofd_node_get(m, s->on_peer);
+ assert(s != NULL);
+ }
+ s->on_peer = n->on_peer;
+ }
+ return 1;
+}
+
+ofdn_t ofd_prune_path(void *m, const char *path)
+{
+ ofdn_t n;
+ int rc = -1;
+ while ((n = ofd_node_find(m, path)) > 0) {
+ rc = ofd_node_prune(m, n);
+ }
+
+ return rc;
+}
+
+ofdn_t ofd_node_child_create(
+ void *mem, ofdn_t parent, const char *path, size_t pathlen)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *p;
+ struct ofd_node *n;
+ ofdn_t pos;
+
+ p = ofd_node_get(m, parent);
+ if (p == NULL) return 0;
+
+ pos = ofd_node_create(m, path, pathlen);
+ n = ofd_node_get(m, pos);
+ assert(n != NULL);
+
+ assert(p->on_child == 0); /* child exists */
+ if ( p->on_child == 0 ) {
+ p->on_child = pos;
+ n->on_parent = parent;
+ } else {
+ pos = 0;
+ }
+
+ return pos;
+}
+
+ofdn_t ofd_node_peer_create(
+ void *mem, ofdn_t sibling, const char *path, size_t pathlen)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *s;
+ struct ofd_node *n;
+ ofdn_t pos;
+
+ s = ofd_node_get(m, sibling);
+ if (s == NULL) return 0;
+
+ pos = ofd_node_create(m, path, pathlen);
+ n = ofd_node_get(m, pos);
+ assert(n != NULL);
+
+ if ( s->on_peer == 0 ) {
+ s->on_peer = pos;
+ n->on_parent = s->on_parent;
+ } else {
+ assert(0); /* peer exists */
+ pos = 0;
+ }
+ return pos;
+}
+
+static ofdn_t ofd_node_peer_last(void *mem, ofdn_t c)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *n;
+
+ n = ofd_node_get(m, c);
+ if (n == NULL) return 0;
+
+ while ( n->on_peer > 0 ) {
+ c = n->on_peer;
+ n = ofd_node_get(m, c);
+ assert(n != NULL);
+ }
+
+ return c;
+}
+
+static ofdn_t ofd_node_walk(struct ofd_mem *m, ofdn_t p, const char *s)
+{
+ struct ofd_node *np;
+ ofdn_t n;
+ ofdn_t r;
+
+ if ( *s == '/' ) {
+ ++s;
+ if ( *s == '\0' ) {
+ assert(0); /* ends in / */
+ return 0;
+ }
+ }
+
+ np = ofd_node_get(m, p);
+ if (np == NULL) return 0;
+
+ r = p;
+ do {
+ int last = np->on_last;
+ size_t lsz = np->on_pathlen - last;
+ size_t sz;
+
+ sz = ofd_pathsplit_right(s, '/', 0);
+
+ if ( lsz > 0 && strncmp(s, &np->on_path[last], sz) == 0 ) {
+ if ( s[sz] == '\0' ) {
+ return r;
+ }
+ /* there is more to the path */
+ n = ofd_node_child(m, p);
+ if ( n != 0 ) {
+ r = ofd_node_walk(m, n, &s[sz]);
+ return r;
+ }
+ /* there are no children */
+ return 0;
+ }
+ } while ( 0 );
+
+ /*
+ * we know that usually we are only searching for top-level peers,
+ * so check the peers first
+ */
+ n = ofd_node_peer(m, p);
+ if ( n > 0 ) {
+ r = ofd_node_walk(m, n, s);
+ } else {
+ r = 0;
+ }
+
+ return r;
+}
+
+
+ofdn_t ofd_node_find(void *mem, const char *devspec)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t n = OFD_ROOT;
+ const char *s = devspec;
+ size_t sz;
+
+ if ( s == NULL || s[0] == '\0' ) {
+ return OFD_ROOT;
+ }
+
+ if ( s[0] != '/' ) {
+ size_t asz;
+
+ /* get the component length */
+ sz = ofd_pathsplit_right(s, '/', 0);
+
+ /* check for an alias */
+ asz = ofd_pathsplit_right(s, ':', sz);
+
+ if ( s[asz] == ':' ) {
+ /*
+ * s points to an alias and &s[sz] points to the alias
+ * args.
+ */
+ assert(0); /* aliases not supported */
+ return 0;
+ }
+ } else if ( s[1] == '\0' ) {
+ return n;
+ }
+
+ n = ofd_node_child(m, n);
+ if ( n == 0 ) {
+ return 0;
+ }
+
+ return ofd_node_walk(m, n, s);
+}
+
+
+static struct ofd_prop *ofd_prop_get(struct ofd_mem *m, ofdn_t p)
+{
+ if ( p < m->om_next ) {
+ struct ofd_prop *r;
+
+ r = (struct ofd_prop *)&m->om_mem[p];
+ if ( r->op_ima == PROP_PAT ) {
+ return r;
+ }
+ assert(r->op_ima == PROP_PAT); /* bad object */
+ }
+ return NULL;
+}
+
+static ofdn_t ofd_prop_create(
+ struct ofd_mem *m,
+ ofdn_t node,
+ const char *name,
+ const void *src,
+ size_t sz)
+{
+ struct ofd_node *n;
+ struct ofd_prop *p;
+ size_t len = strlen(name) + 1;
+ size_t sum = sizeof (*p) + sz + len;
+ ofdn_t cells;
+ char *dst;
+ ofdn_t pos;
+
+ cells = (sum + sizeof (m->om_mem[0]) - 1) / sizeof (m->om_mem[0]);
+
+ if ( m->om_next + cells >= m->om_num ) {
+ return 0;
+ }
+
+ /* actual data structure */
+ pos = m->om_next;
+ assert(ofd_check(&m->om_mem[pos], cells)); /* non-zero */
+
+ p = (struct ofd_prop *)&m->om_mem[pos];
+ m->om_next += cells;
+
+ assert(p->op_ima == 0); /* new node not empty */
+ p->op_ima = PROP_PAT;
+ p->op_next = 0;
+ p->op_objsz = sz;
+ p->op_namesz = len;
+
+ /* the rest of the data */
+ dst = p->op_data;
+
+ /* zero what will be the pad, cheap and cannot hurt */
+ m->om_mem[m->om_next - 1] = 0;
+
+ if ( sz > 0 ) {
+ /* some props have no data, just a name */
+ memcpy(dst, src, sz);
+ dst += sz;
+ }
+
+ memcpy(dst, name, len);
+
+ /* now place it in the tree */
+ n = ofd_node_get(m, node);
+ assert(n != NULL);
+
+ if ( n->on_prop == 0 ) {
+ n->on_prop = pos;
+ } else {
+ ofdn_t pn = n->on_prop;
+ struct ofd_prop *nxt;
+
+ for (;;) {
+ nxt = ofd_prop_get(m, pn);
+ if (nxt->op_next == 0) {
+ nxt->op_next = pos;
+ break;
+ }
+ pn = nxt->op_next;
+ }
+ }
+
+ return pos;
+}
+
+void ofd_prop_remove(void *mem, ofdn_t node, ofdn_t prop)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *n = ofd_node_get(m, node);
+ struct ofd_prop *p = ofd_prop_get(m, prop);
+
+ if (n == NULL) return;
+ if (p == NULL) return;
+
+ if ( n->on_prop == prop ) {
+ n->on_prop = p->op_next;
+ } else {
+ ofdn_t pn = n->on_prop;
+ struct ofd_prop *nxt;
+
+ for ( ; ; ) {
+ nxt = ofd_prop_get(m, pn);
+ if ( nxt->op_next == prop ) {
+ nxt->op_next = p->op_next;
+ break;
+ }
+ pn = nxt->op_next;
+ }
+ }
+ return;
+}
+
+ofdn_t ofd_prop_find(void *mem, ofdn_t n, const char *name)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t p = ofd_node_prop(m, n);
+ struct ofd_prop *r;
+ char *s;
+ size_t len;
+
+ if ( name == NULL || *name == '\0' ) {
+ return OFD_ROOT;
+ }
+
+ len = strlen(name) + 1;
+
+ while ( p != 0 ) {
+ r = ofd_prop_get(m, p);
+ s = &r->op_data[r->op_objsz];
+ if ( len == r->op_namesz ) {
+ if ( strncmp(name, s, r->op_namesz) == 0 ) {
+ break;
+ }
+ }
+ p = r->op_next;
+ }
+ return p;
+}
+
+static ofdn_t ofd_prop_next(struct ofd_mem *m, ofdn_t n, const char *prev)
+{
+ ofdn_t p;
+
+ if ( prev == NULL || *prev == '\0' ) {
+ /* give the first */
+ p = ofd_node_prop(m, n);
+ } else {
+ struct ofd_prop *r;
+
+ /* look for the name */
+ p = ofd_prop_find(m, n, prev);
+ if ( p != 0 ) {
+ /* get the data for prev */
+ r = ofd_prop_get(m, p);
+
+ /* now get next */
+ p = r->op_next;
+ } else {
+ p = -1;
+ }
+ }
+
+ return p;
+}
+
+ofdn_t ofd_nextprop(void *mem, ofdn_t n, const char *prev, char *name)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t p = ofd_prop_next(m, n, prev);
+ struct ofd_prop *r;
+ char *s;
+
+ if ( p > 0 ) {
+ r = ofd_prop_get(m, p);
+ s = &r->op_data[r->op_objsz];
+ memcpy(name, s, r->op_namesz);
+ }
+
+ return p;
+}
+
+/*
+ * It is valid to call with NULL pointers, in case you want only one
+ * cell size.
+ */
+int ofd_getcells(void* mem, ofdn_t n, u32* addr_cells, u32* size_cells)
+{
+ if ( addr_cells != NULL ) *addr_cells = 0;
+ if ( size_cells != NULL ) *size_cells = 0;
+
+retry:
+ if ( addr_cells != NULL && *addr_cells == 0 ) {
+ ofd_getprop(mem, n, "#address-cells",
+ addr_cells, sizeof(u32));
+ }
+
+ if ( size_cells != NULL && *size_cells == 0 ) {
+ ofd_getprop(mem, n, "#size-cells", size_cells, sizeof(u32));
+ }
+
+ if ( ( size_cells != NULL && *size_cells == 0 )
+ || ( addr_cells != NULL && *addr_cells == 0 ) ) {
+ if ( n != OFD_ROOT ) {
+ n = ofd_node_parent(mem, n);
+ goto retry;
+ }
+ return -1;
+ }
+
+ return 1;
+}
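+
+/*
+ * For example, ofd_getcells() on a parent node with #address-cells = 2
+ * and #size-cells = 1 means each "reg" entry of its children is three
+ * u32 cells: a 64-bit address followed by a 32-bit size. Callers
+ * usually pass the parent node here and use the returned counts to
+ * decode "reg" and "ranges" properties.
+ */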
+
+int ofd_getprop(void *mem, ofdn_t n, const char *name, void *buf, size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t p = ofd_prop_find(m, n, name);
+ struct ofd_prop *r;
+
+ if ( p == 0 ) {
+ return -1;
+ }
+
+ r = ofd_prop_get(m, p);
+
+ if ( sz > r->op_objsz ) {
+ sz = r->op_objsz;
+ }
+ memcpy(buf, r->op_data, sz);
+
+ return r->op_objsz;
+}
+
+int ofd_getproplen(void *mem, ofdn_t n, const char *name)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t p = ofd_prop_find(m, n, name);
+ struct ofd_prop *r;
+
+ if ( p == 0 ) {
+ return -1;
+ }
+
+ r = ofd_prop_get(m, p);
+
+ return r->op_objsz;
+}
+
+static ofdn_t ofd_prop_set(
+ void *mem, ofdn_t n, const char *name, const void *src, size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t p = ofd_prop_find(m, n, name);
+ struct ofd_prop *r;
+ char *dst;
+
+ r = ofd_prop_get(m, p);
+
+ if ( sz <= r->op_objsz ) {
+ /* we can reuse */
+ memcpy(r->op_data, src, sz);
+ if ( sz < r->op_objsz ) {
+ /* need to move name */
+ dst = r->op_data + sz;
+ /*
+ * use the size of the name argument; it may overlap with the
+ * original
+ */
+ memcpy(dst, name, r->op_namesz);
+ r->op_objsz = sz;
+ }
+ } else {
+ /*
+ * Sadly, we remove it from the list, wasting the space, and then
+ * create a new one
+ */
+ ofd_prop_remove(m, n, p);
+ p = ofd_prop_create(mem, n, name, src, sz);
+ }
+
+ return p;
+}
+
+int ofd_setprop(
+ void *mem, ofdn_t n, const char *name, const void *buf, size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t r;
+
+ r = ofd_prop_find(m, n, name);
+ if ( r == 0 ) {
+ r = ofd_prop_create(mem, n, name, buf, sz);
+ } else {
+ r = ofd_prop_set(mem, n, name, buf, sz);
+ }
+
+ if ( r > 0 ) {
+ struct ofd_prop *pp = ofd_prop_get(m, r);
+ return pp->op_objsz;
+ }
+
+ return OF_FAILURE;
+}
+
+
+static ofdn_t ofd_find_by_prop(
+ struct ofd_mem *m,
+ ofdn_t head,
+ ofdn_t *prev_p,
+ ofdn_t n,
+ const char *name,
+ const void *val,
+ size_t sz)
+{
+ struct ofd_node *np;
+ struct ofd_prop *pp;
+ ofdn_t p;
+
+retry:
+ p = ofd_prop_find(m, n, name);
+
+ if ( p > 0 ) {
+ int match = 0;
+
+ /* a property exists by that name */
+ if ( val == NULL ) {
+ match = 1;
+ } else {
+ /* need to compare values */
+ pp = ofd_prop_get(m, p);
+ if ( sz == pp->op_objsz
+ && memcmp(pp->op_data, val, sz) == 0 ) {
+ match = 1;
+ }
+ }
+ if ( match == 1 ) {
+ if ( *prev_p >= 0 ) {
+ np = ofd_node_get(m, *prev_p);
+ np->on_next = n;
+ } else {
+ head = n;
+ }
+ np = ofd_node_get(m, n);
+ np->on_prev = *prev_p;
+ np->on_next = -1;
+ *prev_p = n;
+ }
+ }
+
+ p = ofd_node_child(m, n);
+ if ( p > 0 ) {
+ head = ofd_find_by_prop(m, head, prev_p, p, name, val, sz);
+ }
+
+ p = ofd_node_peer(m, n);
+ if ( p > 0 ) {
+ n = p;
+ goto retry;
+ }
+
+ return head;
+}
+
+ofdn_t ofd_node_find_by_prop(
+ void *mem,
+ ofdn_t n,
+ const char *name,
+ const void *val,
+ size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+
+ if ( n <= 0 ) {
+ n = OFD_ROOT;
+ }
+
+ ofdn_t prev = -1;
+ return ofd_find_by_prop(m, -1, &prev, n, name, val, sz);
+}
+
+ofdn_t ofd_node_find_next(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *np;
+
+ np = ofd_node_get(m, n);
+
+ if (np == NULL) return 0;
+ return np->on_next;
+}
+
+ofdn_t ofd_node_find_prev(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *np;
+
+ np = ofd_node_get(m, n);
+ if (np == NULL) return 0;
+
+ return np->on_prev;
+}
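+
+/*
+ * Sketch of how the search list built by ofd_node_find_by_prop() is
+ * walked (illustrative property values):
+ *
+ * ofdn_t c = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
+ * "cpu", sizeof ("cpu"));
+ * while (c > 0) {
+ * ... use node c ...
+ * c = ofd_node_find_next(m, c);
+ * }
+ */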
+
+ofdn_t ofd_io_create(void *mem, ofdn_t node, u64 open)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *n;
+ struct ofd_io *i;
+ ofdn_t pos;
+ ofdn_t cells;
+
+ cells = (sizeof (*i) + sizeof (m->om_mem[0]) - 1) / sizeof(m->om_mem[0]);
+
+ n = ofd_node_get(m, node);
+ if ( n == NULL ) return 0;
+
+ if ( m->om_next + cells >= m->om_num ) {
+ return 0;
+ }
+
+ pos = m->om_next;
+ assert(ofd_check(&m->om_mem[pos], cells)); /* non-zero */
+
+ m->om_next += cells;
+
+ i = (struct ofd_io *)&m->om_mem[pos];
+ assert(i->oi_ima == 0); /* new node not empty */
+
+ i->oi_ima = IO_PAT;
+ i->oi_node = node;
+ i->oi_open = open;
+
+ n->on_io = pos;
+
+ return pos;
+}
+
+static struct ofd_io *ofd_io_get(struct ofd_mem *m, ofdn_t i)
+{
+ if ( i < m->om_next ) {
+ struct ofd_io *r;
+
+ r = (struct ofd_io *)&m->om_mem[i];
+ if ( r->oi_ima == IO_PAT ) {
+ return r;
+ }
+ assert(r->oi_ima == IO_PAT); /* bad object */
+ }
+
+ return NULL;
+}
+
+ofdn_t ofd_node_io(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_node *r = ofd_node_get(m, n);
+
+ if (r == NULL) return 0;
+ return r->on_io;
+}
+
+uint ofd_io_open(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_io *r = ofd_io_get(m, n);
+
+ if (r == NULL) return 0;
+ return r->oi_open;
+}
+
+void ofd_io_close(void *mem, ofdn_t n)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ struct ofd_io *o = ofd_io_get(m, n);
+ struct ofd_node *r;
+
+ assert(o != NULL);
+ r = ofd_node_get(m, o->oi_node);
+ assert(r != NULL);
+ o->oi_open = 0;
+ r->on_io = 0;
+}
+
+ofdn_t ofd_node_add(void *m, ofdn_t p, const char *path, size_t sz)
+{
+ ofdn_t n;
+
+ n = ofd_node_child(m, p);
+ if ( n > 0 ) {
+ n = ofd_node_peer_last(m, n);
+ if ( n > 0 ) {
+ n = ofd_node_peer_create(m, n, path, sz);
+ }
+ } else {
+ n = ofd_node_child_create(m, p, path, sz);
+ }
+
+ return n;
+}
+
+ofdn_t ofd_prop_add(
+ void *mem,
+ ofdn_t n,
+ const char *name,
+ const void *buf,
+ size_t sz)
+{
+ struct ofd_mem *m = (struct ofd_mem *)mem;
+ ofdn_t r;
+
+ r = ofd_prop_find(m, n, name);
+ if ( r == 0 ) {
+ r = ofd_prop_create(mem, n, name, buf, sz);
+ } else {
+ r = ofd_prop_set(mem, n, name, buf, sz);
+ }
+
+ return r;
+}
diff --git a/xen/arch/powerpc/of-devtree.h b/xen/arch/powerpc/of-devtree.h
new file mode 100644
index 0000000000..3b0439c23c
--- /dev/null
+++ b/xen/arch/powerpc/of-devtree.h
@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _OF_DEVTREE_H
+#define _OF_DEVTREE_H
+
+#include <xen/types.h>
+#include <public/xen.h>
+
+enum {
+ OF_FAILURE = -1,
+ OF_SUCCESS = 0,
+};
+
+union of_pci_hi {
+ u32 word;
+ struct {
+ u32 opa_n: 1; /* relocatable */
+ u32 opa_p: 1; /* prefetchable */
+ u32 opa_t: 1; /* aliased */
+ u32 _opa_res: 3;
+ u32 opa: 2; /* space code */
+ u32 opa_b: 8; /* bus number */
+ u32 opa_d: 5; /* device number */
+ u32 opa_f: 3; /* function number */
+ u32 opa_r: 8; /* register number */
+ } bits;
+};
+
+struct of_pci_addr {
+ union of_pci_hi opa_hi;
+ u32 opa_mid;
+ u32 opa_lo;
+};
+
+struct of_pci_range32 {
+ struct of_pci_addr opr_addr;
+ u32 opr_phys;
+ u32 opr_size;
+};
+
+struct of_pci_range64 {
+ struct of_pci_addr opr_addr;
+ u32 opr_phys_hi;
+ u32 opr_phys_lo;
+ u32 opr_size_hi;
+ u32 opr_size_lo;
+};
+
+struct of_pci_addr_range64 {
+ struct of_pci_addr opr_addr;
+ u32 opr_size_hi;
+ u32 opr_size_lo;
+};
+
+struct reg_property32 {
+ u32 address;
+ u32 size;
+};
+
+typedef s32 ofdn_t;
+
+#define OFD_ROOT 1
+#define OFD_DUMP_NAMES 0x1
+#define OFD_DUMP_VALUES 0x2
+#define OFD_DUMP_ALL (OFD_DUMP_VALUES|OFD_DUMP_NAMES)
+
+extern void *ofd_create(void *mem, size_t sz);
+extern ofdn_t ofd_node_parent(void *mem, ofdn_t n);
+extern ofdn_t ofd_node_peer(void *mem, ofdn_t n);
+extern ofdn_t ofd_node_child(void *mem, ofdn_t p);
+extern const char *ofd_node_path(void *mem, ofdn_t p);
+extern int ofd_node_to_path(void *mem, ofdn_t p, void *buf, size_t sz);
+extern ofdn_t ofd_node_child_create(void *mem, ofdn_t parent,
+ const char *path, size_t pathlen);
+extern ofdn_t ofd_node_peer_create(void *mem, ofdn_t sibling,
+ const char *path, size_t pathlen);
+extern ofdn_t ofd_node_find(void *mem, const char *devspec);
+extern ofdn_t ofd_node_add(void *m, ofdn_t n, const char *path, size_t sz);
+extern int ofd_node_prune(void *m, ofdn_t n);
+extern int ofd_prune_path(void *m, const char *path);
+extern ofdn_t ofd_node_io(void *mem, ofdn_t n);
+
+extern ofdn_t ofd_nextprop(void *mem, ofdn_t n, const char *prev, char *name);
+extern ofdn_t ofd_prop_find(void *mem, ofdn_t n, const char *name);
+extern int ofd_getprop(void *mem, ofdn_t n, const char *name,
+ void *buf, size_t sz);
+extern int ofd_getproplen(void *mem, ofdn_t n, const char *name);
+
+extern int ofd_setprop(void *mem, ofdn_t n, const char *name,
+ const void *buf, size_t sz);
+extern void ofd_prop_remove(void *mem, ofdn_t node, ofdn_t prop);
+extern ofdn_t ofd_prop_add(void *mem, ofdn_t n, const char *name,
+ const void *buf, size_t sz);
+extern ofdn_t ofd_io_create(void *m, ofdn_t node, u64 open);
+extern u32 ofd_io_open(void *mem, ofdn_t n);
+extern void ofd_io_close(void *mem, ofdn_t n);
+
+
+typedef void (*walk_fn)(void *m, ofdn_t p, int arg);
+extern void ofd_dump_props(void *m, ofdn_t p, int dump);
+
+extern void ofd_walk(void *m, ofdn_t p, walk_fn fn, int arg);
+
+
+/* Recursively look up #address-cells and #size-cells properties */
+extern int ofd_getcells(void *mem, ofdn_t n,
+ u32 *addr_cells, u32 *size_cells);
+
+extern size_t ofd_size(void *mem);
+extern size_t ofd_space(void *mem);
+
+extern void ofd_prop_print(const char *head, const char *path,
+ const char *name, const char *prop, size_t sz);
+
+extern ofdn_t ofd_node_find_by_prop(void *mem, ofdn_t n, const char *name,
+ const void *val, size_t sz);
+extern ofdn_t ofd_node_find_next(void *mem, ofdn_t n);
+extern ofdn_t ofd_node_find_prev(void *mem, ofdn_t n);
+extern void ofd_init(int (*write)(const char *, size_t len));
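+
+/*
+ * Typical usage sketch (illustrative names and values):
+ *
+ * static char tree[64 * 1024];
+ * u32 one = 1;
+ * void *m = ofd_create(tree, sizeof (tree));
+ * ofdn_t n = ofd_node_add(m, OFD_ROOT, "/xen", sizeof ("/xen"));
+ * ofd_prop_add(m, n, "version", &one, sizeof (one));
+ * ofd_getprop(m, n, "version", &one, sizeof (one));
+ */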
+
+#endif /* _OF_DEVTREE_H */
diff --git a/xen/arch/powerpc/of-devwalk.c b/xen/arch/powerpc/of-devwalk.c
new file mode 100644
index 0000000000..d39a8df73a
--- /dev/null
+++ b/xen/arch/powerpc/of-devwalk.c
@@ -0,0 +1,135 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/ctype.h>
+#include <xen/kernel.h>
+#include "of-devtree.h"
+
+void ofd_prop_print(
+ const char *head,
+ const char *path,
+ const char *name,
+ const char *prop,
+ size_t sz)
+{
+ if ( path[0] == '/' && path[1] == '\0' ) {
+ path = "";
+ }
+ printf("%s: %s/%s: 0x%lx\n", head, path, name, sz);
+
+#define DEBUG_PROP
+#ifdef DEBUG_PROP
+ int i;
+ int isstr = sz;
+ const char *b = prop;
+
+ for ( i = 0; i < sz; i++ ) {
+ /* see if there is any non printable characters */
+ if ( !isprint(b[i]) ) {
+ /* not printable */
+ if (b[i] != '\0' || (i + 1) != sz) {
+ /* not the end of string */
+ isstr = 0;
+ break;
+ }
+ }
+ }
+
+ if ( isstr > 0 ) {
+ printf("%s: \t%s\n", head, b);
+ } else if ( sz != 0 ) {
+ printf("%s: \t0x", head);
+
+ for ( i = 0; i < sz; i++ ) {
+ if ( (i % 4) == 0 && i != 0 ) {
+ if ( (i % 16) == 0 && i != 0 ) {
+ printf("\n%s: \t0x", head);
+ } else {
+ printf(" 0x");
+ }
+ }
+ /* print as an unsigned byte so sign extension cannot garble the dump */
+ printf("%02x", (unsigned char)b[i]);
+ }
+ printf("\n");
+ }
+#else
+ (void)prop;
+#endif
+}
+
+void ofd_dump_props(void *mem, ofdn_t n, int dump)
+{
+ ofdn_t p;
+ char name[128];
+ char prop[256] __attribute__ ((aligned (__alignof__ (u64))));
+ int sz;
+ const char *path;
+
+ if ( n == OFD_ROOT ) {
+ path = "";
+ } else {
+ path = ofd_node_path(mem, n);
+ }
+
+ if (dump & OFD_DUMP_NAMES) {
+ printf("of_walk: %s: phandle 0x%x\n", path, n);
+ }
+
+ p = ofd_nextprop(mem, n, NULL, name);
+ while ( p > 0 ) {
+ sz = ofd_getprop(mem, n, name, prop, sizeof (prop));
+ if ( sz > 0 && sz > sizeof (prop) ) {
+ sz = sizeof (prop);
+ }
+
+ if ( dump & OFD_DUMP_VALUES ) {
+ ofd_prop_print("of_walk", path, name, prop, sz);
+ }
+
+ p = ofd_nextprop(mem, n, name, name);
+ }
+}
+
+void ofd_walk(void *m, ofdn_t p, walk_fn fn, int arg)
+{
+ ofdn_t n;
+
+ if ( fn != NULL ) {
+ (*fn)(m, p, arg);
+ }
+
+ /* child */
+ n = ofd_node_child(m, p);
+ if ( n != 0 ) {
+ ofd_walk(m, n, fn, arg);
+ }
+
+ /* peer */
+ n = ofd_node_peer(m, p);
+ if ( n != 0 ) {
+ ofd_walk(m, n, fn, arg);
+ }
+}
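+
+/*
+ * Minimal usage sketch (not wired up anywhere yet): ofd_dump_props()
+ * already matches the walk_fn signature, so dumping every node reachable
+ * from the root is a single ofd_walk() call.
+ */
+static void __attribute__ ((unused)) ofd_dump_tree(void *m)
+{
+ ofd_walk(m, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
+}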
diff --git a/xen/arch/powerpc/of_handler/Makefile b/xen/arch/powerpc/of_handler/Makefile
new file mode 100644
index 0000000000..4954e374de
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/Makefile
@@ -0,0 +1,31 @@
+#
+# Build the Open Firmware handler
+#
+
+CFLAGS += -I..
+
+# head.o must be first
+obj-y = head.o
+obj-y += console.o
+obj-y += control.o
+obj-y += cpu.o
+obj-y += devtree.o
+obj-y += io.o
+obj-y += leap.o
+obj-y += memory.o
+obj-y += ofh.o
+obj-y += papr.o
+obj-y += services.o
+obj-y += vdevice.o
+obj-y += xencomm.o
+obj-y += xen_hvcall.o
+
+obj-y += memcmp.o
+obj-y += memset.o
+obj-y += snprintf.o
+obj-y += strcmp.o
+obj-y += strlen.o
+obj-y += strncmp.o
+obj-y += strncpy.o
+obj-y += strnlen.o
diff --git a/xen/arch/powerpc/of_handler/console.c b/xen/arch/powerpc/of_handler/console.c
new file mode 100644
index 0000000000..798f028a66
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/console.c
@@ -0,0 +1,233 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+#include "papr.h"
+#include <xen/string.h>
+#include <asm/system.h>
+
+union chpack {
+ u64 oct[2];
+ u32 quad[4];
+ char c[16];
+};
+
+/* used for internal printing */
+static struct ofh_ihandle *ofh_ihp;
+
+static s32 ofh_papr_read(s32 chan, void *buf, u32 count, s32 *actual, ulong b)
+{
+ s32 rc;
+ ulong ret[5];
+ ulong sz = 0;
+
+ rc = papr_get_term_char(ret, chan);
+ if (rc == H_Success && ret[0] > 0) {
+ sz = MIN(count, ret[0]);
+ memcpy(buf, &ret[1], sz);
+ }
+ *actual = sz;
+ return OF_SUCCESS;
+}
+
+static s32 ofh_papr_write(s32 chan, const void *buf, u32 count, s32 *actual,
+ ulong b)
+{
+ const char *str = (const char *)buf;
+ u32 i;
+ union chpack ch;
+ s32 ret;
+
+ for (i = 0; i < count; i++) {
+ int m = i % sizeof(ch);
+ ch.c[m] = str[i];
+ if (m == sizeof(ch) - 1 || i == count - 1) {
+ for (;;) {
+ if (sizeof (ulong) == sizeof (u64)) {
+ ret = papr_put_term_char(NULL,
+ chan,
+ m + 1,
+ ch.oct[0],
+ ch.oct[1]);
+ } else {
+ ret = papr_put_term_char(NULL,
+ chan,
+ m + 1,
+ ch.quad[0],
+ ch.quad[1],
+ ch.quad[2],
+ ch.quad[3]);
+ }
+ if (ret != H_Busy) {
+ break;
+ }
+ /* yielding here would be nice */
+ }
+ if (ret != H_Success) {
+ return -1;
+ }
+ }
+ }
+ *actual = count;
+ if (*actual == -1) {
+ return OF_FAILURE;
+ }
+ return OF_SUCCESS;
+}
+
+#define __HYPERVISOR_console_io 18
+#define CONSOLEIO_write 0
+#define CONSOLEIO_read 1
+#define XEN_MARK(a) ((a) | (~0UL << 16))
+extern long xen_hvcall(ulong code, ...);
+
+#define XENCOMM_MINI_AREA (sizeof(struct xencomm_mini) * 2)
+static s32 ofh_xen_dom0_read(s32 chan, void *buf, u32 count, s32 *actual,
+ ulong b)
+{
+ char __storage[XENCOMM_MINI_AREA];
+ struct xencomm_desc *desc;
+ s32 rc;
+ char *s = buf;
+ s32 ret = 0;
+
+ while (count > 0) {
+ if (xencomm_create_mini(__storage, XENCOMM_MINI_AREA, s, count, &desc))
+ return ret;
+
+ rc = xen_hvcall(XEN_MARK(__HYPERVISOR_console_io), CONSOLEIO_read,
+ count, desc);
+ if (rc <= 0) {
+ return ret;
+ }
+ count -= rc;
+ s += rc;
+ ret += rc;
+ }
+ *actual = ret;
+ return OF_SUCCESS;
+}
+
+static s32 ofh_xen_dom0_write(s32 chan, const void *buf, u32 count,
+ s32 *actual, ulong b)
+{
+ char __storage[XENCOMM_MINI_AREA];
+ struct xencomm_desc *desc;
+ s32 rc;
+ char *s = (char *)buf;
+ s32 ret = 0;
+
+ while (count > 0) {
+ if (xencomm_create_mini(__storage, XENCOMM_MINI_AREA, s, count, &desc))
+ return ret;
+
+ rc = xen_hvcall(XEN_MARK(__HYPERVISOR_console_io), CONSOLEIO_write,
+ count, desc);
+ if (rc <= 0) {
+ return ret;
+ }
+ count -= rc;
+ s += rc;
+ ret += rc;
+ }
+ *actual = ret;
+ if (*actual == -1) {
+ return OF_FAILURE;
+ }
+ return OF_SUCCESS;
+}
+
+static s32 ofh_xen_domu_read(s32 chan, void *buf, u32 count, s32 *actual,
+ ulong b)
+{
+ struct xencons_interface *intf;
+ XENCONS_RING_IDX cons, prod;
+ s32 ret;
+
+ intf = DRELA(ofh_ihp, b)->ofi_intf;
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ mb();
+
+ ret = prod - cons;
+
+ if (ret > 0) {
+ ret = (ret < count) ? ret : count;
+ memcpy(buf, intf->in+MASK_XENCONS_IDX(cons,intf->in), ret);
+ }
+
+ *actual = (ret < 0) ? 0 : ret;
+ return OF_SUCCESS;
+}
+
+static s32 ofh_xen_domu_write(s32 chan, const void *buf, u32 count,
+ s32 *actual, ulong b)
+{
+ struct xencons_interface *intf;
+ XENCONS_RING_IDX cons, prod;
+ s32 ret;
+
+ intf = DRELA(ofh_ihp, b)->ofi_intf;
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ mb();
+
+ ret = prod - cons;
+ /* FIXME: Do we have to write the whole thing or are partial writes ok? */
+ if (ret > 0) {
+ ret = (ret < count) ? ret : count;
+ memcpy(intf->in+MASK_XENCONS_IDX(cons,intf->in), buf, ret);
+ }
+
+ *actual = (ret < 0) ? 0 : ret;
+ return OF_SUCCESS;
+}
+
+/* for emergency printing in the OFH */
+s32 ofh_cons_write(const void *buf, u32 count, s32 *actual)
+{
+ ulong b = get_base();
+ struct ofh_ihandle *ihp = DRELA(ofh_ihp, b);
+
+ return ihp->ofi_write(ihp->ofi_chan, buf, count, actual, b);
+}
+
+s32 ofh_cons_close(void)
+{
+ return OF_SUCCESS;
+}
+
+void
+ofh_cons_init(struct ofh_ihandle *ihp, ulong b)
+{
+ if (ihp->ofi_chan == OFH_CONS_XEN) {
+ if (ihp->ofi_intf == NULL) {
+ ihp->ofi_write = ofh_xen_dom0_write;
+ ihp->ofi_read = ofh_xen_dom0_read;
+ } else {
+ ihp->ofi_write = ofh_xen_domu_write;
+ ihp->ofi_read = ofh_xen_domu_read;
+ }
+ } else {
+ ihp->ofi_write = ofh_papr_write;
+ ihp->ofi_read = ofh_papr_read;
+ }
+ *DRELA(&ofh_ihp, b) = ihp;
+}
diff --git a/xen/arch/powerpc/of_handler/control.c b/xen/arch/powerpc/of_handler/control.c
new file mode 100644
index 0000000000..02dbab6946
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/control.c
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+
+s32
+ofh_boot(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ b=b;
+ nargs = nargs;
+ nrets = nrets;
+ argp = argp;
+ retp = retp;
+ return OF_FAILURE;
+}
+
+s32
+ofh_enter(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ b=b;
+ nargs = nargs;
+ nrets = nrets;
+ argp = argp;
+ retp = retp;
+ return OF_FAILURE;
+}
+
+s32
+ofh_exit(u32 nargs __attribute__ ((unused)),
+ u32 nrets __attribute__ ((unused)),
+ s32 argp[] __attribute__ ((unused)),
+ s32 retp[] __attribute__ ((unused)),
+ ulong b)
+{
+ static const char msg[] = "OFH: exit method called\n";
+ s32 dummy;
+
+ ofh_cons_write(DRELA(&msg[0], b), sizeof (msg), &dummy);
+
+ for (;;) {
+ /* kill domain here */
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_chain(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ b=b;
+ nargs = nargs;
+ nrets = nrets;
+ argp = argp;
+ retp = retp;
+ return OF_FAILURE;
+}
+
+s32
+ofh_quiesce(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 0) {
+ if (nrets == 0) {
+ void *mem = ofd_mem(b);
+ (void)nargs;
+ (void)nrets;
+ (void)argp;
+ (void)retp;
+ (void)mem;
+
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
diff --git a/xen/arch/powerpc/of_handler/cpu.c b/xen/arch/powerpc/of_handler/cpu.c
new file mode 100644
index 0000000000..0fcc5d8ace
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/cpu.c
@@ -0,0 +1,82 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+
+s32
+ofh_start_cpu(u32 nargs, u32 nrets, s32 argp[],
+ s32 retp[] __attribute__ ((unused)),
+ ulong b __attribute__ ((unused)))
+{
+ if (nargs == 3) {
+ if (nrets == 0) {
+ ofdn_t ph = argp[0];
+ u32 pc = argp[1];
+ u32 arg = argp[2];
+
+ (void)ph; (void)pc; (void)arg;
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_stop_self(u32 nargs, u32 nrets,
+ s32 argp[] __attribute__ ((unused)),
+ s32 retp[] __attribute__ ((unused)),
+ ulong b __attribute__ ((unused)))
+{
+ if (nargs == 0) {
+ if (nrets == 0) {
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_idle_self(u32 nargs, u32 nrets,
+ s32 argp[] __attribute__ ((unused)),
+ s32 retp[] __attribute__ ((unused)),
+ ulong b __attribute__ ((unused)))
+{
+ if (nargs == 0) {
+ if (nrets == 0) {
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
+s32
+ofh_resume_cpu(u32 nargs, u32 nrets, s32 argp[],
+ s32 retp[] __attribute__ ((unused)),
+ ulong b __attribute__ ((unused)))
+{
+ if (nargs == 1) {
+ if (nrets == 0) {
+ ofdn_t ph = argp[0];
+
+ (void)ph;
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
diff --git a/xen/arch/powerpc/of_handler/devtree.c b/xen/arch/powerpc/of_handler/devtree.c
new file mode 100644
index 0000000000..6d52b433a4
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/devtree.c
@@ -0,0 +1,266 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+#include <of-devtree.h>
+
+s32
+ofh_peer(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ s32 *sib_ph = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *sib_ph = ofd_node_peer(mem, ph);
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_child(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ s32 *ch_ph = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *ch_ph = ofd_node_child(mem, ph);
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_parent(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ s32 *parent_ph = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *parent_ph = ofd_node_parent(mem, ph);
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_instance_to_package(u32 nargs, u32 nrets, s32 argp[], s32 retp[],
+ ulong b __attribute__ ((unused)))
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ struct ofh_ihandle *ih =
+ (struct ofh_ihandle *)(ulong)argp[0];
+ s32 *p = &retp[0];
+
+ *p = (s32)ih->ofi_node;
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_getproplen(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 2) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ const char *name = (const char *)(ulong)argp[1];
+ s32 *size = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *size = ofd_getproplen(mem, ph, name);
+ if (*size >= 0) {
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_getprop(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 4) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ const char *name = (const char *)(ulong)argp[1];
+ void *buf = (void *)(ulong)argp[2];
+ ulong buflen = argp[3];
+ s32 *size = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *size = ofd_getprop(mem, ph, name, buf, buflen);
+ if (*size > 0) {
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_nextprop(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ const char *prev = (const char *)(ulong)argp[1];
+ char *name = (char *)(ulong)argp[2];
+ s32 *flag = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *flag = ofd_nextprop(mem, ph, prev, name);
+ if (*flag > 0) {
+ *flag = 1;
+ }
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_setprop(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 4) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ const char *name = (const char *)(ulong)argp[1];
+ const void *buf = (void *)(ulong)argp[2];
+ ulong buflen = argp[3];
+ s32 *size = &retp[0];
+ void *mem = ofd_mem(b);
+
+ *size = ofd_setprop(mem, ph, name, buf, buflen);
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_canon(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ const char *dev_spec = (const char *)(ulong)argp[0];
+ char *buf = (char *)(ulong)argp[1];
+ u32 sz = argp[2];
+ s32 *len = &retp[0];
+ void *mem = ofd_mem(b);
+ ofdn_t ph;
+
+ ph = ofd_node_find(mem, dev_spec);
+ if (ph > 0) {
+ *len = ofd_node_to_path(mem, ph, buf, sz);
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32 ofh_active_package = -1;
+
+s32
+ofh_finddevice(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ s32 *ap = DRELA(&ofh_active_package, b);
+ const char *devspec = (const char *)(ulong)argp[0];
+ s32 *ph = &retp[0];
+ void *mem = ofd_mem(b);
+
+ /* good enough */
+ if (devspec[0] == '\0') {
+ if (*ap == -1) {
+ return OF_FAILURE;
+ }
+ *ph = *ap;
+ } else {
+ *ph = ofd_node_find(mem, devspec);
+ if (*ph <= 0) {
+ return OF_FAILURE;
+ }
+ }
+ *ap = *ph;
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_instance_to_path(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ struct ofh_ihandle *ih =
+ (struct ofh_ihandle *)((ulong)argp[0]);
+ char *buf = (char *)(ulong)argp[1];
+ u32 sz = argp[2];
+ s32 *len = &retp[0];
+ ofdn_t ph;
+ void *mem = ofd_mem(b);
+
+ ph = ih->ofi_node;
+ if (ph > 0) {
+ *len = ofd_node_to_path(mem, ph, buf, sz);
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_package_to_path(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ ofdn_t ph = argp[0];
+ char *buf = (char *)(ulong)argp[1];
+ u32 sz = argp[2];
+ s32 *len = &retp[0];
+ void *mem = ofd_mem(b);
+
+ if (ph > 0) {
+ *len = ofd_node_to_path(mem, ph, buf, sz);
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+
+
diff --git a/xen/arch/powerpc/of_handler/head.S b/xen/arch/powerpc/of_handler/head.S
new file mode 100644
index 0000000000..f71ebb2121
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/head.S
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * Glue code for open-firmware client interface implementation.
+ */
+
+#define OF_STACK_SIZE (32*1024)
+
+#include <asm/config.h>
+#include <asm/processor.h>
+
+#define SAVE_STACK 0
+#define SAVE_SRR0 1
+#define SAVE_SRR1 2
+#define SAVE_MSR 3
+#define SAVE_LR 4
+#define SAVE_TOC 5
+#define SAVE_BASE 6
+#define SAVE_BCHAIN 7 /* MUST be last */
+
+/* This code is NOT MP safe and does not need to be */
+ .p2align 3
+ . = 0x0
+_GLOBAL(ofh_start) # make the linker happy
+_ofh_cih:
+ ## r3 holds the parameter. All other volatiles are available.
+ mflr r0
+ # Obtain address of _ofh_work_space via a branch-and-link
+ bl _ofh_cih_continue
+_ofh_work_space:
+ nop
+
+ . = 0x10
+ .global _ofh_tree
+_ofh_tree:
+ .long 0xdeadbeef
+ .long 0x0
+
+ . = 0x18
+ .global _ofh_inited
+_ofh_inited:
+ .long 0x0
+
+ . = 0x20
+ .global _ofh_lastarg
+_ofh_lastarg:
+ .long 0x0
+ .long 0x0
+
+ . = 0x30
+_ofh_cih_stack_end:
+ .space OF_STACK_SIZE
+_ofh_cih_stack:
+
+_ofh_cih_continue:
+ mflr r12 # r12 = &_ofh_work_space
+ mr r11, r1 # r11 = orig stk ptr
+
+ /* load base address in r4 */
+ LOADADDR(r4, _ofh_work_space)
+ sub r4, r12, r4
+
+
+ # save srr0/1
+ mfsrr0 r9
+ mfsrr1 r8
+ mfmsr r7
+ LOADADDR(r5, _ofh_cih_64bit)
+ add r5, r5, r4 # offset base
+ mtsrr0 r5
+ # r5 = MSR_SF
+ li r5,-1
+ rldicr r5,r5,0,0
+ or r5,r5,r7
+ mtsrr1 r5
+ rfid
+ trap
+
+_ofh_cih_64bit:
+ # move to local stack
+ lis r1, (_ofh_cih_stack - _ofh_cih_stack_end) >> 16
+ ori r1, r1, (_ofh_cih_stack - _ofh_cih_stack_end) & 0xffff
+ add r1, r12, r1
+ # create an initial chain
+ li r10, 0
+ # frame with 3 slots
+ stdu r10, -(STACK_FRAME_OVERHEAD + (SAVE_BCHAIN * 8))(r1)
+ # preserve base
+ std r4, (STACK_FRAME_OVERHEAD + (SAVE_BASE * 8))(r1)
+ # preserve orig stk ptr
+ std r11, (STACK_FRAME_OVERHEAD + (SAVE_STACK * 8))(r1)
+ # preserve orig srr0
+ std r9, (STACK_FRAME_OVERHEAD + (SAVE_SRR0 * 8))(r1)
+ # preserve orig srr1
+ std r8, (STACK_FRAME_OVERHEAD + (SAVE_SRR1 * 8))(r1)
+ # preserve orig msr
+ std r7, (STACK_FRAME_OVERHEAD + (SAVE_MSR * 8))(r1)
+ # preserve orig lr
+ std r0, (STACK_FRAME_OVERHEAD + (SAVE_LR * 8))(r1)
+ # preserve orig toc
+ std r2, (STACK_FRAME_OVERHEAD + (SAVE_TOC * 8))(r1)
+
+ LOADADDR(r2, ofh_start) # get the address of any function
+ add r2, r2, r4 # add the base
+ ld r2, 8(r2) # get the TOC for that function
+ add r2, r2, r4 # add the base
+
+ bl _ENTRY(ofh_handler) # call handler
+
+ ld r4, (STACK_FRAME_OVERHEAD + (SAVE_BASE * 8))(r1)
+ ld r9, (STACK_FRAME_OVERHEAD + (SAVE_SRR0 * 8))(r1)
+ ld r8, (STACK_FRAME_OVERHEAD + (SAVE_SRR1 * 8))(r1)
+ ld r7, (STACK_FRAME_OVERHEAD + (SAVE_MSR * 8))(r1)
+ ld r2, (STACK_FRAME_OVERHEAD + (SAVE_TOC * 8))(r1)
+ ld r0, (STACK_FRAME_OVERHEAD + (SAVE_LR * 8))(r1)
+ ld r1, (STACK_FRAME_OVERHEAD + (SAVE_STACK * 8))(r1)
+
+ LOADADDR(r5, _ofh_cih_orig_msr)
+ add r5, r5, r4
+ mtsrr0 r5
+ mtsrr1 r7
+ rfid
+
+_ofh_cih_orig_msr:
+ mtsrr0 r9
+ mtsrr1 r8
+ mtlr r0
+ blr
+
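+/*
+ * get_base() returns the difference between the run-time and link-time
+ * addresses of the "1:" label below, i.e. the offset at which the
+ * relocated image was loaded; the C code adds this base to absolute
+ * addresses.
+ */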
+_GLOBAL(get_base)
+ mflr r0
+ bl 1f
+1: mflr r5
+ LOADADDR(r4,1b)
+ subf r3,r4,r5
+ mtlr r0
+ blr
diff --git a/xen/arch/powerpc/of_handler/io.c b/xen/arch/powerpc/of_handler/io.c
new file mode 100644
index 0000000000..492e4d6c6c
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/io.c
@@ -0,0 +1,160 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+#include "xen/lib.h"
+
+s32
+ofh_open(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 1) {
+ const char *devspec = (const char *)(ulong)argp[0];
+ s32 *ih = &retp[0];
+ ofdn_t p;
+ void *mem = ofd_mem(b);
+
+ p = ofd_node_find(mem, devspec);
+ if (p > 0) {
+ ofdn_t io;
+ io = ofd_node_io(mem, p);
+ if (io > 0) {
+ void *f = (void *)(ulong)ofd_io_open(mem, io);
+ if (f != 0) {
+ *ih = leap(b, 0, NULL, NULL,
+ b, f);
+ return OF_SUCCESS;
+ }
+ }
+ }
+ *ih = 0;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_close(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 1) {
+ if (nrets == 0) {
+ argp = argp;
+ retp = retp;
+ b = b;
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
+s32
+ofh_read(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ struct ofh_ihandle *ih =
+ (struct ofh_ihandle *)(ulong)argp[0];
+
+ if (ih->ofi_read != NULL) {
+ void *addr = (void *)(ulong)argp[1];
+ u32 sz = argp[2];
+ s32 *actual = &retp[0];
+ void *f = ih->ofi_read;
+
+ if (f != 0) {
+ return io_leap(ih->ofi_chan, addr, sz, actual,
+ b, f);
+ }
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_write(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ struct ofh_ihandle *ih =
+ (struct ofh_ihandle *)(ulong)argp[0];
+
+ if (ih->ofi_write != NULL) {
+ void *addr = (void *)(ulong)argp[1];
+ u32 sz = argp[2];
+ s32 *actual = &retp[0];
+ void *f = ih->ofi_write;
+
+ if (f != 0) {
+ return io_leap(ih->ofi_chan, addr, sz, actual,
+ b, f);
+ }
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_seek(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ b=b;
+ nargs = nargs;
+ nrets = nrets;
+ argp = argp;
+ retp = retp;
+ return OF_FAILURE;
+}
+
+static ofh_func_t *
+method_lookup(struct ofh_ihandle *ih, const char *name, ulong b)
+{
+ struct ofh_methods *m = DRELA(ih->ofi_methods, b);
+
+ while (m != NULL && m->ofm_name != NULL ) {
+ if (strcmp(name, DRELA(m->ofm_name, b)) == 0) {
+ return m->ofm_method;
+ }
+ }
+ return NULL;
+}
+
+
+s32
+ofh_call_method(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs > 2) {
+ if (nrets > 1) {
+ const char *method = (const char *)(ulong)argp[0];
+ struct ofh_ihandle *ih =
+ (struct ofh_ihandle *)(ulong)argp[1];
+ ofh_func_t *f;
+
+ f = method_lookup(ih, method, b);
+ if (f != NULL) {
+ /* set catch methods return 0 on success */
+ retp[0] = leap(nargs - 2, nrets - 1,
+ &argp[2], &retp[1], b, f);
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+}
+
diff --git a/xen/arch/powerpc/of_handler/leap.S b/xen/arch/powerpc/of_handler/leap.S
new file mode 100644
index 0000000000..2bbb2d044e
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/leap.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <asm/config.h>
+#include <asm/processor.h>
+
+/*
+ * sval
+ * leap(uval nargs, uval nrets, uval args[], uval rets[], uval ba,
+ * ofh_func_t f)
+ * We need to deal with f actually being a function descriptor; we can
+ * assume that the TOC is correct.
+ */
+
+
+_GLOBAL(io_leap)
+_GLOBAL(leap)
+ ## r8 contains the base address for everyone
+ add r8,r8,r7 # add
+ ld r8, 0(r8) # get the entry point
+ add r8,r8,r7 # add
+ mtctr r8 # and
+ bctr # leap
+ /* never get here */
diff --git a/xen/arch/powerpc/of_handler/memcmp.c b/xen/arch/powerpc/of_handler/memcmp.c
new file mode 100644
index 0000000000..f6c23b75f3
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/memcmp.c
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+int
+memcmp(const void *v1, const void *v2, size_t n)
+{
+ const char *s1 = (const char *)v1;
+ const char *s2 = (const char *)v2;
+
+ while (n > 0) {
+ if (*s1 != *s2) {
+ return (*s1 - *s2);
+ }
+ /* advance pointers to next character */
+ ++s1;
+ ++s2;
+ --n;
+ }
+ return 0;
+}
diff --git a/xen/arch/powerpc/of_handler/memory.c b/xen/arch/powerpc/of_handler/memory.c
new file mode 100644
index 0000000000..184945737f
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/memory.c
@@ -0,0 +1,129 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+
+struct of_malloc_s {
+ u32 ofm_start;
+ u32 ofm_end;
+};
+static struct of_malloc_s claimed[64];
+
+static s32
+claim(ulong b, u32 virt, u32 size, u32 align, s32 *baseaddr)
+{
+ struct of_malloc_s *cp;
+ u32 i;
+ s32 e;
+ u32 end;
+
+ if (align != 0) {
+        /* non-zero alignment is not supported yet */
+ return OF_FAILURE;
+ }
+
+ end = virt + size;
+
+ /* you cannot claim OF's own space */
+ if (virt >= (u32)ofh_start && end < (u32)_end) {
+ return OF_FAILURE;
+ }
+
+ cp = DRELA(&claimed[0], b);
+ /* don't care about speed at the moment */
+ e = -1;
+ for (i = 0; i < sizeof (claimed)/sizeof (claimed[0]); i++) {
+ if (cp[i].ofm_end == 0) {
+ if (e == -1) {
+ e = i;
+ }
+ continue;
+ }
+ if (virt >= cp[i].ofm_start && virt < cp[i].ofm_end) {
+ return OF_FAILURE;
+ }
+ if (end >= cp[i].ofm_start && end < cp[i].ofm_end) {
+ return OF_FAILURE;
+ }
+ }
+    /* e points to the first empty slot, or -1 if the table is full */
+    if (e == -1) {
+        return OF_FAILURE;
+    }
+    cp[e].ofm_start = virt;
+ cp[e].ofm_end = end;
+ *baseaddr = virt;
+ return OF_SUCCESS;
+}
+
+s32
+ofh_claim(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 3) {
+ if (nrets == 1) {
+ u32 virt = argp[0];
+ u32 size = argp[1];
+ u32 align = argp[2];
+ s32 *baseaddr = &retp[0];
+
+ return claim(b, virt, size, align, baseaddr);
+ }
+ }
+ return OF_FAILURE;
+}
+
+static s32
+release(ulong b, u32 virt, u32 size)
+{
+ struct of_malloc_s *cp;
+ u32 i;
+ u32 end;
+
+ end = virt + size;
+
+ /* you cannot release OF's own space */
+ if (virt >= (u32)ofh_start && end < (u32)_end) {
+ return OF_FAILURE;
+ }
+
+ cp = DRELA(&claimed[0], b);
+ /* don't care about speed at the moment */
+ for (i = 0; i < sizeof (claimed)/sizeof (claimed[0]); i++) {
+ if (virt == cp[i].ofm_start && end == cp[i].ofm_end) {
+ cp[i].ofm_start = 0;
+ cp[i].ofm_end = 0;
+ return OF_SUCCESS;
+ }
+ }
+ return OF_FAILURE;
+}
+
+s32
+ofh_release(u32 nargs, u32 nrets, s32 argp[],
+ s32 retp[] __attribute__ ((unused)),
+ ulong b)
+{
+ if (nargs == 2) {
+ if (nrets == 0) {
+ u32 virt = argp[0];
+ u32 size = argp[1];
+
+ return release(b, virt, size);
+ }
+ }
+ return OF_FAILURE;
+}
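
For orientation, a hedged sketch of the request that eventually reaches ofh_claim() above. It assumes ofh.h (added below) is included; the service string and buffer layout mirror __ofh_start() in ofh.c, and the addresses are made up:

    /* Sketch only: claim 1MiB at an illustrative address; align must be 0. */
    static s32 example_claim(void)
    {
        u32 buf[sizeof (struct ofh_args) + sizeof (u32) * 4];
        struct ofh_args *args = (struct ofh_args *)buf;

        args->ofa_service = (u32)"claim";
        args->ofa_nargs = 3;              /* virt, size, align */
        args->ofa_nreturns = 1;           /* baseaddr */
        args->ofa_args[0] = 0x01000000;   /* virt (made up) */
        args->ofa_args[1] = 0x00100000;   /* size (made up) */
        args->ofa_args[2] = 0;            /* claim() rejects align != 0 */
        args->ofa_args[3] = -1;           /* return slot, filled on success */

        /* ofh_handler() routes this to ofh_claim(), which unpacks the three
         * arguments and writes the granted base address into ofa_args[3]. */
        return ofh_start(args);
    }
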
diff --git a/xen/arch/powerpc/of_handler/memset.c b/xen/arch/powerpc/of_handler/memset.c
new file mode 100644
index 0000000000..addaf80c09
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/memset.c
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+void *
+memset(void *s, int c, size_t n)
+{
+ uint8_t *ss = (uint8_t *)s;
+
+ if (n == 0) {
+ return s;
+ }
+
+    /* the 2 here is an arbitrary threshold, pulled out of thin air */
+ if (n >= (2 * sizeof (ulong))) {
+ ulong val = 0;
+ ulong i;
+
+ /* construct val assignment from c */
+ if (c != 0) {
+ for (i = 0; i < sizeof (ulong); i++) {
+ val = (val << 8) | c;
+ }
+ }
+
+ /* do by character until aligned */
+ while (((ulong)ss & (sizeof (ulong) - 1)) > 0) {
+ *ss = c;
+ ++ss;
+ --n;
+ }
+
+ /* now do the aligned stores */
+ while (n >= sizeof (ulong)) {
+ *(ulong *)ss = val;
+ ss += sizeof (ulong);
+ n -= sizeof (ulong);
+ }
+ }
+ /* do that last unaligned bit */
+ while (n > 0) {
+ *ss = c;
+ ++ss;
+ --n;
+
+ }
+
+ return s;
+}
diff --git a/xen/arch/powerpc/of_handler/ofh.c b/xen/arch/powerpc/of_handler/ofh.c
new file mode 100644
index 0000000000..49febbb5e9
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/ofh.c
@@ -0,0 +1,454 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+#include <stdarg.h>
+#include <xen/lib.h>
+
+/*
+ * 6.3.1 Access to the client interface functions
+ * This is the spec'd maximum
+ */
+#define PFW_MAXSRVCLEN 31
+
+static u32 ofh_maxsrvclen;
+
+extern s32 debug(const char *fmt, ...);
+
+s32 debug(const char *fmt, ...)
+{
+ s32 sz;
+ va_list ap;
+ char buf[512];
+ va_start(ap, fmt);
+ sz = vsnprintf(buf, 512, fmt, ap);
+ va_end(ap);
+ ofh_cons_write(buf, sz, &sz);
+
+ return sz;
+}
+
+
+
+void
+assprint(const char *expr, const char *file, int line, const char *fmt, ...)
+{
+ char a[15];
+
+ a[0] = '\n';
+ a[1] = '\n';
+ a[2] = 'O';
+ a[3] = 'F';
+ a[4] = 'H';
+ a[5] = ':';
+ a[6] = 'A';
+ a[7] = 'S';
+ a[8] = 'S';
+ a[9] = 'E';
+ a[10] = 'R';
+ a[11] = 'T';
+ a[12] = '!';
+ a[13] = '\n';
+ a[14] = '\n';
+
+ s32 actual;
+ u32 t = 1;
+ volatile u32 *tp = &t;
+
+ (void)expr; (void)file; (void)line; (void)fmt;
+
+ ofh_cons_write(a, sizeof (a), &actual);
+
+ /* maybe I can break out of this loop manually (like with a
+ * debugger) */
+ while (*tp) {
+ continue;
+ }
+}
+
+/*
+ * we use elf hash since it is pretty standard
+ */
+static u32
+of_hash(const char *s)
+{
+ u32 hash = 0;
+ u32 hnib;
+
+ if (s != NULL) {
+ while (*s != '\0') {
+ hash = (hash << 4) + *s++;
+ hnib = hash & 0xf0000000UL;
+ if (hnib != 0) {
+ hash ^= hnib >> 24;
+ }
+ hash &= ~hnib;
+ }
+ }
+ return hash;
+}
+
+static void
+ofh_service_init(ulong b)
+{
+ ulong sz;
+    int i = 0;
+ int j = 0;
+ struct ofh_srvc *o;
+ struct ofh_srvc *ofs[] = {
+ DRELA(&ofh_srvc[0], b),
+ DRELA(&ofh_isa_srvc[0], b),
+ NULL
+ };
+
+ j = 0;
+ while (ofs[j] != NULL) {
+ /* find the maximum string length for services */
+ o = &ofs[j][0];
+ while (o->ofs_name != NULL) {
+ const char *n;
+
+ n = DRELA(&o->ofs_name[0], b);
+ /* fix it up so we don't have to fix it anymore */
+ o->ofs_name = n;
+
+ sz = strlen(n);
+ if (sz > *DRELA(&ofh_maxsrvclen, b)) {
+ *DRELA(&ofh_maxsrvclen, b) = sz;
+ }
+ o->ofs_hash =
+ of_hash(n);
+ ++i;
+ ++o;
+ }
+ ++j;
+ }
+}
+
+
+static void
+ofh_cpu_init(ofdn_t chosen, ulong b)
+{
+ static struct ofh_ihandle _ih_cpu_0;
+ void *mem = ofd_mem(b);
+ u32 ih = DRELA((ulong)&_ih_cpu_0, b);
+ struct ofh_ihandle *ihp = (struct ofh_ihandle *)((ulong)ih);
+ const char *cpu_type = DRELA((const char*)"cpu",b);
+
+ ofdn_t cpu = ofd_node_find_by_prop(mem, OFD_ROOT,
+ DRELA((const char*)"device_type",b),
+ cpu_type, 4);
+ ihp->ofi_node = cpu;
+ ofd_prop_add(mem, chosen, DRELA((const char *)"cpu", b),
+ &ih, sizeof (ih));
+}
+static s32
+mmu_translate(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ /* FIXME: need a little more here */
+ nargs = nargs;
+ nrets = nrets;
+ argp = argp;
+ retp = retp;
+ b = b;
+ return OF_SUCCESS;
+}
+
+static void
+ofh_mmu_init(ofdn_t chosen, ulong b)
+{
+ static struct ofh_methods _mmu_methods[] = {
+ { "translate", mmu_translate },
+ { NULL, NULL},
+ };
+ static struct ofh_ihandle _ih_mmu = {
+ .ofi_methods = _mmu_methods,
+ };
+ void *mem = ofd_mem(b);
+ u32 ih = DRELA((ulong)&_ih_mmu, b);
+
+ ofd_prop_add(mem, chosen, DRELA((const char *)"mmu", b),
+ &ih, sizeof (ih));
+}
+
+static void
+ofh_chosen_init(ulong b)
+{
+ ofdn_t ph;
+ void *mem = ofd_mem(b);
+
+ ph = ofd_node_find(mem, DRELA((const char *)"/chosen", b));
+
+ ofh_vty_init(ph, b);
+ ofh_cpu_init(ph, b);
+ ofh_mmu_init(ph, b);
+}
+
+static void
+ofh_options_init(ulong b)
+{
+ void *mem = ofd_mem(b);
+ ofdn_t options;
+ u32 size = 1 << 20;
+ u32 base = b;
+ char buf[20];
+ int i;
+
+
+ /* fixup the ihandle */
+ options = ofd_node_find(mem,
+ DRELA((const char *)"options", b));
+
+ i = snprintf(buf, sizeof (buf), "0x%x", base);
+ ofd_prop_add(mem, options, DRELA((const char *)"real-base", b),
+ buf, i);
+
+ i = snprintf(buf,sizeof (buf), "0x%x", size);
+ ofd_prop_add(mem, options, DRELA((const char *)"real-size", b),
+ buf, i);
+}
+
+static void
+ofh_init(ulong b)
+{
+ ulong sz = (ulong)_end - (ulong)__bss_start;
+ /* clear bss */
+ memset(__bss_start + b, 0, sz);
+
+ ofh_service_init(b);
+ ofh_chosen_init(b);
+ ofh_options_init(b);
+}
+
+static ofh_func_t *
+ofh_lookup(const char *service, ulong b)
+{
+ int j;
+ u32 hash;
+ struct ofh_srvc *o;
+ struct ofh_srvc *ofs[] = {
+ DRELA(&ofh_srvc[0], b),
+ DRELA(&ofh_isa_srvc[0], b),
+ NULL
+ };
+ u32 sz;
+
+ sz = *DRELA(&ofh_maxsrvclen, b);
+
+ if (strnlen(service, sz + 1) > sz) {
+ return NULL;
+ }
+
+ hash = of_hash(service);
+
+ j = 0;
+ while (ofs[j] != NULL) {
+ /* yes this could be quicker */
+ o = &ofs[j][0];
+ while (o->ofs_name != NULL) {
+ if (o->ofs_hash == hash) {
+ const char *n = o->ofs_name;
+ if (strcmp(service, n) == 0) {
+ return o->ofs_func;
+ }
+ }
+ ++o;
+ }
+ ++j;
+ }
+ return NULL;
+}
+
+s32
+ofh_nosup(u32 nargs __attribute__ ((unused)),
+ u32 nrets __attribute__ ((unused)),
+ s32 argp[] __attribute__ ((unused)),
+ s32 retp[] __attribute__ ((unused)),
+ ulong b __attribute__ ((unused)))
+{
+ return OF_FAILURE;
+}
+
+s32
+ofh_test_method(u32 nargs, u32 nrets, s32 argp[], s32 retp[], ulong b)
+{
+ if (nargs == 2) {
+ if (nrets == 1) {
+ s32 *ap = DRELA(&ofh_active_package, b);
+ u32 service = (s32)argp[0];
+ const char *method = (const char *)(ulong)argp[1];
+ s32 *stat = &retp[0];
+
+ (void)ap; (void)service; (void)method;
+
+ *stat = 0;
+ /* we do not do this yet */
+ return OF_FAILURE;
+ }
+ }
+ return OF_FAILURE;
+}
+extern u32 _ofh_inited[0];
+extern u32 _ofh_lastarg[0];
+
+s32
+ofh_handler(struct ofh_args *args, ulong b)
+{
+ u32 *inited = (u32 *)DRELA(&_ofh_inited[0],b);
+ u32 *lastarg = (u32 *)DRELA(&_ofh_lastarg[0],b);
+ ofh_func_t *f;
+
+ if (*inited == 0) {
+ ofh_init(b);
+
+ if ((ulong)ofd_mem(b) < (ulong)_end + b) {
+ static const char msg[] = "PANIC: OFD and BSS collide\n";
+ s32 dummy;
+
+ ofh_cons_write(DRELA(&msg[0], b), sizeof (msg), &dummy);
+ for (;;);
+ }
+
+ *inited = 1;
+ }
+
+ *lastarg = (ulong)args;
+
+ f = ofh_lookup((char *)((ulong)args->ofa_service), b);
+
+ if (f == ((ofh_func_t *)~0UL)) {
+ /* do test */
+ if (args->ofa_nargs == 1) {
+ if (args->ofa_nreturns == 1) {
+ char *name = (char *)(ulong)args->ofa_args[0];
+ if (ofh_lookup(name, b) != NULL) {
+ args->ofa_args[args->ofa_nargs] =
+ OF_SUCCESS;
+ return OF_SUCCESS;
+ }
+ }
+ }
+ return OF_FAILURE;
+
+ } else if (f != NULL) {
+ return leap(args->ofa_nargs,
+ args->ofa_nreturns,
+ args->ofa_args,
+ &args->ofa_args[args->ofa_nargs],
+ b, f);
+ }
+ return OF_FAILURE;
+}
+
+/*
+ * The following code exists solely to run the handler code standalone
+ */
+void
+__ofh_start(void)
+{
+ s32 ret;
+ u32 of_stdout;
+ u32 ihandle;
+ char buf[1024];
+ u32 args_buf[sizeof (struct ofh_args) + (sizeof (u32) * 10)];
+ struct ofh_args *args;
+
+ args = (struct ofh_args *)args_buf;
+
+ args->ofa_service = (u32)"finddevice";
+ args->ofa_nargs = 1;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = (u32)"/";
+ args->ofa_args[1] = -1;
+ ret = ofh_start(args);
+
+ if (ret == OF_SUCCESS) {
+ args->ofa_service = (u32)"finddevice";
+ args->ofa_nargs = 1;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = (u32)"/chosen";
+ args->ofa_args[1] = -1;
+ ret = ofh_start(args);
+ }
+
+ if (ret == OF_SUCCESS) {
+ u32 phandle = args->ofa_args[1];
+
+ args->ofa_service = (u32)"getprop";
+ args->ofa_nargs = 4;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = phandle;
+ args->ofa_args[1] = (ulong)"stdout";
+ args->ofa_args[2] = (ulong)&of_stdout;
+ args->ofa_args[3] = sizeof(of_stdout);
+ args->ofa_args[4] = -1;
+ ret = ofh_start(args);
+ }
+
+ ihandle = *(u32 *)((ulong)args->ofa_args[2]);
+
+ if (ret == OF_SUCCESS) {
+ /* instance to path */
+ args->ofa_service = (u32)"instance-to-path";
+ args->ofa_nargs = 3;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = ihandle;
+ args->ofa_args[1] = (ulong)buf;
+ args->ofa_args[2] = sizeof (buf);
+ args->ofa_args[3] = -1;
+ ret = ofh_start(args);
+
+ }
+
+ if (ret == OF_SUCCESS) {
+ /* open rtas */
+ args->ofa_service = (u32)"open";
+ args->ofa_nargs = 1;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = (u32)"/rtas";
+ ret = ofh_start(args);
+ if (ret == OF_SUCCESS) {
+ u32 ir = args->ofa_args[1];
+ args->ofa_service = (u32)"call-method";
+ args->ofa_nargs = 3;
+ args->ofa_nreturns = 2;
+ args->ofa_args[0] = (ulong)"instantiate-rtas";
+ args->ofa_args[1] = ir;
+ args->ofa_args[2] = (ulong)buf;
+
+ ret = ofh_start(args);
+ }
+ }
+
+ if (ret == OF_SUCCESS) {
+ const char msg[] = "This is a test";
+        u32 msgsz = sizeof(msg) - 1; /* excludes the trailing '\0' */
+
+ args->ofa_service = (u32)"write";
+ args->ofa_nargs = 3;
+ args->ofa_nreturns = 1;
+ args->ofa_args[0] = ihandle;
+ args->ofa_args[1] = (ulong)msg;
+ args->ofa_args[2] = msgsz;
+ args->ofa_args[3] = -1;
+ ret = ofh_start(args);
+ }
+
+}
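
"test" is the one entry in ofh_srvc[] (services.c below) whose function pointer is the ~0UL sentinel, and ofh_handler() above special-cases it. A hedged sketch of how a client could probe for a service this way; the wrapper name is invented and ofh.h is assumed to be included:

    /* Sketch only: ask the handler whether it implements a named service. */
    static int service_exists(const char *name)
    {
        u32 buf[sizeof (struct ofh_args) + sizeof (u32) * 2];
        struct ofh_args *args = (struct ofh_args *)buf;

        args->ofa_service = (u32)"test";
        args->ofa_nargs = 1;
        args->ofa_nreturns = 1;
        args->ofa_args[0] = (u32)name;   /* e.g. "claim" or "getprop" */
        args->ofa_args[1] = -1;          /* missing flag, set on return */

        return ofh_start(args) == OF_SUCCESS;
    }
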
diff --git a/xen/arch/powerpc/of_handler/ofh.h b/xen/arch/powerpc/of_handler/ofh.h
new file mode 100644
index 0000000000..1aab134d6d
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/ofh.h
@@ -0,0 +1,164 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _PFW_H
+#define _PFW_H
+
+#include <xen/types.h>
+#include <public/xencomm.h>
+#include <public/io/console.h>
+#include <of-devtree.h>
+
+#define MIN(x,y) (((x)<(y))?(x):(y))
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL<<PAGE_SHIFT)
+
+struct ofh_args {
+ u32 ofa_service;
+ u32 ofa_nargs;
+ u32 ofa_nreturns;
+ s32 ofa_args[0];
+};
+
+typedef s32 (ofh_func_t)(u32, u32, s32 [], s32 [], ulong b);
+
+struct ofh_srvc {
+ const char *ofs_name;
+ ofh_func_t *ofs_func;
+ u32 ofs_hash;
+};
+
+extern ofh_func_t ofh_test_method;
+extern ofh_func_t ofh_nosup;
+
+/* device tree */
+extern ofh_func_t ofh_peer;
+extern ofh_func_t ofh_child;
+extern ofh_func_t ofh_parent;
+extern ofh_func_t ofh_instance_to_package;
+extern ofh_func_t ofh_getproplen;
+extern ofh_func_t ofh_getprop;
+extern ofh_func_t ofh_nextprop;
+extern ofh_func_t ofh_setprop;
+extern ofh_func_t ofh_canon;
+extern ofh_func_t ofh_finddevice;
+extern ofh_func_t ofh_instance_to_path;
+extern ofh_func_t ofh_package_to_path;
+extern ofh_func_t ofh_call_method;
+
+/* IO */
+extern ofh_func_t ofh_open;
+extern ofh_func_t ofh_close;
+extern ofh_func_t ofh_read;
+extern ofh_func_t ofh_write;
+extern ofh_func_t ofh_seek;
+
+/* memory */
+extern ofh_func_t ofh_claim;
+extern ofh_func_t ofh_release;
+
+/* control */
+extern ofh_func_t ofh_boot;
+extern ofh_func_t ofh_enter;
+extern ofh_func_t ofh_exit; /* __attribute__ ((noreturn)); */
+extern ofh_func_t ofh_chain;
+extern ofh_func_t ofh_quiesce;
+
+extern struct ofh_srvc ofh_srvc[];
+extern struct ofh_srvc ofh_isa_srvc[];
+extern s32 ofh_active_package;
+
+struct ofh_methods {
+ const char *ofm_name;
+ ofh_func_t *ofm_method;
+};
+
+struct ofh_ihandle {
+ s32 (*ofi_close)(void);
+ s32 (*ofi_read)(s32 chan, void *buf, u32 count, s32 *actual, ulong b);
+ s32 (*ofi_write)(s32 chan, const void *buf, u32 count, s32 *actual,
+ ulong b);
+ s32 (*ofi_seek)(u32 pos_hi, u32 pos_lo, u32 *status);
+ struct ofh_methods *ofi_methods;
+ struct xencons_interface *ofi_intf;
+ s32 ofi_node;
+ s32 ofi_chan;
+};
+
+struct ofh_imem {
+ s32 (*ofi_xlate)(void *addr, u32 ret[4]);
+};
+
+
+enum prop_type {
+ pt_byte_array,
+ pt_value,
+ pt_string,
+ pt_composite,
+ /* these are for our own use */
+ pt_func,
+};
+
+extern s32 ofh_start(struct ofh_args *);
+
+#define OFH_CONS_XEN -1
+extern void ofh_cons_init(struct ofh_ihandle *ihp, ulong b);
+extern s32 ofh_cons_read(s32 chan, void *buf, u32 count, s32 *actual);
+extern s32 ofh_cons_write(const void *buf, u32 count, s32 *actual);
+extern s32 ofh_cons_close(void);
+extern s32 ofh_handler(struct ofh_args *args, ulong ifh_base);
+extern s32 leap(u32 nargs, u32 nrets, s32 args[], s32 rets[],
+ ulong ba, void *f);
+
+extern s32 io_leap(s32 chan, void *buf, u32 sz, s32 *actual,
+ ulong ba, void *f);
+
+extern void ofh_vty_init(ofdn_t chosen, ulong b);
+extern void ofh_rtas_init(ulong b);
+
+extern void *_ofh_tree;
+
+#if 1
+#define DRELA(p,b) ((__typeof__ (p))((((ulong)(p)) + (b))))
+#else
+#define DRELA(p,b) (b == b ? p : 0)
+#endif
+extern ulong get_base(void);
+
+static inline void *ofd_mem(ulong base) { return *DRELA(&_ofh_tree, base); }
+
+extern ofh_func_t ofh_start_cpu;
+extern ofh_func_t ofh_stop_self;
+extern ofh_func_t ofh_idle_self;
+extern ofh_func_t ofh_resume_cpu;
+
+/* In Open Firmware, we only use xencomm for reading/writing console data.
+ * Since that's always small, we can use this fixed-size structure. */
+#define XENCOMM_MINI_ADDRS 3
+struct xencomm_mini {
+ struct xencomm_desc _desc;
+ u64 address[XENCOMM_MINI_ADDRS];
+};
+
+extern int xencomm_create_mini(void *area, int arealen, void *buffer,
+ unsigned long bytes, struct xencomm_desc **ret);
+
+#endif
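
A note on DRELA(): the handler is linked at address 0 but runs wherever it was copied, so every access to one of its own globals has to add the run-time base that each entry point is handed. A minimal sketch of the pattern used throughout ofh.c, with a made-up global:

    static u32 hit_count;               /* hypothetical handler global */

    static void record_hit(ulong b)
    {
        u32 *p = DRELA(&hit_count, b);  /* link-time address plus base b */
        (*p)++;                         /* a bare hit_count++ would write to
                                         * the unrelocated, wrong address */
    }
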
diff --git a/xen/arch/powerpc/of_handler/papr.S b/xen/arch/powerpc/of_handler/papr.S
new file mode 100644
index 0000000000..16ba6cfed4
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/papr.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/config.h>
+#include <asm/processor.h>
+#include <asm/papr.h>
+#include <asm/asm-offsets.h>
+
+#define HSC .long 0x44000022
+
+
+/* the 'in' argument is unused */
+#define PAPR(in, out, name, func_code) \
+ _GLOBAL(name); \
+ std r3,-GPR_WIDTH(r1); \
+ li r3,func_code; \
+ HSC; \
+ ld r12,-GPR_WIDTH(r1); \
+ cmpi 0,r12,0; \
+ bne ret ## out; /* only store regs if r12 != NULL */ \
+ b ret0
+
+ret8: std r11, 7 * GPR_WIDTH(r12)
+ret7: std r10, 6 * GPR_WIDTH(r12)
+ret6: std r9, 5 * GPR_WIDTH(r12)
+ret5: std r8, 4 * GPR_WIDTH(r12)
+ret4: std r7, 3 * GPR_WIDTH(r12)
+ret3: std r6, 2 * GPR_WIDTH(r12)
+ret2: std r5, 1 * GPR_WIDTH(r12)
+ret1: std r4, 0 * GPR_WIDTH(r12)
+ nop
+ret0: blr
+
+PAPR(5, 2,papr_remove, H_REMOVE)
+PAPR(5, 1,papr_clear_mod, H_CLEAR_MOD)
+PAPR(5, 1,papr_clear_ref, H_CLEAR_REF)
+PAPR(5, 0,papr_protect, H_PROTECT)
+PAPR(1, 0,papr_eoi, H_EOI)
+PAPR(5, 1,papr_cppr, H_CPPR)
+PAPR(5, 2,papr_ipi, H_IPI)
+PAPR(5, 1,papr_ipoll, H_IPOLL)
+PAPR(5, 1,papr_xirr, H_XIRR)
+PAPR(2, 0,papr_interrupt, H_INTERRUPT)
+PAPR(5, 1,papr_logical_ci_load_64, H_LOGICAL_CI_LOAD)
+PAPR(5, 0,papr_logical_ci_store_64, H_LOGICAL_CI_STORE)
+PAPR(5, 1,papr_logical_cache_load_64, H_LOGICAL_CACHE_LOAD)
+PAPR(5, 0,papr_logical_cache_store_64, H_LOGICAL_CACHE_STORE)
+PAPR(5, 0,papr_logical_icbi, H_LOGICAL_ICBI)
+PAPR(5, 0,papr_logical_dcbf, H_LOGICAL_DCBF)
+PAPR(5, 1,papr_set_dabr, H_SET_DABR)
+PAPR(5, 1,papr_real_to_logical, H_REAL_TO_LOGICAL)
+PAPR(5, 1,papr_pci_config_read, H_PCI_CONFIG_READ)
+PAPR(5, 0,papr_pci_config_write, H_PCI_CONFIG_WRITE)
+
+PAPR(5, 1,papr_grant_logical, H_GRANT_LOGICAL)
+PAPR(1, 1,papr_accept_logical, H_ACCEPT_LOGICAL)
+PAPR(0, 2,papr_rescind_logical, H_RESCIND_LOGICAL)
+PAPR(3, 0,papr_register_vterm, H_REGISTER_VTERM)
+PAPR(4, 0,papr_vterm_partner_info, H_VTERM_PARTNER_INFO)
+PAPR(1, 0,papr_free_vterm, H_FREE_VTERM)
+
+/* Definitions for hypervisor functions. Note that we do not use the
+ * first macro arg */
+
+PAPR(x, 1,papr_enter, H_ENTER)
+PAPR(x, 8,papr_read, H_READ)
+PAPR(x, 1,papr_thread_control, H_THREAD_CONTROL)
+PAPR(x, 0,papr_cede, H_CEDE)
+
+PAPR(x, 0,papr_page_init, H_PAGE_INIT)
+PAPR(x, 1,papr_set_asr, H_SET_ASR) /* ISTAR only. */
+PAPR(x, 0,papr_asr_on, H_ASR_ON) /* ISTAR only. */
+PAPR(x, 0,papr_asr_off, H_ASR_OFF) /* ISTAR only. */
+
+PAPR(x, 8,papr_hypervisor_data, H_HYPERVISOR_DATA)
+
+PAPR(x, 2,papr_get_xive, H_GET_XIVE)
+PAPR(x, 0,papr_set_xive, H_SET_XIVE)
+
+
+PAPR(x, 0,papr_put_term_char, H_PUT_TERM_CHAR)
+PAPR(x, 3,papr_get_term_char, H_GET_TERM_CHAR)
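
For reference, a hedged sketch of how the stubs generated by PAPR() are called from C, matching the prototypes in papr.h below: the retvals pointer goes in r3, the hypercall number replaces it before HSC, and on return r4..r11 are copied into retvals[] when the pointer is non-NULL. Treating 0 as H_SUCCESS follows the usual PAPR convention; the wrapper function is invented:

    static void example_hcalls(void)
    {
        ulong rets[8];              /* big enough for the 8-output calls */
        u32 xirr = 0;

        if (papr_xirr(rets) == 0)   /* 0 is H_SUCCESS under PAPR */
            xirr = (u32)rets[0];    /* ret1 stored r4 into rets[0] */

        (void)papr_cede(NULL);      /* zero outputs: a NULL retvals is fine,
                                     * the stub performs no stores */
        (void)xirr;
    }
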
diff --git a/xen/arch/powerpc/of_handler/papr.h b/xen/arch/powerpc/of_handler/papr.h
new file mode 100644
index 0000000000..19e4478efd
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/papr.h
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _OFH_PAPR_H
+#define _OFH_PAPR_H
+
+#include <asm/papr.h>
+
+#ifndef __ASSEMBLY__
+
+extern long papr_enter(ulong *retvals, ulong flags, ulong idx, ...);
+extern long papr_read(ulong *retvals, ulong flags, ulong idx);
+extern long papr_remove(ulong *retvals, ulong flags, ulong pte_index,
+ ulong avpn);
+extern long papr_clear_mod(ulong *retvals, ulong flags, ulong pte_index);
+extern long papr_clear_ref(ulong *retvals, ulong flags, ulong pte_index);
+extern long papr_protect(ulong *retvals, ulong flags, ulong pte_index,
+ ulong avpn);
+extern long papr_get_term_char(ulong *retvals, ulong idx);
+extern long papr_put_term_char(ulong *retvals, ulong idx, ulong count, ...);
+extern long papr_register_vterm(ulong *retvals, ulong ua, ulong plpid, ulong pua);
+extern long papr_vterm_partner_info(ulong *retvals, ulong ua, ulong plpid,
+ ulong pua, ulong lpage);
+extern long papr_free_vterm(ulong *retvals, ulong uaddr);
+
+extern long papr_cede(ulong *retvals);
+extern long papr_page_init(ulong *retvals, ulong flags,
+ ulong destination, ulong source);
+extern long papr_set_asr(ulong *retvals, ulong value); /* ISTAR only. */
+extern long papr_asr_on(ulong *retvals); /* ISTAR only. */
+extern long papr_asr_off(ulong *retvals); /* ISTAR only. */
+extern long papr_eoi(ulong *retvals, ulong xirr);
+extern long papr_cppr(ulong *retvals, ulong cppr);
+extern long papr_ipi(ulong *retvals, ulong sn, ulong mfrr);
+extern long papr_ipoll(ulong *retvals, ulong sn);
+extern long papr_xirr(ulong *retvals);
+extern long papr_logical_ci_load_64(ulong *retvals, ulong size,
+ ulong addrAndVal);
+extern long papr_logical_ci_store_64(ulong *retvals, ulong size,
+ ulong addr, ulong value);
+extern long papr_logical_cache_load_64(ulong *retvals, ulong size,
+ ulong addrAndVal);
+extern long papr_logical_cache_store_64(ulong *retvals, ulong size,
+ ulong addr, ulong value);
+extern long papr_logical_icbi(ulong *retvals, ulong addr);
+extern long papr_logical_dcbf(ulong *retvals, ulong addr);
+extern long papr_set_dabr(ulong *retvals, ulong dabr);
+extern long papr_hypervisor_data(ulong *retvals, u64 control);
+extern long papr_real_to_logical(ulong *retvals, ulong raddr);
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* ! _OFH_PAPR_H */
diff --git a/xen/arch/powerpc/of_handler/services.c b/xen/arch/powerpc/of_handler/services.c
new file mode 100644
index 0000000000..068d08c8e6
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/services.c
@@ -0,0 +1,96 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+
+/*
+ * These are ISA independent OF services
+ */
+
+struct ofh_srvc ofh_srvc[] = {
+ /* Document Section 6.3.2.1 Client Interface */
+ { .ofs_name = "test", .ofs_func = ((ofh_func_t *)~0UL) },
+ { .ofs_name = "test-method", .ofs_func = ofh_test_method },
+
+ /* Document Section 6.3.2.2 Device Tree */
+ { .ofs_name = "peer", .ofs_func = ofh_peer },
+ { .ofs_name = "child", .ofs_func = ofh_child },
+ { .ofs_name = "parent", .ofs_func = ofh_parent },
+ { .ofs_name = "instance-to-package",
+ .ofs_func = ofh_instance_to_package },
+ { .ofs_name = "getproplen", .ofs_func = ofh_getproplen },
+ { .ofs_name = "getprop", .ofs_func = ofh_getprop },
+ { .ofs_name = "nextprop", .ofs_func = ofh_nextprop },
+ { .ofs_name = "setprop", .ofs_func = ofh_setprop },
+ { .ofs_name = "canon", .ofs_func = ofh_canon },
+ { .ofs_name = "finddevice", .ofs_func = ofh_finddevice },
+ { .ofs_name = "instance-to-path", .ofs_func = ofh_instance_to_path },
+ { .ofs_name = "package-to-path", .ofs_func = ofh_package_to_path },
+ { .ofs_name = "call-method", .ofs_func = ofh_call_method },
+
+ /* Document Section 6.3.2.3 Device I/O */
+ { .ofs_name = "open", .ofs_func = ofh_open },
+ { .ofs_name = "close", .ofs_func = ofh_close },
+ { .ofs_name = "read", .ofs_func = ofh_read },
+ { .ofs_name = "write", .ofs_func = ofh_write },
+ { .ofs_name = "seek", .ofs_func = ofh_seek },
+
+ /* Document Section 6.3.2.4 Memory */
+ { .ofs_name = "claim", .ofs_func = ofh_claim },
+ { .ofs_name = "release", .ofs_func = ofh_release },
+
+ /* Document Section 6.3.2.5 Control Transfer */
+ { .ofs_name = "boot", .ofs_func = ofh_boot },
+ { .ofs_name = "enter", .ofs_func = ofh_enter },
+ { .ofs_name = "exit", .ofs_func = ofh_exit },
+ { .ofs_name = "chain", .ofs_func = ofh_chain },
+ { .ofs_name = "quiesce", .ofs_func = ofh_quiesce },
+
+ /* Document Section 6.3.2.6 User Interface */
+ { .ofs_name = "interpret", .ofs_func = ofh_nosup },
+ { .ofs_name = "set-callback", .ofs_func = ofh_nosup },
+ { .ofs_name = "set-symbol-lookup", .ofs_func = ofh_nosup },
+
+ /* Document Section 6.3.2.7 Time */
+ { .ofs_name = "milliseconds", .ofs_func = ofh_nosup },
+ { .ofs_name = NULL, .ofs_func = NULL}
+};
+
+/*
+ * These are services particular to PowerPC 32/64
+ */
+
+struct ofh_srvc ofh_isa_srvc[] = {
+ /* Document Section 8.5.1 Real-Mode physical memory ... */
+ { .ofs_name = "alloc-real_mem", .ofs_func = ofh_nosup },
+
+ /* Document Section 8.5.2 Virtual address translation ... */
+ { .ofs_name = "map", .ofs_func = ofh_nosup },
+ { .ofs_name = "unmap", .ofs_func = ofh_nosup },
+ { .ofs_name = "translate", .ofs_func = ofh_nosup },
+
+ /* Document Section 11.3 Client Interface Services */
+ { .ofs_name = "start-cpu", .ofs_func = ofh_start_cpu },
+ { .ofs_name = "stop-self", .ofs_func = ofh_stop_self },
+ { .ofs_name = "idle-self", .ofs_func = ofh_idle_self },
+ { .ofs_name = "resume-cpu", .ofs_func = ofh_resume_cpu },
+ { .ofs_name = NULL, .ofs_func = NULL}
+};
+
diff --git a/xen/arch/powerpc/of_handler/snprintf.c b/xen/arch/powerpc/of_handler/snprintf.c
new file mode 100644
index 0000000000..2f3bee5602
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/snprintf.c
@@ -0,0 +1,332 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/types.h>
+#include <stdarg.h>
+
+#define UPPER 0x00001
+#define SIGNED 0x00010
+#define ZERO 0x00100
+#define PTR 0x01000
+
+#define NUMBUFSZ 24 /* fits a 64bit value formatted in octal */
+
+/* do we need to handle 128 bits? */
+static ulong
+digits(char *buf, uint64_t val, ulong radix, ulong width, ulong flgs)
+{
+ const char hex[] = "0123456789abcdefx";
+ const char Hex[] = "0123456789ABCDEFX";
+ const char *dig;
+ char *b = buf;
+ char num[NUMBUFSZ];
+ ulong i;
+
+ if (radix == 0 || radix > 16) {
+ radix = 16;
+ }
+
+ if (flgs & UPPER) {
+ dig = Hex;
+ } else {
+ dig = hex;
+ }
+
+ /* sign */
+ if (flgs & SIGNED && radix == 10) {
+ /* there are corner cases here, for sure */
+ if ((int64_t)val < 0) {
+ *b++ = '-';
+ val *= -1;
+ }
+ }
+
+ /* ptr */
+ if (flgs & PTR && radix == 16) {
+ *b++ = '0';
+ *b++ = dig[16];
+ }
+
+ /* put it in t backwards */
+ i = 0;
+ if (val == 0) {
+ num[i++] = '0';
+ } else {
+ while (val > 0) {
+ num[i++] = dig[val % radix];
+ val /= radix;
+ }
+ }
+
+ /* pad */
+ if (flgs & ZERO && width > i) {
+ while (width-- > i) {
+ *b++ = '0';
+ }
+ }
+
+ /* number */
+ while (i-- > 0) {
+ *b++ = num[i];
+ }
+
+ return (b - buf);
+}
+
+/*
+ * yeah, I dislike goto's too, but ...
+ */
+int
+vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ int c;
+ int nullify;
+ ulong used = 0;
+ ulong sz;
+ unsigned ells;
+ ulong flgs;
+ const char *str;
+ uint64_t val = 0;
+ ulong radix;
+ ulong width;
+ char num[NUMBUFSZ];
+
+ /* there must always be a trailing null */
+ if (size == 0) {
+        /* but don't write anything if size is zero */
+ nullify = 0;
+ } else {
+ --size;
+ nullify = 1;
+ }
+
+ while ((c = *fmt++) != '\0') {
+ if (c != '%') {
+ if (used++ < size) {
+ *buf++ = c;
+ }
+ continue;
+ }
+ /* deal with format */
+ ells = 0;
+ flgs = 0;
+
+ /* check for a given width */
+ width = 0;
+
+ c = *fmt;
+ if (c >= '0' && c <= '9') {
+ flgs |= ZERO;
+ ++fmt;
+ while (c >= '0' && c <= '9') {
+ width = (width * 10) + (c - '0');
+ c = *fmt++;
+ }
+ --fmt;
+ }
+
+loop:
+ c = *fmt++;
+ switch (c) {
+ case 'l':
+ ++ells;
+ goto loop;
+ /*NOTREACHED*/
+ break;
+
+ case 'h': /* support linux kernel 'h' for short */
+ ells = 0;
+ goto loop;
+ /*NOTREACHED*/
+ break;
+
+ case 'L': /* support linux kernel 'L' for long long */
+ ells = 2;
+ goto loop;
+ /*NOTREACHED*/
+ break;
+
+ case 'Z': /* support linux kernel 'Z' for [s]size_t */
+ /* I think it is safe to assume that 'long'
+             * just gets it right, but the compiler should
+ * do the right thing here anyway */
+ if (sizeof (size_t) > sizeof (unsigned)) {
+ ells = 1;
+ }
+ goto loop;
+ /*NOTREACHED*/
+ break;
+ case 's':
+ str = va_arg(ap, char *);
+ if (str == NULL) {
+ str = "(nil)";
+ }
+
+ /* copy over only what fits */
+ sz = 0;
+ while (*str != '\0') {
+ c = *str++;
+ if (used++ < size) {
+ *buf++ = c;
+ }
+ }
+ break;
+ case 'c':
+ c = (char)va_arg(ap, int);
+ /*FALLTHRU*/
+ case '%':
+ if (used++ < size) {
+ *buf++ = c;
+ }
+ break;
+
+ case 'n':
+ /* totally untested */
+ switch (ells) {
+ case 0: {
+ unsigned *pval = va_arg(ap, unsigned *);
+ *pval = used;
+ }
+ break;
+ case 1: {
+ unsigned long *pval;
+ pval = va_arg(ap, unsigned long *);
+ *pval = used;
+ }
+ break;
+ default: {
+ unsigned long long *pval;
+ pval = va_arg(ap, unsigned long long *);
+ *pval = used;
+ }
+ break;
+ }
+
+ break;
+ case 'p':
+ flgs |= (PTR | ZERO);
+ radix = 16;
+ val = (unsigned long) va_arg(ap, void *);
+ /* pad to max type by default */
+ if (sizeof (long) == sizeof (long long)) {
+ width = 16;
+ } else {
+ width = 8;
+ }
+ goto print_value;
+
+ case 'd': case 'i':
+ flgs |= SIGNED;
+ radix = 10;
+ switch (ells) {
+ case 0:
+ val = va_arg(ap, int);
+ break;
+ case 1:
+ val = va_arg(ap, long);
+ break;
+ default:
+ val = va_arg(ap, long long);
+ break;
+ }
+ goto print_value;
+
+ case 'u':
+ radix = 10;
+ goto print_ulongue;
+ break;
+
+ case 'o':
+ radix = 8;
+ goto print_ulongue;
+ break;
+
+ case 'X':
+ flgs |= UPPER;
+ /*FALLTHRU*/
+ case 'x':
+ radix = 16;
+print_ulongue:
+ switch (ells) {
+ case 0:
+ val = va_arg(ap, unsigned);
+ break;
+ case 1:
+ val = va_arg(ap, unsigned long);
+ break;
+ default:
+ val = va_arg(ap, unsigned long long);
+ break;
+ }
+
+print_value:
+ /* get the number */
+ sz = digits(num, val, radix, width, flgs);
+
+ str = num;
+ while (sz-- > 0) {
+ c = *str++;
+ if (used++ < size) {
+ *buf++ = c;
+ }
+ }
+ break;
+
+
+ default:
+ break;
+ }
+ }
+ if (nullify) {
+ /* stuff a nul char but don't include it in return */
+ *buf++ = '\0';
+ }
+ return used;
+}
+
+int
+snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list ap;
+ signed int ret;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(buf, size, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+int
+vsprintf(char *buf, const char *fmt, va_list ap)
+{
+ return vsnprintf(buf, ~0UL, fmt, ap);
+}
+
+int
+sprintf(char *buf, const char *fmt, ...)
+{
+ va_list ap;
+ signed int ret;
+
+ va_start(ap, fmt);
+ ret = vsprintf(buf, fmt, ap);
+ va_end(ap);
+ return ret;
+}
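
A few illustrative calls into the formatter above, restricted to the conversions it actually implements and used the same way ofh_options_init() and debug() use it; the buffer and values are made up:

    static void example_formatting(void)
    {
        char buf[32];

        snprintf(buf, sizeof (buf), "0x%x", 0x2000000u);  /* "0x2000000" */
        snprintf(buf, sizeof (buf), "%016lx", 0x1234UL);  /* zero-padded hex */
        snprintf(buf, sizeof (buf), "%s:%d", "vty", -1);  /* "vty:-1" */
    }
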
diff --git a/xen/arch/powerpc/of_handler/strcmp.c b/xen/arch/powerpc/of_handler/strcmp.c
new file mode 100644
index 0000000000..5125dc7636
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/strcmp.c
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 == *s2) {
+ /* characters are equal; if we are at the end, return EQUAL */
+ if (*s1 == '\0') {
+ return(0);
+ }
+ /* advance pointers to next character */
+ s1++;
+ s2++;
+ }
+ return(*s1 - *s2);
+}
diff --git a/xen/arch/powerpc/of_handler/strlen.c b/xen/arch/powerpc/of_handler/strlen.c
new file mode 100644
index 0000000000..ca9b13fbaf
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/strlen.c
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+size_t
+strlen(const char *s)
+{
+ ulong i = 0;
+ while (*s++ != '\0')
+ i++;
+ return i;
+}
diff --git a/xen/arch/powerpc/of_handler/strncmp.c b/xen/arch/powerpc/of_handler/strncmp.c
new file mode 100644
index 0000000000..4d03374344
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/strncmp.c
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+ while (n > 0) {
+ if (*s1 != *s2) {
+ return (*s1 - *s2);
+ }
+ if (*s1 == '\0') {
+ return (0);
+ }
+ /* advance pointers to next character */
+ ++s1;
+ ++s2;
+ --n;
+ }
+ return 0;
+}
diff --git a/xen/arch/powerpc/of_handler/strncpy.c b/xen/arch/powerpc/of_handler/strncpy.c
new file mode 100644
index 0000000000..76c5c7182a
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/strncpy.c
@@ -0,0 +1,54 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+char *
+strncpy(char *dest, const char *src, size_t n)
+{
+ char *dp;
+
+ /* cases to consider:
+     * dest is NULL, src is NULL;
+ * src is empty (0);
+ * src is not empty, less than n;
+ * src is not empty, equal to n;
+ * src is not empty, greater than n;
+ */
+
+ if (n <= 0) {
+ return dest;
+ }
+
+ dp = dest;
+
+ do {
+ *dp++ = *src;
+ --n;
+ ++src;
+ } while ((*src != '\0') && (n > 0));
+
+ /* clear remainder of buffer (if any); ANSI semantics */
+ while (n > 0) {
+ *dp++ = '\0';
+ --n;
+ }
+ return dest;
+}
diff --git a/xen/arch/powerpc/of_handler/strnlen.c b/xen/arch/powerpc/of_handler/strnlen.c
new file mode 100644
index 0000000000..828f4d084f
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/strnlen.c
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/string.h>
+
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+ ulong i = 0;
+ while (*s++ != '\0' && i < maxlen)
+ i++;
+ return i;
+}
diff --git a/xen/arch/powerpc/of_handler/vdevice.c b/xen/arch/powerpc/of_handler/vdevice.c
new file mode 100644
index 0000000000..5ad5999cbf
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/vdevice.c
@@ -0,0 +1,74 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "ofh.h"
+
+static struct ofh_ihandle _ih_cons;
+
+void
+ofh_vty_init(ofdn_t chosen, ulong b)
+{
+ void *mem = ofd_mem(b);
+ u32 ih = DRELA((u32)&_ih_cons, b);
+ struct ofh_ihandle *ihp = (struct ofh_ihandle *)((ulong)ih);
+ ofdn_t n = 0;
+ s32 ret;
+ u32 chan = OFH_CONS_XEN;
+
+ ihp->ofi_intf = NULL;
+
+ /* find the vty */
+ n = ofd_node_find(mem,
+ DRELA((const char *)"/vdevice/vty", b));
+ if (n > 0) {
+ /* PAPR VTERM */
+ ret = ofd_getprop(mem, n, DRELA((const char *)"reg", b),
+ &chan, sizeof (chan));
+ if (ret != (s32)sizeof (chan)) {
+ chan = 0;
+ }
+ } else {
+ /* xen console */
+ u32 addr;
+
+ n = ofd_node_find(mem, DRELA((const char *)"/xen/console", b));
+ if (n > 0) {
+ ret = ofd_getprop(mem, n, DRELA((const char *)"reg", b),
+ &addr, sizeof (addr));
+ if (addr == 0) {
+ ihp->ofi_intf = NULL;
+ } else {
+ ihp->ofi_intf = (struct xencons_interface *)(ulong)addr;
+ }
+ }
+ }
+ if (n > 0) {
+ ihp->ofi_node = n;
+ }
+ ihp->ofi_chan = chan;
+ ofh_cons_init(ihp, b);
+
+ ofd_prop_add(mem, chosen, DRELA((const char *)"stdout", b),
+ &ih, sizeof (ih));
+ ofd_prop_add(mem, chosen, DRELA((const char *)"stdin", b),
+ &ih, sizeof (ih));
+}
+
+
diff --git a/xen/arch/powerpc/of_handler/xen_hvcall.S b/xen/arch/powerpc/of_handler/xen_hvcall.S
new file mode 100644
index 0000000000..0182f83c7a
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/xen_hvcall.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/config.h>
+#include <asm/processor.h>
+#include <asm/papr.h>
+#include <asm/asm-offsets.h>
+
+#define HSC .long 0x44000022
+
+_GLOBAL(xen_hvcall)
+ HSC
+ blr
diff --git a/xen/arch/powerpc/of_handler/xencomm.c b/xen/arch/powerpc/of_handler/xencomm.c
new file mode 100644
index 0000000000..867fcd77b3
--- /dev/null
+++ b/xen/arch/powerpc/of_handler/xencomm.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include "ofh.h"
+
+static int __xencomm_init(struct xencomm_desc *desc, void *buffer,
+ unsigned long bytes)
+{
+ int recorded = 0;
+ int i = 0;
+
+ /* record the physical pages used */
+ while ((recorded < bytes) && (i < desc->nr_addrs)) {
+ unsigned long paddr = (unsigned long)buffer + recorded;
+ int offset;
+ int chunksz;
+
+ offset = (unsigned long)paddr % PAGE_SIZE; /* handle partial pages */
+ chunksz = MIN(PAGE_SIZE - offset, (unsigned long)bytes - recorded);
+
+ desc->address[i++] = paddr;
+ recorded += chunksz;
+ }
+
+ if (recorded < bytes)
+ return -1;
+
+ desc->magic = XENCOMM_MAGIC;
+
+ return 0;
+}
+
+static void *__xencomm_alloc_mini(void *area, int arealen)
+{
+ unsigned long base = (unsigned long)area;
+ unsigned int pageoffset;
+
+ pageoffset = base % PAGE_SIZE;
+
+ /* we probably fit right at the front of area */
+ if ((PAGE_SIZE - pageoffset) >= sizeof(struct xencomm_mini)) {
+ return area;
+ }
+
+ /* if not, see if area is big enough to advance to the next page */
+    if ((arealen - (PAGE_SIZE - pageoffset)) >= sizeof(struct xencomm_mini))
+        return (void *)(base + (PAGE_SIZE - pageoffset));
+
+ /* area was too small */
+ return NULL;
+}
+
+/* allocate a xencomm_mini out of a preallocated memory area */
+int xencomm_create_mini(void *area, int arealen, void *buffer,
+ unsigned long bytes, struct xencomm_desc **ret)
+{
+ struct xencomm_desc *desc = __xencomm_alloc_mini(area, arealen);
+ if (!desc)
+ return -1;
+
+ desc->nr_addrs = XENCOMM_MINI_ADDRS;
+ if (__xencomm_init(desc, buffer, bytes))
+ return -1;
+
+ *ret = desc;
+ return 0;
+}
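
A hedged usage sketch for xencomm_create_mini(): the caller hands in a small scratch area from which a page-contiguous slot is carved, plus the buffer to describe, and gets back a descriptor listing the real pages behind that buffer, as the console path mentioned in ofh.h expects. The wrapper below is illustrative only:

    static int example_wrap(void *buffer, unsigned long len,
                            struct xencomm_desc **descp)
    {
        /* two entries so a page-contiguous slot always exists, even if the
         * first one happens to straddle a page boundary */
        static struct xencomm_mini area[2];

        return xencomm_create_mini(area, sizeof (area), buffer, len, descp);
    }
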
diff --git a/xen/arch/powerpc/ofd_fixup.c b/xen/arch/powerpc/ofd_fixup.c
new file mode 100644
index 0000000000..6178a1fabe
--- /dev/null
+++ b/xen/arch/powerpc/ofd_fixup.c
@@ -0,0 +1,509 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/compile.h>
+#include <public/xen.h>
+#include "of-devtree.h"
+
+#undef RTAS
+
+#ifdef PAPR_VTERM
+static ofdn_t ofd_vdevice_vty(void *m, ofdn_t p, struct domain *d)
+{
+ ofdn_t n;
+ static const char pathfmt[] = "/vdevice/vty@%x";
+ static const char name[] = "vty";
+ static const char compatible[] = "hvterm1";
+ static const char device_type[] = "serial";
+ char path[sizeof (pathfmt) + 8 - 2];
+ int client = 0;
+
+ snprintf(path, sizeof (path), pathfmt, client);
+ n = ofd_node_add(m, p, path, sizeof (path));
+
+ if (n > 0) {
+ u32 val32;
+
+ val32 = client;
+ ofd_prop_add(m, n, "name", name, sizeof (name));
+ ofd_prop_add(m, n, "reg", &val32, sizeof (val32));
+ ofd_prop_add(m, n, "compatible",
+ compatible, sizeof (compatible));
+ ofd_prop_add(m, n, "device_type",
+ device_type, sizeof (device_type));
+ }
+
+ return n;
+}
+#endif
+
+#ifdef PAPR_VDEVICE
+static ofdn_t ofd_vdevice(void *m, struct domain *d)
+{
+ ofdn_t n;
+ static const char path[] = "/vdevice";
+ static const char name[] = "vdevice";
+ static const char compatible[] = "IBM,vdevice";
+ u32 val;
+
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+
+ if (n > 0) {
+
+ ofd_prop_add(m, n, "name", name, sizeof (name));
+ val = 1;
+ ofd_prop_add(m, n, "#address-cells", &val, sizeof (val));
+ val = 0;
+ ofd_prop_add(m, n, "#size-cells", &val, sizeof (val));
+ ofd_prop_add(m, n, "compatible",
+ compatible, sizeof (compatible));
+ ofd_prop_add(m, n, "device_type", name, sizeof (name));
+ ofd_prop_add(m, n, "interupt-controller", NULL, 0);
+
+#ifdef PAPR_VDEVICE
+ ofdn_t r;
+
+ /* add vty */
+ r = ofd_vdevice_vty(m, n, d);
+ printk("vdevice r: %x\n", r);
+ n = r;
+#endif
+ }
+ return n;
+}
+#endif
+
+static ofdn_t ofd_openprom_props(void *m)
+{
+ static const char path[] = "/openprom";
+ static const char vernum[] = "IBM,XenOF0.1";
+ ofdn_t n;
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+ /* I want to override */
+ ofd_prop_add(m, n, "model", vernum, sizeof(vernum));
+ ofd_prop_add(m, n, "ibm,fw-vernum_encoded", vernum, sizeof(vernum));
+ ofd_prop_add(m, n, "relative-addressing", NULL, 0);
+ return n;
+
+}
+
+#ifdef PAPR_VTERM
+static ofdn_t ofd_aliases_props(void *m)
+{
+ static const char path[] = "/aliases";
+ static const char screen[] = "/vdevice/vty@0";
+ ofdn_t n;
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+ ofd_prop_add(m, n, "screen", screen, sizeof(screen));
+ return n;
+}
+#endif
+
+static ofdn_t ofd_options_props(void *m)
+{
+ static const char path[] = "/options";
+ static const char boot[] = "true";
+ ofdn_t n;
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+ ofd_prop_add(m, n, "auto-boot?", boot, sizeof(boot));
+ return n;
+}
+
+static ofdn_t ofd_cpus_props(void *m, struct domain *d)
+{
+ static const char path[] = "/cpus";
+ static const char cpu[] = "cpu";
+ u32 val = 1;
+ ofdn_t n;
+ ofdn_t c;
+ static u32 ibm_pft_size[] = { 0x0, 0x0 };
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+ ofd_prop_add(m, n, "#address-cells", &val, sizeof(val));
+ ofd_prop_add(m, n, "#size-cells", &val, sizeof(val));
+ ofd_prop_add(m, n, "smp-enabled", NULL, 0);
+
+#ifdef HV_EXPOSE_PERFORMANCE_MONITOR
+ ofd_prop_add(m, n, "performance-monitor", NULL, 0);
+#endif
+
+ c = ofd_node_find_by_prop(m, n, "device_type", cpu, sizeof (cpu));
+ while (c > 0) {
+ ibm_pft_size[1] = d->arch.htab.log_num_ptes + LOG_PTE_SIZE;
+ ofd_prop_add(m, c, "ibm,pft-size",
+ ibm_pft_size, sizeof (ibm_pft_size));
+
+        /* FIXME: Check the "l2-cache" property, whose
+         * contents may be an orphaned phandle. */
+ c = ofd_node_find_next(m, c);
+
+ /* Since we are not MP yet we can prune the rest of the CPUs */
+ while (c > 0) {
+ ofdn_t nc;
+
+ nc = ofd_node_find_next(m, c);
+ ofd_node_prune(m, c);
+
+ c = nc;
+ }
+ }
+
+ return n;
+}
+
+#ifdef ADD_XICS
+static ofdn_t ofd_xics_props(void *m)
+{
+ ofdn_t n;
+ static const char path[] = "/interrupt-controller";
+ static const char compat[] = "IBM,ppc-xicp";
+ static const char model[] = "IBM, BoaC, PowerPC-PIC, 00";
+ static const char dtype[] =
+ "PowerPC-External-Interrupt-Presentation";
+ /*
+     * I don't think these are used for anything, but Linux wants
+     * them. They seem to describe some per-processor location for
+     * IPIs, but that is a complete guess.
+ */
+ static const u32 reg[] = {
+ 0x000003e0, 0x0f000000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f001000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f002000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f003000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f004000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f005000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f006000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f007000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f008000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f009000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00a000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00b000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00c000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00d000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00e000, 0x00000000, 0x00001000,
+ 0x000003e0, 0x0f00f000, 0x00000000, 0x00001000,
+ };
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+ ofd_prop_add(m, n, "built-in", NULL, 0);
+ ofd_prop_add(m, n, "compatible", compat, sizeof(compat));
+ ofd_prop_add(m, n, "device_type", dtype, sizeof(dtype));
+ ofd_prop_add(m, n, "model", model, sizeof(model));
+ ofd_prop_add(m, n, "reg", reg, sizeof(reg));
+
+ return n;
+}
+#endif
+
+/*
+ * Good things you can stick here:
+ * init=/bin/bash ip=dhcp root=/dev/hda2 ide=nodma
+ */
+static char default_bootargs[] = "";
+
+static ofdn_t ofd_chosen_props(void *m, const char *cmdline)
+{
+ ofdn_t n;
+ ofdn_t p;
+ static const char path[] = "/chosen";
+ char bootargs[256];
+ int bsz;
+ int sz;
+ int rm;
+
+ n = ofd_node_find(m, path);
+ if (n == 0) {
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ ofd_prop_add(m, n, "name",
+ &path[1], sizeof (path) - 1);
+ }
+
+ strcpy(bootargs, cmdline);
+ bsz = strlen(bootargs) + 1;
+ rm = sizeof (bootargs) - bsz;
+
+ if (default_bootargs != NULL) {
+ sz = strlen(default_bootargs);
+ if (sz > rm) {
+ panic("default_bootargs is too big: 0x%x > 0x%x\n",
+ sz, rm);
+ } else if (sz > 0) {
+ memcpy(&bootargs[bsz - 1], default_bootargs, sz + 1);
+ bsz += sz;
+ rm -= sz;
+ }
+ }
+
+ printk("DOM0 bootargs: %s\n", bootargs);
+ ofd_prop_add(m, n, "bootargs", bootargs, bsz);
+
+ ofd_prop_add(m, n, "bootpath", NULL, 0);
+
+ printk("Remove /chosen/mmu, stub will replace\n");
+ p = ofd_prop_find(m, n, "mmu");
+ if (p > 0) {
+ ofd_prop_remove(m, n, p);
+ }
+
+ return n;
+}
+
+#ifdef RTAS
+static ofdn_t ofd_rtas_props(void *m)
+{
+ static const char path[] = "/rtas";
+ static const char hypertas[] = "dummy";
+ ofdn_t p;
+ ofdn_t n;
+
+    /* just enough to make Linux think it is on an LPAR */
+
+ p = ofd_node_find(m, "/");
+
+ n = ofd_node_add(m, p, path, sizeof(path));
+ ofd_prop_add(m, n, "name", &path[1], sizeof (path) - 1);
+ ofd_prop_add(m, n, "ibm,hypertas-functions", hypertas, sizeof (hypertas));
+
+ return n;
+}
+#endif
+
+struct mem_reg {
+ u64 addr;
+ u64 sz;
+};
+
+static ofdn_t ofd_memory_chunk_create(void *m, ofdn_t p,
+ const char *ppath,
+ const char *name,
+ const char *dt,
+ ulong start, ulong size)
+{
+ struct mem_reg reg;
+ char path[128];
+ ulong l;
+ u32 v;
+ ofdn_t n;
+ ulong nl = strlen(name) + 1;
+ ulong dtl = strlen(dt) + 1;
+
+ l = snprintf(path, sizeof (path), "%s/%s@%lx", ppath, name, start);
+ n = ofd_node_add(m, p, path, l + 1);
+ ofd_prop_add(m, n, "name", name, nl);
+
+ v = 1;
+ ofd_prop_add(m, n, "#address-cells", &v, sizeof (v));
+ v = 0;
+ ofd_prop_add(m, n, "#size-cells", &v, sizeof (v));
+
+ ofd_prop_add(m, n, "device_type", dt, dtl);
+
+ /* physical addresses usable without regard to OF */
+ reg.addr = start;
+ reg.sz = size;
+ ofd_prop_add(m, n, "reg", &reg, sizeof (reg));
+
+ return n;
+}
+
+static ofdn_t ofd_memory_props(void *m, struct domain *d, ulong eoload)
+{
+ ofdn_t n = -1;
+ ulong start = 0;
+ static char name[] = "memory";
+ ulong mem_size = d->arch.rma_size;
+ ulong chunk_size = d->arch.rma_size;
+
+ /* Remove all old memory props */
+ do {
+ ofdn_t old;
+
+ old = ofd_node_find_by_prop(m, OFD_ROOT, "device_type",
+ name, sizeof(name));
+ if (old <= 0) break;
+
+ ofd_node_prune(m, old);
+ } while (1);
+
+ while (start < mem_size) {
+ ulong size = (mem_size < chunk_size) ? mem_size : chunk_size;
+
+ n = ofd_memory_chunk_create(m, OFD_ROOT, "", "memory", "memory",
+ start, size);
+
+ if (start == 0) {
+ /* We are processing the first chunk, which is the RMA */
+
+ /* free list of physical addresses available after OF and
+ * client program have been accounted for */
+ struct mem_reg avail[] = {
+ /* 0 up to OF at 32MiB, minus its 16KiB stack */
+ { .addr = 0, .sz = ((32 << 20) - (16 << 10)) },
+ /* end of loaded material to the end of the chunk, minus 1 page */
+ { .addr = eoload, .sz = chunk_size - eoload - PAGE_SIZE },
+ /* the last page is reserved for xen_start_info */
+ };
+ ofd_prop_add(m, n, "available", &avail,
+ sizeof (avail));
+ }
+
+ start += size;
+ mem_size -= size;
+ }
+ return n;
+}
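/* Editorial sketch, not part of the original patch: the arithmetic behind the
 * "available" property built above, assuming (as the code does) that OF and
 * its 16KiB stack live at 32MiB and that the last RMA page is kept back for
 * xen_start_info.  The helper name is illustrative only. */
static inline void example_rma_available(struct mem_reg avail[2],
                                         ulong rma_size, ulong eoload)
{
    /* low range: everything below the OF image, minus its 16KiB stack */
    avail[0].addr = 0;
    avail[0].sz   = (32UL << 20) - (16UL << 10);

    /* high range: end of the loaded images up to the last RMA page,
     * which is reserved for xen_start_info */
    avail[1].addr = eoload;
    avail[1].sz   = rma_size - eoload - PAGE_SIZE;
}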
+
+static ofdn_t ofd_xen_props(void *m, struct domain *d, start_info_t *si)
+{
+ ofdn_t n;
+ static const char path[] = "/xen";
+ static const char console[] = "/xen/console";
+
+ n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
+ if (n > 0) {
+ char xen[256];
+ int xl;
+ u64 val[2];
+ s32 dom_id;
+
+ dom_id = d->domain_id;
+
+ ofd_prop_add(m, n, "reg", &dom_id, sizeof (dom_id));
+ ofd_prop_add(m, n, "name", &path[1], sizeof (path) - 1);
+
+ xl = snprintf(xen, sizeof (xen), "Xen-%d.%d%s",
+ XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION);
+ ASSERT(xl < sizeof (xen));
+ ofd_prop_add(m, n, "version", xen, xl + 1);
+
+ val[0] = (ulong)si - d->arch.rma_base;
+ val[1] = PAGE_SIZE;
+ ofd_prop_add(m, n, "start-info", val, sizeof (val));
+
+ val[1] = RMA_LAST_DOM0 * PAGE_SIZE;
+ val[0] = d->arch.rma_size - val[1];
+ ofd_prop_add(m, n, "reserved", val, sizeof (val));
+
+ n = ofd_node_add(m, n, console, sizeof (console));
+ if (n > 0) {
+ val[0] = 0;
+ ofd_prop_add(m, n, "interrupts", &val[0], sizeof (val[0]));
+ }
+ }
+ return n;
+}
+extern int ofd_dom0_fixup(
+ struct domain *d, ulong oftree, start_info_t *si, ulong dst);
+int ofd_dom0_fixup(struct domain *d, ulong mem, start_info_t *si, ulong eoload)
+{
+ void *m;
+ const ofdn_t n = OFD_ROOT;
+ ofdn_t r;
+
+ m = (void *)mem;
+
+#ifdef PAPR_VDEVICE
+ printk("Add /vdevice\n");
+ ofd_vdevice(m, d);
+
+ printk("Add /aliases props\n");
+ ofd_aliases_props(m);
+#endif
+
+ printk("Add /openprom props\n");
+ ofd_openprom_props(m);
+
+ printk("Add /options props\n");
+ ofd_options_props(m);
+
+ printk("Add /cpus props\n");
+ ofd_cpus_props(m, d);
+
+ printk("Add /chosen props\n");
+ ofd_chosen_props(m, (char *)si->cmd_line);
+
+ printk("fix /memory@0 props\n");
+ ofd_memory_props(m, d, eoload);
+
+ printk("fix /xen props\n");
+ ofd_xen_props(m, d, si);
+
+ printk("Remove original /dart\n");
+ ofd_prune_path(m, "/dart");
+
+ printk("Remove original /rtas\n");
+ ofd_prune_path(m, "/rtas");
+
+#ifdef RTAS
+ printk("Create a new RTAS with just enough stuff to convince "
+ "Linux that its on LPAR\n");
+ ofd_rtas_props(m);
+#endif
+#ifdef FIX_COMPAT
+ const char compat[] = "Hypervisor,Maple";
+ r = ofd_prop_add(m, n, "compatible", compat, sizeof (compat));
+ ASSERT( r > 0 );
+#endif
+
+ u32 did = d->domain_id;
+ r = ofd_prop_add(m, n, "ibm,partition-no", &did, sizeof(did));
+ ASSERT( r > 0 );
+
+ const char dom0[] = "dom0";
+ r = ofd_prop_add(m, n, "ibm,partition-name", dom0, sizeof (dom0));
+ ASSERT( r > 0 );
+
+
+#ifdef DEBUG
+ ofd_walk(m, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
+#endif
+ return 1;
+}
diff --git a/xen/arch/powerpc/oftree.h b/xen/arch/powerpc/oftree.h
new file mode 100644
index 0000000000..07fc3c0430
--- /dev/null
+++ b/xen/arch/powerpc/oftree.h
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _OFTREE_H
+#define _OFTREE_H
+
+extern ulong oftree;
+extern ulong oftree_len;
+
+extern int ofd_dom0_fixup(
+ struct domain *d, ulong oftree, start_info_t *si, ulong dst);
+
+extern int firmware_image_start[0];
+extern int firmware_image_size[0];
+
+#endif /* #ifndef _OFTREE_H */
diff --git a/xen/arch/powerpc/papr/Makefile b/xen/arch/powerpc/papr/Makefile
new file mode 100644
index 0000000000..8ea5183d3f
--- /dev/null
+++ b/xen/arch/powerpc/papr/Makefile
@@ -0,0 +1,10 @@
+#
+# Build the Open Firmware handler
+#
+
+# PAPR support
+obj-y += debug.o
+obj-y += tce.o
+obj-y += vtce.o
+obj-$(papr_vterm) += vterm.o
+obj-y += xlate.o
diff --git a/xen/arch/powerpc/papr/debug.c b/xen/arch/powerpc/papr/debug.c
new file mode 100644
index 0000000000..cd0eb978d4
--- /dev/null
+++ b/xen/arch/powerpc/papr/debug.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+
+#define DABR_BT (1UL << 2)
+#define DABR_DW (1UL << 1)
+#define DABR_DR (1UL << 0)
+
+#define DABRX_BT (1UL << 3)
+#define DABRX_HYP (1UL << 2)
+#define DABRX_PNH (1UL << 1)
+#define DABRX_PRO (1UL << 0)
+
+static inline int has_dabrx(void) { return 1; }
+
+static void h_set_dabr(struct cpu_user_regs *regs)
+{
+ ulong dabr = regs->gprs[4];
+
+ if (!has_dabrx()) {
+ if (!(dabr & DABR_BT)) {
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+ } else {
+ asm volatile("mtspr %0,%1" : : "I" (SPRN_DABRX), "r" (2) : "memory");
+ }
+ asm volatile("mtspr %0,%1" : : "I" (SPRN_DABR), "r" (dabr) : "memory");
+ regs->gprs[3] = H_Success;
+}
+
+static void h_set_xdabr(struct cpu_user_regs *regs)
+{
+ ulong dabr = regs->gprs[4];
+ ulong dabrx = regs->gprs[5];
+
+ if (!has_dabrx()) {
+ regs->gprs[3] = H_Function;
+ return;
+ }
+ /* make sure reserved bits are 0 */
+ if ((dabrx & ~((DABRX_BT << 1) - 1)) != 0) {
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+ if ((dabrx & DABRX_HYP) || dabrx == 0) {
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+ asm volatile("mtspr %0,%1; mtspr %2,%3"
+ : /* output */ :
+ "I" (SPRN_DABR), "r" (dabr),
+ "I" (SPRN_DABRX), "r" (dabrx) : "memory");
+
+ regs->gprs[3] = H_Success;
+}
+
+__init_papr_hcall(H_SET_DABR, h_set_dabr);
+__init_papr_hcall(H_SET_XDABR, h_set_xdabr);
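/* Editorial note, not part of the original patch: the register convention
 * these handlers rely on -- hcall arguments arrive in the guest's r4, r5, ...
 * (read here as regs->gprs[4], regs->gprs[5], ...) and the status is returned
 * in r3 (regs->gprs[3]).  A hypothetical guest-side H_SET_XDABR call would
 * therefore look roughly like:
 *
 *     r3 = H_SET_XDABR;
 *     r4 = dabr_value;
 *     r5 = dabrx_value;
 *     sc 1;                  // PAPR hypervisor call; status comes back in r3
 */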
diff --git a/xen/arch/powerpc/papr/tce.c b/xen/arch/powerpc/papr/tce.c
new file mode 100644
index 0000000000..2da90ba7c9
--- /dev/null
+++ b/xen/arch/powerpc/papr/tce.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#define DEBUG
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+#include "../tce.h"
+#include "../iommu.h"
+
+static void h_put_tce(struct cpu_user_regs *regs)
+{
+ u32 liobn = regs->gprs[4];
+ ulong ioba = regs->gprs[5];
+ u64 tce_dword = regs->gprs[6];
+ union tce tce;
+
+ tce.tce_dword = tce_dword;
+
+ if (iommu_put(liobn, ioba, tce) == -1) {
+ regs->gprs[3] = H_Parameter;
+ } else {
+ regs->gprs[3] = H_Success;
+ }
+}
+
+static void h_get_tce(struct cpu_user_regs *regs)
+{
+ u32 liobn = regs->gprs[4];
+ ulong ioba = regs->gprs[5];
+
+#ifdef DEBUG
+ printk("%s: liobn: 0x%x ioba: 0x%lx \n", __func__, liobn, ioba);
+#endif
+ regs->gprs[3] = H_Function;
+ for(;;) ;
+}
+
+static void h_stuff_tce(struct cpu_user_regs *regs)
+{
+ u32 liobn = regs->gprs[4];
+ ulong ioba = regs->gprs[5];
+ u64 tce_dword = regs->gprs[6];
+ ulong count = regs->gprs[7];
+ union tce tce;
+
+ tce.tce_dword = tce_dword;
+#ifdef DEBUG
+ printk("%s: liobn: 0x%x ioba: 0x%lx tce: 0x%"
+ PRIx64"(0x%"PRIx64") count: %lu\n",
+ __func__, liobn, ioba, tce.tce_dword, (long)tce.tce_bits.tce_rpn,
+ count);
+#endif
+ regs->gprs[3] = H_Function;
+ for(;;);
+}
+
+__init_papr_hcall(H_PUT_TCE, h_put_tce);
+__init_papr_hcall(H_GET_TCE, h_get_tce);
+__init_papr_hcall(H_STUFF_TCE, h_stuff_tce);
diff --git a/xen/arch/powerpc/papr/vtce.c b/xen/arch/powerpc/papr/vtce.c
new file mode 100644
index 0000000000..2d7c3657f3
--- /dev/null
+++ b/xen/arch/powerpc/papr/vtce.c
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#define DEBUG
+
+/* support for creating virtual TCE tables for VIO */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+#include "../tce.h"
+
+static inline ulong calc_pages(ulong dma_window_size)
+{
+ ulong pages_in_window = PFN_DOWN(dma_window_size);
+
+ return PFN_DOWN(pages_in_window * sizeof (union tce));
+}
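/* Editorial note, not part of the original patch: calc_pages() worked through
 * for a hypothetical 256MiB DMA window with 4KiB pages:
 *     pages_in_window = 256MiB / 4KiB              = 65536 TCEs
 *     table size      = 65536 * sizeof(union tce)  = 65536 * 8 = 512KiB
 *     calc_pages()    = 512KiB / 4KiB              = 128 pages of table
 */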
+
+void vtce_ia(struct tce_data *tce_data)
+{
+ ulong size = tce_data->t_entries * sizeof (tce_data->t_tce[0]);
+ memset(tce_data->t_tce, 0, size);
+}
+
+ulong vtce_alloc(
+ struct tce_data *tce_data,
+ ulong base,
+ ulong dma_window_size)
+{
+ ulong entries = PFN_DOWN(dma_window_size);
+ ulong size = calc_pages(dma_window_size) * PAGE_SIZE;
+
+ tce_data->t_tce = alloc_xenheap_pages(get_order(size));
+ if (NULL != tce_data->t_tce) {
+ memset(tce_data->t_tce, 0, size);
+ tce_data->t_entries = entries;
+ tce_data->t_base = base;
+ tce_data->t_alloc_size = size;
+ return dma_window_size;
+ }
+ return 0;
+}
+
+void vtce_free(struct tce_data *tce_data)
+{
+ BUG_ON(NULL == tce_data);
+ BUG_ON(NULL == tce_data->t_tce);
+ free_xenheap_pages(tce_data->t_tce, get_order(tce_data->t_alloc_size));
+ tce_data->t_entries = 0;
+ tce_data->t_base = 0;
+ tce_data->t_alloc_size = 0;
+ tce_data->t_tce = NULL;
+}
+
+int vtce_put(struct tce_data *tce_data, ulong ioba, union tce ltce)
+{
+ int pg;
+ volatile union tce *ptce;
+ union tce *tce;
+ int entries;
+
+ BUG_ON(tce_data == NULL);
+
+ tce = tce_data->t_tce;
+ entries = tce_data->t_entries;
+
+ pg = ioba >> PAGE_SHIFT;
+ if (pg >= entries) {
+ return H_Parameter;
+ }
+ ptce = &tce[pg];
+
+ /* needs to occur atomically; we don't care what was there before */
+
+ ptce->tce_dword = ltce.tce_dword;
+
+ return H_Success;
+}
+
+void *vtce_bd_xlate(struct tce_data *tce_data, union tce_bdesc bd)
+{
+ ulong pg;
+ ulong s = bd.lbd_bits.lbd_addr;
+ ulong sz = bd.lbd_bits.lbd_len;
+ ulong ep;
+ ulong bytes;
+ union tce *tce;
+ ulong entries;
+
+ BUG_ON(tce_data == NULL);
+
+ tce = tce_data->t_tce;
+ entries = tce_data->t_entries;
+
+ pg = s >> PAGE_SHIFT;
+ bytes = s - ALIGN_DOWN(s, PAGE_SIZE);
+
+ ep = ALIGN_UP(s + sz, PAGE_SIZE) >> PAGE_SHIFT;
+ s = ALIGN_DOWN(s, PAGE_SIZE) >> PAGE_SHIFT;
+
+ /* make sure all consecutive pages are represented */
+ while (s < ep) {
+ ulong rw;
+
+ if (s >= entries) {
+ return NULL;
+ }
+ rw = tce[s].tce_bits.tce_read << 1;
+ rw |= tce[s].tce_bits.tce_write;
+
+ switch (rw) {
+ case 0:
+ return NULL;
+ break;
+
+#ifdef DEBUG
+ case 1:
+ printk("%s: tce WO\n", __func__);
+ break;
+ case 2:
+ printk("%s: tce RO\n", __func__);
+ break;
+#endif
+ case 3:
+ default:
+ break;
+ }
+ ++s;
+ }
+
+ pg = (tce[pg].tce_bits.tce_rpn << PAGE_SHIFT) + bytes;
+ return (void *)pg;
+}
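/* Editorial sketch, not part of the original patch: how a virtual-IO backend
 * might consume vtce_bd_xlate().  Only vtce_bd_xlate() itself comes from this
 * file; "example_vio_copy_in" and its arguments are hypothetical. */
static inline int example_vio_copy_in(struct tce_data *td, union tce_bdesc bd,
                                      void *dst)
{
    /* NULL means some page covered by the descriptor lacks read/write access */
    void *src = vtce_bd_xlate(td, bd);

    if (src == NULL)
        return H_Parameter;

    memcpy(dst, src, bd.lbd_bits.lbd_len);
    return H_Success;
}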
diff --git a/xen/arch/powerpc/papr/vterm.c b/xen/arch/powerpc/papr/vterm.c
new file mode 100644
index 0000000000..eb6f67ff0a
--- /dev/null
+++ b/xen/arch/powerpc/papr/vterm.c
@@ -0,0 +1,70 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+
+static void h_put_term_char(struct cpu_user_regs *regs)
+{
+ char data[(sizeof (u64) * 4) + 1];
+ ulong count;
+ extern void serial_puts(int handle, const char *s);
+
+ /* XXX what to do with 'channel' in r4? */
+
+ count = regs->gprs[5];
+ if (count > 16) {
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+
+ memcpy(data, &regs->gprs[6], count);
+ data[count] = '\0';
+
+ serial_puts(0, data);
+ regs->gprs[3] = H_Success;
+}
+
+static void h_get_term_char(struct cpu_user_regs *regs)
+{
+ /* temporary hack to let us use xmon in dom0 */
+ extern char serial_getc_nb(int handle);
+ char c;
+
+ c = serial_getc_nb(0);
+ if (c > 0) {
+ regs->gprs[4] = 1;
+ regs->gprs[5] = (ulong)c << (7 * 8);
+ regs->gprs[6] = 0; /* paranoid */
+ } else {
+ regs->gprs[4] = 0;
+ }
+
+ regs->gprs[3] = H_Success;
+}
+
+__init_papr_hcall(H_PUT_TERM_CHAR, h_put_term_char);
+__init_papr_hcall(H_GET_TERM_CHAR, h_get_term_char);
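/* Editorial note, not part of the original patch: the data packing these two
 * handlers assume.  For H_PUT_TERM_CHAR the guest passes the byte count
 * (at most 16) in r5 and the characters packed into r6/r7 starting at the
 * most significant byte; for H_GET_TERM_CHAR the handler returns the count
 * in r4 and places a single character in the top byte of r5
 * ((ulong)c << 56). */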
diff --git a/xen/arch/powerpc/papr/xlate.c b/xen/arch/powerpc/papr/xlate.c
new file mode 100644
index 0000000000..fe5be75e18
--- /dev/null
+++ b/xen/arch/powerpc/papr/xlate.c
@@ -0,0 +1,499 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#undef DEBUG
+#undef DEBUG_FAIL
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/init.h>
+#include <public/xen.h>
+#include <asm/current.h>
+#include <asm/papr.h>
+#include <asm/hcalls.h>
+
+static void not_yet(struct cpu_user_regs *regs)
+{
+ printk("not implemented yet: 0x%lx\n", regs->gprs[3]);
+ for (;;);
+}
+
+#ifdef USE_PTE_INSERT
+static inline void pte_insert(union pte volatile *pte,
+ ulong vsid, ulong rpn, ulong lrpn)
+{
+ /*
+ * It's required that external locking be done to provide
+ * exclusion between the choices of insertion points. Any valid
+ * choice of pte requires that the pte be invalid upon entry to
+ * this function.
+ */
+
+ ASSERT( (pte->bits.v == 0) );
+
+ /* Set shadow word. */
+ (void)lrpn;
+
+ /* Set the second word first so the valid bit is the last thing set */
+ pte->words.rpn = rpn;
+
+ /* Guarantee the second word is visible before the valid bit */
+ __asm__ __volatile__("eieio" : : : "memory");
+
+ /* Now set the first word including the valid bit */
+ pte->words.vsid = vsid;
+ /* Architecturally this instruction will cause a heavier operation
+ * if this one is not supported. Note: on some machines, like Cell,
+ * this could be a nop. */
+ __asm__ __volatile__("ptesync" : : : "memory");
+}
+#endif
+
+static void pte_tlbie(union pte volatile *pte, ulong ptex)
+{
+ ulong va;
+ ulong vsid;
+ ulong group;
+ ulong pi;
+ ulong pi_high;
+
+ vsid = pte->bits.avpn >> 5;
+ group = ptex >> 3;
+ if (pte->bits.h) {
+ group = ~group;
+ }
+ pi = (vsid ^ group) & 0x7ff;
+ pi_high = (pte->bits.avpn & 0x1f) << 11;
+ pi |= pi_high;
+ va = (pi << 12) | (vsid << 28);
+ va &= ~(0xffffULL << 48);
+
+#ifndef FLUSH_THE_WHOLE_THING
+ if (pte->bits.l) {
+ va |= (pte->bits.rpn & 1);
+ asm volatile("ptesync ;tlbie %0,1" : : "r"(va) : "memory");
+ } else {
+ asm volatile("ptesync; tlbie %0,0" : : "r"(va) : "memory");
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+#else
+ {
+ unsigned i;
+ ulong rb;
+
+ for (i = 0; i < 256; i++) {
+ rb = i;
+ rb <<= 12;
+ asm volatile("ptesync; tlbie %0,0; eieio; tlbsync; ptesync; isync"
+ : "=r" (rb): : "memory");
+ asm volatile("ptesync; tlbie %0,1; eieio; tlbsync; ptesync; isync"
+ : "=r" (rb): : "memory");
+ }
+ }
+#endif
+
+}
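/* Editorial note, not part of the original patch: pte_tlbie() above inverts
 * the primary hash function of the hashed page table,
 *     PTEG index = (VSID ^ page_index) & 0x7ff,
 * to recover the virtual address to invalidate:
 *     vsid      = AVPN >> 5            (AVPN = VSID || top 5 bits of page_index)
 *     group     = ptex >> 3            (8 PTEs per group; complemented when the
 *                                       PTE's H bit selects the secondary hash)
 *     pi[10:0]  = (vsid ^ group) & 0x7ff
 *     pi[15:11] = AVPN & 0x1f
 *     va        = (pi << 12) | (vsid << 28), with the top 16 bits cleared
 *                                       before being handed to tlbie.
 */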
+
+static void h_enter(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+
+ union pte pte;
+ union pte volatile *ppte;
+ struct domain_htab *htab;
+ int lp_bits = 0;
+ int pgshift = PAGE_SHIFT;
+ ulong idx;
+ int limit = 0; /* how many PTEs to examine in the PTEG */
+ ulong lpn;
+ ulong rpn;
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ int mtype;
+
+ htab = &d->arch.htab;
+ if (ptex >= (1UL << htab->log_num_ptes)) {
+ regs->gprs[3] = H_Parameter;
+ printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
+ return;
+ }
+
+ /* use local HPTE to avoid manual shifting & masking */
+ pte.words.vsid = regs->gprs[6];
+ pte.words.rpn = regs->gprs[7];
+
+ if ( pte.bits.l ) { /* large page? */
+ /* figure out the page size for the selected large page */
+ ulong lp_rpn = pte.bits.rpn;
+ uint lp_size = 0;
+
+ while ( lp_rpn & 0x1 ) {
+ lp_rpn >>= 1;
+ lp_bits = ((lp_bits << 1) | 0x1);
+ lp_size++;
+ }
+
+ if ( lp_size >= d->arch.large_page_sizes ) {
+ printk("%s: attempt to use unsupported lp_size %d\n",
+ __func__, lp_size);
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+
+ /* get correct pgshift value */
+ pgshift = d->arch.large_page_shift[lp_size];
+ }
+
+ /* get the correct logical RPN in terms of 4K pages; we need to mask
+ * off the lp bits and unused arpn bits if this is a large page */
+
+ lpn = ~0ULL << (pgshift - 12);
+ lpn = pte.bits.rpn & lpn;
+
+ rpn = pfn2mfn(d, lpn, &mtype);
+
+ if (mtype == PFN_TYPE_IO) {
+ /* only a privileged dom can access outside IO space */
+ if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
+ regs->gprs[3] = H_Privilege;
+ printk("%s: unprivileged access to logical page: 0x%lx\n",
+ __func__, lpn);
+ return;
+ }
+
+ if ( !((pte.bits.w == 0)
+ && (pte.bits.i == 1)
+ && (pte.bits.g == 1)) ) {
+#ifdef DEBUG_FAIL
+ printk("%s: expecting an IO WIMG "
+ "w=%x i=%d m=%d, g=%d\n word 0x%lx\n", __func__,
+ pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
+ pte.words.rpn);
+#endif
+ regs->gprs[3] = H_Parameter;
+ return;
+ }
+ }
+ /* fixup the RPN field of our local PTE copy */
+ pte.bits.rpn = rpn | lp_bits;
+
+ /* clear reserved bits in high word */
+ pte.bits.lock = 0x0;
+ pte.bits.res = 0x0;
+
+ /* clear reserved bits in low word */
+ pte.bits.pp0 = 0x0;
+ pte.bits.ts = 0x0;
+ pte.bits.res2 = 0x0;
+
+ if ( !(flags & H_EXACT) ) {
+ /* PTEG (not specific PTE); clear 3 lowest bits */
+ ptex &= ~0x7UL;
+ limit = 7;
+ }
+
+ /* data manipulations should be done prior to the pte insertion. */
+ if ( flags & H_ZERO_PAGE ) {
+ memset((void *)(rpn << PAGE_SHIFT), 0, 1UL << pgshift);
+ }
+
+ if ( flags & H_ICACHE_INVALIDATE ) {
+ ulong k;
+ ulong addr = rpn << PAGE_SHIFT;
+
+ for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
+ dcbst(addr + k);
+ sync();
+ icbi(addr + k);
+ sync();
+ isync();
+ }
+ }
+
+ if ( flags & H_ICACHE_SYNCHRONIZE ) {
+ ulong k;
+ ulong addr = rpn << PAGE_SHIFT;
+ for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
+ icbi(addr + k);
+ sync();
+ isync();
+ }
+ }
+
+ for (idx = ptex; idx <= ptex + limit; idx++) {
+ ppte = &htab->map[idx];
+
+ if ( ppte->bits.v == 0 && ppte->bits.lock == 0) {
+ /* got it */
+
+ asm volatile(
+ "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
+ :
+ : "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
+ : "memory");
+
+ regs->gprs[3] = H_Success;
+ regs->gprs[4] = idx;
+
+ return;
+ }
+ }
+
+ /* If the PTEG is full then no additional values are returned. */
+ printk("%s: PTEG FULL\n", __func__);
+
+ regs->gprs[3] = H_PTEG_Full;
+}
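/* Editorial sketch, not part of the original patch: the large-page decode at
 * the top of h_enter(), pulled out as a helper.  The count of trailing 1 bits
 * in the PTE RPN field selects an entry of d->arch.large_page_shift[]; an RPN
 * ending in binary ...0111, for example, selects index 3.  The helper name is
 * illustrative only. */
static inline uint example_decode_lp_size(ulong lp_rpn, int *lp_bits)
{
    uint lp_size = 0;

    *lp_bits = 0;
    while (lp_rpn & 0x1) {                 /* count trailing 1 bits */
        lp_rpn >>= 1;
        *lp_bits = (*lp_bits << 1) | 0x1;
        lp_size++;
    }
    return lp_size;                        /* index into large_page_shift[] */
}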
+
+static void h_protect(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ ulong avpn = regs->gprs[6];
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ struct domain_htab *htab = &d->arch.htab;
+ union pte volatile *ppte;
+ union pte lpte;
+
+#ifdef DEBUG
+ printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
+ flags, ptex, avpn);
+#endif
+ if ( ptex >= (1UL << htab->log_num_ptes) ) {
+ regs->gprs[3] = H_Parameter;
+ printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
+ return;
+ }
+ ppte = &htab->map[ptex];
+
+ lpte.words.vsid = ppte->words.vsid;
+ lpte.words.rpn = ppte->words.rpn;
+
+ /* the AVPN param occupies the bit-space of the word */
+ if ( (flags & H_AVPN) && lpte.bits.avpn != avpn >> 7 ) {
+#ifdef DEBUG_FAIL
+ printk("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
+ ppte, lpte.words.vsid, lpte.words.rpn);
+#endif
+ regs->gprs[3] = H_Not_Found;
+ return;
+ }
+
+ if (lpte.bits.v == 0) {
+ /* the PAPR does not specify what to do here; this is because
+ * we invalidate entries where the PAPR says to zero the whole high
+ * dword, so the AVPN check should catch this first */
+
+#ifdef DEBUG_FAIL
+ printk("%s: pte invalid\n", __func__);
+#endif
+ regs->gprs[3] = H_Not_Found;
+ return;
+ }
+
+ lpte.bits.v = 0;
+
+ /* ppte->words.vsid = lpte.words.vsid; */
+ asm volatile(
+ "eieio; std %1, 0(%0); ptesync"
+ :
+ : "b" (ppte), "r" (0)
+ : "memory");
+
+ pte_tlbie(&lpte, ptex);
+
+ /* We never touch pp0, and PP bits in flags are in the right
+ * order */
+ lpte.bits.pp1 = flags & (H_PP1 | H_PP2);
+ lpte.bits.n = (flags & H_N) ? 1 : 0;
+
+ lpte.bits.v = 1;
+ lpte.bits.r = 0;
+
+ asm volatile(
+ "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
+ :
+ : "b" (ppte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
+ : "memory");
+
+ regs->gprs[3] = H_Success;
+}
+
+static void h_clear_ref(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ struct domain_htab *htab = &d->arch.htab;
+ union pte volatile *pte;
+ union pte lpte;
+
+#ifdef DEBUG
+ printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
+ flags, ptex);
+#endif
+
+ if (flags != 0) {
+ printk("WARNING: %s: "
+ "flags are undefined and should be 0: 0x%lx\n",
+ __func__, flags);
+ }
+
+ if (ptex >= (1UL << htab->log_num_ptes)) {
+ regs->gprs[3] = H_Parameter;
+ printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
+ return;
+ }
+ pte = &htab->map[ptex];
+ lpte.words.rpn = pte->words.rpn;
+
+ regs->gprs[4] = lpte.words.rpn;
+
+ if (lpte.bits.r != 0) {
+ lpte.bits.r = 0;
+
+ asm volatile("std %1, 8(%0); eieio; ptesync"
+ :
+ : "b" (pte), "r" (lpte.words.rpn) : "memory");
+
+ pte_tlbie(&lpte, ptex);
+ }
+ regs->gprs[3] = H_Success;
+}
+
+static void h_clear_mod(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ struct domain_htab *htab = &d->arch.htab;
+ union pte volatile *pte;
+ union pte lpte;
+
+#ifdef DEBUG
+ printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
+ flags, ptex);
+#endif
+ if (flags != 0) {
+ printk("WARNING: %s: "
+ "flags are undefined and should be 0: 0x%lx\n",
+ __func__, flags);
+ }
+
+ if (ptex >= (1UL << htab->log_num_ptes)) {
+ regs->gprs[3] = H_Parameter;
+ printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
+ return;
+ }
+ pte = &htab->map[ptex];
+ lpte.words.vsid = pte->words.vsid;
+ lpte.words.rpn = pte->words.rpn;
+
+ regs->gprs[3] = H_Success;
+ regs->gprs[4] = lpte.words.rpn;
+
+ if (lpte.bits.c != 0) {
+ /* invalidate */
+ asm volatile(
+ "eieio; std %1, 0(%0); ptesync"
+ :
+ : "b" (pte), "r" (0)
+ : "memory");
+
+ pte_tlbie(&lpte, ptex);
+
+ lpte.bits.c = 0;
+ asm volatile(
+ "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
+ :
+ : "b" (pte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
+ : "memory");
+ }
+}
+
+static void h_remove(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ ulong avpn = regs->gprs[6];
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ struct domain_htab *htab = &d->arch.htab;
+ union pte volatile *pte;
+ union pte lpte;
+
+#ifdef DEBUG
+ printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
+ flags, ptex, avpn);
+#endif
+ if ( ptex >= (1UL << htab->log_num_ptes) ) {
+ regs->gprs[3] = H_Parameter;
+ printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
+ return;
+ }
+ pte = &htab->map[ptex];
+ lpte.words.vsid = pte->words.vsid;
+ lpte.words.rpn = pte->words.rpn;
+
+ if ((flags & H_AVPN) && lpte.bits.avpn != (avpn >> 7)) {
+#ifdef DEBUG_FAIL
+ printk("%s: avpn doesn not match\n", __func__);
+#endif
+ regs->gprs[3] = H_Not_Found;
+ return;
+ }
+
+ if ((flags & H_ANDCOND) && ((avpn & pte->words.vsid) != 0)) {
+#ifdef DEBUG_FAIL
+ printk("%s: andcond does not match\n", __func__);
+#endif
+ regs->gprs[3] = H_Not_Found;
+ return;
+ }
+
+ regs->gprs[3] = H_Success;
+ /* return old PTE in regs 4 and 5 */
+ regs->gprs[4] = lpte.words.vsid;
+ regs->gprs[5] = lpte.words.rpn;
+
+ /* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
+ /* XXX - I think the spec should be questioned in this case (MFM) */
+ if (pte->bits.v == 0) {
+ printk("%s: removing invalid entry\n", __func__);
+ }
+ asm volatile("eieio; std %1, 0(%0); ptesync"
+ :
+ : "b" (pte), "r" (0)
+ : "memory");
+
+ pte_tlbie(&lpte, ptex);
+}
+
+__init_papr_hcall(H_ENTER, h_enter);
+__init_papr_hcall(H_READ, not_yet);
+__init_papr_hcall(H_REMOVE, h_remove);
+__init_papr_hcall(H_CLEAR_MOD, h_clear_mod);
+__init_papr_hcall(H_CLEAR_REF, h_clear_ref);
+__init_papr_hcall(H_PROTECT, h_protect);
diff --git a/xen/arch/powerpc/physdev.c b/xen/arch/powerpc/physdev.c
new file mode 100644
index 0000000000..b7dd98b827
--- /dev/null
+++ b/xen/arch/powerpc/physdev.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/types.h>
+#include <public/xen.h>
+extern long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
+#include "../x86/physdev.c"
diff --git a/xen/arch/powerpc/powerpc64/Makefile b/xen/arch/powerpc/powerpc64/Makefile
new file mode 100644
index 0000000000..a659410db8
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/Makefile
@@ -0,0 +1,11 @@
+io.o: CFLAGS += -Wa,-mppc64bridge
+
+obj-y += domain.o
+obj-y += exceptions.o
+obj-y += hypercall_table.o
+obj-y += io.o
+obj-y += memcpy.o
+obj-y += ppc970.o
+obj-y += prom_call.o
+obj-y += string.o
+obj-y += traps.o
diff --git a/xen/arch/powerpc/powerpc64/asm-offsets.c b/xen/arch/powerpc/powerpc64/asm-offsets.c
new file mode 100644
index 0000000000..60fa4c631c
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/asm-offsets.c
@@ -0,0 +1,65 @@
+/* from xen/arch/x86/x86_32/asm-offsets.c */
+
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <xen/sched.h>
+#include <public/xen.h>
+#include <asm/powerpc64/procarea.h>
+#include <asm/hardirq.h>
+
+#define DEFINE(_sym, _val) \
+ __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
+#define BLANK() \
+ __asm__ __volatile__ ( "\n->" : : )
+#define OFFSET(_sym, _str, _mem) \
+ DEFINE(_sym, offsetof(_str, _mem));
+
+/* base-2 logarithm */
+#define __L2(_x) (((_x) & 0x00000002) ? 1 : 0)
+#define __L4(_x) (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
+#define __L8(_x) (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
+#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
+#define LOG_2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
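/* Editorial note, not part of the original patch: LOG_2() folds to a constant
 * base-2 logarithm for power-of-two arguments, e.g. LOG_2(8) == 3 and
 * LOG_2(128) == 7.  IRQSTAT_shift below uses it so the assembly exception
 * paths can index the per-cpu irq_stat[] array with a shift (sldi) rather
 * than a multiply. */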
+
+extern void __dummy__(void);
+void __dummy__(void)
+{
+ DEFINE(GPR_WIDTH, sizeof(unsigned long));
+ DEFINE(FPR_WIDTH, sizeof(double));
+
+ OFFSET(PAREA_vcpu, struct processor_area, cur_vcpu);
+ OFFSET(PAREA_stack, struct processor_area, hyp_stack_base);
+ OFFSET(PAREA_r1, struct processor_area, saved_regs[0]);
+ OFFSET(PAREA_cr, struct processor_area, saved_regs[1]);
+
+ OFFSET(UREGS_gprs, struct cpu_user_regs, gprs);
+ OFFSET(UREGS_r0, struct cpu_user_regs, gprs[0]);
+ OFFSET(UREGS_r1, struct cpu_user_regs, gprs[1]);
+ OFFSET(UREGS_r13, struct cpu_user_regs, gprs[13]);
+ OFFSET(UREGS_srr0, struct cpu_user_regs, srr0);
+ OFFSET(UREGS_srr1, struct cpu_user_regs, srr1);
+ OFFSET(UREGS_pc, struct cpu_user_regs, pc);
+ OFFSET(UREGS_msr, struct cpu_user_regs, msr);
+ OFFSET(UREGS_lr, struct cpu_user_regs, lr);
+ OFFSET(UREGS_ctr, struct cpu_user_regs, ctr);
+ OFFSET(UREGS_xer, struct cpu_user_regs, xer);
+ OFFSET(UREGS_hid4, struct cpu_user_regs, hid4);
+ OFFSET(UREGS_cr, struct cpu_user_regs, cr);
+ OFFSET(UREGS_fpscr, struct cpu_user_regs, fpscr);
+ DEFINE(UREGS_sizeof, sizeof(struct cpu_user_regs));
+
+ OFFSET(VCPU_fprs, struct vcpu, arch.fprs);
+ OFFSET(VCPU_fpscr, struct vcpu, arch.ctxt.fpscr);
+ OFFSET(VCPU_vrs, struct vcpu, arch.vrs);
+ OFFSET(VCPU_vscr, struct vcpu, arch.vscr);
+ OFFSET(VCPU_vrsave, struct vcpu, arch.vrsave);
+ OFFSET(VCPU_dec, struct vcpu, arch.dec);
+ OFFSET(VCPU_processor, struct vcpu, processor);
+
+ DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+ OFFSET(IRQSTAT_pending, irq_cpustat_t, __softirq_pending);
+}
diff --git a/xen/arch/powerpc/powerpc64/domain.c b/xen/arch/powerpc/powerpc64/domain.c
new file mode 100644
index 0000000000..4b205f969e
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/domain.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <xen/mm.h>
+#include <asm/current.h>
+
+void save_sprs(struct vcpu *v)
+{
+ v->arch.timebase = mftb();
+
+ v->arch.sprg[0] = mfsprg0();
+ v->arch.sprg[1] = mfsprg1();
+ v->arch.sprg[2] = mfsprg2();
+ v->arch.sprg[3] = mfsprg3();
+
+ v->arch.dar = mfdar();
+ v->arch.dsisr = mfdsisr();
+
+ save_cpu_sprs(v);
+}
+
+void load_sprs(struct vcpu *v)
+{
+ ulong timebase_delta;
+
+ mtsprg0(v->arch.sprg[0]);
+ mtsprg1(v->arch.sprg[1]);
+ mtsprg2(v->arch.sprg[2]);
+ mtsprg3(v->arch.sprg[3]);
+ mtdar(v->arch.dar);
+ mtdsisr(v->arch.dsisr);
+
+ load_cpu_sprs(v);
+
+ /* adjust the DEC value to account for cycles while not
+ * running this OS */
+ timebase_delta = mftb() - v->arch.timebase;
+ v->arch.dec -= timebase_delta;
+}
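/* Editorial note, not part of the original patch: the DEC adjustment above in
 * numbers -- if the timebase advanced by, say, 1000 ticks while this vcpu was
 * descheduled, its saved DEC is reduced by 1000 on resume, so the decrementer
 * still expires at (approximately) the originally programmed point in real
 * time rather than being pushed back by the time spent descheduled. */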
+
+/* XXX evaluate all isyncs in segment code */
+
+static void flush_slb(struct vcpu *v)
+{
+ struct slb_entry *slb0 = &v->arch.slb_entries[0];
+
+ slbia();
+
+ /* we manually have to invalidate SLB[0] since slbia doesn't. */
+ /* XXX name magic constants! */
+ if (slb0->slb_esid & (1 << (63 - 36))) {
+ ulong rb;
+ ulong class;
+
+ class = (slb0->slb_vsid >> (63 - 56)) & 1ULL;
+ rb = slb0->slb_esid & (~0ULL << (63 - 35));
+ rb |= class << (63 - 36);
+
+ slbie(rb);
+ }
+}
+
+void save_segments(struct vcpu *v)
+{
+ struct slb_entry *slb_entry = v->arch.slb_entries;
+ int i;
+
+ /* save all extra SLBs */
+ for (i = 0; i < NUM_SLB_ENTRIES; i++) {
+ ulong vsid;
+ ulong esid;
+
+ __asm__ __volatile__(
+ "slbmfev %0,%2\n"
+ "slbmfee %1,%2\n"
+ :"=&r"(vsid), "=&r"(esid)
+ :"r"(i)
+ :"memory");
+
+ /* FIXME: should we bother to save invalid entries? */
+ slb_entry[i].slb_vsid = vsid;
+ slb_entry[i].slb_esid = esid;
+#ifdef SLB_DEBUG
+ if (vsid != 0) {
+ printf("%s: DOM[0x%x]: S%02d: 0x%016lx 0x%016lx\n",
+ __func__, v->domain->domain_id, i, vsid, esid);
+ }
+#endif
+ }
+
+ flush_slb(v);
+}
+
+void load_segments(struct vcpu *v)
+{
+ struct slb_entry *slb_entry = v->arch.slb_entries;
+ int i;
+
+ /* restore all extra SLBs */
+ for (i = 0; i < NUM_SLB_ENTRIES; i++) {
+ ulong vsid = slb_entry[i].slb_vsid;
+ ulong esid = slb_entry[i].slb_esid;
+
+ /* FIXME: should we bother to restore invalid entries */
+ /* stuff in the index here */
+ esid |= i & ((0x1UL << (63 - 52 + 1)) - 1);
+
+ __asm__ __volatile__(
+ "isync\n"
+ "slbmte %0,%1\n"
+ "isync\n"
+ :
+ :"r" (vsid), "r"(esid)
+ :"memory");
+
+#ifdef SLB_DEBUG
+ if (vsid != 0) {
+ printf("%s: DOM[0x%x]: R%02d: 0x%016lx 0x%016lx\n",
+ __func__, v->domain->domain_id, i, vsid, esid);
+ }
+#endif
+ }
+}
diff --git a/xen/arch/powerpc/powerpc64/exceptions.S b/xen/arch/powerpc/powerpc64/exceptions.S
new file mode 100644
index 0000000000..94181a1a24
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/exceptions.S
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <asm/config.h>
+#include <asm/asm-offsets.h>
+#include <asm/reg_defs.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+.macro SAVE_GPR regno uregs
+std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
+.endm
+
+.macro SAVE_GPRS from to uregs
+.ifge \to-\from
+SAVE_GPR \from, \uregs
+SAVE_GPRS "(\from+1)", \to, \uregs
+.endif
+.endm
+
+.macro LOAD_GPR regno uregs
+ld \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
+.endm
+
+.macro LOAD_GPRS from to uregs
+.ifge \to-\from
+LOAD_GPR \from, \uregs
+LOAD_GPRS "(\from+1)", \to, \uregs
+.endif
+.endm
+
+.macro GET_STACK parea srr1
+ /* get processor area pointer and save off a couple registers there */
+ mtspr SPRN_HSPRG1, \parea
+ mfspr \parea, SPRN_HSPRG0
+ std r1, PAREA_r1(\parea)
+ mfcr r1
+ std r1, PAREA_cr(\parea)
+ mfspr r1, \srr1
+ rldicl. r1, r1, 4, 63 /* test (H)SRR1:HV */
+ /* assume we interrupted the guest, in which case we start at the top of this
+ * processor's hypervisor stack (as found in parea). */
+ ld r1, PAREA_stack(\parea)
+ beq 1f
+ /* nope, we interrupted the hypervisor. continue on that stack. */
+ ld r1, PAREA_r1(\parea)
+1:
+.endm
+
+/* SAVE_C_STATE: set up enough state to jump to C code
+ * r14-r31 are non-volatile in the C ABI, so not saved here
+ */
+.macro SAVE_C_STATE uregs
+ SAVE_GPRS r2, r12, \uregs /* save r2-r12 */
+
+ mflr r0
+ std r0, UREGS_lr(\uregs) /* save LR */
+ mfxer r0
+ std r0, UREGS_xer(\uregs) /* save XER */
+.endm
+
+.macro LOAD_C_STATE uregs
+ ld r0, UREGS_lr(\uregs) /* load LR */
+ mtlr r0
+ ld r0, UREGS_xer(\uregs) /* load XER */
+ mtxer r0
+ lwz r0, UREGS_cr(\uregs) /* load CR */
+ mtcr r0
+
+ LOAD_GPRS r2, r12, \uregs /* load r2-r12 */
+.endm
+
+.macro LOADADDR reg symbol
+ lis \reg,\symbol@highest
+ ori \reg,\reg,\symbol@higher
+ rldicr \reg,\reg,32,31
+ oris \reg,\reg,\symbol@h
+ ori \reg,\reg,\symbol@l
+.endm
+
+.macro CALL_CFUNC reg
+ ld r2, 8(\reg) /* load function's TOC value */
+ ld \reg, 0(\reg)
+ mtctr \reg
+ bctrl
+ nop
+.endm
+
+.macro EXCEPTION_HEAD parea continue
+ /* make room for cpu_user_regs */
+ subi r1, r1, STACK_VOLATILE_AREA + UREGS_sizeof
+
+ /* get all we need from the processor_area */
+ std r0, UREGS_r0(r1) /* get scratch register */
+ ld r0, PAREA_r1(\parea)
+ std r0, UREGS_r1(r1) /* save R1 */
+ ld r0, PAREA_cr(\parea)
+ stw r0, UREGS_cr(r1) /* save CR */
+ mfspr r0, SPRN_HSPRG1
+ std r0, UREGS_r13(r1) /* save R13 from HSPRG1 */
+
+ /* done with processor_area; re-enable MSR:RI */
+ mfmsr r0
+ ori r0, r0, MSR_RI@l
+ mtmsrd r0
+
+ /* save CTR and use it to jump */
+ mfctr r0
+ std r0, UREGS_ctr(r1)
+ LOADADDR r0, \continue
+ mtctr r0
+.endm
+
+/* For normal exceptions. */
+.macro EXCEPTION_SAVE_STATE uregs
+ SAVE_C_STATE \uregs
+
+ /* save DEC */
+ mfdec r0
+ ld r3, PAREA_vcpu(r13)
+ stw r0, VCPU_dec(r3)
+
+ /* save PC, MSR */
+ mfspr r0, SPRN_SRR0
+ std r0, UREGS_pc(\uregs)
+ mfspr r0, SPRN_SRR1
+ std r0, UREGS_msr(\uregs)
+ li r0, -1 /* we clobbered the OS's SRR0/SRR1 to get here. */
+ std r0, UREGS_srr0(\uregs)
+ std r0, UREGS_srr1(\uregs)
+.endm
+
+/* For exceptions that use HSRR0/1 (preserving the OS's SRR0/1). */
+.macro H_EXCEPTION_SAVE_STATE uregs
+ SAVE_C_STATE \uregs
+
+ /* save DEC */
+ mfdec r0
+ ld r3, PAREA_vcpu(r13)
+ stw r0, VCPU_dec(r3)
+
+ /* save PC, MSR */
+ mfspr r0, SPRN_HSRR0
+ std r0, UREGS_pc(\uregs)
+ mfspr r0, SPRN_HSRR1
+ std r0, UREGS_msr(\uregs)
+ mfspr r0, SPRN_SRR0
+ std r0, UREGS_srr0(\uregs)
+ mfspr r0, SPRN_SRR1
+ std r0, UREGS_srr1(\uregs)
+.endm
+
+/* Hypervisor exception handling code; copied to physical address zero. */
+ .align 3
+ .globl exception_vectors
+exception_vectors:
+
+ . = 0x0 # wild branch to 0
+zero:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x0 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x100 # System Reset
+ex_reset:
+ /* XXX thread initialization */
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x100 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x200 # Machine Check
+ex_machcheck:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x200 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x300
+ex_dsi:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x300 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x380
+ex_data_slb:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x380 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x400
+ex_isi:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x400 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x480
+ex_inst_slb:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x480 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x500
+ex_external:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_external_continued
+ bctr
+
+ . = 0x600
+ex_alignment:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x600 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x700
+ex_program:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x700 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x800
+ex_float:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0x800 /* exception vector for GDB stub */
+ bctr
+
+ . = 0x900
+ex_dec:
+ /* delivered to hypervisor when MSR:EE is set... */
+#ifdef SLOW_TRAP
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_dec_continued
+ bctr
+#else
+ /* XXX for now just reset DEC and return */
+ mtspr SPRN_HSPRG1, r3
+ lis r3, 0x7fff
+ mtdec r3
+ mfspr r3, SPRN_HSPRG1
+ rfid
+#endif
+
+ . = 0x980
+ex_hdec:
+ GET_STACK r13 SPRN_HSRR1
+ EXCEPTION_HEAD r13 ex_hdec_continued
+ bctr
+
+ . = 0xc00
+ex_syscall:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_hcall_continued
+ bctr
+
+ . = 0xd00
+ex_trace:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0xd00 /* exception vector for GDB stub */
+ bctr
+
+ . = 0xe00
+ex_fp:
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_program_continued
+ li r0, 0xe00 /* exception vector for GDB stub */
+ bctr
+
+ .align 3
+ .globl exception_vectors_end
+
+exception_vectors_end:
+ /* put some stuff here so we see the next symbol */
+ .long 0xdeadbeef
+ .long 0xdeadbeef
+
+.macro FAST_RESUME
+ LOAD_C_STATE r1 /* restore most C volatiles */
+
+ ld r0, UREGS_ctr(r1)
+ mtctr r0
+
+ /* clear MSR:RI/EE to set SRR0/SRR1 */
+ li r0, 0
+ mtmsrd r0, 1
+
+ ld r0, UREGS_pc(r1)
+ mtspr SPRN_HSRR0, r0
+ ld r0, UREGS_msr(r1)
+ mtspr SPRN_HSRR1, r0
+
+ ld r0, UREGS_srr0(r1)
+ mtspr SPRN_SRR0, r0
+ ld r0, UREGS_srr1(r1)
+ mtspr SPRN_SRR1, r0
+
+ ld r13, UREGS_r13(r1)
+ ld r0, UREGS_r0(r1)
+ ld r1, UREGS_r1(r1)
+ HRFID
+ b . /* prevent speculative icache fetch */
+.endm
+
+/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
+ * clobbers r0 though, so we have to move it around a little bit. Not ideal,
+ * but hopefully program exception is not performance-critical... Maybe there's
+ * a better way, but this works for now. */
+ex_program_continued:
+ SAVE_GPRS r14, r31, r1 /* save all the non-volatiles */
+ /* save hid4 for debug */
+ mfspr r14, SPRN_HID4
+ std r14, UREGS_hid4(r1)
+ mr r14, r0
+ EXCEPTION_SAVE_STATE r1
+ mr r4, r14
+ LOADADDR r12, program_exception
+ mr r3, r1 /* pass pointer to cpu_user_regs */
+ subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
+ CALL_CFUNC r12
+
+ /* reload state and rfid */
+ addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
+ LOAD_GPRS r14, r31, r1
+ FAST_RESUME
+
+ex_external_continued:
+ EXCEPTION_SAVE_STATE r1
+ LOADADDR r12, do_external
+ mr r3, r1 /* pass pointer to cpu_user_regs */
+ subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
+ CALL_CFUNC r12
+
+ addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
+ b fast_resume
+
+ex_hcall_continued:
+ /* We have to save the non-volatiles here in case of a blocking hcall (which
+ * will end up in context_switch()). */
+ SAVE_GPRS r14, r31, r1
+ EXCEPTION_SAVE_STATE r1
+ LOADADDR r12, do_hcall
+ mr r3, r1 /* pass pointer to cpu_user_regs */
+ subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
+ CALL_CFUNC r12 /* call hcall handler */
+
+ /* test for pending softirqs, and loop until there are no more. */
+ mfmsr r14
+ ori r14, r14, MSR_EE
+ xori r15, r14, MSR_EE
+hcall_test_all_events:
+ mtmsrd r15, 1 /* disable interrupts */
+ ld r3, PAREA_vcpu(r13)
+ lwz r3, VCPU_processor(r3)
+ LOADADDR r4, irq_stat
+ sldi r3, r3, IRQSTAT_shift
+ add r4, r3, r4
+ ld r5, IRQSTAT_pending(r4)
+ cmpldi r5, 0
+ beq hcall_out /* no more softirqs; exit loop */
+
+ LOADADDR r6, do_softirq
+ mtmsrd r14, 1 /* enable interrupts */
+ CALL_CFUNC r6 /* process softirqs */
+ b hcall_test_all_events /* look for more */
+
+hcall_out:
+ addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
+ LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 */
+ b fast_resume
+
+
+ex_dec_continued:
+ EXCEPTION_SAVE_STATE r1
+ LOADADDR r12, do_dec
+ mr r3, r1 /* pass pointer to cpu_user_regs */
+ subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
+ CALL_CFUNC r12
+
+ addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
+ b fast_resume
+
+ex_hdec_continued:
+ /* When we get an HDEC, we (almost?) always context_switch, so we need to
+ * save the nonvolatiles. */
+ SAVE_GPRS r14, r31, r1
+ H_EXCEPTION_SAVE_STATE r1
+
+ LOADADDR r12, do_timer
+ mr r3, r1
+ subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
+ CALL_CFUNC r12
+
+ /* if we are resuming into hypervisor, don't handle softirqs */
+ ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
+ rldicl. r11, r10, 4, 63 /* test SRR1:HV */
+ bne hdec_out
+
+ /* test for pending softirqs, and loop until there are no more. */
+ mfmsr r14
+ ori r14, r14, MSR_EE
+ xori r15, r14, MSR_EE
+test_all_events:
+ mtmsrd r15, 1 /* disable interrupts */
+ ld r3, PAREA_vcpu(r13)
+ lwz r3, VCPU_processor(r3)
+ LOADADDR r4, irq_stat
+ sldi r3, r3, IRQSTAT_shift
+ add r4, r3, r4
+ ld r5, IRQSTAT_pending(r4)
+ cmpldi r5, 0
+ beq hdec_out /* no more softirqs; exit loop */
+
+ LOADADDR r6, do_softirq
+ mtmsrd r14, 1 /* enable interrupts */
+ CALL_CFUNC r6 /* process softirqs */
+ b test_all_events /* look for more */
+
+hdec_out:
+ addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
+ LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 in the loop */
+
+/* r1 points to the to-be-restored cpu_user_regs. These could be mid-hypervisor
+ * stack (returning into elsewhere in Xen) or at the top of the stack
+ * (restoring the domain). */
+_GLOBAL(full_resume)
+ /* disable MSR:EE, since we could have come from do_softirq() */
+ mfmsr r7
+ ori r7, r7, MSR_EE
+ xori r7, r7, MSR_EE
+ mtmsrd r7, 1
+
+ LOAD_GPRS r14, r31, r1 /* restore all non-volatiles */
+
+fast_resume:
+ ld r10, UREGS_msr(r1)
+ rldicl. r11, r10, 4, 63 /* test SRR1:HV */
+ bne 1f /* returning to hypervisor */
+
+ /* check for pending irqs */
+ mr r3, r1
+ subi r1, r1, STACK_FRAME_OVERHEAD
+ bl .deliver_ee
+ addi r1, r1, STACK_FRAME_OVERHEAD
+
+ /* if we took a DEC in hypervisor mode, we don't want to reload the DEC
+ * until we return to the domain. MSR_EE is clear, so the domain will take
+ * any impending DEC. */
+ ld r3, PAREA_vcpu(r13)
+ lwz r0, VCPU_dec(r3)
+ mtdec r0
+
+1:
+ FAST_RESUME
+ /* not reached */
+
+/* move all of the below somewhere else */
+
+_GLOBAL(papr_hcall_jump)
+ mtctr r4
+ bctr
+ /* return to caller via LR */
+
+/* XXX don't need to load all the registers */
+_GLOBAL(xen_hvcall_jump)
+ mtctr r4
+ ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
+ ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
+ ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
+ ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
+ ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
+ ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
+ ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
+ ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
+ bctr
+
+_GLOBAL(_reset_stack_and_jump)
+ ld r2, 8(r3)
+ ld r3, 0(r3)
+ mtctr r3
+ mr r1, r4
+ bctr
+
+_GLOBAL(sleep)
+ mfmsr r3
+ ori r4, r3, MSR_EE
+ oris r4, r4, MSR_POW@h
+ sync
+ mtmsrd r4
+ isync
+ mtmsrd r3
+ blr
+
+ .globl spin_start
+spin_start:
+ b .
diff --git a/xen/arch/powerpc/powerpc64/hypercall_table.S b/xen/arch/powerpc/powerpc64/hypercall_table.S
new file mode 100644
index 0000000000..5562b021cd
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/hypercall_table.S
@@ -0,0 +1,83 @@
+#include <asm/config.h>
+
+ .data
+ .align 3
+ .globl __hypercall_table
+__hypercall_table:
+ .quad 0 /* do_set_trap_table */ /* 0 */
+ .quad 0 /* do_mmu_update */
+ .quad 0 /* do_set_gdt */
+ .quad 0 /* do_stack_switch */
+ .quad 0 /* do_set_callbacks */
+ .quad 0 /* do_fpu_taskswitch */ /* 5 */
+ .quad do_sched_op
+ .quad do_dom0_op
+ .quad 0 /* do_set_debugreg */
+ .quad 0 /* do_get_debugreg */
+ .quad 0 /* do_update_descriptor */ /* 10 */
+ .quad do_ni_hypercall
+ .quad do_memory_op
+ .quad 0 /* do_multicall */
+ .quad 0 /* do_update_va_mapping */
+ .quad do_set_timer_op /* 15 */
+ .quad 0 /* do_event_channel_op_compat */
+ .quad do_xen_version
+ .quad do_console_io
+ .quad 0 /* do_physdev_op_compat */
+ .quad do_grant_table_op /* 20 */
+ .quad do_vm_assist
+ .quad 0 /* do_update_va_mapping_otherdomain */
+ .quad 0 /* do_switch_vm86 */
+ .quad do_vcpu_op
+ .quad do_ni_hypercall /* 25 */
+ .quad 0 /* do_mmuext_op */
+ .quad do_acm_op
+ .quad do_nmi_op
+ .quad do_sched_op /* do_arch_sched_op */
+ .quad 0 /* do_callback_op */ /* 30 */
+ .quad 0 /* do_xenoprof_op */
+ .quad do_event_channel_op
+ .quad do_physdev_op
+ .rept NR_hypercalls-((.-__hypercall_table)/8)
+ .quad do_ni_hypercall
+ .endr
+
+ .globl hypercall_args_table
+hypercall_args_table:
+ .byte 1 /* do_set_trap_table */ /* 0 */
+ .byte 4 /* do_mmu_update */
+ .byte 2 /* do_set_gdt */
+ .byte 2 /* do_stack_switch */
+ .byte 4 /* do_set_callbacks */
+ .byte 1 /* do_fpu_taskswitch */ /* 5 */
+ .byte 2 /* do_arch_sched_op */
+ .byte 1 /* do_dom0_op */
+ .byte 2 /* do_set_debugreg */
+ .byte 1 /* do_get_debugreg */
+ .byte 4 /* do_update_descriptor */ /* 10 */
+ .byte 0 /* do_ni_hypercall */
+ .byte 2 /* do_memory_op */
+ .byte 2 /* do_multicall */
+ .byte 4 /* do_update_va_mapping */
+ .byte 2 /* do_set_timer_op */ /* 15 */
+ .byte 1 /* do_event_channel_op */
+ .byte 2 /* do_xen_version */
+ .byte 3 /* do_console_io */
+ .byte 1 /* do_physdev_op */
+ .byte 3 /* do_grant_table_op */ /* 20 */
+ .byte 2 /* do_vm_assist */
+ .byte 5 /* do_update_va_mapping_otherdomain */
+ .byte 0 /* do_switch_vm86 */
+ .byte 2 /* do_boot_vcpu */
+ .byte 0 /* do_ni_hypercall */ /* 25 */
+ .byte 4 /* do_mmuext_op */
+ .byte 1 /* do_acm_op */
+ .byte 2 /* do_nmi_op */
+ .byte 2 /* do_arch_sched_op */
+ .byte 2 /* do_callback_op */ /* 30 */
+ .byte 2 /* do_xenoprof_op */
+ .byte 2 /* do_event_channel_op */
+ .byte 2 /* do_physdev_op */
+ .rept NR_hypercalls-(.-hypercall_args_table)
+ .byte 0 /* do_ni_hypercall */
+ .endr
diff --git a/xen/arch/powerpc/powerpc64/io.S b/xen/arch/powerpc/powerpc64/io.S
new file mode 100644
index 0000000000..c6b911e8ce
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/io.S
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2005 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/config.h>
+#include <asm/processor.h>
+
+/* Xen runs in real mode (i.e. untranslated, MMU disabled). This avoids TLB
+ * flushes and also makes it easy to access all domains' memory. However, on
+ * PowerPC real mode accesses are cacheable, which is good for general
+ * performance, but not good when trying to do IO.
+ *
+ * POWER4 and PowerPC 970 have a bit (RM_CI: Real Mode Cache Inhibit) in HID4
+ * which disables the data cache when running in real mode. Unfortunately, to
+ * change RM_CI we need to go through a fairly complicated sequence of
+ * instructions to disable the cache, make the access, and then re-enable it.
+ */
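/* Editorial sketch, not part of the original patch: a hypothetical C caller of
 * the accessors defined below (their C prototypes are assumed here, e.g.
 * u32 in_le32(volatile u32 *addr)).  Each accessor sets HID4:RM_CI around the
 * single load or store, as described above, so the caller just sees one
 * uncached MMIO access (byte-reversed for the _le variants):
 *
 *     u32 example_mpic_read(ulong base, ulong off)
 *     {
 *         return in_le32((volatile u32 *)(base + off));
 *     }
 */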
+
+/* Not all useful assemblers understand 'tlbiel'.
+ * 'addr' is a GPR containing the address being accessed.
+ */
+.macro tlbiel addr
+ .long 0x7c000224 | (\addr << 11)
+.endm
+
+.macro DISABLE_DCACHE addr
+ mfmsr r8
+ /* disable interrupts */
+ li r6, 0
+ ori r6, r6, MSR_EE
+ andc r5, r8, r6
+ mtmsr r5
+
+ /* set HID4.RM_CI */
+ mfspr r9, SPRN_HID4
+ li r6, 0x100
+ sldi r6, r6, 32
+ or r5, r9, r6
+ tlbiel \addr /* invalidate the ERAT entry */
+ sync
+ mtspr SPRN_HID4, r5
+ isync
+.endm
+
+.macro ENABLE_DCACHE addr
+ /* re-zero HID4.RM_CI */
+ tlbiel \addr /* invalidate the ERAT entry */
+ sync
+ mtspr SPRN_HID4, r9
+ isync
+
+ /* re-enable interrupts */
+ mtmsr r8
+.endm
+
+/* The following assembly cannot use r8 or r9 since they hold original
+ * values of msr and hid4, respectively
+ */
+#undef r8
+#define r8 do_not_use_r8
+#undef r9
+#define r9 do_not_use_r9
+
+/* XXX remove isyncs */
+
+_GLOBAL(in_8)
+ DISABLE_DCACHE r3
+ lbz r4,0(r3)
+ twi 0,r4,0
+ isync
+ ENABLE_DCACHE r3
+ mr r3, r4
+ blr
+
+_GLOBAL(out_8)
+ DISABLE_DCACHE r3
+ stb r4,0(r3)
+ sync
+ ENABLE_DCACHE r3
+ blr
+
+_GLOBAL(in_32)
+ DISABLE_DCACHE r3
+ lwz r4,0(r3)
+ twi 0,r4,0
+ isync
+ ENABLE_DCACHE r3
+ mr r3, r4
+ blr
+
+_GLOBAL(out_32)
+ DISABLE_DCACHE r3
+ stw r4,0(r3)
+ sync
+ ENABLE_DCACHE r3
+ blr
+
+_GLOBAL(in_le16)
+ DISABLE_DCACHE r3
+ lhbrx r4,0,r3
+ twi 0,r4,0
+ isync
+ ENABLE_DCACHE r3
+ mr r3, r4
+ blr
+
+_GLOBAL(out_le16)
+ DISABLE_DCACHE r3
+ sthbrx r4,0,r3
+ sync
+ ENABLE_DCACHE r3
+ blr
+
+_GLOBAL(in_le32)
+ DISABLE_DCACHE r3
+ lwbrx r4,0,r3
+ twi 0,r4,0
+ isync
+ ENABLE_DCACHE r3
+ mr r3, r4
+ blr
+
+_GLOBAL(out_le32)
+ DISABLE_DCACHE r3
+ stwbrx r4,0,r3
+ sync
+ ENABLE_DCACHE r3
+ blr
diff --git a/xen/arch/powerpc/powerpc64/memcpy.S b/xen/arch/powerpc/powerpc64/memcpy.S
new file mode 100644
index 0000000000..e2a2c400cc
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/memcpy.S
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/config.h>
+#include <asm/processor.h>
+
+ .align 7
+_GLOBAL(memcpy)
+ mtcrf 0x01,r5
+ cmpldi cr1,r5,16
+ neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
+ andi. r6,r6,7
+ dcbt 0,r4
+ blt cr1,.Lshort_copy
+ bne .Ldst_unaligned
+.Ldst_aligned:
+ andi. r0,r4,7
+ addi r3,r3,-16
+ bne .Lsrc_unaligned
+ srdi r7,r5,4
+ ld r9,0(r4)
+ addi r4,r4,-8
+ mtctr r7
+ andi. r5,r5,7
+ bf cr7*4+0,2f
+ addi r3,r3,8
+ addi r4,r4,8
+ mr r8,r9
+ blt cr1,3f
+1: ld r9,8(r4)
+ std r8,8(r3)
+2: ldu r8,16(r4)
+ stdu r9,16(r3)
+ bdnz 1b
+3: std r8,8(r3)
+ beqlr
+ addi r3,r3,16
+ ld r9,8(r4)
+.Ldo_tail:
+ bf cr7*4+1,1f
+ rotldi r9,r9,32
+ stw r9,0(r3)
+ addi r3,r3,4
+1: bf cr7*4+2,2f
+ rotldi r9,r9,16
+ sth r9,0(r3)
+ addi r3,r3,2
+2: bf cr7*4+3,3f
+ rotldi r9,r9,8
+ stb r9,0(r3)
+3: blr
+
+.Lsrc_unaligned:
+ srdi r6,r5,3
+ addi r5,r5,-16
+ subf r4,r0,r4
+ srdi r7,r5,4
+ sldi r10,r0,3
+ cmpdi cr6,r6,3
+ andi. r5,r5,7
+ mtctr r7
+ subfic r11,r10,64
+ add r5,r5,r0
+
+ bt cr7*4+0,0f
+
+ ld r9,0(r4) # 3+2n loads, 2+2n stores
+ ld r0,8(r4)
+ sld r6,r9,r10
+ ldu r9,16(r4)
+ srd r7,r0,r11
+ sld r8,r0,r10
+ or r7,r7,r6
+ blt cr6,4f
+ ld r0,8(r4)
+ # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
+ b 2f
+
+0: ld r0,0(r4) # 4+2n loads, 3+2n stores
+ ldu r9,8(r4)
+ sld r8,r0,r10
+ addi r3,r3,-8
+ blt cr6,5f
+ ld r0,8(r4)
+ srd r12,r9,r11
+ sld r6,r9,r10
+ ldu r9,16(r4)
+ or r12,r8,r12
+ srd r7,r0,r11
+ sld r8,r0,r10
+ addi r3,r3,16
+ beq cr6,3f
+
+ # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
+1: or r7,r7,r6
+ ld r0,8(r4)
+ std r12,8(r3)
+2: srd r12,r9,r11
+ sld r6,r9,r10
+ ldu r9,16(r4)
+ or r12,r8,r12
+ stdu r7,16(r3)
+ srd r7,r0,r11
+ sld r8,r0,r10
+ bdnz 1b
+
+3: std r12,8(r3)
+ or r7,r7,r6
+4: std r7,16(r3)
+5: srd r12,r9,r11
+ or r12,r8,r12
+ std r12,24(r3)
+ beqlr
+ cmpwi cr1,r5,8
+ addi r3,r3,32
+ sld r9,r9,r10
+ ble cr1,.Ldo_tail
+ ld r0,8(r4)
+ srd r7,r0,r11
+ or r9,r7,r9
+ b .Ldo_tail
+
+.Ldst_unaligned:
+ mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
+ subf r5,r6,r5
+ li r7,0
+ cmpldi cr1,r5,16
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+ addi r7,r7,1
+1: bf cr7*4+2,2f
+ lhzx r0,r7,r4
+ sthx r0,r7,r3
+ addi r7,r7,2
+2: bf cr7*4+1,3f
+ lwzx r0,r7,r4
+ stwx r0,r7,r3
+3: mtcrf 0x01,r5
+ add r4,r6,r4
+ add r3,r6,r3
+ b .Ldst_aligned
+
+.Lshort_copy:
+ bf cr7*4+0,1f
+ lwz r0,0(r4)
+ lwz r9,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r9,4(r3)
+ addi r3,r3,8
+1: bf cr7*4+1,2f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+2: bf cr7*4+2,3f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+3: bf cr7*4+3,4f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+4: blr
+
diff --git a/xen/arch/powerpc/powerpc64/ppc970.c b/xen/arch/powerpc/powerpc64/ppc970.c
new file mode 100644
index 0000000000..f1b828fd57
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/ppc970.c
@@ -0,0 +1,164 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <xen/sched.h>
+#include <xen/lib.h>
+#include <asm/time.h>
+#include <asm/current.h>
+#include <asm/powerpc64/procarea.h>
+#include <asm/powerpc64/processor.h>
+#include <asm/powerpc64/ppc970-hid.h>
+
+#undef SERIALIZE
+
+void cpu_initialize(void)
+{
+ ulong stack;
+
+ parea = xmalloc(struct processor_area);
+ ASSERT(parea != NULL);
+
+ stack = (ulong)alloc_xenheap_pages(STACK_ORDER);
+
+ ASSERT(stack != 0);
+ parea->hyp_stack_base = (void *)(stack + STACK_SIZE);
+ printk("stack is here: %p\n", parea->hyp_stack_base);
+
+ mthsprg0((ulong)parea); /* now ready for exceptions */
+
+ /* Set decrementers for 1 second to keep them out of the way during
+ * initialization. */
+ /* XXX make tickless */
+ mtdec(timebase_freq);
+ mthdec(timebase_freq);
+
+ union hid0 hid0;
+
+ hid0.word = mfhid0();
+ hid0.bits.nap = 1;
+ hid0.bits.dpm = 1;
+ hid0.bits.nhr = 1;
+ hid0.bits.hdice = 1; /* enable HDEC */
+ hid0.bits.eb_therm = 1;
+ hid0.bits.en_attn = 1;
+#ifdef SERIALIZE
+ ulong s = 0;
+
+ s |= 1UL << (63-0); /* one_ppc */
+ s |= 1UL << (63-2); /* isync_sc */
+ s |= 1UL << (63-16); /* inorder */
+ /* may not want these */
+ s |= 1UL << (63-1); /* do_single */
+ s |= 1UL << (63-3); /* ser-gp */
+ hid0.word |= s;
+#endif
+ printk("hid0: 0x%016lx\n", hid0.word);
+ mthid0(hid0.word);
+
+ union hid1 hid1;
+
+ hid1.word = mfhid1();
+ hid1.bits.bht_pm = 7;
+ hid1.bits.en_ls = 1;
+
+ hid1.bits.en_cc = 1;
+ hid1.bits.en_ic = 1;
+
+ hid1.bits.pf_mode = 2;
+
+ hid1.bits.en_if_cach = 1;
+ hid1.bits.en_ic_rec = 1;
+ hid1.bits.en_id_rec = 1;
+ hid1.bits.en_er_rec = 1;
+
+ hid1.bits.en_sp_itw = 1;
+ mthid1(hid1.word);
+
+ union hid5 hid5;
+
+ hid5.word = mfhid5();
+ hid5.bits.DCBZ_size = 0;
+ hid5.bits.DCBZ32_ill = 0;
+ mthid5(hid5.word);
+
+ __asm__ __volatile__("isync; slbia; isync" : : : "memory");
+
+}
+
+void cpu_init_vcpu(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ union hid4 hid4;
+ ulong rma_base = d->arch.rma_base;
+ ulong rma_size = d->arch.rma_size;
+
+ hid4.word = mfhid4();
+
+ hid4.bits.lpes0 = 0; /* exceptions set MSR_HV=1 */
+ hid4.bits.lpes1 = 1; /* RMA applies */
+
+ hid4.bits.rmor = rma_base >> 26;
+
+ hid4.bits.lpid01 = d->domain_id & 3;
+ hid4.bits.lpid25 = (d->domain_id >> 2) & 0xf;
+
+ switch (rma_size) {
+ case 256ULL << 30: /* 256 GB */
+ hid4.bits.rmlr0 = 0;
+ hid4.bits.rmlr12 = 0;
+ break;
+ case 16ULL << 30: /* 16 GB */
+ hid4.bits.rmlr0 = 0;
+ hid4.bits.rmlr12 = 1;
+ break;
+ case 1ULL << 30: /* 1 GB */
+ hid4.bits.rmlr0 = 0;
+ hid4.bits.rmlr12 = 2;
+ break;
+ case 64ULL << 20: /* 64 MB */
+ hid4.bits.rmlr0 = 0;
+ hid4.bits.rmlr12 = 3;
+ break;
+ case 256ULL << 20: /* 256 MB */
+ hid4.bits.rmlr0 = 1;
+ hid4.bits.rmlr12 = 0;
+ break;
+ case 128ULL << 20: /* 128 MB */
+ hid4.bits.rmlr0 = 1;
+ hid4.bits.rmlr12 = 3;
+ break;
+ }
+
+ v->arch.cpu.hid4.word = hid4.word;
+}
+
+void save_cpu_sprs(struct vcpu *v)
+{
+ /* HID4 is initialized with a per-domain value at domain creation time, and
+ * does not change after that. */
+}
+
+void load_cpu_sprs(struct vcpu *v)
+{
+ mthid4(v->arch.cpu.hid4.word);
+}
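The HID4 packing in cpu_init_vcpu() can be followed with concrete numbers. A worked example with purely hypothetical values (not from this patch): a domain with id 9 and a 64MB RMA based at 0x04000000 would end up with

    hid4.bits.rmor   = 0x04000000UL >> 26;  /* == 1, i.e. the base in 64MB units */
    hid4.bits.rmlr0  = 0;                   /* 64MB row of the switch above      */
    hid4.bits.rmlr12 = 3;
    hid4.bits.lpid01 = 9 & 3;               /* == 1 */
    hid4.bits.lpid25 = (9 >> 2) & 0xf;      /* == 2 */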
diff --git a/xen/arch/powerpc/powerpc64/prom_call.S b/xen/arch/powerpc/powerpc64/prom_call.S
new file mode 100644
index 0000000000..f92fae34bf
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/prom_call.S
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2005 Jimi Xenidis <jimix@watson.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <asm/config.h>
+#include <asm/processor.h>
+
+ .macro save_gprs rsave, rend, off, rbase
+ std \rsave, \off(\rbase)
+ .if \rend-\rsave
+ save_gprs "(\rsave+1)",\rend,"(\off+8)",\rbase
+ .endif
+ .endm
+
+ .macro restore_gprs rsave, rend, off, rbase
+ ld \rsave, \off(\rbase)
+ .if \rend-\rsave
+ restore_gprs "(\rsave+1)",\rend,"(\off+8)",\rbase
+ .endif
+ .endm
+
+/*
+ * s32 prom_call(void *arg, ulong base, ulong func, ulong msr);
+ * r3 is arg pointer
+ * r4 is RTAS base, should be 0 for OF
+ * r5 is Prom vector
+ * r6 is the MSR we should use
+ */
+_GLOBAL(prom_call)
+ SET_REG_TO_LABEL(r7, gpr_store)
+
+ std r1, 0(r7)
+ std r2, 8(r7)
+ SAVE_GPRS r13, r31, 16, r7 # save the non-volatile GPRs
+
+ /*
+ * We can stuff the LR, MSR and SRR0/1 into GPRs that the caller
+ * must restore.
+ */
+
+ mflr r18
+ sradi r19, r18, 32 /* store lr in r18, r19 */
+
+ mfmsr r20
+ sradi r21, r20, 32 /* r20,r21 contain caller's msr */
+
+ /* save SRR0/1 */
+ mfsrr0 r24
+ sradi r25, r24, 32
+ mfsrr1 r26
+ sradi r27, r26, 32
+
+ bl go_to_real
+
+ /* come back here after the rfid in native_call */
+ insrdi r18, r19, 32, 0 /* restore lr */
+ mtlr r18
+
+ /* restore SRR0/1 */
+ insrdi r24, r25, 32, 0
+ mtsrr0 r24
+ insrdi r26, r27, 32, 0
+ mtsrr1 r26
+
+
+
+ SET_REG_TO_LABEL(r7, gpr_store)
+
+ ld r1, 0(r7)
+ ld r2, 8(r7)
+ RESTORE_GPRS r13, r31, 16, r7
+ blr
+
+native_call:
+ mtctr r5
+ bctrl /* call of or rtas */
+
+ insrdi r22, r23, 32, 0 /* reconstruct return point iar */
+ insrdi r20, r21, 32, 0 /* reconstruct caller's msr */
+
+ mtsrr0 r22
+ mtsrr1 r20
+ rfid
+
+
+go_to_real:
+ mflr r22
+ sradi r23, r22, 32 /* save address we rfid back to */
+
+ SET_REG_TO_LABEL(r8, native_call)
+
+ mtsrr0 r8
+ mtsrr1 r6
+ rfid
+
+
+ .data
+ .p2align 3
+gpr_store:
+ .space 8 # r1
+ .space 8 # r2
+ .space (32 - 13) * 8 # r13 - r31
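A hedged sketch of how C code might drive prom_call(), using only the prototype documented in the comment above; the wrapper name and argument-block handling are illustrative, not from this patch:

    extern s32 prom_call(void *arg, ulong base, ulong func, ulong msr);

    /* 'of_vec' is the firmware client-interface entry point and 'of_msr'
     * the MSR the firmware should run with; base is 0 for Open Firmware
     * and is only meaningful for RTAS calls. */
    static s32 call_firmware(void *argument_block, ulong of_vec, ulong of_msr)
    {
        /* prom_call saves r1/r2 and the non-volatile GPRs, switches to the
         * requested MSR via rfid, calls the entry point, then rfids back
         * with the caller's MSR and SRR0/1 restored. */
        return prom_call(argument_block, 0, of_vec, of_msr);
    }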
diff --git a/xen/arch/powerpc/powerpc64/string.S b/xen/arch/powerpc/powerpc64/string.S
new file mode 100644
index 0000000000..4baaecc96f
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/string.S
@@ -0,0 +1,286 @@
+/*
+ * String handling functions for PowerPC.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/config.h>
+#include <asm/processor.h>
+#include <xen/errno.h>
+
+_GLOBAL(strcpy)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r5)
+ bne 1b
+ blr
+
+_GLOBAL(strncpy)
+ cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r6)
+ bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
+ blr
+
+_GLOBAL(strcat)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r0,1(r5)
+ cmpwi 0,r0,0
+ bne 1b
+ addi r5,r5,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r5)
+ bne 1b
+ blr
+
+_GLOBAL(strcmp)
+ addi r5,r3,-1
+ addi r4,r4,-1
+1: lbzu r3,1(r5)
+ cmpwi 1,r3,0
+ lbzu r0,1(r4)
+ subf. r3,r0,r3
+ beqlr 1
+ beq 1b
+ blr
+
+_GLOBAL(strlen)
+ addi r4,r3,-1
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ bne 1b
+ subf r3,r3,r4
+ blr
+
+_GLOBAL(memset)
+ neg r0,r3
+ rlwimi r4,r4,8,16,23
+ andi. r0,r0,7 /* # bytes to be 8-byte aligned */
+ rlwimi r4,r4,16,0,15
+ cmplw cr1,r5,r0 /* do we get that far? */
+ rldimi r4,r4,32,0
+ mtcrf 1,r0
+ mr r6,r3
+ blt cr1,8f
+ beq+ 3f /* if already 8-byte aligned */
+ subf r5,r0,r5
+ bf 31,1f
+ stb r4,0(r6)
+ addi r6,r6,1
+1: bf 30,2f
+ sth r4,0(r6)
+ addi r6,r6,2
+2: bf 29,3f
+ stw r4,0(r6)
+ addi r6,r6,4
+3: srdi. r0,r5,6
+ clrldi r5,r5,58
+ mtctr r0
+ beq 5f
+4: std r4,0(r6)
+ std r4,8(r6)
+ std r4,16(r6)
+ std r4,24(r6)
+ std r4,32(r6)
+ std r4,40(r6)
+ std r4,48(r6)
+ std r4,56(r6)
+ addi r6,r6,64
+ bdnz 4b
+5: srwi. r0,r5,3
+ clrlwi r5,r5,29
+ mtcrf 1,r0
+ beq 8f
+ bf 29,6f
+ std r4,0(r6)
+ std r4,8(r6)
+ std r4,16(r6)
+ std r4,24(r6)
+ addi r6,r6,32
+6: bf 30,7f
+ std r4,0(r6)
+ std r4,8(r6)
+ addi r6,r6,16
+7: bf 31,8f
+ std r4,0(r6)
+ addi r6,r6,8
+8: cmpwi r5,0
+ mtcrf 1,r5
+ beqlr+
+ bf 29,9f
+ stw r4,0(r6)
+ addi r6,r6,4
+9: bf 30,10f
+ sth r4,0(r6)
+ addi r6,r6,2
+10: bflr 31
+ stb r4,0(r6)
+ blr
+
+_GLOBAL(memmove)
+ cmplw 0,r3,r4
+ bgt .backwards_memcpy
+ b .memcpy
+
+_GLOBAL(backwards_memcpy)
+ rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
+ add r6,r3,r5
+ add r4,r4,r5
+ beq 2f
+ andi. r0,r6,3
+ mtctr r7
+ bne 5f
+1: lwz r7,-4(r4)
+ lwzu r8,-8(r4)
+ stw r7,-4(r6)
+ stwu r8,-8(r6)
+ bdnz 1b
+ andi. r5,r5,7
+2: cmplwi 0,r5,4
+ blt 3f
+ lwzu r0,-4(r4)
+ subi r5,r5,4
+ stwu r0,-4(r6)
+3: cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+4: lbzu r0,-1(r4)
+ stbu r0,-1(r6)
+ bdnz 4b
+ blr
+5: mtctr r0
+6: lbzu r7,-1(r4)
+ stbu r7,-1(r6)
+ bdnz 6b
+ subf r5,r0,r5
+ rlwinm. r7,r5,32-3,3,31
+ beq 2b
+ mtctr r7
+ b 1b
+
+_GLOBAL(memcmp)
+ cmpwi 0,r5,0
+ ble- 2f
+ mtctr r5
+ addi r6,r3,-1
+ addi r4,r4,-1
+1: lbzu r3,1(r6)
+ lbzu r0,1(r4)
+ subf. r3,r0,r3
+ bdnzt 2,1b
+ blr
+2: li r3,0
+ blr
+
+_GLOBAL(memchr)
+ cmpwi 0,r5,0
+ ble- 2f
+ mtctr r5
+ addi r3,r3,-1
+1: lbzu r0,1(r3)
+ cmpw 0,r0,r4
+ bdnzf 2,1b
+ beqlr
+2: li r3,0
+ blr
+
+_GLOBAL(__clear_user)
+ addi r6,r3,-4
+ li r3,0
+ li r5,0
+ cmplwi 0,r4,4
+ blt 7f
+ /* clear a single word */
+11: stwu r5,4(r6)
+ beqlr
+ /* clear word sized chunks */
+ andi. r0,r6,3
+ add r4,r0,r4
+ subf r6,r0,r6
+ srwi r0,r4,2
+ andi. r4,r4,3
+ mtctr r0
+ bdz 7f
+1: stwu r5,4(r6)
+ bdnz 1b
+ /* clear byte sized chunks */
+7: cmpwi 0,r4,0
+ beqlr
+ mtctr r4
+ addi r6,r6,3
+8: stbu r5,1(r6)
+ bdnz 8b
+ blr
+90: mr r3,r4
+ blr
+91: mfctr r3
+ slwi r3,r3,2
+ add r3,r3,r4
+ blr
+92: mfctr r3
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 11b,90b
+ .llong 1b,91b
+ .llong 8b,92b
+ .text
+
+/* r3 = dst, r4 = src, r5 = count */
+_GLOBAL(__strncpy_from_user)
+ addi r6,r3,-1
+ addi r4,r4,-1
+ cmpwi 0,r5,0
+ beq 2f
+ mtctr r5
+1: lbzu r0,1(r4)
+ cmpwi 0,r0,0
+ stbu r0,1(r6)
+ bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
+ beq 3f
+2: addi r6,r6,1
+3: subf r3,r3,r6
+ blr
+99: li r3,-EFAULT
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,99b
+ .text
+
+/* r3 = str, r4 = len (> 0) */
+_GLOBAL(__strnlen_user)
+ addi r7,r3,-1
+ mtctr r4 /* ctr = len */
+1: lbzu r0,1(r7) /* get next byte */
+ cmpwi 0,r0,0
+ bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
+ addi r7,r7,1
+ subf r3,r3,r7 /* number of bytes we have looked at */
+ beqlr /* return if we found a 0 byte */
+ cmpw 0,r3,r4 /* did we look at all len bytes? */
+ blt 99f /* if not, must have hit top */
+ addi r3,r4,1 /* return len + 1 to indicate no null found */
+ blr
+99: li r3,0 /* bad address, return 0 */
+ blr
+
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,99b
+
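A usage sketch for __strnlen_user(), based only on the return convention documented in the comments above (0 on a bad address, len + 1 when no NUL is found within len, otherwise the number of bytes examined including the NUL); the C prototype is assumed from the register comments:

    extern unsigned long __strnlen_user(const char *str, unsigned long len);

    static int guest_string_ok(const char *guest_str, unsigned long maxlen)
    {
        unsigned long n = __strnlen_user(guest_str, maxlen);

        if (n == 0)
            return 0;   /* faulted: bad guest pointer */
        if (n > maxlen)
            return 0;   /* no NUL terminator within maxlen bytes */
        return 1;       /* NUL found after n bytes (including the NUL) */
    }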
diff --git a/xen/arch/powerpc/powerpc64/traps.c b/xen/arch/powerpc/powerpc64/traps.c
new file mode 100644
index 0000000000..e469e61dd3
--- /dev/null
+++ b/xen/arch/powerpc/powerpc64/traps.c
@@ -0,0 +1,50 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/console.h>
+#include <public/xen.h>
+#include <xen/compile.h>
+#include <xen/sched.h>
+
+void show_registers(struct cpu_user_regs *regs)
+{
+ int i;
+
+ console_start_sync();
+
+ printk("----[ Xen-%d.%d%s ]----\n",
+ XEN_VERSION, XEN_SUBVERSION, XEN_EXTRAVERSION);
+ printk("CPU: %08x DOMID: %08x\n",
+ smp_processor_id(), current->domain->domain_id);
+ printk("pc %016lx msr %016lx\n lr %016lx ctr %016lx\n",
+ regs->pc, regs->msr, regs->lr, regs->ctr);
+ for (i=0; i<32; i+=4) {
+ printk("r%02i: %016lx %016lx %016lx %016lx\n", i,
+ regs->gprs[i], regs->gprs[i+1], regs->gprs[i+2], regs->gprs[i+3]);
+ }
+ console_end_sync();
+}
+
+void show_execution_state(struct cpu_user_regs *regs)
+{
+ show_registers(regs);
+}
diff --git a/xen/arch/powerpc/ppc32/prom_call.c b/xen/arch/powerpc/ppc32/prom_call.c
new file mode 100644
index 0000000000..6c6a34917b
--- /dev/null
+++ b/xen/arch/powerpc/ppc32/prom_call.c
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <asm/processor.h>
+
+typedef s32 (*prom_call_t)(void *, ulong);
+
+s32
+prom_call(void *arg, uval base, uval func, uval msr __attribute__ ((unused)))
+{
+ prom_call_t f = (prom_call_t)func;
+ ulong srr0 = mfsrr0();
+ ulong srr1 = mfsrr1();
+ s32 ret;
+
+ ret = f(arg, base);
+
+ mtsrr0(srr0);
+ mtsrr1(srr1);
+
+ return ret;
+}
diff --git a/xen/arch/powerpc/rtas.c b/xen/arch/powerpc/rtas.c
new file mode 100644
index 0000000000..507182a5a9
--- /dev/null
+++ b/xen/arch/powerpc/rtas.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+
+int rtas_halt = -1;
+int rtas_reboot = -1;
diff --git a/xen/arch/powerpc/setup.c b/xen/arch/powerpc/setup.c
new file mode 100644
index 0000000000..176328c827
--- /dev/null
+++ b/xen/arch/powerpc/setup.c
@@ -0,0 +1,370 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/cpumask.h>
+#include <xen/sched.h>
+#include <xen/multiboot.h>
+#include <xen/serial.h>
+#include <xen/softirq.h>
+#include <xen/console.h>
+#include <xen/trace.h>
+#include <xen/mm.h>
+#include <xen/domain.h>
+#include <xen/gdbstub.h>
+#include <xen/symbols.h>
+#include <xen/keyhandler.h>
+#include <acm/acm_hooks.h>
+#include <public/version.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/cache.h>
+#include <asm/debugger.h>
+#include <asm/delay.h>
+#include "exceptions.h"
+#include "of-devtree.h"
+
+#define DEBUG
+unsigned long xenheap_phys_end;
+
+/* opt_noht: If true, Hyperthreading is ignored. */
+int opt_noht = 0;
+boolean_param("noht", opt_noht);
+
+int opt_earlygdb = 0;
+boolean_param("earlygdb", opt_earlygdb);
+
+u32 tlbflush_clock = 1U;
+u32 tlbflush_time[NR_CPUS];
+
+unsigned int watchdog_on;
+unsigned long wait_init_idle;
+ulong oftree;
+ulong oftree_len;
+
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+cpumask_t cpu_online_map; /* missing ifdef in schedule.c */
+
+/* XXX get this from ISA node in device tree */
+ulong isa_io_base;
+struct ns16550_defaults ns16550;
+
+extern void idle_loop(void);
+
+/* XXX move these declarations to a header file */
+extern void initialize_keytable(void);
+
+int is_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long) &_start &&
+ addr <= (unsigned long) &_etext)
+ return 1;
+ return 0;
+}
+
+unsigned long kernel_text_end(void)
+{
+ return (unsigned long) &_etext;
+}
+
+void idle_loop(void)
+{
+ int cpu = smp_processor_id();
+
+ for ( ; ; )
+ {
+ while (!softirq_pending(cpu)) {
+ void sleep(void);
+ page_scrub_schedule_work();
+ sleep();
+ }
+ do_softirq();
+ }
+}
+
+static void __init do_initcalls(void)
+{
+ initcall_t *call;
+ for (call = &__initcall_start; call < &__initcall_end; call++) {
+ (*call)();
+ }
+}
+
+static void hw_probe_attn(unsigned char key, struct cpu_user_regs *regs)
+{
+ /* To continue, the probe will step over the ATTN instruction. The
+ * NOP is there to make sure there is something sane to "step
+ * over" to. */
+ asm volatile(".long 0x00000200; nop");
+}
+
+static void __init start_of_day(void)
+{
+ struct domain *idle_domain;
+
+ init_IRQ();
+
+ scheduler_init();
+
+ /* create idle domain */
+ idle_domain = domain_create(IDLE_DOMAIN_ID);
+ if ((idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL))
+ BUG();
+ set_current(idle_domain->vcpu[0]);
+ idle_vcpu[0] = current;
+
+ /* for some reason we need to set our own bit in the thread map */
+ cpu_set(0, cpu_sibling_map[0]);
+
+ initialize_keytable();
+ /* Register another key that will allow the hardware probe to be
+ * contacted; this works with RiscWatch probes and should also
+ * work with Chronos and FSPs. */
+ register_irq_keyhandler('^', hw_probe_attn, "Trap to Hardware Probe");
+
+ timer_init();
+ serial_init_postirq();
+ do_initcalls();
+ schedulers_start();
+}
+
+void startup_cpu_idle_loop(void)
+{
+ struct vcpu *v = current;
+
+ ASSERT(is_idle_vcpu(v));
+ cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
+ cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
+
+ /* Finally get off the boot stack. */
+ reset_stack_and_jump(idle_loop);
+}
+
+static void __init __start_xen(multiboot_info_t *mbi)
+{
+ char *cmdline;
+ module_t *mod = (module_t *)((ulong)mbi->mods_addr);
+ ulong heap_start;
+ ulong modules_start, modules_size;
+ ulong eomem = 0;
+ ulong heap_size = 0;
+ ulong bytes = 0;
+ ulong freemem = (ulong)_end;
+ ulong oftree_end;
+
+ memcpy(0, exception_vectors, exception_vectors_end - exception_vectors);
+ synchronize_caches(0, exception_vectors_end - exception_vectors);
+
+ ticks_per_usec = timebase_freq / 1000000ULL;
+
+ /* Parse the command-line options. */
+ if ((mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0))
+ cmdline_parse(__va((ulong)mbi->cmdline));
+
+ ns16550_init(0, &ns16550);
+ serial_init_preirq();
+
+ init_console();
+#ifdef CONSOLE_SYNC
+ console_start_sync();
+#endif
+
+ /* Check that we have at least one Multiboot module. */
+ if (!(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0)) {
+ panic("FATAL ERROR: Require at least one Multiboot module.\n");
+ }
+
+ if (!(mbi->flags & MBI_MEMMAP)) {
+ panic("FATAL ERROR: Bootloader provided no memory information.\n");
+ }
+
+ /* mark the beginning of the images */
+ modules_start = mod[0].mod_start;
+ modules_size = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
+
+ /* OF dev tree is the last module */
+ oftree = mod[mbi->mods_count-1].mod_start;
+ oftree_end = mod[mbi->mods_count-1].mod_end;
+ oftree_len = oftree_end - oftree;
+
+ /* remove it from consideration */
+ mod[mbi->mods_count-1].mod_start = 0;
+ mod[mbi->mods_count-1].mod_end = 0;
+ --mbi->mods_count;
+
+ printk("Physical RAM map:\n");
+
+ /* let's find out how much memory there is */
+ while (bytes < mbi->mmap_length) {
+ u64 end;
+ u64 addr;
+ u64 size;
+
+ memory_map_t *map = (memory_map_t *)((ulong)mbi->mmap_addr + bytes);
+ addr = ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
+ size = ((u64)map->length_high << 32) | (u64)map->length_low;
+ end = addr + size;
+
+ printk(" %016lx - %016lx (usable)\n", addr, end);
+
+ if (addr > eomem) {
+ printk("found a hole skipping remainder of memory at:\n"
+ " %016lx and beyond\n", addr);
+ break;
+ }
+ if (end > eomem) {
+ eomem = end;
+ }
+ bytes += map->size + 4;
+ }
+
+ printk("System RAM: %luMB (%lukB)\n", eomem >> 20, eomem >> 10);
+
+ max_page = PFN_DOWN(ALIGN_DOWN(eomem, PAGE_SIZE));
+ total_pages = max_page;
+
+ /* skip the exception handlers */
+ heap_start = init_boot_allocator(4 << PAGE_SHIFT);
+
+ /* move the modules to just after _end */
+ if (modules_start) {
+ printk("modules at: %016lx - %016lx\n", modules_start,
+ modules_start + modules_size);
+ freemem = ALIGN_UP(freemem, PAGE_SIZE);
+ memmove((void *)freemem, (void *)modules_start, modules_size);
+
+ oftree -= modules_start - freemem;
+ modules_start = freemem;
+ freemem += modules_size;
+ printk(" moved to: %016lx - %016lx\n", modules_start,
+ modules_start + modules_size);
+ }
+
+#ifdef OF_DEBUG
+ printk("ofdump:\n");
+ /* make sure the OF devtree is good */
+ ofd_walk((void *)oftree, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
+#endif
+
+ /* mark all memory from modules onward as unused */
+ init_boot_pages(freemem, eomem);
+
+ init_frametable();
+ end_boot_allocator();
+
+ /* place the heap from after the allocator bitmap to _start */
+ xenheap_phys_end = (ulong)_start;
+ init_xenheap_pages(heap_start, xenheap_phys_end);
+ heap_size = xenheap_phys_end - heap_start;
+
+ printk("Xen heap: %luMB (%lukB)\n", heap_size >> 20, heap_size >> 10);
+
+ cpu_initialize();
+
+#ifdef CONFIG_GDB
+ initialise_gdb();
+ if (opt_earlygdb)
+ debugger_trap_immediate();
+#endif
+
+ start_of_day();
+
+ /* Create initial domain 0. */
+ dom0 = domain_create(0);
+ if ((dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL))
+ panic("Error creating domain 0\n");
+
+ set_bit(_DOMF_privileged, &dom0->domain_flags);
+ /* the post-create hook sets the security label */
+ acm_post_domain0_create(dom0->domain_id);
+
+ cmdline = (char *)(mod[0].string ? __va((ulong)mod[0].string) : NULL);
+
+ /* scrub_heap_pages() requires IRQs enabled, and we're post IRQ setup... */
+ local_irq_enable();
+ /* Scrub RAM that is still free and so may go to an unprivileged domain. */
+ scrub_heap_pages();
+
+ /*
+ * We're going to set up domain0 using the module(s) that we
+ * stashed safely above our heap. The second module, if present,
+ * is an initrd ramdisk. The last module is the OF devtree.
+ */
+ if (construct_dom0(dom0,
+ modules_start,
+ mod[0].mod_end-mod[0].mod_start,
+ (mbi->mods_count == 1) ? 0 :
+ modules_start +
+ (mod[1].mod_start-mod[0].mod_start),
+ (mbi->mods_count == 1) ? 0 :
+ mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
+ cmdline) != 0) {
+ panic("Could not set up DOM0 guest OS\n");
+ }
+
+ init_trace_bufs();
+
+ console_endboot();
+
+ /* Hide UART from DOM0 if we're using it */
+ serial_endboot();
+
+ domain_unpause_by_systemcontroller(dom0);
+
+ startup_cpu_idle_loop();
+}
+
+void __init __start_xen_ppc(
+ ulong r3, ulong r4, ulong r5, ulong r6, ulong r7, ulong orig_msr)
+{
+ multiboot_info_t *mbi = NULL;
+
+ /* clear bss */
+ memset(__bss_start, 0, (ulong)_end - (ulong)__bss_start);
+
+ if (r5 > 0) {
+ /* we were booted by OpenFirmware */
+ mbi = boot_of_init(r3, r4, r5, r6, r7, orig_msr);
+
+ } else {
+ /* booted by someone else that hopefully has a trap handler */
+ trap();
+ }
+
+ __start_xen(mbi);
+
+}
+
+extern void arch_get_xen_caps(xen_capabilities_info_t info);
+void arch_get_xen_caps(xen_capabilities_info_t info)
+{
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/powerpc/smp.c b/xen/arch/powerpc/smp.c
new file mode 100644
index 0000000000..937b1c59c2
--- /dev/null
+++ b/xen/arch/powerpc/smp.c
@@ -0,0 +1,60 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/misc.h>
+#include <xen/cpumask.h>
+#include <xen/smp.h>
+#include <asm/flushtlb.h>
+
+int smp_num_siblings = 1;
+int smp_num_cpus = 1;
+int ht_per_core = 1;
+
+void __flush_tlb_mask(cpumask_t mask, unsigned long addr)
+{
+ unimplemented();
+}
+
+void smp_send_event_check_mask(cpumask_t cpu_mask)
+{
+ unimplemented();
+}
+
+int smp_call_function(void (*func) (void *info), void *info, int unused,
+ int wait)
+{
+ unimplemented();
+ return 0;
+}
+
+void smp_send_stop(void)
+{
+ unimplemented();
+}
+
+int on_selected_cpus(
+ cpumask_t selected,
+ void (*func) (void *info),
+ void *info,
+ int retry,
+ int wait)
+{
+ return 0;
+}
diff --git a/xen/arch/powerpc/tce.h b/xen/arch/powerpc/tce.h
new file mode 100644
index 0000000000..f0e76e40ad
--- /dev/null
+++ b/xen/arch/powerpc/tce.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _TCE_H
+#define _TCE_H
+
+#include <xen/config.h>
+#include <xen/types.h>
+
+union tce {
+ u64 tce_dword;
+ struct tce_bits {
+ /* the bits here reflect the definition in Linux */
+ /* the RPA considers all 52 bits to be the RPN */
+ u64 tce_cache : 6;
+ u64 _tce_r0 : 6; /* reserved */
+ u64 tce_rpn :40; /* Real Page Number */
+
+ /* The RPA considers the next 10 bits reserved */
+ u64 tce_v : 1; /* Valid bit */
+ u64 tce_vlps : 1; /* Valid for LPs */
+ u64 tce_lpx : 8; /* LP index */
+
+ /* the RPA defines the following two bits as:
+ * 00: no access
+ * 01: System Address read only
+ * 10: System Address write only
+ * 11: read/write
+ */
+ u64 tce_write : 1;
+ u64 tce_read : 1;
+ } tce_bits;
+};
+
+union tce_bdesc {
+ u64 lbd_dword;
+ struct lbd_bits {
+ u64 lbd_ctrl_v : 1;
+ u64 lbd_ctrl_vtoggle : 1;
+ u64 _lbd_ctrl_res0 : 6;
+ u64 lbd_len :24;
+ u64 lbd_addr :32;
+ } lbd_bits;
+};
+
+struct tce_data {
+ ulong t_entries;
+ ulong t_base;
+ ulong t_alloc_size;
+ union tce *t_tce;
+};
+
+#endif /* _TCE_H */
+
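A minimal construction sketch for the TCE layout above; the helper name is hypothetical and only the RPA access encoding documented in the comments (11b = read/write) is used:

    static inline union tce make_rw_tce(u64 rpn)
    {
        union tce t;

        t.tce_dword = 0;
        t.tce_bits.tce_rpn   = rpn;  /* real page number of the target page */
        t.tce_bits.tce_read  = 1;    /* RPA access bits 11b: read/write     */
        t.tce_bits.tce_write = 1;
        return t;
    }

Whether the Linux-derived valid/LP-index fields also need to be set depends on how the platform interprets them, so they are left clear here.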
diff --git a/xen/arch/powerpc/time.c b/xen/arch/powerpc/time.c
new file mode 100644
index 0000000000..17c89f13ea
--- /dev/null
+++ b/xen/arch/powerpc/time.c
@@ -0,0 +1,131 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/time.h>
+#include <xen/smp.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <asm/processor.h>
+#include <asm/current.h>
+#include <asm/misc.h>
+
+#define Dprintk(x...) printk(x)
+
+static int cpu_has_hdec = 1;
+ulong ticks_per_usec;
+unsigned long cpu_khz;
+unsigned int timebase_freq;
+
+u64 get_timebase(void)
+{
+ u64 s;
+
+#ifdef __PPC64__
+ s = mftb();
+#else
+ do {
+ unsigned up;
+ unsigned lo;
+ unsigned up2;
+
+ up = mftbu();
+ lo = mftbl();
+ up2 = mftbu();
+ } while (up1 != up2);
+ s = ((ulong)up << 32) | lo;
+#endif
+ return s;
+}
+
+static ulong ns_to_tb(ulong ns)
+{
+ return (ns * timebase_freq) / 1000000000ULL;
+}
+
+static ulong tb_to_ns(ulong tb)
+{
+ return tb * (1000000000ULL / timebase_freq);
+}
+
+/*
+ * Return nanoseconds from time of boot
+ */
+s_time_t get_s_time(void)
+{
+ return tb_to_ns(get_timebase());
+}
+
+static void set_preempt(unsigned ns)
+{
+ ulong ticks = ns_to_tb(ns);
+ if (cpu_has_hdec) {
+ mthdec(ticks);
+ } else {
+ mtdec(ticks);
+ }
+}
+
+/*
+ * Set the preemption timer. The timeout is an absolute system time
+ * (nanoseconds from start of boot).
+ * Returns 1 on success.
+ * Returns 0 if the timeout value is too small or in the past.
+ */
+extern int reprogram_timer(s_time_t timeout);
+int reprogram_timer(s_time_t timeout)
+{
+ s_time_t expire;
+
+ if (timeout == 0) {
+ expire = 0;
+ } else {
+ s_time_t now;
+
+ now = get_s_time();
+ expire = timeout - now; /* value from now */
+
+ if (expire <= 0) {
+ Dprintk("%s[%02d] Timeout in the past "
+ "0x%08X%08X > 0x%08X%08X\n", __func__,
+ smp_processor_id(), (u32)(now >> 32),
+ (u32)now, (u32)(timeout >> 32), (u32)timeout);
+ return 0;
+ }
+ }
+ set_preempt(expire);
+ return 1;
+}
+
+void send_timer_event(struct vcpu *v)
+{
+ v->arch.dec = 1;
+ vcpu_unblock(v);
+}
+
+/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
+void do_settime(unsigned long secs, unsigned long usecs, u64 system_time_base)
+{
+ unimplemented();
+}
+
+void update_vcpu_system_time(struct vcpu *v)
+{
+ unimplemented();
+}
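A quick worked example of the two conversions above, using a hypothetical timebase frequency (not a value from this patch): with timebase_freq = 500000000 (a 500MHz timebase),

    ns_to_tb(1000000)  /* == (1000000 * 500000000) / 1000000000 == 500000 ticks */
    tb_to_ns(500000)   /* == 500000 * (1000000000 / 500000000)  == 1000000 ns   */

so one millisecond round-trips cleanly when the frequency divides 1000000000 evenly.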
diff --git a/xen/arch/powerpc/usercopy.c b/xen/arch/powerpc/usercopy.c
new file mode 100644
index 0000000000..667c90eaed
--- /dev/null
+++ b/xen/arch/powerpc/usercopy.c
@@ -0,0 +1,232 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <xen/config.h>
+#include <xen/mm.h>
+#include <xen/sched.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <public/xen.h>
+#include <public/xencomm.h>
+
+#undef DEBUG
+#ifdef DEBUG
+static int xencomm_debug = 1; /* extremely verbose */
+#else
+#define xencomm_debug 0
+#endif
+
+/* XXX need to return error, not panic, if domain passed a bad pointer */
+static unsigned long paddr_to_maddr(unsigned long paddr)
+{
+ struct vcpu *v = get_current();
+ struct domain *d = v->domain;
+ int mtype;
+ ulong pfn;
+ ulong offset;
+ ulong pa = paddr;
+
+ offset = pa & ~PAGE_MASK;
+ pfn = pa >> PAGE_SHIFT;
+
+ pa = pfn2mfn(d, pfn, &mtype);
+ switch (mtype) {
+ case PFN_TYPE_RMA:
+ case PFN_TYPE_LOGICAL:
+ break;
+ default:
+ panic("%s: called with bad memory address type: 0x%lx\n",
+ __func__, paddr);
+ break;
+ }
+ pa <<= PAGE_SHIFT;
+ pa |= offset;
+
+ return pa;
+}
+
+/**
+ * xencomm_copy_from_guest: Copy a block of data from domain space.
+ * @to: Machine address.
+ * @from: Physical address to a xencomm buffer descriptor.
+ * @n: Number of bytes to copy.
+ * @skip: Number of bytes from the start to skip.
+ *
+ * Copy data from domain to hypervisor.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+xencomm_copy_from_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ struct xencomm_desc *desc;
+ unsigned int from_pos = 0;
+ unsigned int to_pos = 0;
+ unsigned int i = 0;
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from);
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("xencomm error: %p magic was 0x%x\n", desc, desc->magic);
+ return n;
+ }
+
+ /* iterate through the descriptor, copying up to a page at a time */
+ while ((to_pos < n) && (i < desc->nr_addrs)) {
+ unsigned long src_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ if (src_paddr == XENCOMM_INVALID) {
+ i++;
+ continue;
+ }
+
+ pgoffset = src_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, skip);
+ from_pos += chunk_skip;
+ chunksz -= chunk_skip;
+ skip -= chunk_skip;
+
+ if (skip == 0) {
+ unsigned long src_maddr;
+ unsigned long dest = (unsigned long)to + to_pos;
+ unsigned int bytes = min(chunksz, n - to_pos);
+
+ src_maddr = paddr_to_maddr(src_paddr + chunk_skip);
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", src_maddr, bytes, dest);
+ memcpy((void *)dest, (void *)src_maddr, bytes);
+ from_pos += bytes;
+ to_pos += bytes;
+ }
+
+ i++;
+ }
+
+ return n - to_pos;
+}
+
+/**
+ * xencomm_copy_to_guest: Copy a block of data to domain space.
+ * @to: Physical address to xencomm buffer descriptor.
+ * @from: Machine address.
+ * @n: Number of bytes to copy.
+ * @skip: Number of bytes from the start to skip.
+ *
+ * Copy data from hypervisor to domain.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long
+xencomm_copy_to_guest(void *to, const void *from, unsigned int n,
+ unsigned int skip)
+{
+ struct xencomm_desc *desc;
+ unsigned int from_pos = 0;
+ unsigned int to_pos = 0;
+ unsigned int i = 0;
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to);
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("xencomm error: %p magic was 0x%x\n", desc, desc->magic);
+ return n;
+ }
+
+ /* iterate through the descriptor, copying up to a page at a time */
+ while ((from_pos < n) && (i < desc->nr_addrs)) {
+ unsigned long dest_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ if (dest_paddr == XENCOMM_INVALID) {
+ i++;
+ continue;
+ }
+
+ pgoffset = dest_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, skip);
+ to_pos += chunk_skip;
+ chunksz -= chunk_skip;
+ skip -= chunk_skip;
+
+ if (skip == 0) {
+ unsigned long dest_maddr;
+ unsigned long source = (unsigned long)from + from_pos;
+ unsigned int bytes = min(chunksz, n - from_pos);
+
+ dest_maddr = paddr_to_maddr(dest_paddr + chunk_skip);
+ if (xencomm_debug)
+ printk("%lx[%d] -> %lx\n", source, bytes, dest_maddr);
+ memcpy((void *)dest_maddr, (void *)source, bytes);
+ from_pos += bytes;
+ to_pos += bytes;
+ }
+
+ i++;
+ }
+
+ return n - from_pos;
+}
+
+/* Offset page addresses in 'handle' to skip 'bytes' bytes. Set completely
+ * exhausted pages to XENCOMM_INVALID. */
+void xencomm_add_offset(void *handle, unsigned int bytes)
+{
+ struct xencomm_desc *desc;
+ int i = 0;
+
+ /* first we need to access the descriptor */
+ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle);
+ if (desc->magic != XENCOMM_MAGIC) {
+ printk("xencomm error: %p magic was 0x%x\n", desc, desc->magic);
+ return;
+ }
+
+ /* iterate through the descriptor incrementing addresses */
+ while ((bytes > 0) && (i < desc->nr_addrs)) {
+ unsigned long dest_paddr = desc->address[i];
+ unsigned int pgoffset;
+ unsigned int chunksz;
+ unsigned int chunk_skip;
+
+ pgoffset = dest_paddr % PAGE_SIZE;
+ chunksz = PAGE_SIZE - pgoffset;
+
+ chunk_skip = min(chunksz, bytes);
+ if (chunk_skip == chunksz) {
+ /* exhausted this page */
+ desc->address[i] = XENCOMM_INVALID;
+ } else {
+ desc->address[i] += chunk_skip;
+ }
+ bytes -= chunk_skip;
+ i++;
+ }
+}
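For reference, a guest-side sketch of how a xencomm descriptor might be built before a hypercall; this is illustrative, relies only on the fields the code above reads (magic, nr_addrs, address[]), and the virt_to_phys() helper is an assumption, not something defined in this patch:

    static int xencomm_describe(struct xencomm_desc *desc, void *buf,
                                unsigned long bytes, int max_addrs)
    {
        unsigned long va = (unsigned long)buf;
        int i = 0;

        desc->magic = XENCOMM_MAGIC;
        while (bytes > 0 && i < max_addrs) {
            unsigned long chunk = PAGE_SIZE - (va % PAGE_SIZE);

            if (chunk > bytes)
                chunk = bytes;
            /* Record the guest-physical address of this chunk, page offset
             * included, exactly as paddr_to_maddr() above expects. */
            desc->address[i++] = virt_to_phys((void *)va);  /* assumed helper */
            va += chunk;
            bytes -= chunk;
        }
        if (bytes > 0)
            return -1;      /* buffer does not fit in this descriptor */
        desc->nr_addrs = i;
        return 0;
    }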
diff --git a/xen/arch/powerpc/xen.lds b/xen/arch/powerpc/xen.lds
new file mode 100644
index 0000000000..b4ac9c48a6
--- /dev/null
+++ b/xen/arch/powerpc/xen.lds
@@ -0,0 +1,226 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc",
+ "elf64-powerpc")
+OUTPUT_ARCH(powerpc:common64)
+ENTRY(_start)
+SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x10000000); . = 0x10000000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*)
+ *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*)
+ *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*)
+ *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.toc)
+ *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*)
+ *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*)
+ *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*)
+ *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .rela.tocbss : { *(.rela.tocbss) }
+ .init :
+ {
+ KEEP (*(.init))
+ } =0x60000000
+ .text :
+ {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.sfpr .glink)
+ } =0x60000000
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0x60000000
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
+ .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (0x10000) - ((0x10000 - .) & (0x10000 - 1)); . = DATA_SEGMENT_ALIGN (0x10000, 0x1000);
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(64 / 8);
+ PROVIDE (__preinit_array_start = .);
+ .preinit_array : { *(.preinit_array) }
+ PROVIDE (__preinit_array_end = .);
+ PROVIDE (__init_array_start = .);
+ .init_array : { *(.init_array) }
+ PROVIDE (__init_array_end = .);
+ PROVIDE (__fini_array_start = .);
+ .fini_array : { *(.fini_array) }
+ PROVIDE (__fini_array_end = .);
+ .data :
+ {
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+
+ /* Xen addition */
+ . = ALIGN(32);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ __initcall_end = .;
+ __inithcall_start = .;
+ .inithcall.text : { *(.inithcall.text) }
+ __inithcall_end = .;
+ /* end Xen addition */
+
+ .data1 : { *(.data1) }
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
+ .gcc_except_table : { *(.gcc_except_table) }
+ .toc1 ALIGN(8) : { *(.toc1) }
+ .opd ALIGN(8) : { KEEP (*(.opd)) }
+ .dynamic : { *(.dynamic) }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ from the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .got ALIGN(8) : { *(.got .toc) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata .sdata.* .gnu.linkonce.s.*)
+ }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ .tocbss ALIGN(8) : { *(.tocbss)}
+ .sbss :
+ {
+ PROVIDE (__sbss_start = .);
+ PROVIDE (___sbss_start = .);
+ *(.dynsbss)
+ *(.sbss .sbss.* .gnu.linkonce.sb.*)
+ *(.scommon)
+ PROVIDE (__sbss_end = .);
+ PROVIDE (___sbss_end = .);
+ }
+ .plt : { *(.plt) }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(64 / 8);
+ }
+ . = ALIGN(64 / 8);
+ _end = .;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /DISCARD/ : { *(.note.GNU-stack) }
+}
diff --git a/xen/include/asm-powerpc/asm_defns.h b/xen/include/asm-powerpc/asm_defns.h
new file mode 100644
index 0000000000..1c677a2a16
--- /dev/null
+++ b/xen/include/asm-powerpc/asm_defns.h
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef __PPC_ASM_DEFNS_H__
+#define __PPC_ASM_DEFNS_H__
+
+/* NB. Auto-generated from arch/.../asm-offsets.c */
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#endif /* __PPC_ASM_DEFNS_H__ */
diff --git a/xen/include/asm-powerpc/atomic.h b/xen/include/asm-powerpc/atomic.h
new file mode 100644
index 0000000000..78df061498
--- /dev/null
+++ b/xen/include/asm-powerpc/atomic.h
@@ -0,0 +1,211 @@
+/*
+ * PowerPC64 atomic operations
+ *
+ * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PPC64_ATOMIC_H_
+#define _ASM_PPC64_ATOMIC_H_
+
+#include <asm/memory.h>
+#include <asm/system.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define _atomic_read(v) atomic_read(&v)
+#define atomic_set(v,i) (((v)->counter) = (i))
+#define _atomic_set(v,i) atomic_set(&v,i)
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n\
+ stwcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+
+static __inline__ atomic_t atomic_compareandswap(
+ atomic_t old, atomic_t new, atomic_t *v)
+{
+ atomic_t rc;
+ rc.counter =
+ __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
+ return rc;
+}
+
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#endif /* _ASM_PPC64_ATOMIC_H_ */
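A small usage sketch for atomic_dec_if_positive(), relying only on its documented behaviour (it returns the old value minus one, and only stores when the old value was greater than zero); the token-pool framing is hypothetical:

    /* Take one token from a pool if any are available, never going negative. */
    static inline int try_get_token(atomic_t *available)
    {
        return atomic_dec_if_positive(available) >= 0;
    }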
diff --git a/xen/include/asm-powerpc/bitops.h b/xen/include/asm-powerpc/bitops.h
new file mode 100644
index 0000000000..ec7e54d1f0
--- /dev/null
+++ b/xen/include/asm-powerpc/bitops.h
@@ -0,0 +1,309 @@
+/*
+ * PowerPC64 atomic bit operations.
+ * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
+ * Anton Blanchard
+ *
+ * Originally taken from the 32b PPC code. Modified to use 64b values for
+ * the various counters & memory references.
+ *
+ * Bitops are odd when viewed on big-endian systems. They were designed
+ * on little endian so the size of the bitset doesn't matter (low order bytes
+ * come first) as long as the bit in question is valid.
+ *
+ * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
+ * our best to stay compatible with that. The assumption is that val will
+ * be unsigned long for such tests. As such, we assume the bits are stored
+ * as an array of unsigned long (the usual case is a single unsigned long,
+ * of course). Here's an example bitset with bit numbering:
+ *
+ * |63..........0|127........64|191.......128|255.......192|
+ *
+ * This leads to a problem. If an int, short or char is passed as a bitset
+ * it will be a bad memory reference since we want to store in chunks
+ * of unsigned long (64 bits here) size.
+ *
+ * There are a few little-endian macros used mostly for filesystem bitmaps,
+ * these work on similar bit arrays layouts, but byte-oriented:
+ *
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 in the bit number field need to be
+ * reversed compared to the big-endian bit fields. This can be achieved
+ * by XOR with 0b111000 (0x38).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PPC64_BITOPS_H
+#define _PPC64_BITOPS_H
+
+#include <asm/memory.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
+{
+ volatile unsigned long *laddr = (volatile unsigned long *)addr;
+ return (1UL & (laddr[nr >> 6] >> (nr & 63)));
+}
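
To make the layout described above concrete, a small worked sketch (illustrative only) of how a bit number maps onto the array of unsigned longs, and how the byte-oriented little-endian variants would remap it:

    /* Illustration only: decompose a bit number for the 64-bit layout above. */
    static inline void bit_layout_example(void)
    {
        unsigned long nr   = 70;            /* some bit number */
        unsigned long word = nr >> 6;       /* 70 / 64 = 1: second unsigned long */
        unsigned long bit  = nr & 63;       /* 70 % 64 = 6: bit 6 within that word */
        unsigned long mask = 1UL << bit;    /* test with: addr[word] & mask */

        /* For the byte-oriented (little-endian) view, bits 3-5 of the bit
         * number are flipped, i.e. the byte index within the doubleword is
         * mirrored: */
        unsigned long le_nr = nr ^ 0x38;    /* 70 ^ 56 = 126 in this example */

        (void)mask; (void)le_nr;
    }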
+
+static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # set_bit\n\
+ or %0,%0,%2\n\
+ stdcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
+ : "cc");
+}
+
+static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # clear_bit\n\
+ andc %0,%0,%2\n\
+ stdcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
+ : "cc");
+}
+
+static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # change_bit\n\
+ xor %0,%0,%2\n\
+ stdcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
+ : "cc");
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # test_and_set_bit\n\
+ or %1,%0,%2 \n\
+ stdcx. %1,0,%3 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # test_and_clear_bit\n\
+ andc %1,%0,%2\n\
+ stdcx. %1,0,%3\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # test_and_change_bit\n\
+ xor %1,%0,%2\n\
+ stdcx. %1,0,%3\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+ unsigned long old;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # set_bit\n\
+ or %0,%0,%2\n\
+ stdcx. %0,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=m" (*addr)
+ : "r" (mask), "r" (addr), "m" (*addr)
+ : "cc");
+}
+
+/*
+ * non-atomic versions
+ */
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ *p |= mask;
+}
+
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ *p &= ~mask;
+}
+
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+ *p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x3f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
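
A short usage sketch (not part of the patch) of when the non-atomic variants above are appropriate: a bitmap that is private to one CPU or already protected by a lock. The bitmap and function names are hypothetical.

    /* Illustration: 'pending' is assumed to be protected by a lock held by
     * the caller, so the cheaper non-atomic helper is sufficient inside the
     * critical section. */
    static unsigned long pending[4];        /* 256-bit bitmap */

    static void mark_pending_locked(unsigned long nr)
    {
        /* lock held by the caller */
        __set_bit(nr, pending);
    }

    static void mark_pending_shared(unsigned long nr)
    {
        /* no lock held: must use the atomic, ldarx/stdcx.-based version */
        set_bit(nr, pending);
    }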
+
+/*
+ * Return the zero-based bit position (counting from the least-significant
+ * bit, 0..63) of the most significant (left-most) 1-bit in a doubleword.
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+ int lz;
+
+ asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 63 - lz;
+}
+
+/*
+ * Determine the bit position of the least significant (rightmost) 0 bit
+ * in the specified doubleword.  The returned bit position is zero-based,
+ * counting from the rightmost bit (bit 0) up to bit 63.
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+    /* If no zero bit exists anywhere in the 8-byte value, return 64. */
+ if ((x = ~x) == 0)
+ return 64;
+
+ /*
+     * Calculate the bit position of the least significant '1' bit in x
+     * (since x has been changed, this will actually be the least significant
+     * '0' bit in the original x).  Note: (x & -x) gives us a mask that
+     * is the least significant (RIGHT-most) 1-bit of the value in x.
+ */
+ return __ilog2(x & -x);
+}
+
+static __inline__ int __ffs(unsigned long x)
+{
+ return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ unsigned long i = (unsigned long)x;
+ return __ilog2(i & -i) + 1;
+}
+
+#define find_first_set_bit(word) (ffs(word)-1)
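
A few worked values (illustrative only) showing how the search primitives above relate to each other:

    /* Illustration: expected results of the helpers above on sample values.
     *   __ilog2(0x50) == 6   (highest set bit of 0b0101_0000)
     *   __ffs(0x50)   == 4   (lowest set bit, zero-based)
     *   ffs(0x50)     == 5   (1-based, libc convention)
     *   find_first_set_bit(0x50) == 4
     *   ffz(0xff)     == 8   (lowest clear bit)
     *   ffz(~0UL)     == 64  (no zero bit at all)
     */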
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+#define fls(x) generic_fls(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of an N-bit word
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
+#endif /* _PPC64_BITOPS_H */
diff --git a/xen/include/asm-powerpc/cache.h b/xen/include/asm-powerpc/cache.h
new file mode 100644
index 0000000000..9151f84000
--- /dev/null
+++ b/xen/include/asm-powerpc/cache.h
@@ -0,0 +1,60 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_CACHE_H
+#define __ASM_CACHE_H
+
+#include <xen/config.h>
+#include <asm/processor.h>
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT (CONFIG_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+static __inline__ void dcbst(ulong line)
+{
+ __asm__ __volatile__ ("dcbst 0, %0" : : "r"(line) : "memory");
+}
+
+static __inline__ void icbi(ulong line)
+{
+ __asm__ __volatile__ ("icbi 0, %0" : : "r"(line) : "memory");
+}
+
+static __inline__ void synchronize_caches(ulong start, size_t len)
+{
+ ulong addr;
+
+ for (addr = start; addr < start + len; addr += L1_CACHE_BYTES) {
+ dcbst(addr);
+ }
+
+ /* allow dcbsts to complete */
+ sync();
+
+ for (addr = start; addr < start + len; addr += L1_CACHE_BYTES) {
+ icbi(addr);
+ }
+
+ /* discard instructions partially decoded from old icache contents */
+ isync();
+}
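
A hedged usage sketch (not from the patch): the classic caller of synchronize_caches() is code that writes instructions into memory and then executes them. The copy_and_flush name is hypothetical.

    /* Illustration: after copying executable code, flush the dcache lines to
     * memory and invalidate the corresponding icache lines before branching
     * to the new code. */
    static void copy_and_flush(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
        synchronize_caches((ulong)dst, len);
        /* it is now safe to branch to 'dst' */
    }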
+
+#endif
diff --git a/xen/include/asm-powerpc/config.h b/xen/include/asm-powerpc/config.h
new file mode 100644
index 0000000000..94caa6bea3
--- /dev/null
+++ b/xen/include/asm-powerpc/config.h
@@ -0,0 +1,77 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __PPC_CONFIG_H__
+#define __PPC_CONFIG_H__
+
+#define CONFIG_MAMBO 1
+#define HYPERVISOR_VIRT_START 0x0 /* XXX temp hack for common/kernel.c */
+
+
+#ifdef __ASSEMBLY__
+/* older assemblers do not like UL */
+#define U(x) (x)
+#define UL(x) (x)
+
+#else /* __ASSEMBLY__ */
+
+#define U(x) (x ## U)
+#define UL(x) (x ## UL)
+extern char _start[];
+extern char _end[];
+extern char _etext[];
+extern char __bss_start[];
+#endif
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+#define ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
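
For clarity, the alignment macros round as follows (illustrative values only); note that 'size' must be a power of two for the bit-mask trick to be correct:

    /* Illustration: with size = 0x1000 (4 KiB, a power of two):
     *   ALIGN_UP  (0x12345, 0x1000) == 0x13000
     *   ALIGN_UP  (0x12000, 0x1000) == 0x12000   (already aligned: unchanged)
     *   ALIGN_DOWN(0x12345, 0x1000) == 0x12000
     */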
+
+/* this should be per processor, but for now */
+#define CACHE_LINE_SIZE 128
+
+#define CONFIG_GDB 1
+#define CONFIG_SMP 1
+#define CONFIG_PCI 1
+#define NR_CPUS 1
+
+#ifndef ELFSIZE
+#define ELFSIZE 64
+#endif
+
+#define asmlinkage
+
+#define NO_UART_CONFIG_OK
+#define OPT_CONSOLE_STR "com1"
+
+#define __user
+
+#define LINEAR_PT_VIRT_START (0xdeadbeefUL)
+#define XENHEAP_DEFAULT_MB (16)
+
+#define NR_hypercalls 64
+
+#define supervisor_mode_kernel (0)
+
+#define MAX_DMADOM_PFN (~0UL)
+
+#include <asm/powerpc64/config.h>
+
+#endif
diff --git a/xen/include/asm-powerpc/current.h b/xen/include/asm-powerpc/current.h
new file mode 100644
index 0000000000..cac3e26342
--- /dev/null
+++ b/xen/include/asm-powerpc/current.h
@@ -0,0 +1,79 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_CURRENT_H_
+#define _ASM_CURRENT_H_
+
+#include <public/xen.h>
+#include <asm/processor.h>
+#include <asm/powerpc64/procarea.h>
+
+struct vcpu;
+
+register struct processor_area *parea asm("r13");
+
+static inline struct vcpu *get_current(void)
+{
+ return parea->cur_vcpu;
+}
+#define current get_current()
+
+static inline void set_current(struct vcpu *v)
+{
+ parea->cur_vcpu = v;
+}
+
+/* The *currently running* guest's register state has been saved at the top of
+ * this processor's hypervisor stack. */
+static inline struct cpu_user_regs *guest_cpu_user_regs(void)
+{
+ ulong stack_top = (ulong)parea->hyp_stack_base;
+
+ return (struct cpu_user_regs *)(stack_top - STACK_VOLATILE_AREA
+ - sizeof (struct cpu_user_regs));
+}
+
+/* XXX *#%(ing circular header dependencies force this to be a macro */
+/* If the vcpu is running, its state is still on the stack, and the vcpu
+ * structure's copy is obsolete. If the vcpu isn't running, the vcpu structure
+ * holds the only copy. This routine always does the right thing. */
+#define vcpu_regs(v) ({ \
+ struct cpu_user_regs *regs; \
+ if (v == current) \
+ regs = guest_cpu_user_regs(); \
+ else \
+ regs = &v->arch.ctxt; \
+ regs; \
+})
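
A brief usage sketch (illustrative, not part of the patch) of the accessors above; dump_vcpu_pc is a hypothetical helper:

    /* Illustration: reading a vcpu's program counter works both for the
     * currently running vcpu (state lives on this CPU's hypervisor stack)
     * and for a descheduled one (state lives in v->arch.ctxt). */
    static void dump_vcpu_pc(struct vcpu *v)
    {
        struct cpu_user_regs *regs = vcpu_regs(v);
        printk("vcpu %p: pc=0x%lx msr=0x%lx\n", v, regs->pc, regs->msr);
    }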
+
+
+static inline void reset_stack_and_jump(void (*f)(void))
+{
+ void _reset_stack_and_jump(void (*f)(void), struct cpu_user_regs *regs);
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+#ifdef TRACK_RESUME
+ printk("PC: 0x%lx, MSR: 0x%lx\n", regs->pc, regs->msr);
+#endif
+
+ _reset_stack_and_jump(f, regs);
+}
+
+#endif
diff --git a/xen/include/asm-powerpc/debugger.h b/xen/include/asm-powerpc/debugger.h
new file mode 100644
index 0000000000..8431359ce0
--- /dev/null
+++ b/xen/include/asm-powerpc/debugger.h
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_DEBUGGER_H_
+#define _ASM_DEBUGGER_H_
+
+#ifdef CRASH_DEBUG
+
+#include <xen/gdbstub.h>
+
+static inline int debugger_trap_fatal(
+ unsigned int vector, struct cpu_user_regs *regs)
+{
+ (void)__trap_to_gdb(regs, vector);
+ return vector;
+}
+
+#define debugger_trap_immediate() __asm__ __volatile__ ("trap");
+
+#else /* CRASH_DEBUG */
+
+#define debugger_trap_fatal(_v, _r) (0)
+#define debugger_trap_immediate() ((void)0)
+
+#endif /* CRASH_DEBUG */
+
+#endif
diff --git a/xen/include/asm-powerpc/delay.h b/xen/include/asm-powerpc/delay.h
new file mode 100644
index 0000000000..f6dcde95ad
--- /dev/null
+++ b/xen/include/asm-powerpc/delay.h
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_DELAY_H_
+#define _ASM_DELAY_H_
+
+extern unsigned long ticks_per_usec;
+#define __udelay udelay
+extern void udelay(unsigned long usecs);
+
+#endif
diff --git a/xen/include/asm-powerpc/desc.h b/xen/include/asm-powerpc/desc.h
new file mode 100644
index 0000000000..600fb56268
--- /dev/null
+++ b/xen/include/asm-powerpc/desc.h
@@ -0,0 +1,25 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_DESC_H_
+#define _ASM_DESC_H_
+
+
+#endif
diff --git a/xen/include/asm-powerpc/div64.h b/xen/include/asm-powerpc/div64.h
new file mode 100644
index 0000000000..12bc392b28
--- /dev/null
+++ b/xen/include/asm-powerpc/div64.h
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Maria Butrico <butrico@us.ibm.com>
+ */
+
+#ifndef _DIV64_H_
+#define _DIV64_H_
+
+#include <xen/types.h>
+
+#define do_div(num,base) ({ \
+ uint32_t _remainder = (uint64_t)(num) % \
+ (uint32_t)(base); \
+ num = (uint64_t)(num) / (uint32_t)(base); \
+ _remainder; \
+})
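
A small usage note (illustrative): do_div() updates 'num' in place with the 64-by-32 quotient and evaluates to the remainder, matching the Linux convention.

    /* Illustration of the do_div() calling convention. */
    static inline uint32_t ns_to_us_example(void)
    {
        uint64_t ns = 1000000123ULL;
        uint32_t rem = do_div(ns, 1000);   /* ns becomes 1000000, rem = 123 */
        return rem;
    }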
+
+#endif /* #ifndef _DIV64_H_ */
diff --git a/xen/include/asm-powerpc/domain.h b/xen/include/asm-powerpc/domain.h
new file mode 100644
index 0000000000..1bb7195f41
--- /dev/null
+++ b/xen/include/asm-powerpc/domain.h
@@ -0,0 +1,114 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_DOMAIN_H_
+#define _ASM_DOMAIN_H_
+
+#include <xen/cache.h>
+#include <xen/sched.h>
+#include <xen/list.h>
+#include <xen/errno.h>
+#include <xen/mm.h>
+#include <public/arch-powerpc.h>
+#include <asm/htab.h>
+#include <asm/powerpc64/ppc970.h>
+
+struct arch_domain {
+ struct domain_htab htab;
+ /* The RMO area is fixed to the domain and is accessible while the
+ * processor is in real mode */
+ ulong rma_base;
+ ulong rma_size;
+
+    /* This is regular memory, only available through translation */
+ ulong logical_base_pfn;
+ ulong logical_end_pfn;
+
+ /* I/O-port access bitmap mask. */
+ u8 *iobmp_mask; /* Address of IO bitmap mask, or NULL. */
+
+ uint large_page_sizes;
+ char large_page_shift[4];
+} __cacheline_aligned;
+
+struct slb_entry {
+ ulong slb_vsid;
+ ulong slb_esid;
+};
+
+struct xencomm;
+
+typedef struct {
+ u32 u[4];
+} __attribute__((aligned(16))) vector128;
+
+struct arch_vcpu {
+ cpu_user_regs_t ctxt; /* User-level CPU registers */
+
+#ifdef HAS_FLOAT
+ double fprs[NUM_FPRS];
+#endif
+#ifdef HAS_VMX
+ vector128 vrs[32];
+ vector128 vscr;
+ u32 vrsave;
+#endif
+
+ /* Special-Purpose Registers */
+ ulong sprg[4];
+ ulong timebase;
+ ulong dar;
+ ulong dsisr;
+
+ /* Segment Lookaside Buffer */
+ struct slb_entry slb_entries[NUM_SLB_ENTRIES];
+
+ /* I/O-port access bitmap. */
+ u8 *iobmp; /* Guest kernel virtual address of the bitmap. */
+ int iobmp_limit; /* Number of ports represented in the bitmap. */
+ int iopl; /* Current IOPL for this VCPU. */
+
+ u32 dec;
+ struct cpu_vcpu cpu; /* CPU-specific bits */
+ struct xencomm *xencomm;
+} __cacheline_aligned;
+
+extern void full_resume(void);
+
+extern void save_sprs(struct vcpu *);
+extern void load_sprs(struct vcpu *);
+extern void save_segments(struct vcpu *);
+extern void load_segments(struct vcpu *);
+extern void save_float(struct vcpu *);
+extern void load_float(struct vcpu *);
+
+#define RMA_SHARED_INFO 1
+#define RMA_START_INFO 2
+#define RMA_LAST_DOM0 2
+/* these are not used for dom0 so they should be last */
+#define RMA_CONSOLE 3
+#define RMA_LAST_DOMU 3
+
+static inline ulong rma_addr(struct arch_domain *d, int type)
+{
+ return d->rma_size - (type * PAGE_SIZE);
+}
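
A worked example (illustrative only) of the RMA layout implied by rma_addr(): the per-domain special pages are carved from the top of the real-mode area.

    /* Illustration: for a domain with a 128 MiB RMA and 4 KiB pages,
     *   rma_addr(&d->arch, RMA_SHARED_INFO) == 128 MiB - 1 * PAGE_SIZE
     *   rma_addr(&d->arch, RMA_START_INFO)  == 128 MiB - 2 * PAGE_SIZE
     *   rma_addr(&d->arch, RMA_CONSOLE)     == 128 MiB - 3 * PAGE_SIZE  (domU only)
     * i.e. the special pages sit just below the top of the RMA, in the
     * order of their RMA_* index.
     */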
+
+#endif
diff --git a/xen/include/asm-powerpc/event.h b/xen/include/asm-powerpc/event.h
new file mode 100644
index 0000000000..1dd4e5b3c3
--- /dev/null
+++ b/xen/include/asm-powerpc/event.h
@@ -0,0 +1,99 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_EVENT_H__
+#define __ASM_EVENT_H__
+
+#include <asm/current.h>
+
+/* copied from x86 evtchn_notify() */
+static inline void evtchn_notify(struct vcpu *v)
+{
+#ifdef XXX_NO_SMP_YET
+ int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ vcpu_unblock(v);
+ if (running)
+ smp_send_event_check_cpu(v->processor);
+#else
+ vcpu_unblock(v);
+#endif
+}
+
+static inline int local_event_delivery_is_enabled(void)
+{
+ return !!(vcpu_regs(current)->msr & MSR_EE);
+}
+
+static inline void local_event_delivery_disable(void)
+{
+ vcpu_regs(current)->msr &= ~MSR_EE;
+}
+
+static inline void local_event_delivery_enable(void)
+{
+ vcpu_regs(current)->msr |= MSR_EE;
+}
+
+static inline int local_events_need_delivery(void)
+{
+ struct vcpu *v = current;
+ /* Note: Bitwise operations result in fast code with no branches. */
+ return (!!v->vcpu_info->evtchn_upcall_pending &
+ local_event_delivery_is_enabled());
+}
+
+/* No arch specific virq definition now. Default to global. */
+static inline int arch_virq_is_global(int virq)
+{
+ return 1;
+}
+
+static inline void vcpu_kick(struct vcpu *v)
+{
+ /*
+ * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+ * pending flag. These values may fluctuate (after all, we hold no
+ * locks) but the key insight is that each change will cause
+ * evtchn_upcall_pending to be polled.
+ *
+ * NB2. We save VCPUF_running across the unblock to avoid a needless
+ * IPI for domains that we IPI'd to unblock.
+ */
+ int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ vcpu_unblock(v);
+ if (running)
+ smp_send_event_check_cpu(v->processor);
+}
+
+/* HACK: evtchn_upcall_pending is only a byte, but our atomic instructions
+ * only store in 4/8 byte quantities. However, because evtchn_upcall_pending
+ * is part of the guest ABI, we can't change its size without breaking
+ * backwards compatibility. In this particular case, struct vcpu_info is big
+ * enough that we can safely store a full long into it. However, note that bit
+ * 0 of evtchn_upcall_pending is bit 56 when cast to a long.
+ */
+static inline void vcpu_mark_events_pending(struct vcpu *v)
+{
+ unsigned long *l = (unsigned long *)&v->vcpu_info->evtchn_upcall_pending;
+ if (!test_and_set_bit(BITS_PER_LONG - 8, l))
+ vcpu_kick(v);
+}
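
To spell out the bit arithmetic in the comment above (illustrative): on a 64-bit big-endian long, byte 0 of the structure is the most significant byte, so its bit 0 is bit 56 when the whole long is addressed with the doubleword bitops.

    /* Illustration: evtchn_upcall_pending is byte 0 of the long that starts
     * at the same address.  Big-endian byte 0 covers bits 56..63 of that long
     * (in the 0 = least-significant numbering used by test_and_set_bit), so
     * setting bit BITS_PER_LONG - 8 == 56 sets bit 0 of the byte:
     *
     *   byte index:   0        1       ...      7
     *   long bits:  [63..56] [55..48]  ...   [7..0]
     */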
+
+#endif
diff --git a/xen/include/asm-powerpc/flushtlb.h b/xen/include/asm-powerpc/flushtlb.h
new file mode 100644
index 0000000000..67e454a0fd
--- /dev/null
+++ b/xen/include/asm-powerpc/flushtlb.h
@@ -0,0 +1,108 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_FLUSHTLB_H_
+#define _ASM_FLUSHTLB_H_
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <asm/misc.h>
+
+/* The current time as shown by the virtual TLB clock. */
+extern u32 tlbflush_clock;
+#define tlbflush_current_time() tlbflush_clock
+
+/* Time at which each CPU's TLB was last flushed. */
+extern u32 tlbflush_time[NR_CPUS];
+
+static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
+{
+#if 0
+ trap();
+#endif
+ return 0;
+}
+
+/*
+ * Filter the given set of CPUs, removing those that definitely flushed their
+ * TLB since @page_timestamp.
+ */
+#define tlbflush_filter(mask, page_timestamp) \
+do { \
+ unsigned int cpu; \
+ for_each_cpu_mask ( cpu, mask ) \
+ if ( !NEED_FLUSH(tlbflush_time[cpu], page_timestamp) ) \
+ cpu_clear(cpu, mask); \
+} while ( 0 )
+
+
+static inline void tlbiel(unsigned long eaddr)
+{
+#if 0
+ asm volatile("tlbiel %0" : : "r"(eaddr) : "memory");
+#else
+ asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(eaddr) : "memory");
+#endif
+}
+
+/* Lots of paranoia in flush_tlb_*; could probably be relaxed later. */
+static inline void local_flush_tlb_one(unsigned long eaddr)
+{
+ asm volatile("ptesync" : : : "memory");
+ tlbiel(eaddr);
+ asm volatile("eieio; tlbsync" : : : "memory");
+}
+
+static inline void local_flush_tlb(void)
+{
+ ulong rb;
+ int i;
+
+ asm volatile("ptesync" : : : "memory");
+
+ for (i = 0; i < 256; i++) {
+ rb = i;
+ rb <<= 12;
+ tlbiel(rb);
+ }
+ asm volatile("eieio": : : "memory");
+}
+
+#ifndef CONFIG_SMP
+#define flush_tlb_mask(_mask) local_flush_tlb()
+#define flush_tlb_one_mask(_mask,_addr) local_flush_tlb_one(_addr)
+#else
+extern void __flush_tlb_mask(cpumask_t mask, unsigned long addr);
+
+#define FLUSH_ALL_ADDRS (~0UL)
+#define flush_tlb_mask(_mask) __flush_tlb_mask(_mask,FLUSH_ALL_ADDRS)
+#define flush_tlb_one_mask(_mask,_addr) __flush_tlb_mask(_mask,_addr)
+#endif /* CONFIG_SMP */
+
+/*
+ * Filter the given set of CPUs, returning only those that may not have
+ * flushed their TLBs since @page_timestamp.
+ */
+static inline unsigned long tlbflush_filter_cpuset(
+ unsigned long cpuset, u32 page_timestamp)
+{
+ return 0;
+}
+#endif
diff --git a/xen/include/asm-powerpc/grant_table.h b/xen/include/asm-powerpc/grant_table.h
new file mode 100644
index 0000000000..b439b53d64
--- /dev/null
+++ b/xen/include/asm-powerpc/grant_table.h
@@ -0,0 +1,64 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef __ASM_PPC_GRANT_TABLE_H__
+#define __ASM_PPC_GRANT_TABLE_H__
+
+#include <asm/mm.h>
+
+#define ORDER_GRANT_FRAMES 2
+
+/*
+ * Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
+ * must hold a reference to the page.
+ */
+int create_grant_host_mapping(
+ unsigned long addr, unsigned long frame, unsigned int flags);
+int destroy_grant_host_mapping(
+ unsigned long addr, unsigned long frame, unsigned int flags);
+
+#define gnttab_create_shared_page(d, t, i) \
+ do { \
+ share_xen_page_with_guest( \
+ virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), \
+ (d), XENSHARE_writable); \
+ } while ( 0 )
+
+#define gnttab_shared_mfn(d, t, i) \
+ ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
+
+#define gnttab_shared_gmfn(d, t, i) \
+ (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
+
+#define gnttab_log_dirty(d, f) ((void )0)
+
+static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr)
+{
+ unsigned long *laddr;
+ unsigned long lnr;
+
+ BUG_ON((ulong)addr % sizeof(ulong));
+
+ lnr = (BITS_PER_LONG - (sizeof(*addr) * 8)) + nr;
+ laddr = (unsigned long *)addr;
+ clear_bit(lnr, laddr);
+}
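
A worked example (illustrative) of the index adjustment above for the 16-bit flags word:

    /* Illustration: addr points at a uint16_t that is 8-byte aligned, so it
     * occupies the two most significant bytes of the enclosing big-endian
     * 64-bit long.  Bit nr of the uint16_t is therefore bit
     * (BITS_PER_LONG - 16) + nr == 48 + nr of that long:
     *   nr = 0  ->  clear_bit(48, laddr)
     *   nr = 1  ->  clear_bit(49, laddr)
     */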
+
+#endif /* __ASM_PPC_GRANT_TABLE_H__ */
diff --git a/xen/include/asm-powerpc/guest_access.h b/xen/include/asm-powerpc/guest_access.h
new file mode 100644
index 0000000000..7b77642318
--- /dev/null
+++ b/xen/include/asm-powerpc/guest_access.h
@@ -0,0 +1,99 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __PPC_GUEST_ACCESS_H__
+#define __PPC_GUEST_ACCESS_H__
+
+extern unsigned long xencomm_copy_to_guest(void *to, const void *from,
+ unsigned int len, unsigned int skip);
+extern unsigned long xencomm_copy_from_guest(void *to, const void *from,
+ unsigned int len, unsigned int skip);
+extern void xencomm_add_offset(void *handle, unsigned int bytes);
+
+/* Is the guest handle a NULL reference? */
+#define guest_handle_is_null(hnd) ((hnd).p == NULL)
+
+/* Offset the given guest handle into the array it refers to. */
+#define guest_handle_add_offset(hnd, nr) ({ \
+ const typeof((hnd).p) _ptr = (hnd).p; \
+ xencomm_add_offset(_ptr, nr * sizeof(*_ptr)); \
+})
+
+/* Cast a guest handle to the specified type of handle. */
+#define guest_handle_cast(hnd, type) ({ \
+ type *_x = (hnd).p; \
+ XEN_GUEST_HANDLE(type) _y; \
+ set_xen_guest_handle(_y, _x); \
+ _y; \
+})
+
+/* Since we run in real mode, we can safely access all addresses. That also
+ * means our __routines are identical to our "normal" routines. */
+#define guest_handle_okay(hnd, nr) 1
+
+/*
+ * Copy an array of objects to guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_to_guest_offset(hnd, idx, ptr, nr) \
+ __copy_to_guest_offset(hnd, idx, ptr, nr)
+
+/* Copy sub-field of a structure to guest context via a guest handle. */
+#define copy_field_to_guest(hnd, ptr, field) \
+ __copy_field_to_guest(hnd, ptr, field)
+
+/*
+ * Copy an array of objects from guest context via a guest handle.
+ * Optionally specify an offset into the guest array.
+ */
+#define copy_from_guest_offset(ptr, hnd, idx, nr) \
+ __copy_from_guest_offset(ptr, hnd, idx, nr)
+
+/* Copy sub-field of a structure from guest context via a guest handle. */
+#define copy_field_from_guest(ptr, hnd, field) \
+ __copy_field_from_guest(ptr, hnd, field)
+
+#define __copy_to_guest_offset(hnd, idx, ptr, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ xencomm_copy_to_guest(_x, _y, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
+})
+
+#define __copy_field_to_guest(hnd, ptr, field) ({ \
+ const int _off = offsetof(typeof(*ptr), field); \
+ const typeof(&(ptr)->field) _x = &(hnd).p->field; \
+ const typeof(&(ptr)->field) _y = &(ptr)->field; \
+ xencomm_copy_to_guest(_x, _y, sizeof(*_x), sizeof(*_x)*(_off)); \
+})
+
+#define __copy_from_guest_offset(ptr, hnd, idx, nr) ({ \
+ const typeof(ptr) _x = (hnd).p; \
+ const typeof(ptr) _y = (ptr); \
+ xencomm_copy_from_guest(_y, _x, sizeof(*_x)*(nr), sizeof(*_x)*(idx)); \
+})
+
+#define __copy_field_from_guest(ptr, hnd, field) ({ \
+ const int _off = offsetof(typeof(*ptr), field); \
+ const typeof(&(ptr)->field) _x = &(hnd).p->field; \
+ const typeof(&(ptr)->field) _y = &(ptr)->field; \
+ xencomm_copy_to_guest(_y, _x, sizeof(*_x), sizeof(*_x)*(_off)); \
+})
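
A hedged usage sketch (not part of the patch) of the copy helpers above; the hypercall body and names are hypothetical, and XEN_GUEST_HANDLE(xen_pfn_t) is assumed to be declared by the public headers.

    /* Illustration: copy one array element in from a guest handle and a
     * result back out. */
    static long example_op(XEN_GUEST_HANDLE(xen_pfn_t) pfns, unsigned int idx)
    {
        xen_pfn_t pfn;

        if (copy_from_guest_offset(&pfn, pfns, idx, 1))
            return -EFAULT;

        pfn += 1;   /* ... some processing ... */

        if (copy_to_guest_offset(pfns, idx, &pfn, 1))
            return -EFAULT;
        return 0;
    }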
+
+#endif /* __PPC_GUEST_ACCESS_H__ */
diff --git a/xen/include/asm-powerpc/hardirq.h b/xen/include/asm-powerpc/hardirq.h
new file mode 100644
index 0000000000..eda64792f4
--- /dev/null
+++ b/xen/include/asm-powerpc/hardirq.h
@@ -0,0 +1,21 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "../asm-x86/hardirq.h"
diff --git a/xen/include/asm-powerpc/hcalls.h b/xen/include/asm-powerpc/hcalls.h
new file mode 100644
index 0000000000..a0a33f6bf4
--- /dev/null
+++ b/xen/include/asm-powerpc/hcalls.h
@@ -0,0 +1,34 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_HCALLS_H_
+#define _ASM_HCALLS_H_
+
+#include <xen/config.h>
+#include <xen/types.h>
+
+/* table of standard PAPR hcalls */
+extern u32 *papr_hcalls;
+
+extern void papr_hcall_jump(struct cpu_user_regs *regs, u32 address);
+
+#define XEN_MARK(a) ((a) | (~0UL << 16))
+
+#endif
diff --git a/xen/include/asm-powerpc/htab.h b/xen/include/asm-powerpc/htab.h
new file mode 100644
index 0000000000..55b3259459
--- /dev/null
+++ b/xen/include/asm-powerpc/htab.h
@@ -0,0 +1,142 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_HTAB_H_
+#define _ASM_HTAB_H_
+
+#include <xen/config.h>
+#include <xen/types.h>
+
+/***** general PowerPC architecture limits ******/
+
+#define LOG_DEFAULT_HTAB_BYTES 20
+#define DEFAULT_HTAB_BYTES      (1UL << LOG_DEFAULT_HTAB_BYTES)
+
+/* 256KB, from PowerPC Architecture specification */
+#define HTAB_MIN_LOG_SIZE 18
+
+#define LOG_NUM_PTES_IN_PTEG 3
+#define NUM_PTES_IN_PTEG (1 << LOG_NUM_PTES_IN_PTEG)
+#define LOG_PTE_SIZE 4
+#define LOG_PTEG_SIZE (LOG_NUM_PTES_IN_PTEG + LOG_PTE_SIZE)
+#define LOG_HTAB_HASH (LOG_HTAB_SIZE - LOG_PTEG_SIZE)
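
Worked arithmetic (illustrative) for the hash-table geometry constants above:

    /* Illustration: each PTE is 2^4 = 16 bytes and each PTEG holds
     * 2^3 = 8 PTEs, so a PTEG is 2^7 = 128 bytes.  For the default
     * 2^20-byte (1 MiB) HTAB that gives 2^(20-7) = 8192 PTEGs, i.e.
     * LOG_HTAB_HASH = 13 bits of the hash are used to pick a PTEG.
     */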
+
+/* real page number shift to create the rpn field of the pte */
+#define RPN_SHIFT 12
+
+/* page protection bits in pp1 (name format: MSR:PR=0 | MSR:PR=1) */
+#define PP_RWxx 0x0UL
+#define PP_RWRW 0x2UL
+#define PP_RWRx 0x4UL
+#define PP_RxRx 0x6UL
+
+/***** 64-bit PowerPC architecture limits ******/
+
+#define SDR1_HTABORG_MASK 0xfffffffffff80000ULL
+#define SDR1_HTABSIZE_MASK 0x1fUL
+#define SDR1_HTABSIZE_MAX 46
+#define SDR1_HTABSIZE_BASEBITS 11
+
+/* used to turn a vsid into a number usable in the hash function */
+#define VSID_HASH_MASK 0x0000007fffffffffUL
+
+/* used to turn a vaddr into an API (abbreviated page index) for a PTE */
+#define VADDR_TO_API(vaddr) (((vaddr) & API_MASK) >> API_SHIFT)
+#define API_VEC 0x1fUL
+#define API_SHIFT 23
+#define API_MASK (API_VEC << API_SHIFT)
+
+/***** hypervisor internals ******/
+
+/* 64M: reasonable hypervisor limit? */
+#define HTAB_MAX_LOG_SIZE 26
+
+#define GET_HTAB(domain) ((domain)->arch.htab.sdr1 & SDR1_HTABORG_MASK)
+
+union pte {
+ struct pte_words {
+ ulong vsid;
+ ulong rpn;
+ } words;
+ struct pte_bits {
+ /* *INDENT-OFF* */
+ /* high word */
+ ulong avpn: 57; /* [0-56] abbreviated virtual page number */
+ ulong lock: 1; /* [57] hypervisor lock bit */
+ ulong res: 1; /* [58] reserved for hypervisor */
+ ulong bolted: 1; /* [59] XXX software-reserved; temp hack */
+ ulong sw: 1; /* [60] reserved for software */
+ ulong l: 1; /* [61] Large Page */
+ ulong h: 1; /* [62] hash function id */
+ ulong v: 1; /* [63] valid */
+
+ /* low word */
+ ulong pp0: 1; /* [0] page protection bit 0 (current PowerPC
+ * specification says it can always be 0) */
+ ulong ts: 1; /* [1] tag select */
+ ulong rpn: 50; /* [2-51] real page number */
+ ulong res2: 2; /* [52,53] reserved */
+ ulong ac: 1; /* [54] address compare */
+ ulong r: 1; /* [55] referenced */
+ ulong c: 1; /* [56] changed */
+ ulong w: 1; /* [57] write through */
+ ulong i: 1; /* [58] cache inhibited */
+ ulong m: 1; /* [59] memory coherent */
+ ulong g: 1; /* [60] guarded */
+ ulong n: 1; /* [61] no-execute */
+ ulong pp1: 2; /* [62,63] page protection bits 1:2 */
+ /* *INDENT-ON* */
+ } bits;
+};
+
+union ptel {
+ ulong word;
+ struct ptel_bits {
+ /* *INDENT-OFF* */
+
+ ulong pp0: 1; /* page protection bit 0 (current PPC
+ * AS says it can always be 0) */
+ ulong ts: 1; /* tag select */
+ ulong rpn: 50; /* real page number */
+ ulong res2: 2; /* reserved */
+ ulong ac: 1; /* address compare */
+ ulong r: 1; /* referenced */
+ ulong c: 1; /* changed */
+ ulong w: 1; /* write through */
+ ulong i: 1; /* cache inhibited */
+ ulong m: 1; /* memory coherent */
+ ulong g: 1; /* guarded */
+ ulong n: 1; /* no-execute */
+ ulong pp1: 2; /* page protection bits 1:2 */
+ /* *INDENT-ON* */
+ } bits;
+};
+
+struct domain_htab {
+ ulong sdr1;
+ ulong log_num_ptes; /* log number of PTEs in HTAB. */
+ union pte *map; /* access the htab like an array */
+ ulong *shadow; /* idx -> logical translation array */
+};
+
+struct domain;
+extern void htab_alloc(struct domain *d, int log_htab_bytes);
+extern void htab_free(struct domain *d);
+#endif
diff --git a/xen/include/asm-powerpc/hypercall.h b/xen/include/asm-powerpc/hypercall.h
new file mode 100644
index 0000000000..90dbe39279
--- /dev/null
+++ b/xen/include/asm-powerpc/hypercall.h
@@ -0,0 +1,26 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_PPC_HYPERCALL_H__
+#define __ASM_PPC_HYPERCALL_H__
+
+#define arch_do_vcpu_op(cmd, vcpu, arg) (-ENOSYS)
+
+#endif /* __ASM_PPC_HYPERCALL_H__ */
diff --git a/xen/include/asm-powerpc/init.h b/xen/include/asm-powerpc/init.h
new file mode 100644
index 0000000000..7ac4f0cbf7
--- /dev/null
+++ b/xen/include/asm-powerpc/init.h
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _XEN_ASM_INIT_H
+#define _XEN_ASM_INIT_H
+
+/*
+ * Mark functions and data as being only used at initialization
+ * or exit time.
+ */
+#define __init \
+ __attribute__ ((__section__ (".init.text")))
+#define __exit \
+ __attribute_used__ __attribute__ ((__section__(".text.exit")))
+#define __initdata \
+ __attribute__ ((__section__ (".init.data")))
+#define __exitdata \
+ __attribute_used__ __attribute__ ((__section__ (".data.exit")))
+#define __initsetup \
+ __attribute_used__ __attribute__ ((__section__ (".setup.init")))
+#define __init_call \
+ __attribute_used__ __attribute__ ((__section__ (".initcall.init")))
+#define __exit_call \
+ __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
+
+struct cpu_user_regs;
+typedef void (*hcall_handler_t)(struct cpu_user_regs *regs);
+
+typedef struct {
+ unsigned long number;
+ hcall_handler_t handler;
+} inithcall_t;
+extern inithcall_t __inithcall_start, __inithcall_end;
+
+#define __init_papr_hcall(nr, fn) \
+ static inithcall_t __inithcall_##fn __init_hcall \
+ = { .number = nr, .handler = fn }
+
+#define __init_hcall \
+ __attribute_used__ __attribute__ ((__section__ (".inithcall.text")))
+
+#endif /* _XEN_ASM_INIT_H */
diff --git a/xen/include/asm-powerpc/io.h b/xen/include/asm-powerpc/io.h
new file mode 100644
index 0000000000..1e2d31cbb2
--- /dev/null
+++ b/xen/include/asm-powerpc/io.h
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_IO_H_
+#define _ASM_IO_H_
+
+#include <xen/types.h>
+#include <asm/processor.h>
+
+#define IO_SPACE_LIMIT 0xffff
+
+extern unsigned int watchdog_on;
+extern ulong isa_io_base;
+
+#define inb(port) in_8((void *)((ulong)(port) + isa_io_base))
+#define outb(val, port) out_8((void *)((ulong)(port) + isa_io_base), val)
+#define inw(port) in_le16((void *)((ulong)(port) + isa_io_base))
+#define outw(val, port) out_le16((void *)((ulong)(port) + isa_io_base), val)
+#define inl(port) in_le32((void *)((ulong)(port) + isa_io_base))
+#define outl(val, port) out_le32((void *)((ulong)(port) + isa_io_base), val)
+
+#define ioremap(x,l) (void __iomem *)(x)
+#define readb(port) in_8((void *)(port))
+#define writeb(val, port) out_8((void *)(port), val)
+
+
+extern u8 in_8(const volatile u8 *addr);
+extern void out_8(volatile u8 *addr, int val);
+extern u32 in_32(const volatile u32 *addr);
+extern void out_32(volatile u32 *addr, int val);
+extern int in_le16(const volatile unsigned short *addr);
+extern void out_le16(volatile unsigned short *addr, int val);
+extern unsigned in_le32(const volatile unsigned *addr);
+extern void out_le32(volatile unsigned *addr, int val);
+
+#define in_be8 in_8
+#define in_be16 in_16
+#define in_be32 in_32
+#define out_be8 out_8
+#define out_be16 out_16
+#define out_be32 out_32
+
+#define readw(port) in_le16((void *)(port))
+#define readl(port) in_le32((void *)(port))
+#define writew(val, port) out_le16((void *)(port), val)
+#define writel(val, port) out_le32((void *)(port), val)
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#endif
diff --git a/xen/include/asm-powerpc/iocap.h b/xen/include/asm-powerpc/iocap.h
new file mode 100644
index 0000000000..da9a433c47
--- /dev/null
+++ b/xen/include/asm-powerpc/iocap.h
@@ -0,0 +1,26 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _PPC_IOCAP_H
+#define _PPC_IOCAP_H
+
+/* All PowerPC IO is MMIO. */
+
+#endif /* _PPC_IOCAP_H */
diff --git a/xen/include/asm-powerpc/irq.h b/xen/include/asm-powerpc/irq.h
new file mode 100644
index 0000000000..9b90ac1cb1
--- /dev/null
+++ b/xen/include/asm-powerpc/irq.h
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _ASM_PPC_HW_IRQ_H
+#define _ASM_PPC_HW_IRQ_H
+#include "../asm-x86/irq.h"
+
+#undef vector_to_irq
+#define vector_to_irq(vec) (vec)
+#undef irq_to_vector
+#define irq_to_vector(irq) (irq)
+
+extern int assign_irq_vector (int irq);
+#endif
diff --git a/xen/include/asm-powerpc/mach-default/irq_vectors.h b/xen/include/asm-powerpc/mach-default/irq_vectors.h
new file mode 100644
index 0000000000..9a851cab42
--- /dev/null
+++ b/xen/include/asm-powerpc/mach-default/irq_vectors.h
@@ -0,0 +1,105 @@
+/* from xen/include/asm-x86/mach-default/irq_vectors.h */
+
+/*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ * FIRST_EXTERNAL_VECTOR:
+ * The first free place for external interrupts
+ *
+ * SYSCALL_VECTOR:
+ * The IRQ vector a syscall makes the user to kernel transition
+ * under.
+ *
+ * TIMER_IRQ:
+ * The IRQ number the timer interrupt comes in at.
+ *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+ *
+ */
+#ifndef _ASM_IRQ_VECTORS_H
+#define _ASM_IRQ_VECTORS_H
+
+/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x0:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x0
+#define FIRST_DEVICE_VECTOR 0
+#define NR_IRQS 256
+#define NR_VECTORS NR_IRQS
+#define NR_IRQ_VECTORS NR_IRQS
+#define HYPERCALL_VECTOR -1
+#define FAST_TRAP -1 /* 0x80 */
+#define FIRST_SYSTEM_VECTOR -1
+
+#if 0
+
+/*
+ * Vectors 0-16 in some cases are used for ISA interrupts.
+ */
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * some of the following vectors are 'rare', they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+#define SPURIOUS_APIC_VECTOR 0xff
+#define ERROR_APIC_VECTOR 0xfe
+#define INVALIDATE_TLB_VECTOR 0xfd
+#define EVENT_CHECK_VECTOR 0xfc
+#define CALL_FUNCTION_VECTOR 0xfb
+
+#define THERMAL_APIC_VECTOR 0xf0
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+
+/*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+#define FIRST_DEVICE_VECTOR 0x31
+#define FIRST_SYSTEM_VECTOR 0xef
+
+#define TIMER_IRQ 0
+
+/*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
+/*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+#define NR_VECTORS 256
+
+#include "irq_vectors_limits.h"
+
+#define FPU_IRQ 13
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+
+#endif // 0
+#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/xen/include/asm-powerpc/memory.h b/xen/include/asm-powerpc/memory.h
new file mode 100644
index 0000000000..369a627201
--- /dev/null
+++ b/xen/include/asm-powerpc/memory.h
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _ASM_MEMORY_H_
+#define _ASM_MEMORY_H_
+
+#include <xen/config.h>
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP "eieio\n"
+#define ISYNC_ON_SMP "\n\tisync"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#endif
+
+#endif
diff --git a/xen/include/asm-powerpc/misc.h b/xen/include/asm-powerpc/misc.h
new file mode 100644
index 0000000000..b3ec14d44e
--- /dev/null
+++ b/xen/include/asm-powerpc/misc.h
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_MISC_H_
+#define _ASM_MISC_H_
+
+static inline void unimplemented(void)
+{
+}
+
+static inline void trap(void)
+{
+ asm volatile("trap");
+}
+
+#endif
diff --git a/xen/include/asm-powerpc/mm.h b/xen/include/asm-powerpc/mm.h
new file mode 100644
index 0000000000..caac7fd6d8
--- /dev/null
+++ b/xen/include/asm-powerpc/mm.h
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_MM_H_
+#define _ASM_MM_H_
+
+#include <public/xen.h>
+#include <xen/list.h>
+#include <xen/types.h>
+#include <asm/misc.h>
+#include <asm/system.h>
+#include <asm/flushtlb.h>
+#include <asm/uaccess.h>
+
+#define memguard_guard_range(_p,_l) ((void)0)
+#define memguard_unguard_range(_p,_l) ((void)0)
+
+extern unsigned long xenheap_phys_end;
+#define IS_XEN_HEAP_FRAME(_pfn) (page_to_mfn(_pfn) < xenheap_phys_end)
+
+#define cleanup_writable_pagetable(_d)
+
+/*
+ * Per-page-frame information.
+ *
+ * Every architecture must ensure the following:
+ * 1. 'struct page_info' contains a 'struct list_head list'.
+ * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
+ */
+#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+#define PRtype_info "016lx"
+
+/* XXX copy-and-paste job; re-examine me */
+struct page_info
+{
+ /* Each frame can be threaded onto a doubly-linked list. */
+ struct list_head list;
+
+ /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
+ u32 tlbflush_timestamp;
+
+ /* Reference count and various PGC_xxx flags and fields. */
+ unsigned long count_info;
+
+ /* Context-dependent fields follow... */
+ union {
+
+ /* Page is in use: ((count_info & PGC_count_mask) != 0). */
+ struct {
+ /* Owner of this page (NULL if page is anonymous). */
+ struct domain *_domain;
+ /* Type reference count and various PGT_xxx flags and fields. */
+ unsigned long type_info;
+ } inuse;
+
+ /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
+ struct {
+ /* Mask of possibly-tainted TLBs. */
+ cpumask_t cpumask;
+ /* Order-size of the free chunk this page is the head of. */
+ u8 order;
+ } free;
+
+ } u;
+
+};
+
+ /* The following page types are MUTUALLY EXCLUSIVE. */
+#define PGT_none (0<<29) /* no special uses of this page */
+#define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
+#define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
+#define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
+#define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
+#define PGT_gdt_page (5<<29) /* using this page in a GDT? */
+#define PGT_ldt_page (6<<29) /* using this page in an LDT? */
+#define PGT_writable_page (7<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (7<<29) /* Bits 29-31. */
+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated 28
+#define PGT_validated (1U<<_PGT_validated)
+ /* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned 27
+#define PGT_pinned (1U<<_PGT_pinned)
+ /* The 10 most significant bits of virt address if this is a page table. */
+#define PGT_va_shift 17
+#define PGT_va_mask (((1U<<10)-1)<<PGT_va_shift)
+ /* Is the back pointer still mutable (i.e. not fixed yet)? */
+#define PGT_va_mutable (((1U<<10)-1)<<PGT_va_shift)
+ /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
+#define PGT_va_unknown (((1U<<10)-2)<<PGT_va_shift)
+ /* 17-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1U<<17)-1)
+
+ /* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated 31
+#define PGC_allocated (1U<<_PGC_allocated)
+ /* 31-bit count of references to this frame. */
+#define PGC_count_mask ((1U<<31)-1)
+
+static inline void put_page(struct page_info *page)
+{
+#if 0
+ int count;
+
+ count = atomic_dec_return(&page->count_info);
+
+ if ( unlikely((count & PGC_count_mask) == 0) )
+ free_domheap_page(page);
+#else
+ trap();
+#endif
+}
+
+static inline int get_page(struct page_info *page,
+ struct domain *domain)
+{
+#if 0
+ int count;
+
+ count = atomic_inc_return(&page->count_info);
+
+ if (((count & PGC_count_mask) == 0) || /* Count overflow? */
+ ((count & PGC_count_mask) == 1) || /* Wasn't allocated? */
+ ((page->domain != domain))) /* Wrong owner? */
+ {
+ atomic_dec(&page->count_info);
+ return 0;
+ }
+
+#else
+ trap();
+#endif
+ return 1;
+}
+
+static inline int get_page_and_type(struct page_info *page,
+ struct domain *domain,
+ u32 type)
+{
+ trap();
+ return 1;
+}
+
+int get_page_type(struct page_info *page, u32 type);
+
+#define set_machinetophys(_mfn, _pfn) (trap(), 0)
+
+extern void synchronise_pagetables(unsigned long cpu_mask);
+
+static inline void put_page_and_type(struct page_info *page)
+{
+ trap();
+}
+
+/* XXX don't know what this is for */
+typedef struct {
+ void (*enable)(struct domain *);
+ void (*disable)(struct domain *);
+} vm_assist_info_t;
+extern vm_assist_info_t vm_assist_info[];
+
+#define page_get_owner(_p) ((_p)->u.inuse._domain)
+#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = _d)
+
+#define share_xen_page_with_guest(p, d, r) do { } while (0)
+#define share_xen_page_with_privileged_guests(p, r) do { } while (0)
+
+extern struct page_info *frame_table;
+extern unsigned long frame_table_size;
+extern unsigned long max_page;
+extern unsigned long total_pages;
+void init_frametable(void);
+
+/* hope that accesses to this will fail spectacularly */
+#define machine_to_phys_mapping ((u32 *)-1UL)
+
+extern int update_grant_va_mapping(unsigned long va,
+ unsigned long val,
+ struct domain *,
+ struct vcpu *);
+
+extern void put_page_type(struct page_info *page);
+
+#define PFN_TYPE_RMA 0
+#define PFN_TYPE_LOGICAL 1
+#define PFN_TYPE_IO 2
+extern ulong pfn2mfn(struct domain *d, long mfn, int *type);
+
+/* Arch-specific portion of memory_op hypercall. */
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
+
+/* XXX implement me? */
+#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
+/* XXX only used for debug print right now... */
+#define get_gpfn_from_mfn(mfn) (mfn)
+
+static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
+{
+ return pfn2mfn(d, gmfn, NULL);
+}
+
+#define mfn_to_gmfn(_d, mfn) (mfn)
+
+extern int steal_page(struct domain *d, struct page_info *page,
+ unsigned int memflags);
+
+#endif
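A short sketch of how the PGT_*/PGC_* fields above are meant to be decoded (illustrative only, not part of this checkin; the helper name is invented):

static inline int sketch_page_is_pinned_writable(const struct page_info *pg)
{
    unsigned long x = pg->u.inuse.type_info;

    /* The type lives in bits 29-31, the type-use count in the low 17 bits. */
    return (x & PGT_type_mask) == PGT_writable_page &&
           (x & PGT_pinned) != 0 &&
           (x & PGT_count_mask) != 0;
}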
diff --git a/xen/include/asm-powerpc/mpic.h b/xen/include/asm-powerpc/mpic.h
new file mode 100644
index 0000000000..45cf5f692b
--- /dev/null
+++ b/xen/include/asm-powerpc/mpic.h
@@ -0,0 +1,294 @@
+/* from linux/include/asm-ppc/mpic.h */
+
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+//#ifdef __KERNEL__
+
+//#include <linux/irq.h>
+#include <xen/irq.h>
+
+/*
+ * Global registers
+ */
+
+#define MPIC_GREG_BASE 0x01000
+
+#define MPIC_GREG_FEATURE_0 0x00000
+#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
+#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
+#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
+#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
+#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
+#define MPIC_GREG_FEATURE_1 0x00010
+#define MPIC_GREG_GLOBAL_CONF_0 0x00020
+#define MPIC_GREG_GCONF_RESET 0x80000000
+#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
+#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
+#define MPIC_GREG_GLOBAL_CONF_1 0x00030
+#define MPIC_GREG_VENDOR_0 0x00040
+#define MPIC_GREG_VENDOR_1 0x00050
+#define MPIC_GREG_VENDOR_2 0x00060
+#define MPIC_GREG_VENDOR_3 0x00070
+#define MPIC_GREG_VENDOR_ID 0x00080
+#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+#define MPIC_GREG_PROCESSOR_INIT 0x00090
+#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
+#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
+#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
+#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
+#define MPIC_GREG_SPURIOUS 0x000e0
+#define MPIC_GREG_TIMER_FREQ 0x000f0
+
+/*
+ * Timer registers
+ */
+#define MPIC_TIMER_BASE 0x01100
+#define MPIC_TIMER_STRIDE 0x40
+
+#define MPIC_TIMER_CURRENT_CNT 0x00000
+#define MPIC_TIMER_BASE_CNT 0x00010
+#define MPIC_TIMER_VECTOR_PRI 0x00020
+#define MPIC_TIMER_DESTINATION 0x00030
+
+/*
+ * Per-Processor registers
+ */
+
+#define MPIC_CPU_THISBASE 0x00000
+#define MPIC_CPU_BASE 0x20000
+#define MPIC_CPU_STRIDE 0x01000
+
+#define MPIC_CPU_IPI_DISPATCH_0 0x00040
+#define MPIC_CPU_IPI_DISPATCH_1 0x00050
+#define MPIC_CPU_IPI_DISPATCH_2 0x00060
+#define MPIC_CPU_IPI_DISPATCH_3 0x00070
+#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
+#define MPIC_CPU_TASKPRI_MASK 0x0000000f
+#define MPIC_CPU_WHOAMI 0x00090
+#define MPIC_CPU_WHOAMI_MASK 0x0000001f
+#define MPIC_CPU_INTACK 0x000a0
+#define MPIC_CPU_EOI 0x000b0
+
+/*
+ * Per-source registers
+ */
+
+#define MPIC_IRQ_BASE 0x10000
+#define MPIC_IRQ_STRIDE 0x00020
+#define MPIC_IRQ_VECTOR_PRI 0x00000
+#define MPIC_VECPRI_MASK 0x80000000
+#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
+#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
+#define MPIC_VECPRI_PRIORITY_SHIFT 16
+#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
+#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
+#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define MPIC_VECPRI_POLARITY_MASK 0x00800000
+#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
+#define MPIC_VECPRI_SENSE_EDGE 0x00000000
+#define MPIC_VECPRI_SENSE_MASK 0x00400000
+#define MPIC_IRQ_DESTINATION 0x00010
+
+#define MPIC_MAX_IRQ_SOURCES 2048
+#define MPIC_MAX_CPUS 32
+#define MPIC_MAX_ISU 32
+
+/*
+ * Special vector numbers (internal use only)
+ */
+#define MPIC_VEC_SPURRIOUS 255
+#define MPIC_VEC_IPI_3 254
+#define MPIC_VEC_IPI_2 253
+#define MPIC_VEC_IPI_1 252
+#define MPIC_VEC_IPI_0 251
+
+/* unused */
+#define MPIC_VEC_TIMER_3 250
+#define MPIC_VEC_TIMER_2 249
+#define MPIC_VEC_TIMER_1 248
+#define MPIC_VEC_TIMER_0 247
+
+/* Type definition of the cascade handler */
+typedef int (*mpic_cascade_t)(struct cpu_user_regs /* pt_regs */ *regs, void *data);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+/* Fixup table entry */
+struct mpic_irq_fixup
+{
+ u8 __iomem *base;
+ u8 __iomem *applebase;
+ u32 data;
+ unsigned int index;
+};
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+
+/* The instance data of a given MPIC */
+struct mpic
+{
+ /* The "linux" controller struct */
+ hw_irq_controller hc_irq;
+#ifdef CONFIG_SMP
+ hw_irq_controller hc_ipi;
+#endif
+ const char *name;
+ /* Flags */
+ unsigned int flags;
+ /* How many irq sources in a given ISU */
+ unsigned int isu_size;
+ unsigned int isu_shift;
+ unsigned int isu_mask;
+ /* Offset of irq vector numbers */
+ unsigned int irq_offset;
+ unsigned int irq_count;
+ /* Offset of ipi vector numbers */
+ unsigned int ipi_offset;
+ /* Number of sources */
+ unsigned int num_sources;
+ /* Number of CPUs */
+ unsigned int num_cpus;
+ /* cascade handler */
+ mpic_cascade_t cascade;
+ void *cascade_data;
+ unsigned int cascade_vec;
+ /* senses array */
+ unsigned char *senses;
+ unsigned int senses_count;
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ /* The fixup table */
+ struct mpic_irq_fixup *fixups;
+ spinlock_t fixup_lock;
+#endif
+
+ /* The various ioremap'ed bases */
+ volatile u32 __iomem *gregs;
+ volatile u32 __iomem *tmregs;
+ volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
+ volatile u32 __iomem *isus[MPIC_MAX_ISU];
+
+ /* link */
+ struct mpic *next;
+};
+
+/* This is the primary controller; only that one has IPIs and
+ * affinity control. A non-primary MPIC always uses the CPU0
+ * registers only.
+ */
+#define MPIC_PRIMARY 0x00000001
+/* Set this for a big-endian MPIC */
+#define MPIC_BIG_ENDIAN 0x00000002
+/* Broken U3 MPIC */
+#define MPIC_BROKEN_U3 0x00000004
+/* Broken IPI registers (autodetected) */
+#define MPIC_BROKEN_IPI 0x00000008
+/* MPIC wants a reset */
+#define MPIC_WANTS_RESET 0x00000010
+
+/* Allocate the controller structure and set up the linux irq descs
+ * for the range of interrupts passed in. No HW initialization is
+ * actually performed.
+ *
+ * @phys_addr: physical base address of the MPIC
+ * @flags: flags, see constants above
+ * @isu_size: number of interrupts in an ISU. Use 0 to use a
+ * standard ISU-less setup (aka powermac)
+ * @irq_offset: first irq number to assign to this mpic
+ * @irq_count: number of irqs to use with this mpic's IRQ sources. Pass 0
+ * to match the number of sources
+ * @ipi_offset: first irq number to assign to this mpic's IPI sources,
+ * used only on the primary mpic
+ * @senses: array of sense values
+ * @senses_num: number of entries in the array
+ *
+ * Note about the sense array. If none is passed, all interrupts are
+ * set up to be level negative unless MPIC_BROKEN_U3 is set, in which
+ * case they are edge positive (and the array is ignored anyway).
+ * The values in the array start at the first source of the MPIC,
+ * that is, senses[0] corresponds to linux irq "irq_offset".
+ */
+extern struct mpic *mpic_alloc(unsigned long phys_addr,
+ unsigned int flags,
+ unsigned int isu_size,
+ unsigned int irq_offset,
+ unsigned int irq_count,
+ unsigned int ipi_offset,
+ unsigned char *senses,
+ unsigned int senses_num,
+ const char *name);
+
+/* Assign ISUs. To be called before mpic_init()
+ *
+ * @mpic: controller structure as returned by mpic_alloc()
+ * @isu_num: ISU number
+ * @phys_addr: physical address of the ISU
+ */
+extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+ unsigned long phys_addr);
+
+/* Initialize the controller. After this has been called, none of the above
+ * should be called again for this mpic
+ */
+extern void mpic_init(struct mpic *mpic);
+
+/* Set up a cascade. Currently, only one cascade is supported this
+ * way, though you can always do a normal request_irq() and add
+ * other cascades that way. You should call this _after_ having
+ * added all the ISUs.
+ *
+ * @irq_no: "linux" irq number of the cascade (that is, the offset vector)
+ * @handler: cascade handler function
+ */
+extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler,
+ void *data);
+
+/*
+ * All of the following functions must only be used after the
+ * ISUs have been assigned and the controller fully initialized
+ * with mpic_init()
+ */
+
+
+/* Change/Read the priority of an interrupt. Default is 8 for irqs and
+ * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but for
+ * an IPI you must pass the offset number (the linux irq number mapped
+ * to the IPI).
+ */
+extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
+extern unsigned int mpic_irq_get_priority(unsigned int irq);
+
+/* Setup a non-boot CPU */
+extern void mpic_setup_this_cpu(void);
+
+/* Clean up for kexec (or cpu offline or ...) */
+extern void mpic_teardown_this_cpu(int secondary);
+
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
+/* Request IPIs on primary mpic */
+extern void mpic_request_ipis(void);
+
+/* Send an IPI (non-offset number 0..3) */
+extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
+
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
+
+/* Fetch interrupt from a given mpic */
+extern int mpic_get_one_irq(struct mpic *mpic, struct cpu_user_regs /* pt_regs */ *regs);
+/* This one gets to the primary mpic */
+extern int mpic_get_irq(struct cpu_user_regs /* pt_regs */ *regs);
+
+/* global mpic for pSeries */
+extern struct mpic *pSeries_mpic;
+
+//#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MPIC_H */
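A hypothetical bring-up sequence for the mpic_* API above (illustrative only, not part of this checkin; the register base, cascade irq, ipi_offset and names are all invented):

static int sketch_cascade(struct cpu_user_regs *regs, void *data)
{
    /* Forward the cascaded interrupt to the secondary controller. */
    return mpic_get_one_irq((struct mpic *)data, regs);
}

static void sketch_mpic_bringup(void)
{
    struct mpic *mpic;

    mpic = mpic_alloc(0xf8040000UL,     /* hypothetical register base */
                      MPIC_PRIMARY | MPIC_BIG_ENDIAN,
                      0,                /* ISU-less (powermac-style) layout */
                      0,                /* irq_offset */
                      0,                /* irq_count: match hardware sources */
                      128,              /* hypothetical ipi_offset */
                      NULL, 0,          /* default sense values */
                      "SKETCH-MPIC");
    mpic_init(mpic);
    mpic_setup_cascade(64, sketch_cascade, mpic);  /* hypothetical cascade irq */
}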
diff --git a/xen/include/asm-powerpc/msr.h b/xen/include/asm-powerpc/msr.h
new file mode 100644
index 0000000000..b0efc454ee
--- /dev/null
+++ b/xen/include/asm-powerpc/msr.h
@@ -0,0 +1,66 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _POWERPC_MSR_H
+#define _POWERPC_MSR_H
+
+#ifdef __ASSEMBLY__
+#define ULL(x) x
+#else
+#define ULL(x) x ## ULL
+#endif
+
+/* Flags in MSR: */
+#define MSR_SF ULL(0x8000000000000000)
+#define MSR_TA ULL(0x4000000000000000)
+#define MSR_ISF ULL(0x2000000000000000)
+#define MSR_HV ULL(0x1000000000000000)
+#define MSR_VMX ULL(0x0000000002000000)
+#define MSR_MER ULL(0x0000000000200000)
+#define MSR_POW ULL(0x0000000000040000)
+#define MSR_ILE ULL(0x0000000000010000)
+#define MSR_EE ULL(0x0000000000008000)
+#define MSR_PR ULL(0x0000000000004000)
+#define MSR_FP ULL(0x0000000000002000)
+#define MSR_ME ULL(0x0000000000001000)
+#define MSR_FE0 ULL(0x0000000000000800)
+#define MSR_SE ULL(0x0000000000000400)
+#define MSR_BE ULL(0x0000000000000200)
+#define MSR_FE1 ULL(0x0000000000000100)
+#define MSR_IP ULL(0x0000000000000040)
+#define MSR_IR ULL(0x0000000000000020)
+#define MSR_DR ULL(0x0000000000000010)
+#define MSR_PMM ULL(0x0000000000000004)
+#define MSR_RI ULL(0x0000000000000002)
+#define MSR_LE ULL(0x0000000000000001)
+
+/* MSR bits set on the Mambo simulator */
+#define MSR_SIM ULL(0x0000000020000000)
+#define MSR_MAMBO ULL(0x0000000010000000)
+
+/* On a trap, srr1's copy of msr defines some bits as follows: */
+#define MSR_TRAP_FE ULL(0x0000000000100000) /* Floating Point Exception */
+#define MSR_TRAP_IOP ULL(0x0000000000080000) /* Illegal Instruction */
+#define MSR_TRAP_PRIV ULL(0x0000000000040000) /* Privileged Instruction */
+#define MSR_TRAP ULL(0x0000000000020000) /* Trap Instruction */
+#define MSR_TRAP_NEXT ULL(0x0000000000010000) /* PC is next instruction */
+#define MSR_TRAP_BITS (MSR_TRAP_FE|MSR_TRAP_IOP|MSR_TRAP_PRIV|MSR_TRAP)
+
+#endif /* _POWERPC_MSR_H */
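Two small sketches of how the MSR bits above are typically tested (illustrative only, not part of this checkin; the helper names are invented):

static inline int sketch_srr1_is_trap(unsigned long srr1)
{
    /* On a program interrupt, SRR1 carries exactly one reason bit. */
    return (srr1 & MSR_TRAP_BITS) == MSR_TRAP;
}

static inline int sketch_interrupted_problem_state(unsigned long srr1)
{
    /* MSR_PR set means the interrupted context ran in problem state. */
    return (srr1 & MSR_PR) != 0;
}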
diff --git a/xen/include/asm-powerpc/multicall.h b/xen/include/asm-powerpc/multicall.h
new file mode 100644
index 0000000000..5a383056be
--- /dev/null
+++ b/xen/include/asm-powerpc/multicall.h
@@ -0,0 +1,27 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef __ASM_PPC_MULTICALL_H__
+#define __ASM_PPC_MULTICALL_H__
+
+/* defined in arch/ppc/hcalls.c */
+extern void do_multicall_call(multicall_entry_t *call);
+
+#endif
diff --git a/xen/include/asm-powerpc/page.h b/xen/include/asm-powerpc/page.h
new file mode 100644
index 0000000000..f7951d44d3
--- /dev/null
+++ b/xen/include/asm-powerpc/page.h
@@ -0,0 +1,116 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1<<PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+#include <xen/config.h>
+#include <asm/misc.h>
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+
+typedef struct { unsigned long l1_lo; } l1_pgentry_t;
+#define linear_l1_table \
+ ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
+
+#define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
+
+/*
+ * NB. We don't currently track I/O holes in the physical RAM space.
+ */
+#define mfn_valid(mfn) ((mfn) < max_page)
+
+#define virt_to_maddr(va) ((unsigned long)(va))
+#define maddr_to_virt(ma) ((void *)((unsigned long)(ma)))
+/* Shorthand versions of the above functions. */
+#define __pa(x) (virt_to_maddr(x))
+#define __va(x) (maddr_to_virt(x))
+
+/* Convert between Xen-heap virtual addresses and machine frame numbers. */
+#define virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
+#define mfn_to_virt(mfn) (maddr_to_virt((mfn) << PAGE_SHIFT))
+
+/* Convert between machine frame numbers and page-info structures. */
+#define mfn_to_page(mfn) (frame_table + (mfn))
+#define page_to_mfn(pg) ((unsigned long)((pg) - frame_table))
+
+/* Convert between machine addresses and page-info structures. */
+#define maddr_to_page(ma) (frame_table + ((ma) >> PAGE_SHIFT))
+#define page_to_maddr(pg) ((paddr_t)((pg) - frame_table) << PAGE_SHIFT)
+
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+#define virt_to_page(va) (frame_table + (__pa(va) >> PAGE_SHIFT))
+#define page_to_virt(pg) (maddr_to_virt(page_to_maddr(pg)))
+
+/* Convert between frame number and address formats. */
+#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
+#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
+
+extern void clear_page(void *p);
+extern void copy_page(void *dp, void *sp);
+
+#define linear_pg_table linear_l1_table
+
+static inline int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+/* XXX combine with get_order() above */
+#define get_order_from_bytes get_order
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+ int order;
+ nr_pages--;
+ for ( order = 0; nr_pages; order++ )
+ nr_pages >>= 1;
+ return order;
+}
+
+#define __flush_tlb_one(__addr) \
+ __asm__ __volatile__("tlbie %0": :"r" (__addr): "memory")
+
+#define _PAGE_PRESENT 0x001UL
+#define _PAGE_RW 0x002UL
+#define _PAGE_USER 0x004UL
+#define _PAGE_PWT 0x008UL
+#define _PAGE_PCD 0x010UL
+#define _PAGE_ACCESSED 0x020UL
+#define _PAGE_DIRTY 0x040UL
+#define _PAGE_PAT 0x080UL
+#define _PAGE_PSE 0x080UL
+#define _PAGE_GLOBAL 0x100UL
+#endif /* ! __ASSEMBLY__ */
+#endif
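A sketch of the usual round trip through the conversion macros above (illustrative only, not part of this checkin; the helper name is invented):

static inline struct page_info *sketch_va_to_page(void *xenheap_va)
{
    unsigned long mfn = virt_to_mfn(xenheap_va);   /* VA -> machine frame */
    struct page_info *pg = mfn_to_page(mfn);       /* frame -> page_info */

    /* Round trip: page_to_maddr(pg) == (__pa(xenheap_va) & PAGE_MASK). */
    return pg;
}

For sizing, get_order(PAGE_SIZE) is 0 and get_order(PAGE_SIZE + 1) is 1, i.e. it rounds a byte count up to a power-of-two number of pages.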
diff --git a/xen/include/asm-powerpc/papr.h b/xen/include/asm-powerpc/papr.h
new file mode 100644
index 0000000000..03679ca2fa
--- /dev/null
+++ b/xen/include/asm-powerpc/papr.h
@@ -0,0 +1,218 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _PAPR_H
+#define _PAPR_H
+
+/*
+ * Class is one of the following:
+ *
+ * Crit: Continuous forward progress must be made; encountering any
+ * busy resource must cause the function to be backed out and to return
+ * with a "hardware busy" return code.
+ *
+ * Norm: Similar to Crit; however, wait loops for slow hardware
+ * access are allowed.
+ *
+ */
+/* NAME Token Class Mandatory Set */
+#define H_UNUSED 0x0000 /* Crit Yes pft */
+#define H_REMOVE 0x0004 /* Crit Yes pft */
+#define H_ENTER 0x0008 /* Crit Yes pft */
+#define H_READ 0x000c /* Crit Yes pft */
+#define H_CLEAR_MOD 0x0010 /* Crit Yes pft */
+#define H_CLEAR_REF 0x0014 /* Crit Yes pft */
+#define H_PROTECT 0x0018 /* Crit Yes pft */
+#define H_GET_TCE 0x001c /* Crit Yes tce */
+#define H_PUT_TCE 0x0020 /* Crit Yes tce */
+#define H_SET_SPRG0 0x0024 /* Crit Yes sprg0 */
+#define H_SET_DABR 0x0028 /* Crit Yes-dabr exists dabr */
+#define H_PAGE_INIT 0x002c /* Crit Yes copy */
+#define H_SET_ASR 0x0030 /* Crit Yes-on Istar asr */
+#define H_ASR_ON 0x0034 /* Crit Yes-on Istar asr */
+#define H_ASR_OFF 0x0038 /* Crit Yes-on Istar asr */
+#define H_LOGICAL_CI_LOAD 0x003c /* Norm Yes debug */
+#define H_LOGICAL_CI_STORE 0x0040 /* Norm Yes debug */
+#define H_LOGICAL_CACHE_LOAD 0x0044 /* Crit Yes debug */
+#define H_LOGICAL_CACHE_STORE 0x0048 /* Crit Yes debug */
+#define H_LOGICAL_ICBI 0x004c /* Norm Yes debug */
+#define H_LOGICAL_DCBF 0x0050 /* Norm Yes debug */
+#define H_GET_TERM_CHAR 0x0054 /* Crit Yes term */
+#define H_PUT_TERM_CHAR 0x0058 /* Crit Yes term */
+#define H_REAL_TO_LOGICAL 0x005c /* Norm Yes perf */
+#define H_HYPERVISOR_DATA 0x0060 /* Norm See below dump */
+ /* is mandatory if enabled by HSC
+ * and is disabled by default */
+#define H_EOI 0x0064 /* Crit Yes int */
+#define H_CPPR 0x0068 /* Crit Yes int */
+#define H_IPI 0x006c /* Crit Yes int */
+#define H_IPOLL 0x0070 /* Crit Yes int */
+#define H_XIRR 0x0074 /* Crit Yes int */
+#define H_MIGRATE_PCI_TCE 0x0078 /* Norm Yes-if LRDR migrate */
+#define H_CEDE 0x00e0 /* Crit Yes splpar */
+#define H_CONFER 0x00e4
+#define H_PROD 0x00e8
+#define H_GET_PPP 0x00ec
+#define H_SET_PPP 0x00f0
+#define H_PURR 0x00f4
+#define H_PIC 0x00f8
+#define H_REG_CRQ 0x00fc
+#define H_FREE_CRQ 0x0100
+#define H_VIO_SIGNAL 0x0104
+#define H_SEND_CRQ 0x0108
+#define H_PUTRTCE 0x010c
+#define H_COPY_RDMA 0x0110
+#define H_REGISTER_LOGICAL_LAN 0x0114
+#define H_FREE_LOGICAL_LAN 0x0118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x011c
+#define H_SEND_LOGICAL_LAN 0x0120
+#define H_BULK_REMOVE 0x0124
+#define H_WRITE_RDMA 0x0128
+#define H_READ_RDMA 0x012c
+#define H_MULTICAST_CTRL 0x0130
+#define H_SET_XDABR 0x0134
+#define H_STUFF_TCE 0x0138
+#define H_PUT_TCE_INDIRECT 0x013c
+#define H_PUT_RTCE_INDERECT 0x0140
+#define H_MASS_MAP_TCE 0x0144
+#define H_ALRDMA 0x0148
+#define H_CHANGE_LOGICAL_LAN_MAC 0x014c
+#define H_VTERM_PARTNER_INFO 0x0150
+#define H_REGISTER_VTERM 0x0154
+#define H_FREE_VTERM 0x0158
+#define H_HCA_RESV_BEGIN 0x015c
+#define H_HCA_RESV_END 0x01c0
+#define H_GRANT_LOGICAL 0x01c4
+#define H_RESCIND_LOGICAL 0x01c8
+#define H_ACCEPT_LOGICAL 0x01cc
+#define H_RETURN_LOGICAL 0x01d0
+#define H_FREE_LOGICAL_LAN_BUFFER 0x01d4
+
+#define RPA_HCALL_END 0x01d4 /* set to last entry */
+
+/*
+ * Hidden
+ */
+#define H_PCI_CONFIG_READ 0x610c /* PHYP # 0x500c */
+#define H_PCI_CONFIG_WRITE 0x6110 /* PHYP # 0x5010 */
+#define H_THREAD_CONTROL 0x6144 /* PHYP # 0x5044 H_CPU_CONTROL */
+#define H_GET_XIVE 0x614c /* PHYP # 0x504c */
+#define H_SET_XIVE 0x6150 /* PHYP # 0x5050 */
+#define H_INTERRUPT 0x6154 /* PHYP # 0x5054 */
+
+#define HYPE_HCALL_END H_INTERRUPT /* XXX */
+
+/*
+ * Hypervisor Call Return Codes
+ */
+#define H_PARTIAL_STORE 16
+#define H_PAGE_REGISTERED 15
+#define H_IN_PROGRESS 14
+#define H_Sensor_CH 13 /* Sensor value >= Critical high */
+#define H_Sensor_WH 12 /* Sensor value >= Warning high */
+#define H_Sensor_Norm 11 /* Sensor value normal */
+#define H_Sensor_WL 10 /* Sensor value <= Warning low */
+#define H_Sensor_CL 9 /* Sensor value <= Critical low */
+#define H_Partial 5
+#define H_Constrained 4
+#define H_Closed 2 /* virtual terminal session is closed */
+#define H_Busy 1 /* Hardware Busy -- Retry Later */
+#define H_Success 0
+#define H_Hardware -1 /* Error */
+#define H_Function -2 /* Not Supported */
+#define H_Privilege -3 /* Caller not in privileged mode */
+#define H_Parameter -4 /* Outside Valid Range for Partition
+ * or conflicting */
+#define H_Bad_Mode -5 /* Illegal MSR value */
+#define H_PTEG_FULL -6 /* The requested pteg was full */
+#define H_NOT_FOUND -7 /* The requested pte was not found */
+#define H_RESERVED_DABR -8 /* The requested address is reserved
+ * by the Hypervisor on this
+ * processor */
+#define H_UNAVAIL -9 /* Requested resource unavailable */
+#define H_INVAL -10 /* Requested parameter is invalid */
+#define H_Permission -11
+#define H_Dropped -12
+#define H_S_Parm -13
+#define H_D_Parm -14
+#define H_R_Parm -15
+#define H_Resource -16
+#define H_ADAPTER_PARM -17
+
+#define H_Rescinded -18 /* FIXME: check RPA value */
+
+/*
+ * Compatibility with Linux labels; perhaps we should ifdef these for
+ * linux and/or kernel builds.
+ */
+#define H_Not_Found H_NOT_FOUND
+#define H_ANDCOND H_andcond
+#define H_LARGE_PAGE H_Large_Page
+#define H_ICACHE_INVALIDATE H_I_Cache_Inv
+#define H_ICACHE_SYNCHRONIZE H_I_Cache_Sync
+#define H_ZERO_PAGE H_Zero_Page
+#define H_COPY_PAGE H_Copy_Page
+#define H_EXACT H_Exact
+#define H_PTEG_Full H_PTEG_FULL
+#define H_PP1 H_pp1
+#define H_PP2 H_pp2
+
+#ifndef __ASSEMBLY__
+
+#include <xen/types.h>
+/*
+ * The Hcall() Flags Field Definition
+ */
+#define H_NUMA_CEC (~((1UL << (63 - 15)) - 1)) /* bits 0-15 */
+
+#define H_Blank_1 (1UL<<(63-17))
+
+#define H_Exact (1UL<<(63-24))
+#define H_R_XLATE (1UL<<(63-25))
+#define H_READ_4 (1UL<<(63-26))
+
+#define H_AVPN (1UL<<(63-32))
+#define H_andcond (1UL<<(63-33))
+
+#define H_I_Cache_Inv (1UL<<(63-40))
+#define H_I_Cache_Sync (1UL<<(63-41))
+#define H_Blank_2 (1UL<<(63-42))
+
+#define H_Zero_Page (1UL<<(63-48))
+#define H_Copy_Page (1UL<<(63-49))
+#define H_Blank_3 (1UL<<(63-50))
+
+#define H_N (1UL<<(63-61))
+#define H_pp1 (1UL<<(63-62))
+#define H_pp2 (1UL<<(63-63))
+
+#define H_VM_MAP_ICACHE_INVALIDATE (1UL<<(63-40))
+#define H_VM_MAP_ICACHE_SYNCRONIZE (1UL<<(63-41))
+#define H_VM_MAP_INVALIDATE_TRANSLATION (1UL<<(63-42))
+#define H_VM_MAP_INSERT_TRANSLATION (1UL<<(63-43))
+#define H_VM_MAP_LARGE_PAGE (1UL<<(63-44))
+#define H_VM_MAP_ZERO_PAGE (1UL<<(63-48))
+
+enum {
+ INVALID_LOGICAL_ADDRESS = (~((ulong)0)),
+};
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _PAPR_H */
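A sketch of the usual retry idiom around the return codes above (illustrative only, not part of this checkin; sketch_hcall() is an invented stand-in for a real hcall wrapper):

extern long sketch_hcall(void);     /* hypothetical hcall wrapper */

static inline long sketch_hcall_retry(void)
{
    long rc;

    do {
        rc = sketch_hcall();
    } while (rc == H_Busy);         /* hardware busy: retry later */

    return rc;                      /* H_Success (0) or a negative error */
}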
diff --git a/xen/include/asm-powerpc/pci.h b/xen/include/asm-powerpc/pci.h
new file mode 100644
index 0000000000..24e806e9e8
--- /dev/null
+++ b/xen/include/asm-powerpc/pci.h
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_PCI_H_
+#define _ASM_PCI_H_
+
+#include <xen/pci.h>
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+extern void pcibios_set_master(struct pci_dev *dev);
+extern unsigned int pcibios_assign_all_busses(void);
+extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn);
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
+
+#endif
diff --git a/xen/include/asm-powerpc/powerpc64/config.h b/xen/include/asm-powerpc/powerpc64/config.h
new file mode 100644
index 0000000000..cf16dac479
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/config.h
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __PPC_64_CONFIG_H__
+#define __PPC_64_CONFIG_H__
+
+#define CONFIG_L1_CACHE_SHIFT 7
+
+/* 288 bytes below the stack pointer must be preserved by interrupt handlers */
+#define STACK_VOLATILE_AREA 288
+/* size of minimum stack frame; C code can write into the caller's stack */
+#define STACK_FRAME_OVERHEAD 112
+
+#define STACK_ORDER 2
+#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
+
+#define NUM_SLB_ENTRIES 64
+#define NUM_FPRS 32
+#define HAS_FLOAT 1
+#define HAS_VMX 1
+
+#ifndef __ASSEMBLY__
+
+#define FORCE_CRASH() __asm__ __volatile__ ( "trap" )
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/xen/include/asm-powerpc/powerpc64/ppc970-hid.h b/xen/include/asm-powerpc/powerpc64/ppc970-hid.h
new file mode 100644
index 0000000000..c5054e92dd
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/ppc970-hid.h
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+/*
+ * Details of the 970-specific HID registers.
+ */
+
+#ifndef _ASM_HID_H_
+#define _ASM_HID_H_
+
+#include <xen/types.h>
+
+union hid0 {
+ struct hid0_bits {
+ ulong _unused_0_8: 9;
+ ulong nap: 1;
+ ulong _unused_10: 1;
+ ulong dpm: 1; /* Dynamic Power Management */
+ ulong _unused_12_14: 3;
+ ulong nhr: 1; /* Not Hard Reset */
+ ulong inorder: 1;
+ ulong _reserved17: 1;
+ ulong tb_ctrl: 1;
+ ulong ext_tb_enb: 1; /* timebase is linked to external clock */
+ ulong _unused_20_22: 3;
+ ulong hdice: 1; /* HDEC enable */
+ ulong eb_therm: 1; /* Enable ext thermal ints */
+ ulong _unused_25_30: 6;
+ ulong en_attn: 1; /* Enable attn instruction */
+ ulong _unused_32_63: 32;
+ } bits;
+ ulong word;
+};
+
+union hid1 {
+ struct hid1_bits {
+ ulong bht_pm: 3; /* branch history table prediction mode */
+ ulong en_ls: 1; /* enable link stack */
+ ulong en_cc: 1; /* enable count cache */
+ ulong en_ic: 1; /* enable inst cache */
+ ulong _reserved_6: 1;
+ ulong pf_mode: 2; /* prefetch mode */
+ ulong en_icbi: 1; /* enable forced icbi match mode */
+ ulong en_if_cach: 1; /* i-fetch cacheability control */
+ ulong en_ic_rec: 1; /* i-cache parity error recovery */
+ ulong en_id_rec: 1; /* i-dir parity error recovery */
+ ulong en_er_rec: 1; /* i-ERAT parity error recovery */
+ ulong ic_pe: 1;
+ ulong icd0_pe: 1;
+ ulong _reserved_16: 1;
+ ulong ier_pe: 1;
+ ulong en_sp_itw: 1;
+ ulong _reserved_19_63: 45;
+ } bits;
+ ulong word;
+};
+
+union hid4 {
+ struct hid4_bits {
+ ulong lpes0: 1; /* LPAR Environment Selector bit 0 */
+ ulong rmlr12: 2; /* RMLR 1:2 */
+ ulong lpid25: 4; /* LPAR ID bits 2:5 */
+ ulong rmor: 16; /* real mode offset region */
+ ulong rm_ci: 1; /* real mode cache-inhibit */
+ ulong force_ai: 1; /* Force alignment interrupt */
+ ulong _unused: 32;
+ ulong lpes1: 1; /* LPAR Environment Selector bit 1 */
+ ulong rmlr0: 1; /* RMLR 0 */
+ ulong _reserved: 1;
+ ulong dis_splarx: 1; /* Disable spec. lwarx/ldarx */
+ ulong lg_pg_dis: 1; /* Disable large page support */
+ ulong lpid01: 2; /* LPAR ID bits 0:1 */
+ } bits;
+ ulong word;
+};
+
+union hid5 {
+ struct hid5_bits {
+ ulong _reserved_0_31: 32;
+ ulong hrmor: 16;
+ ulong _reserver_48_49:2;
+ ulong _unused_50_55: 6;
+ ulong DCBZ_size: 1;
+ ulong DCBZ32_ill: 1;
+ ulong _unused_58_63: 6;
+ } bits;
+ ulong word;
+};
+
+#endif
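A sketch of how the HID accessor unions above are meant to be used (illustrative only, not part of this checkin; the helper name is invented). The raw SPR image goes in .word, fields are flipped by name, and the result is written back with mthid0() from processor.h:

static inline ulong sketch_hid0_enable_nap(ulong hid0_word)
{
    union hid0 h;

    h.word = hid0_word;   /* raw 64-bit image read with mfhid0() */
    h.bits.nap = 1;       /* allow the nap power-saving state */
    h.bits.dpm = 1;       /* dynamic power management */
    return h.word;
}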
diff --git a/xen/include/asm-powerpc/powerpc64/ppc970.h b/xen/include/asm-powerpc/powerpc64/ppc970.h
new file mode 100644
index 0000000000..69c5063289
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/ppc970.h
@@ -0,0 +1,31 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_PPC970_H_
+#define _ASM_PPC970_H_
+
+#include <xen/types.h>
+#include <asm/powerpc64/ppc970-hid.h>
+
+struct cpu_vcpu {
+ union hid4 hid4;
+};
+
+#endif
diff --git a/xen/include/asm-powerpc/powerpc64/procarea.h b/xen/include/asm-powerpc/powerpc64/procarea.h
new file mode 100644
index 0000000000..9715df3b35
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/procarea.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_PROCAREA_H_
+#define _ASM_PROCAREA_H_
+
+#include <xen/types.h>
+
+struct vcpu;
+struct gdb_state;
+
+struct processor_area
+{
+ struct vcpu *cur_vcpu;
+ void *hyp_stack_base;
+ ulong saved_regs[2];
+};
+
+#endif
diff --git a/xen/include/asm-powerpc/powerpc64/processor.h b/xen/include/asm-powerpc/powerpc64/processor.h
new file mode 100644
index 0000000000..5036ed388e
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/processor.h
@@ -0,0 +1,193 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_64_PROCESSOR_H_
+#define _ASM_64_PROCESSOR_H_
+
+#ifdef __ASSEMBLY__
+
+#define LOADADDR(rn,name) \
+ lis rn,name##@highest; \
+ ori rn,rn,name##@higher; \
+ rldicr rn,rn,32,31; \
+ oris rn,rn,name##@h; \
+ ori rn,rn,name##@l
+
+#define SET_REG_TO_CONST(reg, value) \
+ lis reg,(((value)>>48)&0xFFFF); \
+ ori reg,reg,(((value)>>32)&0xFFFF); \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(((value)>>16)&0xFFFF); \
+ ori reg,reg,((value)&0xFFFF);
+
+#define SET_REG_TO_LABEL(reg, label) \
+ lis reg,(label)@highest; \
+ ori reg,reg,(label)@higher; \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(label)@h; \
+ ori reg,reg,(label)@l;
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _STATIC(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _ENTRY(name) GLUE(.,name)
+#else /* __ASSEMBLY__ */
+
+#include <xen/types.h>
+#include <asm/powerpc64/procarea.h>
+
+static inline void mtmsrd(ulong msr)
+{
+ __asm__ __volatile__ ("mtmsrd %0" : : "r" (msr));
+}
+
+static inline unsigned long mftb(void)
+{
+ unsigned long tb;
+ __asm__ __volatile__ ("mftb %0" : "=r" (tb));
+ return tb;
+}
+
+static inline void mthdec(unsigned ticks)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_HDEC), "r" (ticks));
+}
+static inline unsigned int mfhdec(void)
+{
+ unsigned int val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_HDEC));
+ return val;
+}
+
+static inline void mthsprg0(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_HSPRG0), "r"(val));
+}
+static inline ulong mfhsprg0(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_HSPRG0));
+ return val;
+}
+
+static inline void slbia(void)
+{
+ __asm__ __volatile__ ("isync; slbia; isync":::"memory");
+}
+
+static inline void slbie(ulong entry)
+{
+ __asm__ __volatile__ (
+ "isync\n"
+ "slbie %0\n"
+ "isync\n"
+ : : "r" (entry) : "memory");
+}
+
+static inline ulong mfhid0(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_HID0));
+ return val;
+}
+static inline void mthid0(ulong val)
+{
+ __asm__ __volatile__ (
+ "sync\n"
+ "mtspr %0, %1\n"
+ "isync\n"
+ : : "i"(SPRN_HID0), "r"(val));
+}
+
+static inline ulong mfhid1(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_HID1));
+ return val;
+}
+static inline void mthid1(ulong val)
+{
+ __asm__ __volatile__ (
+ "sync\n"
+ "mtspr %0, %1\n"
+ "isync\n"
+ : : "i"(SPRN_HID1), "r"(val));
+}
+
+static inline ulong mfhid4(void)
+{
+ ulong hid4;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(hid4) : "i"(SPRN_HID4));
+ return hid4;
+}
+
+static inline void mthid4(ulong hid4)
+{
+ __asm__ __volatile__ (
+ "sync\n"
+ "mtspr %0, %1\n"
+ "isync\n"
+ : : "i"(SPRN_HID4), "r"(hid4));
+}
+
+static inline ulong mfhid5(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_HID5));
+ return val;
+}
+
+static inline void mthid5(ulong val)
+{
+ __asm__ __volatile__ (
+ "sync\n"
+ "mtspr %0, %1\n"
+ "isync\n"
+ : : "i"(SPRN_HID5), "r"(val));
+}
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/xen/include/asm-powerpc/powerpc64/string.h b/xen/include/asm-powerpc/powerpc64/string.h
new file mode 100644
index 0000000000..3e8af89876
--- /dev/null
+++ b/xen/include/asm-powerpc/powerpc64/string.h
@@ -0,0 +1,40 @@
+/* from linux/include/asm-powerpc/string.h */
+
+#ifndef _PPC64_STRING_H_
+#define _PPC64_STRING_H_
+
+#include <xen/types.h>
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRNCPY
+#define __HAVE_ARCH_STRLEN
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRCAT
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_BCOPY
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMCHR
+
+extern int strcasecmp(const char *, const char *);
+extern int strncasecmp(const char *, const char *, int);
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern int strcmp(const char *,const char *);
+extern char * strcat(char *, const char *);
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern int memcmp(const void *,const void *,__kernel_size_t);
+extern void * memchr(const void *,int,__kernel_size_t);
+
+#endif
diff --git a/xen/include/asm-powerpc/processor.h b/xen/include/asm-powerpc/processor.h
new file mode 100644
index 0000000000..5944c13c57
--- /dev/null
+++ b/xen/include/asm-powerpc/processor.h
@@ -0,0 +1,202 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_PROCESSOR_H_
+#define _ASM_PROCESSOR_H_
+
+#include <xen/config.h>
+#include <asm/reg_defs.h>
+#include <asm/msr.h>
+
+#define IOBMP_BYTES 8192
+#define IOBMP_INVALID_OFFSET 0x8000
+
+/* most assemblers do not know this instruction */
+#define HRFID .long 0x4c000224
+
+#ifndef __ASSEMBLY__
+#include <xen/types.h>
+
+struct domain;
+struct vcpu;
+struct cpu_user_regs;
+extern void show_registers(struct cpu_user_regs *);
+extern void show_execution_state(struct cpu_user_regs *);
+extern void cpu_initialize(void);
+extern void cpu_init_vcpu(struct vcpu *);
+extern void save_cpu_sprs(struct vcpu *);
+extern void load_cpu_sprs(struct vcpu *);
+
+/* XXX this could also land us in GDB */
+#define dump_execution_state() trap()
+
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *x) {;}
+
+static __inline__ void sync(void)
+{
+ __asm__ __volatile__ ("sync");
+}
+
+static __inline__ void isync(void)
+{
+ __asm__ __volatile__ ("isync");
+}
+
+static inline ulong mfmsr(void) {
+ ulong msr;
+ __asm__ __volatile__ ("mfmsr %0" : "=&r"(msr));
+ return msr;
+}
+
+static inline void nop(void) {
+ __asm__ __volatile__ ("nop");
+}
+#define cpu_relax() nop()
+
+static inline unsigned int mftbu(void)
+{
+ unsigned int tbu;
+ __asm__ __volatile__ ("mftbu %0" : "=r" (tbu));
+ return tbu;
+}
+
+static inline unsigned int mftbl(void)
+{
+ unsigned int tbl;
+ __asm__ __volatile__ ("mftbl %0" : "=r" (tbl));
+ return tbl;
+}
+
+static inline unsigned int mfdec(void)
+{
+ unsigned int tmp;
+ __asm__ __volatile__ ("mfdec %0" : "=r"(tmp));
+ return tmp;
+}
+static inline void mtdec(unsigned int ticks)
+{
+ __asm__ __volatile__ ("mtdec %0" : : "r" (ticks));
+}
+
+static inline u32 mfpvr(void) {
+ u32 pvr;
+ asm volatile("mfpvr %0" : "=&r" (pvr));
+ return pvr;
+}
+
+static inline ulong mfr1(void)
+{
+ ulong r1;
+ asm volatile("mr %0, 1" : "=&r" (r1));
+ return r1;
+}
+
+static inline void mtsprg0(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_SPRG0), "r"(val));
+}
+static inline ulong mfsprg0(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_SPRG0));
+ return val;
+}
+
+static inline void mtsprg1(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_SPRG1), "r"(val));
+}
+static inline ulong mfsprg1(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_SPRG1));
+ return val;
+}
+
+static inline void mtsprg2(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_SPRG2), "r"(val));
+}
+static inline ulong mfsprg2(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_SPRG2));
+ return val;
+}
+
+static inline void mtsprg3(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_SPRG3), "r"(val));
+}
+static inline ulong mfsprg3(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_SPRG3));
+ return val;
+}
+
+static inline void mtsdr1(ulong val)
+{
+ __asm__ __volatile__ ("mtsdr1 %0" : : "r"(val));
+}
+static inline ulong mfsdr1(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfsdr1 %0" : "=r"(val));
+ return val;
+}
+
+static inline void mtdar(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_DAR), "r"(val));
+}
+static inline ulong mfdar(void)
+{
+ ulong val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_DAR));
+ return val;
+}
+
+static inline void mtdsisr(ulong val)
+{
+ __asm__ __volatile__ ("mtspr %0, %1" : : "i"(SPRN_DSISR), "r"(val));
+}
+static inline unsigned mfdsisr(void)
+{
+ unsigned val;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r"(val) : "i"(SPRN_DSISR));
+ return val;
+}
+
+#ifdef CONFIG_MAMBO
+static inline int on_mambo(void)
+{
+ return !!(mfmsr() & MSR_MAMBO);
+}
+#else /* CONFIG_MAMBO */
+static inline int on_mambo(void) { return 0; }
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm/powerpc64/processor.h>
+
+#endif
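A sketch of a timebase-based busy wait built on mftb() and cpu_relax() above (illustrative only, not part of this checkin; sketch_tb_per_usec is an invented variable that would come from firmware):

extern unsigned long sketch_tb_per_usec;   /* hypothetical, firmware-supplied */

static inline void sketch_tb_delay(unsigned long usecs)
{
    unsigned long end = mftb() + usecs * sketch_tb_per_usec;

    /* Ignores timebase wrap; adequate for short waits. */
    while (mftb() < end)
        cpu_relax();
}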
diff --git a/xen/include/asm-powerpc/reg_defs.h b/xen/include/asm-powerpc/reg_defs.h
new file mode 100644
index 0000000000..6d1769e8a1
--- /dev/null
+++ b/xen/include/asm-powerpc/reg_defs.h
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _ASM_REG_DEFS_H_
+#define _ASM_REG_DEFS_H_
+
+#ifdef __ASSEMBLY__
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+/* Floating Point Registers (FPRs) */
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+/* Vector Registers (VRs) */
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
+
+#endif
+
+/* Special Purpose Registers */
+#define SPRN_VRSAVE 256
+#define SPRN_DSISR 18
+#define SPRN_DAR 19
+#define SPRN_DEC 22
+#define SPRN_SRR0 26
+#define SPRN_SRR1 27
+#define SPRN_SPRG0 272
+#define SPRN_SPRG1 273
+#define SPRN_SPRG2 274
+#define SPRN_SPRG3 275
+
+#define SPRN_HSPRG0 304
+#define SPRN_HSPRG1 305
+#define SPRN_HDEC 310
+#define SPRN_RMOR 312
+#define SPRN_HRMOR 313
+#define SPRN_HSRR0 314
+#define SPRN_HSRR1 315
+#define SPRN_LPCR 318
+#define SPRN_LPIDR 319
+
+#define SPRN_SIAR 796
+#define SPRN_SDAR 797
+
+/* As defined for PU G4 */
+#define SPRN_HID0 1008
+#define SPRN_HID1 1009
+#define SPRN_HID4 1012
+
+#define SPRN_DABR 1013
+#define SPRN_HID5 1014
+#define SPRN_DABRX 1015
+#define SPRN_HID6 1017
+#define SPRN_HID7 1018
+#define SPRN_HID8 1019
+#define SPRN_PIR 1023
+
+#endif /* _ASM_REG_DEFS_H_ */
diff --git a/xen/include/asm-powerpc/regs.h b/xen/include/asm-powerpc/regs.h
new file mode 100644
index 0000000000..16f0ba9cd0
--- /dev/null
+++ b/xen/include/asm-powerpc/regs.h
@@ -0,0 +1,25 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_REGS_H_
+#define _ASM_REGS_H_
+
+#include <asm/processor.h>
+#endif
diff --git a/xen/include/asm-powerpc/shadow.h b/xen/include/asm-powerpc/shadow.h
new file mode 100644
index 0000000000..c247659f5e
--- /dev/null
+++ b/xen/include/asm-powerpc/shadow.h
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_SHADOW_H_
+#define _ASM_SHADOW_H_
+
+#include <xen/sched.h>
+
+#define shadow_mode_translate(_d) 1
+
+#define __translate_gpfn_to_mfn(_d, gpfn) \
+ ( (shadow_mode_translate(_d)) \
+ ? translate_gpfn_to_mfn(_d, gpfn) \
+ : (gpfn) )
+
+#define __mfn_to_gpfn(_d, mfn) \
+ ( (shadow_mode_translate(_d)) \
+ ? machine_to_phys_mapping[(mfn)] \
+ : (mfn) )
+
+static inline unsigned long
+translate_gpfn_to_mfn(struct domain *rd, unsigned long gpfn)
+{
+ trap();
+ return 0;
+}
+
+#endif
diff --git a/xen/include/asm-powerpc/smp.h b/xen/include/asm-powerpc/smp.h
new file mode 100644
index 0000000000..6010aac99a
--- /dev/null
+++ b/xen/include/asm-powerpc/smp.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_SMP_H
+#define _ASM_SMP_H
+
+#include <xen/types.h>
+#include <xen/cpumask.h>
+#include <asm/current.h>
+extern int smp_num_siblings;
+
+/* revisit when we support SMP */
+#define get_hard_smp_processor_id(i) i
+#define hard_smp_processor_id() 0
+#define raw_smp_processor_id() 0
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+
+#endif
diff --git a/xen/include/asm-powerpc/smpboot.h b/xen/include/asm-powerpc/smpboot.h
new file mode 100644
index 0000000000..b158d5b74f
--- /dev/null
+++ b/xen/include/asm-powerpc/smpboot.h
@@ -0,0 +1,21 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#include "../asm-x86/smpboot.h"
diff --git a/xen/include/asm-powerpc/spinlock.h b/xen/include/asm-powerpc/spinlock.h
new file mode 100644
index 0000000000..f6574d6b76
--- /dev/null
+++ b/xen/include/asm-powerpc/spinlock.h
@@ -0,0 +1,221 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_SPINLOCK_H
+#define _ASM_SPINLOCK_H
+
+#include <xen/types.h>
+#include <xen/smp.h>
+#include <asm/atomic.h>
+
+static inline void
+sync_after_acquire(void)
+{
+ __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+static inline void
+sync_before_release(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/*
+ * cas_u32: 32-bit compare-and-swap; works on all supported processors.
+ *
+ * NOTE: The ptr parameters to these routines are cast to character pointers
+ * in order to prevent any strict-aliasing optimizations the compiler
+ * might otherwise attempt.
+ */
+static inline u32
+cas_u32(volatile u32 *ptr, u32 oval, u32 nval)
+{
+ u32 tmp;
+
+ sync_before_release();
+ __asm__ ("\n"
+ "# cas_u32 \n"
+ "1: lwarx %1,0,%4 # tmp = (*ptr) [linked] \n"
+ " cmplw %1,%2 # if (tmp != oval) \n"
+ " bne- 2f # goto failure \n"
+ " stwcx. %3,0,%4 # (*ptr) = nval [conditional] \n"
+ " bne- 1b # if (store failed) retry \n"
+ " li %1,1 # tmp = SUCCESS \n"
+ " b $+8 # goto end \n"
+ "2: li %1,0 # tmp = FAILURE \n"
+ "# end cas_u32 \n"
+ : "=m" (*(volatile char *)ptr), "=&r" (tmp)
+ : "r" (oval), "r" (nval), "r" (ptr), "m" (*(volatile char*)ptr)
+ : "cc"
+ );
+ sync_after_acquire();
+
+ return tmp;
+}
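/*
 * Illustrative sketch only, not part of this patch: the usual retry loop
 * built on cas_u32()'s success/failure return value, shown here as a
 * lock-free add. 'atomic_add_u32' is a hypothetical helper name.
 */
static inline void atomic_add_u32(volatile u32 *p, u32 delta)
{
    u32 old;

    do {
        old = *p;                            /* snapshot the current value */
    } while (!cas_u32(p, old, old + delta)); /* retry if another CPU raced us */
}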
+
+typedef union {
+ volatile u32 lock;
+ struct {
+ s8 recurse_cpu;
+ u8 recurse_cnt;
+ s16 lock;
+ } fields;
+} spinlock_t;
+
+#define __UNLOCKED (0U)
+#define __LOCKED (~__UNLOCKED)
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { __UNLOCKED }
+static inline void spin_lock_init(spinlock_t *lock)
+{
+ *lock = SPIN_LOCK_UNLOCKED;
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return lock->lock != __UNLOCKED;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+ while (!cas_u32(&lock->lock, __UNLOCKED, __LOCKED)) {
+ continue;
+ }
+ sync_after_acquire();
+}
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+ sync_before_release();
+ *lock = SPIN_LOCK_UNLOCKED;
+}
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+ int ret = 0;
+
+ if (cas_u32(&lock->lock, __UNLOCKED, __LOCKED)) {
+ ret = 1;
+ }
+ sync_after_acquire();
+ return ret;
+}
+
+typedef struct {
+ volatile unsigned int lock;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { __UNLOCKED }
+static inline void rwlock_init(rwlock_t *lock)
+{
+ *lock = RW_LOCK_UNLOCKED;
+}
+
+static inline void _raw_read_lock(rwlock_t *lock)
+{
+ u32 val;
+
+    /* Readers take the lock by incrementing the low 31 bits; this only
+     * succeeds while the write bit (the top bit) is clear. */
+ do {
+ val = lock->lock & ((1UL << 31) - 1);
+ } while (!cas_u32(&lock->lock, val, val + 1));
+ sync_after_acquire();
+}
+
+static inline void _raw_write_lock(rwlock_t *lock)
+{
+    /* The writer takes the lock by setting the top bit; this only
+     * succeeds while all other bits (the reader count) are 0. */
+ while (!cas_u32(&lock->lock, 0, 1 << 31)) {
+ continue;
+ }
+ sync_after_acquire();
+}
+
+static inline void _raw_write_unlock(rwlock_t *lock)
+{
+ sync_before_release();
+ *lock = RW_LOCK_UNLOCKED;
+}
+
+static inline void _raw_read_unlock(rwlock_t *lock)
+{
+ u32 val;
+
+ /* We want to decrement the low-order 31-bits atomically */
+ sync_before_release();
+ do {
+ val = lock->lock;
+ } while (!cas_u32(&lock->lock, val, val - 1));
+
+ /* necessary? */
+ sync_after_acquire();
+}
+
+/*
+ * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
+ * reentered recursively on the same CPU. All critical regions that may form
+ * part of a recursively-nested set must be protected by these forms. If there
+ * are any critical regions that cannot form part of such a set, they can use
+ * standard spin_[un]lock().
+ */
+
+#if 0
+static inline void _raw_spin_lock_recursive(spinlock_t *lock)
+{
+ int cpu = smp_processor_id();
+ if (likely(lock->fields.recurse_cpu != cpu)) {
+ spin_lock(lock);
+ lock->fields.recurse_cpu = cpu;
+ }
+ lock->fields.recurse_cnt++;
+}
+
+static inline void _raw_spin_unlock_recursive(spinlock_t *lock)
+{
+ if (likely(--lock->fields.recurse_cnt == 0)) {
+ lock->fields.recurse_cpu = -1;
+ spin_unlock(lock);
+ }
+}
+#else
+
+#define _raw_spin_lock_recursive(_lock) \
+ do { \
+ int cpu = smp_processor_id(); \
+ if ( likely((_lock)->fields.recurse_cpu != cpu) ) \
+ { \
+ spin_lock(_lock); \
+ (_lock)->fields.recurse_cpu = cpu; \
+ } \
+ (_lock)->fields.recurse_cnt++; \
+ } while ( 0 )
+
+#define _raw_spin_unlock_recursive(_lock) \
+ do { \
+ if ( likely(--(_lock)->fields.recurse_cnt == 0) ) \
+ { \
+ (_lock)->fields.recurse_cpu = -1; \
+ spin_unlock(_lock); \
+ } \
+ } while ( 0 )
+#endif
+
+#endif
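For illustration, a minimal usage sketch of the recursive forms described above, assuming the generic spin_lock_recursive()/spin_unlock_recursive() wrappers from xen/spinlock.h and a hypothetical xyz_lock; it is not part of this patch:

    static spinlock_t xyz_lock = SPIN_LOCK_UNLOCKED;

    static void xyz_inner(void)
    {
        spin_lock_recursive(&xyz_lock);   /* already held by this CPU: only recurse_cnt grows */
        /* ... nested critical region ... */
        spin_unlock_recursive(&xyz_lock); /* count drops back, lock stays held */
    }

    static void xyz_outer(void)
    {
        spin_lock_recursive(&xyz_lock);   /* first acquisition on this CPU */
        xyz_inner();
        spin_unlock_recursive(&xyz_lock); /* count reaches 0, lock is released */
    }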
diff --git a/xen/include/asm-powerpc/string.h b/xen/include/asm-powerpc/string.h
new file mode 100644
index 0000000000..b70d53f38e
--- /dev/null
+++ b/xen/include/asm-powerpc/string.h
@@ -0,0 +1,26 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _PPC_STRING_H_
+#define _PPC_STRING_H_
+
+#include <asm/powerpc64/string.h>
+
+#endif /* _PPC_STRING_H_ */
diff --git a/xen/include/asm-powerpc/system.h b/xen/include/asm-powerpc/system.h
new file mode 100644
index 0000000000..3d88d77ce0
--- /dev/null
+++ b/xen/include/asm-powerpc/system.h
@@ -0,0 +1,243 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
+ */
+
+#ifndef _ASM_SYSTEM_H_
+#define _ASM_SYSTEM_H_
+
+#include <xen/config.h>
+#include <asm/memory.h>
+#include <asm/time.h>
+#include <asm/processor.h>
+#include <asm/misc.h>
+#include <asm/msr.h>
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+static __inline__ unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%3 # __xchg_u32\n\
+ stwcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+static __inline__ unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%3 # __xchg_u64\n\
+ stdcx. %2,0,%3\n\
+2: bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (dummy), "=m" (*m)
+ : "r" (val), "r" (m)
+ : "cc", "memory");
+
+ return (dummy);
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(volatile void *ptr, unsigned long x, int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+ case 8:
+ return __xchg_u64(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n\
+ stwcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __inline__ unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static always_inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 2:
+ trap(); return 0; /* XXX implement __cmpxchg_u16 ? */
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg_user(ptr,o,n) cmpxchg(ptr,o,n)
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
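/*
 * Illustrative sketch only, not part of this patch: cmpxchg() returns the
 * previous value, so success is detected by comparing it with the expected
 * old value. 'bounded_inc' is a hypothetical helper name.
 */
static inline void bounded_inc(volatile unsigned long *ctr, unsigned long max)
{
    unsigned long old;

    do {
        old = *ctr;
        if (old >= max)
            return;                             /* already at the limit */
    } while (cmpxchg(ctr, old, old + 1) != old); /* retry if another CPU raced us */
}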
+
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores. Lwsync is fine for
+ * rmb(), though.
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() __asm__ __volatile__("": : :"memory")
+#define smp_rmb() __asm__ __volatile__("": : :"memory")
+#define smp_wmb() __asm__ __volatile__("": : :"memory")
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
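/*
 * Illustrative sketch only, not part of this patch: the publish/consume
 * pairing of wmb() and rmb() described in the comment above. 'payload'
 * and 'ready' are hypothetical shared locations.
 */
static inline void publish(volatile unsigned long *payload,
                           volatile int *ready, unsigned long val)
{
    *payload = val;
    wmb();              /* order the payload store before the flag store */
    *ready = 1;
}

static inline unsigned long consume(volatile unsigned long *payload,
                                    volatile int *ready)
{
    while (!*ready)
        ;               /* spin until the producer sets the flag */
    rmb();              /* order the flag load before the payload load */
    return *payload;
}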
+
+#define local_save_flags(flags) ((flags) = mfmsr())
+#define local_irq_restore(flags) do { \
+ __asm__ __volatile__("": : :"memory"); \
+ mtmsrd((flags)); \
+} while(0)
+
+static inline void local_irq_disable(void)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ mtmsrd(msr & ~MSR_EE);
+ __asm__ __volatile__("" : : : "memory");
+}
+
+static inline void local_irq_enable(void)
+{
+ unsigned long msr;
+ __asm__ __volatile__("" : : : "memory");
+ msr = mfmsr();
+ mtmsrd(msr | MSR_EE);
+}
+
+static inline void __do_save_and_cli(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+ mtmsrd(msr & ~MSR_EE);
+ __asm__ __volatile__("" : : : "memory");
+}
+
+#define local_irq_save(flags) __do_save_and_cli(&flags)
+
+static inline int local_irq_is_enabled(void)
+{
+ return !!(mfmsr() & MSR_EE);
+}
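/*
 * Illustrative sketch only, not part of this patch: the usual pattern for
 * briefly masking external interrupts around a short critical section.
 */
static inline void irq_save_restore_example(void)
{
    unsigned long flags;

    local_irq_save(flags);      /* clear MSR_EE, remembering the old MSR */
    /* ... short, non-blocking critical section ... */
    local_irq_restore(flags);   /* restore the saved MSR_EE state */
}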
+
+#endif
diff --git a/xen/include/asm-powerpc/time.h b/xen/include/asm-powerpc/time.h
new file mode 100644
index 0000000000..d0692230a5
--- /dev/null
+++ b/xen/include/asm-powerpc/time.h
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef _ASM_TIME_H_
+#define _ASM_TIME_H_
+
+#include <xen/types.h>
+
+extern unsigned int timebase_freq;
+#define CLOCK_TICK_RATE timebase_freq
+
+#define watchdog_disable() ((void)0)
+#define watchdog_enable() ((void)0)
+
+extern u64 get_timebase(void);
+
+typedef u64 cycles_t;
+static inline cycles_t get_cycles(void)
+{
+ cycles_t c;
+ c = get_timebase();
+ return c;
+}
+
+#endif
diff --git a/xen/include/asm-powerpc/types.h b/xen/include/asm-powerpc/types.h
new file mode 100644
index 0000000000..4260cd0fa3
--- /dev/null
+++ b/xen/include/asm-powerpc/types.h
@@ -0,0 +1,69 @@
+/* from xen/include/asm-x86/types.h */
+
+#ifndef _PPC_TYPES_H
+#define _PPC_TYPES_H
+
+typedef unsigned short umode_t;
+
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(__ppc__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+
+#elif defined(__PPC64__)
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
+#endif
+
+#include <xen/config.h>
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+#if defined(__ppc__)
+typedef signed long long s64;
+typedef unsigned long long u64;
+#define BYTES_PER_LONG 4
+#define BITS_PER_LONG 32
+typedef unsigned int size_t;
+#elif defined(__PPC64__)
+typedef signed long s64;
+typedef unsigned long u64;
+#define BYTES_PER_LONG 8
+#define BITS_PER_LONG 64
+typedef unsigned long size_t;
+#endif
+
+typedef unsigned long paddr_t;
+#define PRIpaddr "08lx"
+
+/* DMA addresses come in generic and 64-bit flavours. */
+
+typedef unsigned long dma_addr_t;
+typedef u64 dma64_addr_t;
+
+typedef unsigned short xmem_bufctl_t;
+
+#endif
diff --git a/xen/include/asm-powerpc/uaccess.h b/xen/include/asm-powerpc/uaccess.h
new file mode 100644
index 0000000000..77760953d0
--- /dev/null
+++ b/xen/include/asm-powerpc/uaccess.h
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __PPC_UACCESS_H__
+#define __PPC_UACCESS_H__
+
+#include <xen/errno.h>
+#include <asm/page.h>
+#include <asm/guest_access.h>
+
+/* Since we run in real mode, we can safely access all addresses.
+ * XXX well, except I/O; should we check for that here? */
+#define access_ok(addr,size) 1
+#define array_access_ok(addr,count,size) 1
+
+#define __copy_to_user copy_to_user
+#define __copy_from_user copy_from_user
+#define copy_to_user(to,from,len) xencomm_copy_to_guest(to,from,len,0)
+#define copy_from_user(to,from,len) xencomm_copy_from_guest(to,from,len,0)
+
+#endif /* __PPC_UACCESS_H__ */
diff --git a/xen/include/public/arch-powerpc.h b/xen/include/public/arch-powerpc.h
new file mode 100644
index 0000000000..17b6bbd257
--- /dev/null
+++ b/xen/include/public/arch-powerpc.h
@@ -0,0 +1,119 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_PPC_64_H__
+#define __XEN_PUBLIC_ARCH_PPC_64_H__
+
+#if !(defined(__XEN__) || defined(__XEN_TOOLS__))
+/* not sure how this is supposed to get asserted */
+#define __XEN_INTERFACE_VERSION__ 0x00030202
+#endif
+
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
+ typedef struct { \
+ int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \
+ type *p; \
+ } __attribute__((__aligned__(8))) __guest_handle_ ## name
+
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
+#define set_xen_guest_handle(hnd, val) \
+ do { \
+ if (sizeof ((hnd).__pad)) \
+ (hnd).__pad[0] = 0; \
+ (hnd).p = val; \
+ } while (0)
+
+#ifdef __XEN_TOOLS__
+#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
+#endif
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_XEN_GUEST_HANDLE(char);
+DEFINE_XEN_GUEST_HANDLE(int);
+DEFINE_XEN_GUEST_HANDLE(long);
+DEFINE_XEN_GUEST_HANDLE(void);
+
+typedef unsigned long long xen_pfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+#endif
+
+/*
+ * Pointers and other address fields inside interface structures are padded to
+ * 64 bits. This means that field alignments aren't different between 32- and
+ * 64-bit architectures.
+ */
+/* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. */
+#define __MEMORY_PADDING(_X)
+#define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X)
+#define MEMORY_PADDING _MEMORY_PADDING(__LINE__)
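/*
 * Illustrative sketch only, not part of this patch: how tools-side code
 * points a guest handle at a local buffer. 'my_op' is a hypothetical
 * hypercall argument structure.
 */
struct my_op {
    XEN_GUEST_HANDLE(ulong) extents;   /* padded to 64 bits on 32-bit tools */
    unsigned int nr_extents;
};

static inline void my_op_init(struct my_op *op, unsigned long *buf,
                              unsigned int nr)
{
    set_xen_guest_handle(op->extents, buf); /* zero the pad (if any), store the pointer */
    op->nr_extents = nr;
}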
+
+/* And the trap vector is... */
+#define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */
+
+#ifndef __ASSEMBLY__
+
+typedef uint64_t xen_ulong_t;
+
+/* User-accessible registers: need to be saved/restored for every nested Xen
+ * invocation. */
+struct cpu_user_regs
+{
+ uint64_t gprs[32];
+ uint64_t lr;
+ uint64_t ctr;
+ uint64_t srr0;
+ uint64_t srr1;
+ uint64_t pc;
+ uint64_t msr;
+ uint64_t fpscr;
+ uint64_t xer;
+ uint64_t hid4;
+ uint32_t cr;
+ uint32_t entry_vector;
+};
+typedef struct cpu_user_regs cpu_user_regs_t;
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */
+
+/* ONLY used to communicate with dom0! See also struct exec_domain. */
+struct vcpu_guest_context {
+ cpu_user_regs_t user_regs; /* User-level CPU registers */
+ uint64_t sdr1; /* Pagetable base */
+ /* XXX etc */
+};
+typedef struct vcpu_guest_context vcpu_guest_context_t;
+DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
+
+struct arch_shared_info {
+};
+
+struct arch_vcpu_info {
+};
+
+/* Support for multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+#endif
+
+#endif
diff --git a/xen/include/public/xencomm.h b/xen/include/public/xencomm.h
new file mode 100644
index 0000000000..fc2dda7734
--- /dev/null
+++ b/xen/include/public/xencomm.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _XEN_XENCOMM_H_
+#define _XEN_XENCOMM_H_
+
+/* A xencomm descriptor is a scatter/gather list containing physical
+ * addresses corresponding to a virtually contiguous memory area. The
+ * hypervisor translates these physical addresses to machine addresses to copy
+ * to and from the virtually contiguous area.
+ */
+
+#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
+#define XENCOMM_INVALID (~0UL)
+
+struct xencomm_desc {
+ uint32_t magic;
+ uint32_t nr_addrs; /* the number of entries in address[] */
+ uint64_t address[0];
+};
+
+#endif /* _XEN_XENCOMM_H_ */
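As a hedged illustration of how a guest-side helper might populate this descriptor for a virtually contiguous buffer (virt_to_phys() and PAGE_SIZE are assumed to be provided by the caller's environment, and the caller must have allocated enough address[] slots; this sketch is not part of the patch):

    static struct xencomm_desc *
    xencomm_fill(struct xencomm_desc *desc, void *buf, unsigned long len)
    {
        char *p = (char *)buf;
        char *end = p + len;
        uint32_t i = 0;

        desc->magic = XENCOMM_MAGIC;
        /* Record one physical address per page touched by [buf, buf+len). */
        while (p < end) {
            desc->address[i++] = virt_to_phys(p);
            p += PAGE_SIZE - ((unsigned long)p & (PAGE_SIZE - 1));
        }
        desc->nr_addrs = i;
        return desc;
    }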