author     iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>   2004-12-24 22:08:11 +0000
committer  iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>   2004-12-24 22:08:11 +0000
commit     5dc6acb5fd57658377eb0e081ba7df8bfa35b558 (patch)
tree       e66b8a53ea709d3946169f1709ab3f9e70340e86
parent     28f33c5460619b6c06c540b008b05dadc77d33a5 (diff)
bitkeeper revision 1.1159.187.81 (41cc934bUot13IO0-1LsmhYl0sW9Zw)
Add plan9 builder support.
-rw-r--r--  .rootkeys                                 |   2
-rw-r--r--  tools/libxc/Makefile                      |   1
-rwxr-xr-x  tools/libxc/plan9a.out.h                  |  28
-rw-r--r--  tools/libxc/xc.h                          |   8
-rwxr-xr-x  tools/libxc/xc_plan9_build.c              | 744
-rw-r--r--  tools/python/xen/lowlevel/xc/xc.c         |  35
-rw-r--r--  tools/python/xen/xend/XendDomainInfo.py   |  29
7 files changed, 847 insertions(+), 0 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index 2d0c24586c..dfa5242d53 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -320,6 +320,7 @@
41090ec8Pj_bkgCBpg2W7WfmNkumEA tools/examples/xmexample1
40cf2937oKlROYOJTN8GWwWM5AmjBg tools/examples/xmexample2
3fbba6dbDfYvJSsw9500b4SZyUhxjQ tools/libxc/Makefile
+41cc934abX-QLXJXW_clV_wRjM0zYg tools/libxc/plan9a.out.h
3fbba6dc1uU7U3IFeF6A-XEOYF2MkQ tools/libxc/rpm.spec
3fbba6dcrNxtygEcgJYAJJ1gCQqfsA tools/libxc/xc.h
40589968oCfoUlXd460CjVAkBE8IBA tools/libxc/xc_atropos.c
@@ -334,6 +335,7 @@
3fbba6db7li3FJiABYtCmuGxOJxEGw tools/libxc/xc_linux_save.c
3fbba6db7WnnJr0KFrIFrqNlSKvFYg tools/libxc/xc_misc.c
4051bce6CHAsYh8P5t2OHDtRWOP9og tools/libxc/xc_physdev.c
+41cc934aO1m6NxEh_8eDr9bJIMoLFA tools/libxc/xc_plan9_build.c
3fbba6dctWRWlFJkYb6hdix2X4WMuw tools/libxc/xc_private.c
3fbba6dcbVrG2hPzEzwdeV_UC8kydQ tools/libxc/xc_private.h
40589968UQFnJeOMn8UIFLbXBuwXjw tools/libxc/xc_rrobin.c
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index 9065bb1bd6..6ee078644c 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -18,6 +18,7 @@ SRCS += xc_domain.c
SRCS += xc_evtchn.c
SRCS += xc_io.c
SRCS += xc_linux_build.c
+SRCS += xc_plan9_build.c
SRCS += xc_linux_restore.c
SRCS += xc_linux_save.c
SRCS += xc_misc.c
diff --git a/tools/libxc/plan9a.out.h b/tools/libxc/plan9a.out.h
new file mode 100755
index 0000000000..d53f636517
--- /dev/null
+++ b/tools/libxc/plan9a.out.h
@@ -0,0 +1,28 @@
+
+typedef struct Exec
+{
+ long magic; /* magic number */
+ long text; /* size of text segment */
+ long data; /* size of initialized data */
+ long bss; /* size of uninitialized data */
+ long syms; /* size of symbol table */
+ long entry; /* entry point */
+ long spsz; /* size of pc/sp offset table */
+ long pcsz; /* size of pc/line number table */
+} Exec;
+
+#define _MAGIC(b) ((((4*b)+0)*b)+7)
+#define A_MAGIC _MAGIC(8) /* 68020 */
+#define I_MAGIC _MAGIC(11) /* intel 386 */
+#define J_MAGIC _MAGIC(12) /* intel 960 */
+#define K_MAGIC _MAGIC(13) /* sparc */
+#define V_MAGIC _MAGIC(16) /* mips 3000 */
+#define X_MAGIC _MAGIC(17) /* att dsp 3210 */
+#define M_MAGIC _MAGIC(18) /* mips 4000 */
+#define D_MAGIC _MAGIC(19) /* amd 29000 */
+#define E_MAGIC _MAGIC(20) /* arm 7-something */
+#define Q_MAGIC _MAGIC(21) /* powerpc */
+#define N_MAGIC _MAGIC(22) /* mips 4000 LE */
+#define L_MAGIC _MAGIC(23) /* dec alpha */
+#define P_MAGIC _MAGIC(24) /* mips 3000 LE */
+
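
As an aside (not part of the patch), the _MAGIC(b) macro above packs the architecture index into a small constant; the i386 value that the loader in xc_plan9_build.c checks against works out as follows, shown here as a quick Python illustration:

    # Illustration only: expand the Plan 9 a.out magic formula from plan9a.out.h.
    def magic(b):
        return ((4 * b + 0) * b) + 7        # _MAGIC(b)

    assert magic(11) == 491                 # I_MAGIC (intel 386)
    print(hex(magic(11)))                   # 0x1eb, the value loadp9image() expects in ehdr.magic
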
diff --git a/tools/libxc/xc.h b/tools/libxc/xc.h
index 30ac7c5c99..72dfd3c4e6 100644
--- a/tools/libxc/xc.h
+++ b/tools/libxc/xc.h
@@ -97,6 +97,14 @@ int xc_linux_build(int xc_handle,
unsigned int control_evtchn,
unsigned long flags);
+int
+xc_plan9_build (int xc_handle,
+ u32 domid,
+ const char *image_name,
+ const char *cmdline,
+ unsigned int control_evtchn,
+ unsigned long flags);
+
int xc_bvtsched_global_set(int xc_handle,
unsigned long ctx_allow);
diff --git a/tools/libxc/xc_plan9_build.c b/tools/libxc/xc_plan9_build.c
new file mode 100755
index 0000000000..33c9a54914
--- /dev/null
+++ b/tools/libxc/xc_plan9_build.c
@@ -0,0 +1,744 @@
+/******************************************************************************
+ * xc_plan9_build.c
+ * derived from xc_linux_build.c
+ */
+
+#include "xc_private.h"
+
+#include <zlib.h>
+
+#define DEBUG 1
+#ifdef DEBUG
+#define DPRINTF(x) printf x; fflush(stdout);
+#else
+#define DPRINTF(x)
+#endif
+
+#include "plan9a.out.h"
+
+/* really TOS which means stack starts at 0x2000, and uses page 1*/
+#define STACKPAGE 2
+struct Exec header, origheader;
+
+typedef struct page {
+ char data[PAGE_SIZE];
+} PAGE;
+
+
+int
+memcpy_toguest(int xc_handle, u32 dom, void *v, int size,
+ unsigned long *page_array, unsigned int to_page)
+{
+ int ret;
+ unsigned char *cp = v;
+ unsigned int whichpage;
+ unsigned char *vaddr;
+
+// DPRINTF(("memcpy_to_guest: to_page 0x%x, count %d\n", to_page, size));
+ for (ret = 0, whichpage = to_page; size > 0;
+ whichpage++, size -= PAGE_SIZE, cp += PAGE_SIZE) {
+
+ // DPRINTF (("map_pfn_writeable(%p, 0x%lx)\n", pm_handle,
+// page_array[whichpage]));
+ vaddr = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ page_array[whichpage]);
+ // DPRINTF (("vaddr is %p\n", vaddr));
+ if (vaddr == NULL) {
+ ret = -1;
+ ERROR("Couldn't map guest memory");
+ goto out;
+ }
+ // DPRINTF (("copy %p to %p, count 0x%x\n", cp, vaddr, 4096));
+ memcpy(vaddr, cp, 4096);
+ munmap(vaddr, PAGE_SIZE);
+ // DPRINTF (("Did %ud'th pages\n", whichpage));
+ }
+ out:
+ return ret;
+}
+
+/* this is a function which can go away. It dumps a hunk of
+ * guest pages to a file (/tmp/dumpit); handy for debugging
+ * your image builder.
+ * Xen guys, nuke this if you wish.
+ */
+void
+dumpit(int xc_handle, u32 dom,
+ int start_page, int tot, unsigned long *page_array)
+{
+ int i, ofd;
+ unsigned char *vaddr;
+
+ ofd = open("/tmp/dumpit", O_RDWR);
+ for (i = start_page; i < tot; i++) {
+ vaddr = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ page_array[i]);
+ if (!vaddr) {
+ fprintf(stderr, "Page %d\n", i);
+ perror("shit");
+ read(0, &i, 1);
+ return;
+ }
+ write(ofd, vaddr, 4096);
+ munmap(vaddr, PAGE_SIZE);
+ }
+}
+int
+blah(char *b)
+{
+ fprintf(stderr, "Error in xc_plan9_build!\n");
+ perror(b);
+ return errno;
+}
+
+/* swap bytes. For plan 9 headers */
+void
+swabby(unsigned long *s, char *name)
+{
+ unsigned long it;
+ it = ((*s & 0xff000000) >> 24) | ((*s & 0xff0000) >> 8) |
+ ((*s & 0xff00) << 8) | ((*s & 0xff) << 24);
+ DPRINTF(("Item %s is 0x%lx\n", name, it));
+ *s = it;
+}
+
+void
+plan9header(Exec * header)
+{
+ /* header is big-endian */
+ swabby(&header->magic, "magic");
+ swabby(&header->text, "text");
+ swabby(&header->data, "data");
+ swabby(&header->bss, "bss");
+ swabby(&header->syms, "syms");
+ swabby(&header->entry, "entry");
+ swabby(&header->spsz, "spsz");
+ swabby(&header->pcsz, "pcsz");
+
+}
+
+static int
+ loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom,
+ unsigned long *page_array,
+ unsigned long tot_pages, unsigned long *virt_load_addr,
+ unsigned long *ksize, unsigned long *symtab_addr,
+ unsigned long *symtab_len,
+ unsigned long *first_data_page, unsigned long *pdb_page);
+
+#define P9ROUND (P9SIZE / 8)
+
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+
+static long
+get_tot_pages(int xc_handle, u32 domid)
+{
+ dom0_op_t op;
+ op.cmd = DOM0_GETDOMAININFO;
+ op.u.getdomaininfo.domain = (domid_t) domid;
+ op.u.getdomaininfo.ctxt = NULL;
+ return (do_dom0_op(xc_handle, &op) < 0) ?
+ -1 : op.u.getdomaininfo.tot_pages;
+}
+
+static int
+get_pfn_list(int xc_handle,
+ u32 domid, unsigned long *pfn_buf, unsigned long max_pfns)
+{
+ dom0_op_t op;
+ int ret;
+ op.cmd = DOM0_GETMEMLIST;
+ op.u.getmemlist.domain = (domid_t) domid;
+ op.u.getmemlist.max_pfns = max_pfns;
+ op.u.getmemlist.buffer = pfn_buf;
+
+ if (mlock(pfn_buf, max_pfns * sizeof (unsigned long)) != 0)
+ return -1;
+
+ ret = do_dom0_op(xc_handle, &op);
+
+ (void) munlock(pfn_buf, max_pfns * sizeof (unsigned long));
+
+#if 0
+#ifdef DEBUG
+ DPRINTF(("Ret for get_pfn_list is %d\n", ret));
+ if (ret >= 0) {
+ int i, j;
+ for (i = 0; i < op.u.getmemlist.num_pfns; i += 16) {
+ fprintf(stderr, "0x%x: ", i);
+ for (j = 0; j < 16; j++)
+ fprintf(stderr, "0x%lx ", pfn_buf[i + j]);
+ fprintf(stderr, "\n");
+ }
+ }
+#endif
+#endif
+ return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
+}
+
+static int
+setup_guestos(int xc_handle,
+ u32 dom,
+ gzFile kernel_gfd,
+ unsigned long tot_pages,
+ unsigned long *virt_startinfo_addr,
+ unsigned long *virt_load_addr,
+ full_execution_context_t * ctxt,
+ const char *cmdline,
+ unsigned long shared_info_frame,
+ unsigned int control_evtchn,
+ int flags)
+{
+ l1_pgentry_t *vl1e = NULL;
+ l2_pgentry_t *vl2tab = NULL, *vl2e = NULL;
+ unsigned long *cpage_array = NULL;
+ unsigned long *pte_array = NULL;
+ unsigned long l2tab;
+ unsigned long l1tab;
+ unsigned long count;
+ unsigned long symtab_addr = 0, symtab_len = 0;
+ start_info_t *start_info;
+ shared_info_t *shared_info;
+ unsigned long ksize;
+ mmu_t *mmu = NULL;
+ int i;
+ unsigned long first_page_after_kernel,
+ first_data_page,
+ page_array_page;
+ unsigned long cpu0pdb, cpu0pte, cpu0ptelast;
+ unsigned long /*last_pfn, */ tot_pte_pages;
+
+ DPRINTF(("tot pages is %ld\n", tot_pages));
+ if ((cpage_array = malloc(tot_pages * sizeof (unsigned long))) == NULL) {
+ PERROR("Could not allocate cpage array");
+ goto error_out;
+ }
+
+ if (get_pfn_list(xc_handle, dom, cpage_array, tot_pages) != tot_pages) {
+ PERROR("Could not get the page frame list");
+ goto error_out;
+ }
+
+ for (i = 0; i < 64; i++)
+ DPRINTF(("First %d page is 0x%lx\n", i, cpage_array[i]));
+
+ tot_pte_pages = tot_pages >> 10;
+ DPRINTF(("Page range is 0 to 0x%lx, which requires 0x%lx pte pages\n",
+ tot_pte_pages, tot_pte_pages));
+
+ if (loadp9image(kernel_gfd, xc_handle, dom, cpage_array, tot_pages,
+ virt_load_addr, &ksize, &symtab_addr, &symtab_len,
+ &first_data_page, &first_page_after_kernel))
+ goto error_out;
+ DPRINTF(("First data page is 0x%lx\n", first_data_page));
+ DPRINTF(("First page after kernel is 0x%lx\n",
+ first_page_after_kernel));
+
+ /*
+ NEED TO INCREMENT first page after kernel by:
+ + 1 (pdb)
+ + tot_pte_pages (pte)
+ + tot_pte_pages (page_array)
+ */
+ /* SO, have to copy the first kernel pages pfns right into the
+ * page_array, then do identity maps for the rest.
+ */
+ DPRINTF(("mapped kernel pages\n"));
+
+ /* now loop over all ptes and store into the page_array, so as
+ * to get the identity map.
+ */
+ if ((pte_array =
+ malloc(tot_pte_pages * 1024 * sizeof (unsigned long))) == NULL) {
+ PERROR("Could not allocate pte array");
+ goto error_out;
+ }
+
+ /* plan 9 on startup expects a "l2" (xen parlance) at 0x2000,
+ * this "l2" should have one PTE pointer for a va of 0x80000000.
+ * and an l1 (PTEs to you) at 0x3000. (physical).
+ * the PTEs should map the first 4M of memory.
+ */
+ /* get a physical address for the L2. This means take the PFN and
+ * shift left.
+ */
+ /* this terminology is plan 9 terminology.
+ * pdb is essentially the Xen L2. 'Page Directory Block'?
+ * I need to ask JMK.
+ * cpupte is the pte array.
+ * Plan 9 counts on these being set up for cpu0.
+ * SO: cpu0pdb (Xen L2)
+ * and cpupte (Xen L1)
+ */
+ /* cpu0pdb is right after kernel */
+ cpu0pdb = first_page_after_kernel;
+ /* cpu0pte comes right after cpu0pdb */
+ cpu0pte = cpu0pdb + 1;
+ /* number of the last cpu0pte page */
+ cpu0ptelast = cpu0pte + tot_pte_pages - 1;
+ /* first page of the page array (mfn) */
+ page_array_page = cpu0ptelast + 1;
+
+ DPRINTF(("cpu0pdb 0x%lx, cpu0pte 0x%lx cpu0ptelast 0x%lx\n", cpu0pdb,
+ cpu0pte, cpu0ptelast));
+ l2tab = cpage_array[cpu0pdb] << PAGE_SHIFT;
+ DPRINTF(("l2tab 0x%lx\n", l2tab));
+ ctxt->pt_base = l2tab;
+
+ /* get a physical address for the L1. This means take the PFN and
+ * shift left.
+ */
+ l1tab = cpage_array[cpu0pte] << PAGE_SHIFT;
+ DPRINTF(("l1tab 0x%lx\n", l1tab));
+ if ((mmu = init_mmu_updates(xc_handle, dom)) == NULL)
+ goto error_out;
+ DPRINTF(("now map in l2tab\n"));
+
+ /* Initialise the page tables. */
+ /* mmap in the l2tab */
+ if ((vl2tab = xc_map_foreign_range(xc_handle, dom,
+ PAGE_SIZE, PROT_READ | PROT_WRITE,
+ l2tab >> PAGE_SHIFT)) == NULL)
+ goto error_out;
+ DPRINTF(("vl2tab 0x%p\n", vl2tab));
+ /* now we have the cpu0pdb for the kernel, starting at 0x2000,
+ * so we can plug in the physical pointer to the 0x3000 pte
+ */
+ /* zero it */
+ memset(vl2tab, 0, PAGE_SIZE);
+ /* get a pointer in the l2tab for the virt_load_addr */
+ DPRINTF(("&vl2tab[l2_table_offset(*virt_load_addr)] is 0x%p[0x%lx]\n",
+ &vl2tab[l2_table_offset(*virt_load_addr)],
+ l2_table_offset(*virt_load_addr)));
+
+ vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
+
+ /* OK, for all the available PTE, set the PTE pointer up */
+ DPRINTF(("For i = %ld to %ld ...\n", cpu0pte, cpu0ptelast));
+ for (i = cpu0pte; i <= cpu0ptelast; i++) {
+ DPRINTF(("Index %d Set %p to 0x%lx\n", i, vl2e,
+ (cpage_array[i] << PAGE_SHIFT) | L2_PROT));
+ *vl2e++ = (cpage_array[i] << PAGE_SHIFT) | L2_PROT;
+ }
+
+ /* unmap it ... */
+ munmap(vl2tab, PAGE_SIZE);
+
+ /* for the pages from virt_load_pointer to the end of this
+ * set of PTEs, map in the PFN for that VA
+ */
+ for (vl1e = (l1_pgentry_t *) pte_array, count = 0;
+ count < tot_pte_pages * 1024; count++, vl1e++) {
+
+ *vl1e = cpage_array[count];
+ if (!cpage_array[count])
+ continue;
+ /* set in the PFN for this entry */
+ *vl1e = (cpage_array[count] << PAGE_SHIFT) | L1_PROT;
+/*
+ DPRINTF (("vl1e # %d 0x%lx gets 0x%lx\n",
+ count, vl1e, *vl1e));
+*/
+ if ((count >= cpu0pdb) && (count <= cpu0ptelast)) {
+ //DPRINTF((" Fix up page %d as it is in pte ville: ", count));
+ *vl1e &= ~_PAGE_RW;
+ DPRINTF(("0x%lx\n", *vl1e));
+ }
+ if ((count >= (0x100000 >> 12))
+ && (count < (first_data_page >> 12))) {
+ //DPRINTF((" Fix up page %d as it is in text ", count));
+ *vl1e &= ~_PAGE_RW;
+ //DPRINTF (("0x%lx\n", *vl1e));
+ }
+ }
+ /* special thing. Pre-map the shared info page */
+ vl1e = &pte_array[2];
+ *vl1e = (shared_info_frame << PAGE_SHIFT) | L1_PROT;
+ DPRINTF(("v1l1 %p, has value 0x%lx\n", vl1e, *(unsigned long *) vl1e));
+ /* another special thing. VA 80005000 has to point to 80006000 */
+ /* this is a Plan 9 thing -- the 'mach' pointer */
+ /* 80005000 is the mach pointer per-cpu, and the actual
+ * mach pointers are 80006000, 80007000 etc.
+ */
+ vl1e = &pte_array[5];
+ *vl1e = (cpage_array[6] << PAGE_SHIFT) | L1_PROT;
+
+ /* OK, it's all set up, copy it in */
+ memcpy_toguest(xc_handle, dom, pte_array,
+ (tot_pte_pages * 1024 * sizeof (unsigned long) /**/),
+ cpage_array, cpu0pte);
+
+ /* We really need to have the vl1tab unmapped or the add_mmu_update
+ * below will fail bigtime.
+ */
+ /* Xen guys: remember my errors on domain exit? Something I'm doing
+ * wrong in here? We never did find out ...
+ */
+ /* get rid of the entries we can not use ... */
+ memcpy_toguest(xc_handle, dom, cpage_array,
+ (tot_pte_pages * 1024 * sizeof (unsigned long) /**/),
+ cpage_array, page_array_page);
+ /* last chance to dump all of memory */
+ // dumpit(xc_handle, dom, 0 /*0x100000>>12*/, tot_pages, cpage_array) ;
+ /*
+ * Pin down l2tab addr as page dir page - causes hypervisor to provide
+ * correct protection for the page
+ */
+ if (add_mmu_update(xc_handle, mmu,
+ l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE))
+ goto error_out;
+
+ for (count = 0; count < tot_pages; count++) {
+/*
+ DPRINTF (("add_mmu_update(0x%x, 0x%x, 0x%x, %d)\n", xc_handle, mmu,
+ (cpage_array[count]
+ << PAGE_SHIFT) |
+ MMU_MACHPHYS_UPDATE,
+ count));
+*/
+ if (add_mmu_update(xc_handle, mmu,
+ (cpage_array[count] << PAGE_SHIFT) |
+ MMU_MACHPHYS_UPDATE, count))
+ goto error_out;
+ //DPRINTF(("Do the next one\n"));
+ }
+/*
+ */
+
+ //dumpit(pm_handle, 3, 4, page_array);
+ /* put the virt_startinfo_addr at KZERO */
+ /* just hard-code for now */
+ *virt_startinfo_addr = 0x80000000;
+
+ DPRINTF(("virt_startinfo_addr = 0x%lx\n", *virt_startinfo_addr));
+ start_info = xc_map_foreign_range(xc_handle, dom,
+ PAGE_SIZE, PROT_READ | PROT_WRITE,
+ cpage_array[0]);
+ DPRINTF(("startinfo = 0x%p\n", start_info));
+ DPRINTF(("shared_info_frame is %lx\n", shared_info_frame));
+ memset(start_info, 0, sizeof (*start_info));
+ start_info->pt_base = 0x80000000 | cpu0pdb << PAGE_SHIFT;
+ start_info->mfn_list = 0x80000000 | (page_array_page) << PAGE_SHIFT;
+ DPRINTF(("mfn_list 0x%lx\n", start_info->mfn_list));
+ start_info->mod_start = 0;
+ start_info->mod_len = 0;
+ start_info->nr_pages = tot_pte_pages * 1024;
+ start_info->nr_pt_frames = tot_pte_pages + 1;
+ start_info->shared_info = shared_info_frame;
+ start_info->flags = 0;
+ DPRINTF((" control event channel is %d\n", control_evtchn));
+ start_info->domain_controller_evtchn = control_evtchn;
+ strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
+ start_info->cmd_line[MAX_CMDLINE - 1] = '\0';
+ munmap(start_info, PAGE_SIZE);
+
+ DPRINTF(("done setting up start_info\n"));
+ DPRINTF(("shared_info_frame = 0x%lx\n", shared_info_frame));
+ /* shared_info page starts its life empty. */
+
+ shared_info = xc_map_foreign_range(xc_handle, dom,
+ PAGE_SIZE, PROT_READ | PROT_WRITE,
+ shared_info_frame);
+ memset(shared_info, 0, PAGE_SIZE);
+ /* Mask all upcalls... */
+ DPRINTF(("mask all upcalls\n"));
+ for (i = 0; i < MAX_VIRT_CPUS; i++)
+ shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
+ munmap(shared_info, PAGE_SIZE);
+
+ /* Send the page update requests down to the hypervisor. */
+ DPRINTF(("send page update reqs down.\n"));
+ if (finish_mmu_updates(xc_handle, mmu))
+ goto error_out;
+
+ //DPRINTF (("call dumpit.\n"));
+ //dumpit(pm_handle, 0x100000>>12, tot_pages, page_array) ;
+ //dumpit (pm_handle, 2, 0x100, page_array);
+ free(mmu);
+
+ /* we don't bother freeing anything at this point --
+ * we're exiting and it is pointless
+ */
+ return 0;
+
+ error_out:
+ /* oh well we still free some things -- I oughtta nuke this */
+ if (mmu != NULL)
+ free(mmu);
+ ;
+ return -1;
+}
+
+int
+xc_plan9_build(int xc_handle,
+ u32 domid,
+ const char *image_name,
+ const char *cmdline,
+ unsigned int control_evtchn, unsigned long flags)
+{
+ dom0_op_t launch_op, op;
+ unsigned long load_addr;
+ long tot_pages;
+ int kernel_fd = -1;
+ gzFile kernel_gfd = NULL;
+ int rc, i;
+ full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
+ unsigned long virt_startinfo_addr;
+
+ if ((tot_pages = get_tot_pages(xc_handle, domid)) < 0) {
+ PERROR("Could not find total pages for domain");
+ return 1;
+ }
+ DPRINTF(("get_tot_pages returns %ld pages\n", tot_pages));
+
+ kernel_fd = open(image_name, O_RDONLY);
+ if (kernel_fd < 0) {
+ PERROR("Could not open kernel image");
+ return 1;
+ }
+
+ if ((kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL) {
+ PERROR("Could not allocate decompression state for state file");
+ close(kernel_fd);
+ return 1;
+ }
+
+ DPRINTF(("get_tot_pages returns %ld pages\n", tot_pages));
+ if (mlock(&st_ctxt, sizeof (st_ctxt))) {
+ PERROR("Unable to mlock ctxt");
+ return 1;
+ }
+
+ op.cmd = DOM0_GETDOMAININFO;
+ op.u.getdomaininfo.domain = (domid_t) domid;
+ op.u.getdomaininfo.ctxt = ctxt;
+ if ((do_dom0_op(xc_handle, &op) < 0) ||
+ ((u32) op.u.getdomaininfo.domain != domid)) {
+ PERROR("Could not get info on domain");
+ goto error_out;
+ }
+ DPRINTF(("get_tot_pages returns %ld pages\n", tot_pages));
+
+ if (!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED)
+ || (op.u.getdomaininfo.ctxt->pt_base != 0)) {
+ ERROR("Domain is already constructed");
+ goto error_out;
+ }
+
+ DPRINTF(("get_tot_pages returns %ld pages\n", tot_pages));
+ if (setup_guestos(xc_handle, domid, kernel_gfd, tot_pages,
+ &virt_startinfo_addr,
+ &load_addr, &st_ctxt, cmdline,
+ op.u.getdomaininfo.shared_info_frame,
+ control_evtchn, flags) < 0) {
+ ERROR("Error constructing guest OS");
+ goto error_out;
+ }
+
+ /* leave the leak in here for now
+ if ( kernel_fd >= 0 )
+ close(kernel_fd);
+ if( kernel_gfd )
+ gzclose(kernel_gfd);
+ */
+ ctxt->flags = 0;
+
+ /*
+ * Initial register values:
+ * DS,ES,FS,GS = FLAT_GUESTOS_DS
+ * CS:EIP = FLAT_GUESTOS_CS:start_pc
+ * SS:ESP = FLAT_GUESTOS_DS:start_stack
+ * ESI = start_info
+ * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+ * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
+ */
+ ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
+ ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
+ ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
+ ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
+ ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
+ ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
+ ctxt->cpu_ctxt.eip = load_addr;
+ ctxt->cpu_ctxt.eip = 0x80100020;
+ /* put stack at top of second page */
+ ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
+
+ /* why is this set? */
+ ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp;
+ ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2);
+
+ /* FPU is set up to default initial state. */
+ memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
+
+ /* Virtual IDT is empty at start-of-day. */
+ for (i = 0; i < 256; i++) {
+ ctxt->trap_ctxt[i].vector = i;
+ ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ }
+ ctxt->fast_trap_idx = 0;
+
+ /* No LDT. */
+ ctxt->ldt_ents = 0;
+
+ /* Use the default Xen-provided GDT. */
+ ctxt->gdt_ents = 0;
+
+ /* Ring 1 stack is the initial stack. */
+ /* put stack at top of second page */
+ ctxt->guestos_ss = FLAT_GUESTOS_DS;
+ ctxt->guestos_esp = ctxt->cpu_ctxt.esp;
+
+ /* No debugging. */
+ memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
+
+ /* No callback handlers. */
+ ctxt->event_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->event_callback_eip = 0;
+ ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->failsafe_callback_eip = 0;
+
+ memset(&launch_op, 0, sizeof (launch_op));
+
+ launch_op.u.builddomain.domain = (domid_t) domid;
+ // launch_op.u.builddomain.num_vifs = 1;
+ launch_op.u.builddomain.ctxt = ctxt;
+ launch_op.cmd = DOM0_BUILDDOMAIN;
+ rc = do_dom0_op(xc_handle, &launch_op);
+
+ fprintf(stderr, "RC is %d\n", rc);
+ return rc;
+
+ error_out:
+ if (kernel_fd >= 0)
+ close(kernel_fd);
+ if (kernel_gfd)
+ gzclose(kernel_gfd);
+
+ return -1;
+}
+
+/*
+ * Plan 9 memory layout (initial)
+ * ----------------
+ * | info from xen| @0
+ * ----------------
+ * | stack |
+ * ----------------<--- page 2
+ * | empty |
+ * ---------------<---- page 5 MACHADDR (always points to machp[cpuno])
+ * | aliased |
+ * ---------------<----- page 6 CPU0MACH
+ * | CPU0MACH |
+ * ----------------
+ * | empty |
+ * ---------------- *virt_load_addr = ehdr.e_entry (0x80100000)
+ * | kernel |
+ * | |
+ * ---------------- <----- page aligned boundary.
+ * | data |
+ * | |
+ * ----------------
+ * | bss |
+ * ----------------<--- end of kernel (page aligned)
+ * | PMD cpu0pdb |
+ * ----------------<--- page +1
+ * | PTE cpu0pte |
+ * ----------------<--- page (tot_pte_pages)/1024
+ * | page_array |
+ * ---------------- <--- page (tot_pte_pages)/1024
+ * | empty to TOM |
+ * ----------------
+ */
+
+static int
+loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom,
+ unsigned long *page_array,
+ unsigned long tot_pages, unsigned long *virt_load_addr,
+ unsigned long *ksize, unsigned long *symtab_addr,
+ unsigned long *symtab_len,
+ unsigned long *first_data_page, unsigned long *pdb_page)
+{
+ unsigned long datapage;
+ Exec ehdr;
+
+ char *p;
+ unsigned long maxva;
+ int curpos, ret;
+ PAGE *image = 0;
+ unsigned long image_tot_pages = 0;
+ unsigned long textround;
+
+ ret = -1;
+
+ p = NULL;
+ maxva = 0;
+
+ if (gzread(kernel_gfd, &ehdr, sizeof (Exec)) != sizeof (Exec)) {
+ PERROR("Error reading kernel image P9 header.");
+ goto out;
+ }
+
+ plan9header(&ehdr);
+ curpos = sizeof (Exec);
+
+ if (ehdr.magic != I_MAGIC) {
+ PERROR("Image does not have an P9 header.");
+ goto out;
+ }
+
+ textround = ((ehdr.text + 0x20 + 4095) >> 12) << 12;
+ *first_data_page = 0x100000 + textround;
+ DPRINTF(("ehrd.text is 0x%lx, textround is 0x%lx\n",
+ ehdr.text, textround));
+
+ image_tot_pages =
+ (textround + ehdr.data + ehdr.bss + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ DPRINTF(("tot pages is %ld\n", image_tot_pages));
+
+ *virt_load_addr = 0x80100000;
+
+ if ((*virt_load_addr & (PAGE_SIZE - 1)) != 0) {
+ ERROR("We can only deal with page-aligned load addresses");
+ goto out;
+ }
+
+ if ((*virt_load_addr + (image_tot_pages << PAGE_SHIFT)) >
+ HYPERVISOR_VIRT_START) {
+ ERROR("Cannot map all domain memory without hitting Xen space");
+ goto out;
+ }
+
+ /* just malloc an image that is image_tot_pages in size. Then read in
+ * the image -- text, data, -- to page-rounded alignments.
+ * then copy into xen .
+ * this gets BSS zeroed for free
+ */
+ DPRINTF(("Allocate %ld bytes\n", image_tot_pages * sizeof (*image)));
+ image = calloc(image_tot_pages, sizeof (*image));
+ if (!image)
+ return blah("alloc data");
+ /* text starts at 0x20, after the header, just like Unix long ago */
+ if (gzread(kernel_gfd, &image[0].data[sizeof (Exec)], ehdr.text) <
+ ehdr.text)
+ return blah("read text");
+ DPRINTF(("READ TEXT %ld bytes\n", ehdr.text));
+ datapage = ((ehdr.text + sizeof (Exec)) / PAGE_SIZE) + 1;
+ if (gzread(kernel_gfd, image[datapage].data, ehdr.data) < ehdr.data)
+ return blah("read data");
+ DPRINTF(("READ DATA %ld bytes\n", ehdr.data));
+
+ /* nice contig stuff */
+ /* oops need to start at 0x100000 */
+
+ ret = memcpy_toguest(xc_handle, dom,
+ image, image_tot_pages * 4096, page_array, 0x100);
+ DPRINTF(("done copying kernel to guest memory\n"));
+
+ out:
+ if (image)
+ free(image);
+ *pdb_page = image_tot_pages + (0x100000 >> PAGE_SHIFT);
+ return ret;
+}
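
To make the frame bookkeeping in setup_guestos() above concrete, here is a small worked example (illustration only, with a hypothetical kernel size) using the same formulas: tot_pte_pages = tot_pages >> 10, the Plan 9 pdb (Xen L2) page sits directly after the kernel, the pte (Xen L1) pages follow it, and the copied page array comes last.

    # Illustration only; the kernel footprint here is hypothetical.
    PAGE_SHIFT = 12
    tot_pages = (64 * 1024 * 1024) >> PAGE_SHIFT   # 16384 frames for a 64 MB domain
    tot_pte_pages = tot_pages >> 10                # 16 L1 pages cover 16384 PTEs

    first_page_after_kernel = 0x120                # hypothetical kernel size in pages
    cpu0pdb = first_page_after_kernel              # Plan 9 "pdb" == Xen L2, one page
    cpu0pte = cpu0pdb + 1                          # first L1 (pte) page
    cpu0ptelast = cpu0pte + tot_pte_pages - 1      # last L1 page
    page_array_page = cpu0ptelast + 1              # guest-visible copy of the pfn list

    print(cpu0pdb, cpu0pte, cpu0ptelast, page_array_page)   # 288 289 304 305
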
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 2dd8680b4f..d2b7da0eba 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -366,6 +366,33 @@ static PyObject *pyxc_linux_build(PyObject *self,
return zero;
}
+static PyObject *pyxc_plan9_build(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ u32 dom;
+ char *image, *ramdisk = NULL, *cmdline = "";
+ int control_evtchn, flags = 0;
+
+ static char *kwd_list[] = { "dom", "control_evtchn",
+ "image", "ramdisk", "cmdline", "flags",
+ NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|ssi", kwd_list,
+ &dom, &control_evtchn,
+ &image, &ramdisk, &cmdline, &flags) )
+ return NULL;
+
+ if ( xc_plan9_build(xc->xc_handle, dom, image,
+ cmdline, control_evtchn, flags) != 0 )
+ return PyErr_SetFromErrno(xc_error);
+
+ Py_INCREF(zero);
+ return zero;
+}
+
static PyObject *pyxc_bvtsched_global_set(PyObject *self,
PyObject *args,
PyObject *kwds)
@@ -889,6 +916,14 @@ static PyMethodDef pyxc_methods[] = {
" state_file [str]: Name of state file. Must not currently exist.\n"
" progress [int, 1]: Bool - display a running progress indication?\n\n"
"Returns: [int] 0 on success; -1 on error.\n" },
+ { "plan9_build",
+ (PyCFunction)pyxc_plan9_build,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Build a new Plan 9 guest OS.\n"
+ " dom [long]: Identifier of domain to build into.\n"
+ " image [str]: Name of kernel image file. May be gzipped.\n"
+ " cmdline [str, n/a]: Kernel parameters, if any.\n\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
{ "linux_restore",
(PyCFunction)pyxc_linux_restore,
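
With the binding above registered in pyxc_methods, a caller in the control domain would invoke it roughly as sketched below. Only the method name and keyword list (dom, control_evtchn, image, ramdisk, cmdline, flags) come from this patch; the handle constructor, domain id, event channel and kernel path are assumptions for illustration.

    # Hypothetical usage of the new plan9_build binding; constructor and values are assumed.
    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.new()            # assumed constructor; not shown in this patch
    rc = xc.plan9_build(dom=5,            # id of a freshly created, still-paused domain
                        control_evtchn=1, # control event channel set up by the toolstack
                        image='/boot/plan9-xen.gz',  # gzipped Plan 9 a.out kernel (hypothetical path)
                        cmdline='')
    # Returns 0 on success; on failure the wrapper raises via PyErr_SetFromErrno(xc_error).
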
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index 335da5e486..1a229a5599 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -1065,6 +1065,34 @@ def vm_image_linux(vm, image):
vm.create_domain("linux", kernel, ramdisk, cmdline)
return vm
+def vm_image_plan9(vm, image):
+ """Create a VM for a Plan 9 image.
+
+ name vm name
+ memory vm memory
+ image image config
+
+ returns vm
+ """
+ #todo: Same as for linux. Is that right? If so can unify them.
+ kernel = sxp.child_value(image, "kernel")
+ cmdline = ""
+ ip = sxp.child_value(image, "ip", "dhcp")
+ if ip:
+ cmdline += "ip=" + ip
+ root = sxp.child_value(image, "root")
+ if root:
+ cmdline += "root=" + root
+ args = sxp.child_value(image, "args")
+ if args:
+ cmdline += " " + args
+ ramdisk = sxp.child_value(image, "ramdisk", '')
+ vifs = vm.config_devices("vif")
+ vm.create_domain("plan9", kernel, ramdisk, cmdline)
+ return vm
+
+
+
def vm_dev_vif(vm, val, index, change=0):
"""Create a virtual network interface (vif).
@@ -1186,6 +1214,7 @@ def vm_field_maxmem(vm, config, val, index):
# Register image handlers.
add_image_handler('linux', vm_image_linux)
+add_image_handler('plan9', vm_image_plan9)
# Register device handlers.
add_device_handler('vif', vm_dev_vif)
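
For reference, vm_image_plan9() above reads the fields kernel, ip, root, args and ramdisk from the image stanza; in xend's sxp representation that stanza is a nested list. The shape below is a sketch, and the kernel path and argument values are hypothetical.

    # Hypothetical (image (plan9 ...)) stanza as xend's sxp module would hold it.
    image = ['plan9',
             ['kernel', '/boot/plan9-xen.gz'],   # required: sxp.child_value(image, 'kernel')
             ['ip', 'dhcp'],                     # also the default when omitted
             ['args', 'nobootprompt=local']]     # appended to the generated command line
    # vm_image_plan9(vm, image) then assembles the cmdline and calls
    # vm.create_domain('plan9', kernel, ramdisk, cmdline).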