author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-24 10:46:24 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-24 10:46:24 +0000
commit     ed30037e48073a7d855fce58c1dfc563e446ff6c (patch)
tree       1a9a3439c4acbd1d420c222f26acfd8aea873c66
parent     a3adbd6c3cabd7e5fa757827c0f058c7b19b889b (diff)
bitkeeper revision 1.1751 (42bbe480z9Fp_L5Tc500W8c8CL3g9A)
Rationalise x86 CRn guest state into a ctrlreg array in the per-vcpu context
structure. Most noticeably this means the pt_base field has gone away --
replaced by ctrlreg[3] (CR3). VCPUF_guest_stts is also gone -- it was never
arch-independent anyway.

Signed-off-by: Keir Fraser <keir@xensource.com>
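For callers the change is mechanical: wherever a domain builder or save/restore
path previously filled in pt_base, it now fills in ctrlreg[3], as the hunks
below show. A minimal sketch against the patched vcpu_guest_context layout (the
include path and the pgd_maddr parameter are illustrative placeholders, not
part of this patch):

    /* Sketch only: assumes the ctrlreg[8] array added to the public
     * vcpu_guest_context by this patch. */
    #include <string.h>
    #include <xen/arch-x86_32.h>          /* hypothetical include path */

    static void set_boot_pagetable(vcpu_guest_context_t *ctxt,
                                   unsigned long pgd_maddr /* machine address of top-level table */)
    {
        memset(ctxt->ctrlreg, 0, sizeof(ctxt->ctrlreg));
        ctxt->ctrlreg[3] = pgd_maddr;     /* was: ctxt->pt_base = pgd_maddr; */
    }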
-rw-r--r--  .rootkeys                                                  1
-rw-r--r--  freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c      2
-rw-r--r--  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c     2
-rw-r--r--  tools/debugger/libxendebug/xendebug.c                      6
-rw-r--r--  tools/libxc/Makefile                                       1
-rw-r--r--  tools/libxc/xc_linux_build.c                               6
-rw-r--r--  tools/libxc/xc_linux_restore.c                             4
-rw-r--r--  tools/libxc/xc_linux_save.c                                6
-rw-r--r--  tools/libxc/xc_plan9_build.c                             694
-rw-r--r--  tools/libxc/xc_ptrace.c                                    4
-rw-r--r--  tools/libxc/xc_ptrace_core.c                               2
-rw-r--r--  tools/libxc/xc_vmx_build.c                                 4
-rw-r--r--  xen/arch/x86/dom0_ops.c                                   21
-rw-r--r--  xen/arch/x86/domain.c                                     42
-rw-r--r--  xen/arch/x86/i387.c                                        2
-rw-r--r--  xen/arch/x86/traps.c                                      17
-rw-r--r--  xen/arch/x86/vmx_vmcs.c                                    1
-rw-r--r--  xen/include/asm-x86/domain.h                               3
-rw-r--r--  xen/include/public/arch-x86_32.h                           4
-rw-r--r--  xen/include/public/arch-x86_64.h                           4
-rw-r--r--  xen/include/public/dom0_ops.h                              2
-rw-r--r--  xen/include/xen/sched.h                                    3
22 files changed, 52 insertions(+), 779 deletions(-)
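The same indexing applies on the read side: CR0 and CR2, formerly the separate
cr0 and guest_cr2 fields, are now ctrlreg[0] and ctrlreg[2]. A small sketch in
the style of the patched paging_enabled() from the tools/libxc/xc_ptrace.c hunk
below (X86_CR0_PE and X86_CR0_PG are constants that file already uses; nothing
here is new to the patch itself):

    /* Sketch only: mirrors the xc_ptrace.c hunk further down. */
    static inline int guest_paging_enabled(const vcpu_guest_context_t *c)
    {
        unsigned long cr0 = c->ctrlreg[0];   /* was c->cr0 */
        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
    }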
diff --git a/.rootkeys b/.rootkeys
index be88036027..4ad2a84038 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -749,7 +749,6 @@
42a40bc4diWfFsPGf0RW7qXMufU4YQ tools/libxc/xc_load_elf.c
3fbba6db7WnnJr0KFrIFrqNlSKvFYg tools/libxc/xc_misc.c
4051bce6CHAsYh8P5t2OHDtRWOP9og tools/libxc/xc_physdev.c
-41cc934aO1m6NxEh_8eDr9bJIMoLFA tools/libxc/xc_plan9_build.c
3fbba6dctWRWlFJkYb6hdix2X4WMuw tools/libxc/xc_private.c
3fbba6dcbVrG2hPzEzwdeV_UC8kydQ tools/libxc/xc_private.h
42337174PxyzzPk62raDiYCIsfStDg tools/libxc/xc_ptrace.c
diff --git a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c
index d084a54303..80e0a7a98c 100644
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c
@@ -974,7 +974,7 @@ start_ap(int apic_id)
ctxt.failsafe_callback_cs = __KERNEL_CS;
ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
- ctxt.pt_base = (vm_paddr_t)IdlePTD;
+ ctxt.ctrlreg[3] = (vm_paddr_t)IdlePTD;
boot_error = HYPERVISOR_boot_vcpu(bootAP, &ctxt);
diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
index 14342b66de..18aa777dc7 100644
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
@@ -908,7 +908,7 @@ static int __init do_boot_cpu(int apicid)
ctxt.failsafe_callback_cs = __KERNEL_CS;
ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
- ctxt.pt_base = (unsigned long)virt_to_machine(swapper_pg_dir);
+ ctxt.ctrlreg[3] = (unsigned long)virt_to_machine(swapper_pg_dir);
boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
diff --git a/tools/debugger/libxendebug/xendebug.c b/tools/debugger/libxendebug/xendebug.c
index 844cdf0e03..6b116dfbc8 100644
--- a/tools/debugger/libxendebug/xendebug.c
+++ b/tools/debugger/libxendebug/xendebug.c
@@ -342,9 +342,9 @@ xendebug_memory_page (domain_context_p ctxt, int xc_handle, u32 vcpu,
}
}
- if ( vcpu_ctxt->pt_base != ctxt->cr3_phys[vcpu])
+ if ( vcpu_ctxt->ctrlreg[3] != ctxt->cr3_phys[vcpu])
{
- ctxt->cr3_phys[vcpu] = vcpu_ctxt->pt_base;
+ ctxt->cr3_phys[vcpu] = vcpu_ctxt->ctrlreg[3];
if ( ctxt->cr3_virt[vcpu] )
munmap(ctxt->cr3_virt[vcpu], PAGE_SIZE);
ctxt->cr3_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid,
@@ -383,7 +383,7 @@ xendebug_memory_page (domain_context_p ctxt, int xc_handle, u32 vcpu,
if ( ctxt->page_virt[vcpu] == NULL )
{
printf("cr3 %lx pde %lx page %lx pti %lx\n",
- vcpu_ctxt->pt_base, pde, page, vtopti(va));
+ vcpu_ctxt->ctrlreg[3], pde, page, vtopti(va));
ctxt->page_phys[vcpu] = 0;
return 0;
}
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index e8c6058b7d..2a955865e3 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -22,7 +22,6 @@ SRCS += xc_gnttab.c
SRCS += xc_load_bin.c
SRCS += xc_load_elf.c
SRCS += xc_linux_build.c
-SRCS += xc_plan9_build.c
SRCS += xc_linux_restore.c
SRCS += xc_linux_save.c
SRCS += xc_misc.c
diff --git a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c
index dc92fff3f6..1ce72d3dae 100644
--- a/tools/libxc/xc_linux_build.c
+++ b/tools/libxc/xc_linux_build.c
@@ -227,7 +227,7 @@ static int setup_guest(int xc_handle,
/* First allocate page for page dir. */
ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
- ctxt->pt_base = l2tab;
+ ctxt->ctrlreg[3] = l2tab;
/* Initialise the page tables. */
if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -282,7 +282,7 @@ static int setup_guest(int xc_handle,
/* First allocate page for page dir. */
ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
- ctxt->pt_base = l4tab;
+ ctxt->ctrlreg[3] = l4tab;
/* Intiliaize page table */
if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -502,7 +502,7 @@ int xc_linux_build(int xc_handle,
}
if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
- (ctxt->pt_base != 0) )
+ (ctxt->ctrlreg[3] != 0) )
{
ERROR("Domain is already constructed");
goto error_out;
diff --git a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c
index 57814ca0a8..92e57f2c27 100644
--- a/tools/libxc/xc_linux_restore.c
+++ b/tools/libxc/xc_linux_restore.c
@@ -489,7 +489,7 @@ int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns)
}
/* Uncanonicalise the page table base pointer. */
- pfn = ctxt.pt_base >> PAGE_SHIFT;
+ pfn = ctxt.ctrlreg[3] >> PAGE_SHIFT;
if ( (pfn >= nr_pfns) || ((pfn_type[pfn]&LTABTYPE_MASK) != L2TAB) )
{
printf("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx\n",
@@ -497,7 +497,7 @@ int xc_linux_restore(int xc_handle, int io_fd, u32 dom, unsigned long nr_pfns)
ERR("PT base is bad.");
goto out;
}
- ctxt.pt_base = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
+ ctxt.ctrlreg[3] = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
/* clear any pending events and the selector */
memset(&(shared_info->evtchn_pending[0]), 0,
diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c
index feb051884d..f761141170 100644
--- a/tools/libxc/xc_linux_save.c
+++ b/tools/libxc/xc_linux_save.c
@@ -459,7 +459,7 @@ int xc_linux_save(int xc_handle, int io_fd, u32 dom)
shared_info_frame = info.shared_info_frame;
/* A cheesy test to see whether the domain contains valid state. */
- if ( ctxt.pt_base == 0 ){
+ if ( ctxt.ctrlreg[3] == 0 ){
ERR("Domain is not in a valid Linux guest OS state");
goto out;
}
@@ -1015,11 +1015,11 @@ int xc_linux_save(int xc_handle, int io_fd, u32 dom)
}
/* Canonicalise the page table base pointer. */
- if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.pt_base >> PAGE_SHIFT) ) {
+ if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.ctrlreg[3] >> PAGE_SHIFT) ) {
ERR("PT base is not in range of pseudophys map");
goto out;
}
- ctxt.pt_base = live_mfn_to_pfn_table[ctxt.pt_base >> PAGE_SHIFT] <<
+ ctxt.ctrlreg[3] = live_mfn_to_pfn_table[ctxt.ctrlreg[3] >> PAGE_SHIFT] <<
PAGE_SHIFT;
if (write(io_fd, &ctxt, sizeof(ctxt)) != sizeof(ctxt) ||
diff --git a/tools/libxc/xc_plan9_build.c b/tools/libxc/xc_plan9_build.c
deleted file mode 100644
index 7f697d2115..0000000000
--- a/tools/libxc/xc_plan9_build.c
+++ /dev/null
@@ -1,694 +0,0 @@
-/******************************************************************************
- * xc_plan9_build.c
- * derived from xc_linux_build.c
- */
-
-#include "xc_private.h"
-
-#include <zlib.h>
-
-#define DEBUG 1
-#ifdef DEBUG
-#define DPRINTF(x) printf x; fflush(stdout);
-#else
-#define DPRINTF(x)
-#endif
-
-#include "plan9a.out.h"
-
-/* really TOS which means stack starts at 0x2000, and uses page 1*/
-#define STACKPAGE 2
-struct Exec header, origheader;
-
-typedef struct page {
- char data[PAGE_SIZE];
-} PAGE;
-
-
-int
-memcpy_toguest(int xc_handle, u32 dom, void *v, int size,
- unsigned long *page_array, unsigned int to_page)
-{
- int ret;
- unsigned char *cp = v;
- unsigned int whichpage;
- unsigned char *vaddr;
-
-// DPRINTF(("memcpy_to_guest: to_page 0x%x, count %d\n", to_page, size));
- for (ret = 0, whichpage = to_page; size > 0;
- whichpage++, size -= PAGE_SIZE, cp += PAGE_SIZE) {
-
- // DPRINTF (("map_pfn_writeable(%p, 0x%lx)\n", pm_handle,
-// page_array[whichpage]));
- vaddr = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- page_array[whichpage]);
- // DPRINTF (("vaddr is %p\n", vaddr));
- if (vaddr == NULL) {
- ret = -1;
- ERROR("Couldn't map guest memory");
- goto out;
- }
- // DPRINTF (("copy %p to %p, count 0x%x\n", cp, vaddr, 4096));
- memcpy(vaddr, cp, 4096);
- munmap(vaddr, PAGE_SIZE);
- // DPRINTF (("Did %ud'th pages\n", whichpage));
- }
- out:
- return ret;
-}
-
-int
-blah(char *b)
-{
- fprintf(stderr, "Error in xc_plan9_build!\n");
- perror(b);
- return errno;
-}
-
-/* swap bytes. For plan 9 headers */
-void
-swabby(unsigned long *s, char *name)
-{
- unsigned long it;
- it = ((*s & 0xff000000) >> 24) | ((*s & 0xff0000) >> 8) |
- ((*s & 0xff00) << 8) | ((*s & 0xff) << 24);
- DPRINTF(("Item %s is 0x%lx\n", name, it));
- *s = it;
-}
-
-void
-plan9header(Exec * header)
-{
- /* header is big-endian */
- swabby((unsigned long *)&header->magic, "magic");
- swabby((unsigned long *)&header->text, "text");
- swabby((unsigned long *)&header->data, "data");
- swabby((unsigned long *)&header->bss, "bss");
- swabby((unsigned long *)&header->syms, "syms");
- swabby((unsigned long *)&header->entry, "entry");
- swabby((unsigned long *)&header->spsz, "spsz");
- swabby((unsigned long *)&header->pcsz, "pcsz");
-
-}
-
-static int
- loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom,
- unsigned long *page_array,
- unsigned long tot_pages, unsigned long *virt_load_addr,
- unsigned long *ksize, unsigned long *symtab_addr,
- unsigned long *symtab_len,
- unsigned long *first_data_page, unsigned long *pdb_page,
- const char *cmdline);
-
-#define P9ROUND (P9SIZE / 8)
-
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
-
-static int
-setup_guest(int xc_handle,
- u32 dom,
- gzFile kernel_gfd,
- unsigned long tot_pages,
- unsigned long *virt_startinfo_addr,
- unsigned long *virt_load_addr,
- vcpu_guest_context_t * ctxt,
- const char *cmdline,
- unsigned long shared_info_frame,
- unsigned int control_evtchn,
- int flags)
-{
- l1_pgentry_t *vl1e = NULL;
- l2_pgentry_t *vl2tab = NULL, *vl2e = NULL;
- unsigned long *cpage_array = NULL;
- unsigned long *pte_array = NULL;
- unsigned long l2tab;
- unsigned long l1tab;
- unsigned long count;
- unsigned long symtab_addr = 0, symtab_len = 0;
- start_info_t *start_info;
- shared_info_t *shared_info;
- unsigned long ksize;
- mmu_t *mmu = NULL;
- int i;
- unsigned long first_page_after_kernel = 0,
- first_data_page = 0,
- page_array_page;
- unsigned long cpu0pdb, cpu0pte, cpu0ptelast;
- unsigned long /*last_pfn, */ tot_pte_pages;
-
- DPRINTF(("tot pages is %ld\n", tot_pages));
- if ((cpage_array = malloc(tot_pages * sizeof (unsigned long))) == NULL) {
- PERROR("Could not allocate cpage array");
- goto error_out;
- }
-
- if (xc_get_pfn_list(xc_handle, dom, cpage_array, tot_pages) != tot_pages) {
- PERROR("Could not get the page frame list");
- goto error_out;
- }
-
- for (i = 0; i < 64; i++)
- DPRINTF(("First %d page is 0x%lx\n", i, cpage_array[i]));
-
- tot_pte_pages = tot_pages >> 10;
- DPRINTF(("Page range is 0 to 0x%lx, which requires 0x%lx pte pages\n",
- tot_pte_pages, tot_pte_pages));
-
- if (loadp9image(kernel_gfd, xc_handle, dom, cpage_array, tot_pages,
- virt_load_addr, &ksize, &symtab_addr, &symtab_len,
- &first_data_page, &first_page_after_kernel, cmdline))
- goto error_out;
- DPRINTF(("First data page is 0x%lx\n", first_data_page));
- DPRINTF(("First page after kernel is 0x%lx\n",
- first_page_after_kernel));
-
- /*
- NEED TO INCREMENT first page after kernel by:
- + 1 (pdb)
- + tot_pte_pages (pte)
- + tot_pte_pages (page_array)
- */
- /* SO, have to copy the first kernel pages pfns right into the
- * page_array, then do identity maps for the rest.
- */
- DPRINTF(("mapped kernel pages\n"));
-
- /* now loop over all ptes and store into the page_array, so as
- * to get the identity map.
- */
- if ((pte_array =
- malloc(tot_pte_pages * 1024 * sizeof (unsigned long))) == NULL) {
- PERROR("Could not allocate pte array");
- goto error_out;
- }
-
- /* plan 9 on startup expects a "l2" (xen parlance) at 0x2000,
- * this "l2" should have one PTE pointer for a va of 0x80000000.
- * and an l1 (PTEs to you) at 0x3000. (physical).
- * the PTEs should map the first 4M of memory.
- */
- /* get a physical address for the L2. This means take the PFN and
- * shift left.
- */
- /* this terminology is plan 9 terminology.
- * pdb is essentially the Xen L2. 'Page Directory Block'?
- * I need to ask JMK.
- * cpupte is the pte array.
- * Plan 9 counts on these being set up for cpu0.
- * SO: cpu0pdb (Xen L2)
- * and cpupte (Xen L1)
- */
- /* cpu0pdb is right after kernel */
- cpu0pdb = first_page_after_kernel;
- /* cpu0pte comes right after cpu0pdb */
- cpu0pte = cpu0pdb + 1;
- /* number of the past cpu0pte page */
- cpu0ptelast = cpu0pte + tot_pte_pages - 1;
- /* first page of the page array (mfn) */
- page_array_page = cpu0ptelast + 1;
-
- DPRINTF(("cpu0pdb 0x%lx, cpu0pte 0x%lx cpu0ptelast 0x%lx\n", cpu0pdb,
- cpu0pte, cpu0ptelast));
- l2tab = cpage_array[cpu0pdb] << PAGE_SHIFT;
- DPRINTF(("l2tab 0x%lx\n", l2tab));
- ctxt->pt_base = l2tab;
-
- /* get a physical address for the L1. This means take the PFN and
- * shift left.
- */
- l1tab = cpage_array[cpu0pte] << PAGE_SHIFT;
- DPRINTF(("l1tab 0x%lx\n", l1tab));
- if ((mmu = init_mmu_updates(xc_handle, dom)) == NULL)
- goto error_out;
- DPRINTF(("now map in l2tab\n"));
-
- /* Initialise the page tables. */
- /* mmap in the l2tab */
- if ((vl2tab = xc_map_foreign_range(xc_handle, dom,
- PAGE_SIZE, PROT_READ | PROT_WRITE,
- l2tab >> PAGE_SHIFT)) == NULL)
- goto error_out;
- DPRINTF(("vl2tab 0x%p\n", vl2tab));
- /* now we have the cpu0pdb for the kernel, starting at 0x2000,
- * so we can plug in the physical pointer to the 0x3000 pte
- */
- /* zero it */
- memset(vl2tab, 0, PAGE_SIZE);
- /* get a pointer in the l2tab for the virt_load_addr */
- DPRINTF(("&vl2tab[l2_table_offset(*virt_load_addr)] is 0x%p[0x%lx]\n",
- &vl2tab[l2_table_offset(*virt_load_addr)],
- l2_table_offset(*virt_load_addr)));
-
- vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
-
- /* OK, for all the available PTE, set the PTE pointer up */
- DPRINTF(("For i = %ld to %ld ...\n", cpu0pte, cpu0ptelast));
- for (i = cpu0pte; i <= cpu0ptelast; i++) {
- DPRINTF(("Index %d Set %p to 0x%lx\n", i, vl2e,
- (cpage_array[i] << PAGE_SHIFT) | L2_PROT));
- *vl2e++ = (cpage_array[i] << PAGE_SHIFT) | L2_PROT;
- }
-
- /* unmap it ... */
- munmap(vl2tab, PAGE_SIZE);
-
- /* for the pages from virt_load_pointer to the end of this
- * set of PTEs, map in the PFN for that VA
- */
- for (vl1e = (l1_pgentry_t *) pte_array, count = 0;
- count < tot_pte_pages * 1024; count++, vl1e++) {
-
- *vl1e = cpage_array[count];
- if (!cpage_array[count])
- continue;
- /* set in the PFN for this entry */
- *vl1e = (cpage_array[count] << PAGE_SHIFT) | L1_PROT;
-/*
- DPRINTF (("vl1e # %d 0x%lx gets 0x%lx\n",
- count, vl1e, *vl1e));
-*/
- if ((count >= cpu0pdb) && (count <= cpu0ptelast)) {
- //DPRINTF((" Fix up page %d as it is in pte ville: ", count));
- *vl1e &= ~_PAGE_RW;
- DPRINTF(("0x%lx\n", *vl1e));
- }
- if ((count >= (0x100000 >> 12))
- && (count < (first_data_page >> 12))) {
- //DPRINTF((" Fix up page %d as it is in text ", count));
- *vl1e &= ~_PAGE_RW;
- //DPRINTF (("0x%lx\n", *vl1e));
- }
- }
- /* special thing. Pre-map the shared info page */
- vl1e = &pte_array[2];
- *vl1e = (shared_info_frame << PAGE_SHIFT) | L1_PROT;
- DPRINTF(("v1l1 %p, has value 0x%lx\n", vl1e, *(unsigned long *) vl1e));
- /* another special thing. VA 80005000 has to point to 80006000 */
- /* this is a Plan 9 thing -- the 'mach' pointer */
- /* 80005000 is the mach pointer per-cpu, and the actual
- * mach pointers are 80006000, 80007000 etc.
- */
- vl1e = &pte_array[5];
- *vl1e = (cpage_array[6] << PAGE_SHIFT) | L1_PROT;
-
- /* OK, it's all set up, copy it in */
- memcpy_toguest(xc_handle, dom, pte_array,
- (tot_pte_pages * 1024 * sizeof (unsigned long) /**/),
- cpage_array, cpu0pte);
-
- /* We really need to have the vl1tab unmapped or the add_mmu_update
- * below will fail bigtime.
- */
- /* Xen guys: remember my errors on domain exit? Something I'm doing
- * wrong in here? We never did find out ...
- */
- /* get rid of the entries we can not use ... */
- memcpy_toguest(xc_handle, dom, cpage_array,
- (tot_pte_pages * 1024 * sizeof (unsigned long) /**/),
- cpage_array, page_array_page);
- /* last chance to dump all of memory */
- // dumpit(xc_handle, dom, 0 /*0x100000>>12*/, tot_pages, cpage_array) ;
- /*
- * Pin down l2tab addr as page dir page - causes hypervisor to provide
- * correct protection for the page
- */
- if (pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom))
- goto error_out;
-
- for (count = 0; count < tot_pages; count++) {
-/*
- DPRINTF (("add_mmu_update(0x%x, 0x%x, 0x%x, %d)\n", xc_handle, mmu,
- (cpage_array[count]
- << PAGE_SHIFT) |
- MMU_MACHPHYS_UPDATE,
- count));
-*/
- if (add_mmu_update(xc_handle, mmu,
- (cpage_array[count] << PAGE_SHIFT) |
- MMU_MACHPHYS_UPDATE, count))
- goto error_out;
- //DPRINTF(("Do the next one\n"));
- }
-/*
- */
-
- //dumpit(pm_handle, 3, 4, page_array);
- /* put the virt_startinfo_addr at KZERO */
- /* just hard-code for now */
- *virt_startinfo_addr = 0x80000000;
-
- DPRINTF(("virt_startinfo_addr = 0x%lx\n", *virt_startinfo_addr));
- start_info = xc_map_foreign_range(xc_handle, dom,
- PAGE_SIZE, PROT_READ | PROT_WRITE,
- cpage_array[0]);
- DPRINTF(("startinfo = 0x%p\n", start_info));
- DPRINTF(("shared_info_frame is %lx\n", shared_info_frame));
- memset(start_info, 0, sizeof (*start_info));
- start_info->pt_base = 0x80000000 | cpu0pdb << PAGE_SHIFT;
- start_info->mfn_list = 0x80000000 | (page_array_page) << PAGE_SHIFT;
- DPRINTF(("mfn_list 0x%lx\n", start_info->mfn_list));
- start_info->mod_start = 0;
- start_info->mod_len = 0;
- start_info->nr_pages = tot_pte_pages * 1024;
- start_info->nr_pt_frames = tot_pte_pages + 1;
- start_info->shared_info = shared_info_frame;
- start_info->flags = 0;
- DPRINTF((" control event channel is %d\n", control_evtchn));
- start_info->domain_controller_evtchn = control_evtchn;
- strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
- start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
- munmap(start_info, PAGE_SIZE);
-
- DPRINTF(("done setting up start_info\n"));
- DPRINTF(("shared_info_frame = 0x%lx\n", shared_info_frame));
- /* shared_info page starts its life empty. */
-
- shared_info = xc_map_foreign_range(xc_handle, dom,
- PAGE_SIZE, PROT_READ | PROT_WRITE,
- shared_info_frame);
- memset(shared_info, 0, PAGE_SIZE);
- /* Mask all upcalls... */
- DPRINTF(("mask all upcalls\n"));
- for (i = 0; i < MAX_VIRT_CPUS; i++)
- shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- munmap(shared_info, PAGE_SIZE);
-
- /* Send the page update requests down to the hypervisor. */
- DPRINTF(("send page update reqs down.\n"));
- if (finish_mmu_updates(xc_handle, mmu))
- goto error_out;
-
- //DPRINTF (("call dumpit.\n"));
- //dumpit(pm_handle, 0x100000>>12, tot_pages, page_array) ;
- //dumpit (pm_handle, 2, 0x100, page_array);
- free(mmu);
-
- /* we don't bother freeing anything at this point --
- * we're exiting and it is pointless
- */
- return 0;
-
- error_out:
- /* oh well we still free some things -- I oughtta nuke this */
- if (mmu != NULL)
- free(mmu);
- ;
- return -1;
-}
-
-int
-xc_plan9_build(int xc_handle,
- u32 domid,
- const char *image_name,
- const char *cmdline,
- unsigned int control_evtchn, unsigned long flags)
-{
- dom0_op_t launch_op, op;
- unsigned long load_addr = 0;
- long tot_pages;
- int kernel_fd = -1;
- gzFile kernel_gfd = NULL;
- int rc, i;
- vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
- unsigned long virt_startinfo_addr;
-
- if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) {
- PERROR("Could not find total pages for domain");
- return 1;
- }
- DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
-
- kernel_fd = open(image_name, O_RDONLY);
- if (kernel_fd < 0) {
- PERROR("Could not open kernel image");
- return 1;
- }
-
- if ((kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL) {
- PERROR("Could not allocate decompression state for state file");
- close(kernel_fd);
- return 1;
- }
-
- DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
- if (mlock(&st_ctxt, sizeof (st_ctxt))) {
- PERROR("xc_plan9_build: ctxt mlock failed");
- return 1;
- }
-
- op.cmd = DOM0_GETDOMAININFO;
- op.u.getdomaininfo.domain = (domid_t) domid;
- if ((do_dom0_op(xc_handle, &op) < 0) ||
- ((u32) op.u.getdomaininfo.domain != domid)) {
- PERROR("Could not get info on domain");
- goto error_out;
- }
- DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
-
- if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) )
- {
- PERROR("Could not get vcpu context");
- goto error_out;
- }
-
- if (!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED)
- || (ctxt->pt_base != 0)) {
- ERROR("Domain is already constructed");
- goto error_out;
- }
-
- DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
- if (setup_guest(xc_handle, domid, kernel_gfd, tot_pages,
- &virt_startinfo_addr,
- &load_addr, &st_ctxt, cmdline,
- op.u.getdomaininfo.shared_info_frame,
- control_evtchn, flags) < 0) {
- ERROR("Error constructing guest OS");
- goto error_out;
- }
-
- /* leave the leak in here for now
- if ( kernel_fd >= 0 )
- close(kernel_fd);
- if( kernel_gfd )
- gzclose(kernel_gfd);
- */
- ctxt->flags = 0;
-
- /*
- * Initial register values:
- * DS,ES,FS,GS = FLAT_KERNEL_DS
- * CS:EIP = FLAT_KERNEL_CS:start_pc
- * SS:ESP = FLAT_KERNEL_DS:start_stack
- * ESI = start_info
- * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
- * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
- */
- ctxt->user_regs.ds = FLAT_KERNEL_DS;
- ctxt->user_regs.es = FLAT_KERNEL_DS;
- ctxt->user_regs.fs = FLAT_KERNEL_DS;
- ctxt->user_regs.gs = FLAT_KERNEL_DS;
- ctxt->user_regs.ss = FLAT_KERNEL_DS;
- ctxt->user_regs.cs = FLAT_KERNEL_CS;
- ctxt->user_regs.eip = load_addr;
- ctxt->user_regs.eip = 0x80100020;
- /* put stack at top of second page */
- ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
-
- /* why is this set? */
- ctxt->user_regs.esi = ctxt->user_regs.esp;
- ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
-
- /* FPU is set up to default initial state. */
- memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
- /* Virtual IDT is empty at start-of-day. */
- for (i = 0; i < 256; i++) {
- ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
-
- /* No LDT. */
- ctxt->ldt_ents = 0;
-
- /* Use the default Xen-provided GDT. */
- ctxt->gdt_ents = 0;
-
- /* Ring 1 stack is the initial stack. */
- /* put stack at top of second page */
- ctxt->kernel_ss = FLAT_KERNEL_DS;
- ctxt->kernel_sp = ctxt->user_regs.esp;
-
- /* No debugging. */
- memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
-
- /* No callback handlers. */
-#if defined(__i386__)
- ctxt->event_callback_cs = FLAT_KERNEL_CS;
- ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
- ctxt->failsafe_callback_eip = 0;
-#elif defined(__x86_64__)
- ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_eip = 0;
- ctxt->syscall_callback_eip = 0;
-#endif
-
- memset(&launch_op, 0, sizeof (launch_op));
-
- launch_op.u.setdomaininfo.domain = (domid_t) domid;
- launch_op.u.setdomaininfo.vcpu = 0;
- // launch_op.u.setdomaininfo.num_vifs = 1;
- launch_op.u.setdomaininfo.ctxt = ctxt;
- launch_op.cmd = DOM0_SETDOMAININFO;
- rc = do_dom0_op(xc_handle, &launch_op);
-
- fprintf(stderr, "RC is %d\n", rc);
- return rc;
-
- error_out:
- if (kernel_fd >= 0)
- close(kernel_fd);
- if (kernel_gfd)
- gzclose(kernel_gfd);
-
- return -1;
-}
-
-/*
- * Plan 9 memory layout (initial)
- * ----------------
- * | info from xen| @0
- * ---------------|<--- boot args (start at 0x1200 + 64)
- * | stack |
- * ----------------<--- page 2
- * | empty |
- * ---------------<---- page 5 MACHADDR (always points to machp[cpuno]
- * | aliased |
- * ---------------<----- page 6 CPU0MACH
- * | CPU0MACH |
- * ----------------
- * | empty |
- * ---------------- *virt_load_addr = ehdr.e_entry (0x80100000)
- * | kernel |
- * | |
- * ---------------- <----- page aligned boundary.
- * | data |
- * | |
- * ----------------
- * | bss |
- * ----------------<--- end of kernel (page aligned)
- * | PMD cpu0pdb |
- * ----------------<--- page +1
- * | PTE cpu0pte |
- * ----------------<--- page (tot_pte_pages)/1024
- * | page_array |
- * ---------------- <--- page (tot_pte_pages)/1024
- * | empty to TOM |
- * ----------------
- */
-
-static int
-loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom,
- unsigned long *page_array,
- unsigned long tot_pages, unsigned long *virt_load_addr,
- unsigned long *ksize, unsigned long *symtab_addr,
- unsigned long *symtab_len,
- unsigned long *first_data_page, unsigned long *pdb_page,
- const char *cmdline)
-{
- unsigned long datapage;
- Exec ehdr;
-
- char *p;
- unsigned long maxva;
- int curpos, ret;
- PAGE *image = 0;
- unsigned long image_tot_pages = 0;
- unsigned long textround;
- static PAGE args;
-
- ret = -1;
-
- p = NULL;
- maxva = 0;
-
- if (gzread(kernel_gfd, &ehdr, sizeof (Exec)) != sizeof (Exec)) {
- PERROR("Error reading kernel image P9 header.");
- goto out;
- }
-
- plan9header(&ehdr);
- curpos = sizeof (Exec);
-
- if (ehdr.magic != I_MAGIC) {
- PERROR("Image does not have an P9 header.");
- goto out;
- }
-
- textround = ((ehdr.text + 0x20 + 4095) >> 12) << 12;
- *first_data_page = 0x100000 + textround;
- DPRINTF(("ehrd.text is 0x%lx, textround is 0x%lx\n",
- ehdr.text, textround));
-
- image_tot_pages =
- (textround + ehdr.data + ehdr.bss + PAGE_SIZE - 1) >> PAGE_SHIFT;
- DPRINTF(("tot pages is %ld\n", image_tot_pages));
-
- *virt_load_addr = 0x80100000;
-
- if ((*virt_load_addr & (PAGE_SIZE - 1)) != 0) {
- ERROR("We can only deal with page-aligned load addresses");
- goto out;
- }
-
- if ((*virt_load_addr + (image_tot_pages << PAGE_SHIFT)) >
- HYPERVISOR_VIRT_START) {
- ERROR("Cannot map all domain memory without hitting Xen space");
- goto out;
- }
-
- /* just malloc an image that is image_tot_pages in size. Then read in
- * the image -- text, data, -- to page-rounded alignments.
- * then copy into xen .
- * this gets BSS zeroed for free
- */
- DPRINTF(("Allocate %ld bytes\n", image_tot_pages * sizeof (*image)));
- image = calloc(image_tot_pages, sizeof (*image));
- if (!image)
- return blah("alloc data");
- /* text starts at 0x20, after the header, just like Unix long ago */
- if (gzread(kernel_gfd, &image[0].data[sizeof (Exec)], ehdr.text) <
- ehdr.text)
- return blah("read text");
- DPRINTF(("READ TEXT %ld bytes\n", ehdr.text));
- datapage = ((ehdr.text + sizeof (Exec)) / PAGE_SIZE) + 1;
- if (gzread(kernel_gfd, image[datapage].data, ehdr.data) < ehdr.data)
- return blah("read data");
- DPRINTF(("READ DATA %ld bytes\n", ehdr.data));
-
- /* nice contig stuff */
- /* oops need to start at 0x100000 */
-
- ret = memcpy_toguest(xc_handle, dom,
- image, image_tot_pages * 4096, page_array, 0x100);
- DPRINTF(("done copying kernel to guest memory\n"));
-
- /* now do the bootargs */
- /* in plan 9, the x=y bootargs start at 0x1200 + 64 in real memory */
- /* we'll copy to page 1, so we offset into the page struct at
- * 0x200 + 64
- */
- memset(&args, 0, sizeof(args));
- memcpy(&args.data[0x200 + 64], cmdline, strlen(cmdline));
- printf("Copied :%s: to page for args\n", cmdline);
- ret = memcpy_toguest(xc_handle, dom, &args, sizeof(args), page_array,1);
- //dumpit(xc_handle, dom, 0 /*0x100000>>12*/, 4, page_array) ;
- out:
- if (image)
- free(image);
- *pdb_page = image_tot_pages + (0x100000 >> PAGE_SHIFT);
- return ret;
-}
diff --git a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c
index a8b39a648d..33312434cf 100644
--- a/tools/libxc/xc_ptrace.c
+++ b/tools/libxc/xc_ptrace.c
@@ -75,7 +75,7 @@ struct gdb_regs {
int retval = xc_domain_get_vcpu_context(xc_handle, domid, cpu, &ctxt[cpu]); \
if (retval) \
goto error_out; \
- cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
+ cr3[cpu] = ctxt[cpu].ctrlreg[3]; /* physical address */ \
regs_valid[cpu] = 1; \
} \
@@ -136,7 +136,7 @@ static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
static inline int paging_enabled(vcpu_guest_context_t *v)
{
- unsigned long cr0 = v->cr0;
+ unsigned long cr0 = v->ctrlreg[0];
return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}
diff --git a/tools/libxc/xc_ptrace_core.c b/tools/libxc/xc_ptrace_core.c
index ec7a6980c3..71d2b3595a 100644
--- a/tools/libxc/xc_ptrace_core.c
+++ b/tools/libxc/xc_ptrace_core.c
@@ -193,7 +193,7 @@ xc_waitdomain_core(int domfd, int *status, int options)
return -1;
for (i = 0; i < nr_vcpus; i++) {
- cr3[i] = ctxt[i].pt_base;
+ cr3[i] = ctxt[i].ctrlreg[3];
}
if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
printf("Could not allocate p2m_array\n");
diff --git a/tools/libxc/xc_vmx_build.c b/tools/libxc/xc_vmx_build.c
index c30fd4c49b..4d34d0ed8d 100644
--- a/tools/libxc/xc_vmx_build.c
+++ b/tools/libxc/xc_vmx_build.c
@@ -271,7 +271,7 @@ static int setup_guest(int xc_handle,
/* First allocate page for page dir. */
ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
- ctxt->pt_base = l2tab;
+ ctxt->ctrlreg[3] = l2tab;
/* Initialise the page tables. */
if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -549,7 +549,7 @@ int xc_vmx_build(int xc_handle,
}
if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
- (ctxt->pt_base != 0) )
+ (ctxt->ctrlreg[3] != 0) )
{
ERROR("Domain is already constructed");
goto error_out;
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index fdefebd4b6..7ca358d69b 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -378,12 +378,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
void arch_getdomaininfo_ctxt(
struct vcpu *v, struct vcpu_guest_context *c)
-{
-#ifdef __i386__ /* Remove when x86_64 VMX is implemented */
-#ifdef CONFIG_VMX
+{
extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
-#endif
-#endif
memcpy(c, &v->arch.guest_context, sizeof(*c));
@@ -391,27 +387,22 @@ void arch_getdomaininfo_ctxt(
BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
c->user_regs.eflags |= v->arch.iopl << 12;
-#ifdef __i386__
-#ifdef CONFIG_VMX
- if ( VMX_DOMAIN(v) ) {
+ if ( VMX_DOMAIN(v) )
+ {
save_vmx_cpu_user_regs(&c->user_regs);
- __vmread(CR0_READ_SHADOW, &c->cr0);
- __vmread(CR4_READ_SHADOW, &c->cr4);
+ __vmread(CR0_READ_SHADOW, &c->ctrlreg[0]);
+ __vmread(CR4_READ_SHADOW, &c->ctrlreg[4]);
}
-#endif
-#endif
c->flags = 0;
if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
c->flags |= VGCF_I387_VALID;
if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
-#ifdef CONFIG_VMX
if (VMX_DOMAIN(v))
c->flags |= VGCF_VMX_GUEST;
-#endif
- c->pt_base = pagetable_get_paddr(v->arch.guest_table);
+ c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);
c->vm_assist = v->domain->vm_assist;
}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 169138667d..8dc812aa84 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -8,7 +8,7 @@
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
+ * Gareth Hughes <gareth@valinux.com>, May 2000
*/
#include <xen/config.h>
@@ -115,7 +115,7 @@ static inline void kb_wait(void)
void machine_restart(char * __unused)
{
int i;
-
+
if ( opt_noreboot )
{
printk("Reboot disabled on cmdline: require manual reset\n");
@@ -432,7 +432,7 @@ int arch_set_info_guest(
if ( v->vcpu_id == 0 )
d->vm_assist = c->vm_assist;
- phys_basetab = c->pt_base;
+ phys_basetab = c->ctrlreg[3];
v->arch.guest_table = mk_pagetable(phys_basetab);
if ( shadow_mode_refcounts(d) )
@@ -453,24 +453,15 @@ int arch_set_info_guest(
return rc;
}
-#ifdef CONFIG_VMX
if ( c->flags & VGCF_VMX_GUEST )
{
- int error;
-
- // VMX uses the initially provided page tables as the P2M map.
- //
- // XXX: This creates a security issue -- Xen can't necessarily
- // trust the VMX domain builder. Xen should validate this
- // page table, and/or build the table itself, or ???
- //
+ /* VMX uses the initially provided page tables as the P2M map. */
if ( !pagetable_get_paddr(d->arch.phys_table) )
d->arch.phys_table = v->arch.guest_table;
- if ( (error = vmx_final_setup_guest(v, c)) )
- return error;
+ if ( (rc = vmx_final_setup_guest(v, c)) != 0 )
+ return rc;
}
-#endif
update_pagetables(v);
@@ -704,7 +695,7 @@ static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
#endif
#define loaddebug(_v,_reg) \
- __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+ __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
static void __context_switch(void)
{
@@ -982,6 +973,7 @@ static void relinquish_memory(struct domain *d, struct list_head *list)
void domain_relinquish_resources(struct domain *d)
{
struct vcpu *v;
+ unsigned long pfn;
BUG_ON(!cpus_empty(d->cpumask));
@@ -995,22 +987,20 @@ void domain_relinquish_resources(struct domain *d)
/* Drop the in-use references to page-table bases. */
for_each_vcpu ( d, v )
{
- if ( pagetable_get_paddr(v->arch.guest_table) != 0 )
+ if ( (pfn = pagetable_get_pfn(v->arch.guest_table)) != 0 )
{
- if ( shadow_mode_refcounts(d) )
- put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
- else
- put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
+ if ( !shadow_mode_refcounts(d) )
+ put_page_type(pfn_to_page(pfn));
+ put_page(pfn_to_page(pfn));
v->arch.guest_table = mk_pagetable(0);
}
- if ( pagetable_get_paddr(v->arch.guest_table_user) != 0 )
+ if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
{
- if ( shadow_mode_refcounts(d) )
- put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
- else
- put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
+ if ( !shadow_mode_refcounts(d) )
+ put_page_type(pfn_to_page(pfn));
+ put_page(pfn_to_page(pfn));
v->arch.guest_table_user = mk_pagetable(0);
}
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index eb5ff9479b..9740be454d 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -29,7 +29,7 @@ void save_init_fpu(struct vcpu *tsk)
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(_VCPUF_guest_stts, &tsk->vcpu_flags) )
+ if ( VMX_DOMAIN(tsk) || (tsk->arch.guest_context.ctrlreg[0] & X86_CR0_TS) )
clts();
if ( cpu_has_fxsr )
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 2986d9c2b5..8f8e3eb699 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -348,7 +348,7 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
if ( TI_GET_IF(ti) )
tb->flags |= TBF_INTERRUPT;
- v->arch.guest_cr2 = addr;
+ v->arch.guest_context.ctrlreg[2] = addr;
}
static int handle_perdomain_mapping_fault(
@@ -478,12 +478,12 @@ long do_fpu_taskswitch(int set)
if ( set )
{
- set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
+ v->arch.guest_context.ctrlreg[0] |= X86_CR0_TS;
stts();
}
else
{
- clear_bit(_VCPUF_guest_stts, &v->vcpu_flags);
+ v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
clts();
}
@@ -789,13 +789,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
switch ( (opcode >> 3) & 7 )
{
case 0: /* Read CR0 */
- *reg =
- (read_cr0() & ~X86_CR0_TS) |
- (test_bit(_VCPUF_guest_stts, &v->vcpu_flags) ? X86_CR0_TS:0);
+ *reg = v->arch.guest_context.ctrlreg[0];
break;
case 2: /* Read CR2 */
- *reg = v->arch.guest_cr2;
+ *reg = v->arch.guest_context.ctrlreg[2];
break;
case 3: /* Read CR3 */
@@ -820,7 +818,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
break;
case 2: /* Write CR2 */
- v->arch.guest_cr2 = *reg;
+ v->arch.guest_context.ctrlreg[2] = *reg;
break;
case 3: /* Write CR3 */
@@ -1033,12 +1031,13 @@ asmlinkage int math_state_restore(struct cpu_user_regs *regs)
setup_fpu(current);
- if ( test_and_clear_bit(_VCPUF_guest_stts, &current->vcpu_flags) )
+ if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
{
struct trap_bounce *tb = &current->arch.trap_bounce;
tb->flags = TBF_EXCEPTION;
tb->cs = current->arch.guest_context.trap_ctxt[7].cs;
tb->eip = current->arch.guest_context.trap_ctxt[7].address;
+ current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
}
return EXCRET_fault_fixed;
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index 9b32d1d502..439eb0dd76 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -168,7 +168,6 @@ void vmx_do_launch(struct vcpu *v)
struct cpu_user_regs *regs = guest_cpu_user_regs();
vmx_stts();
- set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
cpu = smp_processor_id();
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 397b65d031..864fbfcbdb 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -112,9 +112,6 @@ struct arch_vcpu
unsigned long monitor_shadow_ref;
- /* Virtual CR2 value. Can be read/written by guest. */
- unsigned long guest_cr2;
-
/* Current LDT details. */
unsigned long shadow_ldt_mapcnt;
} __cacheline_aligned;
diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h
index 44bc8dd46f..eab800a7a5 100644
--- a/xen/include/public/arch-x86_32.h
+++ b/xen/include/public/arch-x86_32.h
@@ -136,9 +136,7 @@ typedef struct vcpu_guest_context {
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
- unsigned long pt_base; /* CR3 (pagetable base) */
- unsigned long cr0; /* CR0 */
- unsigned long cr4; /* CR4 */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
unsigned long event_callback_cs; /* CS:EIP of event callback */
unsigned long event_callback_eip;
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index f6f73d2f75..257e70319c 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -186,9 +186,7 @@ typedef struct vcpu_guest_context {
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
- unsigned long pt_base; /* CR3 (pagetable base) */
- unsigned long cr0; /* CR0 */
- unsigned long cr4; /* CR4 */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
unsigned long event_callback_eip;
unsigned long failsafe_callback_eip;
diff --git a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
index 3ff82b43ac..e03f49dc7d 100644
--- a/xen/include/public/dom0_ops.h
+++ b/xen/include/public/dom0_ops.h
@@ -19,7 +19,7 @@
* This makes sure that old versions of dom0 tools will stop working in a
* well-defined way (rather than crashing the machine, for instance).
*/
-#define DOM0_INTERFACE_VERSION 0xAAAA1006
+#define DOM0_INTERFACE_VERSION 0xAAAA1007
/************************************************************************/
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ca21c5eee8..399210084c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -327,9 +327,6 @@ extern struct domain *domain_list;
/* Has the FPU been used since it was last saved? */
#define _VCPUF_fpu_dirtied 1
#define VCPUF_fpu_dirtied (1UL<<_VCPUF_fpu_dirtied)
- /* Has the guest OS requested 'stts'? */
-#define _VCPUF_guest_stts 2
-#define VCPUF_guest_stts (1UL<<_VCPUF_guest_stts)
/* Domain is blocked waiting for an event. */
#define _VCPUF_blocked 3
#define VCPUF_blocked (1UL<<_VCPUF_blocked)