author     emellor@leeni.uk.xensource.com <emellor@leeni.uk.xensource.com>    2006-03-30 17:26:19 +0100
committer  emellor@leeni.uk.xensource.com <emellor@leeni.uk.xensource.com>    2006-03-30 17:26:19 +0100
commit     a638cd3e8916998d1506b3bfc875520dcafa4552 (patch)
tree       752a12ebdfedcf08971b67f800dec4ebdeac1e96
parent     78b637ac771ca9518659dce465f95592c178eaec (diff)
parent     20603fcf9a4c03a729762a76dec07a9741b8cb8e (diff)
Merged.
-rw-r--r--  linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c |   2
-rw-r--r--  tools/examples/block |   2
-rw-r--r--  tools/libxc/xc_ptrace.c | 221
-rw-r--r--  tools/libxc/xc_ptrace.h |   6
-rw-r--r--  xen/Rules.mk |   5
-rw-r--r--  xen/arch/ia64/Makefile |   2
-rw-r--r--  xen/arch/x86/Makefile |   2
-rw-r--r--  xen/arch/x86/domain.c |   2
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c |   7
-rw-r--r--  xen/arch/x86/hvm/svm/vmcb.c |  17
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c |  26
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c |  56
-rw-r--r--  xen/include/asm-x86/config.h |   4
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h |   8
-rw-r--r--  xen/include/asm-x86/hvm/svm/svm.h |   1
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmcs.h |   2
16 files changed, 190 insertions, 173 deletions
diff --git a/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c b/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c
index 39d9ed110a..7e44d81bdc 100644
--- a/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c
@@ -5,7 +5,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
diff --git a/tools/examples/block b/tools/examples/block
index 5a43560eff..dae32dc234 100644
--- a/tools/examples/block
+++ b/tools/examples/block
@@ -72,7 +72,7 @@ check_sharing()
then
toskip="^$"
else
- toskip="^[^ ]* [^ ]* [^ ]* ro "
+ toskip="^[^ ]* [^ ]* [^ ]* ro[, ]"
fi
for file in $(cat /proc/mounts | grep -v "$toskip" | cut -f 1 -d ' ')
diff --git a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c
index 715cb73b39..f83005b45c 100644
--- a/tools/libxc/xc_ptrace.c
+++ b/tools/libxc/xc_ptrace.c
@@ -153,6 +153,79 @@ online_vcpus_changed(cpumap_t cpumap)
}
/* --------------------- */
+/* XXX application state */
+static long nr_pages = 0;
+static unsigned long *page_array = NULL;
+
+static void *
+map_domain_va_32(
+ int xc_handle,
+ int cpu,
+ void *guest_va,
+ int perm)
+{
+ unsigned long pde, page;
+ unsigned long va = (unsigned long)guest_va;
+
+ static unsigned long cr3_phys[MAX_VIRT_CPUS];
+ static uint32_t *cr3_virt[MAX_VIRT_CPUS];
+ static unsigned long pde_phys[MAX_VIRT_CPUS];
+ static uint32_t *pde_virt[MAX_VIRT_CPUS];
+ static unsigned long page_phys[MAX_VIRT_CPUS];
+ static uint32_t *page_virt[MAX_VIRT_CPUS];
+ static int prev_perm[MAX_VIRT_CPUS];
+
+ if (ctxt[cpu].ctrlreg[3] == 0)
+ return NULL;
+ if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
+ {
+ cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
+ if ( cr3_virt[cpu] )
+ munmap(cr3_virt[cpu], PAGE_SIZE);
+ cr3_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ cr3_phys[cpu] >> PAGE_SHIFT);
+ if ( cr3_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
+ return NULL;
+ if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
+ pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
+ if ( pde != pde_phys[cpu] )
+ {
+ pde_phys[cpu] = pde;
+ if ( pde_virt[cpu] )
+ munmap(pde_virt[cpu], PAGE_SIZE);
+ pde_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, PROT_READ,
+ pde_phys[cpu] >> PAGE_SHIFT);
+ if ( pde_virt[cpu] == NULL )
+ return NULL;
+ }
+ if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
+ return NULL;
+ if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+ page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+ if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
+ {
+ page_phys[cpu] = page;
+ if ( page_virt[cpu] )
+ munmap(page_virt[cpu], PAGE_SIZE);
+ page_virt[cpu] = xc_map_foreign_range(
+ xc_handle, current_domid, PAGE_SIZE, perm,
+ page_phys[cpu] >> PAGE_SHIFT);
+ if ( page_virt[cpu] == NULL )
+ {
+ page_phys[cpu] = 0;
+ return NULL;
+ }
+ prev_perm[cpu] = perm;
+ }
+
+ return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
+}
+
static void *
map_domain_va_pae(
@@ -165,28 +238,31 @@ map_domain_va_pae(
uint64_t *l3, *l2, *l1;
static void *v;
- if (fetch_regs(xc_handle, cpu, NULL))
- return NULL;
-
l3 = xc_map_foreign_range(
xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
if ( l3 == NULL )
return NULL;
l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
+ l2p = page_array[l2p];
l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+ munmap(l3, PAGE_SIZE);
if ( l2 == NULL )
return NULL;
l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
+ l1p = page_array[l1p];
l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+ munmap(l2, PAGE_SIZE);
if ( l1 == NULL )
return NULL;
p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
+ p = page_array[p];
if ( v != NULL )
munmap(v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+ munmap(l1, PAGE_SIZE);
if ( v == NULL )
return NULL;
@@ -195,46 +271,58 @@ map_domain_va_pae(
#ifdef __x86_64__
static void *
-map_domain_va(
+map_domain_va_64(
int xc_handle,
int cpu,
void *guest_va,
int perm)
{
- unsigned long l3p, l2p, l1p, p, va = (unsigned long)guest_va;
+ unsigned long l3p, l2p, l1p, l1e, p, va = (unsigned long)guest_va;
uint64_t *l4, *l3, *l2, *l1;
static void *v;
if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
- return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
-
- if (fetch_regs(xc_handle, cpu, NULL))
- return NULL;
+ return map_domain_va_32(xc_handle, cpu, guest_va, perm);
- l4 = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
+ l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
if ( l4 == NULL )
return NULL;
l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
+ l3p = page_array[l3p];
l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
+ munmap(l4, PAGE_SIZE);
if ( l3 == NULL )
return NULL;
l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
+ l2p = page_array[l2p];
l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
+ munmap(l3, PAGE_SIZE);
if ( l2 == NULL )
return NULL;
- l1p = l2[l2_table_offset(va)] >> PAGE_SHIFT;
- l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
- if ( l1 == NULL )
- return NULL;
+ l1 = NULL;
+ l1e = l2[l2_table_offset(va)];
+ l1p = l1e >> PAGE_SHIFT;
+ if (l1e & 0x80) { /* 2M pages */
+ p = (l1p + l1_table_offset(va));
+ } else { /* 4K pages */
+ l1p = page_array[l1p];
+ l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
+ munmap(l2, PAGE_SIZE);
+ if ( l1 == NULL )
+ return NULL;
- p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
+ p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
+ }
+ p = page_array[p];
if ( v != NULL )
munmap(v, PAGE_SIZE);
v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
+ if (l1)
+ munmap(l1, PAGE_SIZE);
if ( v == NULL )
return NULL;
@@ -242,11 +330,6 @@ map_domain_va(
}
#endif
-#ifdef __i386__
-/* XXX application state */
-static long nr_pages = 0;
-static unsigned long *page_array = NULL;
-
static void *
map_domain_va(
int xc_handle,
@@ -254,20 +337,9 @@ map_domain_va(
void *guest_va,
int perm)
{
-
- unsigned long pde, page;
- unsigned long va = (unsigned long)guest_va;
+ unsigned long va = (unsigned long) guest_va;
long npgs = xc_get_tot_pages(xc_handle, current_domid);
-
-
- static uint32_t cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
- static int prev_perm[MAX_VIRT_CPUS];
- static enum { MODE_UNKNOWN, MODE_32, MODE_PAE, MODE_64 } mode;
+ static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
if ( mode == MODE_UNKNOWN )
{
@@ -281,9 +353,6 @@ map_domain_va(
mode = MODE_32;
}
- if ( mode == MODE_PAE )
- return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
-
if ( nr_pages != npgs )
{
if ( nr_pages > 0 )
@@ -305,60 +374,32 @@ map_domain_va(
if (fetch_regs(xc_handle, cpu, NULL))
return NULL;
- if (paging_enabled(&ctxt[cpu])) {
- if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
- {
- cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
- if ( cr3_virt[cpu] )
- munmap(cr3_virt[cpu], PAGE_SIZE);
- cr3_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- cr3_phys[cpu] >> PAGE_SHIFT);
- if ( cr3_virt[cpu] == NULL )
- return NULL;
- }
- if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
- return NULL;
- if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
- pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
- if ( pde != pde_phys[cpu] )
- {
- pde_phys[cpu] = pde;
- if ( pde_virt[cpu] )
- munmap(pde_virt[cpu], PAGE_SIZE);
- pde_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, PROT_READ,
- pde_phys[cpu] >> PAGE_SHIFT);
- if ( pde_virt[cpu] == NULL )
- return NULL;
- }
- if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
- return NULL;
- } else {
- page = va;
- }
- if (ctxt[cpu].flags & VGCF_HVM_GUEST)
- page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
- if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
- {
- page_phys[cpu] = page;
- if ( page_virt[cpu] )
- munmap(page_virt[cpu], PAGE_SIZE);
- page_virt[cpu] = xc_map_foreign_range(
- xc_handle, current_domid, PAGE_SIZE, perm,
- page_phys[cpu] >> PAGE_SHIFT);
- if ( page_virt[cpu] == NULL )
- {
- page_phys[cpu] = 0;
- return NULL;
- }
- prev_perm[cpu] = perm;
- }
+ if (!paging_enabled(&ctxt[cpu])) {
+ static void * v;
+ unsigned long page;
- return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
-}
+ if ( v != NULL )
+ munmap(v, PAGE_SIZE);
+
+ page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
+ v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
+ perm, page >> PAGE_SHIFT);
+
+ if ( v == NULL )
+ return NULL;
+
+ return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
+ }
+#ifdef __x86_64__
+ if ( mode == MODE_64 )
+ return map_domain_va_64(xc_handle, cpu, guest_va, perm);
#endif
+ if ( mode == MODE_PAE )
+ return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
+ /* else ( mode == MODE_32 ) */
+ return map_domain_va_32(xc_handle, cpu, guest_va, perm);
+}
static int
__xc_waitdomain(
@@ -470,7 +511,7 @@ xc_ptrace(
break;
case PTRACE_SETREGS:
- if (!current_isfile)
+ if (current_isfile)
goto out_unspported; /* XXX not yet supported */
SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
@@ -492,7 +533,7 @@ xc_ptrace(
case PTRACE_CONT:
case PTRACE_DETACH:
- if (!current_isfile)
+ if (current_isfile)
goto out_unspported; /* XXX not yet supported */
if ( request != PTRACE_SINGLESTEP )
{
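
[Note on the xc_ptrace.c changes above: map_domain_va_32/_pae/_64 walk the guest's page tables one level at a time, and for HVM guests each guest-physical frame read from a page-table entry is translated through page_array before being mapped. The fragment below is a condensed sketch of a single level of that walk — it is not part of the patch; it reuses names that appear in the diff (xc_map_foreign_range, page_array, PAGE_SHIFT) and drops the per-VCPU caching and most error handling of the real functions.]

    /*
     * Sketch only: one level of the page-table walk performed by
     * map_domain_va_64() above.  Assumes libxc's xc_map_foreign_range()
     * and the patch's page_array (guest pfn -> machine frame table).
     */
    #include <stdint.h>
    #include <sys/mman.h>
    #include <xenctrl.h>               /* xc_map_foreign_range() */

    #ifndef PAGE_SHIFT
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #endif

    static unsigned long
    walk_one_level(int xc_handle, uint32_t domid,
                   const unsigned long *page_array,  /* guest pfn -> machine frame */
                   unsigned long table_mfn,          /* machine frame of this table */
                   unsigned int index)               /* l4/l3/l2/l1_table_offset(va) */
    {
        uint64_t *table;
        unsigned long next_pfn, next_mfn;

        /* Map one of the guest's page-table pages into our address space. */
        table = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
                                     PROT_READ, table_mfn);
        if (table == NULL)
            return 0;

        /* The entry holds a guest-physical frame number ... */
        next_pfn = table[index] >> PAGE_SHIFT;
        /* ... which must go through page_array before it can be mapped. */
        next_mfn = page_array[next_pfn];

        /* Unmap each level as soon as it is consumed, as the patch now does. */
        munmap(table, PAGE_SIZE);
        return next_mfn;
    }
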
diff --git a/tools/libxc/xc_ptrace.h b/tools/libxc/xc_ptrace.h
index 2119d9c984..b6d5bea314 100644
--- a/tools/libxc/xc_ptrace.h
+++ b/tools/libxc/xc_ptrace.h
@@ -31,7 +31,7 @@ struct gdb_regs
unsigned long orig_rax;
unsigned long rip;
unsigned long xcs;
- unsigned long eflags;
+ unsigned long rflags;
unsigned long rsp;
unsigned long xss;
unsigned long fs_base;
@@ -61,7 +61,7 @@ struct gdb_regs
pt.rax = xc.rax; \
pt.rip = xc.rip; \
pt.xcs = xc.cs; \
- pt.eflags = xc.eflags; \
+ pt.rflags = xc.rflags; \
pt.rsp = xc.rsp; \
pt.xss = xc.ss; \
pt.xes = xc.es; \
@@ -89,7 +89,7 @@ struct gdb_regs
xc.rax = pt->rax; \
xc.rip = pt->rip; \
xc.cs = pt->xcs; \
- xc.eflags = pt->eflags; \
+ xc.rflags = pt->rflags & 0xffffffff; \
xc.rsp = pt->rsp; \
xc.ss = pt->xss; \
xc.es = pt->xes; \
diff --git a/xen/Rules.mk b/xen/Rules.mk
index f887ea44f5..ade8b87a1e 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -55,11 +55,14 @@ ifneq ($(max_phys_cpus),)
CFLAGS-y += -DMAX_PHYS_CPUS=$(max_phys_cpus)
endif
+AFLAGS-y += -D__ASSEMBLY__
+
ALL_OBJS := $(ALL_OBJS-y)
CFLAGS := $(strip $(CFLAGS) $(CFLAGS-y))
+AFLAGS := $(strip $(AFLAGS) $(AFLAGS-y))
%.o: %.c $(HDRS) Makefile
$(CC) $(CFLAGS) -c $< -o $@
%.o: %.S $(HDRS) Makefile
- $(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
+ $(CC) $(CFLAGS) $(AFLAGS) -c $< -o $@
diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile
index 444f4eb30b..381c6d739b 100644
--- a/xen/arch/ia64/Makefile
+++ b/xen/arch/ia64/Makefile
@@ -76,7 +76,7 @@ $(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
# I'm sure a Makefile wizard would know a better way to do this
xen.lds.s: xen/xen.lds.S
- $(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
+ $(CC) -E $(CPPFLAGS) -P -DXEN $(AFLAGS) \
-o xen.lds.s xen/xen.lds.S
clean:: FORCE
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 3fa20dd7d0..c7a2e6ae1d 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -72,7 +72,7 @@ asm-offsets.s: $(TARGET_SUBARCH)/asm-offsets.c $(HDRS)
$(CC) $(CFLAGS) -S -o $@ $<
xen.lds: $(TARGET_SUBARCH)/xen.lds.S $(HDRS)
- $(CC) $(CFLAGS) -P -E -Ui386 -D__ASSEMBLY__ -o $@ $<
+ $(CC) $(CFLAGS) -P -E -Ui386 $(AFLAGS) -o $@ $<
boot/mkelf32: boot/mkelf32.c
$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index b8e6ce9708..8cb91ead4d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -393,7 +393,7 @@ int arch_set_info_guest(
}
else if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
{
- hvm_modify_guest_state(v);
+ hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
}
if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index d43bfb08d8..77ba3f8089 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -382,11 +382,6 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
return 1;
}
-void svm_modify_guest_state(struct vcpu *v)
-{
- svm_modify_vmcb(v, &v->arch.guest_context.user_regs);
-}
-
int svm_realmode(struct vcpu *v)
{
unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
@@ -449,8 +444,6 @@ int start_svm(void)
hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
- hvm_funcs.modify_guest_state = svm_modify_guest_state;
-
hvm_funcs.realmode = svm_realmode;
hvm_funcs.paging_enabled = svm_paging_enabled;
hvm_funcs.instruction_length = svm_instruction_length;
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 0d101206b6..33c995a6f6 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -161,23 +161,6 @@ static int construct_vmcb_controls(struct arch_svm_struct *arch_svm)
/*
- * modify guest eflags and execption bitmap for gdb
- */
-int svm_modify_vmcb(struct vcpu *v, struct cpu_user_regs *regs)
-{
- int error;
- if ((error = load_vmcb(&v->arch.hvm_svm, v->arch.hvm_svm.host_save_pa)))
- {
- printk("svm_modify_vmcb: load_vmcb failed: VMCB = %lx\n",
- (unsigned long) v->arch.hvm_svm.host_save_pa);
- return -EINVAL;
- }
- svm_load_cpu_user_regs(v,regs);
- return 0;
-}
-
-
-/*
* Initially set the same environement as host.
*/
static int construct_init_vmcb_guest(struct arch_svm_struct *arch_svm,
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a4e09344c4..7a2f50bb27 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -487,32 +487,6 @@ void destroy_vmcs(struct arch_vmx_struct *arch_vmx)
arch_vmx->io_bitmap_b = NULL;
}
-/*
- * modify guest eflags and execption bitmap for gdb
- */
-int modify_vmcs(struct arch_vmx_struct *arch_vmx,
- struct cpu_user_regs *regs)
-{
- int error;
- u64 vmcs_phys_ptr, old, old_phys_ptr;
- vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
-
- old_phys_ptr = virt_to_maddr(&old);
- __vmptrst(old_phys_ptr);
- if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
- printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
- (unsigned long) vmcs_phys_ptr);
- return -EINVAL;
- }
-
-/* XXX VMX change modify_vmcs arg to v */
- hvm_load_cpu_guest_regs(current, regs);
-
- __vmptrld(old_phys_ptr);
-
- return 0;
-}
-
void vm_launch_fail(unsigned long eflags)
{
unsigned long error;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9e146477cc..623fb22922 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -400,7 +400,7 @@ void vmx_migrate_timers(struct vcpu *v)
migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
}
-struct vmx_store_cpu_guest_regs_callback_info {
+struct vmx_cpu_guest_regs_callback_info {
struct vcpu *v;
struct cpu_user_regs *regs;
unsigned long *crs;
@@ -409,12 +409,21 @@ struct vmx_store_cpu_guest_regs_callback_info {
static void vmx_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
+static void vmx_load_cpu_guest_regs(
+ struct vcpu *v, struct cpu_user_regs *regs);
+
static void vmx_store_cpu_guest_regs_callback(void *data)
{
- struct vmx_store_cpu_guest_regs_callback_info *info = data;
+ struct vmx_cpu_guest_regs_callback_info *info = data;
vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
}
+static void vmx_load_cpu_guest_regs_callback(void *data)
+{
+ struct vmx_cpu_guest_regs_callback_info *info = data;
+ vmx_load_cpu_guest_regs(info->v, info->regs);
+}
+
static void vmx_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
@@ -426,7 +435,7 @@ static void vmx_store_cpu_guest_regs(
if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
{
/* Get register details from remote CPU. */
- struct vmx_store_cpu_guest_regs_callback_info info = {
+ struct vmx_cpu_guest_regs_callback_info info = {
.v = v, .regs = regs, .crs = crs };
cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
@@ -479,8 +488,33 @@ static void vmx_store_cpu_guest_regs(
void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
+ if ( v != current )
+ {
+ /* Non-current VCPUs must be paused to set the register snapshot. */
+ ASSERT(atomic_read(&v->pausecnt) != 0);
+
+ if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
+ {
+ struct vmx_cpu_guest_regs_callback_info info = {
+ .v = v, .regs = regs };
+ cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
+ on_selected_cpus(cpumask, vmx_load_cpu_guest_regs_callback,
+ &info, 1, 1);
+ return;
+ }
+
+ /* Register details are on this CPU. Load the correct VMCS. */
+ __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+ }
+
+ ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
+
#if defined (__x86_64__)
__vmwrite(GUEST_SS_SELECTOR, regs->ss);
+ __vmwrite(GUEST_DS_SELECTOR, regs->ds);
+ __vmwrite(GUEST_ES_SELECTOR, regs->es);
+ __vmwrite(GUEST_GS_SELECTOR, regs->gs);
+ __vmwrite(GUEST_FS_SELECTOR, regs->fs);
__vmwrite(GUEST_RSP, regs->rsp);
__vmwrite(GUEST_RFLAGS, regs->rflags);
@@ -493,6 +527,11 @@ void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
__vmwrite(GUEST_RIP, regs->rip);
#elif defined (__i386__)
__vmwrite(GUEST_SS_SELECTOR, regs->ss);
+ __vmwrite(GUEST_DS_SELECTOR, regs->ds);
+ __vmwrite(GUEST_ES_SELECTOR, regs->es);
+ __vmwrite(GUEST_GS_SELECTOR, regs->gs);
+ __vmwrite(GUEST_FS_SELECTOR, regs->fs);
+
__vmwrite(GUEST_RSP, regs->esp);
__vmwrite(GUEST_RFLAGS, regs->eflags);
@@ -503,14 +542,11 @@ void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
__vmwrite(GUEST_CS_SELECTOR, regs->cs);
__vmwrite(GUEST_RIP, regs->eip);
-#else
-#error Unsupported architecture
#endif
-}
-void vmx_modify_guest_state(struct vcpu *v)
-{
- modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
+ /* Reload current VCPU's VMCS if it was temporarily unloaded. */
+ if ( (v != current) && hvm_guest(current) )
+ __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
}
int vmx_realmode(struct vcpu *v)
@@ -661,8 +697,6 @@ int start_vmx(void)
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
- hvm_funcs.modify_guest_state = vmx_modify_guest_state;
-
hvm_funcs.realmode = vmx_realmode;
hvm_funcs.paging_enabled = vmx_paging_enabled;
hvm_funcs.instruction_length = vmx_instruction_length;
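
[Note on the vmx.c changes above: vmx_load_cpu_guest_regs() now follows the same idiom already used by vmx_store_cpu_guest_regs() — VMCS fields can only be accessed on the physical CPU where the VMCS was launched, so when the target VCPU last ran elsewhere the arguments are bundled into a small struct and the work is shipped there with on_selected_cpus(). The sketch below is illustrative only; it relies on the Xen-internal types and calls that appear in the diff (struct vcpu, cpumask_t, on_selected_cpus), and do_vmcs_work() is a hypothetical stand-in for the per-CPU body.]

    /*
     * Sketch only: the "run it on the CPU that owns the VMCS" pattern used
     * by vmx_load_cpu_guest_regs()/vmx_store_cpu_guest_regs() above.
     * Assumes the Xen hypervisor context; do_vmcs_work() is a stand-in.
     */
    struct vmcs_work_info {
        struct vcpu *v;
        struct cpu_user_regs *regs;
    };

    static void do_vmcs_work(struct vcpu *v, struct cpu_user_regs *regs);

    static void vmcs_work_callback(void *data)
    {
        struct vmcs_work_info *info = data;
        do_vmcs_work(info->v, info->regs);
    }

    static void vmcs_work(struct vcpu *v, struct cpu_user_regs *regs)
    {
        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
        {
            /* The VMCS lives on another CPU: bundle the arguments and run
             * the callback there, waiting for it to complete (as the patch
             * does by passing 1, 1 to on_selected_cpus). */
            struct vmcs_work_info info = { .v = v, .regs = regs };
            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);

            on_selected_cpus(cpumask, vmcs_work_callback, &info, 1, 1);
            return;
        }

        /* Already on the owning CPU: do the work locally. */
        do_vmcs_work(v, regs);
    }
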
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 3360a2a5a0..23aabeb2b9 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -212,13 +212,13 @@ extern unsigned long _end; /* standard ELF symbol */
* ------ ------
* I/O remapping area ( 4MB)
* Direct-map (1:1) area [Xen code/data/heap] (12MB)
- * Per-domain mappings (inc. 4MB map_domain_page cache) ( 4MB)
+ * Per-domain mappings (inc. 4MB map_domain_page cache) ( 8MB)
* Shadow linear pagetable ( 4MB) ( 8MB)
* Guest linear pagetable ( 4MB) ( 8MB)
* Machine-to-physical translation table [writable] ( 4MB) (16MB)
* Frame-info table (24MB) (96MB)
* * Start of guest inaccessible area
- * Machine-to-physical translation table [read-only] ( 4MB)
+ * Machine-to-physical translation table [read-only] ( 4MB) (16MB)
* * Start of guest unmodifiable area
*/
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index eff373996c..8e830977cc 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -47,8 +47,6 @@ struct hvm_function_table {
struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
void (*load_cpu_guest_regs)(
struct vcpu *v, struct cpu_user_regs *r);
- void (*modify_guest_state)(struct vcpu *v);
-
/*
* Examine specifics of the guest state:
* 1) determine whether the guest is in real or vm8086 mode,
@@ -105,12 +103,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
hvm_funcs.load_cpu_guest_regs(v, r);
}
-static inline void
-hvm_modify_guest_state(struct vcpu *v)
-{
- hvm_funcs.modify_guest_state(v);
-}
-
static inline int
hvm_realmode(struct vcpu *v)
{
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index b74ed8c759..30d20238cf 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -39,7 +39,6 @@ extern unsigned int cpu_rev;
extern void svm_stop(void);
extern void svm_save_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
extern void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
-extern int svm_modify_vmcb(struct vcpu *v, struct cpu_user_regs *regs);
extern void svm_vmread(struct vcpu *v, int index, unsigned long *value);
extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
extern void svm_final_setup_guest(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index c77e665463..b2545bbd26 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -89,8 +89,6 @@ struct arch_vmx_struct {
void vmx_do_resume(struct vcpu *);
struct vmcs_struct *alloc_vmcs(void);
-int modify_vmcs(struct arch_vmx_struct *arch_vmx,
- struct cpu_user_regs *regs);
void destroy_vmcs(struct arch_vmx_struct *arch_vmx);
extern void vmx_request_clear_vmcs(struct vcpu *v);