author     Jan Beulich <jbeulich@novell.com>    2011-04-05 13:01:25 +0100
committer  Jan Beulich <jbeulich@novell.com>    2011-04-05 13:01:25 +0100
commit     9a70856bb28bb8c9b1d37fb8a005447ac77b0619 (patch)
tree       e03eabf8a03ef712e5b93a91d4b5e13923b0c4a4 /xen/include/asm-x86/domain.h
parent     4551775df58d42e2dcfd2a8ac4bcc713709e8b81 (diff)
x86: split struct vcpu
This is accomplished by splitting the guest_context member, which by itself is larger than a page on x86-64. Quite a number of fields of this structure are completely meaningless for HVM guests, so a new struct pv_vcpu gets introduced and is overlaid with struct hvm_vcpu in struct arch_vcpu. The member mostly responsible for the large size is trap_ctxt, which now gets allocated separately (unless it fits on the same page as struct arch_vcpu, as is currently the case for x86-32), and only for non-HVM, non-idle domains.

This change pointed out a latent problem in arch_set_info_guest(), which is permitted to be called on already initialized vCPUs, but so far copied the new state into struct arch_vcpu without (in that case) actually going through all the necessary accounting/validation steps. The logic gets changed so that the pieces which bypass accounting are at least verified to be no different from the currently active bits, and the whole change fails if they are. The logic does *not* get adjusted here to do full error recovery; that is, partially modified state still does not get unrolled in case of failure.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
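The overlay is easiest to see in isolation. Below is a minimal, compilable sketch of the pattern this patch introduces; the struct bodies are trimmed stand-ins rather than Xen's real definitions, and init_pv_trap_ctxt() is a hypothetical helper standing in for allocation logic that in Xen lives in domain-building code, not in this header.

    #include <stdlib.h>

    #define NR_VECTORS 256   /* x86 has 256 interrupt vectors */

    /* Trimmed stand-ins for the real Xen structures. */
    struct trap_info { unsigned long address; };
    struct pv_vcpu   { struct trap_info *trap_ctxt; /* ... PV-only state */ };
    struct hvm_vcpu  { unsigned long hw_cr3;        /* ... HVM-only state */ };

    struct arch_vcpu {
        /* State common to PV and HVM guests stays outside the union. */
        unsigned long debugreg[8];

        /* A vCPU is only ever PV or HVM, never both, so the two
         * guest-type-specific states can share the same storage. */
        union {
            struct pv_vcpu  pv_vcpu;
            struct hvm_vcpu hvm_vcpu;
        };
    };

    /* The large trap table is no longer embedded in the structure; it
     * is allocated separately, and only for non-HVM, non-idle vCPUs
     * (hypothetical helper, simplified error handling). */
    static int init_pv_trap_ctxt(struct arch_vcpu *a)
    {
        a->pv_vcpu.trap_ctxt = calloc(NR_VECTORS, sizeof(struct trap_info));
        return a->pv_vcpu.trap_ctxt ? 0 : -1;
    }

    int main(void)
    {
        struct arch_vcpu v = {0};
        return init_pv_trap_ctxt(&v);
    }

The space saving comes precisely from the union: since a given vCPU is only ever one of the two guest types, overlaying the states shrinks struct arch_vcpu, and moving trap_ctxt behind a pointer takes the single largest member out of line.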
Diffstat (limited to 'xen/include/asm-x86/domain.h')
-rw-r--r--  xen/include/asm-x86/domain.h | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 50 insertions(+), 6 deletions(-)
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 8056559308..b4fea4953e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -352,11 +352,52 @@ struct pae_l3_cache { };
 #define pae_l3_cache_init(c) ((void)0)
 #endif
 
+struct pv_vcpu
+{
+    struct trap_info *trap_ctxt;
+
+    unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
+    unsigned long ldt_base;
+    unsigned int gdt_ents, ldt_ents;
+
+    unsigned long kernel_ss, kernel_sp;
+    unsigned long ctrlreg[8];
+
+    unsigned long event_callback_eip;
+    unsigned long failsafe_callback_eip;
+    union {
+#ifdef CONFIG_X86_64
+        unsigned long syscall_callback_eip;
+#endif
+        struct {
+            unsigned int event_callback_cs;
+            unsigned int failsafe_callback_cs;
+        };
+    };
+
+    unsigned long vm_assist;
+
+#ifdef CONFIG_X86_64
+    /* Segment base addresses. */
+    unsigned long fs_base;
+    unsigned long gs_base_kernel;
+    unsigned long gs_base_user;
+#endif
+};
+
 struct arch_vcpu
 {
-    /* Needs 16-byte aligment for FXSAVE/FXRSTOR. */
-    struct vcpu_guest_context guest_context
-        __attribute__((__aligned__(16)));
+    /*
+     * guest context (mirroring struct vcpu_guest_context) common
+     * between pv and hvm guests
+     */
+
+    void *fpu_ctxt;
+    unsigned long vgc_flags;
+    struct cpu_user_regs user_regs;
+    unsigned long debugreg[8];
+
+    /* other state */
 
     struct pae_l3_cache pae_l3_cache;
@@ -389,7 +430,10 @@ struct arch_vcpu
 #endif
 
     /* Virtual Machine Extensions */
-    struct hvm_vcpu hvm_vcpu;
+    union {
+        struct pv_vcpu pv_vcpu;
+        struct hvm_vcpu hvm_vcpu;
+    };
 
     /*
      * Every domain has a L1 pagetable of its own. Per-domain mappings
@@ -413,7 +457,7 @@ struct arch_vcpu
      * dirtied FPU/SSE) is scheduled out we XSAVE the states here; 2) in
      * #NM handler, we XRSTOR the states we XSAVE-ed;
      */
-    void *xsave_area;
+    struct xsave_struct *xsave_area;
     uint64_t xcr0;
     /* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
      * itself, as we can never know whether guest OS depends on content
@@ -461,7 +505,7 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);
 /* Convert between guest-visible and real CR4 values. */
 #define pv_guest_cr4_to_real_cr4(v) \
-    (((v)->arch.guest_context.ctrlreg[4] \
+    (((v)->arch.pv_vcpu.ctrlreg[4] \
      | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)) \
      | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0) \
      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0)) \
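The final hunk only changes where the macro reads the guest's CR4 view from: v->arch.pv_vcpu.ctrlreg[4] instead of the removed v->arch.guest_context.ctrlreg[4]. A compilable sketch of that computation under the new layout follows; the structs are trimmed stand-ins, the CR4 bit values are the architectural ones, and the vtsc/OSXSAVE terms plus the continuation cut off above are simplified away.

    /* Architectural CR4 bits (per the Intel SDM). */
    #define X86_CR4_TSD     0x00000004UL  /* time stamp disable */
    #define X86_CR4_PSE     0x00000010UL  /* page size extensions */
    #define X86_CR4_PGE     0x00000080UL  /* global pages */
    #define X86_CR4_OSXSAVE 0x00040000UL  /* XSAVE enabled by OS */

    /* Trimmed stand-ins for the structures involved. */
    struct pv_vcpu   { unsigned long ctrlreg[8]; };
    struct arch_vcpu { struct pv_vcpu pv_vcpu; };
    struct vcpu      { struct arch_vcpu arch; };

    static unsigned long mmu_cr4_features = X86_CR4_PGE | X86_CR4_PSE;

    /* Take the guest's CR4 view from the new pv_vcpu substructure,
     * then force on the host-controlled bits, as the macro does.
     * The conditional vtsc/xsave terms are omitted in this sketch. */
    static unsigned long guest_to_real_cr4(const struct vcpu *v)
    {
        return v->arch.pv_vcpu.ctrlreg[4]
               | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE));
    }

    int main(void)
    {
        struct vcpu v = { .arch.pv_vcpu.ctrlreg[4] = X86_CR4_PSE };
        return guest_to_real_cr4(&v) == (X86_CR4_PSE | X86_CR4_PGE) ? 0 : 1;
    }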