aboutsummaryrefslogtreecommitdiffstats
path: root/xen/include/asm-x86/msr.h
diff options
context:
space:
mode:
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-07-04 08:21:35 +0000
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-07-04 08:21:35 +0000
commitd83ba385f0b5412b02177592954eed3f85a256ed (patch)
treedf338e3466e15f47771f846a1c39877678d2e86e /xen/include/asm-x86/msr.h
parentd05c4adbfb1dc84649a6469e887bf489dc90a8c8 (diff)
downloadxen-d83ba385f0b5412b02177592954eed3f85a256ed.tar.gz
xen-d83ba385f0b5412b02177592954eed3f85a256ed.tar.bz2
xen-d83ba385f0b5412b02177592954eed3f85a256ed.zip
To avoid MSR save/restore at every VM exit/entry time, we restore the
x86_64 specific MSRs at domain switch time if modified. In VMX domains, we modify those upon requests from the guests to that end. Note that IA32_EFER.LME and IA32_EFER.LMA are saved/restored by H/W on every VM exit. For the usual domains (i.e. dom0 and domU), those MSRs are not modified once set at initialization time, so we don't save them when switched out, but simply reset them (if modified) to the initial values when switched in. This patch also includes extended handling for 64-bit guests. Please apply. Signed-off-by: Jun Nakajima <jun.nakajima@intel.com> Signed-off-by: Chengyuan Li <chengyuan.li@intel.com> Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com> Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Diffstat (limited to 'xen/include/asm-x86/msr.h')
-rw-r--r--xen/include/asm-x86/msr.h2
1 files changed, 2 insertions, 0 deletions
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 35163029e5..d852367a94 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -18,6 +18,8 @@
: /* no outputs */ \
: "c" (msr), "a" (val1), "d" (val2))
+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+
#define rdmsr_user(msr,val1,val2) ({\
int _rc; \
__asm__ __volatile__( \