author    Keir Fraser <keir@xen.org>  2011-06-23 11:54:53 +0100
committer Keir Fraser <keir@xen.org>  2011-06-23 11:54:53 +0100
commit    a82874185ab1c70cbd1bc267b762d2220234bf3e
tree      41f69bffbcc44f8953228685d40fd35e61c542e6
parent    d2336a13c4ae2961714cb5265f5a7976df29c462
x86/hvm: add SMEP support to HVM guest
New Intel CPUs support SMEP (Supervisor Mode Execution Protection). SMEP prevents software running with CPL < 3 (supervisor mode) from fetching instructions from any linear address with a valid translation for which the U/S flag (bit 2) is 1 in every paging-structure entry controlling the translation for that linear address. This patch adds SMEP support to HVM guests.

Signed-off-by: Yang Wei <wei.y.yang@intel.com>
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
Signed-off-by: Li Xin <xin.li@intel.com>
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>

xen-unstable changeset: 23504:c34604d5a293
xen-unstable date:      Mon Jun 06 13:46:48 2011 +0100
-rw-r--r--  tools/libxc/xc_cpufeature.h    |  1
-rw-r--r--  tools/libxc/xc_cpuid_x86.c     |  8
-rw-r--r--  xen/arch/x86/hvm/hvm.c         | 13
-rw-r--r--  xen/arch/x86/mm/guest_walk.c   | 15
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h  |  3
5 files changed, 35 insertions(+), 5 deletions(-)
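
Before the diff itself, a minimal standalone sketch of the fault condition described in the commit message. Everything here (names, helper, layout) is illustrative only and is not part of the patch or of Xen's code.

    /* Sketch only: models the SMEP condition, not Xen's walker. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_USER (1u << 2)   /* U/S flag, bit 2 of a paging-structure entry */

    /*
     * SMEP faults an instruction fetch made at CPL < 3 when every entry
     * controlling the translation has U/S set, i.e. the mapping is
     * user-accessible at all levels.
     */
    static bool smep_blocks_fetch(bool cr4_smep, unsigned int cpl,
                                  const uint64_t *walk_entries, int nr_levels)
    {
        if ( !cr4_smep || cpl == 3 )
            return false;

        for ( int i = 0; i < nr_levels; i++ )
            if ( !(walk_entries[i] & PTE_USER) )
                return false;    /* some level is supervisor-only: fetch allowed */

        return true;             /* user-accessible at every level: fault */
    }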
diff --git a/tools/libxc/xc_cpufeature.h b/tools/libxc/xc_cpufeature.h
index d8bb6cb27f..d851736b8b 100644
--- a/tools/libxc/xc_cpufeature.h
+++ b/tools/libxc/xc_cpufeature.h
@@ -143,5 +143,6 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection */
#endif /* __LIBXC_CPUFEATURE_H */
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index a30d9a89e6..f867366671 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -300,6 +300,14 @@ static void xc_cpuid_hvm_policy(
clear_bit(X86_FEATURE_PAE, regs[3]);
break;
+ case 0x00000007: /* Intel-defined CPU features */
+ if ( input[1] == 0 ) {
+ regs[1] &= bitmaskof(X86_FEATURE_SMEP);
+ } else
+ regs[1] = 0;
+ regs[0] = regs[2] = regs[3] = 0;
+ break;
+
case 0x0000000d:
xc_cpuid_config_xsave(xch, domid, xfeature_mask, input, regs);
break;
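
For illustration, the whitelist pattern used in the libxc hunk above, as a self-contained sketch; bitmaskof() here is a local stand-in for the libxc macro, and the function itself is hypothetical.

    /* Sketch only: for CPUID leaf 7, sub-leaf 0, keep only whitelisted
     * feature bits in EBX and zero everything else. */
    #include <stdint.h>

    #define bitmaskof(bit) (1u << ((bit) % 32))   /* stand-in definition */
    #define FEATURE_SMEP   (7 * 32 + 7)           /* CPUID.7.0:EBX bit 7 */

    static void filter_leaf7(uint32_t subleaf, uint32_t regs[4])
    {
        if ( subleaf == 0 )
            regs[1] &= bitmaskof(FEATURE_SMEP);   /* EBX: expose SMEP only */
        else
            regs[1] = 0;
        regs[0] = regs[2] = regs[3] = 0;          /* EAX/ECX/EDX hidden */
    }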
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ecd1a26804..cbf1a74853 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1473,8 +1473,9 @@ int hvm_set_cr4(unsigned long value)
v->arch.hvm_vcpu.guest_cr[4] = value;
hvm_update_guest_cr(v, 4);
- /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
- if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+ /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries. */
+ if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE |
+ X86_CR4_PAE | X86_CR4_SMEP) )
paging_update_paging_modes(v);
return X86EMUL_OKAY;
@@ -2116,7 +2117,7 @@ enum hvm_copy_result hvm_copy_from_guest_virt(
enum hvm_copy_result hvm_fetch_from_guest_virt(
void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
- if ( hvm_nx_enabled(current) )
+ if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
pfec |= PFEC_insn_fetch;
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
@@ -2142,7 +2143,7 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
- if ( hvm_nx_enabled(current) )
+ if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
pfec |= PFEC_insn_fetch;
return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
@@ -2212,6 +2213,10 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
*ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
break;
+ case 0x7:
+ if ( (count == 0) && !cpu_has_smep )
+ *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+ break;
case 0xb:
/* Fix the x2APIC identifier. */
*edx = v->vcpu_id * 2;
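
A short sketch of the pfec decision made in the two fetch paths above (names are illustrative, not the Xen API). The I/D flag of a page-fault error code is only reported when NX or SMEP is enabled, so the emulated walk only tags an access as an instruction fetch in that case; with neither enabled, a fetch walks exactly like a data read.

    #include <stdint.h>

    #define PFEC_insn_fetch (1u << 4)   /* I/D flag of the page-fault error code */

    static uint32_t fetch_pfec(uint32_t pfec, int nx_enabled, int smep_enabled)
    {
        if ( nx_enabled || smep_enabled )
            pfec |= PFEC_insn_fetch;    /* walk must honour NX/SMEP on fetches */
        return pfec;
    }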
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index b20fdaff8b..8b2e555e7e 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -132,7 +132,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
guest_l4e_t *l4p;
#endif
uint32_t gflags, mflags, iflags, rc = 0;
- int pse;
+ int pse, smep;
perfc_incr(guest_walk);
memset(gw, 0, sizeof(*gw));
@@ -145,6 +145,15 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
mflags = mandatory_flags(v, pfec);
iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
+ /* SMEP: kernel-mode instruction fetches from user-mode mappings
+ * should fault. Unlike NX or invalid bits, we're looking for _all_
+ * entries in the walk to have _PAGE_USER set, so we need to do the
+ * whole walk as if it were a user-mode one and then invert the answer. */
+ smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
+ && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
+ if ( smep )
+ mflags |= _PAGE_USER;
+
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
@@ -271,6 +280,10 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
rc |= ((gflags & mflags) ^ mflags);
}
+ /* Now re-invert the user-mode requirement for SMEP. */
+ if ( smep )
+ rc ^= _PAGE_USER;
+
/* Go back and set accessed and dirty bits only if the walk was a
* success. Although the PRMs say higher-level _PAGE_ACCESSED bits
* get set whenever a lower-level PT is used, at least some hardware
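
For clarity, a self-contained sketch of the "walk as user-mode, then invert" trick described in the comment added above; the variable names mirror guest_walk_tables(), but the function itself is hypothetical and simplified, not part of the patch.

    #include <stdint.h>

    #define _PAGE_USER 0x04u

    /* gflags[] holds the flags of each paging-structure entry in the walk;
     * mflags is the set of bits that must be present at every level. */
    static uint32_t walk_rc_with_smep(const uint32_t *gflags, int levels,
                                      uint32_t mflags, int smep)
    {
        uint32_t rc = 0;

        if ( smep )
            mflags |= _PAGE_USER;                   /* require U/S at every level */

        for ( int i = 0; i < levels; i++ )
            rc |= ((gflags[i] & mflags) ^ mflags);  /* accumulate missing bits */

        if ( smep )
            rc ^= _PAGE_USER;   /* _PAGE_USER ends up set in rc only if *no*
                                 * level was missing U/S, i.e. the mapping is
                                 * user-accessible throughout: the SMEP fault case */
        return rc;
    }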
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index fd79ba2032..9fa0c20c5b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -178,6 +178,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
(!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
#define hvm_pae_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_smep_enabled(v) \
+ (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
#define hvm_nx_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
@@ -291,6 +293,7 @@ static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
+ (cpu_has_smep ? X86_CR4_SMEP : 0) | \
(xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
/* These exceptions must always be intercepted. */
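
As a usage note, a minimal sketch of how a reserved-bit mask like the one extended above is typically consumed when the guest writes CR4; the helper name is hypothetical, and it omits the fault injection that hvm_set_cr4() performs in the real code.

    /* Sketch only: a CR4 write that sets any reserved bit must be rejected
     * (the guest would see #GP). */
    static int cr4_value_valid(unsigned long new_cr4, unsigned long reserved_bits)
    {
        return (new_cr4 & reserved_bits) == 0;
    }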