author    Jan Beulich <jbeulich@suse.com>    2013-09-09 10:23:32 +0200
committer Jan Beulich <jbeulich@suse.com>    2013-09-09 10:23:32 +0200
commit    cfd54835e6e8a28c743dc7d67c662d151ab4923a (patch)
tree      6a6b59ceee96950731f928c70c81581d4b1bf54d
parent    b0df10407df96fe25f3c1a2bffc4be0696d328b4 (diff)
download  xen-cfd54835e6e8a28c743dc7d67c662d151ab4923a.tar.gz
          xen-cfd54835e6e8a28c743dc7d67c662d151ab4923a.tar.bz2
          xen-cfd54835e6e8a28c743dc7d67c662d151ab4923a.zip
VMX: use proper instruction mnemonics if assembler supports them
With the hex byte emission we were taking away a good part of flexibility
from the compiler, as for simplicity reasons these were built using fixed
operands. All halfway modern build environments would allow using the
mnemonics (but we can't disable the hex variants yet, since the binutils
around at the time gcc 4.1 got released didn't support these yet).

I didn't convert __vmread() yet because that would, just like for
__vmread_safe(), imply converting to a macro so that the output operand
can be the caller supplied variable rather than an intermediate one. As
that would require touching all invocation points of __vmread() (of which
there are quite a few), I'd first like to be certain the approach is
acceptable; the main question being whether the now conditional code
might be considered to cause future maintenance issues, and the second
being that of parameter/argument ordering (here I made __vmread_safe()
match __vmwrite(), but one could also take the position that read and
write should use the inverse order of one another, in line with the
actual instruction operands).

Additionally I was quite puzzled to find that all the asm()-s involved
here have memory clobbers - what are they needed for? Or can they be
dropped at least in some cases?

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
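For context, the "hex byte emission" referred to above comes from opcode
macros that predate this patch in xen/include/asm-x86/hvm/vmx/vmx.h (the
diff below keeps them as the fallback path). A sketch of the relevant
definitions follows; the macro names appear in the diff itself, but the
byte values here are reconstructed from the SDM encodings (VMPTRLD is
0F C7 /6, VMWRITE is 0F 79) rather than quoted from this page:

    #define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"  /* 0F C7, /6 via ModRM below */
    #define MODRM_EAX_06    ".byte 0x30\n"       /* ModRM: [EAX], reg field /6 */
    #define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"  /* VMWRITE r64, r/m64 */
    #define MODRM_EAX_ECX   ".byte 0xc1\n"       /* ModRM: reg=EAX, rm=ECX */

Because the ModRM byte is fixed, the operands are pinned to particular
registers, which is why the fallback constraints below use "a" and "c"
while the mnemonic paths are free to use "r", "m" and "rm".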
-rw-r--r--  Config.mk                          21
-rw-r--r--  xen/arch/x86/Rules.mk               2
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c         7
-rw-r--r--  xen/arch/x86/hvm/vmx/vvmx.c         8
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmx.h  90
5 files changed, 99 insertions(+), 29 deletions(-)
diff --git a/Config.mk b/Config.mk
index f169be482b..a26c7c0f2b 100644
--- a/Config.mk
+++ b/Config.mk
@@ -4,6 +4,12 @@ ifeq ($(filter /%,$(XEN_ROOT)),)
$(error XEN_ROOT must be absolute)
endif
+# Convenient variables
+comma := ,
+squote := '
+empty :=
+space := $(empty) $(empty)
+
# fallback for older make
realpath = $(wildcard $(foreach file,$(1),$(shell cd -P $(dir $(file)) && echo "$$PWD/$(notdir $(file))")))
@@ -128,6 +134,21 @@ endef
check-$(gcc) = $(call cc-ver-check,CC,0x040100,"Xen requires at least gcc-4.1")
$(eval $(check-y))
+# as-insn: Check whether assembler supports an instruction.
+# Usage: cflags-y += $(call as-insn,$(CC),"insn",option-yes,option-no)
+as-insn = $(if $(shell echo 'void _(void) { asm volatile ( $(2) ); }' \
+ | $(1) -c -x c -o /dev/null - 2>&1),$(4),$(3))
+
+# as-insn-check: Add an option to compilation flags, but only if insn is
+# supported by assembler.
+# Usage: $(call as-insn-check,CFLAGS,CC,"nop",-DHAVE_GAS_NOP)
+as-insn-check = $(eval $(call as-insn-check-closure,$(1),$(2),$(3),$(4)))
+define as-insn-check-closure
+ ifeq ($$(call as-insn,$$($(2)),$(3),y,n),y)
+ $(1) += $(4)
+ endif
+endef
+
define buildmakevars2shellvars
export PREFIX="$(PREFIX)"; \
export XEN_SCRIPT_DIR="$(XEN_SCRIPT_DIR)"; \
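To see what the as-insn check above actually does: the probe is a one-line
translation unit fed to the compiler on stdin (-x c, output discarded), and
any diagnostic on the redirected stderr makes the $(if ...) select the
option-no argument. For the "vmcall" check added to Rules.mk below, the
compiler input is equivalent to this file:

    /* stdin contents for $(call as-insn,$(CC),"vmcall",option-yes,option-no) */
    void _(void) { asm volatile ( "vmcall" ); }

Note this is basic asm (no operand colons), so the string reaches the
assembler unmodified.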
diff --git a/xen/arch/x86/Rules.mk b/xen/arch/x86/Rules.mk
index 04c1a90477..eb11b5bffb 100644
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -28,6 +28,8 @@ CFLAGS += -msoft-float
$(call cc-options-add,CFLAGS,CC,$(EMBEDDED_EXTRA_CFLAGS))
$(call cc-option-add,CFLAGS,CC,-Wnested-externs)
+$(call as-insn-check,CFLAGS,CC,"vmcall",-DHAVE_GAS_VMX)
+$(call as-insn-check,CFLAGS,CC,"invept (%rax)$$(comma)%rax",-DHAVE_GAS_EPT)
ifeq ($(supervisor_mode_kernel),y)
CFLAGS += -DCONFIG_X86_SUPERVISOR_MODE_KERNEL=1
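The invept probe needs the $$(comma) indirection because a literal comma
inside $(call ...) would be parsed as an argument separator; $(comma) is one
of the convenience variables added to Config.mk above, and the doubled $$
defers its expansion through the $(eval ...) in as-insn-check. By the time
the probe is compiled, it is equivalent to:

    /* effective HAVE_GAS_EPT probe once $$(comma) has expanded */
    void _(void) { asm volatile ( "invept (%rax),%rax" ); }

(The unescaped %rax is fine here: with no operand colons this is basic asm,
where % is not special to the compiler.)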
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 31347bb014..0b40b546f6 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1292,12 +1292,11 @@ void vmx_do_resume(struct vcpu *v)
reset_stack_and_jump(vmx_asm_do_vmentry);
}
-static unsigned long vmr(unsigned long field)
+static inline unsigned long vmr(unsigned long field)
{
- int rc;
unsigned long val;
- val = __vmread_safe(field, &rc);
- return rc ? 0 : val;
+
+ return __vmread_safe(field, &val) ? val : 0;
}
static void vmx_dump_sel(char *name, uint32_t selector)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index cecc72f053..5ef5ad7385 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -963,13 +963,11 @@ fallback:
vvmcs_to_shadow(vvmcs, field[i]);
}
-static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
+static inline void shadow_to_vvmcs(void *vvmcs, unsigned int field)
{
- u64 value;
- int rc;
+ unsigned long value;
- value = __vmread_safe(field, &rc);
- if ( !rc )
+ if ( __vmread_safe(field, &value) )
__set_vvmcs(vvmcs, field, value);
}
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index a7b667e31e..9816ce4dce 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -282,27 +282,43 @@ extern uint8_t posted_intr_vector;
static inline void __vmptrld(u64 addr)
{
- asm volatile ( VMPTRLD_OPCODE
- MODRM_EAX_06
+ asm volatile (
+#ifdef HAVE_GAS_VMX
+ "vmptrld %0\n"
+#else
+ VMPTRLD_OPCODE MODRM_EAX_06
+#endif
/* CF==1 or ZF==1 --> crash (ud2) */
UNLIKELY_START(be, vmptrld)
"\tud2\n"
UNLIKELY_END_SECTION
:
+#ifdef HAVE_GAS_VMX
+ : "m" (addr)
+#else
: "a" (&addr)
+#endif
: "memory");
}
static inline void __vmpclear(u64 addr)
{
- asm volatile ( VMCLEAR_OPCODE
- MODRM_EAX_06
+ asm volatile (
+#ifdef HAVE_GAS_VMX
+ "vmclear %0\n"
+#else
+ VMCLEAR_OPCODE MODRM_EAX_06
+#endif
/* CF==1 or ZF==1 --> crash (ud2) */
UNLIKELY_START(be, vmclear)
"\tud2\n"
UNLIKELY_END_SECTION
:
+#ifdef HAVE_GAS_VMX
+ : "m" (addr)
+#else
: "a" (&addr)
+#endif
: "memory");
}
@@ -325,33 +341,50 @@ static inline unsigned long __vmread(unsigned long field)
static inline void __vmwrite(unsigned long field, unsigned long value)
{
- asm volatile ( VMWRITE_OPCODE
- MODRM_EAX_ECX
+ asm volatile (
+#ifdef HAVE_GAS_VMX
+ "vmwrite %1, %0\n"
+#else
+ VMWRITE_OPCODE MODRM_EAX_ECX
+#endif
/* CF==1 or ZF==1 --> crash (ud2) */
UNLIKELY_START(be, vmwrite)
"\tud2\n"
UNLIKELY_END_SECTION
:
+#ifdef HAVE_GAS_VMX
+ : "r" (field) , "rm" (value)
+#else
: "a" (field) , "c" (value)
+#endif
: "memory");
}
-static inline unsigned long __vmread_safe(unsigned long field, int *error)
+static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
{
- unsigned long ecx;
+ bool_t okay;
- asm volatile ( VMREAD_OPCODE
- MODRM_EAX_ECX
- /* CF==1 or ZF==1 --> rc = -1 */
- "setna %b0 ; neg %0"
- : "=q" (*error), "=c" (ecx)
- : "0" (0), "a" (field)
+ asm volatile (
+#ifdef HAVE_GAS_VMX
+ "vmread %2, %1\n\t"
+#else
+ VMREAD_OPCODE MODRM_EAX_ECX
+#endif
+ /* CF==1 or ZF==1 --> okay = 0 */
+ "setnbe %0"
+#ifdef HAVE_GAS_VMX
+ : "=qm" (okay), "=rm" (*value)
+ : "r" (field)
+#else
+ : "=qm" (okay), "=c" (*value)
+ : "a" (field)
+#endif
: "memory");
- return ecx;
+ return okay;
}
-static inline void __invept(int type, u64 eptp, u64 gpa)
+static inline void __invept(unsigned long type, u64 eptp, u64 gpa)
{
struct {
u64 eptp, gpa;
@@ -365,18 +398,26 @@ static inline void __invept(int type, u64 eptp, u64 gpa)
!cpu_has_vmx_ept_invept_single_context )
type = INVEPT_ALL_CONTEXT;
- asm volatile ( INVEPT_OPCODE
- MODRM_EAX_08
+ asm volatile (
+#ifdef HAVE_GAS_EPT
+ "invept %0, %1\n"
+#else
+ INVEPT_OPCODE MODRM_EAX_08
+#endif
/* CF==1 or ZF==1 --> crash (ud2) */
UNLIKELY_START(be, invept)
"\tud2\n"
UNLIKELY_END_SECTION
:
+#ifdef HAVE_GAS_EPT
+ : "m" (operand), "r" (type)
+#else
: "a" (&operand), "c" (type)
+#endif
: "memory" );
}
-static inline void __invvpid(int type, u16 vpid, u64 gva)
+static inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
{
struct {
u64 vpid:16;
@@ -385,7 +426,12 @@ static inline void __invvpid(int type, u16 vpid, u64 gva)
} __attribute__ ((packed)) operand = {vpid, 0, gva};
/* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
- asm volatile ( "1: " INVVPID_OPCODE MODRM_EAX_08
+ asm volatile ( "1: "
+#ifdef HAVE_GAS_EPT
+ "invvpid %0, %1\n"
+#else
+ INVVPID_OPCODE MODRM_EAX_08
+#endif
/* CF==1 or ZF==1 --> crash (ud2) */
UNLIKELY_START(be, invvpid)
"\tud2\n"
@@ -393,7 +439,11 @@ static inline void __invvpid(int type, u16 vpid, u64 gva)
"2:"
_ASM_EXTABLE(1b, 2b)
:
+#ifdef HAVE_GAS_EPT
+ : "m" (operand), "r" (type)
+#else
: "a" (&operand), "c" (type)
+#endif
: "memory" );
}
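In sum, __vmread_safe() changes contract: it used to return the value and
report failure through an int *error out-parameter; it now returns success
as bool_t and stores the value through a pointer, with the field argument
first so that the order matches __vmwrite(). A minimal hypothetical caller
in the new style (GUEST_RIP is a real VMCS field constant, while
read_guest_rip_or() is invented here for illustration):

    static unsigned long read_guest_rip_or(unsigned long fallback)
    {
        unsigned long val;

        /* setnbe: 1 (success) iff VMREAD left CF==0 and ZF==0 */
        return __vmread_safe(GUEST_RIP, &val) ? val : fallback;
    }

This is the same pattern the vmr() hunk in vmcs.c adopts above.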