path: root/xen/arch/x86/x86_64/traps.c
author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>    2007-09-11 12:14:38 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>    2007-09-11 12:14:38 +0100
commit     9caf62ac1390ad6fc8895ae442023dedd2c3597b (patch)
tree       5c31e5f0416a2f740a7ce97fa7422d0adc55388e /xen/arch/x86/x86_64/traps.c
parent     f7f39e3ac3944a3a56cfc57696347c485aea941c (diff)
x86: Clean up asm keyword usage (asm volatile rather than __asm__ __volatile__ in most places), and ensure we use the volatile keyword wherever an asm statement produces outputs but has unspecified side effects or dependencies beyond its explicitly-stated inputs. Also add volatile in a few places where it is not strictly necessary, but where it is unlikely to produce worse code and it makes our intentions perfectly clear. The original problem this patch fixes was tracked down by Joseph Cihula <joseph.cihula@intel.com>.

Signed-off-by: Keir Fraser <keir@xensource.com>
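
For illustration, a minimal sketch (not code from this patch) of the hazard the commit message describes: a plain asm with an output operand but no listed side effects is treated by GCC as a pure computation, so it may be merged with an identical asm or dropped if its result looks unused. The helper names read_tr_unsafe/read_tr_safe are made up for this example.

#include <stdint.h>

/* Risky: a plain asm with outputs is assumed to be side-effect free,
 * so the compiler may combine duplicate instances or delete the asm
 * entirely if 'tr' is never used. */
static inline unsigned int read_tr_unsafe(void)
{
    unsigned int tr;
    asm ( "str %0" : "=r" (tr) );
    return tr;
}

/* Safe: asm volatile tells the compiler the statement has effects or
 * dependencies beyond its listed operands, so it is never deleted or
 * merged with another instance. */
static inline unsigned int read_tr_safe(void)
{
    unsigned int tr;
    asm volatile ( "str %0" : "=r" (tr) );
    return tr;
}
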
Diffstat (limited to 'xen/arch/x86/x86_64/traps.c')
-rw-r--r--  xen/arch/x86/x86_64/traps.c  |  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 1b8b0ad496..d1ceabcd6d 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -149,7 +149,7 @@ asmlinkage void do_double_fault(struct cpu_user_regs *regs)
 {
     unsigned int cpu, tr;
 
-    asm ( "str %0" : "=r" (tr) );
+    asm volatile ( "str %0" : "=r" (tr) );
     cpu = ((tr >> 3) - __FIRST_TSS_ENTRY) >> 2;
 
     watchdog_disable();
@@ -185,11 +185,11 @@ void toggle_guest_mode(struct vcpu *v)
     if ( is_pv_32bit_vcpu(v) )
         return;
     v->arch.flags ^= TF_kernel_mode;
-    __asm__ __volatile__ ( "swapgs" );
+    asm volatile ( "swapgs" );
     update_cr3(v);
 #ifdef USER_MAPPINGS_ARE_GLOBAL
     /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
-    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
+    asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
 #else
     write_ptbase(v);
 #endif
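
As a side note on the second hunk: GCC already treats an asm statement with no output operands as volatile, so the explicit keyword on "swapgs" and the CR3 reload changes intent rather than behaviour, which is what the commit message means by "not strictly necessary". A minimal sketch, using made-up helper names, illustrating the equivalence:

/* Hypothetical example only -- not code from this patch. */

static inline void cpu_relax_implicit(void)
{
    /* No output operands: GCC treats this asm as volatile automatically. */
    asm ( "pause" );
}

static inline void cpu_relax_explicit(void)
{
    /* Same behaviour; the keyword just documents the intent. */
    asm volatile ( "pause" );
}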