path: root/xen/arch/x86/bitops.c
author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-09-11 12:14:38 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-09-11 12:14:38 +0100
commit     9caf62ac1390ad6fc8895ae442023dedd2c3597b (patch)
tree       5c31e5f0416a2f740a7ce97fa7422d0adc55388e /xen/arch/x86/bitops.c
parent     f7f39e3ac3944a3a56cfc57696347c485aea941c (diff)
x86: Clean up asm keyword usage (asm volatile rather than __asm__ __volatile__ in most places) and ensure we use the volatile keyword wherever an asm statement produces outputs but has other unspecified side effects or dependencies beyond the explicitly-stated inputs.

Also added volatile in a few places where it's not strictly necessary, but where it is unlikely to produce worse code and it makes our intentions perfectly clear.

The original problem this patch fixes was tracked down by Joseph Cihula <joseph.cihula@intel.com>.

Signed-off-by: Keir Fraser <keir@xensource.com>
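A minimal sketch of the distinction the commit message draws, using hypothetical helpers that are not part of the patched file: an asm whose result depends on state not named in its operands must be volatile so the compiler cannot delete or hoist it, while a pure computation such as bsf can stay a plain asm and be optimised freely.

/*
 * Hypothetical helpers for illustration only.  read_tsc() reads a counter
 * that changes behind the compiler's back, so the asm must be volatile
 * even though it has outputs.  bit_scan() is a pure function of its stated
 * input, so a plain asm is enough and the compiler may CSE or reorder it.
 */
static inline unsigned long long read_tsc(void)
{
    unsigned int lo, hi;
    asm volatile ( "rdtsc" : "=a" (lo), "=d" (hi) );
    return ((unsigned long long)hi << 32) | lo;
}

static inline unsigned long bit_scan(unsigned long word)
{
    unsigned long bit;
    /* NB: bsf leaves the destination undefined when word == 0. */
    asm ( "bsf %1,%0" : "=r" (bit) : "rm" (word) );
    return bit;
}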
Diffstat (limited to 'xen/arch/x86/bitops.c')
-rw-r--r--  xen/arch/x86/bitops.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/xen/arch/x86/bitops.c b/xen/arch/x86/bitops.c
index 639d82d830..3845f59c96 100644
--- a/xen/arch/x86/bitops.c
+++ b/xen/arch/x86/bitops.c
@@ -7,7 +7,7 @@ unsigned int __find_first_bit(
{
unsigned long d0, d1, res;
- __asm__ __volatile__ (
+ asm volatile (
" xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */
" repe; scas"__OS"\n\t"
" je 1f\n\t"
@@ -34,8 +34,8 @@ unsigned int __find_next_bit(
if ( bit != 0 )
{
/* Look for a bit in the first word. */
- __asm__ ( "bsf %1,%%"__OP"ax"
- : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
+ asm ( "bsf %1,%%"__OP"ax"
+ : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
if ( set < (BITS_PER_LONG - bit) )
return (offset + set);
offset += BITS_PER_LONG - bit;
@@ -55,7 +55,7 @@ unsigned int __find_first_zero_bit(
{
unsigned long d0, d1, d2, res;
- __asm__ (
+ asm volatile (
" xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */
" repe; scas"__OS"\n\t"
" je 1f\n\t"
@@ -83,7 +83,7 @@ unsigned int __find_next_zero_bit(
if ( bit != 0 )
{
/* Look for zero in the first word. */
- __asm__ ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
+ asm ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
if ( set < (BITS_PER_LONG - bit) )
return (offset + set);
offset += BITS_PER_LONG - bit;
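For context, a hedged usage sketch of the helpers touched above. The (addr, size) and (addr, size, offset) parameter orders are assumed from the conventional Linux-style prototypes; they are not shown in these hunks, so check the bitops headers for the real declarations.

/* Assumed prototypes, for illustration only. */
unsigned int __find_first_bit(const unsigned long *addr, unsigned int size);
unsigned int __find_next_bit(const unsigned long *addr, unsigned int size,
                             unsigned int offset);

/* Count the set bits in a bitmap of nbits bits using the patched helpers. */
static unsigned int count_set_bits(const unsigned long *bitmap,
                                   unsigned int nbits)
{
    unsigned int bit, count = 0;

    for ( bit = __find_first_bit(bitmap, nbits);
          bit < nbits;
          bit = __find_next_bit(bitmap, nbits, bit + 1) )
        count++;

    return count;
}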