author    Keir Fraser <keir@xen.org>  2010-12-16 18:46:55 +0000
committer Keir Fraser <keir@xen.org>  2010-12-16 18:46:55 +0000
commit    165e3aba7c1754d159099a6adf89e54f33090ea5
tree      097f5d1eca29b55b95f4555fe4ec9f3536ba3a7b
parent    ab134ffcc5ca8edc3678aa1429e281e47c42462d
x86: Remove unnecessary LOCK/LOCK_PREFIX macros.

We don't support !CONFIG_SMP.

Signed-off-by: Keir Fraser <keir@xen.org>
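For context, a minimal sketch (helper names hypothetical, not part of the
patch) of the pattern being removed: with the macro, uniprocessor builds
compiled the "lock" prefix away, while SMP builds kept it. Since Xen always
builds SMP, the conditional is dead and the prefix can be spelled out
directly in each asm statement.

    /* Before: prefix selected at build time. */
    #ifdef CONFIG_SMP
    #define LOCK_PREFIX "lock ; "  /* serialise the read-modify-write across CPUs */
    #else
    #define LOCK_PREFIX ""         /* UP: no other CPU can interleave */
    #endif

    static inline void example_inc_old(volatile int *p)
    {
        asm volatile ( LOCK_PREFIX "incl %0" : "+m" (*p) );
    }

    /* After: the prefix is written out unconditionally. */
    static inline void example_inc_new(volatile int *p)
    {
        asm volatile ( "lock; incl %0" : "+m" (*p) );
    }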
-rw-r--r--  xen/include/asm-ia64/config.h        |  3
-rw-r--r--  xen/include/asm-x86/atomic.h         | 22
-rw-r--r--  xen/include/asm-x86/bitops.h         | 24
-rw-r--r--  xen/include/asm-x86/system.h         | 10
-rw-r--r--  xen/include/asm-x86/x86_32/system.h  |  6
-rw-r--r--  xen/include/asm-x86/x86_64/system.h  |  2
6 files changed, 23 insertions(+), 44 deletions(-)
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 8194b4476c..8e93a45814 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -85,9 +85,6 @@ typedef unsigned long paddr_t;
#define CLEAR_BITMAP(name,bits) \
memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
-// FIXME?: x86-ism used in xen/mm.h
-#define LOCK_PREFIX
-
extern unsigned long total_pages;
extern unsigned long xen_pstart;
extern unsigned long xenheap_size;
diff --git a/xen/include/asm-x86/atomic.h b/xen/include/asm-x86/atomic.h
index 17becc5a7e..c73d1ce725 100644
--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -4,12 +4,6 @@
#include <xen/config.h>
#include <asm/system.h>
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* NB. I've pushed the volatile qualifier into the operations. This allows
* fast accessors such as _atomic_read() and _atomic_set() which don't give
@@ -48,7 +42,7 @@ typedef struct { int counter; } atomic_t;
static __inline__ void atomic_add(int i, atomic_t *v)
{
asm volatile(
- LOCK "addl %1,%0"
+ "lock; addl %1,%0"
:"=m" (*(volatile int *)&v->counter)
:"ir" (i), "m" (*(volatile int *)&v->counter));
}
@@ -63,7 +57,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
static __inline__ void atomic_sub(int i, atomic_t *v)
{
asm volatile(
- LOCK "subl %1,%0"
+ "lock; subl %1,%0"
:"=m" (*(volatile int *)&v->counter)
:"ir" (i), "m" (*(volatile int *)&v->counter));
}
@@ -82,7 +76,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
unsigned char c;
asm volatile(
- LOCK "subl %2,%0; sete %1"
+ "lock; subl %2,%0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
return c;
@@ -97,7 +91,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
static __inline__ void atomic_inc(atomic_t *v)
{
asm volatile(
- LOCK "incl %0"
+ "lock; incl %0"
:"=m" (*(volatile int *)&v->counter)
:"m" (*(volatile int *)&v->counter));
}
@@ -111,7 +105,7 @@ static __inline__ void atomic_inc(atomic_t *v)
static __inline__ void atomic_dec(atomic_t *v)
{
asm volatile(
- LOCK "decl %0"
+ "lock; decl %0"
:"=m" (*(volatile int *)&v->counter)
:"m" (*(volatile int *)&v->counter));
}
@@ -129,7 +123,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
unsigned char c;
asm volatile(
- LOCK "decl %0; sete %1"
+ "lock; decl %0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"m" (*(volatile int *)&v->counter) : "memory");
return c != 0;
@@ -148,7 +142,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
unsigned char c;
asm volatile(
- LOCK "incl %0; sete %1"
+ "lock; incl %0; sete %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"m" (*(volatile int *)&v->counter) : "memory");
return c != 0;
@@ -168,7 +162,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
unsigned char c;
asm volatile(
- LOCK "addl %2,%0; sets %1"
+ "lock; addl %2,%0; sets %1"
:"=m" (*(volatile int *)&v->counter), "=qm" (c)
:"ir" (i), "m" (*(volatile int *)&v->counter) : "memory");
return c;
diff --git a/xen/include/asm-x86/bitops.h b/xen/include/asm-x86/bitops.h
index 8237381f81..0bc1952e79 100644
--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -7,12 +7,6 @@
#include <xen/config.h>
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
/*
* We specify the memory operand as both input and output because the memory
* operand is both read from and written to. Since the operand is in fact a
@@ -41,8 +35,7 @@ extern void __bitop_bad_size(void);
static inline void set_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btsl %1,%0"
+ "lock; btsl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
@@ -85,8 +78,7 @@ static inline void __set_bit(int nr, volatile void *addr)
static inline void clear_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btrl %1,%0"
+ "lock; btrl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
@@ -152,8 +144,7 @@ static inline void __change_bit(int nr, volatile void *addr)
static inline void change_bit(int nr, volatile void *addr)
{
asm volatile (
- LOCK_PREFIX
- "btcl %1,%0"
+ "lock; btcl %1,%0"
: "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
}
@@ -175,8 +166,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btsl %2,%1\n\tsbbl %0,%0"
+ "lock; btsl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
@@ -223,8 +213,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
+ "lock; btrl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
@@ -287,8 +276,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
int oldbit;
asm volatile (
- LOCK_PREFIX
- "btcl %2,%1\n\tsbbl %0,%0"
+ "lock; btcl %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "=m" (ADDR)
: "Ir" (nr), "m" (ADDR) : "memory");
return oldbit;
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 52816da1c9..a57d35de81 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -91,14 +91,14 @@ static always_inline unsigned long __cmpxchg(
switch ( size )
{
case 1:
- asm volatile ( LOCK_PREFIX "cmpxchgb %b1,%2"
+ asm volatile ( "lock; cmpxchgb %b1,%2"
: "=a" (prev)
: "q" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
: "memory" );
return prev;
case 2:
- asm volatile ( LOCK_PREFIX "cmpxchgw %w1,%2"
+ asm volatile ( "lock; cmpxchgw %w1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
@@ -106,7 +106,7 @@ static always_inline unsigned long __cmpxchg(
return prev;
#if defined(__i386__)
case 4:
- asm volatile ( LOCK_PREFIX "cmpxchgl %1,%2"
+ asm volatile ( "lock; cmpxchgl %1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
@@ -114,14 +114,14 @@ static always_inline unsigned long __cmpxchg(
return prev;
#elif defined(__x86_64__)
case 4:
- asm volatile ( LOCK_PREFIX "cmpxchgl %k1,%2"
+ asm volatile ( "lock; cmpxchgl %k1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
: "memory" );
return prev;
case 8:
- asm volatile ( LOCK_PREFIX "cmpxchgq %1,%2"
+ asm volatile ( "lock; cmpxchgq %1,%2"
: "=a" (prev)
: "r" (new), "m" (*__xg((volatile void *)ptr)),
"0" (old)
diff --git a/xen/include/asm-x86/x86_32/system.h b/xen/include/asm-x86/x86_32/system.h
index 56ef751ec7..0ec103d449 100644
--- a/xen/include/asm-x86/x86_32/system.h
+++ b/xen/include/asm-x86/x86_32/system.h
@@ -6,7 +6,7 @@ static always_inline unsigned long long __cmpxchg8b(
{
unsigned long long prev;
asm volatile (
- LOCK_PREFIX "cmpxchg8b %3"
+ "lock; cmpxchg8b %3"
: "=A" (prev)
: "c" ((u32)(new>>32)), "b" ((u32)new),
"m" (*__xg((volatile void *)ptr)), "0" (old)
@@ -43,7 +43,7 @@ static always_inline unsigned long long __cmpxchg8b(
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
@@ -72,7 +72,7 @@ static always_inline unsigned long long __cmpxchg8b(
break; \
case 8: \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg8b %4\n" \
+ "1: lock; cmpxchg8b %4\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index fa9b3118b0..3d0a294e3a 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -13,7 +13,7 @@
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
asm volatile ( \
- "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \