author | Ian Campbell <ian.campbell@citrix.com> | 2013-07-19 16:20:08 +0100 |
---|---|---|
committer | Ian Campbell <ian.campbell@citrix.com> | 2013-08-22 15:47:44 +0100 |
commit | 79474832b5e068aa4d131f8e586b0fa674a7ee3e (patch) | |
tree | d888e28a0b8bcaa5f04c3c5eae4e6467b64b1f52 /xen/include/asm-arm | |
parent | 7f4f85a70d645bff93230ab86c0276a5eb67c3bc (diff) | |
xen/arm64: Assembly optimized bitops from Linux
This patch replaces the previous hashed-lock implementation of bitops with
assembly-optimized ones taken from Linux v3.10-rc4.
The Linux-derived assembly only supports 8-byte-aligned bitmaps (which under
Linux are unsigned long * rather than our void *). We do actually have uses of
4-byte alignment (e.g. the bitmaps in struct xmem_pool) which trigger alignment
faults.
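
To make the failure mode concrete, here is a minimal C sketch of the problem,
assuming a hypothetical pool layout (it only stands in for a 4-byte-aligned
bitmap such as the ones in struct xmem_pool; none of these names exist in the
tree):

    #include <stdint.h>

    /* Hypothetical layout: the bitmap is built from 32-bit words, so it can
     * start on an address that is 4-byte but not 8-byte aligned. */
    struct pool_like {
        uint32_t level;        /* 4 bytes...                           */
        uint32_t bitmap[8];    /* ...so this array may sit at offset 4 */
    };

    /* An 8-byte-wide bitop treats the bitmap as an array of 64-bit words: */
    static inline uint64_t *bit_word(void *bitmap, int nr)
    {
        return (uint64_t *)bitmap + (nr / 64);
    }

    /* With bitmap == pool->bitmap the resulting pointer is only 4-byte
     * aligned, and a 64-bit exclusive access (ldxr/stxr) through it raises
     * an alignment fault. */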
Therefore adjust the assembly to work in 4-byte increments, which involved:
- the bit offset is now bits 4:0 => mask with #31 not #63
- using wN registers, not xN, for the load/modify/store loop (sketched below).
There is no need to adjust the shift used to calculate the word offset; the
difference is already accounted for in the #63 -> #31 change.
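
For illustration, the adjusted exclusive load/store loop behaves roughly like
the C sketch below. This is only a rendering of the two points above (the
&31/>>5 arithmetic and the 32-bit wN ldxr/stxr pair); the real implementation
is the bitops assembly imported from Linux (cf. the .bitops.o.d note below),
and the function and variable names here are hypothetical:

    #include <stdint.h>

    static inline void sketch_set_bit(int nr, volatile void *addr)
    {
        uint32_t mask = 1u << (nr & 31);           /* bit offset is bits 4:0 (#31) */
        volatile uint32_t *p =
            (volatile uint32_t *)addr + (nr >> 5); /* 4-byte word offset */
        uint32_t tmp, fail;

        do {
            asm volatile("ldxr %w0, [%2]\n\t"      /* exclusive load, 32-bit */
                         "orr  %w0, %w0, %w3\n\t"  /* set the requested bit */
                         "stxr %w1, %w0, [%2]"     /* w1 == 0 if the store won */
                         : "=&r" (tmp), "=&r" (fail)
                         : "r" (p), "r" (mask)
                         : "memory");
        } while ( fail );
    }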
NB: Xen's build system cannot cope with the change from a .c to a .S file;
remove xen/arch/arm/arm64/lib/.bitops.o.d or clean your build tree.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/include/asm-arm')
-rw-r--r-- | xen/include/asm-arm/arm64/bitops.h | 203 |
1 file changed, 8 insertions, 195 deletions
diff --git a/xen/include/asm-arm/arm64/bitops.h b/xen/include/asm-arm/arm64/bitops.h
index 0a6eba32b9..b43931daea 100644
--- a/xen/include/asm-arm/arm64/bitops.h
+++ b/xen/include/asm-arm/arm64/bitops.h
@@ -1,200 +1,15 @@
 #ifndef _ARM_ARM64_BITOPS_H
 #define _ARM_ARM64_BITOPS_H
 
-/* Generic bitop support. Based on linux/include/asm-generic/bitops/atomic.h */
-
-#include <xen/spinlock.h>
-#include <xen/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE]/* __lock_aligned*/;
-
-#define _atomic_spin_lock_irqsave(l,f) do {     \
-    spinlock_t *s = ATOMIC_HASH(l);             \
-    spin_lock_irqsave(s, f);\
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do {\
-    spinlock_t *s = ATOMIC_HASH(l);             \
-    spin_unlock_irqrestore(s,f);                \
-} while(0)
-
-#define FIXUP(_p, _mask)                            \
-    {                                               \
-        unsigned long __p = (unsigned long)_p;      \
-        if (__p & 0x7) {                            \
-            if (_mask > 0xffffffff) {               \
-                __p = (__p+32)&~0x7; _mask >>=32;   \
-            } else {                                \
-                __p &= ~0x7; _mask <<= 32;          \
-            }                                       \
-            if (0)printk("BITOPS: Fixup misaligned ptr %p => %#lx\n", _p, __p); \
-            _p = (void *)__p;                       \
-        }                                           \
-    }
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-
-static inline void set_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long flags;
-
-    //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
-    //       nr, addr, mask, p, ATOMIC_HASH(p));
-    FIXUP(p, mask);
-    //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
-    //       nr, addr, mask, p, ATOMIC_HASH(p));
-    //printk("before *p is %#lx\n", *p);
-    _atomic_spin_lock_irqsave(p, flags);
-    *p |= mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-    //printk(" after *p is %#lx\n", *p);
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long flags;
-
-    FIXUP(p, mask);
-
-    _atomic_spin_lock_irqsave(p, flags);
-    *p &= ~mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long flags;
-
-    FIXUP(p, mask);
-
-    _atomic_spin_lock_irqsave(p, flags);
-    *p ^= mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long old;
-    unsigned long flags;
-
-    FIXUP(p, mask);
-
-    _atomic_spin_lock_irqsave(p, flags);
-    old = *p;
-    *p = old | mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-
-    return (old & mask) != 0;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long old;
-    unsigned long flags;
-
-    FIXUP(p, mask);
-
-    _atomic_spin_lock_irqsave(p, flags);
-    old = *p;
-    *p = old & ~mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-
-    return (old & mask) != 0;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+/*
+ * Little endian assembly atomic bitops.
  */
-static inline int test_and_change_bit(int nr, volatile void *addr)
-{
-    unsigned long mask = BIT_MASK(nr);
-    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-    unsigned long old;
-    unsigned long flags;
-
-    FIXUP(p, mask);
-
-    _atomic_spin_lock_irqsave(p, flags);
-    old = *p;
-    *p = old ^ mask;
-    _atomic_spin_unlock_irqrestore(p, flags);
-
-    return (old & mask) != 0;
-}
+extern void set_bit(int nr, volatile void *p);
+extern void clear_bit(int nr, volatile void *p);
+extern void change_bit(int nr, volatile void *p);
+extern int test_and_set_bit(int nr, volatile void *p);
+extern int test_and_clear_bit(int nr, volatile void *p);
+extern int test_and_change_bit(int nr, volatile void *p);
 
 /* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */
 /**
@@ -217,8 +32,6 @@ static /*__*/always_inline unsigned long __ffs(unsigned long word)
  */
 #define ffz(x) __ffs(~(x))
 
-
-
 /* Based on linux/include/asm-generic/bitops/find.h */
 
 #ifndef find_next_bit