author     Ian Campbell <ian.campbell@citrix.com>    2013-07-19 16:20:10 +0100
committer  Ian Campbell <ian.campbell@citrix.com>    2013-08-22 15:48:13 +0100
commit     08d89ac8bd7f43d897cdffe85b236c71d3408e16 (patch)
tree       c5fc91ab77714fadb6c7567f584532d345829bf2 /xen/include/asm-arm
parent     c85c3e3cb9c0540cb604c379298bfd885ade1592 (diff)
xen: arm: retry trylock if strex fails on free lock.
This comes from the Linux patches 15e7e5c1ebf5 for arm32 and 4ecf7ccb1973 for
arm64 by Will Deacon and Catalin Marinas respectively. The Linux commit message
says:
    An exclusive store instruction may fail for reasons other than lock
    contention (e.g. a cache eviction during the critical section) so, in
    line with other architectures using similar exclusive instructions
    (alpha, mips, powerpc), retry the trylock operation if the lock appears
    to be free but the strex reported failure.
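The same retry-if-free behaviour can be expressed without hand-written
assembler. As a minimal illustration only (not the Xen code; the demo_*
names are invented for this sketch), a strong compare-exchange built-in
has exactly these semantics, since on ARMv7 GCC expands it to an
ldrex/strex loop that itself retries spurious store-exclusive failures:

    /* Illustration only: retry-if-free trylock in portable C. */
    #include <stdbool.h>

    typedef struct { unsigned int lock; } demo_spinlock_t; /* hypothetical */

    static inline bool demo_spin_trylock(demo_spinlock_t *l)
    {
        unsigned int free_val = 0;

        /* A strong compare-exchange (4th argument false) may not fail
         * spuriously: the generated ldrex/strex sequence loops until the
         * store-exclusive succeeds or the lock is genuinely seen held. */
        return __atomic_compare_exchange_n(&l->lock, &free_val, 1, false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
    }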
I have observed this due to register_cpu_notifier containing:

    if ( !spin_trylock(&cpu_add_remove_lock) )
        BUG(); /* Should never fail as we are called only during boot. */

which was spuriously failing.
The ARMv8 variant is taken directly from the Linux patch. For v7 I had to
reimplement it, since we don't currently use ticket locks.
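For reference before reading the diff, here is the reimplemented v7 body
with the operand roles spelled out. Only the comments are added here; the
diff below applies this code, and the unchanged tail of the function (the
else branch) is assumed to return 0 as in the pre-existing code:

    static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
    {
        unsigned long contended, res;

        do {
            __asm__ __volatile__(
            "   ldrex   %0, [%2]\n"      /* %0 = current lock value      */
            "   teq     %0, #0\n"        /* free?                        */
            "   strexeq %1, %3, [%2]\n"  /* if free, try to store 1      */
            "   movne   %1, #0\n"        /* held: report it, don't retry */
            : "=&r" (contended), "=r" (res)
            : "r" (&lock->lock), "r" (1)
            : "cc");
        } while (res);                   /* strex failed on a free lock:
                                          * retry rather than report it
                                          * as contended               */

        if (!contended) {
            smp_mb();                    /* barrier on the acquire path  */
            return 1;
        } else {
            return 0;                    /* assumed unchanged tail       */
        }
    }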
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Diffstat (limited to 'xen/include/asm-arm')

 xen/include/asm-arm/arm32/spinlock.h | 25 ++++++++++++++-----------
 xen/include/asm-arm/arm64/spinlock.h |  3 ++-
 2 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/xen/include/asm-arm/arm32/spinlock.h b/xen/include/asm-arm/arm32/spinlock.h
index 4a11a97b93..ba11ad612f 100644
--- a/xen/include/asm-arm/arm32/spinlock.h
+++ b/xen/include/asm-arm/arm32/spinlock.h
@@ -34,17 +34,20 @@ static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
 
 static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
-    unsigned long tmp;
-
-    __asm__ __volatile__(
-"   ldrex   %0, [%1]\n"
-"   teq     %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&lock->lock), "r" (1)
-    : "cc");
-
-    if (tmp == 0) {
+    unsigned long contended, res;
+
+    do {
+        __asm__ __volatile__(
+        "   ldrex   %0, [%2]\n"
+        "   teq     %0, #0\n"
+        "   strexeq %1, %3, [%2]\n"
+        "   movne   %1, #0\n"
+        : "=&r" (contended), "=r" (res)
+        : "r" (&lock->lock), "r" (1)
+        : "cc");
+    } while (res);
+
+    if (!contended) {
         smp_mb();
         return 1;
     } else {
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
index 717f2fe604..3a36cfd4d6 100644
--- a/xen/include/asm-arm/arm64/spinlock.h
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -40,9 +40,10 @@ static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
     unsigned int tmp;
 
     asm volatile(
-    "   ldaxr   %w0, %1\n"
+    "2: ldaxr   %w0, %1\n"
     "   cbnz    %w0, 1f\n"
     "   stxr    %w0, %w2, %1\n"
+    "   cbnz    %w0, 2b\n"
     "1:\n"
     : "=&r" (tmp), "+Q" (lock->lock)
     : "r" (1)
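The arm64 hunk above is cut off by the page. On the assumption that the
rest of the function is the pre-existing code, the resulting body reads
as follows, with the new numbered label "2:" making the retry explicit
(the comments, the clobber list, and the final return are my
reconstruction, not shown in the hunk):

    static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
    {
        unsigned int tmp;

        asm volatile(
        "2: ldaxr   %w0, %1\n"       /* load-acquire exclusive            */
        "   cbnz    %w0, 1f\n"       /* non-zero: held, fall through      */
        "   stxr    %w0, %w2, %1\n"  /* try to store 1; %w0 = status      */
        "   cbnz    %w0, 2b\n"       /* stxr failed on a free lock: retry */
        "1:\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "memory");                 /* assumed, as in the prior code     */

        return !tmp;                 /* assumed unchanged tail: 0 = held  */
    }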