locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
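At a glance, the renames in the diff below are:

  __raw_spin_is_locked   -> arch_spin_is_locked
  arch_spin_trylock      -> __arch_spin_trylock   (internal helper; returns the old lock value)
  __raw_spin_trylock     -> arch_spin_trylock     (public trylock)
  __raw_spin_lock        -> arch_spin_lock
  __raw_spin_lock_flags  -> arch_spin_lock_flags
  __raw_spin_unlock      -> arch_spin_unlock
  __raw_spin_unlock_wait -> arch_spin_unlock_wait
  _raw_spin_relax        -> arch_spin_relax       (and the _raw_read_relax/_raw_write_relax variants)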
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>
 
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned long tmp, token;
 
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
         return tmp;
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         CLEAR_IO_SYNC;
-        return arch_spin_trylock(lock) == 0;
+        return __arch_spin_trylock(lock) == 0;
 }
 
 /*
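The comment above the helper spells out the calling convention: __arch_spin_trylock() returns the old lock word, so 0 means the lock was acquired, while the public arch_spin_trylock() inverts that into a boolean-style result. A minimal user-space sketch of the same two-layer pattern, using C11 atomics and illustrative demo_* names (not kernel code):

#include <stdatomic.h>

typedef struct { atomic_ulong slock; } demo_spinlock_t;

/* Returns the previous value of the lock word: 0 on success,
 * nonzero if the lock was already held. */
static inline unsigned long __demo_spin_trylock(demo_spinlock_t *lock)
{
        return atomic_exchange(&lock->slock, 1);
}

/* Public convention is inverted: nonzero means "acquired". */
static inline int demo_spin_trylock(demo_spinlock_t *lock)
{
        return __demo_spin_trylock(lock) == 0;
}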
@@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR 0
 #endif
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         CLEAR_IO_SYNC;
         while (1) {
-                if (likely(arch_spin_trylock(lock) == 0))
+                if (likely(__arch_spin_trylock(lock) == 0))
                         break;
                 do {
                         HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock)
         }
 }
 
 static inline
-void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
         unsigned long flags_dis;
 
         CLEAR_IO_SYNC;
         while (1) {
-                if (likely(arch_spin_trylock(lock) == 0))
+                if (likely(__arch_spin_trylock(lock) == 0))
                         break;
                 local_save_flags(flags_dis);
                 local_irq_restore(flags);
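arch_spin_lock_flags() above encodes a polite-spin pattern: on contention it restores the caller's saved interrupt state while busy-waiting, and only re-disables interrupts before retrying the acquisition. A user-space sketch of that shape, with demo_*/irq_* stand-ins for the kernel's local_save_flags()/local_irq_restore() (illustrative only; there is no direct user-space equivalent):

#include <stdatomic.h>

typedef struct { atomic_ulong slock; } demo_spinlock_t;

static unsigned long irq_state;                      /* stand-in for the CPU irq flag */
static unsigned long irq_save(void)      { return irq_state; }
static void irq_restore(unsigned long f) { irq_state = f; }
static void cpu_relax_hint(void)         { /* a pause/yield hint would go here */ }

static unsigned long __demo_spin_trylock(demo_spinlock_t *lock)
{
        return atomic_exchange(&lock->slock, 1);     /* old value; 0 == acquired */
}

static void demo_spin_lock_flags(demo_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (__demo_spin_trylock(lock) == 0)
                        break;                       /* lock acquired */
                flags_dis = irq_save();              /* remember the "disabled" state */
                irq_restore(flags);                  /* spin with the caller's irq state */
                do {
                        cpu_relax_hint();
                } while (atomic_load(&lock->slock) != 0);
                irq_restore(flags_dis);              /* re-disable before retrying */
        }
}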
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
         }
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         SYNC_IO;
-        __asm__ __volatile__("# __raw_spin_unlock\n\t"
+        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                 LWSYNC_ON_SMP: : :"memory");
         lock->slock = 0;
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif
 
 /*
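On the unlock side, the lwsync emitted via LWSYNC_ON_SMP before the plain store of 0 is what makes the store a release: everything done inside the critical section is ordered before the lock word is cleared. In C11 terms the equivalent shape is a release store; a tiny sketch with the same illustrative demo_* naming as above (not the kernel's code):

#include <stdatomic.h>

typedef struct { atomic_ulong slock; } demo_spinlock_t;

static inline void demo_spin_unlock(demo_spinlock_t *lock)
{
        /* The release ordering plays the role of the lwsync barrier:
         * prior critical-section accesses become visible before the
         * lock word is seen as 0. */
        atomic_store_explicit(&lock->slock, 0, memory_order_release);
}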
@@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock) __spin_yield(lock)
-#define _raw_read_relax(lock) __rw_yield(lock)
-#define _raw_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) __spin_yield(lock)
+#define arch_read_relax(lock) __rw_yield(lock)
+#define arch_write_relax(lock) __rw_yield(lock)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */