locking: Convert raw_rwlock functions to arch_rwlock

Name space cleanup for rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
commit e5931943d0 (parent fb3a6bbc91)
Author: Thomas Gleixner
Date:   2009-12-03 20:08:46 +01:00
18 changed files with 215 additions and 215 deletions
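
To make the rename concrete before the per-architecture diffs: an architecture's
rwlock primitives are now spelled arch_read_*/arch_write_* and operate on
arch_rwlock_t. The sketch below is illustrative only, not code from the tree;
the arch_rwlock_t layout is an assumption and the empty bodies are placeholders
so the sketch compiles standalone, while real architectures supply atomic
implementations, as the diffs below show.

    /* Sketch: the arch-facing rwlock entry points after this rename,
     * with the old __raw_* spellings noted alongside. */
    typedef struct { volatile unsigned int lock; } arch_rwlock_t;  /* assumed layout */

    static inline void arch_read_lock(arch_rwlock_t *rw)     { }           /* was __raw_read_lock */
    static inline void arch_read_unlock(arch_rwlock_t *rw)   { }           /* was __raw_read_unlock */
    static inline int  arch_read_trylock(arch_rwlock_t *rw)  { return 1; } /* was __raw_read_trylock */
    static inline void arch_write_lock(arch_rwlock_t *rw)    { }           /* was __raw_write_lock */
    static inline void arch_write_unlock(arch_rwlock_t *rw)  { }           /* was __raw_write_unlock */
    static inline int  arch_write_trylock(arch_rwlock_t *rw) { return 1; } /* was __raw_write_trylock */

    #define arch_read_can_lock(rw)            (1)                  /* was __raw_read_can_lock */
    #define arch_write_can_lock(rw)           (1)                  /* was __raw_write_can_lock */
    #define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)   /* was __raw_read_lock_flags */
    #define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)  /* was __raw_write_lock_flags */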

diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h

@@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void arch_read_lock(arch_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
         register arch_rwlock_t *lp asm("g1");
         lp = rw;
@@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
         : "g2", "g4", "memory", "cc");
 }

-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {    unsigned long flags; \
         local_irq_save(flags); \
-        arch_read_lock(lock); \
+        __arch_read_lock(lock); \
         local_irq_restore(flags); \
 } while(0)

-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
         register arch_rwlock_t *lp asm("g1");
         lp = rw;
@@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
         : "g2", "g4", "memory", "cc");
 }

-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
 do {    unsigned long flags; \
         local_irq_save(flags); \
-        arch_read_unlock(lock); \
+        __arch_read_unlock(lock); \
         local_irq_restore(flags); \
 } while(0)

-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         register arch_rwlock_t *lp asm("g1");
         lp = rw;
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         *(volatile __u32 *)&lp->lock = ~0U;
 }

-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned int val;

@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
         return (val == 0);
 }

-static inline int arch_read_trylock(arch_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
         register arch_rwlock_t *lp asm("g1");
         register int res asm("o0");
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
         return res;
 }

-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
 ({      unsigned long flags; \
         int res; \
         local_irq_save(flags); \
-        res = arch_read_trylock(lock); \
+        res = __arch_read_trylock(lock); \
         local_irq_restore(flags); \
         res; \
 })

-#define __raw_write_unlock(rw)  do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)   do { (rw)->lock = 0; } while(0)

 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags)   __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags)  __raw_write_lock(rw)
+#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
 #define arch_write_relax(lock)  cpu_relax()

-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)

 #endif /* !(__ASSEMBLY__) */
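
A note on the sparc32 hunks above: the low-level lock operation lives in a
small inline helper that hands off to a few instructions of sparc assembly,
and the public entry point is a macro that disables local interrupts around
that helper. Since the macro now owns the name arch_read_lock, the helper has
to move aside and becomes __arch_read_lock (likewise for unlock and trylock).
A reduced sketch of the pattern, using the names from the diff but with the
assembly replaced by a stub comment, and assuming the kernel's
local_irq_save()/local_irq_restore() from <linux/irqflags.h>:

    static inline void __arch_read_lock(arch_rwlock_t *rw)
    {
            /* real code: a short sparc assembly sequence takes the lock */
    }

    #define arch_read_lock(lock)                    \
    do {    unsigned long flags;                    \
            local_irq_save(flags);                  \
            __arch_read_lock(lock);                 \
            local_irq_restore(flags);               \
    } while (0)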

diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h

@@ -210,17 +210,17 @@ static int inline arch_write_trylock(arch_rwlock_t *lock)
         return result;
 }

-#define __raw_read_lock(p)      arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)   arch_read_trylock(p)
-#define __raw_read_unlock(p)    arch_read_unlock(p)
-#define __raw_write_lock(p)     arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)   arch_write_unlock(p)
-#define __raw_write_trylock(p)  arch_write_trylock(p)
+#define arch_read_lock(p)       arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)    arch_read_trylock(p)
+#define arch_read_unlock(p)     arch_read_unlock(p)
+#define arch_write_lock(p)      arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)    arch_write_unlock(p)
+#define arch_write_trylock(p)   arch_write_trylock(p)

-#define __raw_read_can_lock(rw)  (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw)  (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)

 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
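
One artifact of the mechanical rename is visible in the sparc64 hunk above:
because the inline functions there already carried the arch_* names, rewriting
only the __raw_* macro names produces self-referential defines such as
"#define arch_read_lock(p) arch_read_lock(p)". These are harmless: the C
preprocessor never re-expands a macro inside its own expansion, so each call
still resolves to the inline function of the same name, and no functional
change results. A standalone toy demonstration (the name bump() is made up,
not from the kernel):

    #include <stdio.h>

    static inline int bump(int x) { return x + 1; }
    #define bump(x) bump(x)         /* self-referential: expands exactly once */

    int main(void)
    {
            /* The macro expands to bump(41); "bump" is not re-expanded inside
             * its own expansion, so this calls the inline function above. */
            printf("%d\n", bump(41));       /* prints 42 */
            return 0;
    }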