x86: Use xadd helper more widely
This covers the trivial cases from open-coded xadd to the xadd macros.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
committed by: H. Peter Anvin
parent: 433b352061
commit: 8b8bc2f731
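For context on what the conversions below rely on: the xadd() helper wraps the lock-prefixed xadd instruction behind a sizeof() dispatch and evaluates to the value the memory location held before the addition. The snippet below is only a minimal illustrative sketch of such a helper, not the kernel's actual definition; LOCK_PREFIX is stubbed out locally so the sketch is self-contained.

/*
 * Illustrative sketch only -- not the kernel's actual xadd() macro.
 * Atomically adds 'inc' to *ptr and evaluates to the value *ptr held
 * before the addition (xadd exchanges the register with the memory
 * operand and then stores the sum back to memory).
 */
#define LOCK_PREFIX "lock; "    /* the kernel provides its own definition */

#define xadd(ptr, inc)                                          \
({                                                              \
        __typeof__(*(ptr)) __ret = (inc);                       \
        switch (sizeof(*(ptr))) {                               \
        case 2:                                                 \
                asm volatile(LOCK_PREFIX "xaddw %w0, %1"        \
                             : "+r" (__ret), "+m" (*(ptr))      \
                             : : "memory", "cc");               \
                break;                                          \
        case 4:                                                 \
                asm volatile(LOCK_PREFIX "xaddl %0, %1"         \
                             : "+r" (__ret), "+m" (*(ptr))      \
                             : : "memory", "cc");               \
                break;                                          \
        case 8:                                                 \
                asm volatile(LOCK_PREFIX "xaddq %q0, %1"        \
                             : "+r" (__ret), "+m" (*(ptr))      \
                             : : "memory", "cc");               \
                break;                                          \
        }                                                       \
        __ret;                                                  \
})

The pre-486 fallback in atomic_add_return() stays in place because the 386 has no xadd instruction at all, which is why the no_xadd path (and the __i temporary it needs) survives under CONFIG_M386.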
arch/x86/include/asm/atomic.h
@@ -172,18 +172,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-        int __i;
 #ifdef CONFIG_M386
+        int __i;
         unsigned long flags;
         if (unlikely(boot_cpu_data.x86 <= 3))
                 goto no_xadd;
 #endif
         /* Modern 486+ processor */
-        __i = i;
-        asm volatile(LOCK_PREFIX "xaddl %0, %1"
-                     : "+r" (i), "+m" (v->counter)
-                     : : "memory");
-        return i + __i;
+        return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
arch/x86/include/asm/atomic64_64.h
@@ -170,11 +170,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-        long __i = i;
-        asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-                     : "+r" (i), "+m" (v->counter)
-                     : : "memory");
-        return i + __i;
+        return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
arch/x86/include/asm/rwsem.h
@@ -204,13 +204,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-        long tmp = delta;
-
-        asm volatile(LOCK_PREFIX "xadd %0,%1"
-                     : "+r" (tmp), "+m" (sem->count)
-                     : : "memory");
-
-        return tmp + delta;
+        return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
arch/x86/include/asm/uv/uv_bau.h
@@ -656,11 +656,7 @@ static inline int atomic_read_short(const struct atomic_short *v)
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-        short __i = i;
-        asm volatile(LOCK_PREFIX "xaddw %0, %1"
-                     : "+r" (i), "+m" (v->counter)
-                     : : "memory");
-        return i + __i;
+        return i + xadd(&v->counter, i);
 }
 
 /*
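The conversions above are drop-in replacements because xadd() returns the pre-add value, so adding the increment back on top reproduces exactly what the removed return i + __i; sequences computed. A userspace sketch of that equivalence, using GCC's __atomic_fetch_add() builtin as a stand-in for the kernel helper (an assumption made purely for illustration):

#include <assert.h>
#include <stdio.h>

/*
 * Stand-in for xadd(): __atomic_fetch_add() likewise returns the value
 * the target held before the addition.
 */
static int fetch_add(int *ptr, int inc)
{
        return __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
}

int main(void)
{
        int counter = 40;
        int i = 2;

        /* Mirrors "return i + xadd(&v->counter, i);". */
        int ret = i + fetch_add(&counter, i);

        assert(ret == counter);         /* both are 42: old value + increment */
        printf("counter=%d ret=%d\n", counter, ret);
        return 0;
}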