memory barrier: adding smp_mb__after_lock
Adding smp_mb__after_lock define to be used as a smp_mb call after
a lock. Making it a nop for x86, since {read|write|spin}_lock() on x86
are full memory barriers.

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent a57de0b433
commit ad46276952
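As a rough illustration (not part of this commit), the pattern the new helper targets looks like the hypothetical wakeup callback below: code that takes a lock and then needs a full barrier before inspecting state written by another CPU. The function name example_sk_wake_async is made up for this sketch; read_lock, waitqueue_active and wake_up_interruptible are the usual kernel primitives.

/*
 * Hypothetical sketch of a socket wakeup path using smp_mb__after_lock().
 * On x86, read_lock() is already a full memory barrier, so the helper
 * compiles to nothing there; other architectures fall back to smp_mb().
 */
static void example_sk_wake_async(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Order the earlier state updates against the sleeper check below. */
	smp_mb__after_lock();

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);

	read_unlock(&sk->sk_callback_lock);
}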
arch/x86/include/asm/spinlock.h
@@ -302,4 +302,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
+/* The {read|write|spin}_lock() on x86 are full memory barriers. */
+static inline void smp_mb__after_lock(void) { }
+#define ARCH_HAS_SMP_MB_AFTER_LOCK
+
 #endif /* _ASM_X86_SPINLOCK_H */
include/linux/spinlock.h
@@ -132,6 +132,11 @@ do { \
 #endif /*__raw_spin_is_contended*/
 #endif
 
+/* The lock does not imply full memory barrier. */
+#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
+static inline void smp_mb__after_lock(void) { smp_mb(); }
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
include/net/sock.h
@@ -1271,6 +1271,9 @@ static inline int sk_has_allocations(const struct sock *sk)
  * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1
  * could then endup calling schedule and sleep forever if there are no more
  * data on the socket.
+ *
+ * The sk_has_sleeper is always called right after a call to read_lock, so we
+ * can use smp_mb__after_lock barrier.
  */
 static inline int sk_has_sleeper(struct sock *sk)
 {
@@ -1280,7 +1283,7 @@ static inline int sk_has_sleeper(struct sock *sk)
 	 *
 	 * This memory barrier is paired in the sock_poll_wait.
 	 */
-	smp_mb();
+	smp_mb__after_lock();
 	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
 }
 
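For context, the pairing mentioned in the sock.h comment works between the wakeup path (sk_has_sleeper, which now embeds smp_mb__after_lock()) and the poll path, where sock_poll_wait() issues the matching smp_mb() after registering on the waitqueue. A simplified, hypothetical poll callback sketching that other side (example_poll is a made-up name; sock_poll_wait, skb_queue_empty and the POLL flags are standard kernel/net identifiers of this era):

/*
 * Hypothetical poll callback: register on the waitqueue first, then
 * re-check the receive queue.  sock_poll_wait() contains the smp_mb()
 * that pairs with smp_mb__after_lock() in sk_has_sleeper().
 */
static unsigned int example_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk->sk_sleep, wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}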