Merge branch 'core/locking' into perfcounters/core
Merge reason: we moved a mutex.h commit that originated from the
perfcounters tree into core/locking - but now merge back that branch
to solve a merge artifact and to pick up cleanups of this commit that
happened in core/locking.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -150,28 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
-
-/**
- * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
- * @cnt: the atomic which we are to dec
- * @lock: the mutex to return holding if we dec to 0
- *
- * return true and hold lock if we dec to 0, return false otherwise
- */
-static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
-{
-	/* dec if we can't possibly hit 0 */
-	if (atomic_add_unless(cnt, -1, 1))
-		return 0;
-	/* we might hit 0, so take the lock */
-	mutex_lock(lock);
-	if (!atomic_dec_and_test(cnt)) {
-		/* when we actually did the dec, we didn't hit 0 */
-		mutex_unlock(lock);
-		return 0;
-	}
-	/* we hit 0, and we hold the lock */
-	return 1;
-}
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #endif
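The header now only declares the helper; the definition moves out of
line (see the kernel/mutex.c hunk below). Its fast path relies on
atomic_add_unless(): the counter is decremented without the lock only
when it provably cannot reach zero. For contrast, a minimal sketch of
the always-lock equivalent it improves on (the function name here is
illustrative, not part of this commit):

	#include <linux/mutex.h>
	#include <asm/atomic.h>		/* <linux/atomic.h> on later kernels */

	static int slow_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
	{
		/* take the mutex on every decrement, even when the
		 * counter is nowhere near zero */
		mutex_lock(lock);
		if (!atomic_dec_and_test(cnt)) {
			mutex_unlock(lock);
			return 0;	/* not the final decrement */
		}
		return 1;		/* hit zero; lock stays held */
	}

Both variants return with the mutex held exactly when the counter hit
zero; atomic_dec_and_mutex_lock() merely skips the lock traffic for
the common non-final decrement.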
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -471,5 +471,28 @@ int __sched mutex_trylock(struct mutex *lock)
 
 	return ret;
 }
 EXPORT_SYMBOL(mutex_trylock);
 
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+	/* dec if we can't possibly hit 0 */
+	if (atomic_add_unless(cnt, -1, 1))
+		return 0;
+	/* we might hit 0, so take the lock */
+	mutex_lock(lock);
+	if (!atomic_dec_and_test(cnt)) {
+		/* when we actually did the dec, we didn't hit 0 */
+		mutex_unlock(lock);
+		return 0;
+	}
+	/* we hit 0, and we hold the lock */
+	return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
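The return convention survives the move: nonzero means the counter
hit zero and the caller now holds the mutex. A hedged usage sketch of
the exported function; my_obj, obj_list_lock, my_obj_put() and the
list bookkeeping are invented for illustration and are not part of
this commit:

	#include <linux/mutex.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>		/* <linux/atomic.h> on later kernels */

	struct my_obj {
		atomic_t refcount;
		struct list_head node;
	};

	static LIST_HEAD(obj_list);
	static DEFINE_MUTEX(obj_list_lock);	/* protects obj_list */

	/* Drop a reference; on the final put, unlink and free the object
	 * while still holding the list mutex, so a concurrent lookup can
	 * never find a half-destroyed object. */
	static void my_obj_put(struct my_obj *obj)
	{
		if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
			list_del(&obj->node);
			mutex_unlock(&obj_list_lock);
			kfree(obj);
		}
	}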
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -864,9 +864,9 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
 /**
- * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
  * the timeout structure is provided
  * by the caller
  *
  * @lock: the rt_mutex to be locked
  * @timeout: timeout structure or NULL (no timeout)
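This hunk only renames the kerneldoc heading to match the function it
actually documents, rt_mutex_timed_lock(). For orientation, a hedged
call sketch; DEFINE_RT_MUTEX() and the trailing detect_deadlock
argument reflect this era's API as best remembered, not this diff:

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(my_rtm);

	static void locked_work(void)
	{
		/* NULL timeout means "no timeout", per the kerneldoc
		 * above; 0 disables deadlock detection (assumed third
		 * parameter). Returns 0 once the lock is acquired. */
		if (rt_mutex_timed_lock(&my_rtm, NULL, 0) == 0) {
			/* ... critical section ... */
			rt_mutex_unlock(&my_rtm);
		}
	}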
@@ -913,7 +913,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
-/***
+/**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
  *