Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  gcc-4.6: kernel/*: Fix unused but set warnings
  mutex: Fix annotations to include it in kernel-locking docbook
  pid: make setpgid() system call use RCU read-side critical section
  MAINTAINERS: Add RCU's public git tree
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,6 +1961,12 @@ machines due to caching.
 </sect1>
 </chapter>
 
+<chapter id="apiref">
+ <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+</chapter>
+
 <chapter id="references">
  <title>Further reading</title>
 
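Note: as I read the kernel-doc tooling of this era, a "!E<file>" directive in a
DocBook template expands to the kernel-doc comments of the symbols that <file>
exports via EXPORT_SYMBOL(), while "!I<file>" expands to the kernel-doc of its
internal (non-exported) symbols, such as the mutex_init() macro. That is why
the same commit also fixes the comment markers in kernel/mutex.c and moves the
mutex_init() documentation into include/linux/mutex.h (see the hunks below):
without those changes the new chapter would come out empty.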
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int mutex_lock_interruptible_nested(struct mutex *lock,
                                      unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
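Note: atomic_dec_and_mutex_lock() decrements the counter and returns nonzero,
with the mutex held, only when the counter reaches zero: the usual "tear down
on the last reference, under a lock" idiom. A minimal kernel-style sketch of
the pattern (struct my_obj and obj_put() are hypothetical names, not part of
this commit):

	/* Illustrative sketch only, not code from this commit. */
	struct my_obj {
		atomic_t refcount;
		struct mutex lock;	/* serializes teardown */
	};

	static void obj_put(struct my_obj *obj)
	{
		/* Returns nonzero only for the final reference, lock held. */
		if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
			/* ... release resources while holding the lock ... */
			mutex_unlock(&obj->lock);
			kfree(obj);
		}
	}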
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -4810,6 +4810,7 @@ RCUTORTURE MODULE
 M:	Josh Triplett <josh@freedesktop.org>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:	Supported
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:	Documentation/RCU/torture.txt
 F:	kernel/rcutorture.c
 
@@ -4834,6 +4835,7 @@ M:	Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:	http://www.rdrop.com/users/paulmck/rclock/
 S:	Supported
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:	Documentation/RCU/
 F:	include/linux/rcu*
 F:	include/linux/srcu*
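Note: the T: field in a MAINTAINERS entry records the SCM tree (here, a git
URL) where development for the subsystem takes place, so contributors know
which tree their patches should be based on.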
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex)					\
 do {								\
 	static struct lock_class_key __key;			\
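Note: placing the kernel-doc above the macro lets mutex_init() itself show up
in the generated API chapter. For reference, typical initialization looks like
this (my_lock and struct my_data are hypothetical illustration names):

	/* Illustrative sketch only, not code from this commit. */
	static DEFINE_MUTEX(my_lock);	/* compile-time initialization */

	struct my_data {
		struct mutex lock;
	};

	static void my_data_init(struct my_data *d)
	{
		/* Run-time initialization; the mutex must not be locked. */
		mutex_init(&d->lock);
	}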
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
 	int i, bpno;
 	kdb_bp_t *bp, *bp_check;
 	int diag;
-	int free;
 	char *symname = NULL;
 	long offset = 0ul;
 	int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
 	/*
 	 * Find an empty bp structure to allocate
 	 */
-	free = KDB_MAXBPT;
 	for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
 		if (bp->bp_free)
 			break;
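Note: gcc 4.6 added -Wunused-but-set-variable (enabled by -Wall), which warns
about variables that are written but never read; the two hunks above, and the
similar removals in the hunks below, silence exactly that. A standalone
reproducer:

	/* Compile with: gcc -Wall -c example.c (gcc >= 4.6) */
	int example(void)
	{
		int free;	/* set below but never read */
		free = 10;	/* warning: variable 'free' set but not used */
		return 0;
	}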
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-	struct hrtimer_clock_base *base;
 	unsigned long flags;
 	ktime_t rem;
 
-	base = lock_hrtimer_base(timer, &flags);
+	lock_hrtimer_base(timer, &flags);
 	rem = hrtimer_expires_remaining(timer);
 	unlock_hrtimer_base(timer, &flags);
 
diff --git a/kernel/mutex.c b/kernel/mutex.c
@@ -36,15 +36,6 @@
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
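Note: the kernel-doc extractor only treats comments that open with exactly
"/**" as documentation, so the "/***" blocks in kernel/mutex.c were invisible
to it. The block above __mutex_init() is removed outright because the
user-facing mutex_init() documentation now lives in include/linux/mutex.h
(added earlier in this diff); the remaining headers are converted to proper
"/**" openers in the hunks below.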
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 	return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
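Note: the return-value convention flagged in that comment is a classic trap
when converting semaphores to mutexes. A side-by-side sketch (my_mutex and
my_sem are hypothetical illustration names):

	/* Illustrative sketch only, not code from this commit. */
	if (mutex_trylock(&my_mutex)) {		/* nonzero means acquired */
		/* ... critical section ... */
		mutex_unlock(&my_mutex);
	}

	if (down_trylock(&my_sem) == 0) {	/* zero means acquired: inverted! */
		/* ... critical section ... */
		up(&my_sem);
	}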
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int load_idx)
 {
-	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+	struct sched_group *idlest = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		if (local_group) {
 			this_load = avg_load;
-			this = group;
 		} else if (avg_load < min_load) {
 			min_load = avg_load;
 			idlest = group;
diff --git a/kernel/sys.c b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 		pgid = pid;
 	if (pgid < 0)
 		return -EINVAL;
+	rcu_read_lock();
 
 	/* From this point forward we keep holding onto the tasklist lock
 	 * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
 	/* All paths lead to here, thus we are safe. -DaveM */
 	write_unlock_irq(&tasklist_lock);
+	rcu_read_unlock();
 	return err;
 }
 
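Note: the new rcu_read_lock()/rcu_read_unlock() pair brackets the whole body
of setpgid() because the pid-to-task lookups it performs (find_task_by_vpid()
and friends) walk RCU-protected structures, and the returned task is only
guaranteed to stay valid inside the read-side critical section. The general
shape of the pattern:

	/* Illustrative sketch only, not code from this commit. */
	rcu_read_lock();
	p = find_task_by_vpid(pid);	/* RCU-protected lookup */
	if (p) {
		/* p may be used here; take a reference to use it later */
	}
	rcu_read_unlock();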
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
 	sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-	{
-		int err;
-		err = sysctl_check_table(current->nsproxy, root_table);
-	}
+	sysctl_check_table(current->nsproxy, root_table);
 #endif
 	return 0;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	unsigned length;
 
 	cpu_buffer = iter->cpu_buffer;
-	buffer = cpu_buffer->buffer;
 
 	/*
 	 * Check if we are at the end of the buffer.