rcu: treewide: Do not use rcu_read_lock_held when calling rcu_dereference_check
Since ca5ecddf (rcu: define __rcu address space modifier for sparse),
rcu_dereference_check() includes rcu_read_lock_held() as part of its
condition automatically, so callers do not have to pass it as well.
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
committed by Jiri Kosina
parent eb032b9837
commit d8bf4ca9ca
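For context, the reason the per-caller rcu_read_lock_held() checks below became redundant: after ca5ecddf, rcu_dereference_check(p, c) itself ORs rcu_read_lock_held() into the lockdep condition. The following is a minimal sketch of that definition (a simplified paraphrase of include/linux/rcupdate.h of that era, not part of this patch), followed by a hypothetical caller; example_deref() and example_lock are illustrative names only.

/*
 * Simplified paraphrase of the post-ca5ecddf definition; the real macro
 * expands to a helper that also performs the sparse __rcu-annotated load.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/*
 * Hypothetical caller for illustration: it only needs to name the lock it
 * may hold, because rcu_read_lock_held() is already part of the check.
 */
#define example_deref(p) \
	rcu_dereference_check((p), lockdep_is_held(&example_lock))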
@@ -539,7 +539,6 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
  */
 #define task_subsys_state_check(task, subsys_id, __c)		\
 	rcu_dereference_check(task->cgroups->subsys[subsys_id],	\
-			      rcu_read_lock_held() ||		\
 			      lockdep_is_held(&task->alloc_lock) ||	\
 			      cgroup_lock_is_held() || (__c))
 
@@ -284,7 +284,6 @@ static inline void put_cred(const struct cred *_cred)
 	({							\
 		const struct task_struct *__t = (task);	\
 		rcu_dereference_check(__t->real_cred,		\
-				      rcu_read_lock_held() ||	\
 				      task_is_dead(__t));	\
 	})
 
@@ -60,7 +60,6 @@ struct files_struct {
 
 #define rcu_dereference_check_fdtable(files, fdtfd) \
 	(rcu_dereference_check((fdtfd), \
-			       rcu_read_lock_held() || \
 			       lockdep_is_held(&(files)->file_lock) || \
 			       atomic_read(&(files)->count) == 1 || \
 			       rcu_my_thread_group_empty()))
@@ -758,8 +758,7 @@ extern int lockdep_rtnl_is_held(void);
  * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference()
  */
 #define rcu_dereference_rtnl(p)					\
-	rcu_dereference_check(p, rcu_read_lock_held() ||	\
-				 lockdep_rtnl_is_held())
+	rcu_dereference_check(p, lockdep_rtnl_is_held())
 
 /**
  * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
@@ -1301,8 +1301,7 @@ extern unsigned long sock_i_ino(struct sock *sk);
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
-						       sock_owned_by_user(sk) ||
+	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
 						       lockdep_is_held(&sk->sk_lock.slock));
 }
 
@@ -1697,7 +1697,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 {
 	char *start;
 	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
-						      rcu_read_lock_held() ||
 						      cgroup_lock_is_held());
 
 	if (!dentry || cgrp == dummytop) {
@@ -1723,7 +1722,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 			break;
 
 		dentry = rcu_dereference_check(cgrp->dentry,
-					       rcu_read_lock_held() ||
 					       cgroup_lock_is_held());
 		if (!cgrp->parent)
 			continue;
@@ -4813,8 +4811,7 @@ unsigned short css_id(struct cgroup_subsys_state *css)
 	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
 	 * it's unchanged until freed.
 	 */
-	cssid = rcu_dereference_check(css->id,
-			rcu_read_lock_held() || atomic_read(&css->refcnt));
+	cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->id;
@@ -4826,8 +4823,7 @@ unsigned short css_depth(struct cgroup_subsys_state *css)
 {
 	struct css_id *cssid;
 
-	cssid = rcu_dereference_check(css->id,
-			rcu_read_lock_held() || atomic_read(&css->refcnt));
+	cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->depth;
@@ -85,7 +85,6 @@ static void __exit_signal(struct task_struct *tsk)
 	struct tty_struct *uninitialized_var(tty);
 
 	sighand = rcu_dereference_check(tsk->sighand,
-					rcu_read_lock_held() ||
 					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
@@ -405,7 +405,6 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 	if (pid) {
 		struct hlist_node *first;
 		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
-					      rcu_read_lock_held() ||
 					      lockdep_tasklist_lock_is_held());
 		if (first)
 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
@@ -941,7 +941,6 @@ static void rcu_torture_timer(unsigned long unused)
 	idx = cur_ops->readlock();
 	completed = cur_ops->completed();
 	p = rcu_dereference_check(rcu_torture_current,
-				  rcu_read_lock_held() ||
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
 				  srcu_read_lock_held(&srcu_ctl));
@@ -1002,7 +1001,6 @@ rcu_torture_reader(void *arg)
 	idx = cur_ops->readlock();
 	completed = cur_ops->completed();
 	p = rcu_dereference_check(rcu_torture_current,
-				  rcu_read_lock_held() ||
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
 				  srcu_read_lock_held(&srcu_ctl));
@@ -581,7 +581,6 @@ static inline int cpu_of(struct rq *rq)
 
 #define rcu_dereference_check_sched_domain(p) \
 	rcu_dereference_check((p), \
-			      rcu_read_lock_held() || \
 			      lockdep_is_held(&sched_domains_mutex))
 
 /*
@@ -97,7 +97,6 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    rcu_read_lock_held() ||
 				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
@@ -105,7 +104,6 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    rcu_read_lock_held() ||
 					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
@@ -123,7 +121,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
 	struct sta_info *sta;
 
 	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
-				    rcu_read_lock_held() ||
 				    lockdep_is_held(&local->sta_lock) ||
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
@@ -132,7 +129,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
 		sta = rcu_dereference_check(sta->hnext,
-					    rcu_read_lock_held() ||
 					    lockdep_is_held(&local->sta_lock) ||
 					    lockdep_is_held(&local->sta_mtx));
 	}
@@ -55,8 +55,7 @@ struct netlbl_domhsh_tbl {
  * should be okay */
 static DEFINE_SPINLOCK(netlbl_domhsh_lock);
 #define netlbl_domhsh_rcu_deref(p) \
-	rcu_dereference_check(p, rcu_read_lock_held() || \
-				 lockdep_is_held(&netlbl_domhsh_lock))
+	rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock))
 static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
 static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
 
@@ -116,8 +116,7 @@ struct netlbl_unlhsh_walk_arg {
  * hash table should be okay */
 static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
 #define netlbl_unlhsh_rcu_deref(p) \
-	rcu_dereference_check(p, rcu_read_lock_held() || \
-				 lockdep_is_held(&netlbl_unlhsh_lock))
+	rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock))
 static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
 static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
 
@@ -155,7 +155,6 @@ static void keyring_destroy(struct key *keyring)
 	}
 
 	klist = rcu_dereference_check(keyring->payload.subscriptions,
-				      rcu_read_lock_held() ||
 				      atomic_read(&keyring->usage) == 0);
 	if (klist) {
 		for (loop = klist->nkeys - 1; loop >= 0; loop--)