xps: add __rcu annotations

Avoid sparse warnings: add __rcu annotations and use
rcu_dereference_protected() where necessary.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2010-11-28 21:43:02 +00:00 committed by David S. Miller
parent b02038a17b
commit a417786948
2 changed files with 17 additions and 11 deletions

View File

@ -622,7 +622,7 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
struct xps_map *cpu_map[0];
struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * sizeof(struct xps_map *)))
@ -1049,7 +1049,7 @@ struct net_device {
spinlock_t tx_global_lock;
#ifdef CONFIG_XPS
struct xps_dev_maps *xps_maps;
struct xps_dev_maps __rcu *xps_maps;
#endif
/* These may be needed for future network-power-down code. */

View File

@ -899,6 +899,8 @@ static void xps_dev_maps_release(struct rcu_head *rcu)
}
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P) \
rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static ssize_t store_xps_map(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute,
@ -935,11 +937,12 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
mutex_lock(&xps_map_mutex);
dev_maps = dev->xps_maps;
dev_maps = xmap_dereference(dev->xps_maps);
for_each_possible_cpu(cpu) {
new_map = map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
map = dev_maps ?
xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
new_map = map;
if (map) {
for (pos = 0; pos < map->len; pos++)
if (map->queues[pos] == index)
@ -975,13 +978,14 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
else
new_map = NULL;
}
new_dev_maps->cpu_map[cpu] = new_map;
RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
}
/* Cleanup old maps */
for_each_possible_cpu(cpu) {
map = dev_maps ? dev_maps->cpu_map[cpu] : NULL;
if (map && new_dev_maps->cpu_map[cpu] != map)
map = dev_maps ?
xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
call_rcu(&map->rcu, xps_map_release);
if (new_dev_maps->cpu_map[cpu])
nonempty = 1;
@ -1007,7 +1011,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
if (new_dev_maps)
for_each_possible_cpu(i)
kfree(new_dev_maps->cpu_map[i]);
kfree(rcu_dereference_protected(
new_dev_maps->cpu_map[i],
1));
kfree(new_dev_maps);
free_cpumask_var(mask);
return -ENOMEM;
@ -1033,11 +1039,11 @@ static void netdev_queue_release(struct kobject *kobj)
index = get_netdev_queue_index(queue);
mutex_lock(&xps_map_mutex);
dev_maps = dev->xps_maps;
dev_maps = xmap_dereference(dev->xps_maps);
if (dev_maps) {
for_each_possible_cpu(i) {
map = dev_maps->cpu_map[i];
map = xmap_dereference(dev_maps->cpu_map[i]);
if (!map)
continue;