Commit 6e3f7faf authored by Eric Dumazet, committed by David S. Miller

rps: add __rcu annotations

Add __rcu annotations to:
	(struct netdev_rx_queue)->rps_map
	(struct netdev_rx_queue)->rps_flow_table
	struct rps_sock_flow_table *rps_sock_flow_table;

And use appropriate rcu primitives.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f6318e55
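
The diff below follows the usual RCU pattern: the shared pointer is annotated __rcu so sparse can flag plain loads and stores, readers fetch it with rcu_dereference() inside an RCU read-side critical section, updaters holding the update-side lock use rcu_dereference_protected() with a lockdep_is_held() condition and publish the new value with rcu_assign_pointer(), and teardown paths that cannot name a protecting lock use rcu_dereference_raw(). As a rough illustration of that pattern (a sketch, not code from this patch; struct foo, foo_ptr, foo_lock and foo_release() are made-up names):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        int val;
        struct rcu_head rcu;
};

static struct foo __rcu *foo_ptr;       /* only accessed through RCU accessors */
static DEFINE_SPINLOCK(foo_lock);       /* update-side lock */

/* Reader: RCU read-side critical section, rcu_dereference(). */
static int foo_read(void)
{
        struct foo *p;
        int val = -1;

        rcu_read_lock();
        p = rcu_dereference(foo_ptr);
        if (p)
                val = p->val;
        rcu_read_unlock();
        return val;
}

/* RCU callback: free the old object once all readers are done with it. */
static void foo_release(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo, rcu));
}

/* Updater: foo_lock is held, so rcu_dereference_protected() documents
 * (and lets lockdep verify) why a plain load of foo_ptr is safe here;
 * rcu_assign_pointer() publishes the replacement to readers.
 */
static void foo_update(struct foo *newp)
{
        struct foo *old;

        spin_lock(&foo_lock);
        old = rcu_dereference_protected(foo_ptr, lockdep_is_held(&foo_lock));
        rcu_assign_pointer(foo_ptr, newp);
        spin_unlock(&foo_lock);

        if (old)
                call_rcu(&old->rcu, foo_release);
}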
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 	table->ents[hash & table->mask] = RPS_NO_CPU;
 }
 
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
-	struct rps_map *rps_map;
-	struct rps_dev_flow_table *rps_flow_table;
+	struct rps_map __rcu *rps_map;
+	struct rps_dev_flow_table __rcu *rps_flow_table;
 	struct kobject kobj;
 	struct netdev_rx_queue *first;
 	atomic_t count;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */

@@ -2413,7 +2413,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*

@@ -2425,7 +2425,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		       struct rps_dev_flow **rflowp)
 {
 	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map = NULL;
+	struct rps_map *map;
 	struct rps_dev_flow_table *flow_table;
 	struct rps_sock_flow_table *sock_flow_table;
 	int cpu = -1;

@@ -2444,15 +2444,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	} else
 		rxqueue = dev->_rx;
 
-	if (rxqueue->rps_map) {
-		map = rcu_dereference(rxqueue->rps_map);
-		if (map && map->len == 1) {
+	map = rcu_dereference(rxqueue->rps_map);
+	if (map) {
+		if (map->len == 1) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
 			goto done;
 		}
-	} else if (!rxqueue->rps_flow_table) {
+	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
 		goto done;
 	}

@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	}
 
 	spin_lock(&rps_map_lock);
-	old_map = queue->rps_map;
+	old_map = rcu_dereference_protected(queue->rps_map,
+					    lockdep_is_held(&rps_map_lock));
 	rcu_assign_pointer(queue->rps_map, map);
 	spin_unlock(&rps_map_lock);

@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		table = NULL;
 
 	spin_lock(&rps_dev_flow_lock);
-	old_table = queue->rps_flow_table;
+	old_table = rcu_dereference_protected(queue->rps_flow_table,
+					      lockdep_is_held(&rps_dev_flow_lock));
 	rcu_assign_pointer(queue->rps_flow_table, table);
 	spin_unlock(&rps_dev_flow_lock);

@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
 {
 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
 	struct netdev_rx_queue *first = queue->first;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
 
-	if (queue->rps_map)
-		call_rcu(&queue->rps_map->rcu, rps_map_release);
 
-	if (queue->rps_flow_table)
-		call_rcu(&queue->rps_flow_table->rcu,
-			 rps_dev_flow_table_release);
+	map = rcu_dereference_raw(queue->rps_map);
+	if (map)
+		call_rcu(&map->rcu, rps_map_release);
+
+	flow_table = rcu_dereference_raw(queue->rps_flow_table);
+	if (flow_table)
+		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
 
 	if (atomic_dec_and_test(&first->count))
 		kfree(first);

@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
 		mutex_lock(&sock_flow_mutex);
 
-		orig_sock_table = rps_sock_flow_table;
+		orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+					lockdep_is_held(&sock_flow_mutex));
 		size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
 
 		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);