Commit dc05360f authored by Eric Dumazet, committed by David S. Miller

net: convert rps_needed and rfs_needed to new static branch api

We prefer static_branch_unlikely() over static_key_false() these days.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7c1508e5
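
The conversion itself is mechanical. For readers unfamiliar with the two APIs: the old interface used a bare struct static_key tested with static_key_false(), while the new one encodes the key's default value in its type (struct static_key_false), so the test site is type-checked and reads naturally. A minimal sketch of the before/after pattern; the key names and do_rare_path() below are illustrative, not part of this patch:

#include <linux/jump_label.h>

static void do_rare_path(void) { }      /* illustrative stub */

/* Old style: default-off key; the test spells "static_key_false" even
 * though it asks "is the key currently enabled?".
 */
static struct static_key demo_key_old = STATIC_KEY_INIT_FALSE;

/* New style: DEFINE_STATIC_KEY_FALSE() declares a struct static_key_false,
 * matching the extern declarations this patch puts in netdevice.h.
 */
static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_fast_path(void)
{
        if (static_key_false(&demo_key_old))    /* old API */
                do_rare_path();

        if (static_branch_unlikely(&demo_key))  /* new API */
                do_rare_path();
}
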
drivers/net/tun.c
@@ -1042,7 +1042,7 @@ static int tun_net_close(struct net_device *dev)
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-        if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+        if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
                 /* Select queue was not called for the skbuff, so we extract the
                  * RPS hash and save it into the flow_table here.
                  */
include/linux/netdevice.h
@@ -194,8 +194,8 @@ struct net_device_stats {
 
 #ifdef CONFIG_RPS
 #include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
 #endif
 
 struct neighbour;
include/net/sock.h
@@ -966,7 +966,7 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
-        if (static_key_false(&rfs_needed)) {
+        if (static_branch_unlikely(&rfs_needed)) {
                 /* Reading sk->sk_rxhash might incur an expensive cache line
                  * miss.
                  *
net/core/dev.c
@@ -3982,9 +3982,9 @@ EXPORT_SYMBOL(rps_sock_flow_table);
 u32 rps_cpu_mask __read_mostly;
 EXPORT_SYMBOL(rps_cpu_mask);
 
-struct static_key rps_needed __read_mostly;
+struct static_key_false rps_needed __read_mostly;
 EXPORT_SYMBOL(rps_needed);
-struct static_key rfs_needed __read_mostly;
+struct static_key_false rfs_needed __read_mostly;
 EXPORT_SYMBOL(rfs_needed);
 
 static struct rps_dev_flow *
@@ -4510,7 +4510,7 @@ static int netif_rx_internal(struct sk_buff *skb)
         }
 
 #ifdef CONFIG_RPS
-        if (static_key_false(&rps_needed)) {
+        if (static_branch_unlikely(&rps_needed)) {
                 struct rps_dev_flow voidflow, *rflow = &voidflow;
                 int cpu;
 
@@ -5179,7 +5179,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
         rcu_read_lock();
 #ifdef CONFIG_RPS
-        if (static_key_false(&rps_needed)) {
+        if (static_branch_unlikely(&rps_needed)) {
                 struct rps_dev_flow voidflow, *rflow = &voidflow;
                 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -5227,7 +5227,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
         rcu_read_lock();
 #ifdef CONFIG_RPS
-        if (static_key_false(&rps_needed)) {
+        if (static_branch_unlikely(&rps_needed)) {
                 list_for_each_entry_safe(skb, next, head, list) {
                         struct rps_dev_flow voidflow, *rflow = &voidflow;
                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
net/core/net-sysfs.c
@@ -754,9 +754,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
         rcu_assign_pointer(queue->rps_map, map);
 
         if (map)
-                static_key_slow_inc(&rps_needed);
+                static_branch_inc(&rps_needed);
         if (old_map)
-                static_key_slow_dec(&rps_needed);
+                static_branch_dec(&rps_needed);
         mutex_unlock(&rps_map_mutex);
net/core/sysctl_net_core.c
@@ -95,12 +95,12 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                 if (sock_table != orig_sock_table) {
                         rcu_assign_pointer(rps_sock_flow_table, sock_table);
                         if (sock_table) {
-                                static_key_slow_inc(&rps_needed);
-                                static_key_slow_inc(&rfs_needed);
+                                static_branch_inc(&rps_needed);
+                                static_branch_inc(&rfs_needed);
                         }
                         if (orig_sock_table) {
-                                static_key_slow_dec(&rps_needed);
-                                static_key_slow_dec(&rfs_needed);
+                                static_branch_dec(&rps_needed);
+                                static_branch_dec(&rfs_needed);
                                 synchronize_rcu();
                                 vfree(orig_sock_table);
                         }
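
The control-path helpers keep their reference-counted semantics under the new names: static_branch_inc() patches the branch in on the 0 -> 1 transition and static_branch_dec() patches it out on 1 -> 0, so each configured RPS map and the sock flow table can hold an independent reference, exactly as static_key_slow_inc()/static_key_slow_dec() did. A minimal sketch of that pattern; demo_needed and the demo_* functions are illustrative, not kernel symbols:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_needed);    /* illustrative key */

/* Mirrors store_rps_map() above: take a reference when a map is
 * installed, drop one when a map is removed. The jump label is only
 * rewritten when the reference count crosses zero.
 */
static void demo_update_map(bool new_map, bool had_old_map)
{
        if (new_map)
                static_branch_inc(&demo_needed);
        if (had_old_map)
                static_branch_dec(&demo_needed);
}

static void demo_rx_fast_path(void)
{
        if (static_branch_unlikely(&demo_needed))
                ;       /* packet-steering work would happen here */
}
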