Commit 508c461c authored by David S. Miller

Merge branch 'net-Update-static-keys-to-modern-api'

Davidlohr Bueso says:

====================
net: Update static keys to modern api

The following patches update pretty much all core net static key users
to the modern API. The changes are mostly trivial conversions that do not
affect any semantics. The motivation is a resend of patches 1 and 2 from a
while back [1]; the rest are added patches, specific to -net.

Applies against today's linux-next. Compile tested only.

[1] lkml.kernel.org/r/20180326210929.5244-1-dave@stgolabs.net
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5bafeb6e 88ab3108
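For context, here is a minimal, self-contained sketch of the conversion pattern the series applies throughout the diff below. The key names example_needed/example_needed_key and the slow_path() stub are illustrative only and do not appear in these patches:

#include <linux/cache.h>
#include <linux/jump_label.h>

static void slow_path(void) { }			/* stub standing in for rarely needed work */

/* Old API: open-coded struct static_key with static_key_*() helpers. */
static struct static_key example_needed __read_mostly;

static void old_style_fast_path(void)
{
	if (static_key_false(&example_needed))	/* stays out of line until the key is enabled */
		slow_path();
}

static void old_style_writer(void)
{
	static_key_slow_inc(&example_needed);	/* reference-counted enable */
	static_key_slow_dec(&example_needed);	/* matching disable */
}

/* Modern API: DEFINE_STATIC_KEY_FALSE() with static_branch_*() helpers. */
static DEFINE_STATIC_KEY_FALSE(example_needed_key);

static void new_style_fast_path(void)
{
	if (static_branch_unlikely(&example_needed_key))
		slow_path();
}

static void new_style_writer(void)
{
	static_branch_inc(&example_needed_key);	/* same semantics as static_key_slow_inc() */
	static_branch_dec(&example_needed_key);	/* same semantics as static_key_slow_dec() */
}

static_key_enable()/static_key_disable() map to static_branch_enable()/static_branch_disable() in the same way; that is the full extent of the conversions below.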
@@ -477,12 +477,12 @@ static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstat
 	return (struct ip_tunnel_info *)lwtstate->data;
 }
-extern struct static_key ip_tunnel_metadata_cnt;
+DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
 /* Returns > 0 if metadata should be collected */
 static inline int ip_tunnel_collect_metadata(void)
 {
-	return static_key_false(&ip_tunnel_metadata_cnt);
+	return static_branch_unlikely(&ip_tunnel_metadata_cnt);
 }
 void __init ip_tunnel_core_init(void);
@@ -808,10 +808,10 @@ static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
 }
 #ifdef CONFIG_NET
-extern struct static_key memalloc_socks;
+DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
 static inline int sk_memalloc_socks(void)
 {
-	return static_key_false(&memalloc_socks);
+	return static_branch_unlikely(&memalloc_socks_key);
 }
 #else
@@ -1755,38 +1755,38 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 EXPORT_SYMBOL(call_netdevice_notifiers);
 #ifdef CONFIG_NET_INGRESS
-static struct static_key ingress_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
 void net_inc_ingress_queue(void)
 {
-	static_key_slow_inc(&ingress_needed);
+	static_branch_inc(&ingress_needed_key);
 }
 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
 void net_dec_ingress_queue(void)
 {
-	static_key_slow_dec(&ingress_needed);
+	static_branch_dec(&ingress_needed_key);
 }
 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
 #endif
 #ifdef CONFIG_NET_EGRESS
-static struct static_key egress_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
 void net_inc_egress_queue(void)
 {
-	static_key_slow_inc(&egress_needed);
+	static_branch_inc(&egress_needed_key);
 }
 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
 void net_dec_egress_queue(void)
 {
-	static_key_slow_dec(&egress_needed);
+	static_branch_dec(&egress_needed_key);
 }
 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 #endif
-static struct static_key netstamp_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
 #ifdef HAVE_JUMP_LABEL
 static atomic_t netstamp_needed_deferred;
 static atomic_t netstamp_wanted;
@@ -1797,9 +1797,9 @@ static void netstamp_clear(struct work_struct *work)
 	wanted = atomic_add_return(deferred, &netstamp_wanted);
 	if (wanted > 0)
-		static_key_enable(&netstamp_needed);
+		static_branch_enable(&netstamp_needed_key);
 	else
-		static_key_disable(&netstamp_needed);
+		static_branch_disable(&netstamp_needed_key);
 }
 static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
@@ -1819,7 +1819,7 @@ void net_enable_timestamp(void)
 	atomic_inc(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
-	static_key_slow_inc(&netstamp_needed);
+	static_branch_inc(&netstamp_needed_key);
 #endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1839,7 +1839,7 @@ void net_disable_timestamp(void)
 	atomic_dec(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
-	static_key_slow_dec(&netstamp_needed);
+	static_branch_dec(&netstamp_needed_key);
 #endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
@@ -1847,15 +1847,15 @@ EXPORT_SYMBOL(net_disable_timestamp);
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp = 0;
-	if (static_key_false(&netstamp_needed))
+	if (static_branch_unlikely(&netstamp_needed_key))
 		__net_timestamp(skb);
 }
-#define net_timestamp_check(COND, SKB)			\
-	if (static_key_false(&netstamp_needed)) {	\
-		if ((COND) && !(SKB)->tstamp)		\
-			__net_timestamp(SKB);		\
-	}						\
+#define net_timestamp_check(COND, SKB)				\
+	if (static_branch_unlikely(&netstamp_needed_key)) {	\
+		if ((COND) && !(SKB)->tstamp)			\
+			__net_timestamp(SKB);			\
+	}							\
 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
 {
@@ -3532,7 +3532,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_at_ingress = 0;
 # ifdef CONFIG_NET_EGRESS
-	if (static_key_false(&egress_needed)) {
+	if (static_branch_unlikely(&egress_needed_key)) {
 		skb = sch_handle_egress(skb, &rc, dev);
 		if (!skb)
 			goto out;
@@ -4154,7 +4154,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
 }
 EXPORT_SYMBOL_GPL(generic_xdp_tx);
-static struct static_key generic_xdp_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
 {
@@ -4194,7 +4194,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 	trace_netif_rx(skb);
-	if (static_key_false(&generic_xdp_needed)) {
+	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 		int ret;
 		preempt_disable();
@@ -4566,7 +4566,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 skip_taps:
 #ifdef CONFIG_NET_INGRESS
-	if (static_key_false(&ingress_needed)) {
+	if (static_branch_unlikely(&ingress_needed_key)) {
 		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
 			goto out;
@@ -4726,9 +4726,9 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 			bpf_prog_put(old);
 		if (old && !new) {
-			static_key_slow_dec(&generic_xdp_needed);
+			static_branch_dec(&generic_xdp_needed_key);
 		} else if (new && !old) {
-			static_key_slow_inc(&generic_xdp_needed);
+			static_branch_inc(&generic_xdp_needed_key);
 			dev_disable_lro(dev);
 			dev_disable_gro_hw(dev);
 		}
@@ -4756,7 +4756,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
-	if (static_key_false(&generic_xdp_needed)) {
+	if (static_branch_unlikely(&generic_xdp_needed_key)) {
 		int ret;
 		preempt_disable();
@@ -327,8 +327,8 @@ EXPORT_SYMBOL(sysctl_optmem_max);
 int sysctl_tstamp_allow_data __read_mostly = 1;
-struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
-EXPORT_SYMBOL_GPL(memalloc_socks);
+DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
+EXPORT_SYMBOL_GPL(memalloc_socks_key);
 /**
  * sk_set_memalloc - sets %SOCK_MEMALLOC
@@ -342,7 +342,7 @@ void sk_set_memalloc(struct sock *sk)
 {
 	sock_set_flag(sk, SOCK_MEMALLOC);
 	sk->sk_allocation |= __GFP_MEMALLOC;
-	static_key_slow_inc(&memalloc_socks);
+	static_branch_inc(&memalloc_socks_key);
 }
 EXPORT_SYMBOL_GPL(sk_set_memalloc);
@@ -350,7 +350,7 @@ void sk_clear_memalloc(struct sock *sk)
 {
 	sock_reset_flag(sk, SOCK_MEMALLOC);
 	sk->sk_allocation &= ~__GFP_MEMALLOC;
-	static_key_slow_dec(&memalloc_socks);
+	static_branch_dec(&memalloc_socks_key);
 	/*
 	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
@@ -423,17 +423,17 @@ void __init ip_tunnel_core_init(void)
 	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
 }
-struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
 EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
 void ip_tunnel_need_metadata(void)
 {
-	static_key_slow_inc(&ip_tunnel_metadata_cnt);
+	static_branch_inc(&ip_tunnel_metadata_cnt);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
 void ip_tunnel_unneed_metadata(void)
 {
-	static_key_slow_dec(&ip_tunnel_metadata_cnt);
+	static_branch_dec(&ip_tunnel_metadata_cnt);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
@@ -1875,10 +1875,10 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
-static struct static_key udp_encap_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
 void udp_encap_enable(void)
 {
-	static_key_enable(&udp_encap_needed);
+	static_branch_enable(&udp_encap_needed_key);
 }
 EXPORT_SYMBOL(udp_encap_enable);
@@ -1902,7 +1902,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	nf_reset(skb);
-	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 		/*
@@ -2365,7 +2365,7 @@ void udp_destroy_sock(struct sock *sk)
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
-	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
 		void (*encap_destroy)(struct sock *sk);
 		encap_destroy = READ_ONCE(up->encap_destroy);
 		if (encap_destroy)
@@ -546,10 +546,10 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }
-static struct static_key udpv6_encap_needed __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
 void udpv6_encap_enable(void)
 {
-	static_key_enable(&udpv6_encap_needed);
+	static_branch_enable(&udpv6_encap_needed_key);
 }
 EXPORT_SYMBOL(udpv6_encap_enable);
@@ -561,7 +561,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto drop;
-	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 		/*
@@ -1427,7 +1427,7 @@ void udpv6_destroy_sock(struct sock *sk)
 	udp_v6_flush_pending_frames(sk);
 	release_sock(sk);
-	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
 		void (*encap_destroy)(struct sock *sk);
 		encap_destroy = READ_ONCE(up->encap_destroy);
 		if (encap_destroy)