Commit 9754e293 authored by David S. Miller

net: Don't write to current task flags on every packet received.

Even for non-pfmemalloc SKBs, __netif_receive_skb() will do a
tsk_restore_flags() on current unconditionally.

Make __netif_receive_skb() a shim around the existing code, renamed to
__netif_receive_skb_core().  Let __netif_receive_skb() wrap the
__netif_receive_skb_core() call with the task flag modifications, if
necessary.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba779711
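
For context, tsk_restore_flags() writes current->flags on every call, even when no bit changed, which is the per-packet cost this patch confines to pfmemalloc SKBs. A minimal sketch of the save/modify/restore pattern being moved into the wrapper; the helper body below is an approximation for illustration, not a quote from sched.h:

	/* Restore only the masked bits of task->flags to their saved values. */
	static inline void tsk_restore_flags(struct task_struct *task,
					     unsigned long orig_flags,
					     unsigned long flags)
	{
		task->flags &= ~flags;			/* clear the masked bits */
		task->flags |= orig_flags & flags;	/* put back their saved values */
	}

	/* Usage, as in the new __netif_receive_skb() wrapper: only the
	 * pfmemalloc path saves, sets and restores PF_MEMALLOC; ordinary
	 * packets never touch current->flags at all. */
	unsigned long pflags = current->flags;
	current->flags |= PF_MEMALLOC;
	/* ... receive the packet ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
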
...@@ -3457,7 +3457,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb) ...@@ -3457,7 +3457,7 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
} }
} }
static int __netif_receive_skb(struct sk_buff *skb) static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{ {
struct packet_type *ptype, *pt_prev; struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler; rx_handler_func_t *rx_handler;
...@@ -3466,24 +3466,11 @@ static int __netif_receive_skb(struct sk_buff *skb) ...@@ -3466,24 +3466,11 @@ static int __netif_receive_skb(struct sk_buff *skb)
bool deliver_exact = false; bool deliver_exact = false;
int ret = NET_RX_DROP; int ret = NET_RX_DROP;
__be16 type; __be16 type;
unsigned long pflags = current->flags;
net_timestamp_check(!netdev_tstamp_prequeue, skb); net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb); trace_netif_receive_skb(skb);
/*
* PFMEMALLOC skbs are special, they should
* - be delivered to SOCK_MEMALLOC sockets only
* - stay away from userspace
* - have bounded memory usage
*
* Use PF_MEMALLOC as this saves us from propagating the allocation
* context down to all allocation sites.
*/
if (sk_memalloc_socks() && skb_pfmemalloc(skb))
current->flags |= PF_MEMALLOC;
/* if we've gotten here through NAPI, check netpoll */ /* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb)) if (netpoll_receive_skb(skb))
goto out; goto out;
...@@ -3517,7 +3504,7 @@ static int __netif_receive_skb(struct sk_buff *skb) ...@@ -3517,7 +3504,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
} }
#endif #endif
if (sk_memalloc_socks() && skb_pfmemalloc(skb)) if (pfmemalloc)
goto skip_taps; goto skip_taps;
list_for_each_entry_rcu(ptype, &ptype_all, list) { list_for_each_entry_rcu(ptype, &ptype_all, list) {
...@@ -3536,8 +3523,7 @@ static int __netif_receive_skb(struct sk_buff *skb) ...@@ -3536,8 +3523,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
ncls: ncls:
#endif #endif
if (sk_memalloc_socks() && skb_pfmemalloc(skb) if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
&& !skb_pfmemalloc_protocol(skb))
goto drop; goto drop;
if (vlan_tx_tag_present(skb)) { if (vlan_tx_tag_present(skb)) {
...@@ -3607,7 +3593,31 @@ static int __netif_receive_skb(struct sk_buff *skb) ...@@ -3607,7 +3593,31 @@ static int __netif_receive_skb(struct sk_buff *skb)
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
out: out:
tsk_restore_flags(current, pflags, PF_MEMALLOC); return ret;
}
static int __netif_receive_skb(struct sk_buff *skb)
{
int ret;
if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
unsigned long pflags = current->flags;
/*
* PFMEMALLOC skbs are special, they should
* - be delivered to SOCK_MEMALLOC sockets only
* - stay away from userspace
* - have bounded memory usage
*
* Use PF_MEMALLOC as this saves us from propagating the allocation
* context down to all allocation sites.
*/
current->flags |= PF_MEMALLOC;
ret = __netif_receive_skb_core(skb, true);
tsk_restore_flags(current, pflags, PF_MEMALLOC);
} else
ret = __netif_receive_skb_core(skb, false);
return ret; return ret;
} }