Commit fec6e49b authored by Alexander Lobakin, committed by David S. Miller

skbuff: remove __kfree_skb_flush()

This function isn't really needed: the NAPI skb cache gets bulk-freed anyway
once it runs out of room, and flushing it early may even reduce the efficiency
of the bulk operations.
It will be needed even less once the allocation path starts reusing the skb
cache, so remove it and lighten network softirqs a bit.
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f9d6725b
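For context, below is a simplified sketch of the per-CPU cache and the defer path that make the explicit flush redundant. It mirrors _kfree_skb_defer() in net/core/skbuff.c at this point in the series, but it is an illustration, not the exact upstream code: the page-fragment members of struct napi_alloc_cache and the CONFIG_SLUB prefetch are omitted. skb heads are parked in the per-CPU cache and bulk-freed as soon as the cache fills up, so a separate __kfree_skb_flush() at the end of every softirq round only cuts those batches short; with the call gone, net_rx_action() also no longer needs its out: label and can simply return.

/*
 * Sketch only: simplified view of the NAPI skb cache and its defer path.
 * Names follow net/core/skbuff.c (NAPI_SKB_CACHE_SIZE, napi_alloc_cache,
 * skbuff_head_cache), but fields and branches not relevant here are left out.
 */
#define NAPI_SKB_CACHE_SIZE     64

struct napi_alloc_cache {
        /* page-fragment caches omitted in this sketch */
        unsigned int    skb_count;
        void            *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* release data, frags and destructors, keep only the skbuff_head */
        skb_release_all(skb);

        /* park the head in the per-CPU cache */
        nc->skb_cache[nc->skb_count++] = skb;

        /*
         * Once the cache is full it is drained with one bulk free right
         * here, so flushing it again at the end of the softirq only
         * produces smaller, less efficient batches.
         */
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}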
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2919,7 +2919,6 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
 }
 void napi_consume_skb(struct sk_buff *skb, int budget);
 
-void __kfree_skb_flush(void);
 void __kfree_skb_defer(struct sk_buff *skb);
 
 /**
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4944,8 +4944,6 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                         else
                                 __kfree_skb_defer(skb);
                 }
-
-                __kfree_skb_flush();
         }
 
         if (sd->output_queue) {
@@ -7012,7 +7010,6 @@ static int napi_threaded_poll(void *data)
                         __napi_poll(napi, &repoll);
                         netpoll_poll_unlock(have);
 
-                        __kfree_skb_flush();
                         local_bh_enable();
 
                         if (!repoll)
@@ -7042,7 +7039,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 
                 if (list_empty(&list)) {
                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
-                                goto out;
+                                return;
                         break;
                 }
 
@@ -7069,8 +7066,6 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
         net_rps_action_and_irq_enable(sd);
-out:
-        __kfree_skb_flush();
 }
 
 struct netdev_adjacent {
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -838,18 +838,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
         kfree_skbmem(skb);
 }
 
-void __kfree_skb_flush(void)
-{
-        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-        /* flush skb_cache if containing objects */
-        if (nc->skb_count) {
-                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-                                     nc->skb_cache);
-                nc->skb_count = 0;
-        }
-}
-
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
         struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);