Commit 7c169445 authored by David S. Miller

Merge branch 'netpoll-next'

Eric W. Biederman says:

====================
netpoll: Cleanup received packet processing

This is the long-winded, careful, and polite version of removing netpoll
receive packet processing.

First I untangle the code in small steps.  Then I modify the code to not
force reception and dropping of packets when we are transmitting a packet
with netpoll.  Finally I move all of the packet reception under
CONFIG_NETPOLL_TRAP and delete CONFIG_NETPOLL_TRAP.

If someone wants to do a stable backport of these patches, it would
require backporting the first 18 patches that handle budget == 0 in
the networking drivers, and the first 6 of these patches.

If anyone wants to resurrect netpoll packet reception someday it should
just be a matter of reverting the last patch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e86e180b 9c62a68d
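A note on the budget == 0 prerequisite mentioned above: netpoll polls a driver's
NAPI handler with a budget of zero so that only tx completions are reclaimed and
no packets are handed up the stack. Below is a minimal sketch of what that
convention looks like in a driver poll routine; the example_* identifiers are
hypothetical and only illustrate the pattern, they are not part of this series.

/* Hypothetical NAPI poll routine showing the budget == 0 convention the
 * backport note above depends on.  All example_* names are made up for
 * illustration; only the budget handling matters here.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter =
		container_of(napi, struct example_adapter, napi);
	int work_done = 0;

	/* Reclaiming completed tx descriptors is always safe, even when
	 * netpoll calls us with budget == 0.
	 */
	example_clean_tx_ring(adapter);

	/* Receive processing only when a real budget was granted; with
	 * budget == 0 (netpoll) no packets may be delivered to the stack.
	 */
	if (budget)
		work_done = example_clean_rx_ring(adapter, budget);

	/* With budget == 0, work_done is 0 and this test is false, so we
	 * never call napi_complete() from a netpoll-initiated poll.
	 */
	if (work_done < budget) {
		napi_complete(napi);
		example_enable_irq(adapter);
	}

	return work_done;
}

The first 18 patches referenced above make the in-tree drivers follow this
pattern, so netpoll can flush tx completions without forcing packet reception.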
@@ -177,11 +177,6 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
 	def_bool NETCONSOLE
 
-config NETPOLL_TRAP
-	bool "Netpoll traffic trapping"
-	default n
-	depends on NETPOLL
-
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
@@ -1979,9 +1979,6 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-#ifdef CONFIG_NETPOLL_TRAP
-int netpoll_trap(void);
-#endif
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
@@ -2186,12 +2183,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap()) {
-		netif_tx_start_queue(dev_queue);
-		return;
-	}
-#endif
 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 		__netif_schedule(dev_queue->qdisc);
 }
@@ -2435,10 +2426,6 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
-		return;
-#endif
 	netif_tx_stop_queue(txq);
 }
@@ -2473,10 +2460,6 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
-		return;
-#endif
 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 		__netif_schedule(txq->qdisc);
 }
@@ -24,27 +24,20 @@ struct netpoll {
 	struct net_device *dev;
 	char dev_name[IFNAMSIZ];
 	const char *name;
-	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
-			    int offset, int len);
 
 	union inet_addr local_ip, remote_ip;
 	bool ipv6;
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
 
-	struct list_head rx; /* rx_np list element */
 	struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
 	atomic_t refcnt;
 
-	unsigned long rx_flags;
-	spinlock_t rx_lock;
 	struct semaphore dev_lock;
-	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
 
-	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
 	struct sk_buff_head txq;
 
 	struct delayed_work tx_work;
@@ -66,12 +59,9 @@ void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
 int netpoll_setup(struct netpoll *np);
-int netpoll_trap(void);
-void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -82,46 +72,7 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_NETPOLL
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-
-	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
-}
-
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-	struct netpoll_info *npinfo;
-	unsigned long flags;
-	bool ret = false;
-
-	local_irq_save(flags);
-
-	if (!netpoll_rx_on(skb))
-		goto out;
-
-	npinfo = rcu_dereference_bh(skb->dev->npinfo);
-	spin_lock(&npinfo->rx_lock);
-	/* check rx_flags again with the lock held */
-	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
-		ret = true;
-	spin_unlock(&npinfo->rx_lock);
-
-out:
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-	if (!list_empty(&skb->dev->napi_list))
-		return netpoll_rx(skb);
-	return 0;
-}
-
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
@@ -150,18 +101,6 @@ static inline bool netpoll_tx_running(struct net_device *dev)
 }
 
 #else
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-	return false;
-}
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-	return false;
-}
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-	return 0;
-}
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
 	return NULL;
@@ -3231,10 +3231,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	/* if netpoll wants it, pretend we never saw it */
-	if (netpoll_rx(skb))
-		return NET_RX_DROP;
-
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
@@ -3520,10 +3516,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	trace_netif_receive_skb(skb);
 
-	/* if we've gotten here through NAPI, check netpoll */
-	if (netpoll_receive_skb(skb))
-		goto out;
-
 	orig_dev = skb->dev;
 
 	skb_reset_network_header(skb);
@@ -3650,7 +3642,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 unlock:
 	rcu_read_unlock();
-out:
 	return ret;
 }
@@ -3875,7 +3866,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	int same_flow;
 	enum gro_result ret;
 
-	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
+	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
(The remaining diff in this merge is collapsed.)