Commit 0693e88e authored by Michał Mirosław, committed by David S. Miller

net: bonding: factor out rlock(bond->lock) in xmit path

Pull read_lock(&bond->lock) and BOND_IS_OK() to bond_start_xmit() from
mode-dependent xmit functions.

netif_running() is always true in hard_start_xmit.
Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 48752e1b
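
In short: each mode-specific xmit handler previously open-coded the same take-lock / validate / unlock sequence, and after this change a single wrapper does it once before dispatching to the mode handler. The sketch below condenses that before/after shape from the diff that follows; bond_xmit_somemode() is a placeholder name, the mode-specific body is elided, and the netpoll guard is left out for brevity.

/* Before: repeated at the top and bottom of every mode's xmit function */
static int bond_xmit_somemode(struct sk_buff *skb, struct net_device *dev)
{
        struct bonding *bond = netdev_priv(dev);
        int res = 1;

        read_lock(&bond->lock);         /* keep the slave list stable during tx */
        if (!BOND_IS_OK(bond))          /* device not up or no slaves */
                goto out;

        /* ... mode-specific slave selection and bond_dev_queue_xmit() ... */
out:
        if (res)
                dev_kfree_skb(skb);     /* no suitable interface, frame not sent */
        read_unlock(&bond->lock);
        return NETDEV_TX_OK;
}

/* After: the common entry point takes the lock once and dispatches */
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bonding *bond = netdev_priv(dev);
        netdev_tx_t ret = NETDEV_TX_OK;

        read_lock(&bond->lock);
        if (bond->slave_cnt)    /* netif_running() is guaranteed here, so only the slave count matters */
                ret = __bond_start_xmit(skb, dev);      /* mode handlers now run with bond->lock held */
        else
                dev_kfree_skb(skb);
        read_unlock(&bond->lock);

        return ret;
}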
drivers/net/bonding/bond_3ad.c

@@ -2403,14 +2403,6 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
         struct ad_info ad_info;
         int res = 1;
 
-        /* make sure that the slaves list will
-         * not change during tx
-         */
-        read_lock(&bond->lock);
-
-        if (!BOND_IS_OK(bond))
-                goto out;
-
         if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
                 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
                          dev->name);
@@ -2464,7 +2456,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                 /* no suitable interface, frame not sent */
                 dev_kfree_skb(skb);
         }
-        read_unlock(&bond->lock);
+
         return NETDEV_TX_OK;
 }
drivers/net/bonding/bond_alb.c

@@ -1225,16 +1225,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
         skb_reset_mac_header(skb);
         eth_data = eth_hdr(skb);
 
-        /* make sure that the curr_active_slave and the slaves list do
-         * not change during tx
+        /* make sure that the curr_active_slave do not change during tx
          */
-        read_lock(&bond->lock);
         read_lock(&bond->curr_slave_lock);
 
-        if (!BOND_IS_OK(bond)) {
-                goto out;
-        }
-
         switch (ntohs(skb->protocol)) {
         case ETH_P_IP: {
                 const struct iphdr *iph = ip_hdr(skb);
@@ -1334,13 +1328,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 }
         }
 
-out:
         if (res) {
                 /* no suitable interface, frame not sent */
                 dev_kfree_skb(skb);
         }
 
         read_unlock(&bond->curr_slave_lock);
-        read_unlock(&bond->lock);
+
         return NETDEV_TX_OK;
 }
drivers/net/bonding/bond_main.c

@@ -4004,10 +4004,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
         int i, slave_no, res = 1;
         struct iphdr *iph = ip_hdr(skb);
 
-        read_lock(&bond->lock);
-
-        if (!BOND_IS_OK(bond))
-                goto out;
         /*
          * Start with the curr_active_slave that joined the bond as the
          * default for sending IGMP traffic. For failover purposes one
@@ -4054,7 +4050,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
                 /* no suitable interface, frame not sent */
                 dev_kfree_skb(skb);
         }
-        read_unlock(&bond->lock);
+
         return NETDEV_TX_OK;
 }
@@ -4068,24 +4064,18 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
         struct bonding *bond = netdev_priv(bond_dev);
         int res = 1;
 
-        read_lock(&bond->lock);
         read_lock(&bond->curr_slave_lock);
 
-        if (!BOND_IS_OK(bond))
-                goto out;
-
-        if (!bond->curr_active_slave)
-                goto out;
-
-        res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
-
-out:
+        if (bond->curr_active_slave)
+                res = bond_dev_queue_xmit(bond, skb,
+                        bond->curr_active_slave->dev);
+
         if (res)
                 /* no suitable interface, frame not sent */
                 dev_kfree_skb(skb);
 
         read_unlock(&bond->curr_slave_lock);
-        read_unlock(&bond->lock);
+
         return NETDEV_TX_OK;
 }
@@ -4102,11 +4092,6 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
         int i;
         int res = 1;
 
-        read_lock(&bond->lock);
-
-        if (!BOND_IS_OK(bond))
-                goto out;
-
         slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
 
         bond_for_each_slave(bond, slave, i) {
@@ -4126,12 +4111,11 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
                 }
         }
 
-out:
         if (res) {
                 /* no suitable interface, frame not sent */
                 dev_kfree_skb(skb);
         }
-        read_unlock(&bond->lock);
+
         return NETDEV_TX_OK;
 }
@@ -4146,11 +4130,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
         int i;
         int res = 1;
 
-        read_lock(&bond->lock);
-
-        if (!BOND_IS_OK(bond))
-                goto out;
-
         read_lock(&bond->curr_slave_lock);
         start_at = bond->curr_active_slave;
         read_unlock(&bond->curr_slave_lock);
@@ -4189,7 +4168,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
                 dev_kfree_skb(skb);
 
         /* frame sent to all suitable interfaces */
-        read_unlock(&bond->lock);
         return NETDEV_TX_OK;
 }
@@ -4221,10 +4199,8 @@ static inline int bond_slave_override(struct bonding *bond,
         struct slave *slave = NULL;
         struct slave *check_slave;
 
-        read_lock(&bond->lock);
-
-        if (!BOND_IS_OK(bond) || !skb->queue_mapping)
-                goto out;
+        if (!skb->queue_mapping)
+                return 1;
 
         /* Find out if any slaves have the same mapping as this skb. */
         bond_for_each_slave(bond, check_slave, i) {
@@ -4240,8 +4216,6 @@ static inline int bond_slave_override(struct bonding *bond,
                 res = bond_dev_queue_xmit(bond, skb, slave->dev);
         }
 
-out:
-        read_unlock(&bond->lock);
         return res;
 }
@@ -4263,17 +4237,10 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
         return txq;
 }
 
-static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct bonding *bond = netdev_priv(dev);
 
-        /*
-         * If we risk deadlock from transmitting this in the
-         * netpoll path, tell netpoll to queue the frame for later tx
-         */
-        if (is_netpoll_tx_blocked(dev))
-                return NETDEV_TX_BUSY;
-
         if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
                 if (!bond_slave_override(bond, skb))
                         return NETDEV_TX_OK;
@@ -4303,6 +4270,29 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
         }
 }
 
+static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+        struct bonding *bond = netdev_priv(dev);
+        netdev_tx_t ret = NETDEV_TX_OK;
+
+        /*
+         * If we risk deadlock from transmitting this in the
+         * netpoll path, tell netpoll to queue the frame for later tx
+         */
+        if (is_netpoll_tx_blocked(dev))
+                return NETDEV_TX_BUSY;
+
+        read_lock(&bond->lock);
+
+        if (bond->slave_cnt)
+                ret = __bond_start_xmit(skb, dev);
+        else
+                dev_kfree_skb(skb);
+
+        read_unlock(&bond->lock);
+
+        return ret;
+}
+
 /*
  * set bond mode specific net device operations