Commit 8801d48c authored by David S. Miller

Merge branch 'bonding-next'

Nikolay Aleksandrov says:

====================
bonding: get rid of curr_slave_lock

This is the second patch-set dealing with bond locking; the purpose here
is to convert curr_slave_lock into a spinlock called "mode_lock" which the
various modes can use for their specific needs. The first three patches
clean up the use of curr_slave_lock and prepare it for the conversion,
which is done in patch 4; the modes that were using their own locks are
then converted to the new "mode_lock", giving us the opportunity to
remove their locks.
This patch-set has been tested in each mode by running enslave/release of
slaves in parallel with traffic transmission and with miimon=1, i.e. running
all the time. In fact this led to the discovery of a subtle bug related to
RCU, which will be fixed in -net.
Also did an allmodconfig test just in case :-)

v2: fix bond_3ad_state_machine_handler's use of mode_lock and
    curr_slave_lock
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b25bd251 8c0bc550
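
The conversion the diff below performs is easiest to read as a locking-pattern swap: the bond-wide rwlock curr_slave_lock disappears, writers of the slave set and of curr_active_slave rely on RTNL alone, and mode-private data moves under a single per-bond spinlock. A minimal before/after sketch, with trimmed structs and hypothetical function names rather than the driver's actual code:

#include <linux/spinlock.h>
#include <linux/rtnetlink.h>

/* Illustrative structs; the real ones live in drivers/net/bonding/bonding.h. */
struct bond_before {
	rwlock_t curr_slave_lock;	/* old: guarded curr_active_slave */
};

struct bond_after {
	spinlock_t mode_lock;		/* new: one lock for mode-private state */
};

/* Old shape: failover paths write-locked curr_slave_lock with BHs off. */
static void failover_before(struct bond_before *bond)
{
	write_lock_bh(&bond->curr_slave_lock);
	/* ... swap curr_active_slave, touch mode-private data ... */
	write_unlock_bh(&bond->curr_slave_lock);
}

/* New shape: RTNL serializes the slave set; mode_lock covers mode data. */
static void failover_after(struct bond_after *bond)
{
	ASSERT_RTNL();
	spin_lock_bh(&bond->mode_lock);
	/* ... touch mode-private data (3ad ports, alb hash tables) ... */
	spin_unlock_bh(&bond->mode_lock);
}

The _bh variants matter because the same data is also touched from softirq context, as the bond_3ad.c and bond_alb.c hunks below show.
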
@@ -233,24 +233,6 @@ static inline int __check_agg_selection_timer(struct port *port)
return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
}
/**
* __get_state_machine_lock - lock the port's state machines
* @port: the port we're looking at
*/
static inline void __get_state_machine_lock(struct port *port)
{
spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
* __release_state_machine_lock - unlock the port's state machines
* @port: the port we're looking at
*/
static inline void __release_state_machine_lock(struct port *port)
{
spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
* __get_link_speed - get a port's speed
* @port: the port we're looking at
@@ -341,16 +323,6 @@ static u8 __get_duplex(struct port *port)
return retval;
}
/**
* __initialize_port_locks - initialize a port's STATE machine spinlock
* @port: the slave of the port we're looking at
*/
static inline void __initialize_port_locks(struct slave *slave)
{
/* make sure it isn't called twice */
spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
}
/* Conversions */
/**
@@ -1843,7 +1815,6 @@ void bond_3ad_bind_slave(struct slave *slave)
ad_initialize_port(port, bond->params.lacp_fast);
__initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
/* key is determined according to the link speed, duplex and user key(which
@@ -1899,6 +1870,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct slave *slave_iter;
struct list_head *iter;
/* Sync against bond_3ad_state_machine_handler() */
spin_lock_bh(&bond->mode_lock);
aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
port = &(SLAVE_AD_INFO(slave)->port);
@@ -1906,7 +1879,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
if (!port->slave) {
netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n",
slave->dev->name);
return;
goto out;
}
netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n",
@@ -2032,6 +2005,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
}
}
port->slave = NULL;
out:
spin_unlock_bh(&bond->mode_lock);
}
/**
@@ -2057,7 +2033,11 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct port *port;
bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
read_lock(&bond->curr_slave_lock);
/* Lock to protect data accessed by all (e.g., port->sm_vars) and
* against running with bond_3ad_unbind_slave. ad_rx_machine may run
* concurrently due to incoming LACPDU as well.
*/
spin_lock_bh(&bond->mode_lock);
rcu_read_lock();
/* check if there are any slaves */
@@ -2093,12 +2073,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
goto re_arm;
}
/* Lock around state machines to protect data accessed
* by all (e.g., port->sm_vars). ad_rx_machine may run
* concurrently due to incoming LACPDU.
*/
__get_state_machine_lock(port);
ad_rx_machine(NULL, port);
ad_periodic_machine(port);
ad_port_selection_logic(port);
@@ -2108,8 +2082,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
/* turn off the BEGIN bit, since we already handled it */
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
__release_state_machine_lock(port);
}
re_arm:
@@ -2120,7 +2092,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
}
rcu_read_unlock();
read_unlock(&bond->curr_slave_lock);
spin_unlock_bh(&bond->mode_lock);
if (should_notify_rtnl && rtnl_trylock()) {
bond_slave_state_notify(bond);
@@ -2161,9 +2133,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
port->actor_port_number);
/* Protect against concurrent state machines */
__get_state_machine_lock(port);
spin_lock(&slave->bond->mode_lock);
ad_rx_machine(lacpdu, port);
__release_state_machine_lock(port);
spin_unlock(&slave->bond->mode_lock);
break;
case AD_TYPE_MARKER:
@@ -2213,7 +2185,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
return;
}
__get_state_machine_lock(port);
spin_lock_bh(&slave->bond->mode_lock);
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
@@ -2224,7 +2196,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
spin_unlock_bh(&slave->bond->mode_lock);
}
/**
@@ -2246,7 +2218,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
return;
}
__get_state_machine_lock(port);
spin_lock_bh(&slave->bond->mode_lock);
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
@@ -2257,7 +2229,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
spin_unlock_bh(&slave->bond->mode_lock);
}
/**
@@ -2280,7 +2252,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
return;
}
__get_state_machine_lock(port);
spin_lock_bh(&slave->bond->mode_lock);
/* on link down we are zeroing duplex and speed since
* some of the adaptors(ce1000.lan) report full duplex/speed
* instead of N/A(duplex) / 0(speed).
@@ -2311,7 +2283,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
*/
port->sm_vars |= AD_PORT_BEGIN;
__release_state_machine_lock(port);
spin_unlock_bh(&slave->bond->mode_lock);
}
/**
@@ -2476,20 +2448,16 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
int ret = RX_HANDLER_ANOTHER;
struct lacpdu *lacpdu, _lacpdu;
if (skb->protocol != PKT_TYPE_LACPDU)
return ret;
return RX_HANDLER_ANOTHER;
lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
if (!lacpdu)
return ret;
return RX_HANDLER_ANOTHER;
read_lock(&bond->curr_slave_lock);
ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
read_unlock(&bond->curr_slave_lock);
return ret;
return bond_3ad_rx_indication(lacpdu, slave, skb->len);
}
/**
@@ -2499,7 +2467,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
* When modify lacp_rate parameter via sysfs,
* update actor_oper_port_state of each port.
*
* Hold slave->state_machine_lock,
* Hold bond->mode_lock,
* so we can modify port->actor_oper_port_state,
* no matter bond is up or down.
*/
@@ -2511,13 +2479,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
int lacp_fast;
lacp_fast = bond->params.lacp_fast;
spin_lock_bh(&bond->mode_lock);
bond_for_each_slave(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave)->port);
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
else
port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
__release_state_machine_lock(port);
}
spin_unlock_bh(&bond->mode_lock);
}
@@ -259,7 +259,6 @@ struct ad_bond_info {
struct ad_slave_info {
struct aggregator aggregator; /* 802.3ad aggregator structure */
struct port port; /* 802.3ad port structure */
spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
u16 id;
};
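
A subtlety in the 3ad conversion above: bond_3ad_rx_indication() takes mode_lock with plain spin_lock(), while the speed/duplex/link-change handlers use spin_lock_bh(). Both are deliberate, as the sketch below illustrates with hypothetical helpers (not the driver's code): the LACPDU path already runs with BHs disabled, while the notification handlers run in process context and must mask BHs themselves to avoid a same-CPU deadlock.

#include <linux/spinlock.h>

/* Hypothetical helpers mirroring the two call contexts in bond_3ad.c. */
static void lacpdu_rx_path(spinlock_t *mode_lock)
{
	/* Runs from the bond's rx_handler, i.e. in softirq context where
	 * BHs are already disabled, so the cheaper non-_bh variant is safe.
	 */
	spin_lock(mode_lock);
	/* ... ad_rx_machine(lacpdu, port) ... */
	spin_unlock(mode_lock);
}

static void link_notify_path(spinlock_t *mode_lock)
{
	/* Runs in process context; must disable BHs itself so the RX path
	 * above cannot preempt us on this CPU while the lock is held.
	 */
	spin_lock_bh(mode_lock);
	/* ... update port keys, set AD_PORT_BEGIN ... */
	spin_unlock_bh(mode_lock);
}
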
@@ -100,27 +100,6 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
/*********************** tlb specific functions ***************************/
static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
static inline void _lock_tx_hashtbl(struct bonding *bond)
{
spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
if (save_load) {
@@ -140,7 +119,6 @@ static inline void tlb_init_slave(struct slave *slave)
SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
/* Caller must hold bond lock for read, BH disabled */
static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
int save_load)
{
@@ -163,13 +141,12 @@ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
tlb_init_slave(slave);
}
/* Caller must hold bond lock for read */
static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
int save_load)
{
_lock_tx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
__tlb_clear_slave(bond, slave, save_load);
_unlock_tx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
/* Must be called before starting the monitor timer */
@@ -184,14 +161,14 @@ static int tlb_initialize(struct bonding *bond)
if (!new_hashtbl)
return -1;
_lock_tx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
_unlock_tx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
return 0;
}
@@ -202,12 +179,12 @@ static void tlb_deinitialize(struct bonding *bond)
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct tlb_up_slave *arr;
_lock_tx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
kfree(bond_info->tx_hashtbl);
bond_info->tx_hashtbl = NULL;
_unlock_tx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
arr = rtnl_dereference(bond_info->slave_arr);
if (arr)
@@ -220,7 +197,6 @@ static long long compute_gap(struct slave *slave)
(s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}
/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
@@ -281,7 +257,6 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
return assigned_slave;
}
/* Caller must hold bond lock for read */
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
u32 skb_len)
{
@@ -291,32 +266,13 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
* tlb_choose_channel() is only called by bond_alb_xmit()
* which already has softirq disabled.
*/
_lock_tx_hashtbl(bond);
spin_lock(&bond->mode_lock);
tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
_unlock_tx_hashtbl(bond);
spin_unlock(&bond->mode_lock);
return tx_slave;
}
/*********************** rlb specific functions ***************************/
static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
static inline void _lock_rx_hashtbl(struct bonding *bond)
{
spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
/* when an ARP REPLY is received from a client update its info
* in the rx_hashtbl
@@ -327,7 +283,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -342,7 +298,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
bond_info->rx_ntt = 1;
}
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
@@ -378,7 +334,6 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
return RX_HANDLER_ANOTHER;
}
/* Caller must hold bond lock for read */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -411,7 +366,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
return rx_slave;
}
/* Caller must hold rcu_read_lock() for read */
/* Caller must hold rcu_read_lock() */
static struct slave *__rlb_next_rx_slave(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -447,11 +402,11 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
/* teach the switch the mac of a disabled slave
* on the primary for fault tolerance
*
* Caller must hold bond->curr_slave_lock for write or bond lock for write
* Caller must hold RTNL
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
struct slave *curr_active = bond_deref_active_protected(bond);
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
if (!curr_active)
return;
@@ -479,7 +434,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
u32 index, next_index;
/* clear slave from rx_hashtbl */
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
rx_hash_table = bond_info->rx_hashtbl;
index = bond_info->rx_hashtbl_used_head;
@@ -510,14 +465,10 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
}
}
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
write_lock_bh(&bond->curr_slave_lock);
if (slave != bond_deref_active_protected(bond))
if (slave != rtnl_dereference(bond->curr_active_slave))
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
@@ -565,7 +516,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
@@ -583,7 +534,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
*/
bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
/* The slave was assigned a new mac address - update the clients */
@@ -594,7 +545,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
int ntt = 0;
u32 hash_index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
@@ -615,7 +566,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
/* mark all clients using src_ip to be updated */
@@ -625,7 +576,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
struct rlb_client_info *client_info;
u32 hash_index;
_lock_rx_hashtbl(bond);
spin_lock(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
@@ -649,10 +600,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
}
}
_unlock_rx_hashtbl(bond);
spin_unlock(&bond->mode_lock);
}
/* Caller must hold both bond and ptr locks for read */
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -661,7 +611,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
struct rlb_client_info *client_info;
u32 hash_index = 0;
_lock_rx_hashtbl(bond);
spin_lock(&bond->mode_lock);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
@@ -680,7 +630,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
assigned_slave = client_info->slave;
if (assigned_slave) {
_unlock_rx_hashtbl(bond);
spin_unlock(&bond->mode_lock);
return assigned_slave;
}
} else {
@@ -742,7 +692,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
}
}
_unlock_rx_hashtbl(bond);
spin_unlock(&bond->mode_lock);
return assigned_slave;
}
@@ -795,7 +745,6 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
return tx_slave;
}
/* Caller must hold bond lock for read */
static void rlb_rebalance(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -804,7 +753,7 @@ static void rlb_rebalance(struct bonding *bond)
int ntt;
u32 hash_index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
ntt = 0;
hash_index = bond_info->rx_hashtbl_used_head;
@@ -822,7 +771,7 @@ static void rlb_rebalance(struct bonding *bond)
/* update the team's flag only after the whole iteration */
if (ntt)
bond_info->rx_ntt = 1;
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
/* Caller must hold rx_hashtbl lock */
@@ -921,7 +870,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
index = bond_info->rx_hashtbl[ip_src_hash].src_first;
while (index != RLB_NULL_INDEX) {
@@ -932,7 +881,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
rlb_delete_table_entry(bond, index);
index = next_index;
}
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
static int rlb_initialize(struct bonding *bond)
@@ -946,7 +895,7 @@ static int rlb_initialize(struct bonding *bond)
if (!new_hashtbl)
return -1;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
bond_info->rx_hashtbl = new_hashtbl;
@@ -955,7 +904,7 @@ static int rlb_initialize(struct bonding *bond)
for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
/* register to receive ARPs */
bond->recv_probe = rlb_arp_recv;
@@ -967,13 +916,13 @@ static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -981,7 +930,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 curr_index;
_lock_rx_hashtbl_bh(bond);
spin_lock_bh(&bond->mode_lock);
curr_index = bond_info->rx_hashtbl_used_head;
while (curr_index != RLB_NULL_INDEX) {
@@ -994,7 +943,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
curr_index = next_index;
}
_unlock_rx_hashtbl_bh(bond);
spin_unlock_bh(&bond->mode_lock);
}
/*********************** tlb/rlb shared functions *********************/
@@ -1398,9 +1347,9 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
}
if (tx_slave && bond->params.tlb_dynamic_lb) {
_lock_tx_hashtbl(bond);
spin_lock(&bond->mode_lock);
__tlb_clear_slave(bond, tx_slave, 0);
_unlock_tx_hashtbl(bond);
spin_unlock(&bond->mode_lock);
}
/* no suitable interface, frame not sent */
@@ -1595,13 +1544,6 @@ void bond_alb_monitor(struct work_struct *work)
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
bool strict_match;
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
* read.
*/
read_lock(&bond->curr_slave_lock);
bond_for_each_slave_rcu(bond, slave, iter) {
/* If updating current_active, use all currently
* user mac addreses (!strict_match). Otherwise, only
@@ -1613,17 +1555,11 @@ void bond_alb_monitor(struct work_struct *work)
alb_send_learning_packets(slave, slave->dev->dev_addr,
strict_match);
}
read_unlock(&bond->curr_slave_lock);
bond_info->lp_counter = 0;
}
/* rebalance tx traffic */
if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1633,9 +1569,6 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
read_unlock(&bond->curr_slave_lock);
bond_info->tx_rebalance_counter = 0;
}
@@ -1739,7 +1672,6 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
}
/* Caller must hold bond lock for read */
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
@@ -1775,21 +1707,14 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
* Set the bond->curr_active_slave to @new_slave and handle
* mac address swapping and promiscuity changes as needed.
*
* If new_slave is NULL, caller must hold curr_slave_lock for write
*
* If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
* for write. Processing here may sleep, so no other locks may be held.
* Caller must hold RTNL
*/
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
__releases(&bond->curr_slave_lock)
__acquires(&bond->curr_slave_lock)
{
struct slave *swap_slave;
struct slave *curr_active;
curr_active = rcu_dereference_protected(bond->curr_active_slave,
!new_slave ||
lockdep_is_held(&bond->curr_slave_lock));
curr_active = rtnl_dereference(bond->curr_active_slave);
if (curr_active == new_slave)
return;
@@ -1820,10 +1745,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
tlb_clear_slave(bond, swap_slave, 1);
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
ASSERT_RTNL();
/* in TLB mode, the slave might flip down/up with the old dev_addr,
* and thus filter bond->dev_addr's packets, so force bond's mac
*/
@@ -1852,8 +1773,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
alb_send_learning_packets(new_slave, bond->dev->dev_addr,
false);
}
write_lock_bh(&bond->curr_slave_lock);
}
/* Called with RTNL */
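
The __releases()/__acquires() annotations and the unlock/relock dance deleted from bond_alb_handle_active_change() above (and from bond_do_fail_over_mac() further down) existed only because changing a device MAC address may sleep, which is forbidden while holding a BH-disabling rwlock; under RTNL, a mutex, the code may simply sleep in place. A minimal sketch of the old and new shapes, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/rtnetlink.h>

/* Old shape: drop the BH-disabling lock around a sleeping call, retake it. */
static void set_slave_mac_old(rwlock_t *curr_slave_lock)
{
	write_unlock_bh(curr_slave_lock);
	/* ... dev_set_mac_address() and friends may sleep here ... */
	write_lock_bh(curr_slave_lock);
}

/* New shape: RTNL is a mutex, so the handler can sleep without juggling. */
static void set_slave_mac_new(void)
{
	ASSERT_RTNL();
	/* ... dev_set_mac_address() and friends may sleep here ... */
}
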
@@ -147,7 +147,6 @@ struct tlb_up_slave {
struct alb_bond_info {
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
int tx_rebalance_counter;
int lp_counter;
@@ -156,7 +155,6 @@ struct alb_bond_info {
/* -------- rlb parameters -------- */
int rlb_enabled;
struct rlb_client_info *rx_hashtbl; /* Receive hash table */
spinlock_t rx_hashtbl_lock;
u32 rx_hashtbl_used_head;
u8 rx_ntt; /* flag - need to transmit
* to all rx clients
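
The net effect on ALB/TLB: both per-mode spinlocks (tx_hashtbl_lock and rx_hashtbl_lock) and their eight _lock/_unlock wrapper helpers are gone, and every hash-table critical section takes bond->mode_lock directly, using the _bh flavour everywhere except on the transmit path, where softirqs are already disabled. A sketch of the resulting pattern, with an illustrative stand-in struct:

#include <linux/spinlock.h>
#include <linux/types.h>

struct bonding_sketch {			/* stand-in for struct bonding */
	spinlock_t mode_lock;
	u32 *rx_hashtbl;		/* stand-in for the rlb/tlb tables */
};

static void rebalance_sketch(struct bonding_sketch *bond)
{
	spin_lock_bh(&bond->mode_lock);		/* was _lock_rx_hashtbl_bh() */
	/* ... walk bond->rx_hashtbl and reassign clients ... */
	spin_unlock_bh(&bond->mode_lock);	/* was _unlock_rx_hashtbl_bh() */
}
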
@@ -29,7 +29,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
seq_printf(m, "SourceIP DestinationIP "
"Destination MAC DEV\n");
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
spin_lock_bh(&bond->mode_lock);
hash_index = bond_info->rx_hashtbl_used_head;
for (; hash_index != RLB_NULL_INDEX;
@@ -42,7 +42,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
client_info->slave->dev->name);
}
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
spin_unlock_bh(&bond->mode_lock);
return 0;
}
@@ -637,13 +637,11 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
*
* Perform special MAC address swapping for fail_over_mac settings
*
* Called with RTNL, curr_slave_lock for write_bh.
* Called with RTNL
*/
static void bond_do_fail_over_mac(struct bonding *bond,
struct slave *new_active,
struct slave *old_active)
__releases(&bond->curr_slave_lock)
__acquires(&bond->curr_slave_lock)
{
u8 tmp_mac[ETH_ALEN];
struct sockaddr saddr;
@@ -651,11 +649,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
switch (bond->params.fail_over_mac) {
case BOND_FOM_ACTIVE:
if (new_active) {
write_unlock_bh(&bond->curr_slave_lock);
if (new_active)
bond_set_dev_addr(bond->dev, new_active->dev);
write_lock_bh(&bond->curr_slave_lock);
}
break;
case BOND_FOM_FOLLOW:
/*
@@ -666,8 +661,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!new_active)
return;
write_unlock_bh(&bond->curr_slave_lock);
if (old_active) {
ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
ether_addr_copy(saddr.sa_data,
@@ -696,7 +689,6 @@ static void bond_do_fail_over_mac(struct bonding *bond,
netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
-rv, new_active->dev->name);
out:
write_lock_bh(&bond->curr_slave_lock);
break;
default:
netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
@@ -709,7 +701,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
static bool bond_should_change_active(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = bond_deref_active_protected(bond);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
if (!prim || !curr || curr->link != BOND_LINK_UP)
return true;
@@ -785,15 +777,15 @@ static bool bond_should_notify_peers(struct bonding *bond)
* because it is apparently the best available slave we have, even though its
* updelay hasn't timed out yet.
*
* If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
* Caller must hold RTNL.
*/
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
struct slave *old_active;
old_active = rcu_dereference_protected(bond->curr_active_slave,
!new_active ||
lockdep_is_held(&bond->curr_slave_lock));
ASSERT_RTNL();
old_active = rtnl_dereference(bond->curr_active_slave);
if (old_active == new_active)
return;
@@ -861,14 +853,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_should_notify_peers(bond);
}
write_unlock_bh(&bond->curr_slave_lock);
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
write_lock_bh(&bond->curr_slave_lock);
}
}
@@ -893,7 +881,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* - The primary_slave has got its link back.
* - A slave has got its link back and there's no old curr_active_slave.
*
* Caller must hold curr_slave_lock for write_bh.
* Caller must hold RTNL.
*/
void bond_select_active_slave(struct bonding *bond)
{
@@ -901,7 +889,7 @@ void bond_select_active_slave(struct bonding *bond)
int rv;
best_slave = bond_find_best_slave(bond);
if (best_slave != bond_deref_active_protected(bond)) {
if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
bond_change_active_slave(bond, best_slave);
rv = bond_set_carrier(bond);
if (!rv)
@@ -1571,9 +1559,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond_uses_primary(bond)) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
}
@@ -1601,10 +1587,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
RCU_INIT_POINTER(bond->primary_slave, NULL);
if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
}
/* either primary_slave or curr_active_slave might've changed */
@@ -1645,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/*
* Try to release the slave device <slave> from the bond device <master>
* It is legal to access curr_active_slave without a lock because all the function
* is write-locked. If "all" is true it means that the function is being called
* is RTNL-locked. If "all" is true it means that the function is being called
* while destroying a bond interface and all slaves are being released.
*
* The rules for slave state should be:
@@ -1691,14 +1675,8 @@ static int __bond_release_one(struct net_device *bond_dev,
*/
netdev_rx_handler_unregister(slave_dev);
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* Sync against bond_3ad_rx_indication and
* bond_3ad_state_machine_handler
*/
write_lock_bh(&bond->curr_slave_lock);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
write_unlock_bh(&bond->curr_slave_lock);
}
netdev_info(bond_dev, "Releasing %s interface %s\n",
bond_is_active_slave(slave) ? "active" : "backup",
@@ -1720,11 +1698,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (rtnl_dereference(bond->primary_slave) == slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
if (oldcurrent == slave) {
write_lock_bh(&bond->curr_slave_lock);
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
write_unlock_bh(&bond->curr_slave_lock);
}
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
@@ -1743,11 +1718,7 @@ static int __bond_release_one(struct net_device *bond_dev,
* is no concern that another slave add/remove event
* will interfere.
*/
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
}
if (!bond_has_slaves(bond)) {
......@@ -2058,9 +2029,7 @@ static void bond_miimon_commit(struct bonding *bond)
do_failover:
ASSERT_RTNL();
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
}
@@ -2506,15 +2475,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave_state_changed) {
bond_slave_state_change(bond);
} else if (do_failover) {
/* the bond_select_active_slave must hold RTNL
* and curr_slave_lock for write.
*/
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
}
rtnl_unlock();
@@ -2532,7 +2494,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* place for the slave. Returns 0 if no changes are found, >0 if changes
* to link states must be committed.
*
* Called with rcu_read_lock hold.
* Called with rcu_read_lock held.
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
@@ -2670,9 +2632,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
do_failover:
ASSERT_RTNL();
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
}
@@ -2682,7 +2642,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
/*
* Send ARP probes for active-backup mode ARP monitor.
*
* Called with rcu_read_lock hold.
* Called with rcu_read_lock held.
*/
static bool bond_ab_arp_probe(struct bonding *bond)
{
@@ -2939,9 +2899,7 @@ static int bond_slave_netdev_event(unsigned long event,
primary ? slave_dev->name : "none");
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
@@ -3106,7 +3064,6 @@ static int bond_open(struct net_device *bond_dev)
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, iter) {
if (bond_uses_primary(bond) &&
slave != rcu_access_pointer(bond->curr_active_slave)) {
@@ -3117,7 +3074,6 @@ static int bond_open(struct net_device *bond_dev)
BOND_SLAVE_NOTIFY_NOW);
}
}
read_unlock(&bond->curr_slave_lock);
}
bond_work_init_all(bond);
@@ -3239,14 +3195,10 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
if (!mii)
return -EINVAL;
if (mii->reg_num == 1) {
mii->val_out = 0;
read_lock(&bond->curr_slave_lock);
if (netif_carrier_ok(bond->dev))
mii->val_out = BMSR_LSTATUS;
read_unlock(&bond->curr_slave_lock);
}
return 0;
@@ -3892,8 +3844,7 @@ void bond_setup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
/* initialize rwlocks */
rwlock_init(&bond->curr_slave_lock);
spin_lock_init(&bond->mode_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4340,19 +4291,9 @@ static int bond_init(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
netdev_dbg(bond_dev, "Begin bond_init\n");
/*
* Initialize locks that may be required during
* en/deslave operations. All of the bond_open work
* (of which this is part) should really be moved to
* a phase prior to dev_open
*/
spin_lock_init(&(bond_info->tx_hashtbl_lock));
spin_lock_init(&(bond_info->rx_hashtbl_lock));
bond->wq = create_singlethread_workqueue(bond_dev->name);
if (!bond->wq)
return -ENOMEM;
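
With curr_slave_lock gone from bond_main.c, writers of curr_active_slave are serialized by RTNL alone, which is why bond_change_active_slave() now opens with ASSERT_RTNL() and every rcu_dereference_protected(..., lockdep_is_held(&bond->curr_slave_lock)) accessor collapses to rtnl_dereference(). A sketch of the accessor change, using an illustrative stand-in struct:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct slave;				/* opaque in this sketch */
struct bond_sketch {
	struct slave __rcu *curr_active_slave;
};

static struct slave *active_under_rtnl(struct bond_sketch *bond)
{
	ASSERT_RTNL();
	/* rtnl_dereference() lockdep-asserts RTNL, replacing the old
	 * bond_deref_active_protected() check on curr_slave_lock.
	 */
	return rtnl_dereference(bond->curr_active_slave);
}
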
@@ -734,15 +734,13 @@ static int bond_option_active_slave_set(struct bonding *bond,
}
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_info(bond->dev, "Clearing current active slave\n");
RCU_INIT_POINTER(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = bond_deref_active_protected(bond);
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
struct slave *new_active = bond_slave_get_rtnl(slave_dev);
BUG_ON(!new_active);
@@ -765,8 +763,6 @@ static int bond_option_active_slave_set(struct bonding *bond,
}
}
}
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
return ret;
@@ -1066,7 +1062,6 @@ static int bond_option_primary_set(struct bonding *bond,
struct slave *slave;
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
p = strchr(primary, '\n');
if (p)
@@ -1103,7 +1098,6 @@ static int bond_option_primary_set(struct bonding *bond,
primary, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
return 0;
@@ -1117,9 +1111,7 @@ static int bond_option_primary_reselect_set(struct bonding *bond,
bond->params.primary_reselect = newval->value;
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
unblock_netpoll_tx();
return 0;
@@ -184,9 +184,7 @@ struct slave {
/*
* Here are the locking policies for the two bonding locks:
*
* 1) Get rcu_read_lock when reading or RTNL when writing slave list.
* 2) Get bond->curr_slave_lock when reading/writing bond->curr_active_slave.
* Get rcu_read_lock when reading or RTNL when writing slave list.
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
@@ -197,7 +195,13 @@ struct bonding {
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
rwlock_t curr_slave_lock;
/* mode_lock is used for mode-specific locking needs, currently used by:
* 3ad mode (4) - protect against running bond_3ad_unbind_slave() and
* bond_3ad_state_machine_handler() concurrently.
* TLB mode (5) - to sync the use and modifications of its hash table
* ALB mode (6) - to sync the use and modifications of its hash table
*/
spinlock_t mode_lock;
u8 send_peer_notif;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
@@ -227,10 +231,6 @@ struct bonding {
#define bond_slave_get_rtnl(dev) \
((struct slave *) rtnl_dereference(dev->rx_handler_data))
#define bond_deref_active_protected(bond) \
rcu_dereference_protected(bond->curr_active_slave, \
lockdep_is_held(&bond->curr_slave_lock))
struct bond_vlan_tag {
__be16 vlan_proto;
unsigned short vlan_id;
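
The updated policy comment in bonding.h above is the whole story in two rules: readers take rcu_read_lock(), slave-list writers take RTNL, and mode-private data takes mode_lock. Fast-path readers never needed curr_slave_lock in the first place, which is what made its removal possible; a sketch of the reader side, again with an illustrative struct:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct slave;				/* opaque in this sketch */
struct bond_sketch {
	struct slave __rcu *curr_active_slave;
};

static bool slave_is_active(struct bond_sketch *bond, const struct slave *s)
{
	bool match;

	rcu_read_lock();
	/* The pointer is only guaranteed stable inside the RCU section. */
	match = rcu_dereference(bond->curr_active_slave) == s;
	rcu_read_unlock();

	return match;
}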