Commit 4bab16d7 authored by Nikolay Aleksandrov, committed by David S. Miller

bonding: alb: convert to bond->mode_lock

The ALB/TLB specific spinlocks are no longer necessary as we now have
bond->mode_lock for this purpose, so convert them and remove them from
struct alb_bond_info.
Also remove the unneeded lock/unlock functions and use spin_lock/unlock
directly.
Suggested-by: Jay Vosburgh <jay.vosburgh@canonical.com>
Signed-off-by: Nikolay Aleksandrov <nikolay@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b7435628
(The diff for one modified file is collapsed in this view and not shown below.)
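Since the bulk of the conversion sits in the collapsed file, here is a hedged, illustrative C sketch of the pattern the commit message describes: before the change, the ALB/TLB code wrapped its per-table spinlocks in small lock/unlock helpers; after it, callers take bond->mode_lock directly. The wrapper names follow the pre-commit bond_alb.c helpers, while rlb_example_update() is a hypothetical caller invented for illustration, not a function from this commit.

/* Assumes the usual bonding headers (bonding.h, bond_alb.h). */

/* Before: dedicated helpers around the per-table lock (now removed). */
static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

/* After: take the single per-bond mode lock directly (hypothetical caller). */
static void rlb_example_update(struct bonding *bond)
{
	spin_lock_bh(&bond->mode_lock);
	/* ... modify RLB/TLB hash table state ... */
	spin_unlock_bh(&bond->mode_lock);
}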
drivers/net/bonding/bond_alb.h

@@ -147,7 +147,6 @@ struct tlb_up_slave {
 struct alb_bond_info {
 	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
-	spinlock_t		tx_hashtbl_lock;
 	u32			unbalanced_load;
 	int			tx_rebalance_counter;
 	int			lp_counter;
@@ -156,7 +155,6 @@ struct alb_bond_info {
 	/* -------- rlb parameters -------- */
 	int rlb_enabled;
 	struct rlb_client_info	*rx_hashtbl;	/* Receive hash table */
-	spinlock_t		rx_hashtbl_lock;
 	u32			rx_hashtbl_used_head;
 	u8			rx_ntt;	/* flag - need to transmit
 					 * to all rx clients
...
drivers/net/bonding/bond_debugfs.c

@@ -29,7 +29,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 	seq_printf(m, "SourceIP DestinationIP "
		      "Destination MAC DEV\n");

-	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_lock_bh(&bond->mode_lock);

 	hash_index = bond_info->rx_hashtbl_used_head;
 	for (; hash_index != RLB_NULL_INDEX;
@@ -42,7 +42,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 			   client_info->slave->dev->name);
 	}

-	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+	spin_unlock_bh(&bond->mode_lock);

 	return 0;
 }
...
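For read-side users like the debugfs dump above, the post-conversion pattern is to walk the RLB "used" list entirely under bond->mode_lock, taken with the _bh variant as in the hunk. Below is a minimal sketch assuming the usual bonding.h/bond_alb.h declarations (rx_hashtbl, rx_hashtbl_used_head, used_next, RLB_NULL_INDEX); rlb_count_used_entries() is a hypothetical helper, not part of this commit.

/* Count active RLB hash table entries while holding the mode lock. */
static int rlb_count_used_entries(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 hash_index;
	int count = 0;

	spin_lock_bh(&bond->mode_lock);
	for (hash_index = bond_info->rx_hashtbl_used_head;
	     hash_index != RLB_NULL_INDEX;
	     hash_index = bond_info->rx_hashtbl[hash_index].used_next)
		count++;
	spin_unlock_bh(&bond->mode_lock);

	return count;
}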
drivers/net/bonding/bond_main.c

@@ -4297,19 +4297,9 @@ static int bond_init(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

 	netdev_dbg(bond_dev, "Begin bond_init\n");

-	/*
-	 * Initialize locks that may be required during
-	 * en/deslave operations.  All of the bond_open work
-	 * (of which this is part) should really be moved to
-	 * a phase prior to dev_open
-	 */
-	spin_lock_init(&(bond_info->tx_hashtbl_lock));
-	spin_lock_init(&(bond_info->rx_hashtbl_lock));
-
 	bond->wq = create_singlethread_workqueue(bond_dev->name);
 	if (!bond->wq)
 		return -ENOMEM;
...
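With the per-table spin_lock_init() calls dropped from bond_init(), the only lock the ALB/TLB paths depend on is bond->mode_lock. A hedged sketch of where that single initialization is assumed to happen, namely once at netdev setup time (e.g. in bond_setup()); bond_setup_example() is illustrative and not the kernel's actual function.

/* Assumed location of the one-time mode_lock initialization. */
static void bond_setup_example(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	/* ... rest of the usual bond device setup ... */
}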