Commit baa1e8a0 authored by Christophe JAILLET, committed by Simon Wunderlich

batman-adv: Axe 'aggr_list_lock'

'aggr_list.lock' can safely be used in place of another explicit spinlock
when access to 'aggr_list' has to be guarded.

This avoids taking 2 locks, knowing that the 2nd one always succeeds.

Now that the 'aggr_list.lock' is handled explicitly, the lock-free
__skb_something() variants should be used when dealing with 'aggr_list'.
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
parent 9044854e
...@@ -1085,7 +1085,6 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface) ...@@ -1085,7 +1085,6 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
hard_iface->bat_v.aggr_len = 0; hard_iface->bat_v.aggr_len = 0;
skb_queue_head_init(&hard_iface->bat_v.aggr_list); skb_queue_head_init(&hard_iface->bat_v.aggr_list);
spin_lock_init(&hard_iface->bat_v.aggr_list_lock);
INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq, INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq,
batadv_v_ogm_aggr_work); batadv_v_ogm_aggr_work);
} }
......
...@@ -152,7 +152,7 @@ static unsigned int batadv_v_ogm_len(struct sk_buff *skb) ...@@ -152,7 +152,7 @@ static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
* @skb: the OGM to check * @skb: the OGM to check
* @hard_iface: the interface to use to send the OGM * @hard_iface: the interface to use to send the OGM
* *
* Caller needs to hold the hard_iface->bat_v.aggr_list_lock. * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
* *
* Return: True, if the given OGMv2 packet still fits, false otherwise. * Return: True, if the given OGMv2 packet still fits, false otherwise.
*/ */
...@@ -163,7 +163,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, ...@@ -163,7 +163,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
BATADV_MAX_AGGREGATION_BYTES); BATADV_MAX_AGGREGATION_BYTES);
unsigned int ogm_len = batadv_v_ogm_len(skb); unsigned int ogm_len = batadv_v_ogm_len(skb);
lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
return hard_iface->bat_v.aggr_len + ogm_len <= max; return hard_iface->bat_v.aggr_len + ogm_len <= max;
} }
...@@ -174,13 +174,13 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, ...@@ -174,13 +174,13 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
* *
* Empties the OGMv2 aggregation queue and frees all the skbs it contained. * Empties the OGMv2 aggregation queue and frees all the skbs it contained.
* *
* Caller needs to hold the hard_iface->bat_v.aggr_list_lock. * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/ */
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface) static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{ {
lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
skb_queue_purge(&hard_iface->bat_v.aggr_list); __skb_queue_purge(&hard_iface->bat_v.aggr_list);
hard_iface->bat_v.aggr_len = 0; hard_iface->bat_v.aggr_len = 0;
} }
...@@ -193,7 +193,7 @@ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface) ...@@ -193,7 +193,7 @@ static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
* *
* The aggregation queue is empty after this call. * The aggregation queue is empty after this call.
* *
* Caller needs to hold the hard_iface->bat_v.aggr_list_lock. * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/ */
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{ {
...@@ -202,7 +202,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) ...@@ -202,7 +202,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
unsigned int ogm_len; unsigned int ogm_len;
struct sk_buff *skb; struct sk_buff *skb;
lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock); lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
if (!aggr_len) if (!aggr_len)
return; return;
...@@ -216,7 +216,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) ...@@ -216,7 +216,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN); skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
skb_reset_network_header(skb_aggr); skb_reset_network_header(skb_aggr);
while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) { while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb); hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
ogm_len = batadv_v_ogm_len(skb); ogm_len = batadv_v_ogm_len(skb);
...@@ -243,13 +243,13 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb, ...@@ -243,13 +243,13 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
return; return;
} }
spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
if (!batadv_v_ogm_queue_left(skb, hard_iface)) if (!batadv_v_ogm_queue_left(skb, hard_iface))
batadv_v_ogm_aggr_send(hard_iface); batadv_v_ogm_aggr_send(hard_iface);
hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb); hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
skb_queue_tail(&hard_iface->bat_v.aggr_list, skb); __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
} }
/** /**
...@@ -388,9 +388,9 @@ void batadv_v_ogm_aggr_work(struct work_struct *work) ...@@ -388,9 +388,9 @@ void batadv_v_ogm_aggr_work(struct work_struct *work)
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work); batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
hard_iface = container_of(batv, struct batadv_hard_iface, bat_v); hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_send(hard_iface); batadv_v_ogm_aggr_send(hard_iface);
spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_start_queue_timer(hard_iface); batadv_v_ogm_start_queue_timer(hard_iface);
} }
...@@ -421,9 +421,9 @@ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface) ...@@ -421,9 +421,9 @@ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{ {
cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq); cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
spin_lock_bh(&hard_iface->bat_v.aggr_list_lock); spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_list_free(hard_iface); batadv_v_ogm_aggr_list_free(hard_iface);
spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock); spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
} }
/** /**
......
...@@ -130,9 +130,6 @@ struct batadv_hard_iface_bat_v { ...@@ -130,9 +130,6 @@ struct batadv_hard_iface_bat_v {
/** @aggr_len: size of the OGM aggregate (excluding ethernet header) */ /** @aggr_len: size of the OGM aggregate (excluding ethernet header) */
unsigned int aggr_len; unsigned int aggr_len;
/** @aggr_list_lock: protects aggr_list */
spinlock_t aggr_list_lock;
/** /**
* @throughput_override: throughput override to disable link * @throughput_override: throughput override to disable link
* auto-detection * auto-detection
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment