Commit 6d5808d4 authored by Sven Eckelmann

batman-adv: Add missing hardif_free_ref in forw_packet_free

add_bcast_packet_to_list increases the refcount for if_incoming but the
reference count is never decreased. The reference count must be
increased for all kinds of forwarded packets which have the primary
interface stored and forw_packet_free must decrease them. Also
purge_outstanding_packets has to invoke forw_packet_free when a work
item was really cancelled.

This regression was introduced in
32ae9b22.
Reported-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
parent 27aea212
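
The invariant the patch restores is a strict acquire/release pairing: any path that stores if_incoming in a queued forw_packet must first take a reference with atomic_inc_not_zero(&if_incoming->refcount), and forw_packet_free() is the single place that drops it again. A minimal userspace C sketch of that pairing follows; the helper names hardif_hold() and hardif_put() are illustrative stand-ins, not the kernel's API.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's struct hard_iface refcount. */
struct hard_iface {
	atomic_int refcount;
};

/* Mirrors atomic_inc_not_zero(): take a reference only while the object
 * is still alive; a zero refcount means it is already being torn down. */
static int hardif_hold(struct hard_iface *iface)
{
	int old = atomic_load(&iface->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&iface->refcount, &old, old + 1))
			return 1;	/* reference taken */
	return 0;			/* caller must bail out */
}

/* Mirrors hardif_free_ref(): drop the reference, free on the last put. */
static void hardif_put(struct hard_iface *iface)
{
	if (atomic_fetch_sub(&iface->refcount, 1) == 1)
		free(iface);
}

struct forw_packet {
	struct hard_iface *if_incoming;	/* owns one reference while queued */
};

/* Every path that frees a forw_packet also releases its interface
 * reference; this is the pairing the patch adds to forw_packet_free(). */
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->if_incoming)
		hardif_put(forw_packet->if_incoming);
	free(forw_packet);
}

int main(void)
{
	struct hard_iface *iface = malloc(sizeof(*iface));
	struct forw_packet *pkt = malloc(sizeof(*pkt));

	atomic_init(&iface->refcount, 1);	/* creator's reference */
	pkt->if_incoming = hardif_hold(iface) ? iface : NULL;

	forw_packet_free(pkt);	/* drops the queued packet's reference */
	hardif_put(iface);	/* drops the creator's reference, frees iface */
	return 0;
}

The same shape appears in the diff below: new_aggregated_packet() now takes the reference up front and funnels every error path through the new out: label, which releases it.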
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -23,6 +23,7 @@
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
+#include "hard-interface.h"
 
 /* calculate the size of the tt information for a given packet */
 static int tt_len(struct batman_packet *batman_packet)
@@ -105,12 +106,15 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	struct forw_packet *forw_packet_aggr;
 	unsigned char *skb_buff;
 
+	if (!atomic_inc_not_zero(&if_incoming->refcount))
+		return;
+
 	/* own packet should always be scheduled */
 	if (!own_packet) {
 		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"batman packet queue full\n");
-			return;
+			goto out;
 		}
 	}
 
@@ -118,7 +122,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	if (!forw_packet_aggr) {
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
-		return;
+		goto out;
 	}
 
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
@@ -133,7 +137,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
 		kfree(forw_packet_aggr);
-		return;
+		goto out;
 	}
 	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
 
@@ -164,6 +168,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	queue_delayed_work(bat_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
+
+	return;
+out:
+	hardif_free_ref(if_incoming);
 }
 
 /* aggregate a new packet into the existing aggregation */
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -377,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 {
 	if (forw_packet->skb)
 		kfree_skb(forw_packet->skb);
+	if (forw_packet->if_incoming)
+		hardif_free_ref(forw_packet->if_incoming);
 	kfree(forw_packet);
 }
 
@@ -539,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
+	bool pending;
 
 	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -567,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bcast_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -591,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }
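
The purge hunks hinge on the return value of cancel_delayed_work_sync(): it returns true only when the work item was still pending, meaning its handler never ran and so never got the chance to unlink and free the packet itself. Only in that case may the purging code delete the list entry and call forw_packet_free(); if the handler already ran, it owns the cleanup and the packet must not be touched again. Below is a condensed, single-threaded C sketch of that ownership handoff, with stand-ins for the kernel primitives; the real code additionally serializes against the handler through the forw list spinlocks.

#include <stdbool.h>
#include <stdlib.h>

struct forw_packet {
	bool pending;	/* handler has not run yet */
};

/* Stand-in for the work handler: once it runs, it frees the packet. */
static void work_handler(struct forw_packet *pkt)
{
	pkt->pending = false;
	free(pkt);	/* the handler owns the cleanup from here on */
}

/* Stand-in for cancel_delayed_work_sync(): true iff the handler was
 * still pending and has now been stopped before it could run. */
static bool cancel_work(struct forw_packet *pkt)
{
	bool was_pending = pkt->pending;

	pkt->pending = false;
	return was_pending;
}

static void purge_one(struct forw_packet *pkt)
{
	/* Free only when we actually stopped the handler; if it already
	 * ran, it freed the packet itself and pkt must not be touched. */
	if (cancel_work(pkt))
		free(pkt);	/* the patch calls forw_packet_free() here */
}

int main(void)
{
	struct forw_packet *cancelled = malloc(sizeof(*cancelled));
	struct forw_packet *ran = malloc(sizeof(*ran));

	cancelled->pending = true;
	ran->pending = true;

	work_handler(ran);	/* ran first: the handler freed it */
	purge_one(cancelled);	/* stopped in time: purge frees it */
	return 0;
}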