Commit f3e0008f authored by Marek Lindner

batman-adv: make broadcast seqno operations atomic

Batman-adv could receive several payload broadcasts at the same time
that would trigger access to the broadcast seqno sliding window to
determine whether this is a new broadcast or not. If these incoming
broadcasts are accessing the sliding window simultaneously it could
be left in an inconsistent state. Therefore it is necessary to make
sure this access is atomic.
Reported-by: Linus Lüssing <linus.luessing@web.de>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
parent 0ede9f41
...@@ -215,6 +215,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) ...@@ -215,6 +215,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
INIT_HLIST_HEAD(&orig_node->neigh_list); INIT_HLIST_HEAD(&orig_node->neigh_list);
INIT_LIST_HEAD(&orig_node->bond_list); INIT_LIST_HEAD(&orig_node->bond_list);
spin_lock_init(&orig_node->ogm_cnt_lock); spin_lock_init(&orig_node->ogm_cnt_lock);
spin_lock_init(&orig_node->bcast_seqno_lock);
spin_lock_init(&orig_node->neigh_list_lock); spin_lock_init(&orig_node->neigh_list_lock);
kref_init(&orig_node->refcount); kref_init(&orig_node->refcount);
......
...@@ -1473,81 +1473,93 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) ...@@ -1473,81 +1473,93 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{ {
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node; struct orig_node *orig_node = NULL;
struct bcast_packet *bcast_packet; struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr; struct ethhdr *ethhdr;
int hdr_size = sizeof(struct bcast_packet); int hdr_size = sizeof(struct bcast_packet);
int ret = NET_RX_DROP;
int32_t seq_diff; int32_t seq_diff;
/* drop packet if it has not necessary minimum size */ /* drop packet if it has not necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size))) if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP; goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb); ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */ /* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest)) if (!is_broadcast_ether_addr(ethhdr->h_dest))
return NET_RX_DROP; goto out;
/* packet with broadcast sender address */ /* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source)) if (is_broadcast_ether_addr(ethhdr->h_source))
return NET_RX_DROP; goto out;
/* ignore broadcasts sent by myself */ /* ignore broadcasts sent by myself */
if (is_my_mac(ethhdr->h_source)) if (is_my_mac(ethhdr->h_source))
return NET_RX_DROP; goto out;
bcast_packet = (struct bcast_packet *)skb->data; bcast_packet = (struct bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */ /* ignore broadcasts originated by myself */
if (is_my_mac(bcast_packet->orig)) if (is_my_mac(bcast_packet->orig))
return NET_RX_DROP; goto out;
if (bcast_packet->ttl < 2) if (bcast_packet->ttl < 2)
return NET_RX_DROP; goto out;
spin_lock_bh(&bat_priv->orig_hash_lock); spin_lock_bh(&bat_priv->orig_hash_lock);
rcu_read_lock(); rcu_read_lock();
orig_node = ((struct orig_node *) orig_node = ((struct orig_node *)
hash_find(bat_priv->orig_hash, compare_orig, choose_orig, hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
bcast_packet->orig)); bcast_packet->orig));
if (!orig_node)
goto rcu_unlock;
kref_get(&orig_node->refcount);
rcu_read_unlock(); rcu_read_unlock();
if (!orig_node) { spin_lock_bh(&orig_node->bcast_seqno_lock);
spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
/* check whether the packet is a duplicate */ /* check whether the packet is a duplicate */
if (get_bit_status(orig_node->bcast_bits, if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
orig_node->last_bcast_seqno, ntohl(bcast_packet->seqno)))
ntohl(bcast_packet->seqno))) { goto spin_unlock;
spin_unlock_bh(&bat_priv->orig_hash_lock);
return NET_RX_DROP;
}
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */ /* check whether the packet is old and the host just restarted. */
if (window_protected(bat_priv, seq_diff, if (window_protected(bat_priv, seq_diff,
&orig_node->bcast_seqno_reset)) { &orig_node->bcast_seqno_reset))
spin_unlock_bh(&bat_priv->orig_hash_lock); goto spin_unlock;
return NET_RX_DROP;
}
/* mark broadcast in flood history, update window position /* mark broadcast in flood history, update window position
* if required. */ * if required. */
if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
spin_unlock_bh(&orig_node->bcast_seqno_lock);
spin_unlock_bh(&bat_priv->orig_hash_lock); spin_unlock_bh(&bat_priv->orig_hash_lock);
/* rebroadcast packet */ /* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb); add_bcast_packet_to_list(bat_priv, skb);
/* broadcast for me */ /* broadcast for me */
interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
goto out;
return NET_RX_SUCCESS; rcu_unlock:
rcu_read_unlock();
spin_unlock_bh(&bat_priv->orig_hash_lock);
goto out;
spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
if (orig_node)
kref_put(&orig_node->refcount, orig_node_free_ref);
return ret;
} }
int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
......
...@@ -90,6 +90,8 @@ struct orig_node { ...@@ -90,6 +90,8 @@ struct orig_node {
spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum, spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
* neigh_node->real_bits, * neigh_node->real_bits,
* neigh_node->real_packet_count */ * neigh_node->real_packet_count */
spinlock_t bcast_seqno_lock; /* protects bcast_bits,
* last_bcast_seqno */
atomic_t bond_candidates; atomic_t bond_candidates;
struct list_head bond_list; struct list_head bond_list;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment