Commit bde000ae authored by Miquel Raynal, committed by Stefan Schmidt

net: mac802154: Follow the count of ongoing transmissions

In order to create a synchronous API for MLME command purposes, we need
to be able to track the end of the ongoing transmissions. Let's
introduce an atomic variable which is incremented when a transmission
starts and decremented when relevant so that we know at any moment
whether there is an ongoing transmission.

The counter gets decremented in the following situations:
- The operation is asynchronous and there was a failure during the
  offloading process.
- The operation is synchronous and the synchronous operation failed.
- The operation finished, either successfully or not.
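
For illustration only, here is a rough sketch of how a later synchronous MLME path could consume this counter to wait for the transmit machinery to drain. Nothing below is part of this patch: the helper name and the wait queue are hypothetical, only the atomic_read() on the new ongoing_txs field is real.

#include <linux/atomic.h>
#include <linux/wait.h>
#include <net/cfg802154.h>

/* Hypothetical helper, not part of this commit: sleep until no
 * transmission is in flight any more. Assumes a wait queue that is
 * woken wherever ongoing_txs gets decremented.
 */
static int wpan_phy_wait_for_tx_drain(struct wpan_phy *phy,
                                      wait_queue_head_t *tx_drained)
{
        return wait_event_interruptible(*tx_drained,
                                        !atomic_read(&phy->ongoing_txs));
}
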
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Acked-by: Alexander Aring <aahringo@redhat.com>
Link: https://lore.kernel.org/r/20220519150516.443078-5-miquel.raynal@bootlin.com
Signed-off-by: Stefan Schmidt <stefan@datenfreihafen.org>
parent d08d951a
@@ -214,6 +214,9 @@ struct wpan_phy {
 	/* the network namespace this phy lives in currently */
 	possible_net_t _net;
 
+	/* Transmission monitoring */
+	atomic_t ongoing_txs;
+
 	char priv[] __aligned(NETDEV_ALIGN);
 };
...
@@ -44,6 +44,7 @@ void ieee802154_xmit_sync_worker(struct work_struct *work)
 err_tx:
 	/* Restart the netif queue on each sub_if_data object. */
 	ieee802154_wake_queue(&local->hw);
+	atomic_dec(&local->phy->ongoing_txs);
 	kfree_skb(skb);
 	netdev_dbg(dev, "transmission failed\n");
 }
@@ -75,6 +76,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
 	/* Stop the netif queue on each sub_if_data object. */
 	ieee802154_stop_queue(&local->hw);
+	atomic_inc(&local->phy->ongoing_txs);
 
 	/* Drivers should preferably implement the async callback. In some rare
 	 * cases they only provide a sync callback which we will use as a
@@ -98,6 +100,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
 err_wake_netif_queue:
 	ieee802154_wake_queue(&local->hw);
+	atomic_dec(&local->phy->ongoing_txs);
 err_free_skb:
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
...
@@ -88,6 +88,7 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
 	}
 
 	dev_consume_skb_any(skb);
+	atomic_dec(&hw->phy->ongoing_txs);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
@@ -99,6 +100,7 @@ void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
 	local->tx_result = reason;
 	ieee802154_wake_queue(hw);
 	dev_kfree_skb_any(skb);
+	atomic_dec(&hw->phy->ongoing_txs);
 }
 EXPORT_SYMBOL(ieee802154_xmit_error);
...
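
For context, and not part of the diff above: ieee802154_xmit_complete() and ieee802154_xmit_error() are the hooks a driver calls from its asynchronous TX-done path, so with the hunks above the counter is also decremented once the hardware finishes an offloaded transmission. A rough driver-side sketch follows; the foo_* names are made up for illustration, only the mac802154 call is real.

#include <linux/skbuff.h>
#include <net/mac802154.h>

/* Made-up driver private data, for illustration only. */
struct foo_priv {
        struct ieee802154_hw *hw;
        struct sk_buff *tx_skb;
};

/* Hypothetical driver TX-done handler: hand the frame back to
 * mac802154, which frees the skb, handles the queue and decrements
 * phy->ongoing_txs. On a hardware failure the driver would call
 * ieee802154_xmit_error() instead, which also decrements the counter.
 */
static void foo_tx_done(struct foo_priv *priv)
{
        ieee802154_xmit_complete(priv->hw, priv->tx_skb, false);
}
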