Commit 84a0d466 authored by Sebastian Andrzej Siewior, committed by Kalle Valo

ath9k: use irqsave() in USB's complete callback

The USB completion callback does not disable interrupts while acquiring
the lock. We want to remove the local_irq_disable() invocation from
__usb_hcd_giveback_urb(), and that requires the callback handler itself
to disable interrupts while acquiring the lock.
The callback may be invoked in either IRQ or BH context, depending on
the USB host controller.
Use the _irqsave() variant of the locking primitives.

Cc: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
Cc: Kalle Valo <kvalo@codeaurora.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: linux-wireless@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent fe041deb
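
For context, the change applies the usual locking pattern for code that may run in either hard-IRQ or BH context: spin_lock_irqsave() saves the current interrupt state and disables local interrupts before taking the lock, and spin_unlock_irqrestore() restores that state afterwards, so the callback is safe regardless of which context the host controller driver calls it from. Below is a minimal sketch of the pattern, not part of the commit itself; struct my_dev, tx_lock, tx_flags and MY_TX_FLUSH are hypothetical names used only to illustrate the shape of the conversion.

#include <linux/spinlock.h>
#include <linux/usb.h>

/* Hypothetical device structure, only for illustration. */
struct my_dev {
	spinlock_t tx_lock;	/* protects tx_flags */
	unsigned long tx_flags;
};

#define MY_TX_FLUSH	0x1

/* URB completion handler: may be invoked in IRQ or BH context. */
static void my_urb_complete(struct urb *urb)
{
	struct my_dev *dev = urb->context;
	unsigned long flags;

	/* Save and disable local interrupts while the lock is held. */
	spin_lock_irqsave(&dev->tx_lock, flags);
	if (dev->tx_flags & MY_TX_FLUSH) {
		/* Restore the saved interrupt state on every exit path. */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

This is exactly the conversion each hunk below performs, with a local "unsigned long flags", on hif_usb_mgmt_cb(), ath9k_htc_rxep() and ath9k_wmi_ctrl_rx().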
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
 {
 	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
 	struct hif_device_usb *hif_dev;
+	unsigned long flags;
 	bool txok = true;
 
 	if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
 		 * If the URBs are being flushed, no need to complete
 		 * this packet.
 		 */
-		spin_lock(&hif_dev->tx.tx_lock);
+		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
 		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
-			spin_unlock(&hif_dev->tx.tx_lock);
+			spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
 			dev_kfree_skb_any(cmd->skb);
 			kfree(cmd);
 			return;
 		}
-		spin_unlock(&hif_dev->tx.tx_lock);
+		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
 
 		break;
 	default:
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
 	struct ath_hw *ah = priv->ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+	unsigned long flags;
 
-	spin_lock(&priv->rx.rxbuflock);
+	spin_lock_irqsave(&priv->rx.rxbuflock, flags);
 	list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
 		if (!tmp_buf->in_process) {
 			rxbuf = tmp_buf;
 			break;
 		}
 	}
-	spin_unlock(&priv->rx.rxbuflock);
+	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
 
 	if (rxbuf == NULL) {
 		ath_dbg(common, ANY, "No free RX buffer\n");
 		goto err;
 	}
 
-	spin_lock(&priv->rx.rxbuflock);
+	spin_lock_irqsave(&priv->rx.rxbuflock, flags);
 	rxbuf->skb = skb;
 	rxbuf->in_process = true;
-	spin_unlock(&priv->rx.rxbuflock);
+	spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
 
 	tasklet_schedule(&priv->rx_tasklet);
 	return;
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
 {
 	struct wmi *wmi = priv;
 	struct wmi_cmd_hdr *hdr;
+	unsigned long flags;
 	u16 cmd_id;
 
 	if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
 	cmd_id = be16_to_cpu(hdr->command_id);
 
 	if (cmd_id & 0x1000) {
-		spin_lock(&wmi->wmi_lock);
+		spin_lock_irqsave(&wmi->wmi_lock, flags);
 		__skb_queue_tail(&wmi->wmi_event_queue, skb);
-		spin_unlock(&wmi->wmi_lock);
+		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 		tasklet_schedule(&wmi->wmi_event_tasklet);
 		return;
 	}
 
 	/* Check if there has been a timeout. */
-	spin_lock(&wmi->wmi_lock);
+	spin_lock_irqsave(&wmi->wmi_lock, flags);
 	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
-		spin_unlock(&wmi->wmi_lock);
+		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 		goto free_skb;
 	}
-	spin_unlock(&wmi->wmi_lock);
+	spin_unlock_irqrestore(&wmi->wmi_lock, flags);
 
 	/* WMI command response */
 	ath9k_wmi_rsp_callback(wmi, skb);