Commit 5d7b045b authored by David S. Miller

Merge tag 'wireless-drivers-for-davem-2015-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

ath9k:

* fix an IRQ storm caused by commit 872b5d81

iwlwifi:

* A scan fix that avoids triggering a firmware assertion

* A fix that improves roaming behavior. The same fix has been tested for
  a while in iwldvm. This is a bit of a workaround; the real fix
  should be in mac80211 and will come later.

* A fix for BARs that avoids a WARNING.

* One fix for rfkill while a scheduled scan is running.
  Linus's system hit this issue. WiFi would be unavailable
  after this happened because of bad state left in cfg80211.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9143e398 e3f31175
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 	__ath_cancel_work(sc);
 
+	disable_irq(sc->irq);
 	tasklet_disable(&sc->intr_tq);
 	tasklet_disable(&sc->bcon_tasklet);
 	spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 		r = -EIO;
 
 out:
+	enable_irq(sc->irq);
 	spin_unlock_bh(&sc->sc_pcu_lock);
 	tasklet_enable(&sc->bcon_tasklet);
 	tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
 	if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
 		return IRQ_NONE;
 
-	if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-		return IRQ_NONE;
-
 	/* shared irq, not for us */
 	if (!ath9k_hw_intrpend(ah))
 		return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 	ath9k_debug_sync_cause(sc, sync_cause);
 	status &= ah->imask;	/* discard unasked-for bits */
 
-	if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+	if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
 		return IRQ_HANDLED;
 
 	/*
......
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *	regardless of the band or the number of the probes. FW will calculate
  *	the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF	= BIT(7),
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= BIT(8),
 	IWL_UCODE_TLV_API_BASIC_DWELL		= BIT(13),
+	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= BIT(16),
 };
 
 /**
......
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
 };
 
 /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ *	involved.
+ *	1 - EBS is disabled.
+ *	2 - every second scan will be full scan(and so on).
 */
 struct iwl_scan_channel_opt {
 	__le16 flags;
......
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 		msk |= mvmsta->tfd_queue_msk;
 	}
 
-	if (drop) {
-		if (iwl_mvm_flush_tx_path(mvm, msk, true))
-			IWL_ERR(mvm, "flush request fail\n");
-		mutex_unlock(&mvm->mutex);
-	} else {
-		mutex_unlock(&mvm->mutex);
+	msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
 
-		/* this can take a while, and we may need/want other operations
-		 * to succeed while doing this, so do it without the mutex held
-		 */
-		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
-	}
+	if (iwl_mvm_flush_tx_path(mvm, msk, true))
+		IWL_ERR(mvm, "flush request fail\n");
+	mutex_unlock(&mvm->mutex);
+
+	/* this can take a while, and we may need/want other operations
+	 * to succeed while doing this, so do it without the mutex held
+	 */
+	iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
 }
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
......
@@ -72,6 +72,8 @@
 #define IWL_PLCP_QUIET_THRESH		1
 #define IWL_ACTIVE_QUIET_TIME		10
+#define IWL_DENSE_EBS_SCAN_RATIO	5
+#define IWL_SPARSE_EBS_SCAN_RATIO	1
 
 struct iwl_mvm_scan_params {
 	u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
 					  notify);
 
+	if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+		return 0;
+
+	if (iwl_mvm_is_radio_killed(mvm))
+		goto out;
+
 	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
 	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
 	     mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 	if (mvm->scan_status == IWL_MVM_SCAN_OS)
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
+out:
 	mvm->scan_status = IWL_MVM_SCAN_NONE;
 
 	if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
 	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
 	cmd->iter_num = cpu_to_le32(1);
 
-	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-	    mvm->last_ebs_successful) {
-		cmd->channel_opt[0].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-		cmd->channel_opt[1].flags =
-			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-	}
-
 	if (iwl_mvm_rrm_scan_needed(mvm))
 		cmd->scan_flags |=
 			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
 	cmd->schedule[1].iterations = 0;
 	cmd->schedule[1].full_scan_mul = 0;
 
+	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+	    mvm->last_ebs_successful) {
+		cmd->channel_opt[0].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[0].non_ebs_ratio =
+			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+		cmd->channel_opt[1].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[1].non_ebs_ratio =
+			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+	}
+
 	for (i = 1; i <= req->req.n_ssids; i++)
 		ssid_bitmap |= BIT(i);
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 	cmd->schedule[1].iterations = 0xff;
 	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
 
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+	    mvm->last_ebs_successful) {
+		cmd->channel_opt[0].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[0].non_ebs_ratio =
+			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+		cmd->channel_opt[1].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[1].non_ebs_ratio =
+			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+	}
+
 	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
 				       ssid_bitmap, cmd);
......
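Read together with the new non_ebs_ratio kernel-doc above, the IWL_DENSE_EBS_SCAN_RATIO (5) and IWL_SPARSE_EBS_SCAN_RATIO (1) values mean roughly: the dense setting asks the firmware for a full (non-EBS) sweep every fifth iteration, while a ratio of 1 disables EBS for that channel_opt entry. A small stand-alone sketch of that documented interpretation follows; the iteration numbering and the modulo choice are assumptions made for illustration, not a statement of firmware behaviour.

#include <stdio.h>
#include <stdbool.h>

#define IWL_DENSE_EBS_SCAN_RATIO	5	/* every 5th iteration is a full scan */
#define IWL_SPARSE_EBS_SCAN_RATIO	1	/* 1 means EBS is disabled entirely */

/* Rough reading of the non_ebs_ratio kernel-doc: ratio 1 disables EBS,
 * ratio N makes every Nth scan iteration a full (non-EBS) scan. */
static bool iteration_is_full_scan(unsigned int iter, unsigned int non_ebs_ratio)
{
	if (non_ebs_ratio <= 1)
		return true;	/* EBS disabled: every iteration is a full scan */
	return (iter % non_ebs_ratio) == 0;
}

int main(void)
{
	for (unsigned int i = 1; i <= 10; i++)
		printf("iteration %2u: %s\n", i,
		       iteration_is_full_scan(i, IWL_DENSE_EBS_SCAN_RATIO) ?
		       "full scan" : "EBS scan");
	return 0;
}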
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (ieee80211_is_probe_resp(fc))
 		tx_flags |= TX_CMD_FLG_TSF;
-	else if (ieee80211_is_back_req(fc))
-		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
 	if (ieee80211_has_morefrags(fc))
 		tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+	} else if (ieee80211_is_back_req(fc)) {
+		struct ieee80211_bar *bar = (void *)skb->data;
+		u16 control = le16_to_cpu(bar->control);
+
+		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+		tx_cmd->tid_tspec = (control &
+				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
 	} else {
 		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
 		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
......
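The new else-if branch above assigns a BlockAckReq the TID carried in its BAR control field (along with the ACK and BAR flags) rather than the non-QoS TID; per the changelog this avoids a WARNING. The mask and shift are the standard ones from include/linux/ieee80211.h (the TID sits in the top four bits of the control word). A minimal stand-alone sketch of the same extraction; the sample control value is made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* From include/linux/ieee80211.h: the TID occupies bits 15:12 of the
 * BlockAckReq BAR control field. */
#define IEEE80211_BAR_CTRL_TID_INFO_MASK	0xf000
#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT	12

/* Extract the TID the same way the new else-if branch does. */
static uint8_t bar_control_to_tid(uint16_t control)
{
	return (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
		IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
}

int main(void)
{
	uint16_t control = 0x5004;	/* made-up sample: TID 5, compressed BA */

	printf("BAR control 0x%04x -> TID %u\n", control,
	       bar_control_to_tid(control));
	return 0;
}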