Commit d5234cb2 authored by Emmanuel Grumbach

Merge remote-tracking branch 'iwlwifi-fixes/master' into iwlwifi-next

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-fw-file.h
	drivers/net/wireless/iwlwifi/mvm/scan.c
parents bd1ba664 2cee4762
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 10
#define IWL3160_UCODE_API_MAX 10
#define IWL7260_UCODE_API_MAX 12
#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -111,7 +111,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
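Note on the 7265D fix above: the *_MODULE_FIRMWARE(api) macros build the requested firmware filename by pasting the prefix together with the stringified API version, so before the fix the 7265D was asking for the plain 7265 image. Illustrative expansion only (macro names here are examples, not new driver code):

#include <linux/stringify.h>

#define EXAMPLE_7265D_FW_PRE	"iwlwifi-7265D-"
#define EXAMPLE_7265D_FW(api)	EXAMPLE_7265D_FW_PRE __stringify(api) ".ucode"

/* EXAMPLE_7265D_FW(12) expands to "iwlwifi-7265D-" "12" ".ucode",
 * i.e. the driver now requests "iwlwifi-7265D-12.ucode".
 */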
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 10
#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
@@ -1261,10 +1261,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
try_again:
/* try next, if any */
kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
kfree(pieces);
return;
out_free_fw:
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
#define FH_MEM_TB_MAX_LENGTH (0x00020000)
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
* the actual dwell time.
* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
* through the dedicated host command.
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
* @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
*/
enum iwl_ucode_tlv_api {
@@ -256,6 +257,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
};
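The firmware advertises these API bits in its TLVs and the driver tests them at runtime from ucode_capa. A minimal sketch of such a check for the new single-scan EBS bit (the helper name is hypothetical; the real check is done inline in the scan hunks further down):

static bool example_fw_has_single_scan_ebs(struct iwl_mvm *mvm)
{
	/* api[0] holds bits 0-31 of the advertised TLV API flags */
	return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS;
}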
@@ -675,6 +675,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
* @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -684,6 +685,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
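A minimal sketch (not driver code) of how the new MATCH flag pairs with PASS_ALL when the LMAC scheduled-scan flags are assembled, mirroring the iwl_mvm_unified_sched_scan_lmac hunk further down:

static u32 example_sched_scan_flags(bool pass_all)
{
	u32 flags = 0;

	if (pass_all)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;	/* forward every result */
	else
		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;		/* notify only on profile matches */

	return flags;
}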
@@ -1211,7 +1211,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
/* disallow low power states when the FW is down */
/*
* Disallow low power states when the FW is down by taking
* the UCODE_DOWN ref. in case of ongoing hw restart the
* ref is already taken, so don't take it again.
*/
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
@@ -1230,6 +1235,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
*/
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
mvm->ucode_loaded = false;
}
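The two comments added above describe a take-once/release-once balance around IWL_MVM_REF_UCODE_DOWN. A hedged sketch of that pattern (wrapper names are illustrative; iwl_mvm_ref/iwl_mvm_unref are the driver's real helpers):

static void example_fw_down(struct iwl_mvm *mvm)
{
	/* during a HW restart the ref was already taken when the firmware
	 * error was detected, so taking it again would unbalance the count
	 */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
}

static void example_fw_up(struct iwl_mvm *mvm)
{
	/* drop the ref once the firmware runs again, re-allowing low power */
	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
}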
@@ -3554,18 +3565,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
msk |= mvmsta->tfd_queue_msk;
}
if (drop) {
msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
if (iwl_mvm_flush_tx_path(mvm, msk, true))
IWL_ERR(mvm, "flush request fail\n");
mutex_unlock(&mvm->mutex);
} else {
mutex_unlock(&mvm->mutex);
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
*/
iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
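The comment in the else branch above spells out the locking rule behind this hunk: the flush command itself is sent under mvm->mutex, while the potentially long wait for the queues to drain happens only after the mutex is dropped. Illustrative fragment only, not driver code:

	mutex_lock(&mvm->mutex);
	/* ... short work that needs the lock: build msk, send the flush cmd ... */
	mutex_unlock(&mvm->mutex);

	/* the long drain wait runs lock-free so other operations can proceed */
	iwl_trans_wait_tx_queue_empty(mvm->trans, msk);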
@@ -1124,6 +1124,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0;
if (iwl_mvm_is_radio_killed(mvm))
goto out;
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1160,6 +1166,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1316,22 +1323,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1406,6 +1397,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
@@ -1478,6 +1485,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
else
flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1509,6 +1518,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF;
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
tx_cmd->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
/* tid_tspec will default to 0 = BE when QOS isn't enabled */
/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
else
ac = tid_to_mac80211_ac[0];
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
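Worked example for the new BlockAckReq branch above (frame value hypothetical, types from linux/ieee80211.h): the TID sits in bits 12-15 of the BAR control field, so a control word of 0x2004 yields TID 2, which tid_to_mac80211_ac[] maps to the BK access category:

static u8 example_bar_tid(const struct ieee80211_bar *bar)
{
	u16 control = le16_to_cpu(bar->control);

	return (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
	       IEEE80211_BAR_CTRL_TID_INFO_SHIFT;	/* 0x2004 -> TID 2 */
}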
@@ -920,6 +931,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
sta_id = ba_notif->sta_id;
tid = ba_notif->tid;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return 0;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
@@ -664,7 +664,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return false;
if (!mvm->cfg->rx_with_siso_diversity)
if (mvm->cfg->rx_with_siso_diversity)
return false;
ieee80211_iterate_active_interfaces_atomic(
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -525,8 +529,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
#endif
pci_set_drvdata(pdev, iwl_trans);
@@ -629,7 +629,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
u32 offset, chunk_sz = section->len;
u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
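The clamp above means a section is no longer handed to the device as one oversized block: the copy/DMA loop walks it in pieces of at most FH_MEM_TB_MAX_LENGTH bytes. A simplified sketch of that chunking (assumption: not the driver's exact loop, which also programs the load-firmware-chunk DMA per piece):

	u32 copy_sz;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		copy_sz = min_t(u32, chunk_sz, section->len - offset);
		memcpy(v_addr, (const u8 *)section->data + offset, copy_sz);
		/* ... kick the DMA for this chunk and wait for it to finish ... */
	}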
@@ -1034,16 +1034,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(20);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
* This is a bug in certain versions of the hardware.
* Certain devices also keep sending HW RF kill interrupt all
* the time, unless the interrupt is ACKed even if the interrupt
* should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
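For reference, the "re-ACK all the interrupts" step the new comment calls for boils down to masking every cause and acknowledging whatever is still latched, so a stuck HW RF-kill interrupt cannot fire again after the SW reset. A hedged sketch mirroring the PCIe transport's interrupt-disable path (helper name illustrative):

static void example_silence_and_ack_irqs(struct iwl_trans *trans)
{
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);		/* mask all causes */
	iwl_write32(trans, CSR_INT, 0xffffffff);		/* ack pending CSR causes */
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);	/* ack pending FH causes */
}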