Commit 65400a53 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2015-01-22' of...

Merge tag 'iwlwifi-next-for-kalle-2015-01-22' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* more work for new devices (4165 / 8260)
* cleanups / improvements in rate control
* fixes for TDLS
* major statistics work from Johannes - more to come
* improvements for the fw error dump infrastructure
* usual amount of small fixes here and there (scan, D0i3 etc...)
parents 6b03e32d 0b83795a
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -111,7 +111,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
...
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
@@ -84,6 +84,8 @@
/* Memory offsets and lengths */
#define IWL8260_DCCM_OFFSET 0x800000
#define IWL8260_DCCM_LEN 0x18000
+#define IWL8260_DCCM2_OFFSET 0x880000
+#define IWL8260_DCCM2_LEN 0x8000
#define IWL8260_SMEM_OFFSET 0x400000
#define IWL8260_SMEM_LEN 0x68000
@@ -134,6 +136,8 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
+.dccm2_offset = IWL8260_DCCM2_OFFSET, \
+.dccm2_len = IWL8260_DCCM2_LEN, \
.smem_offset = IWL8260_SMEM_OFFSET, \
.smem_len = IWL8260_SMEM_LEN
@@ -156,6 +160,16 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
+const struct iwl_cfg iwl4165_2ac_cfg = {
+.name = "Intel(R) Dual Band Wireless AC 4165",
+.fw_name_pre = IWL8000_FW_PRE,
+IWL_DEVICE_8000,
+.ht_params = &iwl8000_ht_params,
+.nvm_ver = IWL8000_NVM_VERSION,
+.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
...
@@ -263,6 +263,8 @@ struct iwl_pwr_tx_backoff {
* station can receive in VHT
* @dccm_offset: offset from which DCCM begins
* @dccm_len: length of DCCM (including runtime stack CCM)
+* @dccm2_offset: offset from which the second DCCM begins
+* @dccm2_len: length of the second DCCM
* @smem_offset: offset from which the SMEM begins
* @smem_len: the length of SMEM
*
@@ -310,6 +312,8 @@ struct iwl_cfg {
unsigned int max_vht_ampdu_exponent;
const u32 dccm_offset;
const u32 dccm_len;
+const u32 dccm2_offset;
+const u32 dccm2_len;
const u32 smem_offset;
const u32 smem_len;
};
@@ -378,6 +382,7 @@ extern const struct iwl_cfg iwl7265d_2n_cfg;
extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
...
@@ -307,6 +307,7 @@
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
+SILICON_C_STEP,
};
...
@@ -1490,7 +1490,7 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
-bool, S_IRUGO);
+bool, S_IRUGO | S_IWUSR);
#ifdef CONFIG_IWLWIFI_UAPSD
MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
#else
...
@@ -134,6 +134,27 @@ struct iwl_fw_error_dump_txcmd {
u8 data[];
} __packed;
/**
* struct iwl_fw_error_dump_fifo - RX/TX FIFO data
* @fifo_num: number of FIFO (starting from 0)
* @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
* @wr_ptr: position of write pointer
* @rd_ptr: position of read pointer
* @fence_ptr: position of fence pointer
* @fence_mode: the current mode of the fence (before locking) -
* 0=follow RD pointer ; 1 = freeze
* @data: all of the FIFO's data
*/
struct iwl_fw_error_dump_fifo {
__le32 fifo_num;
__le32 available_bytes;
__le32 wr_ptr;
__le32 rd_ptr;
__le32 fence_ptr;
__le32 fence_mode;
u8 data[];
} __packed;
enum iwl_fw_error_dump_family {
IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
...
@@ -235,25 +235,34 @@ enum iwl_ucode_tlv_flag {
/**
* enum iwl_ucode_tlv_api - ucode api
-* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
-* @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
-* @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
* @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
* @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+* IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
+* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+* regardless of the band or the number of the probes. FW will calculate
+* the actual dwell time.
+* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
+* through the dedicated host command.
+* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
+* @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
+* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
*/
enum iwl_ucode_tlv_api {
-IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
-IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
-IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
+IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
+IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
+IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
+IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
+IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
};
/**
...
@@ -193,11 +193,15 @@ void iwl_force_nmi(struct iwl_trans *trans)
* DEVICE_SET_NMI_8000B_REG - is used.
*/
if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
-(CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
-iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
-else
+(CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) {
+iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+DEVICE_SET_NMI_VAL_DRV);
+iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+DEVICE_SET_NMI_VAL_HW);
+} else {
iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
DEVICE_SET_NMI_8000B_VAL);
+}
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
...
@@ -108,7 +108,8 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
-#define DEVICE_SET_NMI_VAL 0x1
+#define DEVICE_SET_NMI_VAL_HW BIT(0)
+#define DEVICE_SET_NMI_VAL_DRV BIT(7)
#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
#define DEVICE_SET_NMI_8000B_VAL 0x1000000
@@ -359,12 +360,30 @@ enum secure_load_status_reg {
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
#define RXF_RD_D_SPACE (0xa00c40)
#define RXF_RD_WR_PTR (0xa00c50)
#define RXF_RD_RD_PTR (0xa00c54)
#define RXF_RD_FENCE_PTR (0xa00c4c)
#define RXF_SET_FENCE_MODE (0xa00c14)
#define RXF_LD_WR2FENCE (0xa00c1c)
#define RXF_FIFO_RD_FENCE_INC (0xa00c68)
#define RXF_SIZE_BYTE_CND_POS (7)
#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
#define RXF_DIFF_FROM_PREV (0x200)
#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
/* Tx FIFO */
#define TXF_FIFO_ITEM_CNT (0xa00438)
#define TXF_WR_PTR (0xa00414)
#define TXF_RD_PTR (0xa00410)
#define TXF_FENCE_PTR (0xa00418)
#define TXF_LOCK_FENCE (0xa00424)
#define TXF_LARC_NUM (0xa0043c)
#define TXF_READ_MODIFY_DATA (0xa00448)
#define TXF_READ_MODIFY_ADDR (0xa0044c)
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
#define MON_BUFF_BASE_ADDR (0xa03c3c)
...
@@ -342,7 +342,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 12,
.lut20 = {
-cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -363,7 +363,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 20,
.lut20 = {
-cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -384,7 +384,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 21,
.lut20 = {
-cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -405,7 +405,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 23,
.lut20 = {
-cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -426,7 +426,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 27,
.lut20 = {
-cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -447,7 +447,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 30,
.lut20 = {
-cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -468,7 +468,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 32,
.lut20 = {
-cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -489,7 +489,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 33,
.lut20 = {
-cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
...
@@ -330,7 +330,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 12,
.lut20 = {
-cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -351,7 +351,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 20,
.lut20 = {
-cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -372,7 +372,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 21,
.lut20 = {
-cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -393,7 +393,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 23,
.lut20 = {
-cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -414,7 +414,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 27,
.lut20 = {
-cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -435,7 +435,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 30,
.lut20 = {
-cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -456,7 +456,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 32,
.lut20 = {
-cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -477,7 +477,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 33,
.lut20 = {
-cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
...
@@ -94,21 +94,22 @@
#define IWL_MVM_BT_COEX_MPLUT 1
#define IWL_MVM_BT_COEX_RRC 1
#define IWL_MVM_BT_COEX_TTC 1
-#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
#define IWL_MVM_QUOTA_THRESHOLD 8
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
-#define IWL_MVM_RS_DISABLE_MIMO 0
+#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_LEGACY_RETRIES_PER_RATE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
-#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 16
+#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
...
@@ -517,6 +517,34 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[20];
int len;
len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
bool ret;
mutex_lock(&mvm->mutex);
ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
mutex_unlock(&mvm->mutex);
return ret ? count : -EINVAL;
}
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -531,6 +559,7 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -564,6 +593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
+S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
...
@@ -654,10 +654,10 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-#define PRINT_STATS_LE32(_str, _val) \
+#define PRINT_STATS_LE32(_struct, _memb) \
pos += scnprintf(buf + pos, bufsz - pos, \
-fmt_table, _str, \
-le32_to_cpu(_val))
+fmt_table, #_memb, \
+le32_to_cpu(_struct->_memb))
static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
char __user *user_buf, size_t count,
@@ -692,97 +692,89 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - OFDM");
-PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
-PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
-PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
-PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
-PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
-PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
-PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
-PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
-PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
-PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
-PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
-PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
-PRINT_STATS_LE32("rxe_frame_lmt_overrun",
-ofdm->rxe_frame_limit_overrun);
-PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
-PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
-PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
-PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
-PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
-PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
-PRINT_STATS_LE32("reserved", ofdm->reserved);
+PRINT_STATS_LE32(ofdm, ina_cnt);
+PRINT_STATS_LE32(ofdm, fina_cnt);
+PRINT_STATS_LE32(ofdm, plcp_err);
+PRINT_STATS_LE32(ofdm, crc32_err);
+PRINT_STATS_LE32(ofdm, overrun_err);
+PRINT_STATS_LE32(ofdm, early_overrun_err);
+PRINT_STATS_LE32(ofdm, crc32_good);
+PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+PRINT_STATS_LE32(ofdm, sfd_timeout);
+PRINT_STATS_LE32(ofdm, fina_timeout);
+PRINT_STATS_LE32(ofdm, unresponded_rts);
+PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+PRINT_STATS_LE32(ofdm, dsp_self_kill);
+PRINT_STATS_LE32(ofdm, mh_format_err);
+PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+PRINT_STATS_LE32(ofdm, reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - CCK");
-PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
-PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
-PRINT_STATS_LE32("plcp_err", cck->plcp_err);
-PRINT_STATS_LE32("crc32_err", cck->crc32_err);
-PRINT_STATS_LE32("overrun_err", cck->overrun_err);
-PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
-PRINT_STATS_LE32("crc32_good", cck->crc32_good);
-PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
-PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
-PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
-PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
-PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
-PRINT_STATS_LE32("rxe_frame_lmt_overrun",
-cck->rxe_frame_limit_overrun);
-PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
-PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
-PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
-PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
-PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
-PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
-PRINT_STATS_LE32("reserved", cck->reserved);
+PRINT_STATS_LE32(cck, ina_cnt);
+PRINT_STATS_LE32(cck, fina_cnt);
+PRINT_STATS_LE32(cck, plcp_err);
+PRINT_STATS_LE32(cck, crc32_err);
+PRINT_STATS_LE32(cck, overrun_err);
+PRINT_STATS_LE32(cck, early_overrun_err);
+PRINT_STATS_LE32(cck, crc32_good);
+PRINT_STATS_LE32(cck, false_alarm_cnt);
+PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+PRINT_STATS_LE32(cck, sfd_timeout);
+PRINT_STATS_LE32(cck, fina_timeout);
+PRINT_STATS_LE32(cck, unresponded_rts);
+PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+PRINT_STATS_LE32(cck, sent_ack_cnt);
+PRINT_STATS_LE32(cck, sent_cts_cnt);
+PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+PRINT_STATS_LE32(cck, dsp_self_kill);
+PRINT_STATS_LE32(cck, mh_format_err);
+PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+PRINT_STATS_LE32(cck, reserved);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - GENERAL");
-PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
-PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
-PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
-PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
-PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
-PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
-PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
-PRINT_STATS_LE32("adc_rx_saturation_time",
-general->adc_rx_saturation_time);
-PRINT_STATS_LE32("ina_detection_search_time",
-general->ina_detection_search_time);
-PRINT_STATS_LE32("beacon_silence_rssi_a",
-general->beacon_silence_rssi_a);
-PRINT_STATS_LE32("beacon_silence_rssi_b",
-general->beacon_silence_rssi_b);
-PRINT_STATS_LE32("beacon_silence_rssi_c",
-general->beacon_silence_rssi_c);
-PRINT_STATS_LE32("interference_data_flag",
-general->interference_data_flag);
-PRINT_STATS_LE32("channel_load", general->channel_load);
-PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
-PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
-PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
-PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
-PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
-PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
-PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
-PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
-PRINT_STATS_LE32("mac_id", general->mac_id);
-PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
+PRINT_STATS_LE32(general, bogus_cts);
+PRINT_STATS_LE32(general, bogus_ack);
+PRINT_STATS_LE32(general, non_bssid_frames);
+PRINT_STATS_LE32(general, filtered_frames);
+PRINT_STATS_LE32(general, non_channel_beacons);
+PRINT_STATS_LE32(general, channel_beacons);
+PRINT_STATS_LE32(general, num_missed_bcon);
+PRINT_STATS_LE32(general, adc_rx_saturation_time);
+PRINT_STATS_LE32(general, ina_detection_search_time);
+PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+PRINT_STATS_LE32(general, interference_data_flag);
+PRINT_STATS_LE32(general, channel_load);
+PRINT_STATS_LE32(general, dsp_false_alarms);
+PRINT_STATS_LE32(general, beacon_rssi_a);
+PRINT_STATS_LE32(general, beacon_rssi_b);
+PRINT_STATS_LE32(general, beacon_rssi_c);
+PRINT_STATS_LE32(general, beacon_energy_a);
+PRINT_STATS_LE32(general, beacon_energy_b);
+PRINT_STATS_LE32(general, beacon_energy_c);
+PRINT_STATS_LE32(general, num_bt_kills);
+PRINT_STATS_LE32(general, mac_id);
+PRINT_STATS_LE32(general, directed_data_mpdu);
pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
"Statistics_Rx - HT");
-PRINT_STATS_LE32("plcp_err", ht->plcp_err);
-PRINT_STATS_LE32("overrun_err", ht->overrun_err);
-PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
-PRINT_STATS_LE32("crc32_good", ht->crc32_good);
-PRINT_STATS_LE32("crc32_err", ht->crc32_err);
-PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
-PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
-PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
-PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
-PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
+PRINT_STATS_LE32(ht, plcp_err);
+PRINT_STATS_LE32(ht, overrun_err);
+PRINT_STATS_LE32(ht, early_overrun_err);
+PRINT_STATS_LE32(ht, crc32_good);
+PRINT_STATS_LE32(ht, crc32_err);
+PRINT_STATS_LE32(ht, mh_format_err);
+PRINT_STATS_LE32(ht, agg_crc32_good);
+PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+PRINT_STATS_LE32(ht, agg_cnt);
+PRINT_STATS_LE32(ht, unsupport_mcs);
mutex_unlock(&mvm->mutex);
@@ -988,9 +980,14 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
-mutex_lock(&mvm->mutex);
+int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+if (ret)
+return ret;
iwl_mvm_fw_dbg_collect(mvm);
-mutex_unlock(&mvm->mutex);
+iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
return count;
}
@@ -1390,6 +1387,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
+PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1489,6 +1487,26 @@ iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
return count;
}
static ssize_t iwl_dbgfs_enable_scan_iteration_notif_write(struct iwl_mvm *mvm,
char *buf,
size_t count,
loff_t *ppos)
{
int val;
mutex_lock(&mvm->mutex);
if (kstrtoint(buf, 10, &val)) {
mutex_unlock(&mvm->mutex);
return -EINVAL;
}
mvm->scan_iter_notif_enabled = val;
mutex_unlock(&mvm->mutex);
return count;
}
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
@@ -1511,6 +1529,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(enable_scan_iteration_notif, 8);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1554,6 +1573,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+MVM_DEBUGFS_ADD_FILE(enable_scan_iteration_notif, mvm->debugfs_dir,
+S_IWUSR);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
...
@@ -91,15 +91,33 @@ enum iwl_ltr_config_flags {
LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
};
/**
* struct iwl_ltr_config_cmd_v1 - configures the LTR
* @flags: See %enum iwl_ltr_config_flags
*/
struct iwl_ltr_config_cmd_v1 {
__le32 flags;
__le32 static_long;
__le32 static_short;
} __packed; /* LTR_CAPABLE_API_S_VER_1 */
#define LTR_VALID_STATES_NUM 4
/**
* struct iwl_ltr_config_cmd - configures the LTR
* @flags: See %enum iwl_ltr_config_flags
+* @static_long:
+* @static_short:
+* @ltr_cfg_values:
+* @ltr_short_idle_timeout:
*/
struct iwl_ltr_config_cmd {
__le32 flags;
__le32 static_long;
__le32 static_short;
-} __packed;
+__le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
+__le32 ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
/* Radio LP RX Energy Threshold measured in dBm */
#define POWER_LPRX_RSSI_THRESHOLD 75
...
@@ -308,6 +308,17 @@
#define LQ_FLAG_DYNAMIC_BW_POS 6
#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
/* Single Stream Parameters
* SS_STBC/BFER_ALLOWED - Controls whether STBC or Beamformer (BFER) is allowed
* ucode will make a smart decision between SISO/STBC/BFER
* SS_PARAMS_VALID - if not set ignore the ss_params field.
*/
enum {
RS_SS_STBC_ALLOWED = BIT(0),
RS_SS_BFER_ALLOWED = BIT(1),
RS_SS_PARAMS_VALID = BIT(31),
};
/**
* struct iwl_lq_cmd - link quality command
* @sta_id: station to update
@@ -330,7 +341,7 @@ enum {
* 2 - 0x3f: maximal number of frames (up to 3f == 63)
* @rs_table: array of rates for each TX try, each is rate_n_flags,
* meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
-* @bf_params: beam forming params, currently not used
+* @ss_params: single stream features. declare whether STBC or BFER are allowed.
*/
struct iwl_lq_cmd {
u8 sta_id;
@@ -348,6 +359,6 @@ struct iwl_lq_cmd {
u8 agg_frame_cnt_limit;
__le32 reserved2;
__le32 rs_table[LQ_MAX_RETRY_NUM];
-__le32 bf_params;
+__le32 ss_params;
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
#endif /* __fw_api_rs_h__ */
@@ -675,6 +675,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
+* @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -684,6 +685,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
...
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __fw_api_stats_h__
#define __fw_api_stats_h__
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
struct mvm_statistics_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
__le32 rssi_ant;
__le32 reserved2;
} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
struct mvm_statistics_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
* doesn't belong to the STA BSSID */
__le32 filtered_frames; /* count frames that were dumped in the
* filtering process */
__le32 non_channel_beacons; /* beacons with our bss id but not on
* our serving channel */
__le32 channel_beacons; /* beacons with our bss id and in our
* serving channel */
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
__le32 ina_detection_search_time;/* total time (in 0.8us) searched
* for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
__le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
__le32 beacon_rssi_b;
__le32 beacon_rssi_c;
__le32 beacon_energy_a;
__le32 beacon_energy_b;
__le32 beacon_energy_c;
__le32 num_bt_kills;
__le32 mac_id;
__le32 directed_data_mpdu;
} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
struct mvm_statistics_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
__le32 crc32_err;
__le32 overrun_err;
__le32 early_overrun_err;
__le32 crc32_good;
__le32 false_alarm_cnt;
__le32 fina_sync_err_cnt;
__le32 sfd_timeout;
__le32 fina_timeout;
__le32 unresponded_rts;
__le32 rxe_frame_lmt_overrun;
__le32 sent_ack_cnt;
__le32 sent_cts_cnt;
__le32 sent_ba_rsp_cnt;
__le32 dsp_self_kill;
__le32 mh_format_err;
__le32 re_acq_main_rssi_sum;
__le32 reserved;
} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
struct mvm_statistics_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
__le32 crc32_good;
__le32 crc32_err;
__le32 mh_format_err;
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
__le32 unsupport_mcs;
} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
struct mvm_statistics_tx_non_phy {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
__le32 bt_prio_kill_cnt;
__le32 few_bytes_cnt;
__le32 cts_timeout;
__le32 ack_timeout;
__le32 expected_ack_cnt;
__le32 actual_ack_cnt;
__le32 dump_msdu_cnt;
__le32 burst_abort_next_frame_mismatch_cnt;
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
#define MAX_CHAINS 3
struct mvm_statistics_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
__le32 scd_query_no_agg;
__le32 scd_query_agg;
__le32 scd_query_mismatch;
__le32 frame_not_ready;
__le32 underrun;
__le32 bt_prio_kill;
__le32 rx_ba_rsp_cnt;
__s8 txpower[MAX_CHAINS];
__s8 reserved;
__le32 reserved2;
} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
struct mvm_statistics_tx_channel_width {
__le32 ext_cca_narrow_ch20[1];
__le32 ext_cca_narrow_ch40[2];
__le32 ext_cca_narrow_ch80[3];
__le32 ext_cca_narrow_ch160[4];
__le32 last_tx_ch_width_indx;
__le32 rx_detected_per_ch_width[4];
__le32 success_per_ch_width[4];
__le32 fail_per_ch_width[4];
}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
struct mvm_statistics_tx {
struct mvm_statistics_tx_non_phy general;
struct mvm_statistics_tx_non_phy_agg agg;
struct mvm_statistics_tx_channel_width channel_width;
} __packed; /* STATISTICS_TX_API_S_VER_4 */
struct mvm_statistics_bt_activity {
__le32 hi_priority_tx_req_cnt;
__le32 hi_priority_tx_denied_cnt;
__le32 lo_priority_tx_req_cnt;
__le32 lo_priority_tx_denied_cnt;
__le32 hi_priority_rx_req_cnt;
__le32 hi_priority_rx_denied_cnt;
__le32 lo_priority_rx_req_cnt;
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
struct mvm_statistics_general {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
struct mvm_statistics_div slow_div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
* count the number of times we have to re-tune
* in order to get out of bad PHY status
*/
__le32 num_of_sos_states;
__le32 beacon_filtered;
__le32 missed_beacons;
__s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
__le32 beacon_filter_delta_time;
struct mvm_statistics_bt_activity bt_activity;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
struct mvm_statistics_rx_non_phy general;
struct mvm_statistics_rx_ht_phy ofdm_ht;
} __packed; /* STATISTICS_RX_API_S_VER_3 */
/*
* STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* REPLY_STATISTICS_CMD 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
* cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
* 0x9c with CLEAR_STATS bit set (see above).
*
* uCode also issues this notification during scans. uCode clears statistics
* appropriately so that each notification contains statistics for only the
* one channel that has just been scanned.
*/
struct iwl_notif_statistics {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general general;
} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
#endif /* __fw_api_stats_h__ */
@@ -592,4 +592,43 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
tx_resp->frame_count) & 0xfff;
}
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token:
* @sta_id: station id
* @tid:
* @scd_queue: scheduler queue to confiug
* @enable: 1 queue enable, 0 queue disable
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: %enum iwl_mvm_tx_fifo
* @window: BA window size
* @ssn: SSN for the BA agreement
*/
struct iwl_scd_txq_cfg_cmd {
u8 token;
u8 sta_id;
u8 tid;
u8 scd_queue;
u8 enable;
u8 aggregate;
u8 tx_fifo;
u8 window;
__le16 ssn;
__le16 reserved;
} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
/**
* struct iwl_scd_txq_cfg_rsp
* @token: taken from the command
* @sta_id: station id from the command
* @tid: tid from the command
* @scd_queue: scd_queue from the command
*/
struct iwl_scd_txq_cfg_rsp {
u8 token;
u8 sta_id;
u8 tid;
u8 scd_queue;
} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
#endif /* __fw_api_tx_h__ */
@@ -74,6 +74,7 @@
#include "fw-api-d3.h"
#include "fw-api-coex.h"
#include "fw-api-scan.h"
+#include "fw-api-stats.h"
/* Tx queue numbers */
enum {
@@ -128,6 +129,9 @@ enum {
/* global key */
WEP_KEY = 0x20,
+/* Memory */
+SHARED_MEM_CFG = 0x25,
/* TDLS */
TDLS_CHANNEL_SWITCH_CMD = 0x27,
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
@@ -1381,214 +1385,6 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
struct mvm_statistics_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
__le32 rssi_ant;
__le32 reserved2;
} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
struct mvm_statistics_general_common {
__le32 temperature; /* radio temperature */
__le32 temperature_m; /* radio voltage */
struct mvm_statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
struct mvm_statistics_div div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
* count the number of times we have to re-tune
* in order to get out of bad PHY status
*/
__le32 num_of_sos_states;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
struct mvm_statistics_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
* doesn't belong to the STA BSSID */
__le32 filtered_frames; /* count frames that were dumped in the
* filtering process */
__le32 non_channel_beacons; /* beacons with our bss id but not on
* our serving channel */
__le32 channel_beacons; /* beacons with our bss id and in our
* serving channel */
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
__le32 ina_detection_search_time;/* total time (in 0.8us) searched
* for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
__le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
__le32 beacon_rssi_b;
__le32 beacon_rssi_c;
__le32 beacon_energy_a;
__le32 beacon_energy_b;
__le32 beacon_energy_c;
__le32 num_bt_kills;
__le32 mac_id;
__le32 directed_data_mpdu;
} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
struct mvm_statistics_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
__le32 crc32_err;
__le32 overrun_err;
__le32 early_overrun_err;
__le32 crc32_good;
__le32 false_alarm_cnt;
__le32 fina_sync_err_cnt;
__le32 sfd_timeout;
__le32 fina_timeout;
__le32 unresponded_rts;
__le32 rxe_frame_limit_overrun;
__le32 sent_ack_cnt;
__le32 sent_cts_cnt;
__le32 sent_ba_rsp_cnt;
__le32 dsp_self_kill;
__le32 mh_format_err;
__le32 re_acq_main_rssi_sum;
__le32 reserved;
} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
struct mvm_statistics_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
__le32 crc32_good;
__le32 crc32_err;
__le32 mh_format_err;
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
__le32 unsupport_mcs;
} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
#define MAX_CHAINS 3
struct mvm_statistics_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
__le32 scd_query_no_agg;
__le32 scd_query_agg;
__le32 scd_query_mismatch;
__le32 frame_not_ready;
__le32 underrun;
__le32 bt_prio_kill;
__le32 rx_ba_rsp_cnt;
__s8 txpower[MAX_CHAINS];
__s8 reserved;
__le32 reserved2;
} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
struct mvm_statistics_tx_channel_width {
__le32 ext_cca_narrow_ch20[1];
__le32 ext_cca_narrow_ch40[2];
__le32 ext_cca_narrow_ch80[3];
__le32 ext_cca_narrow_ch160[4];
__le32 last_tx_ch_width_indx;
__le32 rx_detected_per_ch_width[4];
__le32 success_per_ch_width[4];
__le32 fail_per_ch_width[4];
}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
struct mvm_statistics_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
__le32 bt_prio_kill_cnt;
__le32 few_bytes_cnt;
__le32 cts_timeout;
__le32 ack_timeout;
__le32 expected_ack_cnt;
__le32 actual_ack_cnt;
__le32 dump_msdu_cnt;
__le32 burst_abort_next_frame_mismatch_cnt;
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
struct mvm_statistics_tx_non_phy_agg agg;
struct mvm_statistics_tx_channel_width channel_width;
} __packed; /* STATISTICS_TX_API_S_VER_4 */
struct mvm_statistics_bt_activity {
__le32 hi_priority_tx_req_cnt;
__le32 hi_priority_tx_denied_cnt;
__le32 lo_priority_tx_req_cnt;
__le32 lo_priority_tx_denied_cnt;
__le32 hi_priority_rx_req_cnt;
__le32 hi_priority_rx_denied_cnt;
__le32 lo_priority_rx_req_cnt;
__le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_filtered;
__le32 missed_beacons;
__s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
__le32 beacon_filter_delta_time;
struct mvm_statistics_bt_activity bt_activity;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
struct mvm_statistics_rx_non_phy general;
struct mvm_statistics_rx_ht_phy ofdm_ht;
} __packed; /* STATISTICS_RX_API_S_VER_3 */
/*
* STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* REPLY_STATISTICS_CMD 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
* cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
* 0x9c with CLEAR_STATS bit set (see above).
*
* uCode also issues this notification during scans. uCode clears statistics
* appropriately so that each notification contains statistics for only the
* one channel that has just been scanned.
*/
struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general general;
} __packed;
/*********************************** /***********************************
* Smart Fifo API * Smart Fifo API
***********************************/ ***********************************/
...@@ -1680,63 +1476,6 @@ struct iwl_dts_measurement_notif { ...@@ -1680,63 +1476,6 @@ struct iwl_dts_measurement_notif {
__le32 voltage; __le32 voltage;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
/**
* enum iwl_scd_control - scheduler config command control flags
* @IWL_SCD_CONTROL_RM_TID: remove TID from this queue
* @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW
*/
enum iwl_scd_control {
IWL_SCD_CONTROL_RM_TID = BIT(4),
IWL_SCD_CONTROL_SET_SSN = BIT(5),
};
/**
* enum iwl_scd_flags - scheduler config command flags
* @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue
* @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue
* @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled
*/
enum iwl_scd_flags {
IWL_SCD_FLAGS_SHARE_TID = BIT(0),
IWL_SCD_FLAGS_SHARE_RA = BIT(1),
IWL_SCD_FLAGS_DQA_ENABLED = BIT(2),
};
#define IWL_SCDQ_INVALID_STA 0xff
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token: dialog token addba - unused legacy
* @sta_id: station id 4-bit
* @tid: TID 0..7
* @scd_queue: TFD queue num 0 .. 31
* @enable: 1 queue enable, 0 queue disable
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: tx fifo num 0..7
* @window: up to 64
* @ssn: starting seq num 12-bit
* @control: command control flags
* @flags: flags - see &enum iwl_scd_flags
*
* Note that every time the command is sent, all parameters must
* be filled with the exception of
* - the SSN, which is only used with @IWL_SCD_CONTROL_SET_SSN
* - the window, which is only relevant when starting aggregation
*/
struct iwl_scd_txq_cfg_cmd {
u8 token;
u8 sta_id;
u8 tid;
u8 scd_queue;
u8 enable;
u8 aggregate;
u8 tx_fifo;
u8 window;
__le16 ssn;
u8 control;
u8 flags;
} __packed;
/*********************************** /***********************************
* TDLS API * TDLS API
***********************************/ ***********************************/
...@@ -1878,4 +1617,36 @@ struct iwl_tdls_config_res { ...@@ -1878,4 +1617,36 @@ struct iwl_tdls_config_res {
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#define TX_FIFO_MAX_NUM 8
#define RX_FIFO_MAX_NUM 2
/**
* struct iwl_shared_mem_cfg - shared memory configuration information from the FW
*
* @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
* accessible)
* @shared_mem_size: shared memory size
* @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
* 0x0 as accessible only via DBGM RDAT)
* @sample_buff_size: internal sample buff size
* @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
* 8000 HW set to 0x0 as not accessible)
* @txfifo_size: size of TXF0 ... TXF7
* @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of the buffer pointed to by @page_buff_addr
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr;
__le32 page_buff_size;
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
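/*
 * Illustrative sketch only, not part of this change: the per-FIFO sizes
 * reported in this structure are what the fw error dump path later in
 * this series uses to size its buffer.  Summing the non-empty FIFOs
 * could look like this (mem_cfg is a hypothetical pointer to the
 * response payload):
 */
#if 0	/* example only */
	u32 fifo_bytes = 0;
	int i;

	for (i = 0; i < TX_FIFO_MAX_NUM; i++)
		fifo_bytes += le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < RX_FIFO_MAX_NUM; i++)
		fifo_bytes += le32_to_cpu(mem_cfg->rxfifo_size[i]);
#endif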
#endif /* __fw_api_h__ */ #endif /* __fw_api_h__ */
...@@ -400,10 +400,59 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ...@@ -400,10 +400,59 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
return ret; return ret;
} }
void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm) static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{ {
struct iwl_host_cmd cmd = {
.id = SHARED_MEM_CFG,
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
struct iwl_rx_packet *pkt;
struct iwl_shared_mem_cfg *mem_cfg;
u32 i;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
pkt->hdr.flags);
goto exit;
}
mem_cfg = (void *)pkt->data;
mvm->shared_mem_cfg.shared_mem_addr =
le32_to_cpu(mem_cfg->shared_mem_addr);
mvm->shared_mem_cfg.shared_mem_size =
le32_to_cpu(mem_cfg->shared_mem_size);
mvm->shared_mem_cfg.sample_buff_addr =
le32_to_cpu(mem_cfg->sample_buff_addr);
mvm->shared_mem_cfg.sample_buff_size =
le32_to_cpu(mem_cfg->sample_buff_size);
mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
mvm->shared_mem_cfg.page_buff_addr =
le32_to_cpu(mem_cfg->page_buff_addr);
mvm->shared_mem_cfg.page_buff_size =
le32_to_cpu(mem_cfg->page_buff_size);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
exit:
iwl_free_resp(&cmd);
}
void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
{
/* stop recording */ /* stop recording */
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
...@@ -412,11 +461,7 @@ void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm) ...@@ -412,11 +461,7 @@ void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
} }
iwl_mvm_fw_error_dump(mvm); schedule_work(&mvm->fw_error_dump_wk);
/* start recording again */
WARN_ON_ONCE(mvm->fw->dbg_dest_tlv &&
iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
} }
int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id) int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
...@@ -454,6 +499,35 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id) ...@@ -454,6 +499,35 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
return ret; return ret;
} }
static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
{
struct iwl_ltr_config_cmd_v1 cmd_v1 = {
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
if (!mvm->trans->ltr_enabled)
return 0;
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
sizeof(cmd_v1), &cmd_v1);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
struct iwl_ltr_config_cmd cmd = {
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
if (!mvm->trans->ltr_enabled)
return 0;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
return iwl_mvm_config_ltr_v1(mvm);
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
sizeof(cmd), &cmd);
}
int iwl_mvm_up(struct iwl_mvm *mvm) int iwl_mvm_up(struct iwl_mvm *mvm)
{ {
int ret, i; int ret, i;
...@@ -501,6 +575,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ...@@ -501,6 +575,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error; goto error;
} }
iwl_mvm_get_shared_mem_conf(mvm);
ret = iwl_mvm_sf_update(mvm, NULL, false); ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret) if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
...@@ -557,14 +633,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ...@@ -557,14 +633,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Initialize tx backoffs to the minimal possible */ /* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0); iwl_mvm_tt_tx_backoff(mvm, 0);
if (mvm->trans->ltr_enabled) { WARN_ON(iwl_mvm_config_ltr(mvm));
struct iwl_ltr_config_cmd cmd = {
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
sizeof(cmd), &cmd));
}
ret = iwl_mvm_power_update_device(mvm); ret = iwl_mvm_power_update_device(mvm);
if (ret) if (ret)
......
...@@ -208,8 +208,10 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) ...@@ -208,8 +208,10 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return BIT(IWL_MVM_OFFCHANNEL_QUEUE); return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
qmask |= BIT(vif->hw_queue[ac]); qmask |= BIT(vif->hw_queue[ac]);
}
if (vif->type == NL80211_IFTYPE_AP) if (vif->type == NL80211_IFTYPE_AP)
qmask |= BIT(vif->cab_queue); qmask |= BIT(vif->cab_queue);
...@@ -496,14 +498,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -496,14 +498,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) { switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE); iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
break; break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue); iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
/* fall through */ /* fall through */
default: default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]); iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
} }
} }
......
...@@ -761,6 +761,132 @@ static void iwl_mvm_free_coredump(const void *data) ...@@ -761,6 +761,132 @@ static void iwl_mvm_free_coredump(const void *data)
kfree(fw_error_dump); kfree(fw_error_dump);
} }
static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
int i, j;
if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
return;
/* Pull RXF data from all RXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
/*
* The RXF register blocks are spaced RXF_DIFF_FROM_PREV apart, so
* compute the offset of this RXF's registers from the first RXF
*/
u32 offset_diff = RXF_DIFF_FROM_PREV * i;
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;
/* Add a TLV for the RXF */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_D_SPACE +
offset_diff));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_WR_PTR +
offset_diff));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_RD_PTR +
offset_diff));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_FENCE_PTR +
offset_diff));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_SET_FENCE_MODE +
offset_diff));
/* Lock fence */
iwl_trans_write_prph(mvm->trans,
RXF_SET_FENCE_MODE + offset_diff, 0x1);
/* Set fence pointer to the same place as the WR pointer */
iwl_trans_write_prph(mvm->trans,
RXF_LD_WR2FENCE + offset_diff, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
0x0);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_INC +
offset_diff);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
/* Pull TXF data from all TXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;
/* Add a TLV for the FIFO */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FIFO_ITEM_CNT));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_WR_PTR));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_RD_PTR));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FENCE_PTR));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_LOCK_FENCE));
/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
TXF_WR_PTR);
/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
TXF_READ_MODIFY_DATA);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
iwl_trans_release_nic_access(mvm->trans, &flags);
}
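/*
 * Note added for clarity, not part of this change: each FIFO dumped
 * above becomes one TLV in the error dump file, laid out as a
 * struct iwl_fw_error_dump_data header (type IWL_FW_ERROR_DUMP_RXF or
 * IWL_FW_ERROR_DUMP_TXF, len = fifo header + payload), followed by a
 * struct iwl_fw_error_dump_fifo (fifo number, pointers, fence state)
 * and then the raw FIFO words.  This is why the sizing code in
 * iwl_mvm_fw_error_dump() adds sizeof(*dump_data) +
 * sizeof(struct iwl_fw_error_dump_fifo) for every non-empty FIFO.
 */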
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{ {
struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_file *dump_file;
...@@ -769,19 +895,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -769,19 +895,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_mem *dump_mem; struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_mvm_dump_ptrs *fw_error_dump; struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs; u32 sram_len, sram_ofs;
u32 file_len, rxf_len; u32 file_len, fifo_data_len = 0;
unsigned long flags;
int reg_val;
u32 smem_len = mvm->cfg->smem_len; u32 smem_len = mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
/* W/A for 8000 HW family A-step */ /* W/A for 8000 HW family A-step */
if (mvm->cfg->smem_len && if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) {
CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) if (smem_len)
smem_len = 0x38000; smem_len = 0x38000;
if (sram2_len)
sram2_len = 0x10000;
}
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump) if (!fw_error_dump)
return; return;
...@@ -798,23 +927,49 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -798,23 +927,49 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sram_len = mvm->cfg->dccm_len; sram_len = mvm->cfg->dccm_len;
} }
/* reading buffer size */ /* reading RXF/TXF sizes */
reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR); if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
rxf_len = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS; struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
int i;
fifo_data_len = 0;
/* Count RXF size */
for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
if (!mem_cfg->rxfifo_size[i])
continue;
/* Add header info */
fifo_data_len += mem_cfg->rxfifo_size[i] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
if (!mem_cfg->txfifo_size[i])
continue;
/* the register holds the value divided by 128 */ /* Add header info */
rxf_len = rxf_len << 7; fifo_data_len += mem_cfg->txfifo_size[i] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
file_len = sizeof(*dump_file) + file_len = sizeof(*dump_file) +
sizeof(*dump_data) * 3 + sizeof(*dump_data) * 2 +
sram_len + sizeof(*dump_mem) + sram_len + sizeof(*dump_mem) +
rxf_len + fifo_data_len +
sizeof(*dump_info); sizeof(*dump_info);
/* Make room for the SMEM, if it exists */ /* Make room for the SMEM, if it exists */
if (smem_len) if (smem_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
/* Make room for the secondary SRAM, if it exists */
if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
dump_file = vzalloc(file_len); dump_file = vzalloc(file_len);
if (!dump_file) { if (!dump_file) {
kfree(fw_error_dump); kfree(fw_error_dump);
...@@ -842,24 +997,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -842,24 +997,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(dump_info->bus_human_readable)); sizeof(dump_info->bus_human_readable));
dump_data = iwl_fw_error_next_data(dump_data); dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); /* We only dump the FIFOs if the FW is in error state */
dump_data->len = cpu_to_le32(rxf_len); if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
iwl_mvm_dump_fifos(mvm, &dump_data);
if (iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
u32 *rxf = (void *)dump_data->data;
int i;
for (i = 0; i < (rxf_len / sizeof(u32)); i++) {
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR,
i * sizeof(u32));
rxf[i] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_ADDR);
}
iwl_trans_release_nic_access(mvm->trans, &flags);
}
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data; dump_mem = (void *)dump_data->data;
...@@ -879,6 +1020,17 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -879,6 +1020,17 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->data, smem_len); dump_mem->data, smem_len);
} }
if (sram2_len) {
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
dump_mem->data, sram2_len);
}
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans); fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
fw_error_dump->op_mode_len = file_len; fw_error_dump->op_mode_len = file_len;
if (fw_error_dump->trans_ptr) if (fw_error_dump->trans_ptr)
...@@ -1213,10 +1365,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, ...@@ -1213,10 +1365,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->bf_allowed_vif = mvmvif; mvm->bf_allowed_vif = mvmvif;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI; IEEE80211_VIF_SUPPORTS_CQM_RSSI;
if (mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
!iwlwifi_mod_params.uapsd_disable)
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
} }
/* /*
...@@ -2164,6 +2312,20 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, ...@@ -2164,6 +2312,20 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
} }
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const u8 *bssid)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
return;
if (iwlwifi_mod_params.uapsd_disable) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
}
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
...@@ -2223,6 +2385,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ...@@ -2223,6 +2385,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
* Reset EBS status here assuming environment has been changed. * Reset EBS status here assuming environment has been changed.
*/ */
mvm->last_ebs_successful = true; mvm->last_ebs_successful = true;
iwl_mvm_check_uapsd(mvm, vif, sta->addr);
ret = 0; ret = 0;
} else if (old_state == IEEE80211_STA_AUTH && } else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) { new_state == IEEE80211_STA_ASSOC) {
......
...@@ -276,6 +276,7 @@ enum iwl_mvm_ref_type { ...@@ -276,6 +276,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_TM_CMD, IWL_MVM_REF_TM_CMD,
IWL_MVM_REF_EXIT_WORK, IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_PROTECT_CSA, IWL_MVM_REF_PROTECT_CSA,
IWL_MVM_REF_FW_DBG_COLLECT,
/* update debugfs.c when changing this */ /* update debugfs.c when changing this */
...@@ -535,6 +536,18 @@ enum iwl_mvm_tdls_cs_state { ...@@ -535,6 +536,18 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE, IWL_MVM_TDLS_SW_ACTIVE,
}; };
struct iwl_mvm_shared_mem_cfg {
u32 shared_mem_addr;
u32 shared_mem_size;
u32 sample_buff_addr;
u32 sample_buff_size;
u32 txfifo_addr;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
u32 page_buff_addr;
u32 page_buff_size;
};
struct iwl_mvm { struct iwl_mvm {
/* for logger access */ /* for logger access */
struct device *dev; struct device *dev;
...@@ -641,6 +654,8 @@ struct iwl_mvm { ...@@ -641,6 +654,8 @@ struct iwl_mvm {
bool disable_power_off; bool disable_power_off;
bool disable_power_off_d3; bool disable_power_off_d3;
bool scan_iter_notif_enabled;
struct debugfs_blob_wrapper nvm_hw_blob; struct debugfs_blob_wrapper nvm_hw_blob;
struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_sw_blob;
struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_calib_blob;
...@@ -784,6 +799,8 @@ struct iwl_mvm { ...@@ -784,6 +799,8 @@ struct iwl_mvm {
u32 ch_sw_tm_ie; u32 ch_sw_tm_ie;
} peer; } peer;
} tdls_cs; } tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
}; };
/* Extract MVM priv from op_mode and _hw */ /* Extract MVM priv from op_mode and _hw */
...@@ -855,9 +872,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) ...@@ -855,9 +872,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
} }
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{ {
return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT; return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_API_SCD_CFG;
} }
extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_tx_fifo[];
...@@ -999,6 +1016,9 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -999,6 +1016,9 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* MVM PHY */ /* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
...@@ -1060,6 +1080,9 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan); ...@@ -1060,6 +1080,9 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req, struct cfg80211_sched_scan_request *req,
...@@ -1120,9 +1143,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -1120,9 +1143,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */ /* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
struct iwl_mvm_frame_stats *stats,
u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, const u32 rate); int rs_pretty_print_rate(char *buf, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm, void rs_update_last_rssi(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta, struct iwl_lq_sta *lq_sta,
...@@ -1292,7 +1313,7 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) ...@@ -1292,7 +1313,7 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
/* hw scheduler queue config */ /* hw scheduler queue config */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg); const struct iwl_trans_txq_scd_cfg *cfg);
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue); void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
u8 fifo) u8 fifo)
......
...@@ -356,7 +356,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) ...@@ -356,7 +356,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
max_section_size = IWL_MAX_NVM_SECTION_SIZE; max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE; max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
else /* Family 8000 B-step */ else /* Family 8000 B-step or C-step */
max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE; max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
/* /*
...@@ -565,6 +565,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) ...@@ -565,6 +565,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
mvm->nvm_data = iwl_parse_nvm_sections(mvm); mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data) if (!mvm->nvm_data)
return -ENODATA; return -ENODATA;
IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
mvm->nvm_data->nvm_version);
return 0; return 0;
} }
...@@ -234,6 +234,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -234,6 +234,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true), RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
iwl_mvm_rx_scan_offload_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE, RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, true), iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
...@@ -268,6 +270,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { ...@@ -268,6 +270,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MGMT_MCAST_KEY), CMD(MGMT_MCAST_KEY),
CMD(TX_CMD), CMD(TX_CMD),
CMD(TXPATH_FLUSH), CMD(TXPATH_FLUSH),
CMD(SHARED_MEM_CFG),
CMD(MAC_CONTEXT_CMD), CMD(MAC_CONTEXT_CMD),
CMD(TIME_EVENT_CMD), CMD(TIME_EVENT_CMD),
CMD(TIME_EVENT_NOTIFICATION), CMD(TIME_EVENT_NOTIFICATION),
...@@ -818,9 +821,20 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) ...@@ -818,9 +821,20 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
struct iwl_mvm *mvm = struct iwl_mvm *mvm =
container_of(work, struct iwl_mvm, fw_error_dump_wk); container_of(work, struct iwl_mvm, fw_error_dump_wk);
if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
return;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
iwl_mvm_fw_error_dump(mvm); iwl_mvm_fw_error_dump(mvm);
/* start recording again if the firmware has not crashed */
WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
mvm->fw->dbg_dest_tlv &&
iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
} }
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
......
...@@ -170,7 +170,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, ...@@ -170,7 +170,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2; active_cnt = 2;
} }
cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant << cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS); PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt << cmd->rxchain_info |= cpu_to_le32(active_cnt <<
......
...@@ -917,7 +917,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, ...@@ -917,7 +917,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
break; break;
if (rate_mask & (1 << low)) if (rate_mask & (1 << low))
break; break;
IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
} }
high = index; high = index;
...@@ -927,7 +926,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask, ...@@ -927,7 +926,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
break; break;
if (rate_mask & (1 << high)) if (rate_mask & (1 << high))
break; break;
IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
} }
return (high << 8) | low; return (high << 8) | low;
...@@ -1804,20 +1802,12 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, ...@@ -1804,20 +1802,12 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta) struct iwl_lq_sta *lq_sta)
{ {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *vif = mvmsta->vif;
bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
!vif->bss_conf.ps);
/* Our chip supports Tx STBC and the peer is an HT/VHT STA which /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
* supports STBC of at least 1*SS * supports STBC of at least 1*SS
*/ */
if (!lq_sta->stbc) if (!lq_sta->stbc)
return false; return false;
if (!mvm->ps_disabled && !sta_ps_disabled)
return false;
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
return false; return false;
...@@ -2610,68 +2600,116 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, ...@@ -2610,68 +2600,116 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
} }
} }
static void rs_ht_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta_ht_cap *ht_cap)
{
/* active_siso_rate mask includes 9 MBits (bit 5),
* and CCK (bits 0-3), supp_rates[] does not;
* shift to convert format, force 9 MBits off.
*/
lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
lq_sta->active_siso_rate &= ~((u16)0x2);
lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
if (mvm->cfg->ht_params->ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
lq_sta->stbc = true;
lq_sta->is_vht = false;
}
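/*
 * Worked example, added for clarity: for an HT peer reporting
 * ht_cap->mcs.rx_mask[0] == 0xff (MCS 0-7), the conversion in
 * rs_ht_init() gives
 *	0xff << 1	-> 0x1fe	(MCS 1-7 moved past the 9 Mbps slot)
 *	| (0xff & 0x1)	-> 0x1ff	(MCS 0 kept at the 6 Mbps position)
 *	& ~0x2		-> 0x1fd	(9 Mbps slot forced off)
 * and the result is then shifted up by IWL_FIRST_OFDM_RATE so the mask
 * lines up with the driver's legacy + HT rate indexing.
 */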
static void rs_vht_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta_vht_cap *vht_cap)
{
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
if (mvm->cfg->ht_params->ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc = true;
lq_sta->is_vht = true;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm, static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
struct iwl_mvm_frame_stats *stats)
{ {
spin_lock_bh(&mvm->drv_stats_lock); spin_lock_bh(&mvm->drv_stats_lock);
memset(stats, 0, sizeof(*stats)); memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
spin_unlock_bh(&mvm->drv_stats_lock); spin_unlock_bh(&mvm->drv_stats_lock);
} }
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
struct iwl_mvm_frame_stats *stats,
u32 rate, bool agg)
{ {
u8 nss = 0, mcs = 0; u8 nss = 0, mcs = 0;
spin_lock(&mvm->drv_stats_lock); spin_lock(&mvm->drv_stats_lock);
if (agg) if (agg)
stats->agg_frames++; mvm->drv_rx_stats.agg_frames++;
stats->success_frames++; mvm->drv_rx_stats.success_frames++;
switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20: case RATE_MCS_CHAN_WIDTH_20:
stats->bw_20_frames++; mvm->drv_rx_stats.bw_20_frames++;
break; break;
case RATE_MCS_CHAN_WIDTH_40: case RATE_MCS_CHAN_WIDTH_40:
stats->bw_40_frames++; mvm->drv_rx_stats.bw_40_frames++;
break; break;
case RATE_MCS_CHAN_WIDTH_80: case RATE_MCS_CHAN_WIDTH_80:
stats->bw_80_frames++; mvm->drv_rx_stats.bw_80_frames++;
break; break;
default: default:
WARN_ONCE(1, "bad BW. rate 0x%x", rate); WARN_ONCE(1, "bad BW. rate 0x%x", rate);
} }
if (rate & RATE_MCS_HT_MSK) { if (rate & RATE_MCS_HT_MSK) {
stats->ht_frames++; mvm->drv_rx_stats.ht_frames++;
mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
} else if (rate & RATE_MCS_VHT_MSK) { } else if (rate & RATE_MCS_VHT_MSK) {
stats->vht_frames++; mvm->drv_rx_stats.vht_frames++;
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1; RATE_VHT_MCS_NSS_POS) + 1;
} else { } else {
stats->legacy_frames++; mvm->drv_rx_stats.legacy_frames++;
} }
if (nss == 1) if (nss == 1)
stats->siso_frames++; mvm->drv_rx_stats.siso_frames++;
else if (nss == 2) else if (nss == 2)
stats->mimo2_frames++; mvm->drv_rx_stats.mimo2_frames++;
if (rate & RATE_MCS_SGI_MSK) if (rate & RATE_MCS_SGI_MSK)
stats->sgi_frames++; mvm->drv_rx_stats.sgi_frames++;
else else
stats->ngi_frames++; mvm->drv_rx_stats.ngi_frames++;
stats->last_rates[stats->last_frame_idx] = rate; mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
stats->last_frame_idx = (stats->last_frame_idx + 1) % mvm->drv_rx_stats.last_frame_idx =
ARRAY_SIZE(stats->last_rates); (mvm->drv_rx_stats.last_frame_idx + 1) %
ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
spin_unlock(&mvm->drv_stats_lock); spin_unlock(&mvm->drv_stats_lock);
} }
...@@ -2724,46 +2762,12 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -2724,46 +2762,12 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
/* TODO: should probably account for rx_highest for both HT/VHT */ /* TODO: should probably account for rx_highest for both HT/VHT */
if (!vht_cap || !vht_cap->vht_supported) { if (!vht_cap || !vht_cap->vht_supported)
/* active_siso_rate mask includes 9 MBits (bit 5), rs_ht_init(mvm, sta, lq_sta, ht_cap);
* and CCK (bits 0-3), supp_rates[] does not; else
* shift to convert format, force 9 MBits off. rs_vht_init(mvm, sta, lq_sta, vht_cap);
*/
lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
lq_sta->active_siso_rate &= ~((u16)0x2);
lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
/* Same here */
lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
lq_sta->is_vht = false;
if (mvm->cfg->ht_params->ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
lq_sta->stbc = true;
} else {
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
lq_sta->is_vht = true;
if (mvm->cfg->ht_params->ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
if (mvm->cfg->ht_params->stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc = true;
}
if (IWL_MVM_RS_DISABLE_MIMO) if (IWL_MVM_RS_DISABLE_P2P_MIMO && sta_priv->vif->p2p)
lq_sta->active_mimo2_rate = 0; lq_sta->active_mimo2_rate = 0;
lq_sta->max_legacy_rate_idx = lq_sta->max_legacy_rate_idx =
...@@ -2774,7 +2778,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -2774,7 +2778,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate); rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n", "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d\n",
lq_sta->active_legacy_rate, lq_sta->active_legacy_rate,
lq_sta->active_siso_rate, lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate, lq_sta->active_mimo2_rate,
...@@ -2793,7 +2797,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -2793,7 +2797,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
lq_sta->is_agg = 0; lq_sta->is_agg = 0;
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); iwl_mvm_reset_frame_stats(mvm);
#endif #endif
rs_initialize_lq(mvm, sta, lq_sta, band, init); rs_initialize_lq(mvm, sta, lq_sta, band, init);
} }
...@@ -2862,12 +2866,13 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, ...@@ -2862,12 +2866,13 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
int index = *rs_table_index; int index = *rs_table_index;
for (i = 0; i < num_rates && index < end; i++) { for (i = 0; i < num_rates && index < end; i++) {
ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate)); for (j = 0; j < num_retries && index < end; j++, index++) {
for (j = 0; j < num_retries && index < end; j++, index++) ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
rate));
rs_table[index] = ucode_rate; rs_table[index] = ucode_rate;
if (toggle_ant) if (toggle_ant)
rs_toggle_antenna(valid_tx_ant, rate); rs_toggle_antenna(valid_tx_ant, rate);
}
prev_rate_idx = rate->index; prev_rate_idx = rate->index;
bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate); bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
...@@ -2875,7 +2880,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, ...@@ -2875,7 +2880,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
break; break;
} }
if (!bottom_reached) if (!bottom_reached && !is_legacy(rate))
rate->index = prev_rate_idx; rate->index = prev_rate_idx;
*rs_table_index = index; *rs_table_index = index;
...@@ -2911,11 +2916,23 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, ...@@ -2911,11 +2916,23 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
u8 valid_tx_ant = 0; u8 valid_tx_ant = 0;
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
bool toggle_ant = false; bool toggle_ant = false;
bool stbc_allowed = false;
memcpy(&rate, initial_rate, sizeof(rate)); memcpy(&rate, initial_rate, sizeof(rate));
valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
rate.stbc = rs_stbc_allow(mvm, sta, lq_sta);
stbc_allowed = rs_stbc_allow(mvm, sta, lq_sta);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) {
u32 ss_params = RS_SS_PARAMS_VALID;
if (stbc_allowed)
ss_params |= RS_SS_STBC_ALLOWED;
lq_cmd->ss_params = cpu_to_le32(ss_params);
} else {
/* TODO: remove old API when min FW API hits 14 */
rate.stbc = stbc_allowed;
}
if (is_siso(&rate)) { if (is_siso(&rate)) {
num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES; num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
...@@ -2925,7 +2942,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, ...@@ -2925,7 +2942,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE; num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
} else { } else {
num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES; num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
num_retries = IWL_MVM_RS_LEGACY_RETRIES_PER_RATE; num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
toggle_ant = true; toggle_ant = true;
} }
...@@ -2941,7 +2958,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, ...@@ -2941,7 +2958,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
lq_cmd->mimo_delim = index; lq_cmd->mimo_delim = index;
} else if (is_legacy(&rate)) { } else if (is_legacy(&rate)) {
num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
num_retries = IWL_MVM_RS_LEGACY_RETRIES_PER_RATE; num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
} else { } else {
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
} }
...@@ -2955,7 +2972,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, ...@@ -2955,7 +2972,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
rs_get_lower_rate_down_column(lq_sta, &rate); rs_get_lower_rate_down_column(lq_sta, &rate);
num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES; num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
num_retries = IWL_MVM_RS_LEGACY_RETRIES_PER_RATE; num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
num_rates, num_retries, valid_tx_ant, num_rates, num_retries, valid_tx_ant,
......
...@@ -407,7 +407,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -407,7 +407,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
} }
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags, iwl_mvm_update_frame_stats(mvm, rate_n_flags,
rx_status->flag & RX_FLAG_AMPDU_DETAILS); rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif #endif
iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status, iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
...@@ -511,13 +511,17 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, ...@@ -511,13 +511,17 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)&pkt->data; struct iwl_notif_statistics *stats = (void *)&pkt->data;
struct mvm_statistics_general_common *common = &stats->general.common;
struct iwl_mvm_stat_data data = { struct iwl_mvm_stat_data data = {
.stats = stats, .stats = stats,
.mvm = mvm, .mvm = mvm,
}; };
iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature)); /* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm,
le32_to_cpu(stats->general.radio_temperature));
iwl_mvm_update_rx_statistics(mvm, stats); iwl_mvm_update_rx_statistics(mvm, stats);
......
...@@ -173,15 +173,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid, ...@@ -173,15 +173,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only * already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit. * req->n_ssids - 1 bits in addition to the first bit.
*/ */
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band, int n_ssids)
{ {
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
return 10;
if (band == IEEE80211_BAND_2GHZ) if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1); return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1); return 10 + 2 * (n_ssids + 1);
} }
static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band)
{ {
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
} }
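/*
 * Worked example, added for clarity: without IWL_UCODE_TLV_API_BASIC_DWELL,
 * a request with n_ssids == 2 gets an active dwell of 20 + 3 * (2 + 1) = 29
 * on 2.4 GHz and 10 + 2 * (2 + 1) = 16 on 5 GHz, with passive dwells of
 * 120 and 110 respectively.  With the BASIC_DWELL API the driver passes
 * fixed values of 10 (active) and 110 (passive) regardless of band and
 * SSID count.
 */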
...@@ -309,18 +315,18 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, ...@@ -309,18 +315,18 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
goto not_bound; goto not_bound;
params->suspend_time = 30; params->suspend_time = 30;
params->max_out_time = 170; params->max_out_time = 120;
if (iwl_mvm_low_latency(mvm)) { if (iwl_mvm_low_latency(mvm)) {
if (mvm->fw->ucode_capa.api[0] & if (mvm->fw->ucode_capa.api[0] &
IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
params->suspend_time = 105; params->suspend_time = 105;
params->max_out_time = 70;
/* /*
* If there is more than one active interface make * If there is more than one active interface make
* passive scan more fragmented. * passive scan more fragmented.
*/ */
frag_passive_dwell = (global_cnt < 2) ? 40 : 20; frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
params->max_out_time = frag_passive_dwell;
} else { } else {
params->suspend_time = 120; params->suspend_time = 120;
params->max_out_time = 120; params->max_out_time = 120;
...@@ -337,7 +343,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, ...@@ -337,7 +343,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/ */
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell = u32 passive_dwell =
iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); iwl_mvm_get_passive_dwell(mvm,
IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell; params->max_out_time = passive_dwell;
} else { } else {
params->passive_fragmented = true; params->passive_fragmented = true;
...@@ -354,8 +361,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, ...@@ -354,8 +361,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
params->dwell[band].passive = frag_passive_dwell; params->dwell[band].passive = frag_passive_dwell;
else else
params->dwell[band].passive = params->dwell[band].passive =
iwl_mvm_get_passive_dwell(band); iwl_mvm_get_passive_dwell(mvm, band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band, params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids); n_ssids);
} }
} }
...@@ -536,6 +543,19 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -536,6 +543,19 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0; return 0;
} }
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_complete_notif *notif = (void *)pkt->data;
IWL_DEBUG_SCAN(mvm,
"Scan offload iteration complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
return 0;
}
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
{ {
...@@ -1104,6 +1124,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) ...@@ -1104,6 +1124,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify); notify);
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0;
if (iwl_mvm_is_radio_killed(mvm))
goto out;
if (mvm->scan_status != IWL_MVM_SCAN_SCHED && if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) { mvm->scan_status != IWL_MVM_SCAN_OS)) {
...@@ -1140,6 +1166,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) ...@@ -1140,6 +1166,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS) if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
out:
mvm->scan_status = IWL_MVM_SCAN_NONE; mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) { if (notify) {
...@@ -1296,22 +1323,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, ...@@ -1296,22 +1323,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1); cmd->iter_num = cpu_to_le32(1);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
if (iwl_mvm_rrm_scan_needed(mvm)) if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |= cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
...@@ -1386,6 +1397,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, ...@@ -1386,6 +1397,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0; cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0; cmd->schedule[1].full_scan_mul = 0;
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
for (i = 1; i <= req->req.n_ssids; i++) for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i); ssid_bitmap |= BIT(i);
...@@ -1458,6 +1485,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1458,6 +1485,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req)) if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
else
flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
...@@ -1468,6 +1497,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1468,6 +1497,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (req->n_ssids == 0) if (req->n_ssids == 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->scan_iter_notif_enabled)
flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
#endif
cmd->scan_flags |= cpu_to_le32(flags); cmd->scan_flags |= cpu_to_le32(flags);
cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band); cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
...@@ -1484,6 +1518,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1484,6 +1518,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff; cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
mvm->last_ebs_successful) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[0].non_ebs_ratio =
cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
cmd->channel_opt[1].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
cmd->channel_opt[1].non_ebs_ratio =
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd); ssid_bitmap, cmd);
......
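The scan hunks above move the EBS channel options out of the generic unified-scan command builder and into the two LMAC paths, each behind its own capability check plus the last_ebs_successful state: single scans are gated on IWL_UCODE_TLV_API_SINGLE_SCAN_EBS, scheduled scans on IWL_UCODE_TLV_FLAGS_EBS_SUPPORT. A minimal standalone sketch of that gating, in plain userspace C rather than driver code, with made-up CAP_* bits standing in for the firmware TLV flags:

#include <stdbool.h>
#include <stdio.h>

#define CAP_EBS_SUPPORT      (1u << 0)  /* stand-in for IWL_UCODE_TLV_FLAGS_EBS_SUPPORT */
#define CAP_SINGLE_SCAN_EBS  (1u << 1)  /* stand-in for IWL_UCODE_TLV_API_SINGLE_SCAN_EBS */

static bool ebs_allowed(unsigned int caps, bool sched_scan, bool last_ebs_successful)
{
	/* Both scan types bail out if the last EBS attempt failed. */
	if (!last_ebs_successful)
		return false;

	/* Scheduled scans key off the EBS support flag, single scans off
	 * the dedicated single-scan EBS API bit. */
	return sched_scan ? !!(caps & CAP_EBS_SUPPORT)
			  : !!(caps & CAP_SINGLE_SCAN_EBS);
}

int main(void)
{
	printf("single scan, single-scan EBS bit set:        %d\n",
	       ebs_allowed(CAP_SINGLE_SCAN_EBS, false, true));
	printf("sched scan, only single-scan EBS bit set:    %d\n",
	       ebs_allowed(CAP_SINGLE_SCAN_EBS, true, true));
	printf("sched scan, EBS support but last EBS failed: %d\n",
	       ebs_allowed(CAP_EBS_SUPPORT, true, false));
	return 0;
}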
...@@ -250,8 +250,8 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, ...@@ -250,8 +250,8 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
/* disable the TDLS STA-specific queues */ /* disable the TDLS STA-specific queues */
sta_msk = mvmsta->tfd_queue_msk; sta_msk = mvmsta->tfd_queue_msk;
for_each_set_bit(i, &sta_msk, sizeof(sta_msk)) for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i); iwl_mvm_disable_txq(mvm, i, 0);
} }
int iwl_mvm_add_sta(struct iwl_mvm *mvm, int iwl_mvm_add_sta(struct iwl_mvm *mvm,
...@@ -464,8 +464,8 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk) ...@@ -464,8 +464,8 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
if (mvm->tfd_drained[sta_id]) { if (mvm->tfd_drained[sta_id]) {
unsigned long i, msk = mvm->tfd_drained[sta_id]; unsigned long i, msk = mvm->tfd_drained[sta_id];
for_each_set_bit(i, &msk, sizeof(msk)) for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i); iwl_mvm_disable_txq(mvm, i, 0);
mvm->tfd_drained[sta_id] = 0; mvm->tfd_drained[sta_id] = 0;
IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
...@@ -1058,7 +1058,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1058,7 +1058,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, txq_id); iwl_mvm_disable_txq(mvm, txq_id, 0);
return 0; return 0;
case IWL_AGG_STARTING: case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA: case IWL_EMPTYING_HW_QUEUE_ADDBA:
...@@ -1116,7 +1116,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1116,7 +1116,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, tid_data->txq_id); iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
} }
mvm->queue_to_mac80211[tid_data->txq_id] = mvm->queue_to_mac80211[tid_data->txq_id] =
...@@ -1196,6 +1196,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, ...@@ -1196,6 +1196,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
break; break;
case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
......
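The sta.c hunks above fix two small issues: the TDLS and drain loops passed sizeof(msk), a byte count, as the bit limit of for_each_set_bit(), so only queues 0-7 were ever scanned on a 64-bit mask, and the WEP104 case now documents its intentional fall-through into WEP40. A standalone userspace illustration of the first point (not driver code; scan_mask() is a hypothetical stand-in for the kernel iterator):

#include <stdio.h>

#define BITS_PER_BYTE 8

static void scan_mask(unsigned long msk, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (msk & (1UL << i))
			printf("  queue %u\n", i);
}

int main(void)
{
	unsigned long sta_msk = (1UL << 3) | (1UL << 12);	/* queues 3 and 12 */

	printf("limit = sizeof(msk) = %zu bits:\n", sizeof(sta_msk));
	scan_mask(sta_msk, sizeof(sta_msk));			/* misses queue 12 */

	printf("limit = sizeof(msk) * BITS_PER_BYTE = %zu bits:\n",
	       sizeof(sta_msk) * BITS_PER_BYTE);
	scan_mask(sta_msk, sizeof(sta_msk) * BITS_PER_BYTE);	/* finds both */

	return 0;
}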
...@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc)) if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF; tx_flags |= TX_CMD_FLG_TSF;
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc)) if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG; tx_flags |= TX_CMD_FLG_MORE_FRAG;
...@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr); u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf; tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL; tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
tx_cmd->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else { } else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS; tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
...@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL; tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
} }
/* tid_tspec will default to 0 = BE when QOS isn't enabled */ /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
else
ac = tid_to_mac80211_ac[0];
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS; TX_CMD_FLG_BT_PRIO_POS;
...@@ -496,7 +507,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, ...@@ -496,7 +507,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n", "Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed); tid_data->next_reclaimed);
iwl_mvm_disable_txq(mvm, tid_data->txq_id); iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
tid_data->state = IWL_AGG_OFF; tid_data->state = IWL_AGG_OFF;
/* /*
* we can't hold the mutex - but since we are after a sequence * we can't hold the mutex - but since we are after a sequence
...@@ -656,7 +667,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -656,7 +667,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
/* Single frame failure in an AMPDU queue => send BAR */ /* Single frame failure in an AMPDU queue => send BAR */
if (txq_id >= mvm->first_agg_queue && if (txq_id >= mvm->first_agg_queue &&
!(info->flags & IEEE80211_TX_STAT_ACK)) !(info->flags & IEEE80211_TX_STAT_ACK) &&
!(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
/* W/A FW bug: seq_ctl is wrong when the status isn't success */ /* W/A FW bug: seq_ctl is wrong when the status isn't success */
...@@ -919,6 +931,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -919,6 +931,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
sta_id = ba_notif->sta_id; sta_id = ba_notif->sta_id;
tid = ba_notif->tid; tid = ba_notif->tid;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return 0;
rcu_read_lock(); rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
......
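The tx.c changes above stop treating BlockAckReq frames as generic non-QoS traffic: the TID is now pulled out of the BAR control field and used to pick the access category, with anything outside the QoS TID range falling back to best effort. A small standalone demo of that extraction (plain C, not driver code; the mask and shift mirror the mac80211 IEEE80211_BAR_CTRL_TID_INFO_* definitions, and the TID-to-AC table here is only illustrative):

#include <stdint.h>
#include <stdio.h>

#define BAR_CTRL_TID_INFO_MASK   0xf000
#define BAR_CTRL_TID_INFO_SHIFT  12

#define MAX_TID_COUNT  8	/* stand-in for IWL_MAX_TID_COUNT */

/* Hypothetical TID -> AC table in the spirit of tid_to_mac80211_ac
 * (0 = VO, 1 = VI, 2 = BE, 3 = BK). */
static const int tid_to_ac[MAX_TID_COUNT] = { 2, 3, 3, 2, 1, 1, 0, 0 };

int main(void)
{
	uint16_t bar_control = 0x3000;	/* example BAR control field: TID 3 */
	unsigned int tid, ac;

	tid = (bar_control & BAR_CTRL_TID_INFO_MASK) >> BAR_CTRL_TID_INFO_SHIFT;

	/* Same fallback as the hunk above: an out-of-range TID maps to
	 * the best-effort access class. */
	ac = tid < MAX_TID_COUNT ? tid_to_ac[tid] : tid_to_ac[0];

	printf("BAR control 0x%04x -> tid %u, ac %u\n", bar_control, tid, ac);
	return 0;
}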
...@@ -533,7 +533,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) ...@@ -533,7 +533,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg) const struct iwl_trans_txq_scd_cfg *cfg)
{ {
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 1, .enable = 1,
...@@ -542,38 +541,38 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, ...@@ -542,38 +541,38 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
.ssn = cpu_to_le16(ssn), .ssn = cpu_to_le16(ssn),
.tx_fifo = cfg->fifo, .tx_fifo = cfg->fifo,
.aggregate = cfg->aggregate, .aggregate = cfg->aggregate,
.flags = IWL_SCD_FLAGS_DQA_ENABLED,
.tid = cfg->tid, .tid = cfg->tid,
.control = IWL_SCD_CONTROL_SET_SSN,
}; };
int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(cmd), &cmd); if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
if (ret) iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg);
IWL_ERR(mvm, return;
"Failed to configure queue %d on FIFO %d\n",
queue, cfg->fifo);
} }
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL);
iwl_mvm_is_dqa_supported(mvm) ? NULL : cfg); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
"Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
} }
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue) void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
{ {
iwl_trans_txq_disable(mvm->trans, queue,
!iwl_mvm_is_dqa_supported(mvm));
if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 0, .enable = 0,
}; };
int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC, int ret;
if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
iwl_trans_txq_disable(mvm->trans, queue, true);
return;
}
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(cmd), &cmd); sizeof(cmd), &cmd);
if (ret) if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret); queue, ret);
}
} }
/** /**
...@@ -665,7 +664,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) ...@@ -665,7 +664,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return false; return false;
if (!mvm->cfg->rx_with_siso_diversity) if (mvm->cfg->rx_with_siso_diversity)
return false; return false;
ieee80211_iterate_active_interfaces_atomic( ieee80211_iterate_active_interfaces_atomic(
......
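The utils.c hunk above reworks queue teardown (and the matching enable path) around a single capability check: firmware that understands the SCD_QUEUE_CFG command owns the scheduler programming and the transport only stops the queue, while older firmware keeps the legacy transport-only path. A control-flow sketch of the disable side with mocked helpers (userspace C; the mock_* names are stand-ins, not iwlwifi APIs):

#include <stdbool.h>
#include <stdio.h>

static void mock_trans_txq_disable(int queue, bool configure_scd)
{
	printf("transport: disable queue %d%s\n",
	       queue, configure_scd ? " (and deconfigure scheduler)" : "");
}

static int mock_send_scd_queue_cfg(int queue, bool enable, unsigned int flags)
{
	printf("firmware:  SCD_QUEUE_CFG queue=%d enable=%d flags=0x%x\n",
	       queue, enable, flags);
	return 0;
}

static void disable_txq(bool scd_cfg_supported, int queue, unsigned int flags)
{
	if (!scd_cfg_supported) {
		/* legacy path: host driver deconfigures the scheduler itself */
		mock_trans_txq_disable(queue, true);
		return;
	}

	/* new path: transport only stops the queue, firmware owns the SCD */
	mock_trans_txq_disable(queue, false);
	if (mock_send_scd_queue_cfg(queue, false, flags))
		fprintf(stderr, "Failed to disable queue %d\n", queue);
}

int main(void)
{
	disable_txq(false, 9, 0);	/* older firmware */
	disable_txq(true, 9, 0);	/* SCD-config-capable firmware */
	return 0;
}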
...@@ -415,6 +415,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { ...@@ -415,6 +415,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */ #endif /* CONFIG_IWLMVM */
{0} {0}
...@@ -527,8 +529,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -527,8 +529,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg) else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg; cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d && if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d; cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
#endif #endif
pci_set_drvdata(pdev, iwl_trans); pci_set_drvdata(pdev, iwl_trans);
......
...@@ -722,6 +722,11 @@ static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans, ...@@ -722,6 +722,11 @@ static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
*first_ucode_section = last_read_idx; *first_ucode_section = last_read_idx;
if (cpu == 1)
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
else
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
return 0; return 0;
} }
...@@ -911,9 +916,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans, ...@@ -911,9 +916,6 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
if (trans->dbg_dest_tlv) if (trans->dbg_dest_tlv)
iwl_pcie_apply_destination(trans); iwl_pcie_apply_destination(trans);
/* Notify FW loading is done */
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/* wait for image verification to complete */ /* wait for image verification to complete */
ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0, ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
LMPM_SECURE_BOOT_STATUS_SUCCESS, LMPM_SECURE_BOOT_STATUS_SUCCESS,
...@@ -982,7 +984,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, ...@@ -982,7 +984,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* Load the given image to the HW */ /* Load the given image to the HW */
if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) && if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
(CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)) (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
return iwl_pcie_load_given_ucode_8000b(trans, fw); return iwl_pcie_load_given_ucode_8000b(trans, fw);
else else
return iwl_pcie_load_given_ucode(trans, fw); return iwl_pcie_load_given_ucode(trans, fw);
...@@ -2348,6 +2350,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, ...@@ -2348,6 +2350,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->trans = trans; trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->reg_lock);
spin_lock_init(&trans_pcie->ref_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq); init_waitqueue_head(&trans_pcie->ucode_write_waitq);
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
......
...@@ -1190,12 +1190,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, ...@@ -1190,12 +1190,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
* Assumes that ssn_idx is valid (!= 0xFFF) */ * Assumes that ssn_idx is valid (!= 0xFFF) */
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
if (cfg) { if (cfg) {
u8 frame_limit = cfg->frame_limit; u8 frame_limit = cfg->frame_limit;
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */ /* Set up Tx window size and frame limit for this queue */
...@@ -1220,11 +1220,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, ...@@ -1220,11 +1220,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (txq_id == trans_pcie->cmd_queue && if (txq_id == trans_pcie->cmd_queue &&
trans_pcie->scd_set_active) trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id)); iwl_scd_enable_set_active(trans, BIT(txq_id));
IWL_DEBUG_TX_QUEUES(trans,
"Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
} else {
IWL_DEBUG_TX_QUEUES(trans,
"Activate queue %d WrPtr: %d\n",
txq_id, ssn & 0xff);
} }
trans_pcie->txq[txq_id].active = true; trans_pcie->txq[txq_id].active = true;
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
} }
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
......