Commit 500fc34f authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2017-04-19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Another series of patches intended for v4.12.  These are the changes:

 * Heavy work for the A000 device series;
 * Some cleanup patches;
 * A few fixes;
 * Dynamic SAR support;
 * Geographical SAR support;
 * Support a few new PCI device IDs;
parents f9558f5f 718ceb22
@@ -7,7 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
 iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
 iwlwifi-objs += iwl-trans.o
...
@@ -58,7 +58,7 @@
 #define IWL9000_UCODE_API_MAX	30
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN	17
+#define IWL9000_UCODE_API_MIN	30
 /* NVM versions */
 #define IWL9000_NVM_VERSION	0x0a1d
...
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -65,15 +65,16 @@
 #define IWL_A000_TX_POWER_VERSION	0xffff /* meaningless */
 /* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET		0x800000
-#define IWL_A000_DCCM_LEN		0x18000
+#define IWL_A000_DCCM_OFFSET		0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN		0x10000 /* LMAC1 */
 #define IWL_A000_DCCM2_OFFSET		0x880000
 #define IWL_A000_DCCM2_LEN		0x8000
 #define IWL_A000_SMEM_OFFSET		0x400000
-#define IWL_A000_SMEM_LEN		0x68000
+#define IWL_A000_SMEM_LEN		0xD0000
 #define IWL_A000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
 #define IWL_A000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-a0-hrcdb-a0-"
 #define IWL_A000_HR_MODULE_FIRMWARE(api) \
 	IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -134,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
+const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
+	.name = "Intel(R) Dual Band Wireless AC a000",
+	.fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+	IWL_DEVICE_A000,
+	.ht_params = &iwl_a000_ht_params,
+	.nvm_ver = IWL_A000_NVM_VERSION,
+	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.cdb = true,
+};
 const struct iwl_cfg iwla000_2ac_cfg_jf = {
 	.name = "Intel(R) Dual Band Wireless AC a000",
 	.fw_name_pre = IWL_A000_JF_FW_PRE,
...
@@ -90,16 +90,6 @@ enum iwl_device_family {
 	IWL_DEVICE_FAMILY_8000,
 };
-static inline bool iwl_has_secure_boot(u32 hw_rev,
-				       enum iwl_device_family family)
-{
-	/* return 1 only for family 8000 B0 */
-	if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
-		return true;
-
-	return false;
-}
 /*
  * LED mode
  *    IWL_LED_DEFAULT:  use device default
@@ -324,6 +314,7 @@ struct iwl_pwr_tx_backoff {
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
  * @gen2: a000 and on transport operation
+ * @cdb: CDB support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -370,7 +361,8 @@ struct iwl_cfg {
 	    rf_id:1,
 	    integrated:1,
 	    use_tfh:1,
-	    gen2:1;
+	    gen2:1,
+	    cdb:1;
 	u8 valid_tx_ant;
 	u8 valid_rx_ant;
 	u8 non_shared_ant;
@@ -460,6 +452,7 @@ extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwla000_2ac_cfg_jf;
 #endif /* CONFIG_IWLMVM */
...
@@ -76,7 +76,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
 }
 IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
-				  struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+			   struct iwl_rx_packet *pkt)
 {
 	bool triggered = false;
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
 			}
 		}
 		spin_unlock(&notif_wait->notif_wait_lock);
 	}
-	if (triggered)
-		wake_up_all(&notif_wait->notif_waitq);
+	return triggered;
 }
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
...
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
 *
 * This structure is not used directly, to wait for a
 * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
 * parameters. Then do whatever will cause the ucode
 * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
 *
 * Each notification is one-shot. If at some point we
 * need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
 /* caller functions */
 void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
-				  struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+			   struct iwl_rx_packet *pkt);
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+	wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+			     struct iwl_rx_packet *pkt)
+{
+	if (iwl_notification_wait(notif_data, pkt))
+		iwl_notification_notify(notif_data);
+}
 /* user functions */
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
...
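The split above separates checking for waiters (iwl_notification_wait()) from waking them (iwl_notification_notify()), so an RX path that handles several packets can coalesce the wake-up; iwl_notification_wait_notify() keeps the old combined behaviour. For orientation, the caller-side pattern the header comment describes looks roughly like this sketch, where REPLY_FOO, foo_verify_fn and the two-second timeout are illustrative placeholders, not anything from this series:

    /* Sketch only: wait for one firmware notification with the notif-wait
     * API. REPLY_FOO and foo_verify_fn are hypothetical. */
    static int foo_wait_for_notif(struct iwl_notif_wait_data *notif_data)
    {
        struct iwl_notification_wait wait_entry;
        static const u16 notifs[] = { REPLY_FOO };

        /* register the wait entry before poking the firmware */
        iwl_init_notification_wait(notif_data, &wait_entry,
                                   notifs, ARRAY_SIZE(notifs),
                                   foo_verify_fn, NULL);

        /* ... send the host command that triggers the notification ... */

        /* sleeps until the RX path calls iwl_notification_wait_notify() */
        return iwl_wait_notification(notif_data, &wait_entry, 2 * HZ);
    }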
@@ -294,9 +294,6 @@
 /*********************** END TX SCHEDULER *************************************/
-/* tcp checksum offload */
-#define RX_EN_CSUM		(0x00a00d88)
 /* Oscillator clock */
 #define OSC_CLK			(0xa04068)
 #define OSC_CLK_FORCE_CONTROL	(0x8)
@@ -317,6 +314,8 @@
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE	(0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE	(0x420400)
+#define LMAC2_PRPH_OFFSET		(0x100000)
 /* Rx FIFO */
 #define RXF_SIZE_ADDR		(0xa00c88)
 #define RXF_RD_D_SPACE		(0xa00c40)
@@ -400,6 +399,8 @@ enum aux_misc_master1_en {
 #define PREG_AUX_BUS_WPROT_0	0xA04CC0
 #define SB_CPU_1_STATUS		0xA01E30
 #define SB_CPU_2_STATUS		0xA01E34
+#define UMAG_SB_CPU_1_STATUS	0xA038C0
+#define UMAG_SB_CPU_2_STATUS	0xA038C4
 /* FW chicken bits */
 #define LMPM_CHICK		0xA01FF8
...
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -397,6 +397,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
 */
 #define IWL_MAX_HW_QUEUES		32
 #define IWL_MAX_TID_COUNT	8
+#define IWL_MGMT_TID		15
 #define IWL_FRAME_LIMIT	64
 #define IWL_MAX_RX_HW_QUEUES	16
@@ -530,6 +531,44 @@ struct iwl_trans_txq_scd_cfg {
 	int frame_limit;
 };
+/* Available options for &struct iwl_tx_queue_cfg_cmd */
+enum iwl_tx_queue_cfg_actions {
+	TX_QUEUE_CFG_ENABLE_QUEUE	= BIT(0),
+	TX_QUEUE_CFG_TFD_SHORT_FORMAT	= BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: Bit 0 - on enable, off - disable, Bit 1 - short TFD format
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ *	Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+	u8 sta_id;
+	u8 tid;
+	__le16 flags;
+	__le32 cb_size;
+	__le64 byte_cnt_addr;
+	__le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA -TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+	__le16 queue_number;
+	__le16 flags;
+	__le16 write_pointer;
+	__le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
 /**
  * struct iwl_trans_ops - transport specific operations
  *
@@ -640,6 +679,12 @@ struct iwl_trans_ops {
 			   unsigned int queue_wdg_timeout);
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
 			    bool configure_scd);
+	/* a000 functions */
+	int (*txq_alloc)(struct iwl_trans *trans,
+			 struct iwl_tx_queue_cfg_cmd *cmd,
+			 int cmd_id,
+			 unsigned int queue_wdg_timeout);
+	void (*txq_free)(struct iwl_trans *trans, int queue);
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
@@ -1057,6 +1102,34 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
 }
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+	if (WARN_ON_ONCE(!trans->ops->txq_free))
+		return;
+
+	trans->ops->txq_free(trans, queue);
+}
+
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+		    struct iwl_tx_queue_cfg_cmd *cmd,
+		    int cmd_id,
+		    unsigned int queue_wdg_timeout)
+{
+	might_sleep();
+
+	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+		return -ENOTSUPP;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+}
 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
 						 int queue, bool shared_mode)
 {
...
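With the new transport ops the firmware, not the driver, picks the queue: the caller fills an iwl_tx_queue_cfg_cmd and the assigned queue number comes back through txq_alloc (the transport fills in byte_cnt_addr/tfdq_addr itself). Because @cb_size is an exponent minus 3, a 256-TFD ring is encoded as ilog2(256) - 3 = 5. A minimal caller sketch; the station id, TID and ring size are illustrative, not taken from this series:

    /* Sketch only: allocate an a000 TX queue through the new transport op. */
    static int foo_alloc_gen2_txq(struct iwl_trans *trans, int cmd_id,
                                  unsigned int wdg_timeout)
    {
        struct iwl_tx_queue_cfg_cmd cmd = {
            .sta_id = 0,
            .tid = IWL_MGMT_TID,
            .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
            /* cb_size = log2(n_tfds) - 3: 256 TFDs -> 8 - 3 = 5 */
            .cb_size = cpu_to_le32(ilog2(256) - 3),
        };

        /* on success the return value is the fw-assigned queue number */
        return iwl_trans_txq_alloc(trans, &cmd, cmd_id, wdg_timeout);
    }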
@@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * Rssi update while not associated - can happen since the statistics
 	 * are handled asynchronously
 	 */
-	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
 		return;
 	/* No BT - reports should be disabled */
...
@@ -1208,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
 		/* if we're not associated, this must be netdetect */
 		if (!wowlan->nd_config) {
 			ret = 1;
@@ -2116,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	 */
 	iwl_mvm_update_changed_regdom(mvm);
+	if (!unified_image)
+		/* Re-configure default SAR profile */
+		iwl_mvm_sar_select_profile(mvm, 1, 1);
+
 	if (mvm->net_detect) {
 		/* If this is a non-unified image, we restart the FW,
 		 * so no need to stop the netdetect scan.  If that
...
@@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
 				 mvmvif->queue_params[i].uapsd);
 	if (vif->type == NL80211_IFTYPE_STATION &&
-	    ap_sta_id != IWL_MVM_STATION_COUNT) {
+	    ap_sta_id != IWL_MVM_INVALID_STA) {
 		struct iwl_mvm_sta *mvm_sta;
 		mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
...
@@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 	mutex_lock(&mvm->mutex);
-	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
 		pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
 						lockdep_is_held(&mvm->mutex));
...
@@ -74,6 +74,8 @@
 #define NUM_MAC_INDEX_CDB	(NUM_MAC_INDEX_DRIVER + 2)
 #define IWL_MVM_STATION_COUNT	16
+#define IWL_MVM_INVALID_STA	0xFF
+
 #define IWL_MVM_TDLS_STA_COUNT	4
 enum iwl_ac {
...
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
 	u8 reserved[3];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+#define IWL_NUM_GEO_PROFILES	3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+	IWL_PER_CHAIN_OFFSET_SET_TABLES,
+	IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+};  /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+	__le16 max_tx_power;
+	u8 chain_a;
+	u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+	struct iwl_per_chain_offset lb;
+	struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+	__le32 ops;
+	struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
...
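Geographic SAR then amounts to sending IWL_NUM_GEO_PROFILES per-band offset tables with the IWL_PER_CHAIN_OFFSET_SET_TABLES operation. A hedged sketch of the host side; the numeric offsets and the call site are illustrative, not lifted from this series:

    /* Sketch only: push geographic SAR offset tables to the firmware.
     * The power/offset numbers are made up for illustration. */
    static int foo_send_geo_profiles(struct iwl_mvm *mvm)
    {
        struct iwl_geo_tx_power_profiles_cmd cmd = {
            .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
        };
        int i;

        for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
            /* one iwl_per_chain_offset per band: lb = 2.4 GHz, hb = 5 GHz */
            cmd.table[i].lb.max_tx_power = cpu_to_le16(16);
            cmd.table[i].lb.chain_a = 2;
            cmd.table[i].lb.chain_b = 2;
            cmd.table[i].hb.max_tx_power = cpu_to_le16(16);
            cmd.table[i].hb.chain_a = 2;
            cmd.table[i].hb.chain_b = 2;
        }

        return iwl_mvm_send_cmd_pdu(mvm,
                                    WIDE_ID(PHY_OPS_GROUP,
                                            GEO_TX_POWER_LIMIT),
                                    0, sizeof(cmd), &cmd);
    }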
@@ -516,7 +516,7 @@ struct iwl_scan_dwell {
  *	scan_config_channel_flag
  * @channel_array: default supported channels
  */
-struct iwl_scan_config {
+struct iwl_scan_config_v1 {
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -532,7 +532,7 @@ struct iwl_scan_config {
 #define SCAN_TWO_LMACS 2
-struct iwl_scan_config_cdb {
+struct iwl_scan_config {
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
 			u8 n_channels;
 			__le16 reserved;
 			u8 data[];
-		} no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+		} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
 		struct {
 			__le32 max_out_time[SCAN_TWO_LMACS];
 			__le32 suspend_time[SCAN_TWO_LMACS];
@@ -679,12 +679,12 @@ struct iwl_scan_req_umac {
 			u8 n_channels;
 			__le16 reserved;
 			u8 data[];
-		} cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+		} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
 	};
 } __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
+#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
 				2 * sizeof(__le32))
 /**
...
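Since the two-LMAC layout is now the default struct and the old one is the _v1 variant, size selection flips accordingly; roughly, assuming the driver's CDB-capability predicate:

    /* Sketch only: choose the UMAC scan request size by supported layout. */
    static inline int foo_scan_req_umac_size(struct iwl_mvm *mvm)
    {
        return iwl_mvm_is_cdb_supported(mvm) ? IWL_SCAN_REQ_UMAC_SIZE :
                                               IWL_SCAN_REQ_UMAC_SIZE_V1;
    }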
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
 * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
 * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
 * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
 * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
 * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
 * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -351,10 +351,12 @@ struct iwl_mvm_add_sta_cmd_v7 {
 * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
 *	mac-addr.
 * @beamform_flags: beam forming controls
- * @tfd_queue_msk: tfd queues used by this station
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obsolete for new TX API (9 and above).
 * @rx_ba_window: aggregation window size
- * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
- *	that the queues used by this station are in the first 32.
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ *	enabled ACs.
 *
 * The device contains an internal table of per-station information, with info
 * on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -384,9 +386,9 @@ struct iwl_mvm_add_sta_cmd {
 	__le16 beamform_flags;
 	__le32 tfd_queue_msk;
 	__le16 rx_ba_window;
-	u8 scd_queue_bank;
-	u8 uapsd_trigger_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_8 */
+	u8 sp_length;
+	u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_9 */
 /**
 * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
...
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -123,6 +124,20 @@ enum iwl_tx_flags {
 	TX_CMD_FLG_HCCA_CHUNK = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
+/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ *	to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ *	selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+	IWL_TX_FLAGS_CMD_RATE		= BIT(0),
+	IWL_TX_FLAGS_ENCRYPT_DIS	= BIT(1),
+	IWL_TX_FLAGS_HIGH_PRI		= BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
 /**
  * enum iwl_tx_pm_timeouts - pm timeout values in TX command
  * @PM_FRAME_NONE: no need to suspend sleep mode
@@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
 	TX_CMD_SEC_EXT			= 0x04,
 	TX_CMD_SEC_GCMP			= 0x05,
 	TX_CMD_SEC_KEY128		= 0x08,
-	TX_CMD_SEC_KEY_FROM_TABLE	= 0x08,
+	TX_CMD_SEC_KEY_FROM_TABLE	= 0x10,
 };
 /* TODO: how does these values are OK with only 16 bit variable??? */
@@ -301,6 +316,31 @@ struct iwl_tx_cmd {
 	struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_6 */
+struct iwl_dram_sec_info {
+	__le32 pn_low;
+	__le16 pn_high;
+	__le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ */
+struct iwl_tx_cmd_gen2 {
+	__le16 len;
+	__le16 offload_assist;
+	__le32 flags;
+	struct iwl_dram_sec_info dram_info;
+	__le32 rate_n_flags;
+	struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7 */
 /*
  * TX response related data
  */
@@ -508,9 +548,11 @@ struct agg_tx_status {
  * @tlc_info: TLC rate info
  * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
  * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
  * @status: for non-agg: frame status TX_STATUS_*
  *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
  *	follow this one, up to frame_count.
+ *	For version 6 TX response isn't received for aggregation at all.
 *
 * After the array of statuses comes the SSN of the SCD. Look at
 * %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
 	u8 tlc_info;
 	u8 ra_tid;
 	__le16 frame_ctrl;
-	struct agg_tx_status status;
-} __packed; /* TX_RSP_API_S_VER_3 */
+	union {
+		struct {
+			struct agg_tx_status status;
+		} v3; /* TX_RSP_API_S_VER_3 */
+		struct {
+			__le16 tx_queue;
+			__le16 reserved2;
+			struct agg_tx_status status;
+		} v6;
+	};
+} __packed; /* TX_RSP_API_S_VER_6 */
 /**
  * struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
  * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
  * @q_num: TFD queue number
  * @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
  */
 struct iwl_mvm_compressed_ba_tfd {
-	u8 q_num;
-	u8 reserved;
+	__le16 q_num;
 	__le16 tfd_index;
+	u8 scd_queue;
+	u8 reserved;
+	__le16 reserved2;
 } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
 /**
@@ -759,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
 	__le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
-/**
- * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
- * @tx_resp: the Tx response from the fw (agg or non-agg)
- *
- * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
- * it can't know that everything will go well until the end of the AMPDU, it
- * can't know in advance the number of MPDUs that will be sent in the current
- * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
- * Hence, it can't know in advance what the SSN of the SCD will be at the end
- * of the batch. This is why the SSN of the SCD is written at the end of the
- * whole struct at a variable offset. This function knows how to cope with the
- * variable offset and returns the SSN of the SCD.
- */
-static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
-{
-	return le32_to_cpup((__le32 *)&tx_resp->status +
-			    tx_resp->frame_count) & 0xfff;
-}
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
 	SCD_CFG_DISABLE_QUEUE		= 0x0,
...
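The gen2 command drops the legacy scratch and timing fields; the driver mostly fills length, flags and (when rate scaling is bypassed) an explicit rate, while dram_info is owned by the firmware-facing code. A hedged sketch of filling it, with an illustrative flag combination that is not taken from this series:

    /* Sketch only: fill the a000 TX command for a frame that should use the
     * rate given in the command itself. */
    static void foo_fill_tx_cmd_gen2(struct iwl_tx_cmd_gen2 *tx_cmd,
                                     struct sk_buff *skb, u32 rate_n_flags)
    {
        tx_cmd->len = cpu_to_le16((u16)skb->len);
        tx_cmd->flags = cpu_to_le32(IWL_TX_FLAGS_CMD_RATE |
                                    IWL_TX_FLAGS_HIGH_PRI);
        tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
        /* tx_cmd->dram_info (PN storage) is filled elsewhere */
    }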
@@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
 	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
 	CTDP_CONFIG_CMD = 0x03,
 	TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+	GEO_TX_POWER_LIMIT = 0x05,
 	CT_KILL_NOTIFICATION = 0xFE,
 	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 enum iwl_system_subcmd_ids {
 	SHARED_MEM_CFG_CMD = 0x0,
+	INIT_EXTENDED_CFG_CMD = 0x03,
 };
 enum iwl_data_path_subcmd_ids {
@@ -688,7 +690,7 @@ struct iwl_error_resp {
 			  (_color << FW_CTXT_COLOR_POS))
 /* Possible actions on PHYs, MACs and Bindings */
-enum {
+enum iwl_phy_ctxt_action {
 	FW_CTXT_ACTION_STUB = 0,
 	FW_CTXT_ACTION_ADD,
 	FW_CTXT_ACTION_MODIFY,
@@ -2026,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
 	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM];
+	__le32 rxfifo1_addr;
+	__le32 rxfifo1_size;
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * Shared memory configuration information from the FW
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
 struct iwl_shared_mem_cfg {
 	__le32 shared_mem_addr;
 	__le32 shared_mem_size;
 	__le32 sample_buff_addr;
 	__le32 sample_buff_size;
-	__le32 txfifo_addr;
-	__le32 txfifo_size[TX_FIFO_MAX_NUM];
-	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
+	__le32 rxfifo2_addr;
+	__le32 rxfifo2_size;
 	__le32 page_buff_addr;
 	__le32 page_buff_size;
-	__le32 rxfifo_addr;
-	__le32 internal_txfifo_addr;
-	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+	__le32 lmac_num;
+	struct iwl_shared_mem_lmac_cfg lmac_smem[2];
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
 /**
@@ -2206,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
 	__le32 reserved;
 } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+/**
+ * enum iwl_extended_cfg_flags - commands driver may send before
+ *	finishing init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+	IWL_INIT_DEBUG_CFG,
+	IWL_INIT_NVM,
+	IWL_INIT_PHY,
+};
+
+/**
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
+ * before finishing init flows
+ * @init_flags: values from iwl_extended_cfg_flags
+ */
+struct iwl_init_extended_cfg_cmd {
+	__le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
 #endif /* __fw_api_h__ */
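INIT_EXTENDED_CFG_CMD lets the driver tell the ucode which follow-up commands to wait for before it finishes the init flow; the enum values above are used as bit positions in @init_flags. A hedged sketch of the send, assuming the usual SYSTEM_GROUP command plumbing:

    /* Sketch only: declare that debug-config and NVM commands will follow
     * before init completes. */
    static int foo_send_init_extended_cfg(struct iwl_mvm *mvm)
    {
        struct iwl_init_extended_cfg_cmd init_cfg = {
            .init_flags = cpu_to_le32(BIT(IWL_INIT_DEBUG_CFG) |
                                      BIT(IWL_INIT_NVM)),
        };

        return iwl_mvm_send_cmd_pdu(mvm,
                                    WIDE_ID(SYSTEM_GROUP,
                                            INIT_EXTENDED_CFG_CMD),
                                    0, sizeof(init_cfg), &init_cfg);
    }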
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -99,138 +99,169 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
 	iwl_trans_release_nic_access(mvm->trans, &flags);
 }
-static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
-			       struct iwl_fw_error_dump_data **dump_data)
+static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
+			     struct iwl_fw_error_dump_data **dump_data,
+			     int size, u32 offset, int fifo_num)
 {
 	struct iwl_fw_error_dump_fifo *fifo_hdr;
 	u32 *fifo_data;
 	u32 fifo_len;
-	unsigned long flags;
-	int i, j;
-
-	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
-		return;
-
-	/* Pull RXF data from all RXFs */
-	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
-		/*
-		 * Keep aside the additional offset that might be needed for
-		 * next RXF
-		 */
-		u32 offset_diff = RXF_DIFF_FROM_PREV * i;
-
-		fifo_hdr = (void *)(*dump_data)->data;
-		fifo_data = (void *)fifo_hdr->data;
-		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
-
-		/* No need to try to read the data if the length is 0 */
-		if (fifo_len == 0)
-			continue;
-
-		/* Add a TLV for the RXF */
-		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
-		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
-		fifo_hdr->fifo_num = cpu_to_le32(i);
-		fifo_hdr->available_bytes =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							RXF_RD_D_SPACE +
-							offset_diff));
-		fifo_hdr->wr_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							RXF_RD_WR_PTR +
-							offset_diff));
-		fifo_hdr->rd_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							RXF_RD_RD_PTR +
-							offset_diff));
-		fifo_hdr->fence_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							RXF_RD_FENCE_PTR +
-							offset_diff));
-		fifo_hdr->fence_mode =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							RXF_SET_FENCE_MODE +
-							offset_diff));
-
-		/* Lock fence */
-		iwl_trans_write_prph(mvm->trans,
-				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
-		/* Set fence pointer to the same place like WR pointer */
-		iwl_trans_write_prph(mvm->trans,
-				     RXF_LD_WR2FENCE + offset_diff, 0x1);
-		/* Set fence offset */
-		iwl_trans_write_prph(mvm->trans,
-				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
-				     0x0);
-
-		/* Read FIFO */
-		fifo_len /= sizeof(u32); /* Size in DWORDS */
-		for (j = 0; j < fifo_len; j++)
-			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
-							   RXF_FIFO_RD_FENCE_INC +
-							   offset_diff);
-		*dump_data = iwl_fw_error_next_data(*dump_data);
-	}
-
-	/* Pull TXF data from all TXFs */
-	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+	int i;
+
+	fifo_hdr = (void *)(*dump_data)->data;
+	fifo_data = (void *)fifo_hdr->data;
+	fifo_len = size;
+
+	/* No need to try to read the data if the length is 0 */
+	if (fifo_len == 0)
+		return;
+
+	/* Add a TLV for the RXF */
+	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+	fifo_hdr->available_bytes =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						RXF_RD_D_SPACE + offset));
+	fifo_hdr->wr_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						RXF_RD_WR_PTR + offset));
+	fifo_hdr->rd_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						RXF_RD_RD_PTR + offset));
+	fifo_hdr->fence_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						RXF_RD_FENCE_PTR + offset));
+	fifo_hdr->fence_mode =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						RXF_SET_FENCE_MODE + offset));
+
+	/* Lock fence */
+	iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+	/* Set fence pointer to the same place like WR pointer */
+	iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
+	/* Set fence offset */
+	iwl_trans_write_prph(mvm->trans,
+			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+	/* Read FIFO */
+	fifo_len /= sizeof(u32); /* Size in DWORDS */
+	for (i = 0; i < fifo_len; i++)
+		fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+						   RXF_FIFO_RD_FENCE_INC +
+						   offset);
+	*dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
+			     struct iwl_fw_error_dump_data **dump_data,
+			     int size, u32 offset, int fifo_num)
+{
+	struct iwl_fw_error_dump_fifo *fifo_hdr;
+	u32 *fifo_data;
+	u32 fifo_len;
+	int i;
+
+	fifo_hdr = (void *)(*dump_data)->data;
+	fifo_data = (void *)fifo_hdr->data;
+	fifo_len = size;
+
+	/* No need to try to read the data if the length is 0 */
+	if (fifo_len == 0)
+		return;
+
+	/* Add a TLV for the FIFO */
+	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+	fifo_hdr->available_bytes =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						TXF_FIFO_ITEM_CNT + offset));
+	fifo_hdr->wr_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						TXF_WR_PTR + offset));
+	fifo_hdr->rd_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						TXF_RD_PTR + offset));
+	fifo_hdr->fence_ptr =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						TXF_FENCE_PTR + offset));
+	fifo_hdr->fence_mode =
+		cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+						TXF_LOCK_FENCE + offset));
+
+	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+	iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
+			     TXF_WR_PTR + offset);
+
+	/* Dummy-read to advance the read pointer to the head */
+	iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);
+
+	/* Read FIFO */
+	fifo_len /= sizeof(u32); /* Size in DWORDS */
+	for (i = 0; i < fifo_len; i++)
+		fifo_data[i] = iwl_trans_read_prph(mvm->trans,
+						   TXF_READ_MODIFY_DATA +
+						   offset);
+	*dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
+			       struct iwl_fw_error_dump_data **dump_data)
+{
+	struct iwl_fw_error_dump_fifo *fifo_hdr;
+	struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
+	u32 *fifo_data;
+	u32 fifo_len;
+	unsigned long flags;
+	int i, j;
+
+	if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
+		return;
+
+	/* Pull RXF1 */
+	iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
+	/* Pull RXF2 */
+	iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
+			 RXF_DIFF_FROM_PREV, 1);
+	/* Pull LMAC2 RXF1 */
+	if (mvm->smem_cfg.num_lmacs > 1)
+		iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
+				 LMAC2_PRPH_OFFSET, 2);
+
+	/* Pull TXF data from LMAC1 */
+	for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
 		/* Mark the number of TXF we're pulling now */
 		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+		iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
+				 0, i);
+	}
 
-		fifo_hdr = (void *)(*dump_data)->data;
-		fifo_data = (void *)fifo_hdr->data;
-		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
-
-		/* No need to try to read the data if the length is 0 */
-		if (fifo_len == 0)
-			continue;
-
-		/* Add a TLV for the FIFO */
-		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
-		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
-
-		fifo_hdr->fifo_num = cpu_to_le32(i);
-		fifo_hdr->available_bytes =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							TXF_FIFO_ITEM_CNT));
-		fifo_hdr->wr_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							TXF_WR_PTR));
-		fifo_hdr->rd_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							TXF_RD_PTR));
-		fifo_hdr->fence_ptr =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							TXF_FENCE_PTR));
-		fifo_hdr->fence_mode =
-			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
-							TXF_LOCK_FENCE));
-
-		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
-		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
-				     TXF_WR_PTR);
-
-		/* Dummy-read to advance the read pointer to the head */
-		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
-
-		/* Read FIFO */
-		fifo_len /= sizeof(u32); /* Size in DWORDS */
-		for (j = 0; j < fifo_len; j++)
-			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
-							   TXF_READ_MODIFY_DATA);
-		*dump_data = iwl_fw_error_next_data(*dump_data);
+	/* Pull TXF data from LMAC2 */
+	if (mvm->smem_cfg.num_lmacs > 1) {
+		for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
+			/* Mark the number of TXF we're pulling now */
+			iwl_trans_write_prph(mvm->trans,
+					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
+					     i);
+			iwl_mvm_dump_txf(mvm, dump_data,
+					 cfg->lmac[1].txfifo_size[i],
+					 LMAC2_PRPH_OFFSET,
+					 i + cfg->num_txfifo_entries);
+		}
 	}
 
 	if (fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 		/* Pull UMAC internal TXF data from all TXFs */
 		for (i = 0;
-		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+		     i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
 		     i++) {
 			fifo_hdr = (void *)(*dump_data)->data;
 			fifo_data = (void *)fifo_hdr->data;
-			fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+			fifo_len = mvm->smem_cfg.internal_txfifo_size[i];
 
 			/* No need to try to read the data if the length is 0 */
 			if (fifo_len == 0)
@@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
 			/* Mark the number of TXF we're pulling now */
 			iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
-				ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
+				mvm->smem_cfg.num_txfifo_entries);
 
 			fifo_hdr->available_bytes =
 				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -553,30 +584,44 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	/* reading RXF/TXF sizes */
 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
-		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;
 
 		fifo_data_len = 0;
 
-		/* Count RXF size */
-		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
-			if (!mem_cfg->rxfifo_size[i])
-				continue;
-
+		/* Count RXF2 size */
+		if (mem_cfg->rxfifo2_size) {
 			/* Add header info */
-			fifo_data_len += mem_cfg->rxfifo_size[i] +
+			fifo_data_len += mem_cfg->rxfifo2_size +
+					 sizeof(*dump_data) +
+					 sizeof(struct iwl_fw_error_dump_fifo);
+		}
+
+		/* Count RXF1 sizes */
+		for (i = 0; i < mem_cfg->num_lmacs; i++) {
+			if (!mem_cfg->lmac[i].rxfifo1_size)
+				continue;
+
+			/* Add header info */
+			fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
 					 sizeof(*dump_data) +
 					 sizeof(struct iwl_fw_error_dump_fifo);
 		}
 
-		for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
-			if (!mem_cfg->txfifo_size[i])
-				continue;
+		/* Count TXF sizes */
+		for (i = 0; i < mem_cfg->num_lmacs; i++) {
+			int j;
+
+			for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
+				if (!mem_cfg->lmac[i].txfifo_size[j])
+					continue;
 
-			/* Add header info */
-			fifo_data_len += mem_cfg->txfifo_size[i] +
-					 sizeof(*dump_data) +
-					 sizeof(struct iwl_fw_error_dump_fifo);
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->lmac[i].txfifo_size[j] +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
 		}
 
 		if (fw_has_capa(&mvm->fw->ucode_capa,
 				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
...
@@ -472,9 +472,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	}
-	mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
-	mvmvif->mcast_sta.sta_id = IWL_MVM_STATION_COUNT;
-	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+	mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+	mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
 		mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -1443,6 +1443,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
 	struct iwl_mvm_tx_resp *beacon_notify_hdr;
 	struct ieee80211_vif *csa_vif;
 	struct ieee80211_vif *tx_blocked_vif;
+	struct agg_tx_status *agg_status;
 	u16 status;
 	lockdep_assert_held(&mvm->mutex);
@@ -1450,7 +1451,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
 	beacon_notify_hdr = &beacon->beacon_notify_hdr;
 	mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
-	status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
+	agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+	status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
 	IWL_DEBUG_RX(mvm,
 		     "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
 		     status, beacon_notify_hdr->failure_frame,
...
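iwl_mvm_get_agg_status() used above is what hides the v3/v6 TX-response union from callers; conceptually it is just a version switch like the sketch below (the real helper is introduced elsewhere in this series, so treat this as an approximation):

    /* Sketch only: pick the agg_tx_status location by TX response version. */
    static struct agg_tx_status *
    foo_get_agg_status(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp)
    {
        if (iwl_mvm_has_new_tx_api(mvm))
            return &tx_resp->v6.status;
        else
            return &tx_resp->v3.status;
    }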
@@ -767,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
 		goto out;
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
 	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
 		goto out;
@@ -1011,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	mvmvif->uploaded = false;
-	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 	spin_lock_bh(&mvm->time_event_lock);
 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@@ -1054,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 	mvm->p2p_device_vif = NULL;
-	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -1965,7 +1965,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 						    IWL_MVM_SMPS_REQ_PROT,
 						    IEEE80211_SMPS_DYNAMIC);
 		}
-	} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+	} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
 		/*
 		 * If update fails - SF might be running in associated
 		 * mode while disassociated - which is forbidden.
@@ -1979,8 +1979,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 			IWL_ERR(mvm, "failed to remove AP station\n");
 		if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
-		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 		/* remove quota for this interface */
 		ret = iwl_mvm_update_quotas(mvm, false, NULL);
 		if (ret)
@@ -2391,7 +2391,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 		 */
 		break;
 	case STA_NOTIFY_AWAKE:
-		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
+		if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
 			break;
 		if (txqs)
@@ -3961,7 +3961,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	/* flush the AP-station and all TDLS peers */
-	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
 						lockdep_is_held(&mvm->mutex));
 		if (IS_ERR_OR_NULL(sta))
@@ -4218,7 +4218,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvm->mutex);
-	if (!iwl_mvm_has_new_rx_api(mvm))
+	/* TODO - remove a000 disablement when we have RXQ config API */
+	if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
 		return;
 	notif->cookie = mvm->queue_sync_cookie;
...
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -604,10 +604,15 @@ enum iwl_mvm_tdls_cs_state { ...@@ -604,10 +604,15 @@ enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_ACTIVE, IWL_MVM_TDLS_SW_ACTIVE,
}; };
#define MAX_NUM_LMAC 2
struct iwl_mvm_shared_mem_cfg { struct iwl_mvm_shared_mem_cfg {
int num_lmacs;
int num_txfifo_entries; int num_txfifo_entries;
struct {
u32 txfifo_size[TX_FIFO_MAX_NUM]; u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM]; u32 rxfifo1_size;
} lmac[MAX_NUM_LMAC];
u32 rxfifo2_size;
u32 internal_txfifo_addr; u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
}; };
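The struct reshaping above turns the flat FIFO arrays into per-LMAC blocks. A minimal userspace sketch of the new layout, assuming an illustrative TX_FIFO_MAX_NUM of 8 (not the driver's real value), shows how lookups gain an LMAC index:

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_LMAC 2
#define TX_FIFO_MAX_NUM 8 /* illustrative value, not the driver's */

struct shared_mem_cfg {
	int num_lmacs;
	int num_txfifo_entries;
	struct {
		uint32_t txfifo_size[TX_FIFO_MAX_NUM];
		uint32_t rxfifo1_size;
	} lmac[MAX_NUM_LMAC];
	uint32_t rxfifo2_size;
};

int main(void)
{
	struct shared_mem_cfg cfg = { .num_lmacs = 2, .num_txfifo_entries = 2 };
	int l, f;

	cfg.lmac[0].txfifo_size[0] = 1024;
	cfg.lmac[1].txfifo_size[0] = 2048;

	/* code that read cfg.txfifo_size[f] before this patch now has to
	 * pick an LMAC first, e.g. lmac[0] as the TSO path below does */
	for (l = 0; l < cfg.num_lmacs; l++)
		for (f = 0; f < cfg.num_txfifo_entries; f++)
			printf("lmac%d txfifo%d: %u\n", l, f,
			       (unsigned)cfg.lmac[l].txfifo_size[f]);
	return 0;
}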
...@@ -626,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg { ...@@ -626,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg {
* @reorder_timer: timer for frames that are in the reorder buffer. For AMSDU * @reorder_timer: timer for frames that are in the reorder buffer. For AMSDU
* it is the time of the last received sub-frame * it is the time of the last received sub-frame
* @removed: prevent timer re-arming * @removed: prevent timer re-arming
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state * @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context * @mvm: mvm pointer, needed for frame timer context
*/ */
...@@ -641,6 +647,7 @@ struct iwl_mvm_reorder_buffer { ...@@ -641,6 +647,7 @@ struct iwl_mvm_reorder_buffer {
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF]; unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer; struct timer_list reorder_timer;
bool removed; bool removed;
bool valid;
spinlock_t lock; spinlock_t lock;
struct iwl_mvm *mvm; struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
...@@ -710,6 +717,21 @@ enum iwl_mvm_queue_status { ...@@ -710,6 +717,21 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
#define IWL_MVM_NUM_CIPHERS 10 #define IWL_MVM_NUM_CIPHERS 10
#ifdef CONFIG_ACPI
#define IWL_MVM_SAR_TABLE_SIZE 10
#define IWL_MVM_SAR_PROFILE_NUM 4
#define IWL_MVM_GEO_TABLE_SIZE 18
struct iwl_mvm_sar_profile {
bool enabled;
u8 table[IWL_MVM_SAR_TABLE_SIZE];
};
struct iwl_mvm_geo_table {
u8 values[IWL_MVM_GEO_TABLE_SIZE];
};
#endif
struct iwl_mvm { struct iwl_mvm {
/* for logger access */ /* for logger access */
struct device *dev; struct device *dev;
...@@ -1022,7 +1044,7 @@ struct iwl_mvm { ...@@ -1022,7 +1044,7 @@ struct iwl_mvm {
} peer; } peer;
} tdls_cs; } tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg; struct iwl_mvm_shared_mem_cfg smem_cfg;
u32 ciphers[IWL_MVM_NUM_CIPHERS]; u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
...@@ -1039,6 +1061,9 @@ struct iwl_mvm { ...@@ -1039,6 +1061,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode; bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork; struct delayed_work cs_tx_unblock_dwork;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
#endif
}; };
/* Extract MVM priv from op_mode and _hw */ /* Extract MVM priv from op_mode and _hw */
...@@ -1237,6 +1262,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) ...@@ -1237,6 +1262,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CDB_SUPPORT); IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
} }
static inline struct agg_tx_status*
iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
return &tx_resp->v6.status;
else
return &tx_resp->v3.status;
}
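iwl_mvm_get_agg_status() hides the layout difference between the old and new Tx responses behind a single accessor. A hedged stand-alone model of that dispatch, with invented v3/v6 layouts and a plain flag standing in for iwl_mvm_has_new_tx_api():

#include <stdio.h>

struct agg_tx_status { unsigned short status, sequence; };

struct tx_resp {
	int is_new_api; /* stand-in for iwl_mvm_has_new_tx_api(mvm) */
	union {
		struct { struct agg_tx_status status; } v3;
		struct { unsigned short tx_queue;
			 struct agg_tx_status status; } v6;
	};
};

static struct agg_tx_status *get_agg_status(struct tx_resp *r)
{
	/* same shape as the accessor above: one call site, two layouts */
	return r->is_new_api ? &r->v6.status : &r->v3.status;
}

int main(void)
{
	struct tx_resp r = { .is_new_api = 1 };

	r.v6.status.status = 0x12;
	printf("status=0x%x\n", get_agg_status(&r)->status);
	return 0;
}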
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{ {
#ifdef CONFIG_THERMAL #ifdef CONFIG_THERMAL
...@@ -1676,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) ...@@ -1676,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout); unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);
/* /*
* Disable a TXQ. * Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
...@@ -1806,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, ...@@ -1806,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
u32 duration, u32 timeout); u32 duration, u32 timeout);
bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
#else
static inline
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */
#endif /* __IWL_MVM_H__ */ #endif /* __IWL_MVM_H__ */
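The CONFIG_ACPI stub above lets callers use one code path whether or not ACPI (and thus SAR table) support is compiled in. A minimal sketch of how a caller might handle it, assuming -ENOENT is treated as "no tables, keep defaults" (that error-handling policy is an assumption, not taken from the patch):

#include <errno.h>
#include <stdio.h>

/* mirrors the !CONFIG_ACPI inline above */
static int sar_select_profile(int prof_a, int prof_b)
{
	(void)prof_a;
	(void)prof_b;
	return -ENOENT;
}

int main(void)
{
	int ret = sar_select_profile(1, 1);

	/* assumed policy: "no SAR tables" is not fatal, keep defaults */
	if (ret == -ENOENT)
		printf("no SAR profiles, using default power limits\n");
	else if (ret < 0)
		printf("SAR profile selection failed: %d\n", ret);
	return 0;
}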
...@@ -428,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { ...@@ -428,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/ */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = { static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
}; };
/* Please keep this array *SORTED* by hex value. /* Please keep this array *SORTED* by hex value.
...@@ -446,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { ...@@ -446,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(GEO_TX_POWER_LIMIT),
HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
}; };
...@@ -1268,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm, ...@@ -1268,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
u8 tid; u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION || if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
return false; return false;
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
...@@ -1356,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, ...@@ -1356,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta; struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta; struct iwl_mvm_sta *mvm_ap_sta;
if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT) if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
return; return;
rcu_read_lock(); rcu_read_lock();
...@@ -1426,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) ...@@ -1426,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading; mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else { } else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1); WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvm->d0i3_offloading = false; mvm->d0i3_offloading = false;
} }
...@@ -1439,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) ...@@ -1439,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
return ret; return ret;
/* configure wowlan configuration only if needed */ /* configure wowlan configuration only if needed */
if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
/* wake on beacons only if beacon storing isn't supported */ /* wake on beacons only if beacon storing isn't supported */
if (!fw_has_capa(&mvm->fw->ucode_capa, if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_STORING)) IWL_UCODE_TLV_CAPA_BEACON_STORING))
...@@ -1516,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) ...@@ -1516,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
spin_lock_bh(&mvm->d0i3_tx_lock); spin_lock_bh(&mvm->d0i3_tx_lock);
if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT) if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
goto out; goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n"); IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
...@@ -1554,7 +1556,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) ...@@ -1554,7 +1556,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
} }
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq); wake_up(&mvm->d0i3_exit_waitq);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
if (wake_queues) if (wake_queues)
ieee80211_wake_queues(mvm->hw); ieee80211_wake_queues(mvm->hw);
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ...@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef, struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic) u8 chains_static, u8 chains_dynamic)
{ {
enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
/* In CDB mode we cannot modify PHY context between bands so... */
if (iwl_mvm_has_new_tx_api(mvm) &&
ctxt->channel->band != chandef->chan->band) {
int ret;
/* ... remove it here ... */
ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE, 0);
if (ret)
return ret;
/* ... and proceed to add it again */
action = FW_CTXT_ACTION_ADD;
}
ctxt->channel = chandef->chan; ctxt->channel = chandef->chan;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic, chains_static, chains_dynamic,
FW_CTXT_ACTION_MODIFY, 0); action, 0);
} }
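The band-switch handling above boils down to: on CDB hardware, a cross-band change is a REMOVE followed by an ADD rather than a MODIFY. A self-contained sketch of that flow, with invented action names and an apply() stub in place of iwl_mvm_phy_ctxt_apply():

#include <stdio.h>

enum ctxt_action { CTXT_ADD, CTXT_MODIFY, CTXT_REMOVE }; /* invented names */

static int apply(enum ctxt_action action, int band)
{
	printf("action=%d band=%d\n", action, band);
	return 0;
}

static int ctxt_changed(int *cur_band, int new_band, int cdb)
{
	enum ctxt_action action = CTXT_MODIFY;

	/* a CDB PHY context can't be moved across bands in place, so
	 * tear it down and recreate it on the new band */
	if (cdb && *cur_band != new_band) {
		int ret = apply(CTXT_REMOVE, *cur_band);

		if (ret)
			return ret;
		action = CTXT_ADD;
	}
	*cur_band = new_band;
	return apply(action, new_band);
}

int main(void)
{
	int band = 0; /* e.g. 2.4 GHz */

	return ctxt_changed(&band, 1 /* e.g. 5 GHz */, 1 /* CDB device */);
}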
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -351,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -351,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta)) if (IS_ERR(sta))
sta = NULL; sta = NULL;
...@@ -460,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -460,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK) if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF; rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else { } else {
rx_status->rate_idx = int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band); rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
return;
}
rx_status->rate_idx = rate;
} }
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
...@@ -649,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, ...@@ -649,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm, .mvm = mvm,
}; };
int expected_size; int expected_size;
int i;
u8 *energy;
__le32 *bytes, *air_time;
if (iwl_mvm_is_cdb_supported(mvm)) if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats); expected_size = sizeof(*stats);
...@@ -657,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, ...@@ -657,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else else
expected_size = sizeof(struct iwl_notif_statistics_v10); expected_size = sizeof(struct iwl_notif_statistics_v10);
if (iwl_rx_packet_payload_len(pkt) != expected_size) if (iwl_rx_packet_payload_len(pkt) != expected_size) {
goto invalid; IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
return;
}
data.mac_id = stats->rx.general.mac_id; data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy = data.beacon_filter_average_energy =
...@@ -674,10 +687,16 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, ...@@ -674,10 +687,16 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.common.on_time_scan); le64_to_cpu(stats->general.common.on_time_scan);
data.general = &stats->general; data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i; iwl_mvm_rx_stats_check_trigger(mvm, pkt);
u8 *energy;
__le32 *bytes, *air_time; ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
if (!iwl_mvm_has_new_rx_api(mvm))
return;
if (!iwl_mvm_is_cdb_supported(mvm)) { if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 = struct iwl_notif_statistics_v11 *v11 =
...@@ -693,7 +712,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, ...@@ -693,7 +712,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
} }
rcu_read_lock(); rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
struct iwl_mvm_sta *sta; struct iwl_mvm_sta *sta;
if (!energy[i]) if (!energy[i])
...@@ -705,18 +724,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, ...@@ -705,18 +724,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
sta->avg_energy = energy[i]; sta->avg_energy = energy[i];
} }
rcu_read_unlock(); rcu_read_unlock();
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
return;
invalid:
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
} }
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
......
...@@ -636,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, ...@@ -636,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false; return false;
baid_data = rcu_dereference(mvm->baid_map[baid]); baid_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN(!baid_data, if (!baid_data) {
"Received baid %d, but no data exists for this BAID\n", baid)) WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
"Received baid %d, but no data exists for this BAID\n",
baid);
return false; return false;
}
if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
...@@ -653,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, ...@@ -653,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock); spin_lock_bh(&buffer->lock);
if (!buffer->valid) {
if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
spin_unlock_bh(&buffer->lock);
return false;
}
buffer->valid = true;
}
if (ieee80211_is_back_req(hdr->frame_control)) { if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
goto drop; goto drop;
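The new valid flag makes the reorder buffer tolerate stale frames: an OLD_SN frame seen before the buffer is armed is simply not buffered, and the first in-session frame arms it. A hedged model of just that gate (the flag value is illustrative; the return convention matches the caller below, where false means "hand the frame straight to mac80211"):

#include <stdbool.h>
#include <stdio.h>

#define BA_OLD_SN 0x1 /* illustrative flag value */

struct reorder_buf { bool valid; };

/* returns true when the reorder buffer takes the frame */
static bool reorder(struct reorder_buf *buf, unsigned int reorder_flags)
{
	if (!buf->valid) {
		/* leftover frame from a dead BA session: don't buffer */
		if (reorder_flags & BA_OLD_SN)
			return false;
		buf->valid = true; /* first in-session frame arms the buffer */
	}
	return true;
}

int main(void)
{
	struct reorder_buf buf = { .valid = false };

	printf("old-sn frame held? %d\n", reorder(&buf, BA_OLD_SN)); /* 0 */
	printf("fresh frame held? %d\n", reorder(&buf, 0));          /* 1 */
	return 0;
}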
...@@ -737,7 +749,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, ...@@ -737,7 +749,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return true; return true;
} }
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
u32 reorder_data, u8 baid)
{ {
unsigned long now = jiffies; unsigned long now = jiffies;
unsigned long timeout; unsigned long timeout;
...@@ -746,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid) ...@@ -746,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
rcu_read_lock(); rcu_read_lock();
data = rcu_dereference(mvm->baid_map[baid]); data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON(!data)) if (!data) {
WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
goto out; goto out;
}
if (!data->timeout) if (!data->timeout)
goto out; goto out;
...@@ -841,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -841,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) { if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta)) if (IS_ERR(sta))
sta = NULL; sta = NULL;
...@@ -903,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -903,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb); kfree_skb(skb);
rcu_read_unlock(); goto out;
return;
} }
/* /*
* Our hardware de-aggregates AMSDUs but copies the mac header * Our hardware de-aggregates AMSDUs but copies the mac header
* as-is to the de-aggregated MPDUs. We need to turn off the * as-is to the de-aggregated MPDUs. We need to turn off the
* AMSDU bit in the QoS control ourselves. * AMSDU bit in the QoS control ourselves.
* In addition, HW reverses addr3 and addr4 - reverse them back.
*/ */
if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
int i;
u8 *qc = ieee80211_get_qos_ctl(hdr); u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 mac_addr[ETH_ALEN];
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
if (!(desc->amsdu_info &
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) for (i = 0; i < ETH_ALEN; i++)
rx_status->flag |= RX_FLAG_AMSDU_MORE; mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr3, mac_addr);
if (ieee80211_has_a4(hdr->frame_control)) {
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] =
hdr->addr4[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr4, mac_addr);
}
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
u32 reorder_data = le32_to_cpu(desc->reorder_data);
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
} }
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
} }
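The addr3/addr4 handling added above is a plain byte reversal. A stand-alone version of the same loop, runnable outside the driver:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void reverse_addr(unsigned char *addr)
{
	unsigned char tmp[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		tmp[i] = addr[ETH_ALEN - i - 1];
	memcpy(addr, tmp, ETH_ALEN);
}

int main(void)
{
	/* HW delivered the address reversed; restore the byte order */
	unsigned char a3[ETH_ALEN] = { 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	int i;

	reverse_addr(a3);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", a3[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}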
/* Set up the HT phy flags */ /* Set up the HT phy flags */
...@@ -963,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -963,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK) if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF; rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else { } else {
rx_status->rate_idx = int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band); rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
goto out;
}
rx_status->rate_idx = rate;
} }
/* management stuff on default queue */ /* management stuff on default queue */
...@@ -984,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, ...@@ -984,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
out:
rcu_read_unlock(); rcu_read_unlock();
} }
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) ...@@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
channels[j] = band->channels[i].hw_value; channels[j] = band->channels[i].hw_value;
} }
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags) u32 flags, u8 channel_flags)
{ {
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config; struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags); cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
...@@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, ...@@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array); iwl_mvm_fill_channels(mvm, cfg->channel_array);
} }
static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags) u32 flags, u8 channel_flags)
{ {
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config_cdb *cfg = config; struct iwl_scan_config *cfg = config;
cfg->flags = cpu_to_le32(flags); cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
...@@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config, ...@@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] = cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time); cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cfg->suspend_time[1] =
cpu_to_le32(scan_timing[type].suspend_time);
cfg->out_of_channel_time[1] = cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time); cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); }
cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]); iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
...@@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) ...@@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS; return -ENOBUFS;
if (type == mvm->scan_type) { if (type == mvm->scan_type)
IWL_DEBUG_SCAN(mvm,
"Ignoring UMAC scan config of the same type\n");
return 0; return 0;
}
if (iwl_mvm_is_cdb_supported(mvm)) if (iwl_mvm_has_new_tx_api(mvm))
cmd_size = sizeof(struct iwl_scan_config_cdb);
else
cmd_size = sizeof(struct iwl_scan_config); cmd_size = sizeof(struct iwl_scan_config);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += mvm->fw->ucode_capa.n_scan_channels; cmd_size += mvm->fw->ucode_capa.n_scan_channels;
cfg = kzalloc(cmd_size, GFP_KERNEL); cfg = kzalloc(cmd_size, GFP_KERNEL);
...@@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) ...@@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
if (iwl_mvm_is_cdb_supported(mvm)) { if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
} }
cmd.data[0] = cfg; cmd.data[0] = cfg;
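Command sizing above follows the usual versioned-layout pattern: pick the struct that matches the device, then append the per-channel tail. A sketch with invented field sets (only the sizeof-based selection mirrors the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct scan_config_v1 { /* invented minimal field set */
	uint32_t flags;
	uint32_t out_of_channel_time;
	uint32_t suspend_time;
};

struct scan_config { /* dual-LMAC timings, as in the patch */
	uint32_t flags;
	uint32_t out_of_channel_time[2];
	uint32_t suspend_time[2];
};

static size_t scan_cmd_size(int has_new_tx_api, size_t n_channels)
{
	size_t base = has_new_tx_api ? sizeof(struct scan_config)
				     : sizeof(struct scan_config_v1);

	return base + n_channels; /* one byte per channel entry */
}

int main(void)
{
	printf("v1: %zu bytes, new: %zu bytes\n",
	       scan_cmd_size(0, 50), scan_cmd_size(1, 50));
	return 0;
}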
...@@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, ...@@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
} }
cmd->fragmented_dwell = timing->dwell_fragmented; cmd->fragmented_dwell = timing->dwell_fragmented;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) { if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time); cmd->v6.max_out_time[1] =
cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time); cpu_to_le32(timing->max_out_time);
cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time); cmd->v6.suspend_time[1] =
cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time); cpu_to_le32(timing->suspend_time);
cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); }
} else { } else {
cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time); cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time); cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->no_cdb.scan_priority = cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
} }
...@@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type) int type)
{ {
struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ? void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
(void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data; (void *)&cmd->v6.data : (void *)&cmd->v1.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data + struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) * sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels; mvm->fw->ucode_capa.n_scan_channels;
...@@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
if (iwl_mvm_is_cdb_supported(mvm)) { if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->cdb.channel_flags = channel_flags; cmd->v6.channel_flags = channel_flags;
cmd->cdb.n_channels = params->n_channels; cmd->v6.n_channels = params->n_channels;
} else { } else {
cmd->no_cdb.channel_flags = channel_flags; cmd->v1.channel_flags = channel_flags;
cmd->no_cdb.n_channels = params->n_channels; cmd->v1.n_channels = params->n_channels;
} }
iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap); iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
...@@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) ...@@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
int iwl_mvm_scan_size(struct iwl_mvm *mvm) int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{ {
int base_size = IWL_SCAN_REQ_UMAC_SIZE; int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
if (iwl_mvm_is_cdb_supported(mvm)) if (iwl_mvm_has_new_tx_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB; base_size = IWL_SCAN_REQ_UMAC_SIZE;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size + return base_size +
......
...@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, ...@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break; break;
case SF_FULL_ON: case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) { if (sta_id == IWL_MVM_INVALID_STA) {
IWL_ERR(mvm, IWL_ERR(mvm,
"No station: Cannot switch SF to FULL_ON\n"); "No station: Cannot switch SF to FULL_ON\n");
return -EINVAL; return -EINVAL;
...@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, ...@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
bool remove_vif) bool remove_vif)
{ {
enum iwl_sf_state new_state; enum iwl_sf_state new_state;
u8 sta_id = IWL_MVM_STATION_COUNT; u8 sta_id = IWL_MVM_INVALID_STA;
struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_active_iface_iterator_data data = { struct iwl_mvm_active_iface_iterator_data data = {
.ignore_vif = changed_vif, .ignore_vif = changed_vif,
.sta_vif_state = SF_UNINIT, .sta_vif_state = SF_UNINIT,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT, .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
}; };
/* /*
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -31,6 +32,7 @@ ...@@ -31,6 +32,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) ...@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls) if (!sta || IS_ERR(sta) || !sta->tdls)
...@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls) if (!sta || IS_ERR(sta) || !sta->tdls)
...@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */ /* populate TDLS peer data */
cnt = 0; cnt = 0;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls) if (IS_ERR_OR_NULL(sta) || !sta->tdls)
...@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, ...@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
if (state == IWL_MVM_TDLS_SW_IDLE) if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
} }
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
...@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, ...@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
/* get the existing peer if it's there */ /* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected( struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
...@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) ...@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */ /* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
goto out; goto out;
sta = rcu_dereference_protected( sta = rcu_dereference_protected(
...@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, ...@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width); sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */ /* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) { if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n", "Existing peer. Can't start switch with %pM\n",
sta->addr); sta->addr);
...@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, ...@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */ /* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) { if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out; goto out;
} }
...@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, ...@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true; wait_for_phy = true;
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb); dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL; mvm->tdls_cs.peer.skb = NULL;
...@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, ...@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 && params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) { mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *cur_sta; struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */ /* make sure it's the same peer */
......
...@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm) ...@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
if (IWL_MVM_TOF_IS_RESPONDER) { if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id = tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
} }
#endif #endif
......
...@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) ...@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
int i, err; int i, err;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta) if (!mvmsta)
continue; continue;
......
...@@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
memset(dev_cmd, 0, sizeof(*dev_cmd)); memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = TX_CMD; dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
/* padding is inserted later in transport */
/* FIXME - the AMSDU check may need to be removed */
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
cmd->offload_assist |= cpu_to_le16(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
memcpy(cmd->hdr, hdr, hdrlen);
if (!info->control.hw_key)
cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
/* For data packets, rate info comes from the fw */
if (ieee80211_is_data(hdr->frame_control) && sta)
goto out;
cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
cmd->rate_n_flags =
cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
goto out;
}
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
if (info->control.hw_key) if (info->control.hw_key)
...@@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
out:
return dev_cmd; return dev_cmd;
} }
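The gen2 path above decides MAC-header padding via the offload_assist bitmap rather than in the host: if the header length is not a multiple of four and the frame is not an A-MSDU, the PAD bit tells the transport to insert the padding. A sketch with assumed bit positions (the real TX_CMD_OFFLD_* values are not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define OFFLD_AMSDU (1u << 6)  /* assumed bit position */
#define OFFLD_PAD   (1u << 13) /* assumed bit position */

static uint16_t pad_decision(unsigned int hdrlen, uint16_t assist)
{
	/* the transport inserts the pad bytes; the driver only flags it */
	if ((hdrlen % 4) && !(assist & OFFLD_AMSDU))
		assist |= OFFLD_PAD;
	return assist;
}

int main(void)
{
	printf("QoS hdr (26 B): 0x%x\n", pad_decision(26, 0));     /* PAD set */
	printf("non-QoS hdr (24 B): 0x%x\n", pad_decision(24, 0)); /* no PAD */
	return 0;
}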
...@@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) ...@@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info; struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd; struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
u8 sta_id; u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control); int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue; int queue;
...@@ -598,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) ...@@ -598,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
is_multicast_ether_addr(hdr->addr1)) { is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_STATION_COUNT) if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id; sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) && } else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION && info.control.vif->type == NL80211_IFTYPE_STATION &&
...@@ -616,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) ...@@ -616,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* From now on, we cannot access info->control */ /* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd); iwl_mvm_skb_prepare_status(skb, dev_cmd);
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1; return -1;
...@@ -713,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -713,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts. * fifo to be able to send bursts.
*/ */
max_amsdu_len = min_t(unsigned int, max_amsdu_len, max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256); mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
if (unlikely(dbg_max_amsdu_len)) if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len, max_amsdu_len = min_t(unsigned int, max_amsdu_len,
...@@ -862,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) ...@@ -862,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
unsigned long now = jiffies; unsigned long now = jiffies;
int tid; int tid;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false;
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now)) IWL_MVM_DQA_QUEUE_TIMEOUT, now))
...@@ -881,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -881,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd; struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc; __le16 fc;
u16 seq_number = 0; u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT; u8 tid = IWL_MAX_TID_COUNT;
...@@ -896,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -896,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta)) if (WARN_ON_ONCE(!mvmsta))
return -1; return -1;
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1; return -1;
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
...@@ -904,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -904,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!dev_cmd) if (!dev_cmd)
goto drop; goto drop;
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* /*
* we handle that entirely ourselves -- for uAPSD the firmware * we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses * will always send a notification, and for PS-Poll responses
...@@ -926,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -926,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta; goto drop_unlock_sta;
seq_number = mvmsta->tid_data[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
if (WARN_ON_ONCE(is_ampdu && if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON)) mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta; goto drop_unlock_sta;
seq_number = mvmsta->tid_data[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
if (!iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
/* update the tx_cmd hdr as it was already copied */
tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
}
} }
if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id; txq_id = mvmsta->tid_data[tid].txq_id;
if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */ /* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
...@@ -945,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -945,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
} }
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */ /* Check if TXQ needs to be allocated or re-activated */
...@@ -1036,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -1036,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta)) if (WARN_ON_ONCE(!mvmsta))
return -1; return -1;
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1; return -1;
memcpy(&info, skb->cb, sizeof(info)); memcpy(&info, skb->cb, sizeof(info));
...@@ -1245,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, ...@@ -1245,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
} }
} }
/**
* iwl_mvm_get_scd_ssn - returns the SSN of the SCD
* @mvm: the mvm object (needed to pick the Tx response version)
* @tx_resp: the Tx response from the fw (agg or non-agg)
*
* When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
* it can't know that everything will go well until the end of the AMPDU, it
* can't know in advance how many MPDUs the current batch will contain, so it
* writes the agg Tx response while it fetches the MPDUs. Consequently it also
* can't know in advance what the SSN of the SCD will be at the end of the
* batch, so the SSN is written at the end of the whole struct, at a variable
* offset. This function copes with the variable offset and returns the SSN.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count) & 0xfff;
}
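Since the SSN word trails a frame_count-sized array, reading it is pointer arithmetic over 32-bit words. A runnable model with an invented response layout (three status slots, no firmware involved):

#include <stdint.h>
#include <stdio.h>

struct agg_tx_status { uint16_t status; uint16_t sequence; };

struct tx_resp { /* invented layout; only the trailing SSN matters here */
	uint8_t frame_count;
	struct agg_tx_status status[3]; /* really frame_count entries */
	uint32_t scd_ssn;               /* lands right after the last one */
};

static uint32_t get_scd_ssn(const struct tx_resp *r)
{
	const uint32_t *p = (const uint32_t *)&r->status[0];

	/* one 32-bit agg_tx_status per frame, then the SSN word */
	return p[r->frame_count] & 0xfff;
}

int main(void)
{
	struct tx_resp r = { .frame_count = 3, .scd_ssn = 0x1042 };

	printf("ssn=0x%x\n", (unsigned)get_scd_ssn(&r)); /* ssn=0x42 */
	return 0;
}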
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt) struct iwl_rx_packet *pkt)
{ {
...@@ -1254,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -1254,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u32 status = le16_to_cpu(tx_resp->status.status); struct agg_tx_status *agg_status =
u16 ssn = iwl_mvm_get_scd_ssn(tx_resp); iwl_mvm_get_agg_status(mvm, tx_resp);
u32 status = le16_to_cpu(agg_status->status);
u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs; struct sk_buff_head skbs;
u8 skb_freed = 0; u8 skb_freed = 0;
...@@ -1264,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -1264,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
__skb_queue_head_init(&skbs); __skb_queue_head_init(&skbs);
if (iwl_mvm_has_new_tx_api(mvm))
txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
/* we can free until ssn % q.n_bd not inclusive */
@@ -1388,7 +1450,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@@ -1520,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;
for (i = 0; i < tx_resp->frame_count; i++) {
@@ -1722,6 +1785,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
if (!le16_to_cpu(ba_res->tfd_cnt))
goto out;
/*
* TODO:
* When supporting multi TID aggregations - we need to move
@@ -1730,12 +1796,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
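/* fw reports mgmt frames under IWL_MGMT_TID; the driver tracks them at
 * index IWL_MAX_TID_COUNT, hence the remap below */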
tid = ba_res->ra_tid[0].tid;
if (tid == IWL_MGMT_TID)
tid = IWL_MAX_TID_COUNT;
iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_res->tfd[0].q_num)),
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));
out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
...
@@ -598,6 +598,9 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
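/* with the new TX API the fw allocates queues itself, so there is no
 * inactive-queue pool to settle for below */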
if (iwl_mvm_has_new_tx_api(mvm))
return -ENOSPC;
/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
@@ -628,6 +631,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
};
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
@@ -689,10 +695,43 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
return enable_queue;
}
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout)
{
struct iwl_tx_queue_cfg_cmd cmd = {
.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
.sta_id = sta_id,
.tid = tid,
};
int queue;
if (cmd.tid == IWL_MAX_TID_COUNT)
cmd.tid = IWL_MGMT_TID;
queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
SCD_QUEUE_CFG, timeout);
if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ for sta %d tid %d, ret: %d\n",
sta_id, tid, queue);
return queue;
}
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid);
return queue;
}
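A hypothetical caller sketch (not from this patch; mvmsta and wdg_timeout
are assumed to be in scope) shows why the TVQM path returns the queue
number instead of taking one as an argument - on gen2 hardware the
firmware picks the queue:

	int queue = iwl_mvm_tvqm_enable_txq(mvm, mac80211_queue, sta_id,
					    tid, wdg_timeout);
	if (queue < 0)
		return queue;	/* allocation failed, e.g. -ENOSPC */
	mvmsta->tid_data[tid].txq_id = queue;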
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
/* Send the enabling command if we need to */
if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
cfg->sta_id, cfg->tid)) {
@@ -709,7 +748,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
@@ -724,7 +764,6 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
int ret;
spin_lock_bh(&mvm->queue_info_lock);
@@ -795,14 +834,23 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
spin_unlock_bh(&mvm->queue_info_lock);
if (iwl_mvm_has_new_tx_api(mvm)) {
iwl_trans_txq_free(mvm->trans, queue);
} else {
int ret;
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd);
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
return ret;
}
return 0;
}
/**
@@ -824,7 +872,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};
if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1096,6 +1144,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
@@ -1162,6 +1213,9 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
unsigned long now = jiffies;
int i;
if (iwl_mvm_has_new_tx_api(mvm))
return;
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
...
@@ -235,14 +235,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
return ret;
}
trans_pcie->ctxt_info = ctxt_info;
...
@@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
@@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
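For reference, each table entry binds a (device, subdevice) pair to a
config struct; a sketch of what IWL_PCI_DEVICE presumably expands to
(based on the driver's usual definition, quoted from memory):

	#define IWL_PCI_DEVICE(dev, subdev, cfg) \
		.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
		.driver_data = (kernel_ulong_t)&(cfg)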
@@ -668,8 +673,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (iwl_trans->cfg->rf_id &&
(cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
cfg = &iwla000_2ac_cfg_jf;
iwl_trans->cfg = cfg;
}
...
@@ -205,11 +205,11 @@ struct iwl_cmd_meta {
* into the buffer regardless of whether it should be mapped or not.
* This indicates how big the first TB must be to include the scratch buffer
* and the assigned PN.
* Since PN location is 8 bytes at offset 12, it's 20 now.
* If we make it bigger then allocations will be bigger and copy slower, so
* that's probably not useful.
*/
#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
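The arithmetic behind the new value, spelled out from the offsets given in
the comment above:

	/* the 8-byte PN starts at offset 12, so:
	 *   IWL_FIRST_TB_SIZE       = 12 + 8        = 20
	 *   IWL_FIRST_TB_SIZE_ALIGN = ALIGN(20, 64) = 64
	 * i.e. each first-TB entry still occupies one 64-byte slot.
	 */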
struct iwl_pcie_txq_entry {
@@ -241,6 +241,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
* @dma_addr: physical addr for BD's
@@ -280,6 +281,7 @@ struct iwl_txq {
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
struct iwl_dma_ptr bc_tbl;
int write_ptr;
int read_ptr;
@@ -411,7 +413,8 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
struct iwl_txq *txq_memory;
struct iwl_txq *txq[IWL_MAX_HW_QUEUES];
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
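The txq_memory/txq split supports both allocation schemes; a simplified
sketch of the two paths (illustrative, assuming num_queues is the
configured queue count; not the patch's exact code):

	/* legacy: all queues carved out of one contiguous block */
	trans_pcie->txq_memory = kcalloc(num_queues,
					 sizeof(struct iwl_txq), GFP_KERNEL);
	for (i = 0; i < num_queues; i++)
		trans_pcie->txq[i] = &trans_pcie->txq_memory[i];

	/* gen2: queues allocated on demand, one pointer slot at a time */
	trans_pcie->txq[queue] = kzalloc(sizeof(struct iwl_txq), GFP_KERNEL);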
@@ -650,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, int idx)
{
return txq->tfds + trans_pcie->tfd_size * idx;
}
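A usage sketch for the new helper (hypothetical call site): since the
legacy and gen2 TFD formats differ in size, indexing is done in bytes via
tfd_size rather than through a fixed struct type:

	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);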
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -758,10 +767,35 @@ void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
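/* the helpers above were previously static (see the hunks below where
 * iwl_pcie_apm_stop_master() and iwl_pcie_conf_msix_hw() lose their
 * static qualifier); they are exported so the gen2 code can reuse them */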
/* transport gen 2 exported functions */ /* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill); const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
struct iwl_tx_queue_cfg_cmd *cmd,
int cmd_id,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */
...
@@ -1094,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -1419,8 +1419,11 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans);
local_bh_enable();
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
if (!trans_pcie->txq[i])
continue;
del_timer(&trans_pcie->txq[i]->stuck_timer);
}
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
...
@@ -111,6 +111,153 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
return 0;
}
static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
if (op_mode_leave) {
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_pcie_gen2_apm_init(trans);
/* inform ME that we are leaving */
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
mdelay(1);
iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
mdelay(5);
}
clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(1000, 2000);
/*
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, was_hw_rfkill;
lockdep_assert_held(&trans_pcie->mutex);
if (trans_pcie->is_down)
return;
trans_pcie->is_down = true;
was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
/*
* If a HW restart happens during firmware loading,
* then the firmware loading might call this function
* and later it might be called again due to the
* restart. So don't process again if the device is
* already dead.
*/
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
iwl_pcie_gen2_tx_stop(trans);
iwl_pcie_rx_stop(trans);
}
iwl_pcie_ctxt_info_free_paging(trans);
iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(1000, 2000);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
* work. This causes a bug in RF-KILL flows, since the interrupt
* that enables radio won't fire on the correct irq, and the
* driver won't be able to handle the interrupt.
* Configure the IVAR table again after reset.
*/
iwl_pcie_conf_msix_hw(trans_pcie);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
* This is a bug in certain versions of the hardware.
* Certain devices also keep sending the HW RF kill interrupt all
* the time unless it is ACKed, even when the interrupt should be
* masked. Re-ACK all the interrupts here.
*/
iwl_disable_interrupts(trans);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
clear_bit(STATUS_RFKILL, &trans->status);
/*
* Even if we stop the HW, we still want the RF kill
* interrupt
*/
iwl_enable_rfkill_int(trans);
/*
* Check again since the RF kill state may have changed while
* all the interrupts were disabled; in that case we couldn't
* receive the RF kill interrupt and update the state in the
* op_mode.
* Don't call the op_mode if the rfkill state hasn't changed.
* This allows the op_mode to call stop_device from the rfkill
* notification without endless recursion. Under very rare
* circumstances, we might have a small recursion if the rfkill
* state changed exactly now while we were called from stop_device.
* This is very unlikely but can happen and is supported.
*/
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
/* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
mutex_lock(&trans_pcie->mutex);
_iwl_trans_pcie_gen2_stop_device(trans, low_power);
mutex_unlock(&trans_pcie->mutex);
}
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -212,8 +359,9 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
...
@@ -80,7 +80,6 @@
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-context-info.h"
#include "iwl-fw-error-dump.h" #include "iwl-fw-error-dump.h"
#include "internal.h" #include "internal.h"
#include "iwl-fh.h" #include "iwl-fh.h"
...@@ -449,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) ...@@ -449,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
} }
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{ {
int ret = 0; int ret = 0;
...@@ -721,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, ...@@ -721,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret; return ret;
} }
/*
* Driver takes ownership of the secure machine before FW load
* and prevents a race with the BT load.
* W/A for ROM bug. (should be removed in the next Si step)
*/
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
u32 val, loop = 1000;
/*
* Check the RSA semaphore is accessible.
* If the HW isn't locked and the rsa semaphore isn't accessible,
* we are in trouble.
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
IWL_DEBUG_INFO(trans,
"can't access the RSA semaphore it is write protected\n");
return 0;
}
/* take ownership on the AUX IF */
iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
do {
iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
if (val == 0x1) {
iwl_write_prph(trans, RSA_ENABLE, 0);
return 0;
}
udelay(10);
loop--;
} while (loop > 0);
IWL_ERR(trans, "Failed to take ownership on secure machine\n");
return -EIO;
}
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
const struct fw_img *image,
int cpu,
@@ -1011,11 +969,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
if (trans->dbg_dest_tlv)
iwl_pcie_apply_destination(trans);
/* TODO: remove in the next Si step */
ret = iwl_pcie_rsa_race_bug_wa(trans);
if (ret)
return ret;
IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
iwl_read_prph(trans, WFPM_GP2));
@@ -1126,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
struct iwl_trans *trans = trans_pcie->trans;
@@ -1213,9 +1166,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
}
}
iwl_pcie_ctxt_info_free_paging(trans);
iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1405,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
lockdep_assert_held(&trans_pcie->mutex);
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
if (trans->cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans, true);
else
_iwl_trans_pcie_stop_device(trans, true);
}
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
@@ -1813,6 +1767,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
if (trans->cfg->gen2)
iwl_pcie_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
@@ -1983,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
struct iwl_txq *txq = trans_pcie->txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
@@ -2035,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
if (i == trans_pcie->cmd_queue)
continue;
@@ -2108,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
continue;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = trans_pcie->txq[cnt];
wr_ptr = ACCESS_ONCE(txq->write_ptr);
while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
@@ -2299,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
if (!trans_pcie->txq_memory)
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2307,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
return -ENOMEM;
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
cnt, txq->read_ptr, txq->write_ptr,
@@ -2724,7 +2681,7 @@ static struct iwl_trans_dump_data
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs;
@@ -2916,20 +2873,15 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.start_hw = iwl_trans_pcie_start_hw,
.fw_alive = iwl_trans_pcie_gen2_fw_alive,
.start_fw = iwl_trans_pcie_gen2_start_fw,
.stop_device = iwl_trans_pcie_gen2_stop_device,
.send_cmd = iwl_trans_pcie_gen2_send_hcmd,
.tx = iwl_trans_pcie_gen2_tx,
.reclaim = iwl_trans_pcie_reclaim,
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
};
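The gen2 transport plugs its own handlers into the same ops table the
opmode already dispatches through; a sketch of that dispatch, patterned
on the usual iwl-trans.h wrapper style (quoted from memory, simplified):

	static inline int iwl_trans_tx(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_device_cmd *dev_cmd,
				       int queue)
	{
		return trans->ops->tx(trans, skb, dev_cmd, queue);
	}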
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
...