Commit 59731bb8 authored by John W. Linville
parents 729d8d18 bd4ace2a
......@@ -128,16 +128,6 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
config IWLWIFI_DEVICE_TESTMODE
def_bool y
depends on IWLWIFI
depends on NL80211_TESTMODE
help
This option enables testmode support for iwlwifi devices through
NL80211_TESTMODE. It allows user space validation applications to
interact with the device through generic netlink messages via the
NL80211_TESTMODE channel.
config IWLWIFI_P2P
def_bool y
bool "iwlwifi experimental P2P support"
......@@ -13,7 +13,6 @@ iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
iwlwifi-objs += $(iwlwifi-m)
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
......@@ -8,6 +8,5 @@ iwldvm-objs += scan.o led.o
iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
......@@ -405,43 +405,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
extern int iwl_alive_start(struct iwl_priv *priv);
/* testmode support */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
extern void iwl_testmode_free(struct iwl_priv *priv);
#else
static inline
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
static inline
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
return -ENOSYS;
}
static inline void iwl_testmode_init(struct iwl_priv *priv)
{
}
static inline void iwl_testmode_free(struct iwl_priv *priv)
{
}
#endif
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
enum iwl_rxon_context_id ctxid);
......@@ -52,8 +52,6 @@
#include "rs.h"
#include "tt.h"
#include "iwl-test.h"
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
......@@ -691,10 +689,6 @@ struct iwl_priv {
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
#define IWL_OWNERSHIP_DRIVER 0
#define IWL_OWNERSHIP_TM 1
u8 ucode_owner;
/* ucode beacon time */
u32 ucode_beacon_time;
int missed_beacon_threshold;
......@@ -889,7 +883,7 @@ struct iwl_priv {
#endif /* CONFIG_IWLWIFI_DEBUGFS */
struct iwl_nvm_data *nvm_data;
/* eeprom blob for debugfs/testmode */
/* eeprom blob for debugfs */
u8 *eeprom_blob;
size_t eeprom_blob_size;
......@@ -905,11 +899,6 @@ struct iwl_priv {
unsigned long blink_on, blink_off;
bool led_registered;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_test tst;
u32 tm_fixed_rate;
#endif
/* WoWLAN GTK rekey data */
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
__le64 replay_ctr;
......@@ -1288,12 +1288,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lockdep_assert_held(&priv->mutex);
if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
!(cmd->flags & CMD_ON_DEMAND)) {
IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
return -EIO;
}
return iwl_trans_send_cmd(priv->trans, cmd);
}
......@@ -1766,8 +1766,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
.remain_on_channel = iwlagn_mac_remain_on_channel,
.cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
.rssi_callback = iwlagn_mac_rssi_callback,
CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
.set_tim = iwlagn_mac_set_tim,
};
......@@ -1105,8 +1105,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
priv->agg_tids_count = 0;
priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
priv->rx_statistics_jiffies = jiffies;
/* Choose which receivers/antennas to use */
......@@ -1172,12 +1170,6 @@ static void iwl_option_config(struct iwl_priv *priv)
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
#endif
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
#else
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
#endif
#ifdef CONFIG_IWLWIFI_P2P
IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
#else
......@@ -1355,8 +1347,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
true : false;
/* enable/disable bt channel inhibition */
priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
/* bt channel inhibition enabled */
priv->bt_ch_announce = true;
IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
(priv->bt_ch_announce) ? "On" : "Off");
......@@ -1451,7 +1443,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
********************/
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
iwl_testmode_init(priv);
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
......@@ -1488,7 +1479,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
iwlagn_mac_unregister(priv);
out_destroy_workqueue:
iwl_tt_exit(priv);
iwl_testmode_free(priv);
iwl_cancel_deferred_work(priv);
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
......@@ -1510,7 +1500,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
......@@ -351,12 +351,6 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/* testmode has higher priority to overwrite the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
#endif
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
......@@ -419,7 +413,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
load = rs_tl_get_load(lq_data, tid);
if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
......@@ -433,10 +426,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
tid);
ieee80211_stop_tx_ba_session(sta, tid);
}
} else {
IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
"because load = %u\n", tid, load);
}
return ret;
}
......@@ -1083,11 +1072,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
......@@ -2913,9 +2897,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
......@@ -335,8 +335,7 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
if (msecs < 99)
return;
if (iwlwifi_mod_params.plcp_check &&
!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
iwl_force_rf_reset(priv, false);
}
......@@ -1120,20 +1119,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
*/
iwl_notification_wait_notify(&priv->notif_wait, pkt);
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/*
* RX data may be forwarded to userspace in one of two cases: when the
* user owns the fw through testmode, or when the user requested to
* monitor the rx without affecting the regular flow.
* In these cases the iwl_test object will handle forwarding the rx
* data to user space.
* Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
* continues.
*/
iwl_test_rx(&priv->tst, rxb);
#endif
if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
/* Based on type of command response or notification,
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
......@@ -1146,6 +1131,5 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
}
return err;
}
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <net/net_namespace.h>
#include <linux/netdevice.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/netlink.h>
#include "iwl-debug.h"
#include "iwl-trans.h"
#include "dev.h"
#include "agn.h"
#include "iwl-test.h"
#include "iwl-testmode.h"
static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
struct iwl_host_cmd *cmd)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return iwl_dvm_send_cmd(priv, cmd);
}
static bool iwl_testmode_valid_hw_addr(u32 addr)
{
if (iwlagn_hw_valid_rtc_data_addr(addr))
return true;
if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
addr < IWLAGN_RTC_INST_UPPER_BOUND)
return true;
return false;
}
static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return priv->fw->ucode_ver;
}
static struct sk_buff*
iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
}
static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
return cfg80211_testmode_reply(skb);
}
static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
int len)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
GFP_ATOMIC);
}
static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
return cfg80211_testmode_event(skb, GFP_ATOMIC);
}
static struct iwl_test_ops tst_ops = {
.send_cmd = iwl_testmode_send_cmd,
.valid_hw_addr = iwl_testmode_valid_hw_addr,
.get_fw_ver = iwl_testmode_get_fw_ver,
.alloc_reply = iwl_testmode_alloc_reply,
.reply = iwl_testmode_reply,
.alloc_event = iwl_testmode_alloc_event,
.event = iwl_testmode_event,
};
void iwl_testmode_init(struct iwl_priv *priv)
{
iwl_test_init(&priv->tst, priv->trans, &tst_ops);
}
void iwl_testmode_free(struct iwl_priv *priv)
{
iwl_test_free(&priv->tst);
}
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
{
struct iwl_notification_wait calib_wait;
static const u8 calib_complete[] = {
CALIBRATION_COMPLETE_NOTIFICATION
};
int ret;
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
calib_complete, ARRAY_SIZE(calib_complete),
NULL, NULL);
ret = iwl_init_alive_start(priv);
if (ret) {
IWL_ERR(priv, "Fail init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
if (ret)
IWL_ERR(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
iwl_remove_notification(&priv->notif_wait, &calib_wait);
return ret;
}
/*
* This function handles user application commands for the driver.
*
* It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and calls the
* corresponding handler.
*
* If the command ID is unknown, -ENOSYS is replied; otherwise, the return
* value of the actual command execution is replied to the user application.
*
* If there is any message responding to user space, IWL_TM_ATTR_SYNC_RSP
* is used to carry the message, while IWL_TM_ATTR_COMMAND must be set to
* IWL_TM_CMD_DEV2APP_SYNC_RSP.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_trans *trans = priv->trans;
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
u32 inst_size = 0, data_size = 0;
const struct fw_img *img;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
rsp_data_ptr = (unsigned char *)priv->cfg->name;
rsp_data_len = strlen(priv->cfg->name);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
rsp_data_len + 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
rsp_data_len, rsp_data_ptr))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (status)
IWL_ERR(priv, "Error loading init ucode: %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
priv->ucode_loaded = false;
iwl_trans_stop_device(trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (status) {
IWL_ERR(priv,
"Error loading runtime ucode: %d\n", status);
break;
}
status = iwl_alive_start(priv);
if (status)
IWL_ERR(priv,
"Error starting the device: %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
iwl_scan_cancel_timeout(priv, 200);
priv->ucode_loaded = false;
iwl_trans_stop_device(trans);
status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (status) {
IWL_ERR(priv,
"Error loading WOWLAN ucode: %d\n", status);
break;
}
status = iwl_alive_start(priv);
if (status)
IWL_ERR(priv,
"Error starting the device: %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
if (priv->eeprom_blob) {
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
priv->eeprom_blob_size + 20);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
nla_put(skb, IWL_TM_ATTR_EEPROM,
priv->eeprom_blob_size,
priv->eeprom_blob))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n",
status);
} else
return -ENODATA;
break;
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
if (!tb[IWL_TM_ATTR_FIXRATE]) {
IWL_ERR(priv, "Missing fixrate setting\n");
return -ENOMSG;
}
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
if (!skb) {
IWL_ERR(priv, "Memory allocation fail\n");
return -ENOMEM;
}
if (!priv->ucode_loaded) {
IWL_ERR(priv, "No uCode has not been loaded\n");
return -EINVAL;
} else {
img = &priv->fw->img[priv->cur_ucode];
inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
}
if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
goto nla_put_failure;
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_ERR(priv, "Error sending msg : %d\n", status);
break;
default:
IWL_ERR(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
}
return status;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* This function handles the user application request to switch uCode
* ownership.
*
* It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and decides
* who the current owner of the uCode is.
*
* If the current owner is IWL_OWNERSHIP_TM, the only host commands
* delivered to the uCode are those coming from testmode; all other
* host commands are dropped.
*
* By default the driver owns the uCode in normal operational mode.
*
* @hw: ieee80211_hw object that represents the device
* @tb: gnl message fields from the user space
*/
static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
u8 owner;
if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
IWL_ERR(priv, "Missing ucode owner\n");
return -ENOMSG;
}
owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
if (owner == IWL_OWNERSHIP_DRIVER) {
priv->ucode_owner = owner;
iwl_test_enable_notifications(&priv->tst, false);
} else if (owner == IWL_OWNERSHIP_TM) {
priv->ucode_owner = owner;
iwl_test_enable_notifications(&priv->tst, true);
} else {
IWL_ERR(priv, "Invalid owner\n");
return -EINVAL;
}
return 0;
}
/* The testmode gnl message handler that takes the gnl message from user
* space, parses it per the policy iwl_testmode_gnl_msg_policy, and then
* invokes the corresponding handler.
*
* This function is invoked when a user space application sends a gnl
* message through the testmode tunnel NL80211_CMD_TESTMODE regulated
* by nl80211.
*
* It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
* dispatching it to the corresponding handler.
*
* If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to the user
* application; -ENOSYS is replied if the command is unknown;
* otherwise, the command is dispatched to the respective handler.
*
* @hw: ieee80211_hw object that represents the device
* @data: pointer to user space message
* @len: length in byte of @data
*/
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct nlattr *tb[IWL_TM_ATTR_MAX];
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
result = iwl_test_parse(&priv->tst, tb, data, len);
if (result)
return result;
/* in case multiple accesses to the device happens */
mutex_lock(&priv->mutex);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
case IWL_TM_CMD_APP2DEV_END_TRACE:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
result = iwl_test_handle_cmd(&priv->tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
case IWL_TM_CMD_APP2DEV_OWNERSHIP:
IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
result = iwl_testmode_ownership(hw, tb);
break;
default:
IWL_ERR(priv, "Unknown testmode command\n");
result = -ENOSYS;
break;
}
mutex_unlock(&priv->mutex);
if (result)
IWL_ERR(priv, "Test cmd failed result=%d\n", result);
return result;
}
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int result;
u32 cmd;
if (cb->args[3]) {
/* offset by 1 since commands start at 0 */
cmd = cb->args[3] - 1;
} else {
struct nlattr *tb[IWL_TM_ATTR_MAX];
result = iwl_test_parse(&priv->tst, tb, data, len);
if (result)
return result;
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
cb->args[3] = cmd + 1;
}
/* in case multiple accesses to the device happens */
mutex_lock(&priv->mutex);
result = iwl_test_dump(&priv->tst, cmd, skb, cb);
mutex_unlock(&priv->mutex);
return result;
}
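The handlers above are reached through nl80211's generic-netlink testmode tunnel. As a purely illustrative, hypothetical sketch (not part of this commit), a user-space validation tool built on libnl-3/libnl-genl-3 would wrap the driver TLVs inside NL80211_ATTR_TESTDATA roughly as follows; the helper name and the interface name passed in are placeholders, and a real tool would also install a receive callback to parse the reply attributes:

#include <net/if.h>			/* if_nametoindex() */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include "iwl-testmode.h"		/* IWL_TM_* definitions from the driver tree */

/* Hypothetical helper: ask the driver for its uCode version through
 * NL80211_CMD_TESTMODE. Error handling is trimmed for brevity. */
static int tm_get_fw_version(const char *ifname)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *tmdata;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));

	/* Driver-specific TLVs travel nested inside NL80211_ATTR_TESTDATA;
	 * the driver parses them with iwl_testmode_gnl_msg_policy. */
	tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_APP2DEV_GET_FW_VERSION);
	nla_nest_end(msg, tmdata);

	nl_send_auto_complete(sk, msg);
	nl_recvmsgs_default(sk);	/* a real tool parses IWL_TM_ATTR_FW_VERSION here */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}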
......@@ -162,18 +162,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
if (priv->tm_fixed_rate) {
/*
* rate overwrite by testmode
* we not only send the lq command to change the rate,
* we also re-enforce it on a per data packet basis.
*/
tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
sizeof(tx_cmd->rate_n_flags));
}
#endif
return;
} else if (ieee80211_is_back_req(fc))
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
......@@ -67,16 +67,16 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 6
#define IWL3160_UCODE_API_MAX 6
#define IWL7260_UCODE_API_MAX 7
#define IWL3160_UCODE_API_MAX 7
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 6
#define IWL3160_UCODE_API_OK 6
#define IWL7260_UCODE_API_OK 7
#define IWL3160_UCODE_API_OK 7
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 6
#define IWL3160_UCODE_API_MIN 6
#define IWL7260_UCODE_API_MIN 7
#define IWL3160_UCODE_API_MIN 7
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
......@@ -222,6 +222,7 @@ struct iwl_cfg {
const u32 max_inst_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
bool bt_shared_single_ant;
u16 nvm_ver;
u16 nvm_calib_ver;
/* params not likely to change within a device family */
......@@ -1111,11 +1111,8 @@ void iwl_drv_stop(struct iwl_drv *drv)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
.restart_fw = true,
.plcp_check = true,
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.bt_ch_announce = true,
.auto_agg = true,
.wd_disable = true,
/* the rest are 0 by default */
};
......@@ -1223,14 +1220,6 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (defualt: 0 dB)");
module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
bool, S_IRUGO);
MODULE_PARM_DESC(bt_ch_inhibition,
"Enable BT channel inhibition (default: enable)");
module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
"Disable stuck queue watchdog timer 0=system default, "
......@@ -1272,8 +1261,3 @@ module_param_named(power_level, iwlwifi_mod_params.power_level,
int, S_IRUGO);
MODULE_PARM_DESC(power_level,
"default power save level (range from 1 - 5, default: 1)");
module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
bool, S_IRUGO);
MODULE_PARM_DESC(auto_agg,
"enable agg w/o check traffic load (default: enable)");
......@@ -93,7 +93,6 @@ enum iwl_power_level {
* use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
......@@ -101,15 +100,12 @@ enum iwl_power_level {
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
* @bt_ch_announce: BT channel inhibition, default = enable
* @auto_agg: enable agg. without check, default = true
*/
struct iwl_mod_params {
int sw_crypto;
unsigned int disable_11n;
int amsdu_size_8K;
bool restart_fw;
bool plcp_check;
int wd_disable;
bool bt_coex_active;
int led_mode;
......@@ -119,8 +115,6 @@ struct iwl_mod_params {
u32 debug_level;
#endif
int ant_coupling;
bool bt_ch_announce;
bool auto_agg;
char *nvm_file;
};
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <linux/export.h>
#include <net/netlink.h>
#include "iwl-drv.h"
#include "iwl-io.h"
#include "iwl-fh.h"
#include "iwl-prph.h"
#include "iwl-trans.h"
#include "iwl-test.h"
#include "iwl-csr.h"
#include "iwl-testmode.h"
/*
* Periphery registers absolute lower bound. This is used in order to
* differentiate register access through HBUS_TARG_PRPH_* and
* HBUS_TARG_MEM_* accesses.
*/
#define IWL_ABS_PRPH_START (0xA00000)
/*
* The TLVs used in the gnl messages between the kernel module and the
* user space application. The messages are carried through the
* NL80211_CMD_TESTMODE channel regulated by nl80211 and parsed with
* iwl_testmode_gnl_msg_policy. See iwl-testmode.h
*/
static
struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
[IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
[IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
[IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
[IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
[IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
};
static inline void iwl_test_trace_clear(struct iwl_test *tst)
{
memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
}
static void iwl_test_trace_stop(struct iwl_test *tst)
{
if (!tst->trace.enabled)
return;
if (tst->trace.cpu_addr && tst->trace.dma_addr)
dma_free_coherent(tst->trans->dev,
tst->trace.tsize,
tst->trace.cpu_addr,
tst->trace.dma_addr);
iwl_test_trace_clear(tst);
}
static inline void iwl_test_mem_clear(struct iwl_test *tst)
{
memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
}
static inline void iwl_test_mem_stop(struct iwl_test *tst)
{
if (!tst->mem.in_read)
return;
iwl_test_mem_clear(tst);
}
/*
* Initializes the test object
* During the lifetime of the test object it is assumed that the transport is
* started. The test object should be stopped before the transport is stopped.
*/
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
struct iwl_test_ops *ops)
{
tst->trans = trans;
tst->ops = ops;
iwl_test_trace_clear(tst);
iwl_test_mem_clear(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_init);
/*
* Stop the test object
*/
void iwl_test_free(struct iwl_test *tst)
{
iwl_test_mem_stop(tst);
iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
static inline int iwl_test_send_cmd(struct iwl_test *tst,
struct iwl_host_cmd *cmd)
{
return tst->ops->send_cmd(tst->trans->op_mode, cmd);
}
static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
{
return tst->ops->valid_hw_addr(addr);
}
static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
{
return tst->ops->get_fw_ver(tst->trans->op_mode);
}
static inline struct sk_buff*
iwl_test_alloc_reply(struct iwl_test *tst, int len)
{
return tst->ops->alloc_reply(tst->trans->op_mode, len);
}
static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
{
return tst->ops->reply(tst->trans->op_mode, skb);
}
static inline struct sk_buff*
iwl_test_alloc_event(struct iwl_test *tst, int len)
{
return tst->ops->alloc_event(tst->trans->op_mode, len);
}
static inline void
iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
{
return tst->ops->event(tst->trans->op_mode, skb);
}
/*
* This function handles the user application commands to the fw. The fw
* commands are sent in a synchronous manner. If the user requested the
* command response, it is sent back to the user.
*/
static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
{
struct iwl_host_cmd cmd;
struct iwl_rx_packet *pkt;
struct sk_buff *skb;
void *reply_buf;
u32 reply_len;
int ret;
bool cmd_want_skb;
memset(&cmd, 0, sizeof(struct iwl_host_cmd));
if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
!tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
return -ENOMSG;
}
cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
if (cmd_want_skb)
cmd.flags |= CMD_WANT_SKB;
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
cmd.id, cmd.flags, cmd.len[0]);
ret = iwl_test_send_cmd(tst, &cmd);
if (ret) {
IWL_ERR(tst->trans, "Failed to send hcmd\n");
return ret;
}
if (!cmd_want_skb)
return ret;
/* Handling return of SKB to the user */
pkt = cmd.resp_pkt;
if (!pkt) {
IWL_ERR(tst->trans, "HCMD received a null response packet\n");
return ret;
}
reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
skb = iwl_test_alloc_reply(tst, reply_len + 20);
reply_buf = kmemdup(&pkt->hdr, reply_len, GFP_KERNEL);
if (!skb || !reply_buf) {
kfree_skb(skb);
kfree(reply_buf);
return -ENOMEM;
}
/* The reply is in a page, that we cannot send to user space. */
iwl_free_resp(&cmd);
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
goto nla_put_failure;
return iwl_test_reply(tst, skb);
nla_put_failure:
IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
kfree(reply_buf);
kfree_skb(skb);
return -ENOMSG;
}
/*
* Handles the user application commands for register access.
*/
static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
{
u32 ofs, val32, cmd;
u8 val8;
struct sk_buff *skb;
int status = 0;
struct iwl_trans *trans = tst->trans;
if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
IWL_ERR(trans, "Missing reg offset\n");
return -ENOMSG;
}
ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
/*
* Allow access only to FH/CSR/HBUS in direct mode.
* Since we don't have the upper bounds for the CSR and HBUS segments,
* we will use only the upper bound of FH for sanity check.
*/
if (ofs >= FH_MEM_UPPER_BOUND) {
IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
FH_MEM_UPPER_BOUND);
return -EINVAL;
}
switch (cmd) {
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read_direct32(tst->trans, ofs);
IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(trans, "Error sending msg : %d\n", status);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_ERR(trans, "Missing value to write\n");
return -ENOMSG;
} else {
val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
iwl_write_direct32(tst->trans, ofs, val32);
}
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_ERR(trans, "Missing value to write\n");
return -ENOMSG;
} else {
val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
iwl_write8(tst->trans, ofs, val8);
}
break;
default:
IWL_ERR(trans, "Unknown test register cmd ID\n");
return -ENOMSG;
}
return status;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Handles the request to start FW tracing. Allocates the trace buffer
* and sends a reply to user space with the address of the allocated buffer.
*/
static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
{
struct sk_buff *skb;
int status = 0;
if (tst->trace.enabled)
return -EBUSY;
if (!tb[IWL_TM_ATTR_TRACE_SIZE])
tst->trace.size = TRACE_BUFF_SIZE_DEF;
else
tst->trace.size =
nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
if (!tst->trace.size)
return -EINVAL;
if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
tst->trace.size > TRACE_BUFF_SIZE_MAX)
return -EINVAL;
tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
tst->trace.tsize,
&tst->trace.dma_addr,
GFP_KERNEL);
if (!tst->trace.cpu_addr)
return -ENOMEM;
tst->trace.enabled = true;
tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
iwl_test_trace_stop(tst);
return -ENOMEM;
}
if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
sizeof(tst->trace.dma_addr),
(u64 *)&tst->trace.dma_addr))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
DUMP_CHUNK_SIZE);
return status;
nla_put_failure:
kfree_skb(skb);
if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
iwl_test_trace_stop(tst);
return -EMSGSIZE;
}
/*
* Handles indirect reads from the periphery or the SRAM. The read is performed
* into a temporary buffer. The user space application should later issue a
* dump command to retrieve the data.
*/
static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
{
struct iwl_trans *trans = tst->trans;
unsigned long flags;
int i;
if (size & 0x3)
return -EINVAL;
tst->mem.size = size;
tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
if (tst->mem.addr == NULL)
return -ENOMEM;
/* Hard-coded periphery absolute address */
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
if (!iwl_trans_grab_nic_access(trans, false, &flags)) {
return -EIO;
}
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
addr | (3 << 24));
for (i = 0; i < size; i += 4)
*(u32 *)(tst->mem.addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
iwl_trans_release_nic_access(trans, &flags);
} else { /* target memory (SRAM) */
iwl_trans_read_mem(trans, addr, tst->mem.addr,
tst->mem.size / 4);
}
tst->mem.nchunks =
DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
tst->mem.in_read = true;
return 0;
}
/*
* Handles indirect writes to the periphery or SRAM. The data to write is
* taken from the buffer supplied by the user application.
*/
static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
u32 size, unsigned char *buf)
{
struct iwl_trans *trans = tst->trans;
u32 val, i;
unsigned long flags;
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
/* Periphery writes can be 1-3 bytes long, or DWORDs */
if (size < 4) {
memcpy(&val, buf, size);
if (!iwl_trans_grab_nic_access(trans, false, &flags))
return -EIO;
iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
(addr & 0x0000FFFF) |
((size - 1) << 24));
iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
iwl_trans_release_nic_access(trans, &flags);
} else {
if (size % 4)
return -EINVAL;
for (i = 0; i < size; i += 4)
iwl_write_prph(trans, addr+i,
*(u32 *)(buf+i));
}
} else if (iwl_test_valid_hw_addr(tst, addr)) {
iwl_trans_write_mem(trans, addr, buf, size / 4);
} else {
return -EINVAL;
}
return 0;
}
/*
* Handles the user application commands for indirect read/write
* to/from the periphery or the SRAM.
*/
static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
{
u32 addr, size, cmd;
unsigned char *buf;
/* Both read and write should be blocked, for atomicity */
if (tst->mem.in_read)
return -EBUSY;
cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
IWL_ERR(tst->trans, "Error finding memory offset address\n");
return -ENOMSG;
}
addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
IWL_ERR(tst->trans, "Error finding size for memory reading\n");
return -ENOMSG;
}
size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
return iwl_test_indirect_read(tst, addr, size);
} else {
if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
return -EINVAL;
buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
return iwl_test_indirect_write(tst, addr, size, buf);
}
}
/*
* Enable notifications to user space
*/
static int iwl_test_notifications(struct iwl_test *tst,
struct nlattr **tb)
{
tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
return 0;
}
/*
* Handles the request to get the device id
*/
static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
{
u32 devid = tst->trans->hw_id;
struct sk_buff *skb;
int status;
IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
return 0;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Handles the request to get the FW version
*/
static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
{
struct sk_buff *skb;
int status;
u32 ver = iwl_test_fw_ver(tst);
IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
skb = iwl_test_alloc_reply(tst, 20);
if (!skb) {
IWL_ERR(tst->trans, "Memory allocation fail\n");
return -ENOMEM;
}
if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
goto nla_put_failure;
status = iwl_test_reply(tst, skb);
if (status < 0)
IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
return 0;
nla_put_failure:
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Parse the netlink message and validate that IWL_TM_ATTR_COMMAND exists
*/
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
void *data, int len)
{
int result;
result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
iwl_testmode_gnl_msg_policy);
if (result) {
IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
return result;
}
/* IWL_TM_ATTR_COMMAND is absolutely mandatory */
if (!tb[IWL_TM_ATTR_COMMAND]) {
IWL_ERR(tst->trans, "Missing testmode command type\n");
return -ENOMSG;
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_test_parse);
/*
* Handle test commands.
* Returns 1 for unknown commands (not handled by the test object); negative
* value in case of error.
*/
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
{
int result;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_UCODE:
IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
result = iwl_test_fw_cmd(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
result = iwl_test_reg(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
result = iwl_test_trace_begin(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
iwl_test_trace_stop(tst);
result = 0;
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
result = iwl_test_indirect_mem(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
result = iwl_test_notifications(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
result = iwl_test_get_fw_ver(tst, tb);
break;
case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
result = iwl_test_get_dev_id(tst, tb);
break;
default:
IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
result = 1;
break;
}
return result;
}
IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx, length;
if (!tst->trace.enabled || !tst->trace.trace_addr)
return -EFAULT;
idx = cb->args[4];
if (idx >= tst->trace.nchunks)
return -ENOENT;
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == tst->trace.nchunks) &&
(tst->trace.size % DUMP_CHUNK_SIZE))
length = tst->trace.size %
DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
cb->args[4] = ++idx;
return 0;
nla_put_failure:
return -ENOBUFS;
}
static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
struct netlink_callback *cb)
{
int idx, length;
if (!tst->mem.in_read)
return -EFAULT;
idx = cb->args[4];
if (idx >= tst->mem.nchunks) {
iwl_test_mem_stop(tst);
return -ENOENT;
}
length = DUMP_CHUNK_SIZE;
if (((idx + 1) == tst->mem.nchunks) &&
(tst->mem.size % DUMP_CHUNK_SIZE))
length = tst->mem.size % DUMP_CHUNK_SIZE;
if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
goto nla_put_failure;
cb->args[4] = ++idx;
return 0;
nla_put_failure:
return -ENOBUFS;
}
/*
* Handle dump commands.
* Returns 1 for unknown commands (not handled by the test object); negative
* value in case of error.
*/
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
struct netlink_callback *cb)
{
int result;
switch (cmd) {
case IWL_TM_CMD_APP2DEV_READ_TRACE:
IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
result = iwl_test_trace_dump(tst, skb, cb);
break;
case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
result = iwl_test_buffer_dump(tst, skb, cb);
break;
default:
result = 1;
break;
}
return result;
}
IWL_EXPORT_SYMBOL(iwl_test_dump);
/*
* Multicast spontaneous messages from the device to user space.
*/
static void iwl_test_send_rx(struct iwl_test *tst,
struct iwl_rx_cmd_buffer *rxb)
{
struct sk_buff *skb;
struct iwl_rx_packet *data;
int length;
data = rxb_addr(rxb);
length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
/* the length doesn't include len_n_flags field, so add it manually */
length += sizeof(__le32);
skb = iwl_test_alloc_event(tst, length + 20);
if (skb == NULL) {
IWL_ERR(tst->trans, "Out of memory for message to user\n");
return;
}
if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
goto nla_put_failure;
iwl_test_event(tst, skb);
return;
nla_put_failure:
kfree_skb(skb);
IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
}
/*
* Called whenever an Rx frame is received from the device. If notifications to
* user space are requested, the frame is sent to the user.
*/
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
{
if (tst->notify)
iwl_test_send_rx(tst, rxb);
}
IWL_EXPORT_SYMBOL(iwl_test_rx);
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __IWL_TEST_H__
#define __IWL_TEST_H__
#include <linux/types.h>
#include "iwl-trans.h"
struct iwl_test_trace {
u32 size;
u32 tsize;
u32 nchunks;
u8 *cpu_addr;
u8 *trace_addr;
dma_addr_t dma_addr;
bool enabled;
};
struct iwl_test_mem {
u32 size;
u32 nchunks;
u8 *addr;
bool in_read;
};
/*
* struct iwl_test_ops: callbacks to the op mode
*
* The structure defines the callbacks that the op_mode should handle,
* in order to handle logic that is out of the scope of iwl_test. The
* op_mode must set all the callbacks.
* @send_cmd: handler that is used by the test object to request the
* op_mode to send a command to the fw.
*
* @valid_hw_addr: handler that is used by the test object to request the
* op_mode to check if the given address is a valid address.
*
* @get_fw_ver: handler used to get the FW version.
*
* @alloc_reply: handler used by the test object to request the op_mode
* to allocate an skb for sending a reply to the user, and initialize
* the skb. It is assumed that the test object only fills the required
* attributes.
*
* @reply: handler used by the test object to request the op_mode to reply
* to a request. The skb is an skb previously allocated by the
* alloc_reply callback.
*
* @alloc_event: handler used by the test object to request the op_mode
* to allocate an skb for sending an event, and initialize
* the skb. It is assumed that the test object only fills the required
* attributes.
*
* @event: handler used by the test object to request the op_mode to send
* an event. The skb is an skb previously allocated by the
* alloc_event callback.
*/
struct iwl_test_ops {
int (*send_cmd)(struct iwl_op_mode *op_modes,
struct iwl_host_cmd *cmd);
bool (*valid_hw_addr)(u32 addr);
u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
};
struct iwl_test {
struct iwl_trans *trans;
struct iwl_test_ops *ops;
struct iwl_test_trace trace;
struct iwl_test_mem mem;
bool notify;
};
void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
struct iwl_test_ops *ops);
void iwl_test_free(struct iwl_test *tst);
int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
void *data, int len);
int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
struct netlink_callback *cb);
void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
static inline void iwl_test_enable_notifications(struct iwl_test *tst,
bool enable)
{
tst->notify = enable;
}
#endif
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __IWL_TESTMODE_H__
#define __IWL_TESTMODE_H__
#include <linux/types.h>
/*
* Commands from user space to kernel space (IWL_TM_CMD_ID_APP2DEV_XX) and
* from kernel space to user space (IWL_TM_CMD_ID_DEV2APP_XX).
* The command ID is carried with IWL_TM_ATTR_COMMAND.
*
* @IWL_TM_CMD_APP2DEV_UCODE:
* commands from user application to the uCode,
* the actual uCode host command ID is carried with
* IWL_TM_ATTR_UCODE_CMD_ID
*
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
* @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
* commands from user application to access registers
*
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
* @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image
* @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration
* @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image
* @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data
* @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set a fixed MCS rate
* commands from user space for pure driver-level operations
*
* @IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
* @IWL_TM_CMD_APP2DEV_END_TRACE:
* @IWL_TM_CMD_APP2DEV_READ_TRACE:
* commands from user space for uCode trace operations
*
* @IWL_TM_CMD_DEV2APP_SYNC_RSP:
* commands from kernel space to carry the synchronous response
* to user application
* @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
* commands from kernel space to multicast the spontaneous messages
* to user application, or reply of host commands
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
*
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
* command from the user application to change the ownership of the uCode.
* If the application has the ownership, only host commands coming from
* testmode are delivered to the uCode. The default owner is the driver.
*
* @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake On Wireless LAN uCode image
* @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
* @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve the device ID information
* @IWL_TM_CMD_APP2DEV_GET_FW_INFO:
* retrieve information about the currently loaded uCode image
*
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
* @IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
* Commands to read/write data from periphery or SRAM memory ranges.
* For reading, a READ command is sent from userspace and the data
* is returned when the user calls a DUMP command.
* For writing, only a WRITE command is used.
* @IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
* Command to enable/disable notifications (currently RX packets) from the
* driver to userspace.
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8,
IWL_TM_CMD_APP2DEV_GET_EEPROM = 9,
IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10,
IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11,
IWL_TM_CMD_APP2DEV_END_TRACE = 12,
IWL_TM_CMD_APP2DEV_READ_TRACE = 13,
IWL_TM_CMD_DEV2APP_SYNC_RSP = 14,
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
RESERVED_18 = 18,
RESERVED_19 = 19,
RESERVED_20 = 20,
RESERVED_21 = 21,
IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
IWL_TM_CMD_APP2DEV_GET_FW_INFO = 25,
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ = 26,
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP = 27,
IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE = 28,
IWL_TM_CMD_APP2DEV_NOTIFICATIONS = 29,
IWL_TM_CMD_MAX = 30,
};
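/*
 * Editorial sketch, not from this patch: a minimal libnl-3 userspace sender
 * for one of the APP2DEV commands above, assuming nl80211's generic testmode
 * plumbing (NL80211_CMD_TESTMODE with iwlwifi attributes nested inside
 * NL80211_ATTR_TESTDATA). A copy of this header is assumed to be available
 * to userspace for the IWL_TM_* constants; "wlan0" is a placeholder
 * interface name and error handling/cleanup are omitted.
 */
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int send_reg_read32(unsigned int reg_offset)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));

	/* driver-specific attributes go inside NL80211_ATTR_TESTDATA */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND,
		    IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32);
	nla_put_u32(msg, IWL_TM_ATTR_REG_OFFSET, reg_offset);
	nla_nest_end(msg, td);

	return nl_send_auto(sk, msg);
}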
/*
* Attribute fields in testmode commands
* See enum iwl_tm_cmd_t.
*
* @IWL_TM_ATTR_NOT_APPLICABLE:
* The attribute is not applicable or invalid
* @IWL_TM_ATTR_COMMAND:
* From user space to kernel space:
* the command is destined for the uCode, the driver, or a register;
* From kernel space to user space:
* the command either carries a synchronous response,
* or a spontaneous message multicast from the device;
*
* @IWL_TM_ATTR_UCODE_CMD_ID:
* @IWL_TM_ATTR_UCODE_CMD_DATA:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_CMD_ID for the uCode host command ID;
* IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
* to the ucode
*
* @IWL_TM_ATTR_REG_OFFSET:
* @IWL_TM_ATTR_REG_VALUE8:
* @IWL_TM_ATTR_REG_VALUE32:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
* The mandatory fields are:
* IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
* IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value
*
* @IWL_TM_ATTR_SYNC_RSP:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
* The mandatory fields are:
* IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
* application command
*
* @IWL_TM_ATTR_UCODE_RX_PKT:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
* application
*
* @IWL_TM_ATTR_EEPROM:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM_RSP,
* The mandatory fields are:
* IWL_TM_ATTR_EEPROM for the data content responding to the user
* application
*
* @IWL_TM_ATTR_TRACE_ADDR:
* @IWL_TM_ATTR_TRACE_SIZE:
* @IWL_TM_ATTR_TRACE_DUMP:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
* The mandatory fields are:
* IWL_TM_ATTR_TRACE_ADDR for the trace address
* IWL_TM_ATTR_TRACE_SIZE for the trace buffer size
* IWL_TM_ATTR_TRACE_DUMP for the trace dump
*
* @IWL_TM_ATTR_FIXRATE:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
* The mandatory fields are:
* IWL_TM_ATTR_FIXRATE for the fixed rate
*
* @IWL_TM_ATTR_UCODE_OWNER:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
*
* @IWL_TM_ATTR_MEM_ADDR:
* @IWL_TM_ATTR_BUFFER_SIZE:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
* or IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE.
* The mandatory fields are:
* IWL_TM_ATTR_MEM_ADDR for the address in SRAM/periphery to read/write
* IWL_TM_ATTR_BUFFER_SIZE for the buffer size of data to read/write.
*
* @IWL_TM_ATTR_BUFFER_DUMP:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP,
* IWL_TM_ATTR_BUFFER_DUMP is used for the data that was read.
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE,
* this attribute contains the data to write.
*
* @IWL_TM_ATTR_FW_VERSION:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
* IWL_TM_ATTR_FW_VERSION for the uCode version
*
* @IWL_TM_ATTR_DEVICE_ID:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
* IWL_TM_ATTR_DEVICE_ID for the device ID information
*
* @IWL_TM_ATTR_FW_TYPE:
* @IWL_TM_ATTR_FW_INST_SIZE:
* @IWL_TM_ATTR_FW_DATA_SIZE:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_INFO,
* The mandatory fields are:
* IWL_TM_ATTR_FW_TYPE for the uCode type (INIT/RUNTIME/...)
* IWL_TM_ATTR_FW_INST_SIZE for the size of instruction section
* IWL_TM_ATTR_FW_DATA_SIZE for the size of data section
*
* @IWL_TM_ATTR_UCODE_CMD_SKB:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE this flag
* indicates that the user wants to receive the response of the command
* in a reply SKB. If it's not present, the response is not returned.
* @IWL_TM_ATTR_ENABLE_NOTIFICATION:
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_NOTIFICATIONS, this
* flag enables (if present) or disables (if not) the forwarding
* to userspace.
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
IWL_TM_ATTR_COMMAND = 1,
IWL_TM_ATTR_UCODE_CMD_ID = 2,
IWL_TM_ATTR_UCODE_CMD_DATA = 3,
IWL_TM_ATTR_REG_OFFSET = 4,
IWL_TM_ATTR_REG_VALUE8 = 5,
IWL_TM_ATTR_REG_VALUE32 = 6,
IWL_TM_ATTR_SYNC_RSP = 7,
IWL_TM_ATTR_UCODE_RX_PKT = 8,
IWL_TM_ATTR_EEPROM = 9,
IWL_TM_ATTR_TRACE_ADDR = 10,
IWL_TM_ATTR_TRACE_SIZE = 11,
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
IWL_TM_ATTR_MEM_ADDR = 15,
IWL_TM_ATTR_BUFFER_SIZE = 16,
IWL_TM_ATTR_BUFFER_DUMP = 17,
IWL_TM_ATTR_FW_VERSION = 18,
IWL_TM_ATTR_DEVICE_ID = 19,
IWL_TM_ATTR_FW_TYPE = 20,
IWL_TM_ATTR_FW_INST_SIZE = 21,
IWL_TM_ATTR_FW_DATA_SIZE = 22,
IWL_TM_ATTR_UCODE_CMD_SKB = 23,
IWL_TM_ATTR_ENABLE_NOTIFICATION = 24,
IWL_TM_ATTR_MAX = 25,
};
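/*
 * Editorial sketch, not from this patch: the shape of the netlink policy a
 * driver-side handler would use to validate the attributes above before
 * dispatching on IWL_TM_ATTR_COMMAND (assuming <net/netlink.h>). Only a few
 * entries are shown and the attribute types are assumptions; the policy in
 * the removed testmode code may differ.
 */
static const struct nla_policy iwl_tm_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
	[IWL_TM_ATTR_COMMAND]		= { .type = NLA_U32 },
	[IWL_TM_ATTR_UCODE_CMD_ID]	= { .type = NLA_U8 },
	[IWL_TM_ATTR_UCODE_CMD_DATA]	= { .type = NLA_UNSPEC },
	[IWL_TM_ATTR_REG_OFFSET]	= { .type = NLA_U32 },
	[IWL_TM_ATTR_REG_VALUE8]	= { .type = NLA_U8 },
	[IWL_TM_ATTR_REG_VALUE32]	= { .type = NLA_U32 },
	[IWL_TM_ATTR_FIXRATE]		= { .type = NLA_U32 },
	[IWL_TM_ATTR_UCODE_OWNER]	= { .type = NLA_U8 },
	[IWL_TM_ATTR_MEM_ADDR]		= { .type = NLA_U32 },
	[IWL_TM_ATTR_BUFFER_SIZE]	= { .type = NLA_U32 },
};
/*
 * A handler would then parse the incoming data against this policy and
 * switch on nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) to pick the operation.
 */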
/* uCode trace buffer */
#define TRACE_BUFF_SIZE_MAX 0x200000
#define TRACE_BUFF_SIZE_MIN 0x20000
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
/* Maximum data size of each dump packet */
#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
/* Address offset of data segment in SRAM */
#define SRAM_DATA_SEG_OFFSET 0x800000
#endif
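/*
 * Editorial sketch, not from this patch: how a dump-style command (such as
 * INDIRECT_BUFFER_DUMP or READ_TRACE) would hand a large buffer back to
 * userspace in DUMP_CHUNK_SIZE pieces across repeated dump callbacks.
 * Assumes <net/netlink.h> and <linux/kernel.h>; "buf"/"buf_size" stand for
 * the data captured by the earlier READ command, and the use of cb->args[4]
 * for the running offset is an arbitrary choice.
 */
static int iwl_tm_dump_chunk(struct sk_buff *skb, struct netlink_callback *cb,
			     const u8 *buf, size_t buf_size)
{
	size_t offset = cb->args[4];
	size_t chunk;

	if (offset >= buf_size)
		return -ENOENT;	/* nothing left: ends the dump */

	chunk = min_t(size_t, DUMP_CHUNK_SIZE, buf_size - offset);
	if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, chunk, buf + offset))
		return -ENOBUFS;

	cb->args[4] = offset + chunk;
	return 0;
}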
......@@ -183,14 +183,12 @@ struct iwl_rx_packet {
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_ON_DEMAND = BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320
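/*
 * Editorial sketch, not from this patch: the CMD_WANT_SKB contract described
 * above: send synchronously, inspect the response packet, then release it
 * with iwl_free_resp(). REPLY_ECHO stands in for whatever host command the
 * caller actually needs, and the required iwlwifi headers are assumed to be
 * included.
 */
static int send_echo(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_ECHO,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	struct iwl_rx_packet *pkt;
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	/* ... inspect pkt->data / pkt->len_n_flags here ... */

	iwl_free_resp(&cmd);	/* mandatory when CMD_WANT_SKB was set */
	return 0;
}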
......
......@@ -202,6 +202,22 @@ static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
cpu_to_le32(0x00000000),
};
/* single shared antenna */
static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
cpu_to_le32(0x40000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0x44000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0x40000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0x44000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0xC0004000),
cpu_to_le32(0xF0005000),
cpu_to_le32(0xC0004000),
cpu_to_le32(0xF0005000),
};
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
struct iwl_bt_coex_cmd cmd = {
......@@ -225,7 +241,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_REDUCED_TX_POWER |
BT_VALID_LUT);
if (is_loose_coex())
if (mvm->cfg->bt_shared_single_ant)
memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
sizeof(iwl_single_shared_ant_lookup));
else if (is_loose_coex())
memcpy(&cmd.decision_lut, iwl_loose_lookup,
sizeof(iwl_tight_lookup));
else
......
......@@ -1026,6 +1026,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->d3_wake_sysassert)
d3_cfg_cmd_data.wakeup_flags |=
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
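	/*
	 * Editorial comment, not in the original patch: d3_wake_sysassert is
	 * the boolean debugfs knob added in this series; when set, the
	 * IWL_WAKEUP_D3_CONFIG_FW_ERROR flag above is presumably what makes a
	 * firmware sysassert during D3 wake the host instead of being handled
	 * silently.
	 */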
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
......
......@@ -344,6 +344,13 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
dbgfs_pm->disable_power_off = val;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
break;
case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
dbgfs_pm->lprx_rssi_threshold = val;
break;
}
}
......@@ -387,6 +394,17 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
if (sscanf(buf + 18, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
} else if (!strncmp("lprx=", buf, 5)) {
if (sscanf(buf + 5, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_LPRX_ENA;
} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
if (sscanf(buf + 20, "%d", &val) != 1)
return -EINVAL;
if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
POWER_LPRX_RSSI_THRESHOLD_MIN)
return -EINVAL;
param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
} else {
return -EINVAL;
}
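/*
 * Editorial note, not part of this patch: with the additions above, the
 * pm_params debugfs file also accepts "lprx=<0|1>" and
 * "lprx_rssi_threshold=<val>" (values outside
 * POWER_LPRX_RSSI_THRESHOLD_MIN..MAX are rejected). A typical invocation,
 * with a placeholder debugfs path:
 *
 *   echo "lprx_rssi_threshold=80" > /sys/kernel/debug/.../pm_params
 */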
......@@ -421,7 +439,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
le32_to_cpu(cmd.skip_dtim_periods));
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = %d\n",
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
le16_to_cpu(cmd.flags));
pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
cmd.keep_alive_seconds);
......@@ -435,6 +453,10 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
le32_to_cpu(cmd.rx_data_timeout));
pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
le32_to_cpu(cmd.tx_data_timeout));
if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
pos += scnprintf(buf+pos, bufsz-pos,
"lprx_rssi_threshold = %d\n",
le32_to_cpu(cmd.lprx_rssi_threshold));
}
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
......@@ -939,6 +961,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
if (!debugfs_create_bool("d3_wake_sysassert", S_IRUSR | S_IWUSR,
mvm->debugfs_dir, &mvm->d3_wake_sysassert))
goto err;
#endif
/*
......
......@@ -66,6 +66,11 @@
/* Power Management Commands, Responses, Notifications */
/* Radio LP RX Energy Threshold measured in dBm */
#define POWER_LPRX_RSSI_THRESHOLD 75
#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
/**
* enum iwl_power_flags - masks for power table command flags
* @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
......
......@@ -865,6 +865,30 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
return ret;
}
struct iwl_mvm_mac_ap_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
u32 beacon_device_ts;
u16 beacon_int;
};
/* Find the beacon_device_ts and beacon_int for a managed interface */
static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_mac_ap_iterator_data *data = _data;
if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
return;
/* A station interface has priority over a P2P client */
if (vif->p2p && data->beacon_device_ts)
return;
data->beacon_device_ts = vif->bss_conf.sync_device_ts;
data->beacon_int = vif->bss_conf.beacon_int;
}
/*
* Fill the specific data for a mac context of type AP or P2P GO
*/
......@@ -874,6 +898,11 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
bool add)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_mac_ap_iterator_data data = {
.mvm = mvm,
.vif = vif,
.beacon_device_ts = 0
};
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
......@@ -887,16 +916,33 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
/*
* Only read the system time when the MAC is being added, when we
* Only set the beacon time when the MAC is being added, when we
* just modify the MAC then we should keep the time -- the firmware
* can otherwise have a "jumping" TBTT.
*/
if (add)
if (add) {
/*
* If there is a station/P2P client interface which is
* associated, set the AP's TBTT far enough from the station's
* TBTT. Otherwise, set it to the current system time
*/
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
u32 rand = (prandom_u32() % (80 - 20)) + 20;
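			/*
			 * Editorial comment, not in the original patch: rand
			 * is a percentage in [20, 79], so the AP/GO TBTT is
			 * pushed 20-79% of a beacon interval away from the
			 * client's TBTT; e.g. for a 100 TU beacon interval
			 * and rand == 50 the offset is 50 TU (51200 usec).
			 */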
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
} else {
mvmvif->ap_beacon_time =
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
iwl_read_prph(mvm->trans,
DEVICE_SYSTEM_TIME_REG);
}
}
ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
ctxt_ap->beacon_tsf = 0; /* unused */
/* TODO: Assume that the beacon id == mac context id */
......
......@@ -73,7 +73,6 @@
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
#include "iwl-test.h"
#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
......@@ -159,6 +158,8 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
};
struct iwl_dbgfs_pm {
......@@ -168,6 +169,8 @@ struct iwl_dbgfs_pm {
bool skip_over_dtim;
u8 skip_dtim_periods;
bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
int mask;
};
......@@ -353,12 +356,14 @@ struct iwl_tt_params {
* @dynamic_smps: Is dynamic SMPS (enabled by thermal throttling) active?
* @tx_backoff: The current thermal throttling tx backoff in uSec.
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling active?
*/
struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
struct iwl_mvm {
......@@ -461,6 +466,7 @@ struct iwl_mvm {
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
bool d3_test_active;
bool store_d3_resume_sram;
void *d3_resume_sram;
......
......@@ -137,11 +137,12 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
cmd->lprx_rssi_threshold);
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
le32_to_cpu(cmd->skip_dtim_periods));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
le32_to_cpu(cmd->lprx_rssi_threshold));
}
}
......@@ -181,6 +182,14 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
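	/*
	 * Editorial comment, not in the original patch: mac80211 reports
	 * bitrate in 100 kbps units, so the 10 and 60 below correspond to the
	 * 1 Mbps (CCK) and 6 Mbps (OFDM) lowest mandatory rates, i.e. LPRX is
	 * only enabled when beacons arrive at a basic rate.
	 */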
if (vif->bss_conf.beacon_rate &&
(vif->bss_conf.beacon_rate->bitrate == 10 ||
vif->bss_conf.beacon_rate->bitrate == 60)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
cmd->lprx_rssi_threshold =
cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
}
dtimper = hw->conf.ps_dtim_period ?: 1;
/* Check if radar detection is required on current channel */
......@@ -236,6 +245,15 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
cmd->skip_dtim_periods =
cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
if (mvmvif->dbgfs_pm.lprx_ena)
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
else
cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
cmd->lprx_rssi_threshold =
cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
......
......@@ -412,7 +412,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
return ret;
}
if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
......@@ -426,11 +425,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
tid);
ieee80211_stop_tx_ba_session(sta, tid);
}
} else {
IWL_DEBUG_HT(mvm,
"Aggregation not enabled for tid %d because load = %u\n",
tid, load);
}
return ret;
}
......
......@@ -427,6 +427,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
const struct iwl_tt_params *params = mvm->thermal_throttle.params;
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
s32 temperature = mvm->temperature;
bool throttle_enable = false;
int i;
u32 tx_backoff;
......@@ -445,6 +446,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_tt_smps_iterator, mvm);
throttle_enable = true;
} else if (tt->dynamic_smps &&
temperature <= params->dynamic_smps_exit) {
IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
......@@ -456,11 +458,13 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_tx_protection) {
if (temperature >= params->tx_protection_entry)
if (temperature >= params->tx_protection_entry) {
iwl_mvm_tt_tx_protection(mvm, true);
else if (temperature <= params->tx_protection_exit)
throttle_enable = true;
} else if (temperature <= params->tx_protection_exit) {
iwl_mvm_tt_tx_protection(mvm, false);
}
}
if (params->support_tx_backoff) {
tx_backoff = 0;
......@@ -469,9 +473,22 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
break;
tx_backoff = params->tx_backoff[i].backoff;
}
if (tx_backoff != 0)
throttle_enable = true;
if (tt->tx_backoff != tx_backoff)
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
}
if (!tt->throttle && throttle_enable) {
IWL_WARN(mvm,
"Due to high temperature thermal throttling initiated\n");
tt->throttle = true;
} else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
temperature <= params->tx_protection_exit) {
IWL_WARN(mvm,
"Temperature is back to normal thermal throttling stopped\n");
tt->throttle = false;
}
}
static const struct iwl_tt_params iwl7000_tt_params = {
......@@ -502,6 +519,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
tt->params = &iwl7000_tt_params;
tt->throttle = false;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
......
......@@ -110,9 +110,10 @@
/*
* iwl_rxq_space - Return number of free slots available in queue.
*/
static int iwl_rxq_space(const struct iwl_rxq *q)
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
int s = q->read - q->write;
int s = rxq->read - rxq->write;
if (s <= 0)
s += RX_QUEUE_SIZE;
/* keep some buffer to not confuse full and empty queue */
......@@ -143,21 +144,22 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&q->lock, flags);
spin_lock_irqsave(&rxq->lock, flags);
if (q->need_update == 0)
if (rxq->need_update == 0)
goto exit_unlock;
if (trans->cfg->base_params->shadow_reg_enable) {
/* shadow register enabled */
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
rxq->write_actual = (rxq->write & ~0x7);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
} else {
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
......@@ -175,22 +177,22 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
goto exit_unlock;
}
q->write_actual = (q->write & ~0x7);
rxq->write_actual = (rxq->write & ~0x7);
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
q->write_actual);
rxq->write_actual);
/* Else device is assumed to be awake */
} else {
/* Device expects a multiple of 8 */
q->write_actual = (q->write & ~0x7);
rxq->write_actual = (rxq->write & ~0x7);
iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
q->write_actual);
rxq->write_actual);
}
}
q->need_update = 0;
rxq->need_update = 0;
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
......@@ -355,20 +357,17 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i;
/* Fill the rx_used queue with _all_ of the Rx buffers */
lockdep_assert_held(&rxq->lock);
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
trans_pcie->rx_page_order);
__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
}
}
/*
......@@ -491,6 +490,20 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
int i;
lockdep_assert_held(&rxq->lock);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
......@@ -505,13 +518,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
}
spin_lock_irqsave(&rxq->lock, flags);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
INIT_WORK(&trans_pcie->rx_replenish,
iwl_pcie_rx_replenish_work);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
for (i = 0; i < RX_QUEUE_SIZE; i++)
rxq->queue[i] = NULL;
......@@ -520,7 +532,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
rxq->free_count = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock_irqrestore(&rxq->lock, flags);
......
......@@ -838,8 +838,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
{
int ret;
struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_irqsave(&pcie_trans->reg_lock, *flags);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
......@@ -875,7 +876,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
val);
spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
return false;
}
}
......@@ -884,22 +885,22 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
* Fool sparse by faking that we release the lock - sparse will
* track nic_access anyway.
*/
__release(&pcie_trans->reg_lock);
__release(&trans_pcie->reg_lock);
return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
unsigned long *flags)
{
struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
lockdep_assert_held(&pcie_trans->reg_lock);
lockdep_assert_held(&trans_pcie->reg_lock);
/*
* Fool sparse by faking that we acquire the lock - sparse will
* track nic_access anyway.
*/
__acquire(&pcie_trans->reg_lock);
__acquire(&trans_pcie->reg_lock);
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
......@@ -910,7 +911,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
......