Commit c745f722 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2019-04-29' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Fourth batch of patches intended for v5.2

* Fix a bug we introduced in the RX path in a previous patch;
* Add command version parsing from the FW TLVs;
* Some fixes and improvements in the new debugging framework;
* Bump the supported FW API version for the 22000 series;
* Small improvement in FTM;
* Some RF-Kill interrupt handling fixes;
* Support for a new WoWLAN patterns FW API;
* Other small fixes and improvements.
parents 9ef77fbe a0eaead4
......@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 47
#define IWL_22000_UCODE_API_MAX 48
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
......
......@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -214,7 +214,7 @@ struct iwl_proto_offload_cmd_v3_large {
#define IWL_WOWLAN_MIN_PATTERN_LEN 16
#define IWL_WOWLAN_MAX_PATTERN_LEN 128
struct iwl_wowlan_pattern {
struct iwl_wowlan_pattern_v1 {
u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
u8 mask_size;
......@@ -227,7 +227,7 @@ struct iwl_wowlan_pattern {
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd {
struct iwl_wowlan_patterns_cmd_v1 {
/**
* @n_patterns: number of patterns
*/
......@@ -236,9 +236,129 @@ struct iwl_wowlan_patterns_cmd {
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern patterns[];
struct iwl_wowlan_pattern_v1 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
#define IPV4_ADDR_SIZE 4
#define IPV6_ADDR_SIZE 16
enum iwl_wowlan_pattern_type {
WOWLAN_PATTERN_TYPE_BITMASK,
WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN,
WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN,
WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD,
WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD,
}; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */
/**
* struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data
*/
struct iwl_wowlan_ipv4_tcp_syn {
/**
* @src_addr: source IP address to match
*/
u8 src_addr[IPV4_ADDR_SIZE];
/**
* @dst_addr: destination IP address to match
*/
u8 dst_addr[IPV4_ADDR_SIZE];
/**
* @src_port: source TCP port to match
*/
__le16 src_port;
/**
* @dst_port: destination TCP port to match
*/
__le16 dst_port;
} __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */
/**
* struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data
*/
struct iwl_wowlan_ipv6_tcp_syn {
/**
* @src_addr: source IP address to match
*/
u8 src_addr[IPV6_ADDR_SIZE];
/**
* @dst_addr: destination IP address to match
*/
u8 dst_addr[IPV6_ADDR_SIZE];
/**
* @src_port: source TCP port to match
*/
__le16 src_port;
/**
* @dst_port: destination TCP port to match
*/
__le16 dst_port;
} __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */
/**
* union iwl_wowlan_pattern_data - Data for the different pattern types
*
* If wildcard addresses/ports are to be used, the union can be left
* undefined.
*/
union iwl_wowlan_pattern_data {
/**
* @bitmask: bitmask pattern data
*/
struct iwl_wowlan_pattern_v1 bitmask;
/**
* @ipv4_tcp_syn: IPv4 TCP SYN pattern data
*/
struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn;
/**
* @ipv6_tcp_syn: IPv6 TCP SYN pattern data
*/
struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn;
}; /* WOWLAN_PATTERN_API_U_VER_1 */
/**
* struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns
*/
struct iwl_wowlan_pattern_v2 {
/**
* @pattern_type: defines the struct type to be used in the union
*/
u8 pattern_type;
/**
* @reserved: reserved for alignment
*/
u8 reserved[3];
/**
* @u: the union containing the match data, or undefined for
* wildcard matches
*/
union iwl_wowlan_pattern_data u;
} __packed; /* WOWLAN_PATTERN_API_S_VER_2 */
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command
*/
struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
*/
__le32 n_patterns;
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern_v2 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
......@@ -383,7 +503,11 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17),
IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18),
IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19),
IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20),
IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
struct iwl_wowlan_gtk_status_v1 {
......
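For orientation, here is a minimal sketch of filling one v2 pattern entry for an IPv4 TCP SYN wakeup; the helper name and the addresses are made up for illustration and are not part of this patch. For the *_WILDCARD pattern types, the union is simply left zeroed, as the union iwl_wowlan_pattern_data kerneldoc above notes.

/* Illustrative sketch only: wake the host on a TCP SYN from
 * 192.168.1.10:12345 to 192.168.1.2:22.
 */
static void example_fill_ipv4_syn(struct iwl_wowlan_pattern_v2 *p)
{
	const u8 src[IPV4_ADDR_SIZE] = { 192, 168, 1, 10 };
	const u8 dst[IPV4_ADDR_SIZE] = { 192, 168, 1, 2 };

	memset(p, 0, sizeof(*p));
	p->pattern_type = WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN;
	memcpy(p->u.ipv4_tcp_syn.src_addr, src, IPV4_ADDR_SIZE);
	memcpy(p->u.ipv4_tcp_syn.dst_addr, dst, IPV4_ADDR_SIZE);
	p->u.ipv4_tcp_syn.src_port = cpu_to_le16(12345);
	p->u.ipv4_tcp_syn.dst_port = cpu_to_le16(22);
}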
......@@ -473,6 +473,8 @@ enum iwl_fw_ini_debug_flow {
* @IWL_FW_INI_REGION_CSR: CSR registers
* @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
* @IWL_FW_INI_REGION_DHC: dhc response to dump
* @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
* @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
......@@ -490,6 +492,8 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_NOTIFICATION,
IWL_FW_INI_REGION_DHC,
IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_NUM
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
......
......@@ -917,11 +917,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
dump_info->device_family =
fwrt->trans->cfg->device_family ==
IWL_DEVICE_FAMILY_7000 ?
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
dump_info->hw_type =
cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
dump_info->hw_step =
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
......@@ -1754,12 +1751,18 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
continue;
}
/* currently the driver supports always on domain only */
if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
continue;
switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
case IWL_FW_INI_REGION_PERIPHERY_AUX:
case IWL_FW_INI_REGION_CSR:
case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
break;
case IWL_FW_INI_REGION_TXF:
......@@ -1821,6 +1824,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
......@@ -2464,15 +2469,20 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
{
void *iter = (void *)tlv->region_config;
int i, size = le32_to_cpu(tlv->num_regions);
const char *err_st =
"WRT: ext=%d. Invalid region %s %d for apply point %d\n";
for (i = 0; i < size; i++) {
struct iwl_fw_ini_region_cfg *reg = iter, **active;
int id = le32_to_cpu(reg->region_id);
u32 type = le32_to_cpu(reg->region_type);
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
"WRT: ext=%d. Invalid region id %d for apply point %d\n",
ext, id, pnt))
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext,
"id", id, pnt))
break;
if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st,
ext, "type", type, pnt))
break;
active = &fwrt->dump.active_regs[id];
......@@ -2498,7 +2508,9 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
type == IWL_FW_INI_REGION_PERIPHERY_AUX ||
type == IWL_FW_INI_REGION_INTERNAL_BUFFER ||
type == IWL_FW_INI_REGION_PAGING ||
type == IWL_FW_INI_REGION_CSR)
type == IWL_FW_INI_REGION_CSR ||
type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE ||
type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE)
iter += le32_to_cpu(reg->internal.num_of_ranges) *
sizeof(__le32);
......@@ -2610,6 +2622,20 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->trig->occurrences = cpu_to_le32(-1);
active->active = true;
if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) {
u32 collect_interval = le32_to_cpu(trig->trigger_data);
/* the minimum allowed interval is 50ms */
if (collect_interval < 50) {
collect_interval = 50;
trig->trigger_data =
cpu_to_le32(collect_interval);
}
mod_timer(&fwrt->dump.periodic_trig,
jiffies + msecs_to_jiffies(collect_interval));
}
next:
iter += sizeof(*trig) + trig_regs_size;
......@@ -2690,8 +2716,34 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
del_timer(&fwrt->dump.periodic_trig);
iwl_fw_dbg_collect_sync(fwrt);
iwl_trans_stop_device(fwrt->trans);
}
IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device);
void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t)
{
struct iwl_fw_runtime *fwrt;
enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER;
int ret;
typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump),
periodic_trig);
fwrt = container_of(dump_ptr, typeof(*fwrt), dump);
ret = _iwl_fw_dbg_ini_collect(fwrt, id);
if (!ret || ret == -EBUSY) {
struct iwl_fw_ini_trigger *trig =
fwrt->dump.active_trigs[id].trig;
u32 occur = le32_to_cpu(trig->occurrences);
u32 collect_interval = le32_to_cpu(trig->trigger_data);
if (!occur)
return;
mod_timer(&fwrt->dump.periodic_trig,
jiffies + msecs_to_jiffies(collect_interval));
}
}
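The double container_of() above is needed because dump is an anonymous struct member of struct iwl_fw_runtime: there is no type name to hand to container_of(), so typeof() manufactures one, first recovering the anonymous member from the embedded timer and then the outer struct from the member. The same trick on toy types, for illustration only:

struct outer {
	struct {
		struct timer_list timer;
	} inner;	/* anonymous struct: no type name available */
};

static void toy_timer_fn(struct timer_list *t)
{
	struct outer *o;
	/* typeof() is not evaluated, so using 'o' here is fine */
	typeof(o->inner) *inner = container_of(t, typeof(o->inner), timer);

	o = container_of(inner, struct outer, inner);
	/* ... use o ... */
}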
......@@ -385,11 +385,13 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
{
del_timer(&fwrt->dump.periodic_trig);
flush_delayed_work(&fwrt->dump.wk);
}
static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
{
del_timer(&fwrt->dump.periodic_trig);
cancel_delayed_work_sync(&fwrt->dump.wk);
}
......@@ -468,4 +470,5 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
}
}
void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
#endif /* __iwl_fw_dbg_h__ */
......@@ -184,7 +184,7 @@ enum iwl_fw_error_dump_family {
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
* @device_family: the family of the device (7 / 8)
* @hw_type: the type of the device
* @hw_step: the step of the device
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
......@@ -196,7 +196,7 @@ enum iwl_fw_error_dump_family {
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
__le32 device_family;
__le32 hw_type;
__le32 hw_step;
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
......
......@@ -142,17 +142,22 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
IWL_UCODE_TLV_CMD_VERSIONS = 48,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP + 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP + 0x3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP + 0x4,
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP + 0x5,
IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
......@@ -311,6 +316,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
NUM_IWL_UCODE_TLV_API
......@@ -938,4 +944,20 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
#define IWL_FW_CMD_VER_UNKNOWN 99
/**
* struct iwl_fw_cmd_version - firmware command version entry
* @cmd: command ID
* @group: group ID
* @cmd_ver: command version
* @notif_ver: notification version
*/
struct iwl_fw_cmd_version {
u8 cmd;
u8 group;
u8 cmd_ver;
u8 notif_ver;
} __packed;
#endif /* __iwl_fw_file_h__ */
......@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -109,6 +109,9 @@ struct iwl_ucode_capabilities {
u32 error_log_size;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
const struct iwl_fw_cmd_version *cmd_versions;
u32 n_cmd_versions;
};
static inline bool
......
......@@ -76,6 +76,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
timer_setup(&fwrt->dump.periodic_trig,
iwl_fw_dbg_periodic_trig_handler, 0);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
......
......@@ -146,6 +146,7 @@ struct iwl_fw_runtime {
u32 umac_err_id;
void *fifo_iter;
enum iwl_fw_ini_trigger_id ini_trig_id;
struct timer_list periodic_trig;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
......
......@@ -290,6 +290,7 @@
/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
/* HW RFID */
#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
......
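The new CSR_HW_REV_TYPE macro sits alongside the existing DASH and STEP extractors; together they carve up the low 20 bits of the HW_REV register as [19:4] type, [3:2] step, [1:0] dash. A worked decode with a made-up register value:

	u32 hw_rev = 0x0003546;			/* hypothetical value */
	u32 dash = CSR_HW_REV_DASH(hw_rev);	/* 0x6 & 0x3       = 0x2 */
	u32 step = CSR_HW_REV_STEP(hw_rev);	/* (0x6 & 0xC) >> 2 = 0x1 */
	u32 type = CSR_HW_REV_TYPE(hw_rev);	/* 0x3540 >> 4    = 0x354 */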
......@@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.trigger_tlv[i]);
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
kfree(drv->fw.ucode_capa.cmd_versions);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
......@@ -1144,6 +1145,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (iwlwifi_mod_params.enable_ini)
iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
break;
case IWL_UCODE_TLV_CMD_VERSIONS:
if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
IWL_ERR(drv,
"Invalid length for command versions: %u\n",
tlv_len);
tlv_len /= sizeof(struct iwl_fw_cmd_version);
tlv_len *= sizeof(struct iwl_fw_cmd_version);
}
if (WARN_ON(capa->cmd_versions))
return -EINVAL;
capa->cmd_versions = kmemdup(tlv_data, tlv_len,
GFP_KERNEL);
if (!capa->cmd_versions)
return -ENOMEM;
capa->n_cmd_versions =
tlv_len / sizeof(struct iwl_fw_cmd_version);
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
......
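With capa->cmd_versions and capa->n_cmd_versions filled in, a consumer can resolve the firmware's advertised version for a (group, command) pair by a linear scan. A hypothetical helper, not part of this series, might look like this, returning the IWL_FW_CMD_VER_UNKNOWN sentinel when the firmware did not list the command:

static u8 example_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
{
	const struct iwl_ucode_capabilities *capa = &fw->ucode_capa;
	int i;

	for (i = 0; i < capa->n_cmd_versions; i++)
		if (capa->cmd_versions[i].group == grp &&
		    capa->cmd_versions[i].cmd == cmd)
			return capa->cmd_versions[i].cmd_ver;

	return IWL_FW_CMD_VER_UNKNOWN;
}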
......@@ -1496,7 +1496,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
(void *)rsp_v3->regulatory.channel_profile;
iwl_init_sbands(trans->dev, trans->cfg, nvm,
rsp->regulatory.channel_profile,
channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
sbands_flags, v4);
......
......@@ -385,10 +385,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
}
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan)
{
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
......@@ -399,7 +399,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return 0;
cmd.len[0] = sizeof(*pattern_cmd) +
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v1);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
......@@ -426,6 +426,50 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan)
{
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
if (!wowlan->n_patterns)
return 0;
cmd.len[0] = sizeof(*pattern_cmd) +
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
return -ENOMEM;
pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
pattern_cmd->patterns[i].pattern_type =
WOWLAN_PATTERN_TYPE_BITMASK;
memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
wowlan->patterns[i].mask, mask_len);
memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
wowlan->patterns[i].pattern,
wowlan->patterns[i].pattern_len);
pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
pattern_cmd->patterns[i].u.bitmask.pattern_size =
wowlan->patterns[i].pattern_len;
}
cmd.data[0] = pattern_cmd;
err = iwl_mvm_send_cmd(mvm, &cmd);
kfree(pattern_cmd);
return err;
}
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
......@@ -851,7 +895,11 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
ret = iwl_mvm_send_patterns(mvm, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
return ret;
......
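Both command versions encode bitmask patterns the same way: byte i of @pattern takes part in the comparison only when bit i of @mask is set (the usual nl80211 convention, visible in the DIV_ROUND_UP mask-length math above). A host-side sketch of that rule, for illustration only; the actual matching runs in firmware:

/* Illustrative only: would packet 'data' of 'len' bytes wake us up? */
static bool example_pattern_matches(const struct iwl_wowlan_pattern_v1 *p,
				    const u8 *data, int len)
{
	int i;

	if (len < p->pattern_size)
		return false;

	for (i = 0; i < p->pattern_size; i++) {
		if (!(p->mask[i / 8] & BIT(i % 8)))
			continue;	/* byte not covered by the mask */
		if (data[i] != p->pattern[i])
			return false;
	}
	return true;
}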
......@@ -187,12 +187,24 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
if (vif->bss_conf.assoc)
if (vif->bss_conf.assoc) {
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
else
/* AP's TSF is only relevant if associated */
for (i = 0; i < req->n_peers; i++) {
if (req->peers[i].report_ap_tsf) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(vif);
cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
return;
}
}
} else {
eth_broadcast_addr(cmd->range_req_bssid);
}
/* TODO: fill in tsf_mac_id if needed */
/* Don't report AP's TSF */
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
......@@ -527,6 +539,8 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
fw_ap = (void *)&fw_resp_v6->ap[i];
result.final = fw_resp->ap[i].last_burst;
result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
result.ap_tsf_valid = 1;
} else {
/* the first part is the same for old and new APIs */
fw_ap = (void *)&fw_resp_v5->ap[i];
......
......@@ -1570,7 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
return;
case NL80211_IFTYPE_STATION:
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work_sync(&mvmvif->csa_work);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
break;
default:
......
......@@ -1261,6 +1261,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
iwl_abort_notification_waits(&mvm->notif_wait);
del_timer(&mvm->fwrt.dump.periodic_trig);
/*
* This is a bit racy, but worst case we tell mac80211 about
......
......@@ -230,19 +230,43 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_he_cap *he_cap,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
{
u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
u16 tx_mcs_80 =
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80);
u16 tx_mcs_160 =
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
/* If one side doesn't support - mark both as not supporting */
if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
_tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
_tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
}
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][0] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
_tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
_tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
}
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][1] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
......@@ -271,7 +295,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
/* HT/VHT rates */
if (he_cap && he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
......
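The rewritten loop intersects the peer's RX MCS map with our own TX MCS map, per NSS and per bandwidth: if either side reports NOT_SUPPORTED the pair is unusable, otherwise the lower 2-bit code wins. The rule in isolation, as a hypothetical helper that is not in the patch:

/* 2-bit HE MCS codes: 0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = none */
static u16 example_intersect_he_mcs(u16 peer_rx, u16 own_tx)
{
	if (peer_rx == IEEE80211_HE_MCS_NOT_SUPPORTED ||
	    own_tx == IEEE80211_HE_MCS_NOT_SUPPORTED)
		return IEEE80211_HE_MCS_NOT_SUPPORTED;

	return min(peer_rx, own_tx);
}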
......@@ -234,7 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
break;
}
iwl_mvm_csa_client_absent(mvm, te_data->vif);
cancel_delayed_work_sync(&mvmvif->csa_work);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(te_data->vif, true);
break;
default:
......
......@@ -536,7 +536,7 @@ struct iwl_trans_pcie {
int ict_index;
bool use_ict;
bool is_down, opmode_down;
bool debug_rfkill;
s8 debug_rfkill;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
......@@ -982,7 +982,7 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->mutex);
if (trans_pcie->debug_rfkill)
if (trans_pcie->debug_rfkill == 1)
return true;
return !(iwl_read32(trans, CSR_GP_CNTRL) &
......
......@@ -434,7 +434,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
/*
* Issue an error if we don't have enough pre-allocated
* buffers.
` */
*/
if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages\n");
......@@ -1429,10 +1429,15 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
return;
rxq = &trans_pcie->rxq[queue];
restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
......
......@@ -2688,16 +2688,17 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool old = trans_pcie->debug_rfkill;
bool new_value;
int ret;
ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
ret = kstrtobool_from_user(user_buf, count, &new_value);
if (ret)
return ret;
if (old == trans_pcie->debug_rfkill)
if (new_value == trans_pcie->debug_rfkill)
return count;
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
old, trans_pcie->debug_rfkill);
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
iwl_pcie_handle_rfkill_irq(trans);
return count;
......@@ -3421,7 +3422,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = -ENOMEM;
goto out_no_pci;
}
trans_pcie->debug_rfkill = -1;
if (!cfg->base_params->pcie_l1_allowed) {
/*
......
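Taken together, these hunks make debug_rfkill a tri-state. Summarizing the value meanings as far as they can be read from this patch alone:

/* trans_pcie->debug_rfkill:
 *  -1  initial value from iwl_trans_pcie_alloc(): debugfs never written
 *   0  user wrote "0"/"n" to the rfkill debugfs file
 *   1  user wrote "1"/"y"; only this value makes iwl_is_rfkill_set()
 *      report rfkill as asserted
 * Values -1 and 0 fall through to the real CSR_GP_CNTRL hardware bit.
 */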