Commit d98c3edc authored by David S. Miller's avatar David S. Miller

Merge tag 'wireless-drivers-next-for-davem-2015-05-21' of...

Merge tag 'wireless-drivers-next-for-davem-2015-05-21' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
ath10k:

* enable channel 144 on 5 GHz
* enable Adaptive Noise Immunity (ANI) by default
* add Wake on Wireless LAN (WOW) pattern support
* add basic Tunneled Direct Link Setup (TDLS) support
* add multi-channel support for QCA6174
* enable IBSS RSN support
* enable Bluetooth Coexistence whenever the firmware supports it
* add a more versatile way to set bitrates used by the firmware

ath9k:

* spectral scan: add support for multiple FFT frames per report

iwlwifi:

* major rework of the scan code (Luca)
* some work on the thermal code (Chaya Rachel)
* some work on the firmware debugging infrastructure

brcmfmac:

* SDIO suspend and resume fixes
* wiphy band info and changes in regulatory settings
* add support for BCM4324 SDIO and BCM4358 PCIe
* enable support of PCIe devices on router platforms (Hante)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4029685a 6e651045
...@@ -226,6 +226,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) ...@@ -226,6 +226,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->of_node = cc->core->dev.of_node; chip->of_node = cc->core->dev.of_node;
#endif #endif
switch (bus->chipinfo.id) { switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572: case BCMA_CHIP_ID_BCM53572:
chip->ngpio = 32; chip->ngpio = 32;
...@@ -235,16 +236,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) ...@@ -235,16 +236,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
} }
/* /*
* On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO * Register SoC GPIO devices with absolute GPIO pin base.
* pin numbers. We don't have Device Tree there and we can't really use * On MIPS, we don't have Device Tree and we can't use relative (per chip)
* relative (per chip) numbers. * GPIO numbers.
* So let's use predictable base for BCM47XX and "random" for all other. * On some ARM devices, user space may want to access some system GPIO
* pins directly, which is easier to do with a predictable GPIO base.
*/ */
#if IS_BUILTIN(CONFIG_BCM47XX) if (IS_BUILTIN(CONFIG_BCM47XX) ||
cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->base = bus->num * BCMA_GPIO_MAX_PINS; chip->base = bus->num * BCMA_GPIO_MAX_PINS;
#else else
chip->base = -1; chip->base = -1;
#endif
err = bcma_gpio_irq_domain_init(cc); err = bcma_gpio_irq_domain_init(cc);
if (err) if (err)
......
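For context, a minimal sketch of the base-number policy this hunk switches to: a predictable GPIO base for BCM47XX (MIPS, no Device Tree) and for SoC-hosted buses, a dynamic base everywhere else. The helper name is hypothetical and the BCMA_GPIO_MAX_PINS value is an assumption for the example, not taken from the header.

#include <linux/types.h>

#define BCMA_GPIO_MAX_PINS	32	/* assumed value for this sketch */

static int bcma_gpio_pick_base(unsigned int bus_num, bool predictable)
{
	if (predictable)
		return bus_num * BCMA_GPIO_MAX_PINS;	/* bus 0 -> 0, bus 1 -> 32, ... */

	return -1;	/* let gpiolib assign a dynamic base */
}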
...@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common, ...@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* @ATH_DBG_DFS: radar detection * @ATH_DBG_DFS: radar detection
* @ATH_DBG_WOW: Wake on Wireless * @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_DYNACK: dynack handling * @ATH_DBG_DYNACK: dynack handling
* @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
* @ATH_DBG_ANY: enable all debugging * @ATH_DBG_ANY: enable all debugging
* *
* The debug level is used to control the amount and type of debugging output * The debug level is used to control the amount and type of debugging output
...@@ -280,6 +281,7 @@ enum ATH_DEBUG { ...@@ -280,6 +281,7 @@ enum ATH_DEBUG {
ATH_DBG_WOW = 0x00020000, ATH_DBG_WOW = 0x00020000,
ATH_DBG_CHAN_CTX = 0x00040000, ATH_DBG_CHAN_CTX = 0x00040000,
ATH_DBG_DYNACK = 0x00080000, ATH_DBG_DYNACK = 0x00080000,
ATH_DBG_SPECTRAL_SCAN = 0x00100000,
ATH_DBG_ANY = 0xffffffff ATH_DBG_ANY = 0xffffffff
}; };
......
...@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \ ...@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
wmi.o \ wmi.o \
wmi-tlv.o \ wmi-tlv.o \
bmi.o \ bmi.o \
hw.o hw.o \
p2p.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \ ath10k_pci-y += pci.o \
......
...@@ -482,31 +482,79 @@ static int ath10k_fetch_cal_file(struct ath10k *ar) ...@@ -482,31 +482,79 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
return 0; return 0;
} }
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
{ {
int ret = 0; char filename[100];
if (ar->hw_params.fw.fw == NULL) { scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
ath10k_err(ar, "firmware file not defined\n"); ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
return -EINVAL;
}
if (ar->hw_params.fw.board == NULL) { ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
ath10k_err(ar, "board data file not defined"); if (IS_ERR(ar->board))
return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
ar->spec_board_loaded = true;
return 0;
}
static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
{
if (!ar->hw_params.fw.board) {
ath10k_err(ar, "failed to find board file fw entry\n");
return -EINVAL; return -EINVAL;
} }
ar->board = ath10k_fetch_fw_file(ar, ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir, ar->hw_params.fw.dir,
ar->hw_params.fw.board); ar->hw_params.fw.board);
if (IS_ERR(ar->board)) { if (IS_ERR(ar->board))
ret = PTR_ERR(ar->board); return PTR_ERR(ar->board);
ath10k_err(ar, "could not fetch board data (%d)\n", ret);
goto err;
}
ar->board_data = ar->board->data; ar->board_data = ar->board->data;
ar->board_len = ar->board->size; ar->board_len = ar->board->size;
ar->spec_board_loaded = false;
return 0;
}
static int ath10k_core_fetch_board_file(struct ath10k *ar)
{
int ret;
if (strlen(ar->spec_board_id) > 0) {
ret = ath10k_core_fetch_spec_board_file(ar);
if (ret) {
ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
ret);
goto generic;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
ar->spec_board_id);
return 0;
}
generic:
ret = ath10k_core_fetch_generic_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
return ret;
}
return 0;
}
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
if (ar->hw_params.fw.fw == NULL) {
ath10k_err(ar, "firmware file not defined\n");
return -EINVAL;
}
ar->firmware = ath10k_fetch_fw_file(ar, ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir, ar->hw_params.fw.dir,
...@@ -675,6 +723,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ...@@ -675,6 +723,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n", ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
ar->wmi.op_version); ar->wmi.op_version);
break; break;
case ATH10K_FW_IE_HTT_OP_VERSION:
if (ie_len != sizeof(u32))
break;
version = (__le32 *)data;
ar->htt.op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
ar->htt.op_version);
break;
default: default:
ath10k_warn(ar, "Unknown FW IE: %u\n", ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id)); le32_to_cpu(hdr->id));
...@@ -695,27 +754,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ...@@ -695,27 +754,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
goto err; goto err;
} }
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
ath10k_err(ar, "board data file not defined");
ret = -EINVAL;
goto err;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err;
}
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
return 0; return 0;
err: err:
...@@ -730,6 +768,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) ...@@ -730,6 +768,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
/* calibration file is optional, don't check for any errors */ /* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar); ath10k_fetch_cal_file(ar);
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
return ret;
}
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
if (ret == 0)
goto success;
ar->fw_api = 4; ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
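A minimal sketch of the descending probe order this hunk extends: the newest firmware API image is requested first and older ones serve as fallbacks. The file names and the helper below are illustrative assumptions, not the driver's actual table.

#include <linux/errno.h>
#include <linux/kernel.h>

static const char * const fw_api_candidates[] = {
	"firmware-5.bin",	/* corresponds to ATH10K_FW_API5_FILE, tried first after this patch */
	"firmware-4.bin",
	"firmware-3.bin",
	"firmware-2.bin",
};

static int fetch_first_available_fw(int (*fetch)(const char *name))
{
	int i, ret = -ENOENT;

	for (i = 0; i < ARRAY_SIZE(fw_api_candidates); i++) {
		ret = fetch(fw_api_candidates[i]);
		if (ret == 0)
			break;	/* highest supported API wins */
	}

	return ret;
}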
...@@ -958,6 +1009,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ...@@ -958,6 +1009,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_NUM_STATIONS; ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS; ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
break; break;
case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2:
...@@ -966,18 +1019,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ...@@ -966,18 +1019,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_10X_NUM_STATIONS; ar->max_num_stations = TARGET_10X_NUM_STATIONS;
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
break; break;
case ATH10K_FW_WMI_OP_VERSION_TLV: case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS; ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1);
return -EINVAL;
}
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
switch (ar->wmi.op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break; break;
case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX: case ATH10K_FW_WMI_OP_VERSION_MAX:
WARN_ON(1); WARN_ON(1);
return -EINVAL; return -EINVAL;
} }
}
return 0; return 0;
} }
...@@ -1080,9 +1161,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ...@@ -1080,9 +1161,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_wmi_wait_for_service_ready(ar); status = ath10k_wmi_wait_for_service_ready(ar);
if (status <= 0) { if (status) {
ath10k_warn(ar, "wmi service ready event not received"); ath10k_warn(ar, "wmi service ready event not received");
status = -ETIMEDOUT;
goto err_hif_stop; goto err_hif_stop;
} }
} }
...@@ -1098,9 +1178,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ...@@ -1098,9 +1178,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
} }
status = ath10k_wmi_wait_for_unified_ready(ar); status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) { if (status) {
ath10k_err(ar, "wmi unified ready event not received\n"); ath10k_err(ar, "wmi unified ready event not received\n");
status = -ETIMEDOUT;
goto err_hif_stop; goto err_hif_stop;
} }
...@@ -1151,6 +1230,7 @@ EXPORT_SYMBOL(ath10k_core_start); ...@@ -1151,6 +1230,7 @@ EXPORT_SYMBOL(ath10k_core_start);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
{ {
int ret; int ret;
unsigned long time_left;
reinit_completion(&ar->target_suspend); reinit_completion(&ar->target_suspend);
...@@ -1160,9 +1240,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) ...@@ -1160,9 +1240,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
return ret; return ret;
} }
ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
if (ret == 0) { if (!time_left) {
ath10k_warn(ar, "suspend timed out - target pause event never came\n"); ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT; return -ETIMEDOUT;
} }
...@@ -1386,6 +1466,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ...@@ -1386,6 +1466,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed); init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel); init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend); init_completion(&ar->target_suspend);
init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done); init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done); init_completion(&ar->vdev_setup_done);
......
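The timeout conversions in this file (and in htc.c further below) all follow the same idiom; here is a minimal, self-contained sketch of it with a hypothetical caller name. wait_for_completion_timeout() returns the remaining time in jiffies, so the result belongs in an unsigned long and 0 means the wait timed out.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_target_event(struct completion *done)
{
	unsigned long time_left;	/* remaining jiffies, not an error code */

	time_left = wait_for_completion_timeout(done, 5 * HZ);
	if (!time_left)
		return -ETIMEDOUT;	/* 0 means the completion never fired */

	return 0;
}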
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "../dfs_pattern_detector.h" #include "../dfs_pattern_detector.h"
#include "spectral.h" #include "spectral.h"
#include "thermal.h" #include "thermal.h"
#include "wow.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
...@@ -43,15 +44,16 @@ ...@@ -43,15 +44,16 @@
#define ATH10K_SCAN_ID 0 #define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ) #define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38 #define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */ /* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95 #define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128 #define ATH10K_MAX_NUM_MGMT_PENDING 128
/* number of failed packets */ /* number of failed packets (20 packets with 16 sw retries each) */
#define ATH10K_KICKOUT_THRESHOLD 50 #define ATH10K_KICKOUT_THRESHOLD (20 * 16)
/* /*
* Use insanely high numbers to make sure that the firmware implementation * Use insanely high numbers to make sure that the firmware implementation
...@@ -82,6 +84,8 @@ struct ath10k_skb_cb { ...@@ -82,6 +84,8 @@ struct ath10k_skb_cb {
dma_addr_t paddr; dma_addr_t paddr;
u8 eid; u8 eid;
u8 vdev_id; u8 vdev_id;
enum ath10k_hw_txrx_mode txmode;
bool is_protected;
struct { struct {
u8 tid; u8 tid;
...@@ -280,6 +284,15 @@ struct ath10k_sta { ...@@ -280,6 +284,15 @@ struct ath10k_sta {
#endif #endif
}; };
struct ath10k_chanctx {
/* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
* mac80211 should allow some sort of explicit locking to guarantee
* that the publicly available chanctx_conf can be accessed safely at
* all times.
*/
struct ieee80211_chanctx_conf conf;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state { enum ath10k_beacon_state {
...@@ -301,6 +314,7 @@ struct ath10k_vif { ...@@ -301,6 +314,7 @@ struct ath10k_vif {
enum ath10k_beacon_state beacon_state; enum ath10k_beacon_state beacon_state;
void *beacon_buf; void *beacon_buf;
dma_addr_t beacon_paddr; dma_addr_t beacon_paddr;
unsigned long tx_paused; /* arbitrary values defined by target */
struct ath10k *ar; struct ath10k *ar;
struct ieee80211_vif *vif; struct ieee80211_vif *vif;
...@@ -334,13 +348,13 @@ struct ath10k_vif { ...@@ -334,13 +348,13 @@ struct ath10k_vif {
} ap; } ap;
} u; } u;
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
bool use_cts_prot; bool use_cts_prot;
int num_legacy_stations; int num_legacy_stations;
int txpower; int txpower;
struct wmi_wmm_params_all_arg wmm_params; struct wmi_wmm_params_all_arg wmm_params;
struct work_struct ap_csa_work;
struct delayed_work connection_loss_work;
struct cfg80211_bitrate_mask bitrate_mask;
}; };
struct ath10k_vif_iter { struct ath10k_vif_iter {
...@@ -440,6 +454,12 @@ enum ath10k_fw_features { ...@@ -440,6 +454,12 @@ enum ath10k_fw_features {
*/ */
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5, ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
/* Some firmware revisions have an incomplete WoWLAN implementation
* despite WMI service bit being advertised. This feature flag is used
* to distinguish whether WoWLAN is really supported or not.
*/
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
/* keep last */ /* keep last */
ATH10K_FW_FEATURE_COUNT, ATH10K_FW_FEATURE_COUNT,
}; };
...@@ -498,6 +518,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state) ...@@ -498,6 +518,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
return "unknown"; return "unknown";
} }
enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_Q_FULL,
ATH10K_TX_PAUSE_MAX,
};
struct ath10k { struct ath10k {
struct ath_common ath_common; struct ath_common ath_common;
struct ieee80211_hw *hw; struct ieee80211_hw *hw;
...@@ -511,12 +536,15 @@ struct ath10k { ...@@ -511,12 +536,15 @@ struct ath10k {
u32 fw_version_minor; u32 fw_version_minor;
u16 fw_version_release; u16 fw_version_release;
u16 fw_version_build; u16 fw_version_build;
u32 fw_stats_req_mask;
u32 phy_capability; u32 phy_capability;
u32 hw_min_tx_power; u32 hw_min_tx_power;
u32 hw_max_tx_power; u32 hw_max_tx_power;
u32 ht_cap_info; u32 ht_cap_info;
u32 vht_cap_info; u32 vht_cap_info;
u32 num_rf_chains; u32 num_rf_chains;
/* protected by conf_mutex */
bool ani_enabled;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
...@@ -565,6 +593,9 @@ struct ath10k { ...@@ -565,6 +593,9 @@ struct ath10k {
const struct firmware *cal_file; const struct firmware *cal_file;
char spec_board_id[100];
bool spec_board_loaded;
int fw_api; int fw_api;
enum ath10k_cal_mode cal_mode; enum ath10k_cal_mode cal_mode;
...@@ -593,6 +624,7 @@ struct ath10k { ...@@ -593,6 +624,7 @@ struct ath10k {
struct cfg80211_chan_def chandef; struct cfg80211_chan_def chandef;
unsigned long long free_vdev_map; unsigned long long free_vdev_map;
struct ath10k_vif *monitor_arvif;
bool monitor; bool monitor;
int monitor_vdev_id; int monitor_vdev_id;
bool monitor_started; bool monitor_started;
...@@ -633,6 +665,7 @@ struct ath10k { ...@@ -633,6 +665,7 @@ struct ath10k {
int max_num_peers; int max_num_peers;
int max_num_stations; int max_num_stations;
int max_num_vdevs; int max_num_vdevs;
int max_num_tdls_vdevs;
struct work_struct offchan_tx_work; struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue; struct sk_buff_head offchan_tx_queue;
...@@ -655,6 +688,8 @@ struct ath10k { ...@@ -655,6 +688,8 @@ struct ath10k {
struct dfs_pattern_detector *dfs_detector; struct dfs_pattern_detector *dfs_detector;
unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
#ifdef CONFIG_ATH10K_DEBUGFS #ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug; struct ath10k_debug debug;
#endif #endif
...@@ -686,6 +721,7 @@ struct ath10k { ...@@ -686,6 +721,7 @@ struct ath10k {
} stats; } stats;
struct ath10k_thermal thermal; struct ath10k_thermal thermal;
struct ath10k_wow wow;
/* must be last */ /* must be last */
u8 drv_priv[0] __aligned(sizeof(void *)); u8 drv_priv[0] __aligned(sizeof(void *));
......
...@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info); ...@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar) void ath10k_print_driver_info(struct ath10k *ar)
{ {
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n", ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name, ar->hw_params.name,
ar->target_version, ar->target_version,
ar->chip_id, ar->chip_id,
(strlen(ar->spec_board_id) > 0 ? ", " : ""),
ar->spec_board_id,
(strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
? " fallback" : ""),
ar->hw->wiphy->fw_version, ar->hw->wiphy->fw_version,
ar->fw_api, ar->fw_api,
ar->htt.target_version_major, ar->htt.target_version_major,
...@@ -380,12 +384,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) ...@@ -380,12 +384,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
static int ath10k_debug_fw_stats_request(struct ath10k *ar) static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{ {
unsigned long timeout; unsigned long timeout, time_left;
int ret; int ret;
lockdep_assert_held(&ar->conf_mutex); lockdep_assert_held(&ar->conf_mutex);
timeout = jiffies + msecs_to_jiffies(1*HZ); timeout = jiffies + msecs_to_jiffies(1 * HZ);
ath10k_debug_fw_stats_reset(ar); ath10k_debug_fw_stats_reset(ar);
...@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar) ...@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
reinit_completion(&ar->debug.fw_stats_complete); reinit_completion(&ar->debug.fw_stats_complete);
ret = ath10k_wmi_request_stats(ar, ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
WMI_STAT_PDEV |
WMI_STAT_VDEV |
WMI_STAT_PEER);
if (ret) { if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret); ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret; return ret;
} }
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, time_left =
1*HZ); wait_for_completion_timeout(&ar->debug.fw_stats_complete,
if (ret == 0) 1 * HZ);
if (!time_left)
return -ETIMEDOUT; return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock); spin_lock_bh(&ar->data_lock);
...@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode, ...@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
return 0; return 0;
} }
static ssize_t ath10k_write_ani_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int ret;
u8 enable;
if (kstrtou8_from_user(user_buf, count, 0, &enable))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->ani_enabled == enable) {
ret = count;
goto exit;
}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
enable);
if (ret) {
ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
goto exit;
}
ar->ani_enabled = enable;
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int len = 0;
char buf[32];
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->ani_enabled);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ani_enable = {
.read = ath10k_read_ani_enable,
.write = ath10k_write_ani_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static const struct file_operations fops_cal_data = { static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open, .open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read, .read = ath10k_debug_cal_data_read,
...@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = { ...@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
.open = simple_open .open = simple_open
}; };
static ssize_t ath10k_write_quiet_period(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
u32 period;
if (kstrtouint_from_user(ubuf, count, 0, &period))
return -EINVAL;
if (period < ATH10K_QUIET_PERIOD_MIN) {
ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
period);
return -EINVAL;
}
mutex_lock(&ar->conf_mutex);
ar->thermal.quiet_period = period;
ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return count;
}
static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32];
struct ath10k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->thermal.quiet_period);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_quiet_period = {
.read = ath10k_read_quiet_period,
.write = ath10k_write_quiet_period,
.open = simple_open
};
int ath10k_debug_create(struct ath10k *ar) int ath10k_debug_create(struct ath10k *ar)
{ {
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
...@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar) ...@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data); ar, &fops_cal_data);
debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_ani_enable);
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period); ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
...@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar) ...@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR, debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter); ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_quiet_period);
return 0; return 0;
} }
......
...@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, ...@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb); ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
} }
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
struct ath10k *ar = ep->htc->ar;
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, ...@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid; hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0; hdr->flags = 0;
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock); spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++; hdr->seq_no = ep->seq_no++;
if (ath10k_htc_ep_need_credit_update(ep))
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_unlock_bh(&ep->htc->tx_lock); spin_unlock_bh(&ep->htc->tx_lock);
} }
...@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, ...@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data; struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) { switch (__le16_to_cpu(msg->hdr.message_id)) {
default: case ATH10K_HTC_MSG_READY_ID:
case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */ /* handle HTC control message */
if (completion_done(&htc->ctl_resp)) { if (completion_done(&htc->ctl_resp)) {
/* /*
...@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, ...@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
break; break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE: case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar); htc->htc_ops.target_send_suspend_complete(ar);
break;
default:
ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
break;
} }
goto out; goto out;
} }
...@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) ...@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
{ {
struct ath10k *ar = htc->ar; struct ath10k *ar = htc->ar;
int i, status = 0; int i, status = 0;
unsigned long time_left;
struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp; struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg; struct ath10k_htc_msg *msg;
...@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) ...@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count; u16 credit_count;
u16 credit_size; u16 credit_size;
status = wait_for_completion_timeout(&htc->ctl_resp, time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ); ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0) { if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't /* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message * receive interrupt for the control response message
* even if the buffer was completed. It is suspected * even if the buffer was completed. It is suspected
...@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) ...@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
for (i = 0; i < CE_COUNT; i++) for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1); ath10k_hif_send_complete_check(htc->ar, i, 1);
status = wait_for_completion_timeout(&htc->ctl_resp, time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ); ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0) if (!time_left)
status = -ETIMEDOUT; status = -ETIMEDOUT;
} }
...@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, ...@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct sk_buff *skb; struct sk_buff *skb;
unsigned int max_msg_size = 0; unsigned int max_msg_size = 0;
int length, status; int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false; bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0; u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0; u8 tx_alloc = 0;
...@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, ...@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
} }
/* wait for response */ /* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp, time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (status == 0) { if (!time_left) {
ath10k_err(ar, "Service connect timeout: %d\n", status); ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT; return -ETIMEDOUT;
} }
......
...@@ -22,6 +22,86 @@ ...@@ -22,6 +22,86 @@
#include "core.h" #include "core.h"
#include "debug.h" #include "debug.h"
static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
[HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
[HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
[HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
[HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
[HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
[HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
[HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
[HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
[HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
[HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
};
static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
[HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
[HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
[HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
[HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
[HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
[HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
[HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
[HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
[HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
[HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
[HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
[HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
[HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
[HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
[HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
[HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
[HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
[HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
[HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
};
int ath10k_htt_connect(struct ath10k_htt *htt) int ath10k_htt_connect(struct ath10k_htt *htt)
{ {
struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_req conn_req;
...@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar) ...@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */ 8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */ 2; /* ip4 dscp or ip6 priority */
switch (ar->htt.op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_1:
ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_TLV:
ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_MAIN:
ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
break;
case ATH10K_FW_HTT_OP_VERSION_MAX:
case ATH10K_FW_HTT_OP_VERSION_UNSET:
WARN_ON(1);
return -EINVAL;
}
return 0; return 0;
} }
......
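A minimal sketch of the indirection this file introduces: the raw, firmware-specific message id indexes a per-HTT-version table (htt_main/10x/tlv_t2h_msg_types) and yields the abstract HTT_T2H_MSG_TYPE_* value that the event handler in htt_rx.c switches on. The helper name is hypothetical; the enum comes from the htt.h hunks that follow.

#include <linux/errno.h>
#include <linux/types.h>
#include "htt.h"	/* enum htt_t2h_msg_type as defined in this patch */

static int htt_translate_t2h_type(const enum htt_t2h_msg_type *table,
				  u32 table_len, u8 raw_type,
				  enum htt_t2h_msg_type *abstract)
{
	if (raw_type >= table_len)
		return -EINVAL;	/* id not covered by this firmware branch */

	*abstract = table[raw_type];
	return 0;
}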
...@@ -25,7 +25,9 @@ ...@@ -25,7 +25,9 @@
#include <net/mac80211.h> #include <net/mac80211.h>
#include "htc.h" #include "htc.h"
#include "hw.h"
#include "rx_desc.h" #include "rx_desc.h"
#include "hw.h"
enum htt_dbg_stats_type { enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
...@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status { ...@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
/*=== target -> host messages ===============================================*/ /*=== target -> host messages ===============================================*/
enum htt_t2h_msg_type { enum htt_main_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0, HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_T2H_MSG_TYPE_RX_IND = 0x1, HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2, HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_T2H_MSG_TYPE_RX_DELBA = 0x6, HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_T2H_MSG_TYPE_PKTLOG = 0x8, HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_T2H_MSG_TYPE_STATS_CONF = 0x9, HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_T2H_MSG_TYPE_SEC_IND = 0xb, HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf, HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10, HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11, HTT_MAIN_T2H_MSG_TYPE_TEST,
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12, /* keep this last */
HTT_MAIN_T2H_NUM_MSGS
};
enum htt_10x_t2h_msg_type {
HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
/* keep this last */
HTT_10X_T2H_NUM_MSGS
};
enum htt_tlv_t2h_msg_type {
HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
/* 0x13 reserved */ /* 0x13 reserved */
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14, HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
HTT_TLV_T2H_MSG_TYPE_TEST,
/* keep this last */
HTT_TLV_T2H_NUM_MSGS
};
/* FIXME: Do not depend on this event id. Numbering of this event id is enum htt_t2h_msg_type {
* broken across different firmware revisions and HTT version fails to HTT_T2H_MSG_TYPE_VERSION_CONF,
* indicate this. HTT_T2H_MSG_TYPE_RX_IND,
*/ HTT_T2H_MSG_TYPE_RX_FLUSH,
HTT_T2H_MSG_TYPE_PEER_MAP,
HTT_T2H_MSG_TYPE_PEER_UNMAP,
HTT_T2H_MSG_TYPE_RX_ADDBA,
HTT_T2H_MSG_TYPE_RX_DELBA,
HTT_T2H_MSG_TYPE_TX_COMPL_IND,
HTT_T2H_MSG_TYPE_PKTLOG,
HTT_T2H_MSG_TYPE_STATS_CONF,
HTT_T2H_MSG_TYPE_RX_FRAG_IND,
HTT_T2H_MSG_TYPE_SEC_IND,
HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
HTT_T2H_MSG_TYPE_RX_PN_IND,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
HTT_T2H_MSG_TYPE_CHAN_CHANGE,
HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
HTT_T2H_MSG_TYPE_AGGR_CONF,
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST, HTT_T2H_MSG_TYPE_TEST,
/* keep this last */ /* keep this last */
HTT_T2H_NUM_MSGS HTT_T2H_NUM_MSGS
}; };
...@@ -1222,6 +1297,7 @@ struct htt_tx_done { ...@@ -1222,6 +1297,7 @@ struct htt_tx_done {
u32 msdu_id; u32 msdu_id;
bool discard; bool discard;
bool no_ack; bool no_ack;
bool success;
}; };
struct htt_peer_map_event { struct htt_peer_map_event {
...@@ -1248,6 +1324,10 @@ struct ath10k_htt { ...@@ -1248,6 +1324,10 @@ struct ath10k_htt {
u8 target_version_major; u8 target_version_major;
u8 target_version_minor; u8 target_version_minor;
struct completion target_version_received; struct completion target_version_received;
enum ath10k_fw_htt_op_version op_version;
const enum htt_t2h_msg_type *t2h_msg_types;
u32 t2h_msg_types_max;
struct { struct {
/* /*
......
...@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, ...@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0; return 0;
} }
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct amsdu_subframe_hdr { struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN]; u8 dst[ETH_ALEN];
u8 src[ETH_ALEN]; u8 src[ETH_ALEN];
__be16 len; __be16 len;
} __packed; } __packed;
static const u8 rx_legacy_rate_idx[] = {
3, /* 0x00 - 11Mbps */
2, /* 0x01 - 5.5Mbps */
1, /* 0x02 - 2Mbps */
0, /* 0x03 - 1Mbps */
3, /* 0x04 - 11Mbps */
2, /* 0x05 - 5.5Mbps */
1, /* 0x06 - 2Mbps */
0, /* 0x07 - 1Mbps */
10, /* 0x08 - 48Mbps */
8, /* 0x09 - 24Mbps */
6, /* 0x0A - 12Mbps */
4, /* 0x0B - 6Mbps */
11, /* 0x0C - 54Mbps */
9, /* 0x0D - 36Mbps */
7, /* 0x0E - 18Mbps */
5, /* 0x0F - 9Mbps */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar, static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status, struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd) struct htt_rx_desc *rxd)
{ {
enum ieee80211_band band; struct ieee80211_supported_band *sband;
u8 cck, rate, rate_idx, bw, sgi, mcs, nss; u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0; u8 preamble = 0;
u32 info1, info2, info3; u32 info1, info2, info3;
/* Band value can't be set as undefined but freq can be 0 - use that to
* determine whether band is provided.
*
* FIXME: Perhaps this can go away if CCK rate reporting is a little
* reworked?
*/
if (!status->freq)
return;
band = status->band;
info1 = __le32_to_cpu(rxd->ppdu_start.info1); info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2); info2 = __le32_to_cpu(rxd->ppdu_start.info2);
info3 = __le32_to_cpu(rxd->ppdu_start.info3); info3 = __le32_to_cpu(rxd->ppdu_start.info3);
...@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, ...@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
switch (preamble) { switch (preamble) {
case HTT_RX_LEGACY: case HTT_RX_LEGACY:
/* To get legacy rate index band is required. Since band can't
* be undefined check if freq is non-zero.
*/
if (!status->freq)
return;
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
rate_idx = 0; rate &= ~RX_PPDU_START_RATE_FLAG;
if (rate < 0x08 || rate > 0x0F)
break;
switch (band) { sband = &ar->mac.sbands[status->band];
case IEEE80211_BAND_2GHZ: status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
if (cck)
rate &= ~BIT(3);
rate_idx = rx_legacy_rate_idx[rate];
break;
case IEEE80211_BAND_5GHZ:
rate_idx = rx_legacy_rate_idx[rate];
/* We are using same rate table registering
HW - ath10k_rates[]. In case of 5GHz skip
CCK rates, so -4 here */
rate_idx -= 4;
break;
default:
break;
}
status->rate_idx = rate_idx;
break; break;
case HTT_RX_HT: case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF: case HTT_RX_HT_WITH_TXBF:
...@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, ...@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
} }
} }
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
u16 peer_id;
lockdep_assert_held(&ar->data_lock);
if (!rxd)
return NULL;
if (rxd->attention.flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
if (!(rxd->msdu_end.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer)
return NULL;
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (WARN_ON_ONCE(!arvif))
return NULL;
if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
return NULL;
return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_id == vdev_id &&
ath10k_mac_vif_chan(arvif->vif, &def) == 0)
return def.chan;
}
return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data)
{
struct cfg80211_chan_def *def = data;
*def = conf->def;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
struct cfg80211_chan_def def = {};
ieee80211_iter_chan_contexts_atomic(ar->hw,
ath10k_htt_rx_h_any_chan_iter,
&def);
return def.chan;
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar, static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status) struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd,
u32 vdev_id)
{ {
struct ieee80211_channel *ch; struct ieee80211_channel *ch;
...@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, ...@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ar->scan_channel; ch = ar->scan_channel;
if (!ch) if (!ch)
ch = ar->rx_channel; ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
if (!ch)
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
if (!ch) if (!ch)
...@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar, ...@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu, struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status) struct ieee80211_rx_status *status,
u32 vdev_id)
{ {
struct sk_buff *first; struct sk_buff *first;
struct htt_rx_desc *rxd; struct htt_rx_desc *rxd;
...@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, ...@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL; status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd); ath10k_htt_rx_h_signal(ar, status, rxd);
ath10k_htt_rx_h_channel(ar, status); ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd); ath10k_htt_rx_h_rates(ar, status, rxd);
} }
...@@ -1522,7 +1558,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, ...@@ -1522,7 +1558,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
break; break;
} }
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
...@@ -1569,7 +1605,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, ...@@ -1569,7 +1605,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
return; return;
} }
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
...@@ -1598,6 +1634,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, ...@@ -1598,6 +1634,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.no_ack = true; tx_done.no_ack = true;
break; break;
case HTT_DATA_TX_STATUS_OK: case HTT_DATA_TX_STATUS_OK:
tx_done.success = true;
break; break;
case HTT_DATA_TX_STATUS_DISCARD: case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE: case HTT_DATA_TX_STATUS_POSTPONE:
...@@ -1796,7 +1833,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, ...@@ -1796,7 +1833,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL; status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu); ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status); ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu); ath10k_process_rx(ar, status, msdu);
} }
} }
...@@ -1869,7 +1906,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) ...@@ -1869,7 +1906,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This * better to report something than nothing though. This
* should still give an idea about rx rate to the user. * should still give an idea about rx rate to the user.
*/ */
ath10k_htt_rx_h_ppdu(ar, &amsdu, status); ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status); ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status); ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status); ath10k_htt_rx_h_deliver(ar, &amsdu, status);
...@@ -1892,6 +1929,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -1892,6 +1929,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{ {
struct ath10k_htt *htt = &ar->htt; struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data; struct htt_resp *resp = (struct htt_resp *)skb->data;
enum htt_t2h_msg_type type;
/* confirm alignment */ /* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4)) if (!IS_ALIGNED((unsigned long)skb->data, 4))
...@@ -1899,7 +1937,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -1899,7 +1937,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type); resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
dev_kfree_skb_any(skb);
return;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: { case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major; htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor; htt->target_version_minor = resp->ver_resp.minor;
...@@ -1937,6 +1984,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -1937,6 +1984,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
switch (status) { switch (status) {
case HTT_MGMT_TX_STATUS_OK: case HTT_MGMT_TX_STATUS_OK:
tx_done.success = true;
break; break;
case HTT_MGMT_TX_STATUS_RETRY: case HTT_MGMT_TX_STATUS_RETRY:
tx_done.no_ack = true; tx_done.no_ack = true;
...@@ -1976,7 +2024,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -1976,7 +2024,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break; break;
} }
case HTT_T2H_MSG_TYPE_TEST: case HTT_T2H_MSG_TYPE_TEST:
/* FIX THIS */
break; break;
case HTT_T2H_MSG_TYPE_STATS_CONF: case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len); trace_ath10k_htt_stats(ar, skb->data, skb->len);
...@@ -2018,11 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ...@@ -2018,11 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
return; return;
} }
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
/* FIXME: This WMI-TLV event is overlapping with 10.2 break;
* CHAN_CHANGE - both being 0xF. Neither is being used in case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
* practice so no immediate action is necessary. Nevertheless
* HTT may need an abstraction layer like WMI has one day.
*/
break; break;
default: default:
ath10k_warn(ar, "htt event (%d) not handled\n", ath10k_warn(ar, "htt event (%d) not handled\n",
......
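A minimal sketch of the channel-resolution order ath10k_htt_rx_h_channel() now follows; each lookup is consulted only if the previous ones returned NULL. The helper below only models the ordering and is not driver code.

#include <net/mac80211.h>

static struct ieee80211_channel *
pick_rx_channel(struct ieee80211_channel *scan_ch,
		struct ieee80211_channel *rx_ch,
		struct ieee80211_channel *peer_ch,
		struct ieee80211_channel *vdev_ch,
		struct ieee80211_channel *any_ch)
{
	struct ieee80211_channel *ch = scan_ch;	/* scanning channel first */

	if (!ch)
		ch = rx_ch;	/* the operating rx channel */
	if (!ch)
		ch = peer_ch;	/* derived from the rx descriptor's peer index */
	if (!ch)
		ch = vdev_ch;	/* derived from the indicated vdev_id */
	if (!ch)
		ch = any_ch;	/* any active channel context */

	return ch;
}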
...@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) ...@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{ {
htt->num_pending_tx--; htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1) if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ieee80211_wake_queues(htt->ar->hw); ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
} }
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
...@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) ...@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
htt->num_pending_tx++; htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx) if (htt->num_pending_tx == htt->max_num_pending_tx)
ieee80211_stop_queues(htt->ar->hw); ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
exit: exit:
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
...@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int res; int res;
u8 flags0 = 0; u8 flags0 = 0;
u16 msdu_id, flags1 = 0; u16 msdu_id, flags1 = 0;
dma_addr_t paddr; dma_addr_t paddr = 0;
u32 frags_paddr; u32 frags_paddr = 0;
bool use_frags;
res = ath10k_htt_tx_inc_pending(htt); res = ath10k_htt_tx_inc_pending(htt);
if (res) if (res)
...@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4); prefetch_len = roundup(prefetch_len, 4);
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
* fragment list host driver specifies directly frame pointer. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr); &paddr);
if (!skb_cb->htt.txbuf) { if (!skb_cb->htt.txbuf) {
...@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (res) if (res)
goto err_free_txbuf; goto err_free_txbuf;
if (likely(use_frags)) { switch (skb_cb->txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags; frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr); frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
...@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
frags[1].paddr = 0; frags[1].paddr = 0;
frags[1].len = 0; frags[1].len = 0;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr; frags_paddr = skb_cb->htt.txbuf_paddr;
} else { break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT, flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr; frags_paddr = skb_cb->paddr;
break;
} }
/* Normally all commands go through HTC which manages tx credits for /* Normally all commands go through HTC which manages tx credits for
...@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len); prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0; skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (!ieee80211_has_protected(hdr->frame_control)) if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) { if (msdu->ip_summed == CHECKSUM_PARTIAL) {
......
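With the rework above the tx descriptor flags are keyed off skb_cb->txmode rather than a use_frags bool, and the raw/native-wifi cases deliberately fall through so they share the fragment-list path with ethernet mode. A compilable sketch of just the flag construction; the mask/shift values are made up here, the real ones being the HTT_DATA_TX_DESC_FLAGS0_* definitions in htt.h:

#include <stdio.h>

/* illustrative field layout; SM() mirrors the driver's shift-and-mask helper */
#define PKT_TYPE_MASK   0x7
#define PKT_TYPE_LSB    0
#define MAC_HDR_PRESENT (1u << 3)
#define SM(v, f)        (((v) << f##_LSB) & f##_MASK)

enum txmode { TXRX_RAW, TXRX_NATIVE_WIFI, TXRX_ETHERNET, TXRX_MGMT };

static unsigned build_flags0(enum txmode mode)
{
        unsigned flags0 = 0;

        switch (mode) {
        case TXRX_RAW:
        case TXRX_NATIVE_WIFI:
                flags0 |= MAC_HDR_PRESENT;
                /* fall through - these modes share the fragment-list path */
        case TXRX_ETHERNET:
                flags0 |= SM(mode, PKT_TYPE);
                break;
        case TXRX_MGMT:
                flags0 |= SM(TXRX_MGMT, PKT_TYPE) | MAC_HDR_PRESENT;
                break;
        }
        return flags0;
}

int main(void)
{
        printf("raw 0x%x, native 0x%x, eth 0x%x, mgmt 0x%x\n",
               build_flags0(TXRX_RAW), build_flags0(TXRX_NATIVE_WIFI),
               build_flags0(TXRX_ETHERNET), build_flags0(TXRX_MGMT));
        return 0;
}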
...@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev { ...@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */ /* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin" #define ATH10K_FW_API4_FILE "firmware-4.bin"
/* HTT id conflict fix for management frames over HTT */
#define ATH10K_FW_API5_FILE "firmware-5.bin"
#define ATH10K_FW_UTF_FILE "utf.bin" #define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */ /* includes also the null byte */
...@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type { ...@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
* FW API 4 and above. * FW API 4 and above.
*/ */
ATH10K_FW_IE_WMI_OP_VERSION = 5, ATH10K_FW_IE_WMI_OP_VERSION = 5,
/* HTT "operations" interface version, 32 bit value. Supported from
* FW API 5 and above.
*/
ATH10K_FW_IE_HTT_OP_VERSION = 6,
}; };
enum ath10k_fw_wmi_op_version { enum ath10k_fw_wmi_op_version {
...@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version { ...@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX, ATH10K_FW_WMI_OP_VERSION_MAX,
}; };
enum ath10k_fw_htt_op_version {
ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
/* also used in 10.2 and 10.2.4 branches */
ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
ATH10K_FW_HTT_OP_VERSION_TLV = 3,
/* keep last */
ATH10K_FW_HTT_OP_VERSION_MAX,
};
enum ath10k_hw_rev { enum ath10k_hw_rev {
ATH10K_HW_QCA988X, ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174, ATH10K_HW_QCA6174,
...@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr { ...@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
u8 payload[0]; u8 payload[0];
} __packed; } __packed;
enum ath10k_hw_rate_ofdm {
ATH10K_HW_RATE_OFDM_48M = 0,
ATH10K_HW_RATE_OFDM_24M,
ATH10K_HW_RATE_OFDM_12M,
ATH10K_HW_RATE_OFDM_6M,
ATH10K_HW_RATE_OFDM_54M,
ATH10K_HW_RATE_OFDM_36M,
ATH10K_HW_RATE_OFDM_18M,
ATH10K_HW_RATE_OFDM_9M,
};
enum ath10k_hw_rate_cck {
ATH10K_HW_RATE_CCK_LP_11M = 0,
ATH10K_HW_RATE_CCK_LP_5_5M,
ATH10K_HW_RATE_CCK_LP_2M,
ATH10K_HW_RATE_CCK_LP_1M,
ATH10K_HW_RATE_CCK_SP_11M,
ATH10K_HW_RATE_CCK_SP_5_5M,
ATH10K_HW_RATE_CCK_SP_2M,
};
/* Target specific defines for MAIN firmware */ /* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8 #define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2 #define TARGET_NUM_PEER_AST 2
...@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr { ...@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_WDS_ENTRIES 32 #define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0 #define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0 #define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16 #define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128 #define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ #define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS)) (TARGET_10X_NUM_VDEVS))
...@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr { ...@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
#define TARGET_10_2_DMA_BURST_SIZE 1 #define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */ /* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 3 #define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32 #define TARGET_TLV_NUM_STATIONS 32
#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \ #define TARGET_TLV_NUM_PEERS 35
(TARGET_TLV_NUM_VDEVS) + \ #define TARGET_TLV_NUM_TDLS_VDEVS 1
2)
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Number of Copy Engines supported */ /* Number of Copy Engines supported */
#define CE_COUNT 8 #define CE_COUNT 8
......
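firmware-5.bin exists precisely so that ATH10K_FW_IE_HTT_OP_VERSION can tell the host which HTT dialect to speak; images older than API 5 carry no such IE, so some fallback from the WMI op version seems unavoidable. The mapping sketched below is an assumption for illustration only (it follows the comments in the enum above rather than quoting core.c):

#include <stdio.h>

/* local stand-ins for the WMI/HTT op version enums above */
enum wmi_op { WMI_OP_MAIN, WMI_OP_10_1, WMI_OP_10_2, WMI_OP_10_2_4, WMI_OP_TLV };
enum htt_op { HTT_OP_UNSET, HTT_OP_MAIN, HTT_OP_10_1, HTT_OP_TLV };

/* assumed fallback for pre-API-5 images that carry no HTT op version IE */
static enum htt_op guess_htt_op(enum wmi_op wmi)
{
        switch (wmi) {
        case WMI_OP_MAIN:
                return HTT_OP_MAIN;
        case WMI_OP_10_1:
        case WMI_OP_10_2:
        case WMI_OP_10_2_4:
                /* "also used in 10.2 and 10.2.4 branches" per the enum */
                return HTT_OP_10_1;
        case WMI_OP_TLV:
                return HTT_OP_TLV;
        }
        return HTT_OP_UNSET;
}

int main(void)
{
        printf("10.2 firmware -> HTT op version %d\n", guess_htt_op(WMI_OP_10_2));
        return 0;
}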
...@@ -23,11 +23,22 @@ ...@@ -23,11 +23,22 @@
#define WEP_KEYID_SHIFT 6 #define WEP_KEYID_SHIFT 6
enum wmi_tlv_tx_pause_id;
enum wmi_tlv_tx_pause_action;
struct ath10k_generic_iter { struct ath10k_generic_iter {
struct ath10k *ar; struct ath10k *ar;
int ret; int ret;
}; };
struct rfc1042_hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
__be16 snap_type;
} __packed;
struct ath10k *ath10k_mac_create(size_t priv_size); struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar); void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar); int ath10k_mac_register(struct ath10k *ar);
...@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif); ...@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar); void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
u8 keyidx); u8 keyidx);
int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
struct cfg80211_chan_def *def);
void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_id pause_id,
enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
u8 hw_rate);
u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate);
void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{ {
......
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "wmi.h"
#include "mac.h"
#include "p2p.h"
static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
const struct wmi_p2p_noa_info *noa)
{
struct ieee80211_p2p_noa_attr *noa_attr;
u8 ctwindow_oppps = noa->ctwindow_oppps;
u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
__le16 *noa_attr_len;
u16 attr_len;
u8 noa_descriptors = noa->num_descriptors;
int i;
/* P2P IE */
data[0] = WLAN_EID_VENDOR_SPECIFIC;
data[1] = len - 2;
data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
data[5] = WLAN_OUI_TYPE_WFA_P2P;
/* NOA ATTR */
data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
noa_attr->index = noa->index;
noa_attr->oppps_ctwindow = ctwindow;
if (oppps)
noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
for (i = 0; i < noa_descriptors; i++) {
noa_attr->desc[i].count =
__le32_to_cpu(noa->descriptors[i].type_count);
noa_attr->desc[i].duration = noa->descriptors[i].duration;
noa_attr->desc[i].interval = noa->descriptors[i].interval;
noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
}
attr_len = 2; /* index + oppps_ctwindow */
attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
*noa_attr_len = __cpu_to_le16(attr_len);
}
static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
{
size_t len = 0;
if (!noa->num_descriptors &&
!(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
return 0;
len += 1 + 1 + 4; /* EID + len + OUI */
len += 1 + 2; /* noa attr + attr len */
len += 1 + 1; /* index + oppps_ctwindow */
len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
return len;
}
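Plugging in the packed 13-byte ieee80211_p2p_noa_desc (1 + 4 + 4 + 4 bytes), the length computed above works out to 11 + 13 * num_descriptors; a tiny standalone check of that arithmetic:

#include <stdio.h>

#define NOA_DESC_SZ 13  /* packed ieee80211_p2p_noa_desc: 1 + 4 + 4 + 4 */

static unsigned p2p_noa_ie_len(unsigned ndesc, int oppps)
{
        if (!ndesc && !oppps)
                return 0;
        return (1 + 1 + 4) +            /* EID + IE length + OUI/type  */
               (1 + 2) +                /* NoA attribute id + attr len */
               (1 + 1) +                /* index + oppps_ctwindow      */
               ndesc * NOA_DESC_SZ;     /* NoA descriptors             */
}

int main(void)
{
        /* one descriptor with OppPS enabled: a 24-byte IE, i.e. data[1]
         * becomes 22 and the attribute length field becomes 15 */
        printf("%u\n", p2p_noa_ie_len(1, 1));
        return 0;
}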
static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
size_t len)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
kfree(arvif->u.ap.noa_data);
arvif->u.ap.noa_data = ie;
arvif->u.ap.noa_len = len;
}
static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k *ar = arvif->ar;
void *ie;
size_t len;
lockdep_assert_held(&ar->data_lock);
ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
len = ath10k_p2p_noa_ie_len_compute(noa);
if (!len)
return;
ie = kmalloc(len, GFP_ATOMIC);
if (!ie)
return;
ath10k_p2p_noa_ie_fill(ie, len, noa);
ath10k_p2p_noa_ie_assign(arvif, ie, len);
}
void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k *ar = arvif->ar;
spin_lock_bh(&ar->data_lock);
__ath10k_p2p_noa_update(arvif, noa);
spin_unlock_bh(&ar->data_lock);
}
struct ath10k_p2p_noa_arg {
u32 vdev_id;
const struct wmi_p2p_noa_info *noa;
};
static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_p2p_noa_arg *arg = data;
if (arvif->vdev_id != arg->vdev_id)
return;
ath10k_p2p_noa_update(arvif, arg->noa);
}
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
const struct wmi_p2p_noa_info *noa)
{
struct ath10k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
.noa = noa,
};
ieee80211_iterate_active_interfaces_atomic(ar->hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_p2p_noa_update_vdev_iter,
&arg);
}
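The by-vdev-id variant exists because the firmware event only names the interface by vdev id, so the update is dispatched through mac80211's interface iterator with ath10k_p2p_noa_update_vdev_iter() filtering on that id. A minimal userspace model of the iterate-and-filter pattern (all names here are invented):

#include <stdio.h>
#include <stdint.h>

struct vif { uint32_t vdev_id; const char *name; };
struct noa_arg { uint32_t vdev_id; const char *noa; };

/* callback: ignore every interface except the one the event names */
static void noa_update_iter(void *data, struct vif *vif)
{
        struct noa_arg *arg = data;

        if (vif->vdev_id != arg->vdev_id)
                return;
        printf("%s: new NoA attribute \"%s\"\n", vif->name, arg->noa);
}

static void for_each_vif(struct vif *vifs, int n,
                         void (*iter)(void *, struct vif *), void *data)
{
        for (int i = 0; i < n; i++)
                iter(data, &vifs[i]);
}

int main(void)
{
        struct vif vifs[] = { { 0, "wlan0" }, { 1, "p2p0" } };
        struct noa_arg arg = { .vdev_id = 1, .noa = "desc[0]" };

        for_each_vif(vifs, 2, noa_update_iter, &arg);
        return 0;
}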
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _P2P_H
#define _P2P_H
struct ath10k_vif;
struct wmi_p2p_noa_info;
void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
const struct wmi_p2p_noa_info *noa);
void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
const struct wmi_p2p_noa_info *noa);
#endif
...@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = { ...@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
.flags = CE_ATTR_FLAGS, .flags = CE_ATTR_FLAGS,
.src_nentries = 0, .src_nentries = 0,
.src_sz_max = 2048, .src_sz_max = 2048,
.dest_nentries = 32, .dest_nentries = 128,
}, },
/* CE3: host->target WMI */ /* CE3: host->target WMI */
...@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { ...@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
{ {
.pipenum = __cpu_to_le32(2), .pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN), .pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32), .nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048), .nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS), .flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0), .reserved = __cpu_to_le32(0),
...@@ -819,6 +819,21 @@ static int ath10k_pci_wake_wait(struct ath10k *ar) ...@@ -819,6 +819,21 @@ static int ath10k_pci_wake_wait(struct ath10k *ar)
return -ETIMEDOUT; return -ETIMEDOUT;
} }
/* The rule is host is forbidden from accessing device registers while it's
* asleep. Currently ath10k_pci_wake() and ath10k_pci_sleep() calls aren't
* balanced and the device is kept awake all the time. This is intended for a
* simpler solution for the following problems:
*
* * device can enter sleep during s2ram without the host knowing,
*
* * irq handlers access registers which is a problem if other device asserts
* a shared irq line when ath10k is between hif_power_down() and
* hif_power_up().
*
* FIXME: If power consumption is a concern (and there are *real* gains) then a
* refcounted wake/sleep needs to be implemented.
*/
static int ath10k_pci_wake(struct ath10k *ar) static int ath10k_pci_wake(struct ath10k *ar)
{ {
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
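The FIXME in the comment above gestures at a refcounted wake/sleep scheme; a toy model of that bookkeeping is sketched below. It is single-threaded for brevity (the real thing would need a lock or atomics) and is not what the driver does today, since the commit deliberately keeps the chip awake:

#include <stdio.h>

static int wake_refcount;

static void chip_wake(void)  { puts("register access allowed"); }
static void chip_sleep(void) { puts("chip may power down"); }

/* first user wakes the chip, last user lets it sleep again */
static void pci_wake(void)
{
        if (wake_refcount++ == 0)
                chip_wake();
}

static void pci_sleep(void)
{
        if (--wake_refcount == 0)
                chip_sleep();
}

int main(void)
{
        pci_wake();     /* e.g. irq handler starts */
        pci_wake();     /* concurrent register access elsewhere */
        pci_sleep();    /* inner user done, chip stays awake */
        pci_sleep();    /* last user done, chip may sleep */
        return 0;
}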
...@@ -1524,12 +1539,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) ...@@ -1524,12 +1539,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV: case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV: case QCA6174_HW_1_1_CHIP_ID_REV:
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 3; return 3;
case QCA6174_HW_1_3_CHIP_ID_REV: case QCA6174_HW_1_3_CHIP_ID_REV:
return 2; return 2;
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 6;
case QCA6174_HW_3_0_CHIP_ID_REV: case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV: case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV: case QCA6174_HW_3_2_CHIP_ID_REV:
...@@ -2034,28 +2048,13 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar) ...@@ -2034,28 +2048,13 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
/* Currently hif_power_up performs effectively a reset and hif_stop /* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here. * resets the chip as well so there's no point in resetting here.
*/ */
ath10k_pci_sleep(ar);
} }
#ifdef CONFIG_PM #ifdef CONFIG_PM
#define ATH10K_PCI_PM_CONTROL 0x44
static int ath10k_pci_hif_suspend(struct ath10k *ar) static int ath10k_pci_hif_suspend(struct ath10k *ar)
{ {
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_pci_sleep(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
if ((val & 0x000000ff) != 0x3) {
pci_save_state(pdev);
pci_disable_device(pdev);
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
(val & 0xffffff00) | 0x03);
}
return 0; return 0;
} }
...@@ -2065,25 +2064,24 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) ...@@ -2065,25 +2064,24 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev; struct pci_dev *pdev = ar_pci->pdev;
u32 val; u32 val;
int ret;
pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_err(ar, "failed to wake device up on resume: %d\n", ret);
return ret;
}
if ((val & 0x000000ff) != 0) { /* Suspend/Resume resets the PCI configuration space, so we have to
pci_restore_state(pdev); * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, * from interfering with C3 CPU state. pci_restore_state won't help
val & 0xffffff00); * here since it only restores the first 64 bytes pci config header.
/*
* Suspend/Resume resets the PCI configuration space,
* so we have to re-disable the RETRY_TIMEOUT register (0x41)
* to keep PCI Tx retries from interfering with C3 CPU state
*/ */
pci_read_config_dword(pdev, 0x40, &val); pci_read_config_dword(pdev, 0x40, &val);
if ((val & 0x0000ff00) != 0) if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
}
return 0; return ret;
} }
#endif #endif
...@@ -2177,6 +2175,13 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) ...@@ -2177,6 +2175,13 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{ {
struct ath10k *ar = arg; struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ret = ath10k_pci_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
return IRQ_NONE;
}
if (ar_pci->num_msi_intrs == 0) { if (ar_pci->num_msi_intrs == 0) {
if (!ath10k_pci_irq_pending(ar)) if (!ath10k_pci_irq_pending(ar))
...@@ -2621,6 +2626,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ...@@ -2621,6 +2626,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev; ar_pci->dev = &pdev->dev;
ar_pci->ar = ar; ar_pci->ar = ar;
if (pdev->subsystem_vendor || pdev->subsystem_device)
scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
"%04x:%04x:%04x:%04x",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
spin_lock_init(&ar_pci->ce_lock); spin_lock_init(&ar_pci->ce_lock);
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar); (unsigned long)ar);
...@@ -2678,11 +2689,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ...@@ -2678,11 +2689,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id); pdev->device, chip_id);
goto err_sleep; goto err_free_irq;
} }
ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id); ret = ath10k_core_register(ar, chip_id);
if (ret) { if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret); ath10k_err(ar, "failed to register driver core: %d\n", ret);
...@@ -2770,7 +2779,19 @@ module_exit(ath10k_pci_exit); ...@@ -2770,7 +2779,19 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros"); MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
...@@ -661,6 +661,28 @@ struct rx_msdu_end { ...@@ -661,6 +661,28 @@ struct rx_msdu_end {
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff #define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0 #define RX_PPDU_START_INFO5_SERVICE_LSB 0
/* No idea what this flag means. It seems to be always set in rate. */
#define RX_PPDU_START_RATE_FLAG BIT(3)
enum rx_ppdu_start_rate {
RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
};
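These codes feed the ath10k_mac_hw_rate_to_idx() helper declared in mac.h above: presumably the flag bit is masked off and the remainder is matched against the band's hardware rate codes. A userspace model of that lookup, with hw values taken from enum ath10k_hw_rate_ofdm and only three entries listed for brevity:

#include <stdio.h>
#include <stdint.h>

#define RATE_FLAG (1u << 3)     /* RX_PPDU_START_RATE_FLAG */

struct rate { uint8_t hw_value; uint16_t bitrate; /* 100 kbps units */ };

static const struct rate rates[] = {
        { 0, 480 },     /* ATH10K_HW_RATE_OFDM_48M */
        { 3,  60 },     /* ATH10K_HW_RATE_OFDM_6M  */
        { 4, 540 },     /* ATH10K_HW_RATE_OFDM_54M */
};

static int hw_rate_to_idx(uint8_t hw_rate)
{
        hw_rate &= ~RATE_FLAG;
        for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                if (rates[i].hw_value == hw_rate)
                        return i;
        return -1;      /* unknown rate code */
}

int main(void)
{
        printf("rx rate code 0x%x -> rate index %d\n",
               RATE_FLAG | 4, hw_rate_to_idx(RATE_FLAG | 4));
        return 0;
}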
struct rx_ppdu_start { struct rx_ppdu_start {
struct { struct {
u8 pri20_mhz; u8 pri20_mhz;
......
...@@ -23,102 +23,50 @@ ...@@ -23,102 +23,50 @@
#include "debug.h" #include "debug.h"
#include "wmi-ops.h" #include "wmi-ops.h"
static int ath10k_thermal_get_active_vifs(struct ath10k *ar, static int
enum wmi_vdev_type type) ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
{
struct ath10k_vif *arvif;
int count = 0;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_started)
continue;
if (!arvif->is_up)
continue;
if (arvif->vdev_type != type)
continue;
count++;
}
return count;
}
static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
unsigned long *state) unsigned long *state)
{ {
*state = ATH10K_QUIET_DUTY_CYCLE_MAX; *state = ATH10K_THERMAL_THROTTLE_MAX;
return 0; return 0;
} }
static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev, static int
ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state) unsigned long *state)
{ {
struct ath10k *ar = cdev->devdata; struct ath10k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
*state = ar->thermal.duty_cycle; *state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
return 0; return 0;
} }
static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev, static int
unsigned long duty_cycle) ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long throttle_state)
{ {
struct ath10k *ar = cdev->devdata; struct ath10k *ar = cdev->devdata;
u32 period, duration, enabled;
int num_bss, ret = 0;
mutex_lock(&ar->conf_mutex); if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
if (ar->state != ATH10K_STATE_ON) { ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
ret = -ENETDOWN; throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
goto out; return -EINVAL;
}
if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
ret = -EINVAL;
goto out;
}
/* TODO: Right now, thermal mitigation is handled only for single/multi
* vif AP mode. Since quiet param is not validated in STA mode, it needs
* to be investigated further to handle multi STA and multi-vif (AP+STA)
* mode properly.
*/
num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
if (!num_bss) {
ath10k_warn(ar, "no active AP interfaces\n");
ret = -ENETDOWN;
goto out;
}
period = max(ATH10K_QUIET_PERIOD_MIN,
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
duration = (period * duty_cycle) / 100;
enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
ATH10K_QUIET_START_OFFSET,
enabled);
if (ret) {
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
period, duration, enabled, ret);
goto out;
} }
ar->thermal.duty_cycle = duty_cycle; mutex_lock(&ar->conf_mutex);
out: ar->thermal.throttle_state = throttle_state;
ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
return ret; return 0;
} }
static struct thermal_cooling_device_ops ath10k_thermal_ops = { static struct thermal_cooling_device_ops ath10k_thermal_ops = {
.get_max_state = ath10k_thermal_get_max_dutycycle, .get_max_state = ath10k_thermal_get_max_throttle_state,
.get_cur_state = ath10k_thermal_get_cur_dutycycle, .get_cur_state = ath10k_thermal_get_cur_throttle_state,
.set_cur_state = ath10k_thermal_set_cur_dutycycle, .set_cur_state = ath10k_thermal_set_cur_throttle_state,
}; };
static ssize_t ath10k_thermal_show_temp(struct device *dev, static ssize_t ath10k_thermal_show_temp(struct device *dev,
...@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev, ...@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
{ {
struct ath10k *ar = dev_get_drvdata(dev); struct ath10k *ar = dev_get_drvdata(dev);
int ret, temperature; int ret, temperature;
unsigned long time_left;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
...@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev, ...@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
goto out; goto out;
} }
ret = wait_for_completion_timeout(&ar->thermal.wmi_sync, time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
ATH10K_THERMAL_SYNC_TIMEOUT_HZ); ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
if (ret == 0) { if (!time_left) {
ath10k_warn(ar, "failed to synchronize thermal read\n"); ath10k_warn(ar, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
goto out; goto out;
...@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = { ...@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
}; };
ATTRIBUTE_GROUPS(ath10k_hwmon); ATTRIBUTE_GROUPS(ath10k_hwmon);
void ath10k_thermal_set_throttling(struct ath10k *ar)
{
u32 period, duration, enabled;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
return;
if (ar->state != ATH10K_STATE_ON)
return;
period = ar->thermal.quiet_period;
duration = (period * ar->thermal.throttle_state) / 100;
enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
ATH10K_QUIET_START_OFFSET,
enabled);
if (ret) {
ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
period, duration, enabled, ret);
}
}
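Concretely, the throttle state selected through the cooling device's cur_state now maps straight onto a percentage of the quiet period; with the default 100 ms period, state 30 asks the firmware to stay quiet for 30 ms out of every 100 ms. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned quiet_period = 100;    /* ATH10K_QUIET_PERIOD_DEFAULT */
        unsigned throttle_state = 30;   /* 0..ATH10K_THERMAL_THROTTLE_MAX */
        unsigned duration = (quiet_period * throttle_state) / 100;

        printf("quiet for %u ms of every %u ms (%s)\n",
               duration, quiet_period, duration ? "enabled" : "disabled");
        return 0;
}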
int ath10k_thermal_register(struct ath10k *ar) int ath10k_thermal_register(struct ath10k *ar)
{ {
struct thermal_cooling_device *cdev; struct thermal_cooling_device *cdev;
...@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar) ...@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj, ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
"cooling_device"); "cooling_device");
if (ret) { if (ret) {
ath10k_err(ar, "failed to create thermal symlink\n"); ath10k_err(ar, "failed to create cooling device symlink\n");
goto err_cooling_destroy; goto err_cooling_destroy;
} }
ar->thermal.cdev = cdev; ar->thermal.cdev = cdev;
ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
/* Do not register hwmon device when temperature reading is not /* Do not register hwmon device when temperature reading is not
* supported by firmware * supported by firmware
...@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar) ...@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0; return 0;
err_remove_link: err_remove_link:
sysfs_remove_link(&ar->dev->kobj, "thermal_sensor"); sysfs_remove_link(&ar->dev->kobj, "cooling_device");
err_cooling_destroy: err_cooling_destroy:
thermal_cooling_device_unregister(cdev); thermal_cooling_device_unregister(cdev);
return ret; return ret;
......
...@@ -19,16 +19,17 @@ ...@@ -19,16 +19,17 @@
#define ATH10K_QUIET_PERIOD_DEFAULT 100 #define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25 #define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10 #define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15 #define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ) #define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal { struct ath10k_thermal {
struct thermal_cooling_device *cdev; struct thermal_cooling_device *cdev;
struct completion wmi_sync; struct completion wmi_sync;
/* protected by conf_mutex */ /* protected by conf_mutex */
u32 duty_cycle; u32 throttle_state;
u32 quiet_period;
/* temperature value in Celcius degree /* temperature value in Celcius degree
* protected by data_lock * protected by data_lock
*/ */
...@@ -39,6 +40,7 @@ struct ath10k_thermal { ...@@ -39,6 +40,7 @@ struct ath10k_thermal {
int ath10k_thermal_register(struct ath10k *ar); int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar); void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature); void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
void ath10k_thermal_set_throttling(struct ath10k *ar);
#else #else
static inline int ath10k_thermal_register(struct ath10k *ar) static inline int ath10k_thermal_register(struct ath10k *ar)
{ {
...@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar, ...@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
{ {
} }
static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
{
}
#endif #endif
#endif /* _THERMAL_ */ #endif /* _THERMAL_ */
...@@ -21,11 +21,16 @@ ...@@ -21,11 +21,16 @@
#include "core.h" #include "core.h"
#if !defined(_TRACE_H_) #if !defined(_TRACE_H_)
static inline u32 ath10k_frm_hdr_len(const void *buf) static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
{ {
const struct ieee80211_hdr *hdr = buf; const struct ieee80211_hdr *hdr = buf;
return ieee80211_hdrlen(hdr->frame_control); /* In some rare cases (e.g. fcs error) device reports frame buffer
* shorter than what frame header implies (e.g. len = 0). The buffer
* can still be accessed so do a simple min() to guarantee caller
* doesn't get value greater than len.
*/
return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
} }
#endif #endif
...@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {} ...@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
#undef TRACE_SYSTEM #undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k #define TRACE_SYSTEM ath10k
#define ATH10K_MSG_MAX 200 #define ATH10K_MSG_MAX 400
DECLARE_EVENT_CLASS(ath10k_log_event, DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf), TP_PROTO(struct ath10k *ar, struct va_format *vaf),
...@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event, ...@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
__string(device, dev_name(ar->dev)) __string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev)) __string(driver, dev_driver_string(ar->dev))
__field(size_t, len) __field(size_t, len)
__dynamic_array(u8, data, ath10k_frm_hdr_len(data)) __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
), ),
TP_fast_assign( TP_fast_assign(
__assign_str(device, dev_name(ar->dev)); __assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev));
__entry->len = ath10k_frm_hdr_len(data); __entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len); memcpy(__get_dynamic_array(data), data, __entry->len);
), ),
...@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event, ...@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
__string(device, dev_name(ar->dev)) __string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev)) __string(driver, dev_driver_string(ar->dev))
__field(size_t, len) __field(size_t, len)
__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data))) __dynamic_array(u8, payload, (len -
ath10k_frm_hdr_len(data, len)))
), ),
TP_fast_assign( TP_fast_assign(
__assign_str(device, dev_name(ar->dev)); __assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len - ath10k_frm_hdr_len(data); __entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload), memcpy(__get_dynamic_array(payload),
data + ath10k_frm_hdr_len(data), __entry->len); data + ath10k_frm_hdr_len(data, len), __entry->len);
), ),
TP_printk( TP_printk(
......
...@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ...@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
lockdep_assert_held(&htt->tx_lock); lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", ath10k_dbg(ar, ATH10K_DBG_HTT,
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
tx_done->msdu_id, !!tx_done->discard,
!!tx_done->no_ack, !!tx_done->success);
if (tx_done->msdu_id >= htt->max_num_pending_tx) { if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
...@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ...@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->no_ack) if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK; info->flags &= ~IEEE80211_TX_STAT_ACK;
if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status(htt->ar->hw, msdu); ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */ /* we do not own the msdu anymore */
......
...@@ -45,6 +45,10 @@ struct wmi_ops { ...@@ -45,6 +45,10 @@ struct wmi_ops {
struct wmi_rdy_ev_arg *arg); struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb, int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats); struct ath10k_fw_stats *stats);
int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar); struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
...@@ -81,7 +85,8 @@ struct wmi_ops { ...@@ -81,7 +85,8 @@ struct wmi_ops {
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg); const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]); const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]); const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
...@@ -148,6 +153,27 @@ struct wmi_ops { ...@@ -148,6 +153,27 @@ struct wmi_ops {
u32 num_ac); u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar, struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg); const struct wmi_sta_keepalive_arg *arg);
struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable);
struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id,
const u8 *pattern,
const u8 *mask,
int pattern_len,
int pattern_offset);
struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
u32 pattern_id);
struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
u32 vdev_id,
enum wmi_tdls_state state);
struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan);
struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
}; };
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
...@@ -273,6 +299,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, ...@@ -273,6 +299,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_fw_stats(ar, skb, stats); return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
} }
static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_roam_ev_arg *arg)
{
if (!ar->wmi.ops->pull_roam_ev)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg)
{
if (!ar->wmi.ops->pull_wow_event)
return -EOPNOTSUPP;
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
static inline int static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{ {
...@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, ...@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
static inline int static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]) const u8 peer_addr[ETH_ALEN],
enum wmi_peer_type peer_type)
{ {
struct sk_buff *skb; struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create) if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP; return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr); skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar, ...@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, cmd_id); return ath10k_wmi_cmd_send(ar, skb, cmd_id);
} }
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_enable)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_enable(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_wakeup_event)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
const u8 *pattern, const u8 *mask,
int pattern_len, int pattern_offset)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_add_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
pattern, mask, pattern_len,
pattern_offset);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_del_pattern)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_update_fw_tdls_state)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}
static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
const struct wmi_tdls_peer_update_cmd_arg *arg,
const struct wmi_tdls_peer_capab_arg *cap,
const struct wmi_channel_arg *chan)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_tdls_peer_update)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->tdls_peer_update_cmdid);
}
static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_adaptive_qcs)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}
#endif #endif
...@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev { ...@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
__le32 num_chan_stats; __le32 num_chan_stats;
} __packed; } __packed;
struct wmi_tlv_p2p_noa_ev {
__le32 vdev_id;
} __packed;
struct wmi_tlv_roam_ev {
__le32 vdev_id;
__le32 reason;
__le32 rssi;
} __packed;
struct wmi_tlv_wow_add_del_event_cmd {
__le32 vdev_id;
__le32 is_add;
__le32 event_bitmap;
} __packed;
struct wmi_tlv_wow_enable_cmd {
__le32 enable;
} __packed;
struct wmi_tlv_wow_host_wakeup_ind {
__le32 reserved;
} __packed;
struct wmi_tlv_wow_event_info {
__le32 vdev_id;
__le32 flag;
__le32 wake_reason;
__le32 data_len;
} __packed;
enum wmi_tlv_pattern_type {
WOW_PATTERN_MIN = 0,
WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
WOW_IPV4_SYNC_PATTERN,
WOW_IPV6_SYNC_PATTERN,
WOW_WILD_CARD_PATTERN,
WOW_TIMER_PATTERN,
WOW_MAGIC_PATTERN,
WOW_IPV6_RA_PATTERN,
WOW_IOAC_PKT_PATTERN,
WOW_IOAC_TMR_PATTERN,
WOW_PATTERN_MAX
};
#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
#define WOW_DEFAULT_BITMASK_SIZE 148
struct wmi_tlv_wow_bitmap_pattern {
u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
__le32 pattern_offset;
__le32 pattern_len;
__le32 bitmask_len;
__le32 pattern_id;
} __packed;
struct wmi_tlv_wow_add_pattern_cmd {
__le32 vdev_id;
__le32 pattern_id;
__le32 pattern_type;
} __packed;
struct wmi_tlv_wow_del_pattern_cmd {
__le32 vdev_id;
__le32 pattern_id;
__le32 pattern_type;
} __packed;
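The bitmap-pattern idea behind these structures is that a received frame wakes the host when the bytes at pattern_offset match the pattern under a mask. The exact mask encoding the firmware expects is not spelled out by the structures themselves, so the model below illustrates only the concept, not the wire format:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static int pattern_matches(const uint8_t *pkt, size_t pkt_len,
                           const uint8_t *pattern, const uint8_t *mask,
                           size_t pattern_len, size_t pattern_offset)
{
        if (pattern_offset + pattern_len > pkt_len)
                return 0;

        for (size_t i = 0; i < pattern_len; i++)
                if ((pkt[pattern_offset + i] & mask[i]) !=
                    (pattern[i] & mask[i]))
                        return 0;
        return 1;
}

int main(void)
{
        uint8_t pkt[]     = { 0xaa, 0xbb, 0x08, 0x00, 0x45, 0x00 };
        uint8_t pattern[] = { 0x08, 0x00 };     /* e.g. EtherType IPv4 */
        uint8_t mask[]    = { 0xff, 0xff };

        printf("match: %d\n",
               pattern_matches(pkt, sizeof(pkt), pattern, mask,
                               sizeof(pattern), 2));
        return 0;
}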
/* TDLS Options */
enum wmi_tlv_tdls_options {
WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
};
struct wmi_tdls_set_state_cmd {
__le32 vdev_id;
__le32 state;
__le32 notification_interval_ms;
__le32 tx_discovery_threshold;
__le32 tx_teardown_threshold;
__le32 rssi_teardown_threshold;
__le32 rssi_delta;
__le32 tdls_options;
__le32 tdls_peer_traffic_ind_window;
__le32 tdls_peer_traffic_response_timeout_ms;
__le32 tdls_puapsd_mask;
__le32 tdls_puapsd_inactivity_time_ms;
__le32 tdls_puapsd_rx_frame_threshold;
} __packed;
struct wmi_tdls_peer_update_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
__le32 peer_state;
} __packed;
enum {
WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
};
#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
#define WMI_TLV_TDLS_PEER_SP_LSB 5
struct wmi_tdls_peer_capab {
__le32 peer_qos;
__le32 buff_sta_support;
__le32 off_chan_support;
__le32 peer_curr_operclass;
__le32 self_curr_operclass;
__le32 peer_chan_len;
__le32 peer_operclass_len;
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
} __packed;
struct wmi_tlv_adaptive_qcs {
__le32 enable;
} __packed;
/**
* wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
 * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
* Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
* Only peer_id is valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
* @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
* @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
* vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
* vdev_map is valid.
* @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
*/
enum wmi_tlv_tx_pause_id {
WMI_TLV_TX_PAUSE_ID_MCC = 1,
WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
WMI_TLV_TX_PAUSE_ID_HOST = 21,
};
enum wmi_tlv_tx_pause_action {
WMI_TLV_TX_PAUSE_ACTION_STOP,
WMI_TLV_TX_PAUSE_ACTION_WAKE,
};
struct wmi_tlv_tx_pause_ev {
__le32 pause_id;
__le32 action;
__le32 vdev_map;
__le32 peer_id;
__le32 tid_map;
} __packed;
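For the pause ids whose kernel-doc above says only vdev_map is valid, handling amounts to walking the bitmap and stopping or waking each vdev's queues, which is what the new ath10k_mac_handle_tx_pause() hook in mac.h is for. A standalone model of the bitmap walk:

#include <stdio.h>
#include <stdint.h>

enum pause_action { PAUSE_ACTION_STOP, PAUSE_ACTION_WAKE };

static void handle_tx_pause(uint32_t vdev_map, enum pause_action action)
{
        /* each set bit names a vdev affected by the event */
        for (int vdev_id = 0; vdev_map; vdev_id++, vdev_map >>= 1) {
                if (!(vdev_map & 1))
                        continue;
                printf("vdev %d: %s tx queues\n", vdev_id,
                       action == PAUSE_ACTION_STOP ? "stop" : "wake");
        }
}

int main(void)
{
        handle_tx_pause(0x5, PAUSE_ACTION_STOP);        /* vdevs 0 and 2 */
        handle_tx_pause(0x5, PAUSE_ACTION_WAKE);
        return 0;
}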
void ath10k_wmi_tlv_attach(struct ath10k *ar); void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif #endif
...@@ -148,6 +148,8 @@ enum wmi_service { ...@@ -148,6 +148,8 @@ enum wmi_service {
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_SERVICE_MDNS_OFFLOAD, WMI_SERVICE_MDNS_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD, WMI_SERVICE_SAP_AUTH_OFFLOAD,
WMI_SERVICE_ATF,
WMI_SERVICE_COEX_GPIO,
/* keep last */ /* keep last */
WMI_SERVICE_MAX, WMI_SERVICE_MAX,
...@@ -177,6 +179,8 @@ enum wmi_10x_service { ...@@ -177,6 +179,8 @@ enum wmi_10x_service {
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG, WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_10X_SERVICE_ATF,
WMI_10X_SERVICE_COEX_GPIO,
}; };
enum wmi_main_service { enum wmi_main_service {
...@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id) ...@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT); SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD); SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD); SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
SVCSTR(WMI_SERVICE_ATF);
SVCSTR(WMI_SERVICE_COEX_GPIO);
default: default:
return NULL; return NULL;
} }
...@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, ...@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_FORCE_FW_HANG, len); WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
SVCMAP(WMI_10X_SERVICE_ATF,
WMI_SERVICE_ATF, len);
SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
WMI_SERVICE_COEX_GPIO, len);
} }
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
...@@ -552,6 +562,9 @@ struct wmi_cmd_map { ...@@ -552,6 +562,9 @@ struct wmi_cmd_map {
u32 gpio_output_cmdid; u32 gpio_output_cmdid;
u32 pdev_get_temperature_cmdid; u32 pdev_get_temperature_cmdid;
u32 vdev_set_wmm_params_cmdid; u32 vdev_set_wmm_params_cmdid;
u32 tdls_set_state_cmdid;
u32 tdls_peer_update_cmdid;
u32 adaptive_qcs_cmdid;
}; };
/* /*
...@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x { ...@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
enum wmi_10_2_feature_mask { enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0), WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1), WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
}; };
struct wmi_resource_config_10_2 { struct wmi_resource_config_10_2 {
...@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg { ...@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
u32 max_scan_time; u32 max_scan_time;
u32 probe_delay; u32 probe_delay;
u32 scan_ctrl_flags; u32 scan_ctrl_flags;
u32 burst_duration_ms;
u32 ie_len; u32 ie_len;
u32 n_channels; u32 n_channels;
...@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd { ...@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
struct wmi_mac_addr peer_macaddr; struct wmi_mac_addr peer_macaddr;
} __packed; } __packed;
enum wmi_peer_type {
WMI_PEER_TYPE_DEFAULT = 0,
WMI_PEER_TYPE_BSS = 1,
WMI_PEER_TYPE_TDLS = 2,
};
struct wmi_peer_delete_cmd { struct wmi_peer_delete_cmd {
__le32 vdev_id; __le32 vdev_id;
struct wmi_mac_addr peer_macaddr; struct wmi_mac_addr peer_macaddr;
...@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event { ...@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
} __packed; } __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0) #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
/* FIXME: empirically extrapolated */
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
/* Beacon filter wmi command info */ /* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256 #define BCN_FLT_MAX_SUPPORTED_IES 256
...@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd { ...@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid; __le32 config_valid;
} __packed; } __packed;
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
WMI_ROAM_REASON_LOW_RSSI = 3,
WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
WMI_ROAM_REASON_HO_FAILED = 5,
/* keep last */
WMI_ROAM_REASON_MAX,
};
struct wmi_roam_ev {
__le32 vdev_id;
__le32 reason;
} __packed;
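How the driver reacts to each roam reason is not shown in this hunk; the sketch below only illustrates the obvious dispatch, and the guess that WMI_ROAM_REASON_BEACON_MISS maps onto the ath10k_mac_handle_beacon_miss() helper added in mac.h is mine:

#include <stdio.h>

enum roam_reason {
        ROAM_BETTER_AP = 1,
        ROAM_BEACON_MISS = 2,
        ROAM_LOW_RSSI = 3,
};

static void handle_roam(unsigned vdev_id, enum roam_reason reason)
{
        switch (reason) {
        case ROAM_BEACON_MISS:
                printf("vdev %u: report beacon loss\n", vdev_id);
                break;
        default:
                printf("vdev %u: roam reason %d ignored\n", vdev_id, reason);
                break;
        }
}

int main(void)
{
        handle_roam(0, ROAM_BEACON_MISS);
        handle_roam(0, ROAM_LOW_RSSI);
        return 0;
}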
#define ATH10K_FRAGMT_THRESHOLD_MIN 540 #define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346 #define ATH10K_FRAGMT_THRESHOLD_MAX 2346
...@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg { ...@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
const u8 *mac_addr; const u8 *mac_addr;
}; };
struct wmi_roam_ev_arg {
__le32 vdev_id;
__le32 reason;
__le32 rssi;
};
struct wmi_pdev_temperature_event { struct wmi_pdev_temperature_event {
/* temperature value in Celcius degree */ /* temperature value in Celcius degree */
__le32 temperature; __le32 temperature;
} __packed; } __packed;
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
WOW_BETTER_AP_EVENT,
WOW_DEAUTH_RECVD_EVENT,
WOW_MAGIC_PKT_RECVD_EVENT,
WOW_GTK_ERR_EVENT,
WOW_FOURWAY_HSHAKE_EVENT,
WOW_EAPOL_RECVD_EVENT,
WOW_NLO_DETECTED_EVENT,
WOW_DISASSOC_RECVD_EVENT,
WOW_PATTERN_MATCH_EVENT,
WOW_CSA_IE_EVENT,
WOW_PROBE_REQ_WPS_IE_EVENT,
WOW_AUTH_REQ_EVENT,
WOW_ASSOC_REQ_EVENT,
WOW_HTT_EVENT,
WOW_RA_MATCH_EVENT,
WOW_HOST_AUTO_SHUTDOWN_EVENT,
WOW_IOAC_MAGIC_EVENT,
WOW_IOAC_SHORT_EVENT,
WOW_IOAC_EXTEND_EVENT,
WOW_IOAC_TIMER_EVENT,
WOW_DFS_PHYERR_RADAR_EVENT,
WOW_BEACON_EVENT,
WOW_CLIENT_KICKOUT_EVENT,
WOW_EVENT_MAX,
};
#define C2S(x) case x: return #x
static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
{
switch (ev) {
C2S(WOW_BMISS_EVENT);
C2S(WOW_BETTER_AP_EVENT);
C2S(WOW_DEAUTH_RECVD_EVENT);
C2S(WOW_MAGIC_PKT_RECVD_EVENT);
C2S(WOW_GTK_ERR_EVENT);
C2S(WOW_FOURWAY_HSHAKE_EVENT);
C2S(WOW_EAPOL_RECVD_EVENT);
C2S(WOW_NLO_DETECTED_EVENT);
C2S(WOW_DISASSOC_RECVD_EVENT);
C2S(WOW_PATTERN_MATCH_EVENT);
C2S(WOW_CSA_IE_EVENT);
C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
C2S(WOW_AUTH_REQ_EVENT);
C2S(WOW_ASSOC_REQ_EVENT);
C2S(WOW_HTT_EVENT);
C2S(WOW_RA_MATCH_EVENT);
C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
C2S(WOW_IOAC_MAGIC_EVENT);
C2S(WOW_IOAC_SHORT_EVENT);
C2S(WOW_IOAC_EXTEND_EVENT);
C2S(WOW_IOAC_TIMER_EVENT);
C2S(WOW_DFS_PHYERR_RADAR_EVENT);
C2S(WOW_BEACON_EVENT);
C2S(WOW_CLIENT_KICKOUT_EVENT);
C2S(WOW_EVENT_MAX);
default:
return NULL;
}
}
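For reference, C2S() relies on the preprocessor's stringification operator; a hedged illustration of what one case in the switch above expands to:
/* C2S(WOW_BMISS_EVENT) expands to:
 *
 *	case WOW_BMISS_EVENT: return "WOW_BMISS_EVENT";
 *
 * so each enum value maps to its own name without a separate lookup table.
 */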
enum wmi_wow_wake_reason {
WOW_REASON_UNSPECIFIED = -1,
WOW_REASON_NLOD = 0,
WOW_REASON_AP_ASSOC_LOST,
WOW_REASON_LOW_RSSI,
WOW_REASON_DEAUTH_RECVD,
WOW_REASON_DISASSOC_RECVD,
WOW_REASON_GTK_HS_ERR,
WOW_REASON_EAP_REQ,
WOW_REASON_FOURWAY_HS_RECV,
WOW_REASON_TIMER_INTR_RECV,
WOW_REASON_PATTERN_MATCH_FOUND,
WOW_REASON_RECV_MAGIC_PATTERN,
WOW_REASON_P2P_DISC,
WOW_REASON_WLAN_HB,
WOW_REASON_CSA_EVENT,
WOW_REASON_PROBE_REQ_WPS_IE_RECV,
WOW_REASON_AUTH_REQ_RECV,
WOW_REASON_ASSOC_REQ_RECV,
WOW_REASON_HTT_EVENT,
WOW_REASON_RA_MATCH,
WOW_REASON_HOST_AUTO_SHUTDOWN,
WOW_REASON_IOAC_MAGIC_EVENT,
WOW_REASON_IOAC_SHORT_EVENT,
WOW_REASON_IOAC_EXTEND_EVENT,
WOW_REASON_IOAC_TIMER_EVENT,
WOW_REASON_ROAM_HO,
WOW_REASON_DFS_PHYERR_RADADR_EVENT,
WOW_REASON_BEACON_RECV,
WOW_REASON_CLIENT_KICKOUT_EVENT,
WOW_REASON_DEBUG_TEST = 0xFF,
};
static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
{
switch (reason) {
C2S(WOW_REASON_UNSPECIFIED);
C2S(WOW_REASON_NLOD);
C2S(WOW_REASON_AP_ASSOC_LOST);
C2S(WOW_REASON_LOW_RSSI);
C2S(WOW_REASON_DEAUTH_RECVD);
C2S(WOW_REASON_DISASSOC_RECVD);
C2S(WOW_REASON_GTK_HS_ERR);
C2S(WOW_REASON_EAP_REQ);
C2S(WOW_REASON_FOURWAY_HS_RECV);
C2S(WOW_REASON_TIMER_INTR_RECV);
C2S(WOW_REASON_PATTERN_MATCH_FOUND);
C2S(WOW_REASON_RECV_MAGIC_PATTERN);
C2S(WOW_REASON_P2P_DISC);
C2S(WOW_REASON_WLAN_HB);
C2S(WOW_REASON_CSA_EVENT);
C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
C2S(WOW_REASON_AUTH_REQ_RECV);
C2S(WOW_REASON_ASSOC_REQ_RECV);
C2S(WOW_REASON_HTT_EVENT);
C2S(WOW_REASON_RA_MATCH);
C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
C2S(WOW_REASON_IOAC_MAGIC_EVENT);
C2S(WOW_REASON_IOAC_SHORT_EVENT);
C2S(WOW_REASON_IOAC_EXTEND_EVENT);
C2S(WOW_REASON_IOAC_TIMER_EVENT);
C2S(WOW_REASON_ROAM_HO);
C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
C2S(WOW_REASON_BEACON_RECV);
C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
C2S(WOW_REASON_DEBUG_TEST);
default:
return NULL;
}
}
#undef C2S
struct wmi_wow_ev_arg {
u32 vdev_id;
u32 flag;
enum wmi_wow_wake_reason wake_reason;
u32 data_len;
};
#define WOW_MIN_PATTERN_SIZE 1
#define WOW_MAX_PATTERN_SIZE 148
#define WOW_MAX_PKT_OFFSET 128
enum wmi_tdls_state {
WMI_TDLS_DISABLE,
WMI_TDLS_ENABLE_PASSIVE,
WMI_TDLS_ENABLE_ACTIVE,
};
enum wmi_tdls_peer_state {
WMI_TDLS_PEER_STATE_PEERING,
WMI_TDLS_PEER_STATE_CONNECTED,
WMI_TDLS_PEER_STATE_TEARDOWN,
};
struct wmi_tdls_peer_update_cmd_arg {
u32 vdev_id;
enum wmi_tdls_peer_state peer_state;
u8 addr[ETH_ALEN];
};
#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
struct wmi_tdls_peer_capab_arg {
u8 peer_uapsd_queues;
u8 peer_max_sp;
u32 buff_sta_support;
u32 off_chan_support;
u32 peer_curr_operclass;
u32 self_curr_operclass;
u32 peer_chan_len;
u32 peer_operclass_len;
u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
u32 is_peer_responder;
u32 pref_offchan_num;
u32 pref_offchan_bw;
};
struct ath10k; struct ath10k;
struct ath10k_vif; struct ath10k_vif;
struct ath10k_fw_stats_pdev; struct ath10k_fw_stats_pdev;
......
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mac.h"
#include <net/mac80211.h>
#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-ops.h"
static const struct wiphy_wowlan_support ath10k_wowlan_support = {
.flags = WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_MAGIC_PKT,
.pattern_min_len = WOW_MIN_PATTERN_SIZE,
.pattern_max_len = WOW_MAX_PATTERN_SIZE,
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
int i, ret;
for (i = 0; i < WOW_EVENT_MAX; i++) {
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
if (ret) {
ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
for (i = 0; i < ar->wow.max_num_patterns; i++) {
ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
if (ret) {
ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
i, arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_wow_cleanup(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_wow_vif_cleanup(arvif);
if (ret) {
ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
int ret, i;
unsigned long wow_mask = 0;
struct ath10k *ar = arvif->ar;
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int pattern_id = 0;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
/* fall through */
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
__set_bit(WOW_HTT_EVENT, &wow_mask);
__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
break;
case WMI_VDEV_TYPE_STA:
if (wowlan->disconnect) {
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
__set_bit(WOW_BMISS_EVENT, &wow_mask);
__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
}
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
break;
default:
break;
}
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
int j;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
pattern_id,
patterns[i].pattern,
bitmask,
patterns[i].pattern_len,
patterns[i].pkt_offset);
if (ret) {
ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
pattern_id,
arvif->vdev_id, ret);
return ret;
}
pattern_id++;
__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
}
for (i = 0; i < WOW_EVENT_MAX; i++) {
if (!test_bit(i, &wow_mask))
continue;
ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
if (ret) {
ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
wow_wakeup_event(i), arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
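The pattern loop above expands cfg80211's packed byte-mask (one bit per pattern byte) into the byte-per-byte mask that the WMI pattern command takes (0xff for bytes that must match, 0x00 for wildcards). A small self-contained sketch of that expansion, outside the driver and with hypothetical names:
#include <stddef.h>
#include <stdint.h>
/* Expand a packed mask (bit j set => pattern byte j must match) into a
 * byte-per-byte mask: 0xff = compare this byte, 0x00 = don't care.
 */
static void expand_wow_mask(const uint8_t *packed, size_t pattern_len,
			    uint8_t *bitmask)
{
	size_t j;
	for (j = 0; j < pattern_len; j++)
		bitmask[j] = (packed[j / 8] & (1u << (j % 8))) ? 0xff : 0x00;
}
/* e.g. packed = { 0x05 }, pattern_len = 4  =>  bitmask = ff 00 ff 00 */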
static int ath10k_wow_set_wakeups(struct ath10k *ar,
struct cfg80211_wowlan *wowlan)
{
struct ath10k_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_wow_enable(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->target_suspend);
ret = ath10k_wmi_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "timed out while waiting for suspend completion\n");
return -ETIMEDOUT;
}
return 0;
}
static int ath10k_wow_wakeup(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->wow.wakeup_completed);
ret = ath10k_wmi_wow_host_wakeup_ind(ar);
if (ret) {
ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
return -ETIMEDOUT;
}
return 0;
}
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
{
struct ath10k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
ar->fw_features))) {
ret = 1;
goto exit;
}
ret = ath10k_wow_cleanup(ar);
if (ret) {
ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
ret);
goto exit;
}
ret = ath10k_wow_set_wakeups(ar, wowlan);
if (ret) {
ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
ret);
goto cleanup;
}
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);
goto cleanup;
}
ret = ath10k_hif_suspend(ar);
if (ret) {
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
goto wakeup;
}
goto exit;
wakeup:
ath10k_wow_wakeup(ar);
cleanup:
ath10k_wow_cleanup(ar);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret;
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
ar->fw_features))) {
ret = 1;
goto exit;
}
ret = ath10k_hif_resume(ar);
if (ret) {
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
goto exit;
}
ret = ath10k_wow_wakeup(ar);
if (ret)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
int ath10k_wow_init(struct ath10k *ar)
{
if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
return 0;
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
return -EINVAL;
ar->wow.wowlan_support = ath10k_wowlan_support;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
return 0;
}
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _WOW_H_
#define _WOW_H_
struct ath10k_wow {
u32 max_num_patterns;
struct completion wakeup_completed;
struct wiphy_wowlan_support wowlan_support;
};
#ifdef CONFIG_PM
int ath10k_wow_init(struct ath10k *ar);
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath10k_wow_op_resume(struct ieee80211_hw *hw);
#else
static inline int ath10k_wow_init(struct ath10k *ar)
{
return 0;
}
#endif /* CONFIG_PM */
#endif /* _WOW_H_ */
...@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet { ...@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
} __packed; } __packed;
#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet)) #define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
#define SPECTRAL_HT20_SAMPLE_LEN (sizeof(struct ath_ht20_mag_info) +\
SPECTRAL_HT20_NUM_BINS)
/* Dynamic 20/40 mode: /* Dynamic 20/40 mode:
* *
...@@ -101,6 +103,10 @@ struct ath_spec_scan_priv { ...@@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
}; };
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet)) #define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
#define SPECTRAL_HT20_40_SAMPLE_LEN (sizeof(struct ath_ht20_40_mag_info) +\
SPECTRAL_HT20_40_NUM_BINS)
#define SPECTRAL_SAMPLE_MAX_LEN SPECTRAL_HT20_40_SAMPLE_LEN
/* grabs the max magnitude from the all/upper/lower bins */ /* grabs the max magnitude from the all/upper/lower bins */
static inline u16 spectral_max_magnitude(u8 *bins) static inline u16 spectral_max_magnitude(u8 *bins)
...@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins) ...@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
} }
/* return the max magnitude from the all/upper/lower bins */ /* return the max magnitude from the all/upper/lower bins */
static inline u8 spectral_max_index(u8 *bins) static inline u8 spectral_max_index(u8 *bins, int num_bins)
{ {
s8 m = (bins[2] & 0xfc) >> 2; s8 m = (bins[2] & 0xfc) >> 2;
u8 zero_idx = num_bins / 2;
/* TODO: this still doesn't always report the right values ... */ /* It's a 5 bit signed int, remove its sign and use one's
if (m > 32) * complement interpretation to add the sign back to the 8
* bit int
*/
if (m & 0x20) {
m &= ~0x20;
m |= 0xe0; m |= 0xe0;
else }
m &= ~0xe0;
/* Bring the zero point to the beginning
* instead of the middle so that we can use
* it for array lookup and so that we don't deal
* with negative values later
*/
m += zero_idx;
/* Sanity check to make sure index is within bounds */
if (m < 0 || m > num_bins - 1)
m = 0;
return m + 29; return m;
} }
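The rework above sign-extends the raw maximum-magnitude index from the FFT report and shifts it so the DC bin sits at the start of the array. A standalone sketch of the same steps with a worked value; the 56-bin HT20 count used in the example is an assumption made here for illustration:
#include <stdint.h>
/* Mirror of the sign-extend + re-center logic above (illustration only). */
static int recenter_max_index(uint8_t raw, int num_bins)
{
	int8_t m = raw & 0x3f;		/* 6-bit field from the report */
	int zero_idx = num_bins / 2;
	int idx;
	if (m & 0x20) {			/* negative: extend sign to 8 bits */
		m &= ~0x20;
		m |= 0xe0;
	}
	idx = m + zero_idx;		/* move the zero point to index 0 */
	if (idx < 0 || idx > num_bins - 1)
		idx = 0;		/* clamp out-of-range reports */
	return idx;
}
/* e.g. with num_bins = 56: raw 0x3d (-3) -> 25, raw 0x03 (+3) -> 31 */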
/* return the bitmap weight from the all/upper/lower bins */ /* return the bitmap weight from the all/upper/lower bins */
......
...@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv) ...@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
priv->spec_priv.ah = priv->ah; priv->spec_priv.ah = priv->ah;
priv->spec_priv.spec_config.enabled = 0; priv->spec_priv.spec_config.enabled = 0;
priv->spec_priv.spec_config.short_repeat = false; priv->spec_priv.spec_config.short_repeat = true;
priv->spec_priv.spec_config.count = 8; priv->spec_priv.spec_config.count = 8;
priv->spec_priv.spec_config.endless = false; priv->spec_priv.spec_config.endless = false;
priv->spec_priv.spec_config.period = 0x12; priv->spec_priv.spec_config.period = 0x12;
......
...@@ -41,30 +41,31 @@ struct radar_types { ...@@ -41,30 +41,31 @@ struct radar_types {
/* percentage on ppb threshold to trigger detection */ /* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50 #define MIN_PPB_THRESH 50
#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100) #define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
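PPB_THRESH_RATE() applies a per-pattern percentage to the pulses-per-burst count using integer arithmetic; the old PPB_THRESH() is now just the 50% case. A couple of hedged worked values (the 18/29 pair matches JP pattern 0 further down):
/* Worked values for the macro above (integer division):
 *
 *   PPB_THRESH_RATE(18, 50) = (18 * 50 + 100 - 50) / 100 = 950 / 100 = 9
 *   PPB_THRESH_RATE(18, 29) = (18 * 29 + 100 - 29) / 100 = 593 / 100 = 5
 *
 * so a pattern with ppb = 18 needs 9 matching pulses at the default 50%
 * rate, but only 5 at a 29% rate.
 */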
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF) #define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
/* percentage of pulse width tolerance */ /* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5 #define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100) #define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100) #define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \ #define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \ { \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \ (PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \ (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, \ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
} }
/* radar types as defined by ETSI EN-301-893 v1.5.1 */ /* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = { static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18), ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10), ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15), ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25), ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20), ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10), ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15), ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
}; };
static const struct radar_types etsi_radar_types_v15 = { static const struct radar_types etsi_radar_types_v15 = {
...@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = { ...@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15, .radar_types = etsi_radar_ref_types_v15,
}; };
#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \ #define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \ { \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \ PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH(PPB), PRI_TOLERANCE, \ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
} }
/* FCC radar types released on August 14, 2014.
 * Type 1 PRI values are randomly selected within the range of 518 and 3066.
 * Dividing them into 3 groups is good enough for both radar detection and
 * avoiding false detection, based on practical test results
 * collected for more than a year.
*/
static const struct radar_detector_specs fcc_radar_ref_types[] = { static const struct radar_detector_specs fcc_radar_ref_types[] = {
FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18), FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
FCC_PATTERN(1, 0, 5, 150, 230, 1, 23), FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
FCC_PATTERN(2, 6, 10, 200, 500, 1, 16), FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
FCC_PATTERN(3, 11, 20, 200, 500, 1, 12), FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1), FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
FCC_PATTERN(5, 0, 1, 333, 333, 1, 9), FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
}; };
static const struct radar_types fcc_radar_types = { static const struct radar_types fcc_radar_types = {
...@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = { ...@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
.radar_types = fcc_radar_ref_types, .radar_types = fcc_radar_ref_types,
}; };
#define JP_PATTERN FCC_PATTERN #define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
}
static const struct radar_detector_specs jp_radar_ref_types[] = { static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18), JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18), JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18), JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18), JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23), JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16), JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12), JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20), JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9), JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
}; };
static const struct radar_types jp_radar_types = { static const struct radar_types jp_radar_types = {
......
...@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats { ...@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
* @freq: channel frequency in MHz * @freq: channel frequency in MHz
* @width: pulse duration in us * @width: pulse duration in us
* @rssi: rssi of radar event * @rssi: rssi of radar event
* @chirp: chirp detected in pulse
*/ */
struct pulse_event { struct pulse_event {
u64 ts; u64 ts;
u16 freq; u16 freq;
u8 width; u8 width;
u8 rssi; u8 rssi;
bool chirp;
}; };
/** /**
...@@ -59,6 +61,7 @@ struct pulse_event { ...@@ -59,6 +61,7 @@ struct pulse_event {
* @ppb: pulses per bursts for this type * @ppb: pulses per bursts for this type
* @ppb_thresh: number of pulses required to trigger detection * @ppb_thresh: number of pulses required to trigger detection
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us] * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
* @chirp: chirp required for the radar pattern
*/ */
struct radar_detector_specs { struct radar_detector_specs {
u8 type_id; u8 type_id;
...@@ -70,6 +73,7 @@ struct radar_detector_specs { ...@@ -70,6 +73,7 @@ struct radar_detector_specs {
u8 ppb; u8 ppb;
u8 ppb_thresh; u8 ppb_thresh;
u8 max_pri_tolerance; u8 max_pri_tolerance;
bool chirp;
}; };
/** /**
......
...@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de, ...@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if ((ts - de->last_ts) < rs->max_pri_tolerance) if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */ /* if delta to last pulse is too short, don't use this pulse */
return NULL; return NULL;
/* radar detector spec requires a chirp, but none was detected */
if (rs->chirp && rs->chirp != event->chirp)
return NULL;
de->last_ts = ts; de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts); max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
......
...@@ -1011,6 +1011,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) ...@@ -1011,6 +1011,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
return 0; return 0;
} }
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
/* runtime-pm powers off the device */
pm_runtime_forbid(host->parent);
/* avoid removal detection upon resume */
host->caps |= MMC_CAP_NONREMOVABLE;
}
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{ {
struct sdio_func *func; struct sdio_func *func;
...@@ -1076,7 +1084,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) ...@@ -1076,7 +1084,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV; ret = -ENODEV;
goto out; goto out;
} }
pm_runtime_forbid(host->parent); brcmf_sdiod_host_fixup(host);
out: out:
if (ret) if (ret)
brcmf_sdiod_remove(sdiodev); brcmf_sdiod_remove(sdiodev);
...@@ -1246,15 +1254,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev) ...@@ -1246,15 +1254,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
brcmf_sdiod_freezer_on(sdiodev); brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0); brcmf_sdio_wd_timer(sdiodev->bus, 0);
if (sdiodev->wowl_enabled) {
sdio_flags = MMC_PM_KEEP_POWER; sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->wowl_enabled) {
if (sdiodev->pdata->oob_irq_supported) if (sdiodev->pdata->oob_irq_supported)
enable_irq_wake(sdiodev->pdata->oob_irq_nr); enable_irq_wake(sdiodev->pdata->oob_irq_nr);
else else
sdio_flags = MMC_PM_WAKE_SDIO_IRQ; sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags)) if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags); brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
}
return 0; return 0;
} }
......
...@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = { ...@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
RATETAB_ENT(BRCM_RATE_54M, 0), RATETAB_ENT(BRCM_RATE_54M, 0),
}; };
#define wl_a_rates (__wl_rates + 4)
#define wl_a_rates_size 8
#define wl_g_rates (__wl_rates + 0) #define wl_g_rates (__wl_rates + 0)
#define wl_g_rates_size 12 #define wl_g_rates_size ARRAY_SIZE(__wl_rates)
#define wl_a_rates (__wl_rates + 4)
#define wl_a_rates_size (wl_g_rates_size - 4)
#define CHAN2G(_channel, _freq) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static struct ieee80211_channel __wl_2ghz_channels[] = {
CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
CHAN2G(13, 2472), CHAN2G(14, 2484)
};
static struct ieee80211_channel __wl_5ghz_channels[] = {
CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
};
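The 5 GHz entries derive their center frequency from the channel number, while the 2.4 GHz table carries explicit frequencies because channel 14 (2484 MHz) does not follow the regular 5 MHz spacing. A throwaway sketch of the CHAN5G() arithmetic (hypothetical helper, not part of the driver):
/* Center frequency for a 5 GHz channel number, as in CHAN5G() above. */
static inline int chan5g_center_freq(int channel)
{
	return 5000 + 5 * channel;	/* e.g. 36 -> 5180 MHz, 144 -> 5720 MHz */
}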
/* Band templates duplicated per wiphy. The channel info /* Band templates duplicated per wiphy. The channel info
* is filled in after querying the device. * above is added to the band during setup.
*/ */
static const struct ieee80211_supported_band __wl_band_2ghz = { static const struct ieee80211_supported_band __wl_band_2ghz = {
.band = IEEE80211_BAND_2GHZ, .band = IEEE80211_BAND_2GHZ,
...@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = { ...@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
.n_bitrates = wl_g_rates_size, .n_bitrates = wl_g_rates_size,
}; };
static const struct ieee80211_supported_band __wl_band_5ghz_a = { static const struct ieee80211_supported_band __wl_band_5ghz = {
.band = IEEE80211_BAND_5GHZ, .band = IEEE80211_BAND_5GHZ,
.bitrates = wl_a_rates, .bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size, .n_bitrates = wl_a_rates_size,
...@@ -5253,40 +5287,6 @@ brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time, ...@@ -5253,40 +5287,6 @@ brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time,
return err; return err;
} }
/* Filter the list of channels received from firmware counting only
* the 20MHz channels. The wiphy band data only needs those which get
* flagged to indicate if they can take part in higher bandwidth.
*/
static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
struct brcmf_chanspec_list *chlist,
u32 chcnt[])
{
u32 total = le32_to_cpu(chlist->count);
struct brcmu_chan ch;
int i;
for (i = 0; i < total; i++) {
ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
cfg->d11inf.decchspec(&ch);
/* Firmware gives a ordered list. We skip non-20MHz
* channels is 2G. For 5G we can abort upon reaching
* a non-20MHz channel in the list.
*/
if (ch.bw != BRCMU_CHAN_BW_20) {
if (ch.band == BRCMU_CHAN_BAND_5G)
break;
else
continue;
}
if (ch.band == BRCMU_CHAN_BAND_2G)
chcnt[0] += 1;
else if (ch.band == BRCMU_CHAN_BAND_5G)
chcnt[1] += 1;
}
}
static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel, static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
struct brcmu_chan *ch) struct brcmu_chan *ch)
{ {
...@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ...@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j; u32 i, j;
u32 total; u32 total;
u32 chaninfo; u32 chaninfo;
u32 chcnt[2] = { 0, 0 };
u32 index; u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
...@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ...@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
goto fail_pbuf; goto fail_pbuf;
} }
brcmf_count_20mhz_channels(cfg, list, chcnt);
wiphy = cfg_to_wiphy(cfg); wiphy = cfg_to_wiphy(cfg);
if (chcnt[0]) { band = wiphy->bands[IEEE80211_BAND_2GHZ];
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz), if (band)
GFP_KERNEL); for (i = 0; i < band->n_channels; i++)
if (band == NULL) { band->channels[i].flags = IEEE80211_CHAN_DISABLED;
err = -ENOMEM; band = wiphy->bands[IEEE80211_BAND_5GHZ];
goto fail_pbuf; if (band)
} for (i = 0; i < band->n_channels; i++)
band->channels = kcalloc(chcnt[0], sizeof(*channel), band->channels[i].flags = IEEE80211_CHAN_DISABLED;
GFP_KERNEL);
if (band->channels == NULL) {
kfree(band);
err = -ENOMEM;
goto fail_pbuf;
}
band->n_channels = 0;
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
if (chcnt[1]) {
band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
GFP_KERNEL);
if (band == NULL) {
err = -ENOMEM;
goto fail_band2g;
}
band->channels = kcalloc(chcnt[1], sizeof(*channel),
GFP_KERNEL);
if (band->channels == NULL) {
kfree(band);
err = -ENOMEM;
goto fail_band2g;
}
band->n_channels = 0;
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
}
total = le32_to_cpu(list->count); total = le32_to_cpu(list->count);
for (i = 0; i < total; i++) { for (i = 0; i < total; i++) {
...@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ...@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec); brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue; continue;
} }
if (!band)
continue;
if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) && if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40) ch.bw == BRCMU_CHAN_BW_40)
continue; continue;
...@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ...@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
} else if (ch.bw == BRCMU_CHAN_BW_40) { } else if (ch.bw == BRCMU_CHAN_BW_40) {
brcmf_update_bw40_channel_flag(&channel[index], &ch); brcmf_update_bw40_channel_flag(&channel[index], &ch);
} else { } else {
/* disable other bandwidths for now as mentioned /* enable the channel and disable other bandwidths
* order ensures they are enabled for subsequent * for now as mentioned order ensures they are enabled
* chanspecs. * for subsequent chanspecs.
*/ */
channel[index].flags = IEEE80211_CHAN_NO_HT40 | channel[index].flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ; IEEE80211_CHAN_NO_80MHZ;
...@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ...@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_IR; IEEE80211_CHAN_NO_IR;
} }
} }
if (index == band->n_channels)
band->n_channels++;
} }
kfree(pbuf);
return 0;
fail_band2g:
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
fail_pbuf: fail_pbuf:
kfree(pbuf); kfree(pbuf);
return err; return err;
...@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy) ...@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{ {
struct ieee80211_supported_band *band;
struct ieee80211_iface_combination ifc_combo; struct ieee80211_iface_combination ifc_combo;
__le32 bandlist[3];
u32 n_bands;
int err, i;
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
...@@ -5812,6 +5783,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) ...@@ -5812,6 +5783,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000; wiphy->max_remain_on_channel_duration = 5000;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
brcmf_wiphy_pno_params(wiphy); brcmf_wiphy_pno_params(wiphy);
/* vendor commands/events support */ /* vendor commands/events support */
...@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) ...@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
brcmf_wiphy_wowl_params(wiphy); brcmf_wiphy_wowl_params(wiphy);
return brcmf_setup_wiphybands(wiphy); err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
sizeof(bandlist));
if (err) {
brcmf_err("could not obtain band info: err=%d\n", err);
return err;
}
/* first entry in bandlist is number of bands */
n_bands = le32_to_cpu(bandlist[0]);
for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
GFP_KERNEL);
if (!band)
return -ENOMEM;
band->channels = kmemdup(&__wl_2ghz_channels,
sizeof(__wl_2ghz_channels),
GFP_KERNEL);
if (!band->channels) {
kfree(band);
return -ENOMEM;
}
band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
GFP_KERNEL);
if (!band)
return -ENOMEM;
band->channels = kmemdup(&__wl_5ghz_channels,
sizeof(__wl_5ghz_channels),
GFP_KERNEL);
if (!band->channels) {
kfree(band);
return -ENOMEM;
}
band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
wiphy->bands[IEEE80211_BAND_5GHZ] = band;
}
}
err = brcmf_setup_wiphybands(wiphy);
return err;
} }
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
...@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, ...@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
memset(&ccreq, 0, sizeof(ccreq)); memset(&ccreq, 0, sizeof(ccreq));
ccreq.rev = cpu_to_le32(-1); ccreq.rev = cpu_to_le32(-1);
memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2)); memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq)); if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
brcmf_err("firmware rejected country setting\n");
return;
}
brcmf_setup_wiphybands(wiphy);
} }
static void brcmf_free_wiphy(struct wiphy *wiphy) static void brcmf_free_wiphy(struct wiphy *wiphy)
{ {
if (!wiphy)
return;
kfree(wiphy->iface_combinations); kfree(wiphy->iface_combinations);
if (wiphy->bands[IEEE80211_BAND_2GHZ]) { if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels); kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
......