Commit f95cd524 authored by Kalle Valo

Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.20. Major changes:

ath10k

* support NET_DETECT WoWLAN feature

* wcn3990 basic functionality now working after we got QMI support
parents 6bfa6975 ba94c753
@@ -56,6 +56,11 @@ Optional properties:
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
optional "supply-name" is "vdd-0.8-cx-mx".
- memory-region:
Usage: optional
Value type: <phandle>
Definition: reference to the reserved-memory for the msa region
used by the wifi firmware running in Q6.
Example (to supply the calibration data alone):
@@ -149,4 +154,5 @@ wifi@18000000 {
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
memory-region = <&wifi_msa_mem>;
};
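For reference, a minimal sketch of the reserved-memory node that the memory-region property above could point at. This is not part of the patch: the label matches the example, the size matches the 1 MiB msa_size used by the SNOC driver below, but the base address is an illustrative placeholder.

/* illustrative reserved-memory carve-out for the WLAN MSA region */
reserved-memory {
	#address-cells = <2>;
	#size-cells = <2>;
	ranges;

	wifi_msa_mem: wifi-msa@96700000 {
		/* placeholder address; 0x100000 (1 MiB) mirrors drv_priv.msa_size */
		reg = <0x0 0x96700000 0x0 0x100000>;
		no-map;
	};
};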
@@ -44,6 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not
......
@@ -36,7 +36,9 @@ obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
-ath10k_snoc-y += snoc.o
ath10k_snoc-y += qmi.o \
		 qmi_wlfw_v01.o \
		 snoc.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
@@ -989,7 +989,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data, data_len);
}
-static void ath10k_core_free_board_files(struct ath10k *ar)
void ath10k_core_free_board_files(struct ath10k *ar)
{
if (!IS_ERR(ar->normal_mode_fw.board))
release_firmware(ar->normal_mode_fw.board);
@@ -1004,6 +1004,7 @@ static void ath10k_core_free_board_files(struct ath10k *ar)
ar->normal_mode_fw.ext_board_data = NULL;
ar->normal_mode_fw.ext_board_len = 0;
}
EXPORT_SYMBOL(ath10k_core_free_board_files);
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
@@ -1331,6 +1332,14 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
goto out;
}
if (ar->id.qmi_ids_valid) {
scnprintf(name, name_len,
"bus=%s,qmi-board-id=%x",
ath10k_bus_str(ar->hif.bus),
ar->id.qmi_board_id);
goto out;
}
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1359,7 +1368,7 @@ static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
return -1;
}
-static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
{
char boardname[100], fallback_boardname[100];
int ret;
@@ -1407,6 +1416,7 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
return 0;
}
EXPORT_SYMBOL(ath10k_core_fetch_board_file);
static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
{
......
@@ -951,6 +951,7 @@ struct ath10k {
/* protected by conf_mutex */
u8 ps_state_enable;
bool nlo_enabled;
bool p2p;
struct {
@@ -988,6 +989,8 @@ struct ath10k {
u32 subsystem_device;
bool bmi_ids_valid;
bool qmi_ids_valid;
u32 qmi_board_id;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
@@ -1215,5 +1218,7 @@ void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */
@@ -2421,7 +2421,7 @@ static ssize_t ath10k_write_ps_state_enable(struct file *file,
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
-if (ps_state_enable > 1 || ps_state_enable < 0)
if (ps_state_enable > 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
......
@@ -44,6 +44,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_QMI = 0x00200000,
ATH10K_DBG_ANY = 0xffffffff,
};
......
@@ -2680,8 +2680,6 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
-if (mcs < 0)
-return;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
@@ -2753,7 +2751,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-u8 rate = 0, rate_idx = 0, sgi;
u8 rate = 0, sgi;
s8 rate_idx = 0;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
......
@@ -164,7 +164,7 @@ static int ath10k_mac_get_rate_hw_value(int bitrate)
if (ath10k_mac_bitrate_is_cck(bitrate))
hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
-for (i = 0; i < sizeof(ath10k_rates); i++) {
for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
if (ath10k_rates[i].bitrate == bitrate)
return hw_value_prefix | ath10k_rates[i].hw_value;
}
@@ -4697,6 +4697,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
if (ret) {
ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
goto err_core_stop;
}
}
if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
ret = ath10k_wmi_adaptive_qcs(ar, true);
if (ret) {
@@ -5682,22 +5690,22 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
return;
}
sband = ar->hw->wiphy->bands[def.chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
mutex_unlock(&ar->conf_mutex);
return;
}
vdev_param = ar->wmi.vdev_param->mgmt_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
@@ -6855,9 +6863,20 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif;
u32 bitmap;
-if (drop)
if (drop) {
if (vif->type == NL80211_IFTYPE_STATION) {
bitmap = ~(1 << WMI_MGMT_TID);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath10k_wmi_peer_flush(ar, arvif->vdev_id,
arvif->bssid, bitmap);
}
}
return;
}
mutex_lock(&ar->conf_mutex);
ath10k_mac_wait_tx_complete(ar);
@@ -8493,6 +8512,18 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
ar->hw->wiphy->max_sched_scan_reqs = 1;
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
ar->hw->wiphy->max_sched_scan_plan_interval =
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->txq_data_size = sizeof(struct ath10k_txq);
@@ -8542,9 +8573,10 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
-if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
-NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
/*
 * on LL hardware queues are managed entirely by the FW
@@ -8635,12 +8667,6 @@ int ath10k_mac_register(struct ath10k *ar)
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
-ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
-if (ret) {
-ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
-goto err_dfs_detector_exit;
-}
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
......
@@ -1071,10 +1071,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
-unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
-u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1088,9 +1087,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
-orig_nbytes = nbytes;
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
-orig_nbytes,
alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1098,9 +1098,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
-/* Copy caller's data to allocated DMA buf */
-memcpy(data_buf, data, orig_nbytes);
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1113,12 +1110,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
-remaining_bytes = orig_nbytes;
remaining_bytes = nbytes;
-ce_data = ce_data_base;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Copy caller's data to allocated DMA buf */
memcpy(data_buf, data, nbytes);
/* Set up to receive directly into Target(!) address */
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
@@ -1128,7 +1127,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
-ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1171,12 +1170,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
remaining_bytes -= nbytes;
address += nbytes;
-ce_data += nbytes;
data += nbytes;
}
done:
if (data_buf) {
-dma_free_coherent(ar->dev, orig_nbytes, data_buf,
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
......
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _ATH10K_QMI_H_
#define _ATH10K_QMI_H_
#include <linux/soc/qcom/qmi.h>
#include <linux/qrtr.h>
#include "qmi_wlfw_v01.h"
#define MAX_NUM_MEMORY_REGIONS 2
#define MAX_TIMESTAMP_LEN 32
#define MAX_BUILD_ID_LEN 128
#define MAX_NUM_CAL_V01 5
enum ath10k_qmi_driver_event_type {
ATH10K_QMI_EVENT_SERVER_ARRIVE,
ATH10K_QMI_EVENT_SERVER_EXIT,
ATH10K_QMI_EVENT_FW_READY_IND,
ATH10K_QMI_EVENT_FW_DOWN_IND,
ATH10K_QMI_EVENT_MSA_READY_IND,
ATH10K_QMI_EVENT_MAX,
};
struct ath10k_msa_mem_info {
phys_addr_t addr;
u32 size;
bool secure;
};
struct ath10k_qmi_chip_info {
u32 chip_id;
u32 chip_family;
};
struct ath10k_qmi_board_info {
u32 board_id;
};
struct ath10k_qmi_soc_info {
u32 soc_id;
};
struct ath10k_qmi_cal_data {
u32 cal_id;
u32 total_size;
u8 *data;
};
struct ath10k_tgt_pipe_cfg {
__le32 pipe_num;
__le32 pipe_dir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
struct ath10k_svc_pipe_cfg {
__le32 service_id;
__le32 pipe_dir;
__le32 pipe_num;
};
struct ath10k_shadow_reg_cfg {
__le16 ce_id;
__le16 reg_offset;
};
struct ath10k_qmi_wlan_enable_cfg {
u32 num_ce_tgt_cfg;
struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
u32 num_ce_svc_pipe_cfg;
struct ath10k_svc_pipe_cfg *ce_svc_cfg;
u32 num_shadow_reg_cfg;
struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
};
struct ath10k_qmi_driver_event {
struct list_head list;
enum ath10k_qmi_driver_event_type type;
void *data;
};
struct ath10k_qmi {
struct ath10k *ar;
struct qmi_handle qmi_hdl;
struct sockaddr_qrtr sq;
struct work_struct event_work;
struct workqueue_struct *event_wq;
struct list_head event_list;
spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
dma_addr_t msa_pa;
u32 msa_mem_size;
void *msa_va;
struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info;
char fw_build_id[MAX_BUILD_ID_LEN + 1];
u32 fw_version;
bool fw_ready;
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
struct ath10k_qmi_wlan_enable_cfg *config,
enum wlfw_driver_mode_enum_v01 mode,
const char *version);
int ath10k_qmi_wlan_disable(struct ath10k *ar);
int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
int ath10k_qmi_deinit(struct ath10k *ar);
#endif /* ATH10K_QMI_H */
@@ -67,6 +67,72 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
.dma_mask = DMA_BIT_MASK(37),
.msa_size = 0x100000,
};
#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
{
.ce_id = __cpu_to_le16(0),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(3),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(4),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(5),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(7),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(1),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(2),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(7),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(8),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(9),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(10),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(11),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
};
static struct ce_attr host_ce_config_wlan[] = {
@@ -176,6 +242,128 @@ static struct ce_attr host_ce_config_wlan[] = {
},
};
static struct ce_pipe_config target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host HTT (HIF->HTT) */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(1024),
.nbytes_max = __cpu_to_le32(64),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(4),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE8 Target to uMC */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(0),
.reserved = __cpu_to_le32(0),
},
/* CE9 target->host HTT */
{
.pipenum = __cpu_to_le32(9),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE10 target->host HTT */
{
.pipenum = __cpu_to_le32(10),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE11 target autonomous qcache memcpy */
{
.pipenum = __cpu_to_le32(11),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
};
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
@@ -766,11 +954,47 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
-return 0;
struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
struct ath10k_qmi_wlan_enable_cfg cfg;
enum wlfw_driver_mode_enum_v01 mode;
int pipe_num;
for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
tgt_cfg[pipe_num].pipe_num =
target_ce_config_wlan[pipe_num].pipenum;
tgt_cfg[pipe_num].pipe_dir =
target_ce_config_wlan[pipe_num].pipedir;
tgt_cfg[pipe_num].nentries =
target_ce_config_wlan[pipe_num].nentries;
tgt_cfg[pipe_num].nbytes_max =
target_ce_config_wlan[pipe_num].nbytes_max;
tgt_cfg[pipe_num].flags =
target_ce_config_wlan[pipe_num].flags;
tgt_cfg[pipe_num].reserved = 0;
}
cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
sizeof(struct ath10k_tgt_pipe_cfg);
cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
&tgt_cfg;
cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
sizeof(struct ath10k_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
sizeof(struct ath10k_shadow_reg_cfg);
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
mode = QMI_WLFW_MISSION_V01;
return ath10k_qmi_wlan_enable(ar, &cfg, mode,
NULL);
}
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
ath10k_qmi_wlan_disable(ar);
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -957,6 +1181,32 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
return ret;
}
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct ath10k_bus_params bus_params;
int ret;
switch (type) {
case ATH10K_QMI_EVENT_FW_READY_IND:
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ar_snoc->target_info.soc_version;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n",
ret);
}
break;
case ATH10K_QMI_EVENT_FW_DOWN_IND:
break;
default:
ath10k_err(ar, "invalid fw indication: %llx\n", type);
return -EINVAL;
}
return 0;
}
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1281,9 +1531,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
u32 msa_size;
int ret;
u32 i;
-struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
if (!of_id) {
@@ -1313,6 +1563,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ar = ar;
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
msa_size = drv_data->msa_size;
ret = ath10k_snoc_resource_init(ar);
if (ret) {
@@ -1351,12 +1602,10 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
-bus_params.dev_type = ATH10K_DEV_TYPE_LL;
-bus_params.chip_id = drv_data->hw_rev;
-ret = ath10k_core_register(ar, &bus_params);
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
-ath10k_err(ar, "failed to register driver core: %d\n", ret);
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
-goto err_hw_power_off;
goto err_core_destroy;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
@@ -1364,9 +1613,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
return 0;
-err_hw_power_off:
-ath10k_hw_power_off(ar);
err_free_irq:
ath10k_snoc_free_irq(ar);
@@ -1388,6 +1634,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_hw_power_off(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar);
ath10k_core_destroy(ar);
return 0;
......
@@ -19,10 +19,12 @@
#include "hw.h"
#include "ce.h"
#include "qmi.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
u64 dma_mask;
u32 msa_size;
};
struct snoc_state {
@@ -81,6 +83,7 @@ struct ath10k_snoc {
struct timer_list rx_post_retry;
struct ath10k_wcn3990_vreg_info *vreg;
struct ath10k_wcn3990_clk_info *clk;
struct ath10k_qmi *qmi;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -90,5 +93,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
#endif /* _SNOC_H_ */
@@ -210,6 +210,9 @@ struct wmi_ops {
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno_scan);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
@@ -1360,6 +1363,24 @@ ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_config_pno)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
......
@@ -3441,6 +3441,192 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
return skb;
}
/* Request FW to start PNO operation */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno)
{
struct nlo_configured_parameters *nlo_list;
struct wmi_tlv_wow_nlo_config_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
u16 tlv_len;
size_t len;
void *ptr;
u32 i;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) +
/* TLV place holder for array of structures
* nlo_configured_parameters(nlo_list)
*/
sizeof(*tlv);
/* TLV place holder for array of uint32 channel_list */
len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
WMI_NLO_MAX_CHAN);
len += sizeof(struct nlo_configured_parameters) *
min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
/* wmi_tlv_wow_nlo_config_cmd parameters*/
cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
/* current FW does not support min-max range for dwell time */
cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
if (pno->do_passive_scan)
cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
/* copy scan interval */
cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
if (pno->enable_pno_scan_randomization) {
cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
}
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(len);
ptr += sizeof(*tlv);
nlo_list = ptr;
for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
sizeof(*tlv));
/* copy ssid and it's length */
nlo_list[i].ssid.valid = __cpu_to_le32(true);
nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
memcpy(nlo_list[i].ssid.ssid.ssid,
pno->a_networks[i].ssid.ssid,
__le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
/* copy rssi threshold */
if (pno->a_networks[i].rssi_threshold &&
pno->a_networks[i].rssi_threshold > -300) {
nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
nlo_list[i].rssi_cond.rssi =
__cpu_to_le32(pno->a_networks[i].rssi_threshold);
}
nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
nlo_list[i].bcast_nw_type.bcast_nw_type =
__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
}
ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
/* copy channel info */
cmd->num_of_channels = __cpu_to_le32(min_t(u8,
pno->a_networks[0].channel_count,
WMI_NLO_MAX_CHAN));
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
sizeof(u_int32_t));
ptr += sizeof(*tlv);
channel_list = (__le32 *)ptr;
for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
vdev_id);
return skb;
}
/* Request FW to stop ongoing PNO operation */
static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
u32 vdev_id)
{
struct wmi_tlv_wow_nlo_config_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) +
/* TLV place holder for array of structures
* nlo_configured_parameters(nlo_list)
*/
sizeof(*tlv);
/* TLV place holder for array of uint32 channel_list */
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* nlo_configured_parameters(nlo_list) */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* channel list */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(0);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
if (pno_scan->enable)
return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
else
return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
@@ -3973,6 +4159,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
......
@@ -7068,6 +7068,63 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
#define WMI_PNO_MAX_NETW_CHANNELS 26
#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
/*size based of dot11 declaration without extra IEs as we will not carry those for PNO*/
#define WMI_PNO_MAX_PB_REQ_SIZE 450
#define WMI_PNO_24G_DEFAULT_CH 1
#define WMI_PNO_5G_DEFAULT_CH 36
#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
/* SSID broadcast type */
enum wmi_SSID_bcast_type {
BCAST_UNKNOWN = 0,
BCAST_NORMAL = 1,
BCAST_HIDDEN = 2,
};
struct wmi_network_type {
struct wmi_ssid ssid;
u32 authentication;
u32 encryption;
u32 bcast_nw_type;
u8 channel_count;
u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
s32 rssi_threshold;
} __packed;
struct wmi_pno_scan_req {
u8 enable;
u8 vdev_id;
u8 uc_networks_count;
struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
u32 fast_scan_period;
u32 slow_scan_period;
u8 fast_scan_max_cycles;
bool do_passive_scan;
u32 delay_start_time;
u32 active_min_time;
u32 active_max_time;
u32 passive_min_time;
u32 passive_max_time;
/* mac address randomization attributes */
u32 enable_pno_scan_randomization;
u8 mac_addr[ETH_ALEN];
u8 mac_addr_mask[ETH_ALEN];
} __packed;
enum wmi_host_platform_type {
WMI_HOST_PLATFORM_HIGH_PERF,
WMI_HOST_PLATFORM_LOW_PERF,
......
@@ -755,11 +755,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (main_ant_conf == rx_ant_conf) {
-ANT_STAT_INC(ANT_MAIN, recv_cnt);
ANT_STAT_INC(sc, ANT_MAIN, recv_cnt);
-ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf);
} else {
-ANT_STAT_INC(ANT_ALT, recv_cnt);
ANT_STAT_INC(sc, ANT_ALT, recv_cnt);
-ANT_LNA_INC(ANT_ALT, rx_ant_conf);
ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf);
}
/* Short scan check */
......
@@ -624,9 +624,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
-RX_STAT_INC(rx_spectral_sample_good);
RX_STAT_INC(sc, rx_spectral_sample_good);
else
-RX_STAT_INC(rx_spectral_sample_err);
RX_STAT_INC(sc, rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
@@ -642,9 +642,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
-RX_STAT_INC(rx_spectral_sample_good);
RX_STAT_INC(sc, rx_spectral_sample_good);
else
-RX_STAT_INC(rx_spectral_sample_err);
RX_STAT_INC(sc, rx_spectral_sample_err);
/* Mix the received bins to the /dev/random
 * pool
......
@@ -785,35 +785,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
{
int qnum = txq->axq_qnum;
-TX_STAT_INC(qnum, tx_pkts_all);
TX_STAT_INC(sc, qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (flags & ATH_TX_ERROR)
-TX_STAT_INC(qnum, a_xretries);
TX_STAT_INC(sc, qnum, a_xretries);
else
-TX_STAT_INC(qnum, a_completed);
TX_STAT_INC(sc, qnum, a_completed);
} else {
if (ts->ts_status & ATH9K_TXERR_XRETRY)
-TX_STAT_INC(qnum, xretries);
TX_STAT_INC(sc, qnum, xretries);
else
-TX_STAT_INC(qnum, completed);
TX_STAT_INC(sc, qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FILT)
-TX_STAT_INC(qnum, txerr_filtered);
TX_STAT_INC(sc, qnum, txerr_filtered);
if (ts->ts_status & ATH9K_TXERR_FIFO)
-TX_STAT_INC(qnum, fifo_underrun);
TX_STAT_INC(sc, qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
-TX_STAT_INC(qnum, xtxop);
TX_STAT_INC(sc, qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
-TX_STAT_INC(qnum, timer_exp);
TX_STAT_INC(sc, qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
-TX_STAT_INC(qnum, desc_cfg_err);
TX_STAT_INC(sc, qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
-TX_STAT_INC(qnum, data_underrun);
TX_STAT_INC(sc, qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
-TX_STAT_INC(qnum, delim_underrun);
TX_STAT_INC(sc, qnum, delim_underrun);
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
......
@@ -25,17 +25,17 @@ struct ath_buf;
struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
-#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
#define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0)
-#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0)
-#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
#define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0)
-#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0)
#else
-#define TX_STAT_INC(q, c) do { } while (0)
#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
-#define RX_STAT_INC(c)
#define RX_STAT_INC(sc, c) do { (void)(sc); } while (0)
-#define RESET_STAT_INC(sc, type) do { } while (0)
#define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0)
-#define ANT_STAT_INC(i, c) do { } while (0)
#define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0)
-#define ANT_LNA_INC(i, c) do { } while (0)
#define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0)
#endif
enum ath_reset_type {
......
@@ -809,7 +809,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "TX failed\n");
-TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
goto exit;
}
......
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
#define QCOM_SCM_VMID_WLAN 0x18
#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
......