Commit c6f4075e authored by David S. Miller

Merge tag 'wireless-drivers-for-davem-2018-12-19' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for 4.20

Last set of fixes for 4.20. All of these (except the mt76 fix) are
important fixes to user-reported problems, and they are pretty small in size.

rtlwifi

* fix skb leak

mwifiex

* revert a commit from v4.19 due to problems with locking

mt76

* fix a potential NULL dereference

* add entry to MAINTAINERS

iwlwifi

* fix a firmware crash which was a regression introduced in v4.20-rc4

ath10k

* fix a firmware crash with wcn3990 firmware
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 49ce708b 53884577
@@ -9443,6 +9443,13 @@ F: drivers/media/platform/mtk-vpu/
 F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
 F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
 
+MEDIATEK MT76 WIRELESS LAN DRIVER
+M: Felix Fietkau <nbd@nbd.name>
+M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/mediatek/mt76/
+
 MEDIATEK MT7601U WIRELESS LAN DRIVER
 M: Jakub Kicinski <kubakici@wp.pl>
 L: linux-wireless@vger.kernel.org
......
@@ -2418,6 +2418,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+	struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+	/* all 10.x firmware versions support thermal throttling but don't
+	 * advertise the support via service flags so we have to hardcode
+	 * it here
+	 */
+	switch (fw_file->wmi_op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_10_1:
+	case ATH10K_FW_WMI_OP_VERSION_10_2:
+	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		      const struct ath10k_fw_components *fw)
 {
@@ -2617,6 +2639,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		goto err_hif_stop;
 	}
 
+	status = ath10k_core_compat_services(ar);
+	if (status) {
+		ath10k_err(ar, "compat services failed: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	/* Some firmware revisions do not properly set up hardware rx filter
 	 * registers.
 	 *
......
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
 			    &fops_pktlog_filter);
 
-	debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
-			    &fops_quiet_period);
+	if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+				    &fops_quiet_period);
 
 	debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
 			    &fops_tpc_stats);
......
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return;
+
 	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
 		return;
 
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
 	struct device *hwmon_dev;
 	int ret;
 
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return 0;
+
 	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
 					       &ath10k_thermal_ops);
 
@@ -216,6 +222,9 @@ int ath10k_thermal_register(struct ath10k *ar)
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
+	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+		return;
+
 	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
 	thermal_cooling_device_unregister(ar->thermal.cdev);
 }
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
 	SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
 	       WMI_SERVICE_SPOOF_MAC_SUPPORT,
 	       WMI_TLV_MAX_SERVICE);
+	SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+	       WMI_SERVICE_THERM_THROT,
+	       WMI_TLV_MAX_SERVICE);
 }
 
 #undef SVCMAP
......
@@ -205,6 +205,7 @@ enum wmi_service {
 	WMI_SERVICE_SPOOF_MAC_SUPPORT,
 	WMI_SERVICE_TX_DATA_ACK_RSSI,
 	WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+	WMI_SERVICE_THERM_THROT,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
......
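The ath10k changes above all follow one pattern: the driver keeps a per-device service bitmap (ar->wmi.svc_map), fills it either from firmware-advertised service flags or, for the 10.x firmware families, by hand in ath10k_core_compat_services(), and then touches optional features such as thermal throttling only when the corresponding bit is set. A minimal userspace sketch of that pattern; the names below are hypothetical stand-ins for the ath10k types, not the driver code itself:

#include <stdbool.h>
#include <stdio.h>

enum svc { SVC_THERM_THROT, SVC_MAX };	/* hypothetical service IDs */
enum fw_family { FW_10_X, FW_TLV };	/* hypothetical firmware families */

static bool svc_map[SVC_MAX];

static void compat_services(enum fw_family fam)
{
	/* 10.x firmware supports thermal throttling but never advertises it,
	 * so the bit is set by hand (cf. ath10k_core_compat_services above). */
	if (fam == FW_10_X)
		svc_map[SVC_THERM_THROT] = true;
}

static int thermal_register(void)
{
	if (!svc_map[SVC_THERM_THROT])
		return 0;	/* feature unsupported by firmware: quietly skip it */
	printf("registering cooling device\n");
	return 0;
}

int main(void)
{
	compat_services(FW_10_X);
	return thermal_register();
}

With the bit cleared (e.g. wcn3990 firmware that lacks the service), the register/unregister and debugfs paths become no-ops instead of sending commands the firmware cannot handle.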
@@ -881,6 +881,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	int ret, i, j;
 	u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+	/*
+	 * This command is not supported on earlier firmware versions.
+	 * Unfortunately, we don't have a TLV API flag to rely on, so
+	 * rely on the major version which is in the first byte of
+	 * ucode_ver.
+	 */
+	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+		return 0;
+
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
 	if (ret < 0) {
 		IWL_DEBUG_RADIO(mvm,
......
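The iwlwifi hunk above gates the GEO_TX_POWER_LIMIT command on the firmware's major version because no TLV capability flag exists for it. A hedged, self-contained sketch of the same idea; the byte layout and helper names below are illustrative assumptions, not the iwlwifi macros:

#include <stdint.h>
#include <stdio.h>

/* Assumption for this sketch: the relevant version field sits in the low byte. */
static uint32_t fw_major(uint32_t ucode_ver)
{
	return ucode_ver & 0xff;
}

static int send_geo_tx_power_limit(uint32_t ucode_ver)
{
	if (fw_major(ucode_ver) < 41)
		return 0;	/* command unsupported: skip it instead of crashing the firmware */
	printf("sending GEO_TX_POWER_LIMIT\n");
	return 0;
}

int main(void)
{
	return send_geo_tx_power_limit(36);	/* old firmware: the command is skipped */
}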
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
 				    "Send delba to tid=%d, %pM\n",
 				    tid, rx_reor_tbl_ptr->ta);
 			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
-			return;
+			goto exit;
 		}
 	}
+exit:
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
......
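The mwifiex_11n_delba() change above replaces a duplicated unlock-and-return on the early exit with a jump to a single exit label that drops the lock exactly once. A small userspace analogue of that structure, with a pthread mutex standing in for the kernel spinlock and illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static int tids[4] = { 0, 0, 7, 0 };	/* toy reorder table */

static void delba(int tid)
{
	int i;

	pthread_mutex_lock(&tbl_lock);
	for (i = 0; i < 4; i++) {
		if (tids[i] == tid) {
			printf("send delba for tid=%d\n", tid);
			goto exit;	/* fall through to the single unlock below */
		}
	}
exit:
	pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
	delba(7);
	return 0;
}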
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
 	int pkt_to_send, i;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 		      min((start_win - tbl->start_win), tbl->win_size) :
 		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
 			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (rx_tmp_ptr)
 			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 	}
 
 	tbl->start_win = start_win;
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
 	int i, j, xchg;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		if (!tbl->rx_reorder_ptr[i])
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		if (!tbl->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			break;
+		}
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
+
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_del(&tbl->list);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
+	unsigned long flags;
 
-	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return tbl;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return NULL;
 }
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 		return;
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN))
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
+			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		}
+	}
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
 */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+	struct mwifiex_private *priv = ctx->priv;
+	unsigned long flags;
 	int i;
 
-	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return i;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
 	struct reorder_tmr_cnxt *ctx =
 		from_timer(ctx, t, timer);
 	int start_win, seq_num;
-	unsigned long flags;
 
 	ctx->timer_is_set = false;
-	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
 	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-	if (seq_num < 0) {
-		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+	if (seq_num < 0)
 		return;
-	}
 
 	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
-	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
 	 * If we get a TID, ta pair which is already present dispatch all the
 	 * the packets and move the window size until the ssn
 	 */
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (tbl) {
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	/* if !tbl then create one */
 	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
 	if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 	int prev_start_win, start_win, end_win, win_size;
 	u16 pkt_index;
 	bool init_window_shift = false;
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (pkt_type != PKT_TYPE_BAR)
 			mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
 
 	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
@@ -651,8 +666,6 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 	if (!tbl->timer_context.timer_is_set ||
 	    prev_start_win != tbl->start_win)
 		mwifiex_11n_rxreorder_timer_restart(tbl);
-
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	return ret;
 }
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
 		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     peer_mac);
 		if (!tbl) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, EVENT,
 				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 	int tid, win_size;
 	struct mwifiex_rx_reorder_tbl *tbl;
 	uint16_t block_ack_param_set;
-	unsigned long flags;
 
 	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
 			    add_ba_rsp->peer_mac_addr, tid);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
 		if (tbl)
 			mwifiex_del_rx_reorder_entry(priv, tbl);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		return 0;
 	}
 
 	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
 		    >> BLOCKACKPARAM_WINSIZE_POS;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 					     add_ba_rsp->peer_mac_addr);
 	if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
 		else
 			tbl->amsdu = false;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	mwifiex_dbg(priv->adapter, CMD,
 		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-				 &priv->rx_reorder_tbl_ptr, list)
+				 &priv->rx_reorder_tbl_ptr, list) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 	int tlv_buf_left = len;
 	int ret;
 	u8 *tmp;
-	unsigned long flags;
 
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
 			 event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
 			    tlv_bitmap_len);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_reor_tbl_ptr =
 			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
 						       tlv_rxba->mac);
 		if (!rx_reor_tbl_ptr) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Can not find rx_reorder_tbl!");
 			return;
 		}
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		for (i = 0; i < tlv_bitmap_len; i++) {
 			for (j = 0 ; j < 8; j++) {
......
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	if (!priv->ap_11n_enabled ||
 	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
 	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return ret;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	/* Reorder and send to kernel */
 	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
......
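The mwifiex hunks above revert the v4.19 lock restructuring: helpers such as mwifiex_11n_get_rx_reorder_tbl() once again take rx_reorder_tbl_lock internally, so callers invoke them without holding the lock (the commit message only says the revert is due to locking problems; the deadlock reading is an inference from the diff). A hedged userspace sketch of that calling convention, with a pthread mutex standing in for the spinlock and stand-in types:

#include <pthread.h>
#include <stddef.h>
#include <string.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

struct reorder_tbl { int tid; unsigned char ta[6]; };	/* stand-in for the kernel struct */
static struct reorder_tbl entries[8];

/* The helper locks internally, so callers must NOT already hold tbl_lock;
 * taking a non-recursive lock twice on the same path would deadlock. */
static struct reorder_tbl *get_tbl(int tid, const unsigned char *ta)
{
	struct reorder_tbl *found = NULL;
	size_t i;

	pthread_mutex_lock(&tbl_lock);
	for (i = 0; i < 8; i++) {
		if (entries[i].tid == tid && !memcmp(entries[i].ta, ta, 6)) {
			found = &entries[i];
			break;
		}
	}
	pthread_mutex_unlock(&tbl_lock);
	return found;
}

int main(void)
{
	static const unsigned char ta[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return get_tbl(1, ta) ? 1 : 0;	/* lookup runs without the caller holding the lock */
}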
@@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
 		struct ieee80211_txq *txq = sta->txq[i];
-		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct mt76_txq *mtxq;
+
+		if (!txq)
+			continue;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
 
 		spin_lock_bh(&mtxq->hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
......
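The mt76 fix skips sta->txq[] entries that have no queue attached before touching txq->drv_priv. A minimal stand-alone illustration of the same NULL-check-before-dereference pattern, using stand-in types rather than the mt76 structures:

#include <stddef.h>
#include <stdio.h>

struct toy_txq { int id; };	/* stand-in for ieee80211_txq/mt76_txq */

static void stop_queues(struct toy_txq *txqs[], size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct toy_txq *txq = txqs[i];

		if (!txq)
			continue;	/* slot has no queue: skip instead of dereferencing NULL */
		printf("stopping queue %d\n", txq->id);
	}
}

int main(void)
{
	struct toy_txq q = { .id = 3 };
	struct toy_txq *txqs[2] = { NULL, &q };	/* first slot intentionally empty */

	stop_queues(txqs, 2);
	return 0;
}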
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	if (rtl_c2h_fast_cmd(hw, skb)) {
 		rtl_c2h_content_parsing(hw, skb);
+		kfree_skb(skb);
 		return;
 	}
......
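The rtlwifi fix adds the missing kfree_skb() on the fast-command early return, which otherwise leaked one skb per fast C2H command. A userspace analogue of the ownership rule involved, with calloc/free standing in for skb allocation and illustrative names:

#include <stdbool.h>
#include <stdlib.h>

static bool fast_cmd(const unsigned char *buf)
{
	return buf[0] == 0x01;	/* pretend command id 1 is handled inline */
}

static void c2h_enqueue(unsigned char *buf)
{
	if (fast_cmd(buf)) {
		/* ... parse the command immediately ... */
		free(buf);	/* the early-return path must release the buffer too */
		return;
	}
	/* otherwise the buffer would be queued and released later by a worker */
	free(buf);
}

int main(void)
{
	unsigned char *buf = calloc(16, 1);

	if (!buf)
		return 1;
	buf[0] = 0x01;
	c2h_enqueue(buf);
	return 0;
}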