Commit d113c0f2 authored by David S. Miller

Merge tag 'wireless-drivers-2020-07-13' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for v5.8

First set of fixes for v5.8. Various important fixes for iwlwifi and
mt76.

iwlwifi

* fix sleeping under RCU

* fix a kernel crash when using compressed firmware images

mt76

* tx queueing fixes for mt7615/22/63

* locking fix

* fix a crash during watchdog reset

* fix memory leaks
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 46ef5b89 dc7bd30b
@@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
 {
 	struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
 	u32 tp = le32_to_cpu(trig->time_point);
+	struct iwl_ucode_tlv *dup = NULL;
+	int ret;
 
 	if (le32_to_cpu(tlv->length) < sizeof(*trig))
 		return -EINVAL;
@@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
 		return -EINVAL;
 	}
 
-	if (!le32_to_cpu(trig->occurrences))
+	if (!le32_to_cpu(trig->occurrences)) {
+		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
+			      GFP_KERNEL);
+		if (!dup)
+			return -ENOMEM;
+		trig = (void *)dup->data;
 		trig->occurrences = cpu_to_le32(-1);
+		tlv = dup;
+	}
+
+	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+	kfree(dup);
 
-	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+	return ret;
 }
 
 static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
......
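The hunk above stops patching the trigger TLV in place. The TLV points into the loaded firmware data, which is not necessarily writable (the pull message attributes a crash with compressed firmware images to this), so the fix duplicates the TLV with kmemdup(), edits only the copy, registers it, and frees the duplicate. Below is a minimal, self-contained userspace sketch of the same copy-before-modify idea; plain malloc()/memcpy() stand in for kmemdup(), and the struct and field names are illustrative, not the iwlwifi layout.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a firmware TLV; not the iwlwifi structure. */
struct demo_tlv {
	uint32_t type;
	uint32_t length;
	uint8_t data[8];
};

/* Duplicate a (possibly read-only) TLV and patch the copy, never the original. */
static struct demo_tlv *patch_tlv_copy(const struct demo_tlv *ro_tlv)
{
	struct demo_tlv *dup = malloc(sizeof(*dup));

	if (!dup)
		return NULL;
	memcpy(dup, ro_tlv, sizeof(*dup));
	dup->data[0] = 0xff;	/* edit the private copy only */
	return dup;
}

int main(void)
{
	static const struct demo_tlv fw_tlv = { .type = 1, .length = 8 };
	struct demo_tlv *dup = patch_tlv_copy(&fw_tlv);

	if (!dup)
		return 1;
	printf("patched copy: 0x%02x, original untouched: 0x%02x\n",
	       dup->data[0], fw_tlv.data[0]);
	free(dup);
	return 0;
}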
@@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
 	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
 		iwl_mvm_change_queue_tid(mvm, i);
 
+	rcu_read_unlock();
+
 	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
 		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
 						  alloc_for_sta);
-		if (ret) {
-			rcu_read_unlock();
+		if (ret)
 			return ret;
-		}
 	}
 
-	rcu_read_unlock();
-
 	return free_queue;
 }
......
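This is the "fix sleeping under RCU" item from the pull description: rcu_read_unlock() is moved ahead of the iwl_mvm_free_inactive_queue() call, so a call that may block no longer runs inside the RCU read-side critical section. RCU has no direct userspace equivalent, but the control-flow shape, finish the read section before calling anything that can sleep, can be sketched with a pthread rwlock standing in for the read lock. Names below are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t lookup_lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_queue = 3;

/* Stand-in for a call that may sleep (e.g. sends a synchronous command). */
static int free_inactive_queue(int queue)
{
	usleep(1000);	/* must not run while the read lock is held */
	printf("freed queue %d\n", queue);
	return 0;
}

int main(void)
{
	int queue;

	pthread_rwlock_rdlock(&lookup_lock);
	queue = shared_queue;	/* only lookups happen under the lock */
	pthread_rwlock_unlock(&lookup_lock);

	/* Blocking work happens only after the read section is closed. */
	return free_inactive_queue(queue);
}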
@@ -301,6 +301,7 @@ struct mt76_hw_cap {
 #define MT_DRV_TX_ALIGNED4_SKBS	BIT(1)
 #define MT_DRV_SW_RX_AIRTIME	BIT(2)
 #define MT_DRV_RX_DMA_HDR	BIT(3)
+#define MT_DRV_HW_MGMT_TXQ	BIT(4)
 
 struct mt76_driver_ops {
 	u32 drv_flags;
......
@@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
 	struct mt7603_dev *dev = hw->priv;
 
+	mutex_lock(&dev->mt76.mutex);
 	dev->coverage_class = max_t(s16, coverage_class, 0);
 	mt7603_mac_set_timing(dev);
+	mutex_unlock(&dev->mt76.mutex);
 }
 
 static void mt7603_tx(struct ieee80211_hw *hw,
......
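This mt7603 hunk (and the matching mt7615 and mt7915 hunks further down) wraps the coverage-class update and the timing reprogramming in dev->mt76.mutex, so concurrent mac80211 callbacks cannot interleave the two steps; presumably this is the locking fix mentioned in the pull description. A minimal pthread sketch of the same update-and-derive-under-one-lock idea, with purely hypothetical names and a stand-in formula:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int coverage_class;
static int slot_time;

/* Update the setting and the derived timing as one atomic step. */
static void set_coverage_class(int value)
{
	pthread_mutex_lock(&dev_mutex);
	coverage_class = value > 0 ? value : 0;
	slot_time = 9 + 3 * coverage_class;	/* stand-in for *_mac_set_timing() */
	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	set_coverage_class(2);
	printf("coverage_class=%d slot_time=%d\n", coverage_class, slot_time);
	return 0;
}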
@@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
 	int i;
 
 	for (i = 0; i < 16; i++) {
-		int j, acs = i / 4, index = i % 4;
+		int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
+		int acs = i / MT7615_MAX_WMM_SETS;
 		u32 ctrl, val, qlen = 0;
 
-		val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+		val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
 		ctrl = BIT(31) | BIT(15) | (acs << 8);
 
 		for (j = 0; j < 32; j++) {
@@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
 				continue;
 
 			mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
-				ctrl | (j + (index << 5)));
+				ctrl | (j + (wmm_idx << 5)));
 			qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
 					       GENMASK(11, 0));
 		}
 
-		seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+		seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
 	}
 
 	return 0;
......
@@ -36,10 +36,10 @@ static int
 mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
 {
 	static const u8 wmm_queue_map[] = {
-		MT7622_TXQ_AC0,
-		MT7622_TXQ_AC1,
-		MT7622_TXQ_AC2,
-		MT7622_TXQ_AC3,
+		[IEEE80211_AC_BK] = MT7622_TXQ_AC0,
+		[IEEE80211_AC_BE] = MT7622_TXQ_AC1,
+		[IEEE80211_AC_VI] = MT7622_TXQ_AC2,
+		[IEEE80211_AC_VO] = MT7622_TXQ_AC3,
 	};
 	int ret;
 	int i;
@@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev)
 	int i;
 
 	mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+	mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
 	if (is_mt7615(&dev->mt76)) {
 		mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
 	} else {
......
@@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
 {
 	int ret;
 
-	ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE +
-					   MT7615_EEPROM_EXTRA_DATA);
+	ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE);
 	if (ret < 0)
 		return ret;
......
@@ -17,7 +17,7 @@
 #define MT7615_EEPROM_TXDPD_SIZE	216
 #define MT7615_EEPROM_TXDPD_COUNT	(44 + 3)
 
-#define MT7615_EEPROM_EXTRA_DATA	(MT7615_EEPROM_TXDPD_OFFSET + \
+#define MT7615_EEPROM_FULL_SIZE		(MT7615_EEPROM_TXDPD_OFFSET + \
 					 MT7615_EEPROM_TXDPD_COUNT * \
 					 MT7615_EEPROM_TXDPD_SIZE)
......
@@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
 	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
 	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
 
-	if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
-		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
-			skb_get_queue_mapping(skb);
-		p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
-	} else if (beacon) {
-		if (ext_phy)
-			q_idx = MT_LMAC_BCN1;
-		else
-			q_idx = MT_LMAC_BCN0;
+	if (beacon) {
 		p_fmt = MT_TX_TYPE_FW;
+		q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+		p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+		q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
 	} else {
-		if (ext_phy)
-			q_idx = MT_LMAC_ALTX1;
-		else
-			q_idx = MT_LMAC_ALTX0;
 		p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
+			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
 	}
 
 	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
......
@@ -124,21 +124,6 @@ enum tx_pkt_type {
 	MT_TX_TYPE_FW,
 };
 
-enum tx_pkt_queue_idx {
-	MT_LMAC_AC00,
-	MT_LMAC_AC01,
-	MT_LMAC_AC02,
-	MT_LMAC_AC03,
-	MT_LMAC_ALTX0 = 0x10,
-	MT_LMAC_BMC0,
-	MT_LMAC_BCN0,
-	MT_LMAC_PSMP0,
-	MT_LMAC_ALTX1,
-	MT_LMAC_BMC1,
-	MT_LMAC_BCN1,
-	MT_LMAC_PSMP1,
-};
-
 enum tx_port_idx {
 	MT_TX_PORT_IDX_LMAC,
 	MT_TX_PORT_IDX_MCU
......
@@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
 	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
 	struct mt7615_dev *dev = mt7615_hw_dev(hw);
 
+	queue = mt7615_lmac_mapping(dev, queue);
 	queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
 
 	return mt7615_mcu_set_wmm(dev, queue, params);
@@ -735,9 +736,12 @@ static void
 mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
 	struct mt7615_phy *phy = mt7615_hw_phy(hw);
+	struct mt7615_dev *dev = phy->dev;
 
+	mutex_lock(&dev->mt76.mutex);
 	phy->coverage_class = max_t(s16, coverage_class, 0);
 	mt7615_mac_set_timing(phy);
+	mutex_unlock(&dev->mt76.mutex);
 }
 
 static int
......
@@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
 	static const struct mt76_driver_ops drv_ops = {
 		/* txwi_size = txd size + txp size */
 		.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
-		.drv_flags = MT_DRV_TXWI_NO_FREE,
+		.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
 		.survey_flags = SURVEY_INFO_TIME_TX |
 				SURVEY_INFO_TIME_RX |
 				SURVEY_INFO_TIME_BSS_RX,
......
@@ -282,6 +282,21 @@ struct mt7615_dev {
 	struct list_head wd_head;
 };
 
+enum tx_pkt_queue_idx {
+	MT_LMAC_AC00,
+	MT_LMAC_AC01,
+	MT_LMAC_AC02,
+	MT_LMAC_AC03,
+	MT_LMAC_ALTX0 = 0x10,
+	MT_LMAC_BMC0,
+	MT_LMAC_BCN0,
+	MT_LMAC_PSMP0,
+	MT_LMAC_ALTX1,
+	MT_LMAC_BMC1,
+	MT_LMAC_BCN1,
+	MT_LMAC_PSMP1,
+};
+
 enum {
 	HW_BSSID_0 = 0x0,
 	HW_BSSID_1,
@@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
 	return MT7615_WTBL_SIZE;
 }
 
+static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
+{
+	static const u8 lmac_queue_map[] = {
+		[IEEE80211_AC_BK] = MT_LMAC_AC00,
+		[IEEE80211_AC_BE] = MT_LMAC_AC01,
+		[IEEE80211_AC_VI] = MT_LMAC_AC02,
+		[IEEE80211_AC_VO] = MT_LMAC_AC03,
+	};
+
+	if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
+		return MT_LMAC_AC01; /* BE */
+
+	return lmac_queue_map[ac];
+}
+
 void mt7615_dma_reset(struct mt7615_dev *dev);
 void mt7615_scan_work(struct work_struct *work);
 void mt7615_roc_work(struct work_struct *work);
......
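The new mt7615_lmac_mapping() helper above (and the reworked mt7622 wmm_queue_map earlier) exists because mac80211's IEEE80211_AC_* order is VO, VI, BE, BK (0..3), while the hardware queues are numbered in the opposite order, so the software AC index cannot be used directly. Designated initializers make the translation explicit and a bounds check gives a best-effort fallback. A self-contained sketch of the same table-lookup idea follows; the enum values are illustrative, the real numbers live in the driver headers above.

#include <stdint.h>
#include <stdio.h>

/* mac80211 AC order: VO=0, VI=1, BE=2, BK=3. */
enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

/* Illustrative hardware queue numbers, not taken from a datasheet. */
enum { HWQ_AC00, HWQ_AC01, HWQ_AC02, HWQ_AC03 };

static uint8_t lmac_mapping(uint8_t ac)
{
	static const uint8_t map[NUM_ACS] = {
		[AC_BK] = HWQ_AC00,
		[AC_BE] = HWQ_AC01,
		[AC_VI] = HWQ_AC02,
		[AC_VO] = HWQ_AC03,
	};

	if (ac >= NUM_ACS)
		return HWQ_AC01;	/* best-effort fallback to BE */
	return map[ac];
}

int main(void)
{
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		printf("mac80211 AC %d -> hw queue %d\n", ac, lmac_mapping(ac));
	return 0;
}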
@@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
 {
 	static const struct mt76_driver_ops drv_ops = {
 		.txwi_size = MT_USB_TXD_SIZE,
-		.drv_flags = MT_DRV_RX_DMA_HDR,
+		.drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
 		.tx_prepare_skb = mt7663u_tx_prepare_skb,
 		.tx_complete_skb = mt7663u_tx_complete_skb,
 		.tx_status_data = mt7663u_tx_status_data,
@@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
 	if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
 			    FW_STATE_PWR_ON << 1, 500)) {
 		dev_err(dev->mt76.dev, "Timeout for power on\n");
-		return -EIO;
+		ret = -EIO;
+		goto error;
 	}
 
 alloc_queues:
 	ret = mt76u_alloc_mcu_queue(&dev->mt76);
 	if (ret)
-		goto error;
+		goto error_free_q;
 
 	ret = mt76u_alloc_queues(&dev->mt76);
 	if (ret)
-		goto error;
+		goto error_free_q;
 
 	ret = mt7663u_register_device(dev);
 	if (ret)
-		goto error_freeq;
+		goto error_free_q;
 
 	return 0;
 
-error_freeq:
+error_free_q:
 	mt76u_queues_deinit(&dev->mt76);
 error:
 	mt76u_deinit(&dev->mt76);
......
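The probe hunk above stops returning directly on the power-on timeout and funnels every failure through goto labels, and it renames the label so queue-allocation failures unwind through mt76u_queues_deinit() before mt76u_deinit(); this appears to be one of the memory-leak fixes listed in the pull message. A generic sketch of the unwind-in-reverse goto pattern, with hypothetical resources standing in for the driver's queues:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical two-stage setup with goto-based unwinding in reverse order. */
static int probe_demo(void)
{
	char *queues = NULL;
	int ret;

	queues = malloc(64);		/* stage 1: allocate "queues" */
	if (!queues) {
		ret = -1;
		goto error;		/* nothing to undo yet */
	}

	ret = -1;			/* stage 2: pretend registration fails */
	if (ret)
		goto error_free_q;

	return 0;

error_free_q:
	free(queues);			/* undo stage 1 */
error:
	fprintf(stderr, "probe failed, cleaned up\n");
	return ret;
}

int main(void)
{
	return probe_demo() ? 1 : 0;
}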
@@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 	tasklet_disable(&dev->mt76.tx_tasklet);
 	napi_disable(&dev->mt76.tx_napi);
 
-	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
+	mt76_for_each_q_rx(&dev->mt76, i) {
 		napi_disable(&dev->mt76.napi[i]);
+	}
 
 	mutex_lock(&dev->mt76.mutex);
@@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
 
-	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
+	mt76_for_each_q_rx(&dev->mt76, i) {
 		napi_enable(&dev->mt76.napi[i]);
 		napi_schedule(&dev->mt76.napi[i]);
 	}
......
@@ -716,9 +716,12 @@ static void
 mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+	struct mt7915_dev *dev = phy->dev;
 
+	mutex_lock(&dev->mt76.mutex);
 	phy->coverage_class = max_t(s16, coverage_class, 0);
 	mt7915_mac_set_timing(phy);
+	mutex_unlock(&dev->mt76.mutex);
 }
 
 static int
......
@@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 		skb_set_queue_mapping(skb, qid);
 	}
 
+	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+	    !ieee80211_is_data(hdr->frame_control) &&
+	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+		qid = MT_TXQ_PSD;
+		skb_set_queue_mapping(skb, qid);
+	}
+
 	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
 		ieee80211_get_tx_rates(info->control.vif, sta, skb,
 				       info->control.rates, 1);
......
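With the new MT_DRV_HW_MGMT_TXQ flag set (mt7615 MMIO and mt7663u above), mt76_tx() reroutes frames that are neither data nor bufferable management MPDUs to MT_TXQ_PSD, so management frames land on the queue the hardware expects. A compact, self-contained sketch of flag-gated queue selection follows; the constants and the frame-classification inputs are simplified placeholders, not the mt76 definitions.

#include <stdbool.h>
#include <stdio.h>

#define DRV_HW_MGMT_TXQ	(1u << 4)	/* mirrors the intent of the new driver flag */

enum { TXQ_VO, TXQ_VI, TXQ_BE, TXQ_BK, TXQ_PSD };

/* Pick a tx queue: management frames go to PSD when the hw wants them there. */
static int select_txq(unsigned int drv_flags, bool is_data, bool is_bufferable,
		      int default_qid)
{
	if ((drv_flags & DRV_HW_MGMT_TXQ) && !is_data && !is_bufferable)
		return TXQ_PSD;
	return default_qid;
}

int main(void)
{
	printf("mgmt frame -> queue %d\n",
	       select_txq(DRV_HW_MGMT_TXQ, false, false, TXQ_BE));
	printf("data frame -> queue %d\n",
	       select_txq(DRV_HW_MGMT_TXQ, true, false, TXQ_BE));
	return 0;
}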
@@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
 {
 	if (mt76_chip(dev) == 0x7663) {
-		static const u8 wmm_queue_map[] = {
-			[IEEE80211_AC_VO] = 0,
-			[IEEE80211_AC_VI] = 1,
-			[IEEE80211_AC_BE] = 2,
-			[IEEE80211_AC_BK] = 4,
+		static const u8 lmac_queue_map[] = {
+			/* ac to lmac mapping */
+			[IEEE80211_AC_BK] = 0,
+			[IEEE80211_AC_BE] = 1,
+			[IEEE80211_AC_VI] = 2,
+			[IEEE80211_AC_VO] = 4,
 		};
 
-		if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map)))
-			return 2; /* BE */
+		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
+			return 1; /* BE */
 
-		return wmm_queue_map[ac];
+		return lmac_queue_map[ac];
 	}
 
 	return mt76_ac_to_hwq(ac);
@@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
 static void mt76u_free_tx(struct mt76_dev *dev)
 {
-	struct mt76_queue *q;
-	int i, j;
+	int i;
 
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		struct mt76_queue *q;
+		int j;
+
 		q = dev->q_tx[i].q;
+		if (!q)
+			continue;
+
 		for (j = 0; j < q->ndesc; j++)
 			usb_free_urb(q->entry[j].urb);
 	}
@@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev)
 void mt76u_stop_tx(struct mt76_dev *dev)
 {
-	struct mt76_queue_entry entry;
-	struct mt76_queue *q;
-	int i, j, ret;
+	int ret;
 
 	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
 	if (!ret) {
+		struct mt76_queue_entry entry;
+		struct mt76_queue *q;
+		int i, j;
+
 		dev_err(dev->dev, "timed out waiting for pending tx\n");
 
 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 			q = dev->q_tx[i].q;
+			if (!q)
+				continue;
+
 			for (j = 0; j < q->ndesc; j++)
 				usb_kill_urb(q->entry[j].urb);
 		}
@@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
 	 */
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		q = dev->q_tx[i].q;
+		if (!q)
+			continue;
 
 		/* Assure we are in sync with killed tasklet. */
 		spin_lock_bh(&q->lock);
......
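mt76u_free_tx() and mt76u_stop_tx() now skip q_tx slots whose queue pointer is NULL, so teardown cannot dereference an unallocated queue when not every tx index has a backing USB queue on these devices. The guard is simply "check the slot before walking it", as in this illustrative stand-alone sketch (names and sizes are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define NUM_ACS 4

struct demo_queue {
	int ndesc;
};

int main(void)
{
	struct demo_queue *q_tx[NUM_ACS] = { NULL };
	int i;

	q_tx[0] = calloc(1, sizeof(*q_tx[0]));	/* only some slots are populated */

	for (i = 0; i < NUM_ACS; i++) {
		if (!q_tx[i])			/* the fix: skip empty slots */
			continue;
		printf("cleaning queue %d (%d descriptors)\n", i, q_tx[i]->ndesc);
		free(q_tx[i]);
	}
	return 0;
}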