Commit 22781f07 authored by David S. Miller

Merge tag 'wireless-drivers-for-davem-2019-03-19' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for 5.1

First set of fixes for 5.1. Lots of fixes for mt76 this time.

iwlwifi

* fix warning with do_div()

mt7601u

* avoid using hardware which is supported by mt76

mt76

* more fixes for hweight8() usage

* fix hardware restart for mt76x2

* fix writing txwi on USB devices

* fix (and disable by default) ED/CCA support on 76x2

* fix powersave issues on 7603

* fix return value check for ioremap on 7603

* fix duplicate USB device IDs
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e8629d29 7dfc45e6
......@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
s64 rtt_avg = res->ftm.rtt_avg * 100;
do_div(rtt_avg, 6666);
s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
IWL_DEBUG_INFO(mvm, "entry %d\n", index);
IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
......
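The iwlwifi hunk above replaces do_div() because do_div() operates on an unsigned 64-bit lvalue (and modifies it in place), so feeding it the signed s64 rtt_avg triggers a build warning; div_s64() is the signed 64-by-32 division helper from <linux/math64.h>. A minimal sketch of the idiom, with a hypothetical helper name (only div_s64() and the 100/6666 scaling come from the hunk):

#include <linux/math64.h>

/* hypothetical helper: apply the same 100/6666 scaling as the hunk above,
 * using the signed division helper instead of do_div()
 */
static inline s64 scale_rtt_avg(s64 rtt_avg)
{
	return div_s64(rtt_avg * 100, 6666);
}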
......@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
iowrite32(q->desc_dma, &q->regs->desc_base);
iowrite32(q->ndesc, &q->regs->ring_size);
q->head = ioread32(&q->regs->dma_idx);
q->tail = q->head;
iowrite32(q->head, &q->regs->cpu_idx);
......@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
mt76_dma_sync_idx(dev, q);
wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
......
......@@ -679,19 +679,15 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
return ret;
}
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int idx = wcid->idx;
int i;
int i, idx = wcid->idx;
rcu_assign_pointer(dev->wcid[idx], NULL);
synchronize_rcu();
mutex_lock(&dev->mutex);
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
......@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_free(dev->wcid_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
mutex_lock(&dev->mutex);
__mt76_sta_remove(dev, vif, sta);
mutex_unlock(&dev->mutex);
}
......
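The mt76_sta_remove() refactor above follows the kernel's usual double-underscore locking convention: __mt76_sta_remove() assumes the caller already holds dev->mutex, while the mt76_sta_remove() wrapper takes the lock itself. This lets the mt76x2 restart path later in this series (mt76x02_reset_state(), which runs with dev->mt76.mutex held) tear down stations without double-locking. A generic sketch of the pattern with hypothetical names; the lockdep assertion is illustrative and not part of the hunk above:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct foo_dev {
	struct mutex mutex;
	/* ... driver state ... */
};

/* double-underscore variant: caller must already hold dev->mutex */
static void __foo_sta_remove(struct foo_dev *dev)
{
	lockdep_assert_held(&dev->mutex);
	/* ... actual teardown ... */
}

/* public variant: takes and releases the lock around the real work */
static void foo_sta_remove(struct foo_dev *dev)
{
	mutex_lock(&dev->mutex);
	__foo_sta_remove(dev);
	mutex_unlock(&dev->mutex);
}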
......@@ -126,6 +126,7 @@ struct mt76_queue {
int ndesc;
int queued;
int buf_size;
bool stopped;
u8 buf_offset;
u8 hw_idx;
......@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
......@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
......
......@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
__sw_hweight8(dev->beacon_mask))
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
dev->beacon_check++;
}
......
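This hunk and the mt7603_phy_init() and mt7603_mcu_set_channel() hunks below switch from __sw_hweight8() to hweight8(). hweight8() is the public population-count helper from <linux/bitops.h>; __sw_hweight8() is the software fallback behind it and is not available on every architecture, so calling it directly can break the build. A minimal sketch with a hypothetical helper name:

#include <linux/types.h>
#include <linux/bitops.h>

/* hypothetical helper: number of active chains in an antenna bitmask */
static inline int active_chain_count(u8 antenna_mask)
{
	return hweight8(antenna_mask);	/* count of set bits, 0..8 */
}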
......@@ -27,12 +27,16 @@ static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
__le32 *txd = (__le32 *)skb->data;
struct ieee80211_hdr *hdr;
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
void *priv;
int idx;
u32 val;
u8 tid;
if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
val = le32_to_cpu(txd[1]);
......@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (!wcid)
goto free;
msta = container_of(wcid, struct mt7603_sta, wcid);
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
ieee80211_sta_set_buffered(sta, tid, true);
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
......
......@@ -112,7 +112,7 @@ static void
mt7603_phy_init(struct mt7603_dev *dev)
{
int rx_chains = dev->mt76.antenna_mask;
int tx_chains = __sw_hweight8(rx_chains) - 1;
int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
(MT_WF_RMAC_RMCR_SMPS_MODE |
......
......@@ -1072,7 +1072,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
case MT_PHY_TYPE_HT:
final_rate_flags |= IEEE80211_TX_RC_MCS;
final_rate &= GENMASK(5, 0);
if (i > 15)
if (final_rate > 15)
return false;
break;
default:
......
......@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
#include "mac.h"
#include "eeprom.h"
static int
......@@ -385,6 +386,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mt7603_ps_tx_list(dev, &list);
}
static void
mt7603_ps_set_more_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
......@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
__skb_queue_head_init(&list);
mt7603_wtbl_set_ps(dev, msta, false);
spin_lock_bh(&dev->ps_lock);
skb_queue_walk_safe(&msta->psq, skb, tmp) {
if (!nframes)
......@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
skb_set_queue_mapping(skb, MT_TXQ_PSD);
__skb_unlink(skb, &msta->psq);
mt7603_ps_set_more_data(skb);
__skb_queue_tail(&list, skb);
nframes--;
}
spin_unlock_bh(&dev->ps_lock);
if (!skb_queue_empty(&list))
ieee80211_sta_eosp(sta);
mt7603_ps_tx_list(dev, &list);
if (nframes)
......
......@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
int n_chains = hweight8(dev->mt76.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
......
......@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
}
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (!mem_base) {
if (IS_ERR(mem_base)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
return -EINVAL;
return PTR_ERR(mem_base);
}
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
......
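The mt7603 platform-probe hunk above fixes an error check: devm_ioremap_resource() never returns NULL on failure, it returns an ERR_PTR()-encoded errno, so the result must be tested with IS_ERR() and propagated with PTR_ERR(). A minimal sketch of the pattern, using a hypothetical probe function:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* hypothetical probe: map the first MMIO resource of a platform device */
static int example_wmac_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *mem_base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mem_base))
		return PTR_ERR(mem_base);	/* ERR_PTR code, not NULL */

	/* ... continue with device setup using mem_base ... */
	return 0;
}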
......@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
{ MT_MM20_PROT_CFG, 0x01742004 },
{ MT_MM40_PROT_CFG, 0x03f42084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
{ MT_TX_RTS_CFG, 0x00092b20 },
{ MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },
......
......@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
u32 asic_rev, mac_rev;
u32 mac_rev;
int ret;
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
......@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
goto err;
}
asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
mdev->rev, mac_rev);
if (!is_mt76x0(dev)) {
ret = -ENODEV;
goto err;
}
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
......
......@@ -51,6 +51,7 @@ struct mt76x02_calibration {
u16 false_cca;
s8 avg_rssi_all;
s8 agc_gain_adjust;
s8 agc_lowest_gain;
s8 low_gain;
s8 temp_vco;
......@@ -114,8 +115,11 @@ struct mt76x02_dev {
struct mt76x02_dfs_pattern_detector dfs_pd;
/* edcca monitor */
unsigned long ed_trigger_timeout;
bool ed_tx_blocked;
bool ed_monitor;
u8 ed_monitor_enabled;
u8 ed_monitor_learning;
u8 ed_trigger;
u8 ed_silent;
ktime_t ed_time;
......@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
static inline bool is_mt76x0(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7610 ||
mt76_chip(&dev->mt76) == 0x7630 ||
mt76_chip(&dev->mt76) == 0x7650;
}
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
......
......@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
static int
mt76_edcca_set(void *data, u64 val)
{
struct mt76x02_dev *dev = data;
enum nl80211_dfs_regions region = dev->dfs_pd.region;
dev->ed_monitor_enabled = !!val;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
return 0;
}
static int
mt76_edcca_get(void *data, u64 *val)
{
struct mt76x02_dev *dev = data;
*val = dev->ed_monitor_enabled;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
"%lld\n");
void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
......@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
......
......@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
dev->ed_monitor = region == NL80211_DFS_ETSI;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
dfs_pd->region = region;
......
......@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u32 iv, eiv;
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
iv = mt76_rr(dev, MT_WCID_IV(idx));
eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
pn = (u64)eiv << 16;
if (cipher == MT_CIPHER_TKIP) {
pn |= (iv >> 16) & 0xff;
pn |= (iv & 0xff) << 8;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
pn |= iv & 0xffff;
} else {
return;
}
atomic64_set(&key->tx_pn, pn);
}
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
u64 pn;
cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
......@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
if (key) {
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
pn = atomic64_read(&key->tx_pn);
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
if (cipher >= MT_CIPHER_TKIP) {
iv_data[3] |= 0x20;
put_unaligned_le32(pn >> 16, &iv_data[4]);
}
if (cipher == MT_CIPHER_TKIP) {
iv_data[0] = (pn >> 8) & 0xff;
iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
iv_data[2] = pn & 0xff;
} else if (cipher >= MT_CIPHER_AES_CCMP) {
put_unaligned_le16((pn & 0xffff), &iv_data[0]);
}
}
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
......@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
}
}
mt76x02_edcca_tx_enable(dev, true);
dev->ed_monitor_learning = true;
/* clear previous CCA timer value */
mt76_rr(dev, MT_ED_CCA_TIMER);
......@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH 92
#define MT_EDCCA_BLOCK_TH 2
#define MT_EDCCA_LEARN_TH 50
#define MT_EDCCA_LEARN_CCA 180
#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
ktime_t cur_time;
......@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
dev->ed_trigger = 0;
}
if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
!dev->ed_tx_blocked)
if (dev->cal.agc_lowest_gain &&
dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
dev->ed_trigger > MT_EDCCA_LEARN_TH) {
dev->ed_monitor_learning = false;
dev->ed_trigger_timeout = jiffies + 20 * HZ;
} else if (!dev->ed_monitor_learning &&
time_is_after_jiffies(dev->ed_trigger_timeout)) {
dev->ed_monitor_learning = true;
mt76x02_edcca_tx_enable(dev, true);
}
if (dev->ed_monitor_learning)
return;
if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, false);
else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
dev->ed_tx_blocked)
else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
mt76x02_edcca_tx_enable(dev, true);
}
......
......@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
......
......@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
struct beacon_bc_data {
......@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
return i < 4;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key, void *data)
{
struct mt76x02_dev *dev = hw->priv;
struct mt76_wcid *wcid;
if (!sta)
return;
wcid = (struct mt76_wcid *) sta->drv_priv;
if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
return;
mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
int i;
lockdep_assert_held(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
rcu_read_lock();
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
rcu_read_unlock();
for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct mt76x02_sta *msta;
struct mt76_wcid *wcid;
void *priv;
wcid = rcu_dereference_protected(dev->mt76.wcid[i],
lockdep_is_held(&dev->mt76.mutex));
if (!wcid)
continue;
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
priv = msta->vif;
vif = container_of(priv, struct ieee80211_vif, drv_priv);
__mt76_sta_remove(&dev->mt76, vif, sta);
memset(msta, 0, sizeof(*msta));
}
dev->vif_mask = 0;
dev->beacon_mask = 0;
}
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
u32 mask = dev->mt76.mmio.irqmask;
bool restart = dev->mt76.mcu_ops->mcu_restart;
int i;
ieee80211_stop_queues(dev->mt76.hw);
......@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
if (restart)
mt76x02_reset_state(dev);
if (dev->beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
......@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
/* let fw reset DMA */
mt76_set(dev, 0x734, 0x3);
if (restart)
dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
mt76_queue_rx_reset(dev, i);
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
mt76_set(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
mt76x02_mac_start(dev);
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
if (dev->beacon_mask)
if (dev->beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
......@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
napi_schedule(&dev->mt76.napi[i]);
}
ieee80211_wake_queues(dev->mt76.hw);
mt76_txq_schedule_all(&dev->mt76);
if (restart) {
mt76x02_mcu_function_select(dev, Q_SELECT, 1);
ieee80211_restart_hw(dev->mt76.hw);
} else {
ieee80211_wake_queues(dev->mt76.hw);
mt76_txq_schedule_all(&dev->mt76);
}
}
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
......
......@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
ret = true;
}
dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
......
......@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mt76x02_insert_hdr_pad(skb);
txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
skb_push(skb, sizeof(struct mt76x02_txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
......
......@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int idx = 0;
memset(msta, 0, sizeof(*msta));
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0)
return -ENOSPC;
......@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76_txq *mtxq;
memset(mvif, 0, sizeof(*mvif));
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
......@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
/* Allow to change address in HW if we create first interface. */
if (!dev->vif_mask &&
(((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
mt76x02_mac_setaddr(dev, vif->addr);
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
......@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (dev->vif_mask & BIT(idx))
return -EBUSY;
/* Allow to change address in HW if we create first interface. */
if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
dev->vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
......
......@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_TX_SW_CFG1, 0x00010000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ MT_TXOP_CTRL_CFG, 0x0400583f },
{ MT_TX_RTS_CFG, 0x00100020 },
{ MT_TX_RTS_CFG, 0x00ffff20 },
{ MT_TX_TIMEOUT_CFG, 0x000a2290 },
{ MT_TX_RETRY_CFG, 0x47f01f0f },
{ MT_EXP_ACK_TIME, 0x002c00dc },
......
......@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
void mt76x2_cleanup(struct mt76x02_dev *dev);
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband);
......
......@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
}
}
static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
const u8 *macaddr = dev->mt76.macaddr;
u32 val;
......
......@@ -165,9 +165,30 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
return -ENOENT;
}
static int
mt76pci_mcu_restart(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev;
int ret;
dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76x02_mcu_cleanup(dev);
mt76x2_mac_reset(dev, true);
ret = mt76pci_load_firmware(dev);
if (ret)
return ret;
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
return 0;
}
int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
.mcu_restart = mt76pci_mcu_restart,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
......
......@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1836 << 16;
if (!mt76x2_has_ext_lna(dev) &&
dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
else
val = 0x1836 << 16;
if (mt76x2_has_ext_lna(dev) &&
dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
val = 0x0f36 << 16;
val |= 0xf8;
......@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
u32 agc_35, agc_37;
bool gain_change;
int low_gain;
u32 val;
......@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
else
low_gain_delta = 14;
agc_37 = 0x2121262c;
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
agc_35 = 0x11111516;
else if (low_gain == 2)
agc_35 = agc_37 = 0x08080808;
else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
agc_35 = 0x10101014;
else
agc_35 = 0x11111116;
if (low_gain == 2) {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
......@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
dev->cal.agc_gain_adjust = 0;
} else {
mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
else
mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
gain_delta = 0;
dev->cal.agc_gain_adjust = low_gain_delta;
}
mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
mt76x2_phy_set_gain_val(dev);
......
......@@ -21,11 +21,10 @@
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
{ USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
{ USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
{ USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
{ USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
......@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
if (!is_mt76x2(dev)) {
err = -ENODEV;
goto err;
}
err = mt76x2u_register_device(dev);
if (err < 0)
......
......@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
......
......@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8)
if (q->queued > q->ndesc - 8 && !q->stopped) {
ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
q->stopped = true;
}
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
......@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
if (last_skb) {
mt76_queue_ps_skb(dev, sta, last_skb, true);
dev->queue_ops->kick(dev, hwq);
} else {
ieee80211_sta_eosp(sta);
}
spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
......@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
struct mt76_queue *hwq = mtxq->hwq;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
spin_lock_bh(&hwq->lock);
if (list_empty(&mtxq->list))
list_add_tail(&mtxq->list, &hwq->swq);
......
......@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
......
......@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
if ((asic_rev >> 16) != 0x7601) {
ret = -ENODEV;
goto err;
}
/* Note: vendor driver skips this check for MT7601U */
if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
......