Commit d3b6fab9 authored by Kalle Valo

Merge tag 'mt76-for-kvalo-2020-12-04' of https://github.com/nbd168/wireless

mt76 patches for 5.11

* mt7915 fixes
* mt7615 fixes
* support for more sta interfaces on mt7615/mt7915
* mt7915 encap offload
* performance improvements
* channel noise report on mt7915
* usb/sdio support improvements
* mt7915 testmode support
* mt7915 DBDC support
* warning fixes
parents 3324e05e f12758f6
......@@ -30,8 +30,8 @@ int mt76_queues_read(struct seq_file *s, void *data)
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
struct mt76_queue *q = dev->q_tx[i];
for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
struct mt76_queue *q = dev->phy.q_tx[i];
if (!q)
continue;
......
......@@ -72,9 +72,11 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
local_bh_disable();
while ((t = __mt76_get_txwi(dev)) != NULL)
dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
local_bh_enable();
}
static int
......@@ -86,6 +88,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int i;
spin_lock_init(&q->lock);
spin_lock_init(&q->cleanup_lock);
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q->ndesc = n_desc;
......@@ -215,16 +218,15 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
bool wake = false;
int last;
if (!q)
return;
spin_lock_bh(&q->cleanup_lock);
if (flush)
last = -1;
else
......@@ -237,13 +239,13 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
}
spin_unlock_bh(&q->cleanup_lock);
if (flush) {
spin_lock_bh(&q->lock);
......@@ -252,16 +254,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
spin_unlock_bh(&q->lock);
}
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
if (!q->queued)
wake_up(&dev->tx_wait);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
}
static void *
......@@ -312,10 +306,9 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info)
{
struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
......@@ -343,11 +336,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
}
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
......@@ -397,7 +389,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
if (ret < 0)
......@@ -661,8 +653,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
if (dev->phy2)
mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
}
for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
mt76_for_each_q_rx(dev, i) {
netif_napi_del(&dev->napi[i]);
......
......@@ -88,8 +88,10 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
void
mt76_eeprom_override(struct mt76_dev *dev)
mt76_eeprom_override(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
#ifdef CONFIG_OF
struct device_node *np = dev->dev->of_node;
const u8 *mac = NULL;
......@@ -97,14 +99,14 @@ mt76_eeprom_override(struct mt76_dev *dev)
if (np)
mac = of_get_mac_address(np);
if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->macaddr, mac);
ether_addr_copy(phy->macaddr, mac);
#endif
if (!is_valid_ether_addr(dev->macaddr)) {
eth_random_addr(dev->macaddr);
if (!is_valid_ether_addr(phy->macaddr)) {
eth_random_addr(phy->macaddr);
dev_info(dev->dev,
"Invalid MAC address, using random address %pM\n",
dev->macaddr);
phy->macaddr);
}
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
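A rough sketch of the per-phy override flow this change enables (the helper name is illustrative; the call pattern follows the driver conversions later in this merge):

static void example_init_macaddr(struct mt76_phy *phy, const u8 *eeprom_mac)
{
	memcpy(phy->macaddr, eeprom_mac, ETH_ALEN);

	/* DT mac-address / random-address fallback now applies per phy */
	mt76_eeprom_override(phy);
}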
......
......@@ -159,21 +159,22 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
if (phy->dev->cap.has_2ghz)
if (phy->cap.has_2ghz)
mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
if (phy->dev->cap.has_5ghz)
if (phy->cap.has_5ghz)
mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
static int
mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
const struct ieee80211_channel *chan, int n_chan,
struct ieee80211_rate *rates, int n_rates, bool vht)
{
struct ieee80211_supported_band *sband = &msband->sband;
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_sta_ht_cap *ht_cap;
struct mt76_dev *dev = phy->dev;
void *chanlist;
int size;
......@@ -203,7 +204,7 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
mt76_init_stream_cap(&dev->phy, sband, vht);
mt76_init_stream_cap(phy, sband, vht);
if (!vht)
return 0;
......@@ -221,27 +222,25 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
}
static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
int n_rates)
{
dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband;
phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
return mt76_init_sband(dev, &dev->phy.sband_2g,
mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz),
rates, n_rates, false);
return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz), rates,
n_rates, false);
}
static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
int n_rates, bool vht)
{
dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband;
phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
return mt76_init_sband(dev, &dev->phy.sband_5g,
mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz),
rates, n_rates, vht);
return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz), rates,
n_rates, vht);
}
static void
......@@ -274,12 +273,13 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
}
static void
mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
struct mt76_dev *dev = phy->dev;
struct wiphy *wiphy = hw->wiphy;
SET_IEEE80211_DEV(hw, dev->dev);
SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
......@@ -305,6 +305,7 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
ieee80211_hw_set(hw, TX_AMSDU);
......@@ -314,7 +315,6 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
wiphy->interface_modes =
......@@ -333,65 +333,57 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops)
{
struct ieee80211_hw *hw;
unsigned int phy_size;
struct mt76_phy *phy;
unsigned int phy_size, chan_size;
unsigned int size_2g, size_5g;
void *priv;
phy_size = ALIGN(sizeof(*phy), 8);
chan_size = sizeof(dev->phy.sband_2g.chan[0]);
size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8);
size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8);
size += phy_size + size_2g + size_5g;
hw = ieee80211_alloc_hw(size, ops);
hw = ieee80211_alloc_hw(size + phy_size, ops);
if (!hw)
return NULL;
phy = hw->priv;
phy->dev = dev;
phy->hw = hw;
phy->priv = hw->priv + phy_size;
mt76_phy_init(dev, hw);
priv = hw->priv + phy_size;
return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
phy->sband_2g = dev->phy.sband_2g;
phy->sband_2g.chan = priv;
priv += size_2g;
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates)
{
int ret;
phy->sband_5g = dev->phy.sband_5g;
phy->sband_5g.chan = priv;
priv += size_5g;
mt76_phy_init(phy, phy->hw);
phy->priv = priv;
if (phy->cap.has_2ghz) {
ret = mt76_init_sband_2g(phy, rates, n_rates);
if (ret)
return ret;
}
hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
if (phy->cap.has_5ghz) {
ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
if (ret)
return ret;
}
wiphy_read_of_freq_limits(phy->hw->wiphy);
mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
int
mt76_register_phy(struct mt76_phy *phy)
{
int ret;
ret = ieee80211_register_hw(phy->hw);
if (ret)
return ret;
phy->dev->phy2 = phy;
return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
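A rough sketch of how a driver brings up a secondary phy with the reworked API, now that band setup has moved out of mt76_alloc_phy() and into mt76_register_phy(); struct example_phy_priv is a placeholder for the driver's private data:

static int example_register_ext_phy(struct mt76_dev *dev,
				    const struct ieee80211_ops *ops,
				    struct ieee80211_rate *rates, int n_rates)
{
	struct mt76_phy *mphy;
	int ret;

	/* the allocation now only covers struct mt76_phy + driver priv */
	mphy = mt76_alloc_phy(dev, sizeof(struct example_phy_priv), ops);
	if (!mphy)
		return -ENOMEM;

	/* band capabilities are tracked per phy instead of per device */
	mphy->cap.has_5ghz = true;

	/* sband and rate setup now happens at registration time */
	ret = mt76_register_phy(mphy, true, rates, n_rates);
	if (ret)
		ieee80211_free_hw(mphy->hw);

	return ret;
}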
void
mt76_unregister_phy(struct mt76_phy *phy)
void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
......@@ -459,16 +451,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
int ret;
dev_set_drvdata(dev->dev, dev);
mt76_phy_init(dev, hw);
mt76_phy_init(phy, hw);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
if (phy->cap.has_2ghz) {
ret = mt76_init_sband_2g(phy, rates, n_rates);
if (ret)
return ret;
}
if (dev->cap.has_5ghz) {
ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
if (phy->cap.has_5ghz) {
ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
if (ret)
return ret;
}
......@@ -539,14 +531,11 @@ EXPORT_SYMBOL_GPL(mt76_rx);
bool mt76_has_tx_pending(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
struct mt76_queue *q;
int i, offset;
offset = __MT_TXQ_MAX * (phy != &dev->phy);
int i;
for (i = 0; i < __MT_TXQ_MAX; i++) {
q = dev->q_tx[offset + i];
q = phy->q_tx[i];
if (q && q->queued)
return true;
}
......@@ -842,7 +831,7 @@ mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
return;
if (!wcid || !wcid->sta) {
if (!ether_addr_equal(hdr->addr1, dev->macaddr))
if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
return;
wcid = NULL;
......@@ -932,7 +921,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
{
struct ieee80211_sta *sta;
struct ieee80211_hw *hw;
struct sk_buff *skb;
struct sk_buff *skb, *tmp;
LIST_HEAD(list);
spin_lock(&dev->rx_lock);
while ((skb = __skb_dequeue(frames)) != NULL) {
......@@ -942,9 +932,19 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
}
mt76_rx_convert(dev, skb, &hw, &sta);
ieee80211_rx_napi(hw, sta, skb, napi);
ieee80211_rx_list(hw, sta, skb, &list);
}
spin_unlock(&dev->rx_lock);
if (!napi) {
netif_receive_skb_list(&list);
return;
}
list_for_each_entry_safe(skb, tmp, &list, list) {
skb_list_del_init(skb);
napi_gro_receive(napi, skb);
}
}
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
......@@ -1202,3 +1202,22 @@ int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base)
{
struct mt76_queue *hwq;
int err;
hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return ERR_PTR(-ENOMEM);
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0)
return ERR_PTR(err);
return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
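A rough sketch of a driver-side DMA init built on this allocator and on the mt76_init_tx_queue()/mt76_init_mcu_queue() wrappers added to mt76.h later in this series; the EXAMPLE_* ring constants are placeholders:

static int example_alloc_queues(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	int i, err;

	/* one data ring per WMM AC, owned by this phy (hw ring index == AC) */
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		err = mt76_init_tx_queue(phy, i, i, EXAMPLE_TX_RING_SIZE,
					 EXAMPLE_TX_RING_BASE);
		if (err)
			return err;
	}

	/* MCU command ring now lives in dev->q_mcu[], no longer in q_tx[] */
	return mt76_init_mcu_queue(dev, MT_MCUQ_WM, EXAMPLE_MCU_RING_IDX,
				   EXAMPLE_MCU_RING_SIZE,
				   EXAMPLE_TX_RING_BASE);
}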
......@@ -50,3 +50,83 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
wake_up(&dev->mcu.wait);
}
EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp, struct sk_buff **ret_skb)
{
struct sk_buff *skb;
if (dev->mcu_ops->mcu_send_msg)
return dev->mcu_ops->mcu_send_msg(dev, cmd, data, len, wait_resp);
skb = mt76_mcu_msg_alloc(dev, data, len);
if (!skb)
return -ENOMEM;
return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, ret_skb);
}
EXPORT_SYMBOL_GPL(mt76_mcu_send_and_get_msg);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp,
struct sk_buff **ret_skb)
{
unsigned long expires;
int ret, seq;
if (ret_skb)
*ret_skb = NULL;
mutex_lock(&dev->mcu.mutex);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
if (!wait_resp) {
ret = 0;
goto out;
}
expires = jiffies + dev->mcu.timeout;
do {
skb = mt76_mcu_get_response(dev, expires);
ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq);
if (!ret && ret_skb)
*ret_skb = skb;
else
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
out:
mutex_unlock(&dev->mcu.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76_mcu_skb_send_and_get_msg);
int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
int len)
{
int err, cur_len;
while (len > 0) {
cur_len = min_t(int, 4096 - dev->mcu_ops->headroom, len);
err = mt76_mcu_send_msg(dev, cmd, data, cur_len, false);
if (err)
return err;
data += cur_len;
len -= cur_len;
if (dev->queue_ops->tx_cleanup)
dev->queue_ops->tx_cleanup(dev,
dev->q_mcu[MT_MCUQ_FWDL],
false);
}
return 0;
}
EXPORT_SYMBOL_GPL(mt76_mcu_send_firmware);
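A minimal sketch of the response-consuming path the new helper enables; EXAMPLE_MCU_CMD_QUERY and the parsing step are placeholders:

static int example_mcu_query(struct mt76_dev *dev)
{
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(dev, EXAMPLE_MCU_CMD_QUERY, NULL, 0,
					true, &skb);
	if (ret)
		return ret;

	/* driver-specific parsing of skb->data would go here */
	dev_kfree_skb(skb);

	return 0;
}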
......@@ -64,18 +64,23 @@ enum mt76_txq_id {
MT_TXQ_BE = IEEE80211_AC_BE,
MT_TXQ_BK = IEEE80211_AC_BK,
MT_TXQ_PSD,
MT_TXQ_MCU,
MT_TXQ_MCU_WA,
MT_TXQ_BEACON,
MT_TXQ_CAB,
MT_TXQ_FWDL,
__MT_TXQ_MAX
};
enum mt76_mcuq_id {
MT_MCUQ_WM,
MT_MCUQ_WA,
MT_MCUQ_FWDL,
__MT_MCUQ_MAX
};
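As used by the driver conversions later in this merge, the MCU entries dropped from enum mt76_txq_id map onto the new dedicated enum roughly as follows:

/*
 * dev->q_tx[MT_TXQ_MCU]    -> dev->q_mcu[MT_MCUQ_WM]
 * dev->q_tx[MT_TXQ_MCU_WA] -> dev->q_mcu[MT_MCUQ_WA]
 * dev->q_tx[MT_TXQ_FWDL]   -> dev->q_mcu[MT_MCUQ_FWDL]
 * The remaining data/beacon/CAB rings move from dev->q_tx[] to phy->q_tx[]
 * on the owning mt76_phy.
 */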
enum mt76_rxq_id {
MT_RXQ_MAIN,
MT_RXQ_MCU,
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
__MT_RXQ_MAX
};
......@@ -121,6 +126,7 @@ struct mt76_queue {
struct mt76_queue_regs __iomem *regs;
spinlock_t lock;
spinlock_t cleanup_lock;
struct mt76_queue_entry *entry;
struct mt76_desc *desc;
......@@ -131,9 +137,11 @@ struct mt76_queue {
int queued;
int buf_size;
bool stopped;
bool blocked;
u8 buf_offset;
u8 hw_idx;
u8 qid;
dma_addr_t desc_dma;
struct sk_buff *rx_head;
......@@ -147,7 +155,9 @@ struct mt76_mcu_ops {
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp);
int cmd, int *seq);
int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
struct sk_buff *skb, int seq);
u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
......@@ -164,11 +174,11 @@ struct mt76_queue_ops {
int idx, int n_desc, int bufsize,
u32 ring_base);
int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info);
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
......@@ -176,7 +186,7 @@ struct mt76_queue_ops {
void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
bool flush);
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
......@@ -185,6 +195,7 @@ struct mt76_queue_ops {
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
MT_WCID_FLAG_4ADDR,
};
#define MT76_N_WCIDS 288
......@@ -411,6 +422,7 @@ enum mt76u_out_ep {
struct mt76_mcu {
struct mutex mutex;
u32 msg_seq;
int timeout;
struct sk_buff_head res_q;
wait_queue_head_t wait;
......@@ -426,7 +438,9 @@ struct mt76_usb {
u8 *data;
u16 data_len;
struct tasklet_struct rx_tasklet;
struct mt76_worker status_worker;
struct mt76_worker rx_worker;
struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
......@@ -445,25 +459,18 @@ struct mt76_usb {
#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
struct mt76_sdio {
struct workqueue_struct *txrx_wq;
struct {
struct work_struct xmit_work;
struct work_struct status_work;
} tx;
struct {
struct work_struct recv_work;
struct work_struct net_work;
} rx;
struct mt76_worker txrx_worker;
struct mt76_worker status_worker;
struct mt76_worker net_worker;
struct work_struct stat_work;
u8 *xmit_buf[MT_TXQ_MCU_WA];
u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
struct sdio_func *func;
void *intr_data;
struct {
struct mutex lock;
int pse_data_quota;
int ple_data_quota;
int pse_mcu_quota;
......@@ -528,6 +535,8 @@ struct mt76_testmode_data {
u8 tx_rate_nss;
u8 tx_rate_sgi;
u8 tx_rate_ldpc;
u8 tx_rate_stbc;
u8 tx_ltf;
u8 tx_antenna_mask;
......@@ -555,15 +564,20 @@ struct mt76_phy {
unsigned long state;
struct mt76_queue *q_tx[__MT_TXQ_MAX];
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
u8 macaddr[ETH_ALEN];
u32 vif_mask;
int txpower_cur;
......@@ -601,7 +615,7 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
......@@ -619,7 +633,6 @@ struct mt76_dev {
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
u8 macaddr[ETH_ALEN];
u32 rev;
u32 aggr_stats[32];
......@@ -630,7 +643,6 @@ struct mt76_dev {
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;
......@@ -690,10 +702,7 @@ enum mt76_phy_type {
#define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define __mt76_mcu_skb_send_msg(dev, ...) (dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
......@@ -752,7 +761,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
......@@ -770,7 +779,8 @@ void mt76_unregister_phy(struct mt76_phy *phy);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
......@@ -778,7 +788,40 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
void mt76_eeprom_override(struct mt76_phy *phy);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base)
{
struct mt76_queue *q;
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
if (IS_ERR(q))
return PTR_ERR(q);
q->qid = qid;
phy->q_tx[qid] = q;
return 0;
}
static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
int n_desc, int ring_base)
{
struct mt76_queue *q;
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
if (IS_ERR(q))
return PTR_ERR(q);
q->qid = __MT_TXQ_MAX + qid;
dev->q_mcu[qid] = q;
return 0;
}
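A minimal sketch of how the qid encoding set up here can be used; the helper name is illustrative, but the __MT_TXQ_MAX offset for MCU queues comes straight from mt76_init_mcu_queue() above:

static inline bool example_queue_is_mcu(struct mt76_queue *q)
{
	/* e.g. q->qid == __MT_TXQ_MAX + MT_MCUQ_WM for the WM command ring */
	return q->qid >= __MT_TXQ_MAX;
}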
static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
......@@ -901,7 +944,7 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
......@@ -1064,7 +1107,6 @@ void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_queues(struct mt76_dev *dev);
void mt76s_stop_txrx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
struct sk_buff *
......@@ -1073,6 +1115,25 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
int len);
static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
bool wait_resp)
{
return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}
static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
bool wait_resp)
{
return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
......
......@@ -13,23 +13,25 @@ static void
mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7603_dev *dev = (struct mt7603_dev *)priv;
struct mt76_dev *mdev = &dev->mt76;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct sk_buff *skb = NULL;
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
if (!(mdev->beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb,
&mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) |
dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
......@@ -64,9 +66,10 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
data->count[mvif->idx]++;
}
void mt7603_pre_tbtt_tasklet(unsigned long arg)
void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
{
struct mt7603_dev *dev = (struct mt7603_dev *)arg;
struct mt7603_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
struct mt76_dev *mdev = &dev->mt76;
struct mt76_queue *q;
struct beacon_bc_data data = {};
struct sk_buff *skb;
......@@ -78,7 +81,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
data.dev = dev;
__skb_queue_head_init(&data.q);
q = dev->mt76.q_tx[MT_TXQ_BEACON];
q = dev->mphy.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
......@@ -89,13 +92,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
/* Flush all previous CAB queue packets */
mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0));
mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false);
mt76_csa_check(&dev->mt76);
if (dev->mt76.csa_complete)
mt76_csa_check(mdev);
if (mdev->csa_complete)
goto out;
q = dev->mt76.q_tx[MT_TXQ_CAB];
q = dev->mphy.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
......@@ -120,7 +123,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
......@@ -135,9 +138,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
((1 << (MT7603_MAX_INTERFACES - 1)) - 1)));
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued >
hweight8(dev->mt76.beacon_mask))
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false);
if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask))
dev->beacon_check++;
}
......
......@@ -4,27 +4,6 @@
#include "mac.h"
#include "../dma.h"
static int
mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
dev->mt76.q_tx[qid] = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
}
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
......@@ -152,14 +131,16 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
dev->tx_dma_check = 0;
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
if (napi_complete_done(napi, 0))
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
mt7603_mac_sta_poll(dev);
......@@ -191,32 +172,42 @@ int mt7603_dma_init(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
MT7603_TX_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7603_TX_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
}
ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT7603_PSD_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
mt7603_irq_enable(dev,
MT_INT_TX_DONE(IEEE80211_AC_VO) |
MT_INT_TX_DONE(IEEE80211_AC_VI) |
MT_INT_TX_DONE(IEEE80211_AC_BE) |
MT_INT_TX_DONE(IEEE80211_AC_BK) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_BCN) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_BMC));
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
......
......@@ -141,6 +141,7 @@ static int mt7603_check_eeprom(struct mt76_dev *dev)
switch (val) {
case 0x7628:
case 0x7603:
case 0x7600:
return 0;
default:
return -EINVAL;
......@@ -170,8 +171,8 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
}
eeprom = (u8 *)dev->mt76.eeprom.data;
dev->mt76.cap.has_2ghz = true;
memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
dev->mphy.cap.has_2ghz = true;
memcpy(dev->mphy.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
/* Check for 1SS devices */
dev->mphy.antenna_mask = 3;
......@@ -180,7 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
is_mt7688(dev))
dev->mphy.antenna_mask = 1;
mt76_eeprom_override(&dev->mt76);
mt76_eeprom_override(&dev->mphy);
return 0;
}
......@@ -533,8 +533,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
spin_lock_init(&dev->ps_lock);
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet);
dev->slottime = 9;
dev->sensitivity_limit = 28;
......@@ -557,6 +556,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
......
......@@ -445,7 +445,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
for (i = 0; i < 4; i++) {
struct mt76_queue *q = dev->mt76.q_tx[i];
struct mt76_queue *q = dev->mphy.q_tx[i];
u8 qidx = q->hw_idx;
u8 tid = ac_to_tid[i];
u32 txtime = airtime[qidx];
......@@ -896,7 +896,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
struct mt76_queue *q = dev->mt76.q_tx[qid];
struct mt76_queue *q = dev->mphy.q_tx[qid];
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
......@@ -1434,8 +1434,9 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
......@@ -1514,7 +1515,7 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
int i;
for (i = 0; i < 4; i++) {
q = dev->mt76.q_tx[i];
q = dev->mphy.q_tx[i];
if (!q->queued)
continue;
......
......@@ -380,9 +380,11 @@ mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
skb, 0);
while ((skb = __skb_dequeue(list)) != NULL) {
int qid = skb_get_queue_mapping(skb);
mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[qid], skb, 0);
}
}
void
......@@ -392,7 +394,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76_stop_tx_queues(&dev->mphy, sta, true);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
......@@ -512,7 +514,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
u16 cw_max = (1 << 10) - 1;
u32 val;
queue = dev->mt76.q_tx[queue]->hw_idx;
queue = dev->mphy.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
......
......@@ -14,14 +14,38 @@ struct mt7603_fw_trailer {
} __packed;
static int
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
mt7603_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_mcu_rxd *rxd;
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n",
cmd, seq);
dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
return -ETIMEDOUT;
}
rxd = (struct mt7603_mcu_rxd *)skb->data;
if (seq != rxd->seq)
return -EAGAIN;
return 0;
}
static int
mt7603_mcu_skb_send_msg(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
struct mt76_dev *mdev = &dev->mt76;
struct mt7603_mcu_txd *txd;
u8 seq;
mdev->mcu.timeout = 3 * HZ;
seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
seq = ++mdev->mcu.msg_seq & 0xf;
......@@ -49,56 +73,7 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
if (wait_seq)
*wait_seq = seq;
return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
}
static int
mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
unsigned long expires = jiffies + 3 * HZ;
struct mt7603_mcu_rxd *rxd;
struct sk_buff *skb;
int ret, seq;
skb = mt76_mcu_msg_alloc(mdev, data, len);
if (!skb)
return -ENOMEM;
mutex_lock(&mdev->mcu.mutex);
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
goto out;
while (wait_resp) {
bool check_seq = false;
skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n",
cmd, seq);
dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
ret = -ETIMEDOUT;
break;
}
rxd = (struct mt7603_mcu_rxd *)skb->data;
if (seq == rxd->seq)
check_seq = true;
dev_kfree_skb(skb);
if (check_seq)
break;
}
out:
mutex_unlock(&mdev->mcu.mutex);
return ret;
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, 0);
}
static int
......@@ -114,29 +89,8 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
.mode = cpu_to_le32(BIT(31)),
};
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
&req, sizeof(req), true);
}
static int
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
{
int cur_len, ret = 0;
while (len > 0) {
cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
len);
ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
data, cur_len, false);
if (ret)
break;
data += cur_len;
len -= cur_len;
}
return ret;
return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
&req, sizeof(req), true);
}
static int
......@@ -150,15 +104,14 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
.addr = cpu_to_le32(addr),
};
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
&req, sizeof(req), true);
return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, &req,
sizeof(req), true);
}
static int
mt7603_mcu_restart(struct mt76_dev *dev)
{
return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
NULL, 0, true);
return mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL, 0, true);
}
static int mt7603_load_firmware(struct mt7603_dev *dev)
......@@ -226,7 +179,8 @@ static int mt7603_load_firmware(struct mt7603_dev *dev)
goto out;
}
ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len);
ret = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER,
fw->data, dl_len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
goto out;
......@@ -266,7 +220,8 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
{
static const struct mt76_mcu_ops mt7603_mcu_ops = {
.headroom = sizeof(struct mt7603_mcu_txd),
.mcu_send_msg = mt7603_mcu_msg_send,
.mcu_skb_send_msg = mt7603_mcu_skb_send_msg,
.mcu_parse_response = mt7603_mcu_parse_response,
.mcu_restart = mt7603_mcu_restart,
};
......@@ -377,8 +332,8 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
data[i].val = eep[req_fields[i]];
}
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
req, len, true);
ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
req, len, true);
kfree(req);
return ret;
......@@ -424,8 +379,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
sizeof(req.temp_comp_power));
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
&req, sizeof(req), true);
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
&req, sizeof(req), true);
}
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
......@@ -470,8 +425,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
&req, sizeof(req), true);
ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH, &req,
sizeof(req), true);
if (ret)
return ret;
......
......@@ -256,7 +256,7 @@ void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7603_pre_tbtt_tasklet(unsigned long arg);
void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t);
void mt7603_update_channel(struct mt76_dev *mdev);
......
......@@ -57,7 +57,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
mt76_free_device(&dev->mt76);
return ret;
}
......
......@@ -55,11 +55,26 @@ static int
mt7615_pm_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
int ret = 0;
if (!mt7615_wait_for_mcu_init(dev))
return 0;
return mt7615_pm_set_enable(dev, val);
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
return -EOPNOTSUPP;
mt7615_mutex_acquire(dev);
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
goto out;
}
dev->pm.enable = val;
out:
mt7615_mutex_release(dev);
return ret;
}
static int
......@@ -172,7 +187,7 @@ mt7615_reset_test_set(void *data, u64 val)
skb_put(skb, 1);
mt7615_mutex_acquire(dev);
mt76_tx_queue_skb_raw(dev, 0, skb, 0);
mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[0], skb, 0);
mt7615_mutex_release(dev);
return 0;
......@@ -317,21 +332,18 @@ static int
mt7615_queues_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
static const struct {
struct {
struct mt76_queue *q;
char *queue;
int id;
} queue_map[] = {
{ "PDMA0", MT_TXQ_BE },
{ "MCUQ", MT_TXQ_MCU },
{ "MCUFWQ", MT_TXQ_FWDL },
{ dev->mphy.q_tx[MT_TXQ_BE], "PDMA0" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUQ" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
};
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
if (!q)
continue;
struct mt76_queue *q = queue_map[i].q;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
......@@ -365,6 +377,107 @@ mt7615_rf_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set,
"0x%08llx\n");
static ssize_t
mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mt7615_dev *dev = file->private_data;
char buf[32 * ((ETH_ALEN * 3) + 4) + 1];
u8 addr[ETH_ALEN];
int ofs = 0;
int i;
for (i = 0; i < 32; i++) {
if (!(dev->muar_mask & BIT(i)))
continue;
mt76_wr(dev, MT_WF_RMAC_MAR1,
FIELD_PREP(MT_WF_RMAC_MAR1_IDX, i * 2) |
MT_WF_RMAC_MAR1_START);
put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr);
put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) &
MT_WF_RMAC_MAR1_ADDR), addr + 4);
ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr);
}
return simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
}
static ssize_t
mt7615_ext_mac_addr_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mt7615_dev *dev = file->private_data;
unsigned long idx = 0;
u8 addr[ETH_ALEN];
char buf[32];
char *p;
if (count > sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
p = strchr(buf, '=');
if (p) {
*p = 0;
p++;
if (kstrtoul(buf, 0, &idx) || idx > 31)
return -EINVAL;
} else {
idx = 0;
p = buf;
}
if (!mac_pton(p, addr))
return -EINVAL;
if (is_valid_ether_addr(addr)) {
dev->muar_mask |= BIT(idx);
} else {
memset(addr, 0, sizeof(addr));
dev->muar_mask &= ~BIT(idx);
}
mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, 1);
mt76_wr(dev, MT_WF_RMAC_MAR0, get_unaligned_le32(addr));
mt76_wr(dev, MT_WF_RMAC_MAR1,
get_unaligned_le16(addr + 4) |
FIELD_PREP(MT_WF_RMAC_MAR1_IDX, idx * 2) |
MT_WF_RMAC_MAR1_START |
MT_WF_RMAC_MAR1_WRITE);
mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, !!dev->muar_mask);
return count;
}
static const struct file_operations fops_ext_mac_addr = {
.open = simple_open,
.llseek = generic_file_llseek,
.read = mt7615_ext_mac_addr_read,
.write = mt7615_ext_mac_addr_write,
.owner = THIS_MODULE,
};
static int
mt7663s_sched_quota_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
struct mt76_sdio *sdio = &dev->mt76.sdio;
seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota);
seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota);
seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota);
seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit);
return 0;
}
int mt7615_init_debugfs(struct mt7615_dev *dev)
{
struct dentry *dir;
......@@ -406,11 +519,15 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
&fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr);
debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg);
debugfs_create_file_unsafe("rf_regval", 0600, dir, dev,
&fops_rf_reg);
if (mt76_is_sdio(&dev->mt76))
debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir,
mt7663s_sched_quota_read);
return 0;
}
......
......@@ -11,25 +11,6 @@
#include "../dma.h"
#include "mac.h"
static int
mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
dev->mt76.q_tx[qid] = hwq;
return 0;
}
static int
mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
......@@ -43,20 +24,21 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
int i;
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2);
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2,
MT_TX_RING_BASE);
if (ret)
return ret;
}
ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE,
MT_TX_RING_BASE);
if (ret)
return ret;
ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
return ret;
return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7622_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
}
static int
......@@ -64,25 +46,24 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
{
int ret, i;
ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
MT_TX_RING_BASE);
if (ret)
return ret;
for (i = 1; i < MT_TXQ_MCU; i++)
dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
for (i = 1; i <= MT_TXQ_PSD ; i++)
dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE);
return 0;
return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
}
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
......@@ -91,7 +72,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
if (napi_complete_done(napi, 0))
mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
......@@ -202,7 +183,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
int ret;
/* Increase buffer size to receive large VHT MPDUs */
if (dev->mt76.cap.has_5ghz)
if (dev->mphy.cap.has_5ghz)
rx_buf_size *= 2;
mt76_dma_attach(&dev->mt76);
......
......@@ -99,20 +99,20 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
if (is_mt7663(&dev->mt76)) {
/* dual band */
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
return;
}
if (is_mt7622(&dev->mt76)) {
/* 2GHz only */
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
return;
}
if (is_mt7611(&dev->mt76)) {
/* 5GHz only */
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
return;
}
......@@ -120,17 +120,17 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
eeprom[MT_EE_WIFI_CONF]);
switch (val) {
case MT_EE_5GHZ:
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
break;
case MT_EE_DBDC:
dev->dbdc_support = true;
/* fall through */
fallthrough;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
}
}
......@@ -342,10 +342,10 @@ int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
}
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mt76_eeprom_override(&dev->mt76);
mt76_eeprom_override(&dev->mphy);
return 0;
}
......
......@@ -221,7 +221,7 @@ static const struct ieee80211_iface_combination if_comb_radar[] = {
{
.limits = if_limits,
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 4,
.max_interfaces = MT7615_MAX_INTERFACES,
.num_different_channels = 1,
.beacon_int_infra_match = true,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
......@@ -237,7 +237,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 4,
.max_interfaces = MT7615_MAX_INTERFACES,
.num_different_channels = 1,
.beacon_int_infra_match = true,
}
......@@ -385,7 +385,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
struct mt76_phy *mphy;
int ret;
int i, ret;
if (!is_mt7615(&dev->mt76))
return -EOPNOTSUPP;
......@@ -422,14 +422,21 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
* Make the secondary PHY MAC address local without overlapping with
* the usual MAC address allocation scheme on multiple virtual interfaces
*/
mphy->hw->wiphy->perm_addr[0] |= 2;
mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
mt76_eeprom_override(mphy);
/* second phy can only handle 5 GHz */
mphy->sband_2g.sband.n_channels = 0;
mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
mphy->cap.has_5ghz = true;
ret = mt76_register_phy(mphy);
/* mt7615 second phy shares the same hw queues with the primary one */
for (i = 0; i <= MT_TXQ_PSD ; i++)
mphy->q_tx[i] = dev->mphy.q_tx[i];
ret = mt76_register_phy(mphy, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
ieee80211_free_hw(mphy->hw);
......
......@@ -215,8 +215,8 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
dev->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
dev->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}
......@@ -915,22 +915,20 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *rates)
{
struct mt7615_dev *dev = phy->dev;
struct mt7615_wtbl_desc *wd;
struct mt7615_wtbl_rate_desc *wrd;
if (work_pending(&dev->wtbl_work))
if (work_pending(&dev->rate_work))
return -EBUSY;
wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
if (!wd)
wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
if (!wrd)
return -ENOMEM;
wd->type = MT7615_WTBL_RATE_DESC;
wd->sta = sta;
wrd->sta = sta;
mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
&wd->rate);
list_add_tail(&wd->node, &dev->wd_head);
queue_work(dev->mt76.wq, &dev->wtbl_work);
&wrd->rate);
list_add_tail(&wrd->node, &dev->wrd_head);
queue_work(dev->mt76.wq, &dev->rate_work);
return 0;
}
......@@ -1030,31 +1028,33 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
u8 *key, u8 keylen,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
if (keylen > sizeof(data))
if (key->keylen > sizeof(data))
return -EINVAL;
mt76_rr_copy(dev, addr, data, sizeof(data));
if (cmd == SET_KEY) {
if (cipher == MT_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
memcpy(data + 16, key + 24, 8);
memcpy(data + 24, key + 16, 8);
memcpy(data, key->key, 16);
memcpy(data + 16, key->key + 24, 8);
memcpy(data + 24, key->key + 16, 8);
} else {
if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
memmove(data + 16, data, 16);
if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
memcpy(data, key->key, key->keylen);
else if (cipher == MT_CIPHER_BIP_CMAC_128)
memcpy(data + 16, key->key, 16);
}
if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
memmove(data + 16, data, 16);
if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
memcpy(data, key, keylen);
else if (cipher == MT_CIPHER_BIP_CMAC_128)
memcpy(data + 16, key, 16);
} else {
if (wcid->cipher & ~BIT(cipher)) {
if (cipher != MT_CIPHER_BIP_CMAC_128)
......@@ -1068,12 +1068,11 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key);
int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
int keyidx, enum set_key_cmd cmd)
static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
int keyidx, enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
......@@ -1105,12 +1104,11 @@ int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk);
void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
......@@ -1128,12 +1126,11 @@ void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
}
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
enum mt7615_cipher_type cipher;
int err;
......@@ -1142,25 +1139,32 @@ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
if (cipher == MT_CIPHER_NONE)
return -EOPNOTSUPP;
spin_lock_bh(&dev->mt76.lock);
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
cipher, cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
if (err < 0)
goto out;
return err;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
cmd);
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
if (err < 0)
goto out;
return err;
if (cmd == SET_KEY)
wcid->cipher |= BIT(cipher);
else
wcid->cipher &= ~BIT(cipher);
out:
return 0;
}
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
int err;
spin_lock_bh(&dev->mt76.lock);
err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
spin_unlock_bh(&dev->mt76.lock);
return err;
......@@ -1431,12 +1435,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
if (is_mt7615(&dev->mt76)) {
mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
} else {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
mt76_queue_tx_cleanup(dev, i, false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
......@@ -1969,49 +1973,6 @@ void mt7615_pm_power_save_work(struct work_struct *work)
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
static void
mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct mt7615_phy *phy = priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
return;
if (dev->pm.enable) {
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mt76_set(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
} else {
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
mt76_clear(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
}
int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
{
struct mt76_phy *mphy = dev->phy.mt76;
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
return -EOPNOTSUPP;
mt7615_mutex_acquire(dev);
if (dev->pm.enable == enable)
goto out;
dev->pm.enable = enable;
ieee80211_iterate_active_interfaces(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_pm_interface_iter, mphy->priv);
out:
mt7615_mutex_release(dev);
return 0;
}
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_phy *phy;
......@@ -2083,8 +2044,9 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
usleep_range(1000, 2000);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
......@@ -2314,3 +2276,46 @@ int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
mt7615_dfs_stop_radar_detector(phy);
return 0;
}
int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
struct ieee80211_vif *vif,
bool enable)
{
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int err;
if (!mt7615_firmware_offload(dev))
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
return 0;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
if (enable)
phy->n_beacon_vif++;
else
phy->n_beacon_vif--;
fallthrough;
default:
break;
}
err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
if (err)
return err;
if (phy->n_beacon_vif) {
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
mt76_clear(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
} else {
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mt76_set(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
return 0;
}
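An illustrative userspace sketch (not part of the patch) of the n_beacon_vif accounting used above: other-BSS beacons are only dropped while no local AP/mesh/IBSS interface is beaconing, and filtering is restored once the count drops back to zero. The struct and function names here are simplified stand-ins for the driver state.

#include <stdbool.h>
#include <stdio.h>

struct phy {
	int n_beacon_vif;
	bool beacon_filter;
};

static void set_beacon_filter(struct phy *phy, bool beaconing_vif, bool enable)
{
	if (beaconing_vif)
		phy->n_beacon_vif += enable ? 1 : -1;
	/* drop other-BSS beacons only while no local interface beacons */
	phy->beacon_filter = !phy->n_beacon_vif;
}

int main(void)
{
	struct phy phy = {};

	set_beacon_filter(&phy, true, true);		/* add an AP vif */
	printf("filter=%d\n", phy.beacon_filter);	/* 0: keep beacons */
	set_beacon_filter(&phy, true, false);		/* remove it again */
	printf("filter=%d\n", phy.beacon_filter);	/* 1: filter again */
	return 0;
}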
......@@ -115,29 +115,50 @@ static void mt7615_stop(struct ieee80211_hw *hw)
mt7615_mutex_release(dev);
}
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
static inline int get_free_idx(u32 mask, u8 start, u8 end)
{
return ffs(~mask & GENMASK(end, start));
}
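A minimal userspace sketch of the slot-selection helper above, assuming a simplified 32-bit GENMASK(); ffs() semantics match the kernel's (1-based index of the lowest set bit, 0 when no slot is free), which is why get_omac_idx() below returns i - 1.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

static int get_free_idx(uint32_t mask, uint8_t start, uint8_t end)
{
	/* lowest cleared bit inside [start, end], 1-based; 0 if all busy */
	return __builtin_ffs(~mask & GENMASK(end, start));
}

int main(void)
{
	uint32_t omac_mask = 0x3;		/* HW_BSSID_0 and HW_BSSID_1 taken */
	int i = get_free_idx(omac_mask, 1, 3);	/* "prefer hw bssid slot 1-3" */

	if (i)
		printf("free omac idx: %d\n", i - 1);	/* prints 2 */
	return 0;
}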
static int get_omac_idx(enum nl80211_iftype type, u64 mask)
{
int i;
switch (type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
/* ap use hw bssid 0 and ext bssid */
case NL80211_IFTYPE_STATION:
/* prefer hw bssid slot 1-3 */
i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
if (i)
return i - 1;
if (type != NL80211_IFTYPE_STATION)
break;
/* next, try to find a free repeater entry for the sta */
i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
REPEATER_BSSID_MAX - REPEATER_BSSID_START);
if (i)
return i + 32 - 1;
i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
if (i)
return i - 1;
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
if (~mask & BIT(i))
return i;
break;
case NL80211_IFTYPE_STATION:
/* sta use hw bssid other than 0 */
for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
if (~mask & BIT(i))
return i;
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
/* ap uses hw bssid 0 and ext bssid */
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
if (i)
return i - 1;
break;
default:
......@@ -187,8 +208,8 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
dev->mphy.vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
phy->omac_mask |= BIT(mvif->omac_idx);
dev->omac_mask |= BIT_ULL(mvif->omac_idx);
phy->omac_mask |= BIT_ULL(mvif->omac_idx);
mt7615_mcu_set_dbdc(dev);
......@@ -211,15 +232,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out;
if (dev->pm.enable) {
ret = mt7615_mcu_set_bss_pm(dev, vif, true);
if (ret)
goto out;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
mt76_set(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
mt7615_mac_set_beacon_filter(phy, vif, true);
out:
mt7615_mutex_release(dev);
......@@ -245,20 +258,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt7615_free_pending_tx_skbs(dev, msta);
if (dev->pm.enable) {
bool ext_phy = phy != &dev->phy;
mt7615_mcu_set_bss_pm(dev, vif, false);
mt76_clear(dev, MT_WF_RFCR(ext_phy),
MT_WF_RFCR_DROP_OTHER_BEACON);
}
mt7615_mac_set_beacon_filter(phy, vif, false);
mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
phy->omac_mask &= ~BIT(mvif->omac_idx);
dev->omac_mask &= ~BIT_ULL(mvif->omac_idx);
phy->omac_mask &= ~BIT_ULL(mvif->omac_idx);
mt7615_mutex_release(dev);
......@@ -334,39 +341,6 @@ int mt7615_set_channel(struct mt7615_phy *phy)
return ret;
}
static int
mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
struct mt7615_sta *msta,
struct ieee80211_key_conf *key)
{
struct mt7615_wtbl_desc *wd;
wd = kzalloc(sizeof(*wd), GFP_KERNEL);
if (!wd)
return -ENOMEM;
wd->type = MT7615_WTBL_KEY_DESC;
wd->sta = msta;
wd->key.key = kmemdup(key->key, key->keylen, GFP_KERNEL);
if (!wd->key.key) {
kfree(wd);
return -ENOMEM;
}
wd->key.cipher = key->cipher;
wd->key.keyidx = key->keyidx;
wd->key.keylen = key->keylen;
wd->key.cmd = cmd;
spin_lock_bh(&dev->mt76.lock);
list_add_tail(&wd->node, &dev->wd_head);
spin_unlock_bh(&dev->mt76.lock);
queue_work(dev->mt76.wq, &dev->wtbl_work);
return 0;
}
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
......@@ -393,8 +367,6 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_AES_CMAC:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
......@@ -402,6 +374,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
default:
return -EOPNOTSUPP;
}
......@@ -420,7 +394,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (mt76_is_mmio(&dev->mt76))
err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
else
err = mt7615_queue_key_update(dev, cmd, msta, key);
err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
mt7615_mutex_release(dev);
......@@ -511,7 +485,6 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
} while (0)
phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
MT_WF_RFCR_DROP_OTHER_BEACON |
MT_WF_RFCR_DROP_FRAME_REPORT |
MT_WF_RFCR_DROP_PROBEREQ |
MT_WF_RFCR_DROP_MCAST_FILTERED |
......@@ -522,6 +495,9 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_UNWANTED_CTL |
MT_WF_RFCR_DROP_STBC_MULTI);
if (phy->n_beacon_vif || !mt7615_firmware_offload(dev))
phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_BEACON;
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
MT_WF_RFCR_DROP_A3_BSSID);
......@@ -1127,7 +1103,6 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool ext_phy = phy != &dev->phy;
int err = 0;
cancel_delayed_work_sync(&dev->pm.ps_work);
......@@ -1139,8 +1114,6 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
cancel_delayed_work_sync(&phy->scan_work);
cancel_delayed_work_sync(&phy->mac_work);
mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
......@@ -1158,7 +1131,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
bool running, ext_phy = phy != &dev->phy;
bool running;
mt7615_mutex_acquire(dev);
......@@ -1182,7 +1155,6 @@ static int mt7615_resume(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(hw, &phy->mac_work,
MT7615_WATCHDOG_TIME);
mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
mt7615_mutex_release(dev);
......
......@@ -275,6 +275,7 @@ enum {
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
MCU_EXT_CMD_DBDC_CTRL = 0x45,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
MCU_EXT_CMD_MUAR_UPDATE = 0x48,
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
......@@ -477,6 +478,12 @@ struct mt7615_bss_qos_tlv {
u8 pad[3];
} __packed;
enum {
WOW_USB = 1,
WOW_PCIE = 2,
WOW_GPIO = 3,
};
struct mt7615_wow_ctrl_tlv {
__le16 tag;
__le16 len;
......@@ -501,6 +508,16 @@ struct mt7615_wow_ctrl_tlv {
u8 rsv[4];
} __packed;
struct mt7615_wow_gpio_param_tlv {
__le16 tag;
__le16 len;
u8 gpio_pin;
u8 trigger_lvl;
u8 pad[2];
__le32 gpio_interval;
u8 rsv[4];
} __packed;
#define MT7615_WOW_MASK_MAX_LEN 16
#define MT7615_WOW_PATTEN_MAX_LEN 128
struct mt7615_wow_pattern_tlv {
......
......@@ -98,9 +98,9 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
static void mt7615_irq_tasklet(unsigned long data)
static void mt7615_irq_tasklet(struct tasklet_struct *t)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
......@@ -203,7 +203,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
dev = container_of(mdev, struct mt7615_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
tasklet_setup(&dev->irq_tasklet, mt7615_irq_tasklet);
dev->reg_map = map;
dev->ops = ops;
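The tasklet_setup()/from_tasklet() conversion above replaces the unsigned long cast with a container_of() lookup on the embedded tasklet. A rough standalone illustration of that pattern (simplified types, not the kernel API):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tasklet_struct {
	void (*func)(struct tasklet_struct *t);
};

struct my_dev {
	int chip_id;
	struct tasklet_struct irq_tasklet;
};

/* callback receives the tasklet pointer and recovers the owning device */
static void irq_tasklet_fn(struct tasklet_struct *t)
{
	struct my_dev *dev = container_of(t, struct my_dev, irq_tasklet);

	printf("irq tasklet for chip %#x\n", dev->chip_id);
}

int main(void)
{
	struct my_dev dev = { .chip_id = 0x7615 };

	dev.irq_tasklet.func = irq_tasklet_fn;	/* tasklet_setup() analogue */
	dev.irq_tasklet.func(&dev.irq_tasklet);	/* pretend it was scheduled */
	return 0;
}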
......@@ -240,7 +240,8 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
mt76_free_device(&dev->mt76);
return ret;
}
......
......@@ -11,7 +11,7 @@
#include "../mt76.h"
#include "regs.h"
#define MT7615_MAX_INTERFACES 4
#define MT7615_MAX_INTERFACES 16
#define MT7615_MAX_WMM_SETS 4
#define MT7663_WTBL_SIZE 32
#define MT7615_WTBL_SIZE 128
......@@ -106,29 +106,11 @@ struct mt7615_rate_desc {
u8 bw;
};
enum mt7615_wtbl_desc_type {
MT7615_WTBL_RATE_DESC,
MT7615_WTBL_KEY_DESC
};
struct mt7615_key_desc {
enum set_key_cmd cmd;
u32 cipher;
s8 keyidx;
u8 keylen;
u8 *key;
};
struct mt7615_wtbl_desc {
struct mt7615_wtbl_rate_desc {
struct list_head node;
enum mt7615_wtbl_desc_type type;
struct mt7615_rate_desc rate;
struct mt7615_sta *sta;
union {
struct mt7615_rate_desc rate;
struct mt7615_key_desc key;
};
};
struct mt7615_sta {
......@@ -175,8 +157,10 @@ struct mt7615_phy {
struct ieee80211_vif *monitor_vif;
u8 n_beacon_vif;
u32 rxfilter;
u32 omac_mask;
u64 omac_mask;
u16 noise;
......@@ -254,7 +238,7 @@ struct mt7615_dev {
struct tasklet_struct irq_tasklet;
struct mt7615_phy phy;
u32 omac_mask;
u64 omac_mask;
u16 chainmask;
......@@ -289,20 +273,22 @@ struct mt7615_dev {
u8 fw_ver;
struct work_struct wtbl_work;
struct list_head wd_head;
struct work_struct rate_work;
struct list_head wrd_head;
u32 debugfs_rf_wf;
u32 debugfs_rf_reg;
u32 muar_mask;
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 *reg_backup;
s16 last_freq_offset;
u8 last_rcpi[4];
s8 last_ib_rssi;
s8 last_wb_rssi;
s8 last_ib_rssi[4];
s8 last_wb_rssi[4];
} test;
#endif
......@@ -344,24 +330,13 @@ enum {
HW_BSSID_1,
HW_BSSID_2,
HW_BSSID_3,
HW_BSSID_MAX,
HW_BSSID_MAX = HW_BSSID_3,
EXT_BSSID_START = 0x10,
EXT_BSSID_1,
EXT_BSSID_2,
EXT_BSSID_3,
EXT_BSSID_4,
EXT_BSSID_5,
EXT_BSSID_6,
EXT_BSSID_7,
EXT_BSSID_8,
EXT_BSSID_9,
EXT_BSSID_10,
EXT_BSSID_11,
EXT_BSSID_12,
EXT_BSSID_13,
EXT_BSSID_14,
EXT_BSSID_15,
EXT_BSSID_END
EXT_BSSID_15 = 0x1f,
EXT_BSSID_MAX = EXT_BSSID_15,
REPEATER_BSSID_START = 0x20,
REPEATER_BSSID_MAX = 0x3f,
};
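With repeater entries occupying indices 0x20-0x3f, a 32-bit omac mask can no longer represent every slot, which is why the mask fields and their BIT() users switch to u64/BIT_ULL() in this series. A small illustrative sketch of the difference:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))	/* only 32 bits wide on 32-bit builds */
#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits wide */

int main(void)
{
	uint64_t omac_mask = 0;
	int idx = 0x20;			/* REPEATER_BSSID_START */

	omac_mask |= BIT_ULL(idx);	/* bit 32 is always representable */
	printf("mask %#llx\n", (unsigned long long)omac_mask);
	return 0;
}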
enum {
......@@ -452,7 +427,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable);
void mt7615_pm_wake_work(struct work_struct *work);
int mt7615_pm_wake(struct mt7615_dev *dev);
void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
......@@ -542,7 +516,7 @@ static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
{
return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
}
void mt7615_dma_reset(struct mt7615_dev *dev);
......@@ -568,28 +542,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct ieee80211_sta *sta, int pid,
struct ieee80211_key_conf *key, bool beacon);
void mt7615_mac_set_timing(struct mt7615_phy *phy);
int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
int keyidx, enum set_key_cmd cmd);
void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd);
int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
struct mt76_wcid *wcid,
u8 *key, u8 keylen,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd);
void mt7615_mac_reset_work(struct work_struct *work);
u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp);
int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
......@@ -651,6 +615,9 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
int mt7615_init_debugfs(struct mt7615_dev *dev);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
struct ieee80211_vif *vif,
bool enable);
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
......@@ -674,14 +641,13 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
struct mt76_queue_entry *e);
void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
/* sdio */
u32 mt7663s_read_pcr(struct mt7615_dev *dev);
int mt7663s_mcu_init(struct mt7615_dev *dev);
void mt7663s_tx_work(struct work_struct *work);
void mt7663s_txrx_worker(struct mt76_worker *w);
void mt7663s_rx_work(struct work_struct *work);
void mt7663s_sdio_irq(struct sdio_func *func);
......
......@@ -16,8 +16,15 @@ static void mt7615_init_work(struct work_struct *work)
{
struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
mcu_work);
int i, ret;
if (mt7615_mcu_init(dev))
ret = mt7615_mcu_init(dev);
for (i = 0; (ret == -EAGAIN) && (i < 10); i++) {
msleep(200);
ret = mt7615_mcu_init(dev);
}
if (ret)
return;
mt7615_mcu_set_eeprom(dev);
......
......@@ -333,6 +333,9 @@ enum mt7615_reg_base {
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
#define MT_WF_RMAC_MORE(_band) MT_WF_RMAC((_band) ? 0x124 : 0x024)
#define MT_WF_RMAC_MORE_MUAR_MODE GENMASK(31, 30)
#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
#define MT_WF_RFCR1_DROP_ACK BIT(4)
#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
......@@ -342,6 +345,14 @@ enum mt7615_reg_base {
#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 0x130 : 0x030)
#define MT_WF_RMAC_MAR0 MT_WF_RMAC(0x025c)
#define MT_WF_RMAC_MAR1 MT_WF_RMAC(0x0260)
#define MT_WF_RMAC_MAR1_ADDR GENMASK(15, 0)
#define MT_WF_RMAC_MAR1_START BIT(16)
#define MT_WF_RMAC_MAR1_WRITE BIT(17)
#define MT_WF_RMAC_MAR1_IDX GENMASK(29, 24)
#define MT_WF_RMAC_MAR1_GROUP GENMASK(31, 30)
#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
......
......@@ -294,30 +294,6 @@ static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
return ret;
}
static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt76_sdio *sdio = &mdev->sdio;
u32 pse, ple;
int err;
err = mt7615_mac_sta_add(mdev, vif, sta);
if (err < 0)
return err;
/* init sched data quota */
pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_data_quota = pse;
sdio->sched.ple_data_quota = ple;
mutex_unlock(&sdio->sched.lock);
return 0;
}
static int mt7663s_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
......@@ -329,7 +305,7 @@ static int mt7663s_probe(struct sdio_func *func,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7663s_sta_add,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
......@@ -366,14 +342,11 @@ static int mt7663s_probe(struct sdio_func *func,
ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
goto err_free;
INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work);
INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work);
goto error;
ret = mt7663s_hw_init(dev, func);
if (ret)
goto err_deinit;
goto error;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
......@@ -384,7 +357,7 @@ static int mt7663s_probe(struct sdio_func *func,
GFP_KERNEL);
if (!mdev->sdio.intr_data) {
ret = -ENOMEM;
goto err_deinit;
goto error;
}
for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
......@@ -393,23 +366,29 @@ static int mt7663s_probe(struct sdio_func *func,
GFP_KERNEL);
if (!mdev->sdio.xmit_buf[i]) {
ret = -ENOMEM;
goto err_deinit;
goto error;
}
}
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
goto err_deinit;
goto error;
ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker,
mt7663s_txrx_worker, "sdio-txrx");
if (ret)
goto error;
sched_set_fifo_low(mdev->sdio.txrx_worker.task);
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
goto err_deinit;
goto error;
return 0;
err_deinit:
error:
mt76s_deinit(&dev->mt76);
err_free:
mt76_free_device(&dev->mt76);
return ret;
......@@ -432,6 +411,7 @@ static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct mt7615_dev *mdev = sdio_get_drvdata(func);
int err;
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
mt7615_firmware_offload(mdev)) {
......@@ -444,9 +424,20 @@ static int mt7663s_suspend(struct device *dev)
sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
mt76s_stop_txrx(&mdev->mt76);
err = mt7615_mcu_set_fw_ctrl(mdev);
if (err)
return err;
return mt7615_mcu_set_fw_ctrl(mdev);
mt76_worker_disable(&mdev->mt76.sdio.txrx_worker);
mt76_worker_disable(&mdev->mt76.sdio.status_worker);
mt76_worker_disable(&mdev->mt76.sdio.net_worker);
cancel_work_sync(&mdev->mt76.sdio.stat_work);
clear_bit(MT76_READING_STATS, &mdev->mphy.state);
mt76_tx_status_check(&mdev->mt76, NULL, true);
return 0;
}
static int mt7663s_resume(struct device *dev)
......@@ -455,6 +446,10 @@ static int mt7663s_resume(struct device *dev)
struct mt7615_dev *mdev = sdio_get_drvdata(func);
int err;
mt76_worker_enable(&mdev->mt76.sdio.txrx_worker);
mt76_worker_enable(&mdev->mt76.sdio.status_worker);
mt76_worker_enable(&mdev->mt76.sdio.net_worker);
err = mt7615_mcu_set_drv_ctrl(mdev);
if (err)
return err;
......
......@@ -19,46 +19,34 @@
static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
{
struct mt76_sdio *sdio = &dev->mt76.sdio;
u32 pse0, ple, pse1, txdwcnt;
pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA);
ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
u32 txdwcnt;
sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP,
MT_HIF0_MIN_QUOTA);
sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP,
MT_HIF1_MIN_QUOTA);
sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP,
MT_HIF0_MIN_QUOTA);
txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT,
MT_PP_TXDWCNT_TX1_ADD_DW_CNT);
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_data_quota = pse0;
sdio->sched.ple_data_quota = ple;
sdio->sched.pse_mcu_quota = pse1;
sdio->sched.deficit = txdwcnt << 2;
mutex_unlock(&sdio->sched.lock);
return 0;
}
static int
mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int ret, seq;
mutex_lock(&mdev->mcu.mutex);
int ret;
mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, 0);
if (ret)
goto out;
mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]);
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
return ret;
out:
mutex_unlock(&mdev->mcu.mutex);
mt76_queue_kick(dev, mdev->q_mcu[MT_MCUQ_WM]);
return ret;
}
......@@ -127,7 +115,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.headroom = sizeof(struct mt7615_mcu_txd),
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663s_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_parse_response = mt7615_mcu_parse_response,
.mcu_restart = mt7615_mcu_restart,
.mcu_rr = mt7615_mcu_reg_rr,
.mcu_wr = mt7615_mcu_reg_wr,
......
......@@ -46,11 +46,9 @@ static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
return 0;
mutex_lock(&sdio->sched.lock);
sdio->sched.pse_mcu_quota += pse_mcu_quota;
sdio->sched.pse_data_quota += pse_data_quota;
sdio->sched.ple_data_quota += ple_data_quota;
mutex_unlock(&sdio->sched.lock);
return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
......@@ -105,10 +103,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
sdio_release_host(sdio->func);
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
__free_pages(page, order);
......@@ -138,19 +133,52 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
return i;
}
static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
int buf_sz, int *pse_size, int *ple_size)
static int mt7663s_rx_handler(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
struct mt76s_intr *intr = sdio->intr_data;
int nframes = 0, ret;
ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
if (ret < 0)
return ret;
trace_dev_irq(dev, intr->isr, 0);
if (intr->isr & WHIER_RX0_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 0, intr);
if (ret > 0) {
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
if (intr->isr & WHIER_RX1_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 1, intr);
if (ret > 0) {
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr);
return nframes;
}
static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
int *pse_size, int *ple_size)
{
int pse_sz;
pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
if (qid == MT_TXQ_MCU) {
if (mcu) {
if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
return -EBUSY;
} else {
if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
sdio->sched.ple_data_quota < *ple_size)
sdio->sched.ple_data_quota < *ple_size + 1)
return -EBUSY;
*ple_size = *ple_size + 1;
......@@ -160,17 +188,15 @@ static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
return 0;
}
static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu,
int pse_size, int ple_size)
{
mutex_lock(&sdio->sched.lock);
if (qid == MT_TXQ_MCU) {
if (mcu) {
sdio->sched.pse_mcu_quota -= pse_size;
} else {
sdio->sched.pse_data_quota -= pse_size;
sdio->sched.ple_data_quota -= ple_size;
}
mutex_unlock(&sdio->sched.lock);
}
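A rough userspace model of the quota bookkeeping above, under made-up page sizes and quota values: each aggregated data frame reserves DIV_ROUND_UP(len, page size) PSE pages plus one PLE page, while MCU frames only draw from the PSE MCU quota.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PSE_PAGE_SZ		128	/* illustrative value only */

struct sched_quota {
	int pse_data, ple_data, pse_mcu;
};

static bool pick_quota(struct sched_quota *q, bool mcu, int buf_sz,
		       int *pse_size, int *ple_size)
{
	int pse_sz = DIV_ROUND_UP(buf_sz, PSE_PAGE_SZ);

	if (mcu) {
		if (q->pse_mcu < *pse_size + pse_sz)
			return false;
	} else {
		if (q->pse_data < *pse_size + pse_sz ||
		    q->ple_data < *ple_size + 1)
			return false;
		*ple_size += 1;
	}
	*pse_size += pse_sz;
	return true;
}

int main(void)
{
	struct sched_quota q = { .pse_data = 8, .ple_data = 2 };
	int pse = 0, ple = 0, frames = 0;

	/* aggregate 300-byte data frames until one of the quotas runs out */
	while (pick_quota(&q, false, 300, &pse, &ple))
		frames++;
	printf("frames=%d pse=%d ple=%d\n", frames, pse, ple);
	return 0;
}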
static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
......@@ -181,22 +207,20 @@ static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
sdio_release_host(sdio->func);
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
return err;
}
static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
struct mt76_queue *q = dev->q_tx[qid];
int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
......@@ -214,7 +238,7 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
break;
if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz,
if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
......@@ -240,78 +264,44 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
if (err)
return err;
}
mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz);
mt7663s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);
return nframes;
}
void mt7663s_tx_work(struct work_struct *work)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
tx.xmit_work);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes = 0;
mt76_worker_schedule(&sdio->status_worker);
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
ret = mt7663s_tx_run_queue(dev, i);
if (ret < 0)
break;
nframes += ret;
}
if (nframes)
queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
queue_work(sdio->txrx_wq, &sdio->tx.status_work);
return nframes;
}
void mt7663s_rx_work(struct work_struct *work)
void mt7663s_txrx_worker(struct mt76_worker *w)
{
struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
rx.recv_work);
struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
txrx_worker);
struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
struct mt76s_intr *intr = sdio->intr_data;
int nframes = 0, ret;
int i, nframes, ret;
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
sdio_release_host(sdio->func);
if (ret < 0)
goto out;
trace_dev_irq(dev, intr->isr, 0);
do {
nframes = 0;
if (intr->isr & WHIER_RX0_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 0, intr);
if (ret > 0) {
queue_work(sdio->txrx_wq, &sdio->rx.net_work);
nframes += ret;
/* tx */
for (i = 0; i <= MT_TXQ_PSD; i++) {
ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]);
if (ret > 0)
nframes += ret;
}
}
if (intr->isr & WHIER_RX1_DONE_INT_EN) {
ret = mt7663s_rx_run_queue(dev, 1, intr);
if (ret > 0) {
queue_work(sdio->txrx_wq, &sdio->rx.net_work);
ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
if (ret > 0)
nframes += ret;
}
}
if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
/* rx */
ret = mt7663s_rx_handler(dev);
if (ret > 0)
nframes += ret;
} while (nframes > 0);
if (nframes) {
queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
return;
}
out:
/* enable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
......@@ -324,5 +314,5 @@ void mt7663s_sdio_irq(struct sdio_func *func)
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
return;
queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
mt76_worker_schedule(&sdio->txrx_worker);
}
......@@ -90,8 +90,8 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
data[ret - MT_EE_NIC_CONF_0] = tx_power[i];
}
return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
}
static void
......@@ -335,9 +335,7 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
if (!rx)
return -ENOMEM;
if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) ||
nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) ||
nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi))
if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset))
return -ENOMEM;
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
......@@ -350,6 +348,26 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
nla_nest_end(msg, rssi);
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_IB_RSSI);
if (!rssi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->test.last_ib_rssi); i++)
if (nla_put_s8(msg, i, dev->test.last_ib_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_WB_RSSI);
if (!rssi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->test.last_wb_rssi); i++)
if (nla_put_s8(msg, i, dev->test.last_wb_rssi[i]))
return -ENOMEM;
nla_nest_end(msg, rssi);
nla_nest_end(msg, rx);
return 0;
......
......@@ -126,21 +126,20 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
goto error_free_q;
goto error;
ret = mt76u_alloc_queues(&dev->mt76);
if (ret)
goto error_free_q;
goto error;
ret = mt7663_usb_sdio_register_device(dev);
if (ret)
goto error_free_q;
goto error;
return 0;
error_free_q:
mt76u_queues_deinit(&dev->mt76);
error:
mt76u_queues_deinit(&dev->mt76);
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
......
......@@ -15,14 +15,12 @@
static int
mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
int cmd, int *seq)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
int ret, seq, ep, len, pad;
int ret, ep, len, pad;
mutex_lock(&mdev->mcu.mutex);
mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
mt7615_mcu_fill_msg(dev, skb, cmd, seq);
if (cmd != MCU_CMD_FW_SCATTER)
ep = MT_EP_OUT_INBAND_CMD;
else
......@@ -37,14 +35,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
1000, ep);
if (ret < 0)
goto out;
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
out:
mutex_unlock(&mdev->mcu.mutex);
dev_kfree_skb(skb);
return ret;
......@@ -56,7 +48,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
.headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd),
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663u_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_parse_response = mt7615_mcu_parse_response,
.mcu_restart = mt7615_mcu_restart,
};
int ret;
......
......@@ -61,12 +61,11 @@ mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
skb_push(skb, MT_USB_TXD_SIZE);
}
static int
mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
struct mt7615_wtbl_rate_desc *wrd)
{
struct mt7615_rate_desc *rate = &wd->rate;
struct mt7615_sta *sta = wd->sta;
struct mt7615_rate_desc *rate = &wrd->rate;
struct mt7615_sta *sta = wrd->sta;
u32 w5, w27, addr, val;
lockdep_assert_held(&dev->mt76.mutex);
......@@ -132,86 +131,30 @@ mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
return 0;
}
static int
mt7663_usb_sdio_set_key(struct mt7615_dev *dev,
struct mt7615_wtbl_desc *wd)
static void mt7663_usb_sdio_rate_work(struct work_struct *work)
{
struct mt7615_key_desc *key = &wd->key;
struct mt7615_sta *sta = wd->sta;
enum mt7615_cipher_type cipher;
struct mt76_wcid *wcid;
int err;
lockdep_assert_held(&dev->mt76.mutex);
if (!sta) {
err = -EINVAL;
goto out;
}
cipher = mt7615_mac_get_cipher(key->cipher);
if (cipher == MT_CIPHER_NONE) {
err = -EOPNOTSUPP;
goto out;
}
wcid = &wd->sta->wcid;
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
cipher, key->cmd);
if (err < 0)
goto out;
err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
key->cmd);
if (err < 0)
goto out;
if (key->cmd == SET_KEY)
wcid->cipher |= BIT(cipher);
else
wcid->cipher &= ~BIT(cipher);
out:
kfree(key->key);
return err;
}
void mt7663_usb_sdio_wtbl_work(struct work_struct *work)
{
struct mt7615_wtbl_desc *wd, *wd_next;
struct list_head wd_list;
struct mt7615_wtbl_rate_desc *wrd, *wrd_next;
struct list_head wrd_list;
struct mt7615_dev *dev;
dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
wtbl_work);
rate_work);
INIT_LIST_HEAD(&wd_list);
INIT_LIST_HEAD(&wrd_list);
spin_lock_bh(&dev->mt76.lock);
list_splice_init(&dev->wd_head, &wd_list);
list_splice_init(&dev->wrd_head, &wrd_list);
spin_unlock_bh(&dev->mt76.lock);
list_for_each_entry_safe(wd, wd_next, &wd_list, node) {
list_del(&wd->node);
list_for_each_entry_safe(wrd, wrd_next, &wrd_list, node) {
list_del(&wrd->node);
mt7615_mutex_acquire(dev);
switch (wd->type) {
case MT7615_WTBL_RATE_DESC:
mt7663_usb_sdio_set_rates(dev, wd);
break;
case MT7615_WTBL_KEY_DESC:
mt7663_usb_sdio_set_key(dev, wd);
break;
}
mt7663_usb_sdio_set_rates(dev, wrd);
mt7615_mutex_release(dev);
kfree(wd);
kfree(wrd);
}
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work);
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
......@@ -357,8 +300,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
struct ieee80211_hw *hw = mt76_hw(dev);
int err;
INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work);
INIT_LIST_HEAD(&dev->wd_head);
INIT_WORK(&dev->rate_work, mt7663_usb_sdio_rate_work);
INIT_LIST_HEAD(&dev->wrd_head);
mt7615_init_device(dev);
err = mt7663_usb_sdio_init_hardware(dev);
......
......@@ -52,15 +52,15 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
dev->mphy.cap.has_2ghz, dev->mphy.cap.has_5ghz);
if (dev->no_2ghz) {
dev->mt76.cap.has_2ghz = false;
dev->mphy.cap.has_2ghz = false;
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
if (is_mt7630(dev)) {
dev->mt76.cap.has_5ghz = false;
dev->mphy.cap.has_5ghz = false;
dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
}
......@@ -342,10 +342,10 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
memcpy(dev->mphy.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mt76_eeprom_override(&dev->mt76);
mt76x02_mac_setaddr(dev, dev->mt76.macaddr);
mt76_eeprom_override(&dev->mphy);
mt76x02_mac_setaddr(dev, dev->mphy.macaddr);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
......
......@@ -245,7 +245,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (ret)
return ret;
if (dev->mt76.cap.has_5ghz) {
if (dev->mphy.cap.has_5ghz) {
struct ieee80211_supported_band *sband;
sband = &dev->mphy.sband_5g.sband;
......@@ -253,7 +253,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
mt76x0_init_txpower(dev, sband);
}
if (dev->mt76.cap.has_2ghz)
if (dev->mphy.cap.has_2ghz)
mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt76x02_init_debugfs(dev);
......
......@@ -194,7 +194,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
mt76_free_device(&dev->mt76);
return ret;
}
......
......@@ -117,6 +117,7 @@ int mt76x0e_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
.mcu_send_msg = mt76x02_mcu_msg_send,
.mcu_parse_response = mt76x02_mcu_parse_response,
};
int err;
......
......@@ -447,11 +447,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
else
coex3 |= BIT(4);
coex3 |= BIT(3);
if (dev->mt76.cap.has_2ghz)
if (dev->mphy.cap.has_2ghz)
wlan |= BIT(6);
} else {
/* single antenna mode */
if (dev->mt76.cap.has_5ghz) {
if (dev->mphy.cap.has_5ghz) {
coex3 |= BIT(3) | BIT(4);
} else {
wlan |= BIT(6);
......
......@@ -277,6 +277,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
mt76u_queues_deinit(&dev->mt76);
mt76_free_device(&dev->mt76);
return ret;
......
......@@ -609,10 +609,11 @@ static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
}
}
static void mt76x02_dfs_tasklet(unsigned long arg)
static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_pattern_detector *dfs_pd = from_tasklet(dfs_pd, t,
dfs_tasklet);
struct mt76x02_dev *dev = container_of(dfs_pd, typeof(*dev), dfs_pd);
u32 engine_mask;
int i;
......@@ -860,8 +861,7 @@ void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
INIT_LIST_HEAD(&dfs_pd->seq_pool);
dev->mt76.region = NL80211_DFS_UNSET;
dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
(unsigned long)dev);
tasklet_setup(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet);
}
static void
......
......@@ -75,14 +75,14 @@ void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
dev->mt76.cap.has_2ghz = true;
dev->mphy.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;
break;
}
}
......
......@@ -727,24 +727,24 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
static const u8 null_addr[ETH_ALEN] = {};
int i;
ether_addr_copy(dev->mt76.macaddr, addr);
ether_addr_copy(dev->mphy.macaddr, addr);
if (!is_valid_ether_addr(dev->mt76.macaddr)) {
eth_random_addr(dev->mt76.macaddr);
if (!is_valid_ether_addr(dev->mphy.macaddr)) {
eth_random_addr(dev->mphy.macaddr);
dev_info(dev->mt76.dev,
"Invalid MAC address, using random address %pM\n",
dev->mt76.macaddr);
dev->mphy.macaddr);
}
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
get_unaligned_le16(dev->mphy.macaddr + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
mt76_wr(dev, MT_MAC_BSSID_DW0,
get_unaligned_le32(dev->mt76.macaddr));
get_unaligned_le32(dev->mphy.macaddr));
mt76_wr(dev, MT_MAC_BSSID_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
get_unaligned_le16(dev->mphy.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
/* enable 7 additional beacon slots and control them with bypass mask */
......
......@@ -10,6 +10,28 @@
#include "mt76x02_mcu.h"
int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
u32 *rxfce;
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
dev->mcu_timeout = 1;
return -ETIMEDOUT;
}
rxfce = (u32 *)skb->cb;
if (seq != FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
return -EAGAIN;
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_parse_response);
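The new parse helper lets the send loop below treat a mismatched sequence number as "keep waiting" (-EAGAIN) instead of silently accepting a stale response. A simplified sketch of that retry contract, with invented values rather than the real mt76 API:

#include <errno.h>
#include <stdio.h>

/* 0: matching response, -EAGAIN: stale response, keep reading */
static int parse_response(int want_seq, int got_seq)
{
	return got_seq == want_seq ? 0 : -EAGAIN;
}

int main(void)
{
	int rx_seqs[] = { 3, 4, 5 };	/* pretend responses in the MCU queue */
	int want = 5, ret = -ETIMEDOUT;

	for (unsigned int i = 0; i < sizeof(rx_seqs) / sizeof(rx_seqs[0]); i++) {
		ret = parse_response(want, rx_seqs[i]);
		if (ret != -EAGAIN)
			break;		/* matched (or a hard error) */
	}
	printf("ret=%d\n", ret);	/* 0 once seq 5 is seen */
	return 0;
}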
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp)
{
......@@ -39,31 +61,15 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, tx_info);
if (ret)
goto out;
while (wait_resp) {
u32 *rxfce;
bool check_seq = false;
skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
dev->mcu_timeout = 1;
break;
}
rxfce = (u32 *)skb->cb;
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
check_seq = true;
ret = mt76x02_mcu_parse_response(mdev, cmd, skb, seq);
dev_kfree_skb(skb);
if (check_seq)
if (ret != -EAGAIN)
break;
}
......@@ -89,7 +95,8 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
if (func != Q_SELECT)
wait = true;
return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
return mt76_mcu_send_msg(&dev->mt76, CMD_FUN_SET_OP, &msg,
sizeof(msg), wait);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
......@@ -103,8 +110,8 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
.level = cpu_to_le32(0),
};
return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg),
false);
return mt76_mcu_send_msg(&dev->mt76, CMD_POWER_SAVING_OP, &msg,
sizeof(msg), false);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
......@@ -123,8 +130,8 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
if (is_mt76x2e)
mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
true);
ret = mt76_mcu_send_msg(&dev->mt76, CMD_CALIBRATION_OP, &msg,
sizeof(msg), true);
if (ret)
return ret;
......
......@@ -89,6 +89,8 @@ int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp);
int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
u32 val);
int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on);
......
......@@ -11,10 +11,11 @@
#include "mt76x02_mcu.h"
#include "trace.h"
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
struct mt76_dev *mdev = &dev->mt76;
struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i;
......@@ -35,9 +36,9 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
mt76_wr(dev, MT_BCN_BYPASS_MASK,
0xff00 | ~(0xff00 >> dev->beacon_data_count));
mt76_csa_check(&dev->mt76);
mt76_csa_check(mdev);
if (dev->mt76.csa_complete)
if (mdev->csa_complete)
return;
mt76x02_enqueue_buffered_bc(dev, &data, 8);
......@@ -58,8 +59,7 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
NULL);
mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
}
spin_unlock_bh(&q->lock);
}
......@@ -103,27 +103,6 @@ void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
dev->mt76.q_tx[qid] = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
}
static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
......@@ -169,14 +148,16 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
mt76x02_mac_poll_tx_status(dev, false);
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
if (napi_complete_done(napi, 0))
mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
for (i = MT_TXQ_PSD; i >= 0; i--)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
mt76_worker_schedule(&dev->mt76.tx_worker);
......@@ -198,8 +179,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
return -ENOMEM;
dev->mt76.tx_worker.fn = mt76x02_tx_worker;
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet);
spin_lock_init(&dev->txstatus_fifo_lock);
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
......@@ -209,22 +189,31 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE,
MT_TX_RING_BASE);
if (ret)
return ret;
}
ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE);
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
MT_MCU_RING_SIZE, MT_TX_RING_BASE);
if (ret)
return ret;
mt76x02_irq_enable(dev,
MT_INT_TX_DONE(IEEE80211_AC_VO) |
MT_INT_TX_DONE(IEEE80211_AC_VI) |
MT_INT_TX_DONE(IEEE80211_AC_BE) |
MT_INT_TX_DONE(IEEE80211_AC_BK) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU));
ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
......@@ -292,7 +281,7 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
}
if (intr & MT_INT_TX_STAT)
......@@ -357,7 +346,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
int i;
for (i = 0; i < 4; i++) {
q = dev->mt76.q_tx[i];
q = dev->mphy.q_tx[i];
if (!q->queued)
continue;
......@@ -475,8 +464,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76_mcu_restart(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
......
......@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
......
......@@ -297,6 +297,7 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
.headroom = MT_CMD_HDR_LEN,
.tailroom = 8,
.mcu_send_msg = mt76x02u_mcu_send_msg,
.mcu_parse_response = mt76x02_mcu_parse_response,
.mcu_wr_rp = mt76x02u_mcu_wr_rp,
.mcu_rd_rp = mt76x02u_mcu_rd_rp,
};
......
......@@ -186,6 +186,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
......@@ -304,12 +305,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
/* Allow to change address in HW if we create first interface. */
if (!dev->mphy.vif_mask &&
(((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
(((vif->addr[0] ^ dev->mphy.macaddr[0]) & ~GENMASK(4, 1)) ||
memcmp(vif->addr + 1, dev->mphy.macaddr + 1, ETH_ALEN - 1)))
mt76x02_mac_setaddr(dev, vif->addr);
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
idx = 1 + (((dev->mphy.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
/*
* Client mode typically only has one configurable BSSID register,
......@@ -487,7 +488,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
qid = dev->mt76.q_tx[queue]->hw_idx;
qid = dev->mphy.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
......@@ -621,7 +622,7 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76_stop_tx_queues(&dev->mphy, sta, true);
if (mt76_is_mmio(mdev))
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
......@@ -677,7 +678,7 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
u8 *addr = dev->macaddr_list[i].addr;
memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
memcpy(addr, dev->mphy.macaddr, ETH_ALEN);
if (!i)
continue;
......
......@@ -16,7 +16,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
{
void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
memcpy(dev->mt76.macaddr, src, ETH_ALEN);
memcpy(dev->mphy.macaddr, src, ETH_ALEN);
return 0;
}
......@@ -502,8 +502,8 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
dev->mt76.macaddr[0] &= ~BIT(1);
mt76_eeprom_override(&dev->mphy);
dev->mphy.macaddr[0] &= ~BIT(1);
return 0;
}
......
......@@ -33,13 +33,14 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
};
/* first set the channel without the extension channel info */
mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true);
mt76_mcu_send_msg(&dev->mt76, CMD_SWITCH_CHANNEL_OP, &msg,
sizeof(msg), true);
usleep_range(5000, 10000);
msg.ext_chan = 0xe0 + bw_index;
return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg),
true);
return mt76_mcu_send_msg(&dev->mt76, CMD_SWITCH_CHANNEL_OP, &msg,
sizeof(msg), true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
......@@ -66,7 +67,8 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
msg.cfg = cpu_to_le32(val);
/* first set the channel without the extension channel info */
return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true);
return mt76_mcu_send_msg(&dev->mt76, CMD_LOAD_CR, &msg, sizeof(msg),
true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
......@@ -84,8 +86,8 @@ int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
if (force)
msg.channel |= cpu_to_le32(BIT(31));
return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg),
true);
return mt76_mcu_send_msg(&dev->mt76, CMD_INIT_GAIN_OP, &msg,
sizeof(msg), true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
......@@ -100,7 +102,7 @@ int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
.data = *tssi_data,
};
return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
true);
return mt76_mcu_send_msg(&dev->mt76, CMD_CALIBRATION_OP, &msg,
sizeof(msg), true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
......@@ -90,7 +90,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
mt76_free_device(&dev->mt76);
return ret;
}
......
......@@ -69,7 +69,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
const u8 *macaddr = dev->mt76.macaddr;
const u8 *macaddr = dev->mphy.macaddr;
u32 val;
int i, k;
......
......@@ -179,6 +179,7 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
.mcu_restart = mt76pci_mcu_restart,
.mcu_send_msg = mt76x02_mcu_msg_send,
.mcu_parse_response = mt76x02_mcu_parse_response,
};
int ret;
......
......@@ -75,6 +75,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
return 0;
err:
mt76u_queues_deinit(&dev->mt76);
mt76_free_device(&dev->mt76);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
......
......@@ -4,3 +4,5 @@ obj-$(CONFIG_MT7915E) += mt7915e.o
mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
......@@ -47,32 +47,6 @@ mt7915_radar_trigger(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
mt7915_radar_trigger, "%lld\n");
static int
mt7915_dbdc_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
if (val)
mt7915_register_ext_phy(dev);
else
mt7915_unregister_ext_phy(dev);
return 0;
}
static int
mt7915_dbdc_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
*val = !!mt7915_ext_phy(dev);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7915_dbdc_get,
mt7915_dbdc_set, "%lld\n");
static int
mt7915_fw_debug_set(void *data, u64 val)
{
......@@ -233,6 +207,7 @@ static const struct file_operations fops_tx_stats = {
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
};
static int mt7915_read_temperature(struct seq_file *s, void *data)
......@@ -279,19 +254,23 @@ static int
mt7915_queues_read(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
static const struct {
struct mt76_phy *mphy_ext = dev->mt76.phy2;
struct mt76_queue *ext_q = mphy_ext ? mphy_ext->q_tx[MT_TXQ_BE] : NULL;
struct {
struct mt76_queue *q;
char *queue;
int id;
} queue_map[] = {
{ "WFDMA0", MT_TXQ_BE },
{ "MCUWM", MT_TXQ_MCU },
{ "MCUWA", MT_TXQ_MCU_WA },
{ "MCUFWQ", MT_TXQ_FWDL },
{ dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
{ ext_q, "WFDMA1" },
{ dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
{ dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" },
{ dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" },
{ dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
};
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
struct mt76_queue *q = queue_map[i].q;
if (!q)
continue;
......@@ -375,7 +354,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7915_queues_acq);
debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats);
debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
/* test knobs */
......@@ -460,6 +438,7 @@ static const struct file_operations fops_sta_stats = {
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
};
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
......
......@@ -4,16 +4,11 @@
#include "mt7915.h"
#include "eeprom.h"
static inline bool mt7915_efuse_valid(u8 val)
{
return !(val == 0xff);
}
u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
{
u8 *data = dev->mt76.eeprom.data;
if (!mt7915_efuse_valid(data[offset]))
if (data[offset] == 0xff)
mt7915_mcu_get_eeprom(dev, offset);
return data[offset];
......@@ -34,10 +29,10 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
static int mt7915_check_eeprom(struct mt7915_dev *dev)
{
u16 val;
u8 *eeprom = dev->mt76.eeprom.data;
u16 val;
mt7915_eeprom_read(dev, 0);
mt7915_eeprom_read(dev, MT_EE_CHIP_ID);
val = get_unaligned_le16(eeprom);
switch (val) {
......@@ -48,35 +43,50 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
}
}
static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
u8 *eeprom = dev->mt76.eeprom.data;
u8 tx_mask, max_nss = 4;
u32 val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF);
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
u32 val;
val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy);
val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
switch (val) {
case MT_EE_5GHZ:
dev->mt76.cap.has_5ghz = true;
phy->mt76->cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
phy->mt76->cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
phy->mt76->cap.has_2ghz = true;
phy->mt76->cap.has_5ghz = true;
break;
}
}
static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
{
u8 nss, tx_mask[2] = {}, *eeprom = dev->mt76.eeprom.data;
mt7915_eeprom_parse_band_config(&dev->phy);
/* read tx mask from eeprom */
tx_mask = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
eeprom[MT_EE_WIFI_CONF]);
if (!tx_mask || tx_mask > max_nss)
tx_mask = max_nss;
dev->chainmask = BIT(tx_mask) - 1;
dev->mphy.antenna_mask = dev->chainmask;
dev->phy.chainmask = dev->chainmask;
tx_mask[0] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
eeprom[MT_EE_WIFI_CONF]);
if (dev->dbdc_support)
tx_mask[1] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK,
eeprom[MT_EE_WIFI_CONF + 1]);
nss = tx_mask[0] + tx_mask[1];
if (!nss || nss > 4) {
tx_mask[0] = 4;
nss = 4;
}
dev->chainmask = BIT(nss) - 1;
dev->mphy.antenna_mask = BIT(tx_mask[0]) - 1;
dev->phy.chainmask = dev->mphy.antenna_mask;
}
int mt7915_eeprom_init(struct mt7915_dev *dev)
......@@ -92,10 +102,10 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
return ret;
mt7915_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
mt76_eeprom_override(&dev->mt76);
mt76_eeprom_override(&dev->mphy);
return 0;
}
......