Commit cd4c314a authored by Bo Jiao, committed by Felix Fietkau

mt76: mt7915: refine register definition

Add mt7915_reg_desc to differentiate chip generations.
This is an intermediate patch to introduce mt7916 support.
Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Co-developed-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent cacdd678
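
Background for the diff below, as a minimal sketch only: the patch moves per-queue hardware IDs, interrupt masks and ring bases into tables on the device so mt7915 and mt7916 can share one code path. The EX_* names, the interrupt bit and the lookup macro body are assumptions for illustration; only q_id[], q_int_mask[], wfdma_mask and the Q_CONFIG() shape are taken from the diff itself.

#include <linux/types.h>

#define EX_MAX_QUEUE	8	/* placeholder size; the real driver uses MT7915_MAX_QUEUE */

struct ex_dev {
	u8  q_id[EX_MAX_QUEUE];        /* per-chip hardware ring number */
	u32 q_int_mask[EX_MAX_QUEUE];  /* per-chip interrupt bit for that ring */
	u32 wfdma_mask;                /* bit set when the ring sits on WFDMA1 */
};

/* mirrors the Q_CONFIG() helper added to dma.c in this patch */
#define EX_Q_CONFIG(dev, q, wfdma, mask, id) do {		\
		if (wfdma)					\
			(dev)->wfdma_mask |= (1 << (q));	\
		(dev)->q_int_mask[(q)] = (mask);		\
		(dev)->q_id[(q)] = (id);			\
	} while (0)

/* at DMA init, each generation fills the table once, e.g. for mt7916 the
 * band0 rx data ring is hw queue 4 (MT7916_RXQ_BAND0) on WFDMA0; the
 * interrupt bit here is a placeholder, not the real MT_INT_* value */
static void ex_dma_config_mt7916(struct ex_dev *dev)
{
	EX_Q_CONFIG(dev, 0, 0, 1u << 22, 4);
}

/* shared code then resolves ring numbers through the table instead of
 * hard-coded MT7915_* constants, in the spirit of the MT_RXQ_ID()/
 * MT_TXQ_RING_BASE() macros used below (exact macro bodies are assumed) */
#define EX_Q_ID(dev, q)		((dev)->q_id[(q)])
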
@@ -521,14 +521,14 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
@@ -536,13 +536,13 @@ mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
@@ -570,8 +570,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
if (val & BIT(offs))
continue;
mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
@@ -633,7 +633,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
@@ -641,7 +641,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
@@ -5,11 +5,11 @@
#include "../dma.h"
#include "mac.h"
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
int i, err;
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (err < 0)
return err;
@@ -40,41 +40,77 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do { \
if (wfdma) \
dev->wfdma_mask |= (1 << (q)); \
dev->q_int_mask[(q)] = int; \
dev->q_id[(q)] = id; \
} while (0)
#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
if (is_mt7915(&dev->mt76)) {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
} else {
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
}
}
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(base, depth) ((base) << 16 | (depth))
mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
/* prefetch SRAM wrapping boundary for tx/rx ring. */
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200, 0x4));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240, 0x4));
/* for mt7915, the ring which is next the last
* used ring must be initialized.
*/
if (is_mt7915(&dev->mt76)) {
ofs += 0x4;
mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200, 0x0));
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280, 0x0));
}
}
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
__mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
int mt7915_dma_init(struct mt7915_dev *dev)
@@ -82,10 +118,12 @@ int mt7915_dma_init(struct mt7915_dev *dev)
u32 hif1_ofs = 0;
int ret;
mt7915_dma_config(dev);
mt76_dma_attach(&dev->mt76);
if (dev->hif2)
hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* configure global setting */
mt76_set(dev, MT_WFDMA1_GLO_CFG,
@@ -116,64 +154,79 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_dma_prefetch(dev);
/* init tx queue */
ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
MT7915_TX_RING_SIZE);
ret = mt7915_init_tx_queues(&dev->phy,
MT_TXQ_ID(0),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(0));
if (ret)
return ret;
/* command to WM */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
MT_MCUQ_ID(MT_MCUQ_WM),
MT7915_TX_MCU_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
MT_MCUQ_ID(MT_MCUQ_WA),
MT7915_TX_MCU_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
MT_MCUQ_ID(MT_MCUQ_FWDL),
MT7915_TX_FWDL_RING_SIZE,
MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
MT_RXQ_ID(MT_RXQ_MCU),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
/* rx data queue */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
MT_RXQ_ID(MT_RXQ_MAIN),
MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MAIN));
if (ret)
return ret;
if (dev->dbdc_support) {
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
MT_RXQ_ID(MT_RXQ_EXT),
MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RX_DATA_RING_BASE + hif1_ofs);
MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
MT7915_RXQ_MCU_WA_EXT,
MT_RXQ_ID(MT_RXQ_EXT_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
MT_RX_EVENT_RING_BASE + hif1_ofs);
MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
if (ret)
return ret;
}
@@ -461,8 +461,9 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
ETH_ALEN);
mt76_eeprom_override(mphy);
ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
MT7915_TX_RING_SIZE);
ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(1),
MT7915_TX_RING_SIZE,
MT_TXQ_RING_BASE(1));
if (ret)
goto error;
@@ -561,7 +562,10 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
if (is_mt7915(&dev->mt76))
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
else
mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
ret = mt7915_mcu_init(dev);
if (ret) {
@@ -1891,7 +1891,7 @@ static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
struct mt76_phy *mphy_ext = dev->mt76.phy2;
u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
@@ -2052,9 +2052,11 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
u32 val;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
@@ -2081,10 +2083,14 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
mib->rx_ampdu_cnt += cnt;
@@ -2093,7 +2099,9 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->rx_ampdu_bytes_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
@@ -2105,11 +2113,14 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
mib->rx_vec_queue_overflow_drop_cnt +=
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDRVEC(ext_phy));
mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
mib->rx_ba_cnt += cnt;
@@ -2117,10 +2128,13 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
if (is_mt7915(&dev->mt76))
cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
mib->tx_pkt_ibf_cnt += is_mt7915(&dev->mt76) ?
FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK, cnt) :
FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
cnt = mt76_rr(dev, MT_MIB_SDRMUBF(ext_phy));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
@@ -2158,15 +2172,14 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
}
aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
if (is_mt7915(&dev->mt76)) {
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
u32 val;
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 4)));
mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
mib->ack_fail_cnt +=
FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 4)));
mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
mib->rts_retries_cnt +=
FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
@@ -2179,6 +2192,35 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
dev->mt76.aggr_stats[aggr1++] += val >> 16;
}
} else {
for (i = 0; i < 2; i++) {
/* rts count */
val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 2)));
mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
/* rts retry count */
val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 2)));
mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ba miss count */
val = mt76_rr(dev, MT_MIB_MB_SDR2(ext_phy, (i << 2)));
mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
/* ack fail count */
val = mt76_rr(dev, MT_MIB_MB_BFTF(ext_phy, (i << 2)));
mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
}
for (i = 0; i < 8; i++) {
val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
}
}
}
void mt7915_mac_sta_rc_work(struct work_struct *work)
@@ -861,8 +861,12 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@@ -904,8 +908,12 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
@@ -931,8 +939,12 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust*/
if (is_mt7915(&dev->mt76))
mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
else
mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
@@ -46,6 +46,7 @@
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@@ -68,6 +69,14 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
enum mt7916_rxq_id {
MT7916_RXQ_MCU_WM = 0,
MT7916_RXQ_MCU_WA,
MT7916_RXQ_MCU_WA_EXT = 3,
MT7916_RXQ_BAND0,
MT7916_RXQ_BAND1,
};
struct mt7915_sta_key_conf {
s8 keyidx;
u8 key[16];
@@ -247,6 +256,10 @@ struct mt7915_dev {
};
struct mt7915_hif *hif2;
struct mt7915_reg_desc reg;
u8 q_id[MT7915_MAX_QUEUE];
u32 q_int_mask[MT7915_MAX_QUEUE];
u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
@@ -283,6 +296,13 @@ struct mt7915_dev {
} twt;
};
enum {
WFDMA0 = 0x0,
WFDMA1,
WFDMA_EXT,
__MT_WFDMA_MAX,
};
enum {
MT_CTX0,
MT_HIF0 = 0x0,
@@ -360,7 +380,6 @@ int mt7915_mmio_probe(struct device *pdev,
void __iomem *mem_base,
u32 device_id,
int irq, struct mt7915_hif *hif2);
u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
@@ -505,7 +524,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
@@ -23,30 +23,16 @@ struct reg_band {
u32 band[2];
};
#define REG_BAND(_reg) \
{ .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
#define REG_BAND_IDX(_reg, _idx) \
{ .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
static const struct reg_band reg_backup_list[] = {
REG_BAND_IDX(AGG_PCR0, 0),
REG_BAND_IDX(AGG_PCR0, 1),
REG_BAND_IDX(AGG_AWSCR0, 0),
REG_BAND_IDX(AGG_AWSCR0, 1),
REG_BAND_IDX(AGG_AWSCR0, 2),
REG_BAND_IDX(AGG_AWSCR0, 3),
REG_BAND(AGG_MRCR),
REG_BAND(TMAC_TFCR0),
REG_BAND(TMAC_TCR0),
REG_BAND(AGG_ATCR1),
REG_BAND(AGG_ATCR3),
REG_BAND(TMAC_TRCR0),
REG_BAND(TMAC_ICR0),
REG_BAND_IDX(ARB_DRNGR0, 0),
REG_BAND_IDX(ARB_DRNGR0, 1),
REG_BAND(WF_RFCR),
REG_BAND(WF_RFCR1),
};
#define REG_BAND(_list, _reg) \
{ _list.band[0] = MT_##_reg(0); \
_list.band[1] = MT_##_reg(1); }
#define REG_BAND_IDX(_list, _reg, _idx) \
{ _list.band[0] = MT_##_reg(0, _idx); \
_list.band[1] = MT_##_reg(1, _idx); }
#define TM_REG_MAX_ID 17
static struct reg_band reg_backup_list[TM_REG_MAX_ID];
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -355,6 +341,24 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
u32 *b = phy->test.reg_backup;
int i;
REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
REG_BAND(reg_backup_list[6], AGG_MRCR);
REG_BAND(reg_backup_list[7], TMAC_TFCR0);
REG_BAND(reg_backup_list[8], TMAC_TCR0);
REG_BAND(reg_backup_list[9], AGG_ATCR1);
REG_BAND(reg_backup_list[10], AGG_ATCR3);
REG_BAND(reg_backup_list[11], TMAC_TRCR0);
REG_BAND(reg_backup_list[12], TMAC_ICR0);
REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
REG_BAND(reg_backup_list[15], WF_RFCR);
REG_BAND(reg_backup_list[16], WF_RFCR1);
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
@@ -725,6 +729,7 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
void *rx, *rssi;
u16 fcs_err;
int i;
u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
@@ -768,8 +773,10 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;