Commit 5bed8d58 authored by Paolo Abeni

Merge branch 'add-wed-support-for-mt7988-chipset'

Lorenzo Bianconi says:

====================
Add WED support for MT7988 chipset

Similar to MT7622 and MT7986, introduce Wireless Ethernet Dispatcher (WED)
support for the MT7988 chipset in order to offload traffic received from the
LAN/WAN interfaces to the WLAN NIC (MT7996E) through the hardware packet
engine.
Add WED RX support in order to offload traffic received by the WLAN NIC to the
wired interfaces (LAN/WAN).
====================

Link: https://lore.kernel.org/r/cover.1695032290.git.lorenzo@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents fa17a6d8 1543b8ff
......@@ -22,6 +22,7 @@ properties:
- mediatek,mt7622-wed
- mediatek,mt7981-wed
- mediatek,mt7986-wed
- mediatek,mt7988-wed
- const: syscon
reg:
......
......@@ -20,6 +20,7 @@ properties:
items:
- enum:
- mediatek,mt7986-wo-ccif
- mediatek,mt7988-wo-ccif
- const: syscon
reg:
......
......@@ -197,6 +197,7 @@ static const struct mtk_reg_map mt7988_reg_map = {
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
[2] = 0x5000,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
......
......@@ -1132,7 +1132,7 @@ struct mtk_reg_map {
u32 gdm1_cnt;
u32 gdma_to_ppe;
u32 ppe_base;
u32 wdma_base[2];
u32 wdma_base[3];
u32 pse_iq_sta;
u32 pse_oq_sta;
};
......
......@@ -425,7 +425,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
}
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int wdma_idx, int txq, int bss, int wcid)
int wdma_idx, int txq, int bss, int wcid,
bool amsdu_en)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
......@@ -437,6 +438,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
break;
case 2:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
......
......@@ -88,13 +88,13 @@ enum {
#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
#define MTK_FOE_WINFO_PAO_HF BIT(23)
#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
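/* Illustrative sketch (not part of this commit): with the layout above, a
 * netsys v3 FOE entry's A-MSDU word could be packed as
 *
 *	l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_USR_INFO, usr_info) |
 *		    FIELD_PREP(MTK_FOE_WINFO_AMSDU_TID, tid) |
 *		    FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, 1);
 *
 * where usr_info and tid are hypothetical values; mtk_foe_entry_set_wdma()
 * in this series programs only the EN bit.
 */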
enum {
MTK_FOE_STATE_INVALID,
......@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
/* netsys_v3 */
u32 w3info;
u32 wpao;
u32 amsdu;
};
/* software-only entry type */
......@@ -392,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int sid);
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int wdma_idx, int txq, int bss, int wcid);
int wdma_idx, int txq, int bss, int wcid,
bool amsdu_en);
int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
......
......@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
info->queue = path->mtk_wdma.queue;
info->bss = path->mtk_wdma.bss;
info->wcid = path->mtk_wdma.wcid;
info->amsdu = path->mtk_wdma.amsdu;
return 0;
}
......@@ -192,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
info.bss, info.wcid, info.amsdu);
if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
......@@ -201,6 +202,9 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
case 1:
pse_port = PSE_WDMA1_PORT;
break;
case 2:
pse_port = PSE_WDMA2_PORT;
break;
default:
return -EINVAL;
}
......
......@@ -17,17 +17,21 @@
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
#define MTK_WED_PKT_SIZE 1900
#define MTK_WED_PKT_SIZE 1920
#define MTK_WED_BUF_SIZE 2048
#define MTK_WED_PAGE_BUF_SIZE 128
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE 1536
#define MTK_WED_RX_PG_BM_CNT 8192
#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
#define MTK_WED_AMSDU_NPAGES 32
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
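/* Buffer geometry, assuming 4 KiB pages (PAGE_SIZE is architecture-dependent):
 *	MTK_WED_BUF_PER_PAGE	= 4096 / 2048 = 2 tx buffers per page
 *	MTK_WED_RX_BUF_PER_PAGE	= 4096 / 128  = 32 rx sub-buffers per page
 *	MTK_WED_AMSDU_BUF_SIZE	= 4096 << 4   = 64 KiB per A-MSDU segment,
 *	so the MTK_WED_AMSDU_NPAGES (32) segments total 2 MiB of TXD memory.
 */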
......@@ -41,7 +45,10 @@
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
static struct mtk_wed_hw *hw_list[2];
#define MTK_WED_TX_BM_DMA_SIZE 65536
#define MTK_WED_TX_BM_PKT_CNT 32768
static struct mtk_wed_hw *hw_list[3];
static DEFINE_MUTEX(hw_lock);
struct mtk_wed_flow_block_priv {
......@@ -49,6 +56,39 @@ struct mtk_wed_flow_block_priv {
struct net_device *dev;
};
static const struct mtk_wed_soc_data mt7622_data = {
.regmap = {
.tx_bm_tkid = 0x088,
.wpdma_rx_ring0 = 0x770,
.reset_idx_tx_mask = GENMASK(3, 0),
.reset_idx_rx_mask = GENMASK(17, 16),
},
.tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
.wdma_desc_size = sizeof(struct mtk_wdma_desc),
};
static const struct mtk_wed_soc_data mt7986_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
.wpdma_rx_ring0 = 0x770,
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
.tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
.wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
};
static const struct mtk_wed_soc_data mt7988_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
.wpdma_rx_ring0 = 0x7d0,
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
.tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
.wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
};
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
......@@ -109,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
static void
mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
{
u32 status;
if (!mtk_wed_is_v3_or_greater(dev->hw))
return;
wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
dev_err(dev->hw->dev, "rx reset failed\n");
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
dev_err(dev->hw->dev, "rx reset failed\n");
wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
dev_err(dev->hw->dev, "rx reset failed\n");
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
dev_err(dev->hw->dev, "rx reset failed\n");
/* prefetch FIFO */
wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
/* core FIFO */
wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
/* writeback FIFO */
wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
/* prefetch ring status */
wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
/* writeback ring status */
wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
}
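/* Note: the *_CLEAR bits used above are not self-clearing; each FIFO/ring
 * flush is asserted with wdma_w32() and then explicitly deasserted with
 * wdma_clr() to release the block from its flush state.
 */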
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
......@@ -121,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
if (ret)
dev_err(dev->hw->dev, "rx reset failed\n");
mtk_wdma_v3_rx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
......@@ -135,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
return ret;
}
static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
return !!(wed_r32(dev, reg) & mask);
}
static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
int sleep = 15000;
int timeout = 100 * sleep;
u32 val;
return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
timeout, false, dev, reg, mask);
}
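/* Note: with sleep = 15000 us and timeout = 100 * sleep, this helper re-reads
 * the register roughly every 15 ms and gives up after ~1.5 s.
 */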
static void
mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
{
u32 status;
if (!mtk_wed_is_v3_or_greater(dev->hw))
return;
wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
dev_err(dev->hw->dev, "tx reset failed\n");
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
dev_err(dev->hw->dev, "tx reset failed\n");
wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
dev_err(dev->hw->dev, "tx reset failed\n");
if (read_poll_timeout(wdma_r32, status,
!(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
dev_err(dev->hw->dev, "tx reset failed\n");
/* prefetch FIFO */
wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
/* core FIFO */
wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
/* writeback FIFO */
wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
/* prefetch ring status */
wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
/* writeback ring status */
wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
}
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
......@@ -146,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
!(status & mask), 0, 10000))
dev_err(dev->hw->dev, "tx reset failed\n");
mtk_wdma_v3_tx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
......@@ -278,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev)
if (!hw->wed_dev)
goto out;
if (hw->version == 1)
if (mtk_wed_is_v1(hw))
return NULL;
/* MT7986 WED devices do not have any pcie slot restrictions */
......@@ -297,36 +518,153 @@ mtk_wed_assign(struct mtk_wed_device *dev)
return hw;
}
static int
mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw = dev->hw;
struct mtk_wed_amsdu *wed_amsdu;
int i;
if (!mtk_wed_is_v3_or_greater(hw))
return 0;
wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
sizeof(*wed_amsdu), GFP_KERNEL);
if (!wed_amsdu)
return -ENOMEM;
for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
void *ptr;
/* each segment is 64K */
ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
__GFP_ZERO | __GFP_COMP |
GFP_DMA32,
get_order(MTK_WED_AMSDU_BUF_SIZE));
if (!ptr)
goto error;
wed_amsdu[i].txd = ptr;
wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
MTK_WED_AMSDU_BUF_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
goto error;
}
dev->hw->wed_amsdu = wed_amsdu;
return 0;
error:
for (i--; i >= 0; i--)
dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
return -ENOMEM;
}
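/* Each of the MTK_WED_AMSDU_NPAGES segments is a 64 KiB physically contiguous
 * allocation from the DMA32 zone, streaming-mapped towards the device; on a
 * mapping failure the segments mapped so far are unmapped before returning
 * -ENOMEM.
 */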
static void
mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
{
struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
int i;
if (!wed_amsdu)
return;
for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
free_pages((unsigned long)wed_amsdu[i].txd,
get_order(MTK_WED_AMSDU_BUF_SIZE));
}
}
static int
mtk_wed_amsdu_init(struct mtk_wed_device *dev)
{
struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
int i, ret;
if (!wed_amsdu)
return 0;
for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
wed_amsdu[i].txd_phy);
/* init all sta parameters */
wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
dev->wlan.amsdu_max_len >> 8) |
FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
dev->wlan.amsdu_max_subframes));
wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
MTK_WED_AMSDU_STA_INFO_DO_INIT);
if (ret) {
dev_err(dev->hw->dev, "amsdu initialization failed\n");
return ret;
}
/* init partial amsdu offload txd src */
wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
/* init qmem */
wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
if (ret) {
pr_info("%s: amsdu qmem initialization failed\n", __func__);
return ret;
}
/* eagle E1 PCIE1 tx ring 22 flow control issue */
if (dev->wlan.id == 0x7991)
wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
return 0;
}
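/* Note: MTK_WED_AMSDU_STA_MAX_AMSDU_LEN is written as amsdu_max_len >> 8,
 * i.e. the hardware encodes the per-station A-MSDU length limit in 256-byte
 * units.
 */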
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
void **page_list;
u32 desc_size = dev->hw->soc->tx_ring_desc_size;
int i, page_idx = 0, n_pages, ring_size;
int token = dev->wlan.token_start;
int ring_size;
int n_pages;
int i, page_idx;
struct mtk_wed_buf *page_list;
dma_addr_t desc_phys;
void *desc_ptr;
ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
dev->tx_buf_ring.size = ring_size;
} else {
dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
ring_size = MTK_WED_TX_BM_PKT_CNT;
}
n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
dev->tx_buf_ring.size = ring_size;
dev->tx_buf_ring.pages = page_list;
desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
desc_ptr = dma_alloc_coherent(dev->hw->dev,
dev->tx_buf_ring.size * desc_size,
&desc_phys, GFP_KERNEL);
if (!desc_ptr)
return -ENOMEM;
dev->tx_buf_ring.desc = desc;
dev->tx_buf_ring.desc = desc_ptr;
dev->tx_buf_ring.desc_phys = desc_phys;
for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
void *buf;
......@@ -343,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
return -ENOMEM;
}
page_list[page_idx++] = page;
page_list[page_idx].p = page;
page_list[page_idx++].phy_addr = page_phys;
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
......@@ -351,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
u32 txd_size;
u32 ctrl;
txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
struct mtk_wdma_desc *desc = desc_ptr;
desc->buf0 = cpu_to_le32(buf_phys);
desc->buf1 = cpu_to_le32(buf_phys + txd_size);
if (dev->hw->version == 1)
ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
MTK_WED_BUF_SIZE - txd_size) |
MTK_WDMA_DESC_CTRL_LAST_SEG1;
else
ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
MTK_WED_BUF_SIZE - txd_size) |
MTK_WDMA_DESC_CTRL_LAST_SEG0;
desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
desc++;
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
u32 txd_size, ctrl;
txd_size = dev->wlan.init_buf(buf, buf_phys,
token++);
desc->buf1 = cpu_to_le32(buf_phys + txd_size);
ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
if (mtk_wed_is_v1(dev->hw))
ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
MTK_WED_BUF_SIZE - txd_size);
else
ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
MTK_WED_BUF_SIZE - txd_size);
desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
} else {
desc->ctrl = cpu_to_le32(token << 16);
}
desc_ptr += desc_size;
buf += MTK_WED_BUF_SIZE;
buf_phys += MTK_WED_BUF_SIZE;
}
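/* Note the v3 asymmetry above: the descriptor array holds
 * MTK_WED_TX_BM_DMA_SIZE (65536) entries while only MTK_WED_TX_BM_PKT_CNT
 * (32768) packet buffers are populated, and each populated descriptor seeds
 * just buf0 plus the token id in ctrl bits 31:16 instead of going through
 * init_buf().
 */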
......@@ -387,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
void **page_list = dev->tx_buf_ring.pages;
int page_idx;
int i;
struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
struct mtk_wed_hw *hw = dev->hw;
int i, page_idx = 0;
if (!page_list)
return;
if (!desc)
if (!dev->tx_buf_ring.desc)
goto free_pagelist;
for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
i += MTK_WED_BUF_PER_PAGE) {
void *page = page_list[page_idx++];
dma_addr_t buf_addr;
for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phy = page_list[page_idx].phy_addr;
void *page = page_list[page_idx++].p;
if (!page)
break;
buf_addr = le32_to_cpu(desc[i].buf0);
dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(page);
}
dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
desc, dev->tx_buf_ring.desc_phys);
dma_free_coherent(dev->hw->dev,
dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
dev->tx_buf_ring.desc,
dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
{
int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
struct mtk_wed_buf *page_list;
struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
int i, page_idx = 0;
if (!dev->wlan.hw_rro)
return 0;
page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
dev->hw_rro.pages = page_list;
desc = dma_alloc_coherent(dev->hw->dev,
dev->wlan.rx_nbuf * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
dev->hw_rro.desc = desc;
dev->hw_rro.desc_phys = desc_phys;
for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
int s;
page = __dev_alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev->hw->dev, page_phys)) {
__free_page(page);
return -ENOMEM;
}
page_list[page_idx].p = page;
page_list[page_idx++].phy_addr = page_phys;
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
buf_phys = page_phys;
for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
desc->buf0 = cpu_to_le32(buf_phys);
buf_phys += MTK_WED_PAGE_BUF_SIZE;
desc++;
}
dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
return 0;
}
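/* Sizing sketch, assuming 4 KiB pages: MTK_WED_RX_PG_BM_CNT (8192) sub-buffers
 * of MTK_WED_PAGE_BUF_SIZE (128) bytes at MTK_WED_RX_BUF_PER_PAGE (32) per
 * page gives n_pages = 256 mapped pages, i.e. 1 MiB of hw-rro page memory,
 * with one mtk_wed_bm_desc per sub-buffer.
 */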
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc;
struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
......@@ -436,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
return 0;
return mtk_wed_hwrro_buffer_alloc(dev);
}
static void
mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
{
struct mtk_wed_buf *page_list = dev->hw_rro.pages;
struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
int i, page_idx = 0;
if (!dev->wlan.hw_rro)
return;
if (!page_list)
return;
if (!desc)
goto free_pagelist;
for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
dma_addr_t buf_addr = page_list[page_idx].phy_addr;
void *page = page_list[page_idx++].p;
if (!page)
break;
dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(page);
}
dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
desc, dev->hw_rro.desc_phys);
free_pagelist:
kfree(page_list);
}
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
if (!desc)
return;
......@@ -450,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
mtk_wed_hwrro_free_buffer(dev);
}
static void
mtk_wed_hwrro_init(struct mtk_wed_device *dev)
{
if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
return;
wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
MTK_WED_RX_PG_BM_CNT));
/* enable rx_page_bm to fetch dmad */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
}
static void
......@@ -463,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
mtk_wed_hwrro_init(dev);
}
static void
......@@ -498,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
if (dev->hw->version == 1)
switch (dev->hw->version) {
case 1:
mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
else
break;
case 2:
mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
break;
case 3:
mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
break;
default:
break;
}
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
......@@ -516,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
if (!mtk_wed_is_v2(dev->hw))
return;
if (enable) {
wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
wed_w32(dev, MTK_WED_TXP_DW1,
......@@ -527,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
}
}
#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
static int
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
struct mtk_wed_ring *ring)
{
u32 val;
int i;
if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
return; /* queue is not configured by mt76 */
for (i = 0; i < 3; i++) {
u32 cur_idx;
u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
cur_idx = wed_r32(dev,
MTK_WED_WPDMA_RING_RX_DATA(idx) +
MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
......@@ -551,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
if (i == 3) {
dev_err(dev->hw->dev, "rx dma enable failed\n");
return;
return -ETIMEDOUT;
}
val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
MTK_WFMDA_RX_DMA_EN;
wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
return 0;
}
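/* This helper returns 0 once the WLAN firmware has filled the ring (the cpu
 * index reaches MTK_WED_RX_RING_SIZE - 1) and -ETIMEDOUT after three polls;
 * on success the callers enable MTK_WFMDA_RX_DMA_EN themselves (see
 * mtk_wed_dma_enable()).
 */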
static void
......@@ -577,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
if (mtk_wed_is_v1(dev->hw)) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
......@@ -590,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
if (mtk_wed_is_v3_or_greater(dev->hw) &&
mtk_wed_get_rx_capa(dev)) {
wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
MTK_WDMA_PREF_TX_CFG_PREF_EN);
wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
MTK_WDMA_PREF_RX_CFG_PREF_EN);
}
}
mtk_wed_set_512_support(dev, false);
......@@ -606,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (dev->hw->version == 1)
if (!mtk_wed_get_rx_capa(dev))
return;
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
......@@ -625,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
if (dev->hw->version == 1)
if (mtk_wed_is_v1(dev->hw))
return;
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_EN |
MTK_WED_CTRL_WED_RX_BM_EN |
MTK_WED_CTRL_RX_RRO_QM_EN);
if (mtk_wed_is_v3_or_greater(dev->hw)) {
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
}
}
static void
......@@ -643,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_amsdu_free_buffer(dev);
mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
......@@ -681,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_unlock(&hw_lock);
}
#define PCIE_BASE_ADDR0 0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
switch (dev->wlan.bus_type) {
case MTK_WED_BUS_PCIE: {
struct device_node *np = dev->hw->eth->dev->of_node;
struct regmap *regs;
regs = syscon_regmap_lookup_by_phandle(np,
"mediatek,wed-pcie");
if (IS_ERR(regs))
break;
if (mtk_wed_is_v2(dev->hw)) {
struct regmap *regs;
regs = syscon_regmap_lookup_by_phandle(np,
"mediatek,wed-pcie");
if (IS_ERR(regs))
break;
regmap_update_bits(regs, 0, BIT(0), BIT(0));
}
regmap_update_bits(regs, 0, BIT(0), BIT(0));
if (dev->wlan.msi) {
wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
dev->hw->pcie_base | 0xc08);
wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
dev->hw->pcie_base | 0xc04);
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
} else {
wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
dev->hw->pcie_base | 0x180);
wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
dev->hw->pcie_base | 0x184);
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
}
wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
......@@ -703,19 +1202,9 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
/* pcie interrupt control: pola/source selection */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
/* pcie interrupt status trigger register */
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
/* pola setting */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
dev->hw->index));
break;
}
case MTK_WED_BUS_AXI:
......@@ -731,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
} else {
mtk_wed_bus_init(dev);
int i;
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
return;
}
mtk_wed_bus_init(dev);
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
if (!mtk_wed_get_rx_capa(dev))
return;
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
if (!dev->wlan.hw_rro)
return;
wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
dev->wlan.wpdma_rx_pg + i * 0x10);
}
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
u32 mask, set;
u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_set_wpdma(dev);
mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
}
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
if (dev->hw->version == 1) {
if (mtk_wed_is_v1(dev->hw)) {
u32 offset = dev->hw->index ? 0x04000400 : 0;
wdma_set(dev, MTK_WDMA_GLO_CFG,
......@@ -907,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
}
/* configure RX_ROUTE_QM */
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
if (mtk_wed_is_v2(dev->hw)) {
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
0x3 + dev->hw->index));
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
} else {
wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
0x3 + dev->hw->index));
}
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
......@@ -924,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->init_done = true;
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_TX_BM_TKID,
FIELD_PREP(MTK_WED_TX_BM_TKID_START,
dev->wlan.token_start) |
FIELD_PREP(MTK_WED_TX_BM_TKID_END,
dev->wlan.token_start +
dev->wlan.nbuf - 1));
if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
MTK_WED_TX_BM_DYN_THR_HI);
} else {
wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
FIELD_PREP(MTK_WED_TX_BM_TKID_START,
dev->wlan.token_start) |
FIELD_PREP(MTK_WED_TX_BM_TKID_END,
dev->wlan.token_start +
dev->wlan.nbuf - 1));
} else if (mtk_wed_is_v2(dev->hw)) {
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
MTK_WED_TX_BM_DYN_THR_HI_V2);
......@@ -961,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
dev->tx_buf_ring.size / 128));
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
}
wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
FIELD_PREP(MTK_WED_TX_BM_TKID_END,
dev->wlan.token_start + dev->wlan.nbuf - 1));
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
if (dev->hw->version == 1) {
if (mtk_wed_is_v3_or_greater(dev->hw)) {
/* switch to new bm architecture */
wed_clr(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_LEGACY_EN);
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
dev->wlan.nbuf / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
dev->wlan.nbuf / 128));
/* return SKBID + SDP back to bm */
wed_set(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
MTK_WED_TX_BM_PKT_CNT |
MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
}
if (mtk_wed_is_v1(dev->hw)) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
} else {
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
} else if (mtk_wed_get_rx_capa(dev)) {
/* rx hw init */
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
/* reset prefetch index of ring */
wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
/* reset prefetch FIFO of ring */
wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
mtk_wed_rx_buffer_hw_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
if (!mtk_wed_is_v1(dev->hw))
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
}
static void
......@@ -1008,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
}
}
static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
return !!(wed_r32(dev, reg) & mask);
}
static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
int sleep = 15000;
int timeout = 100 * sleep;
u32 val;
return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
timeout, false, dev, reg, mask);
}
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
......@@ -1038,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret)
return ret;
if (dev->wlan.hw_rro) {
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
MTK_WED_RX_IND_CMD_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
}
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
MTK_WED_WPDMA_RX_D_PREF_BUSY);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
} else {
if (mtk_wed_is_v3_or_greater(dev->hw)) {
/* 1.a. disable prefetch HW */
wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
MTK_WED_WPDMA_RX_D_PREF_EN);
mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
MTK_WED_WPDMA_RX_D_PREF_BUSY);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
}
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
......@@ -1072,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
}
if (dev->wlan.hw_rro) {
/* disable rro msdu page drv */
wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_EN);
/* disable rro data drv */
wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
/* rro msdu page drv reset */
wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_CLR);
mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_CLR);
/* rro data drv reset */
wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
MTK_WED_RRO_RX_D_DRV_CLR);
mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
MTK_WED_RRO_RX_D_DRV_CLR);
}
/* reset route qm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
if (ret)
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
else
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
MTK_WED_RTQM_Q_RST);
} else if (mtk_wed_is_v3_or_greater(dev->hw)) {
wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
} else {
wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
}
/* reset tx wdma */
mtk_wdma_tx_reset(dev);
/* reset tx wdma drv */
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
if (mtk_wed_is_v3_or_greater(dev->hw))
mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
MTK_WED_WPDMA_STATUS_TX_DRV);
else
mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
/* reset wed rx dma */
......@@ -1098,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
} else {
struct mtk_eth *eth = dev->hw->eth;
if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
wed_set(dev, MTK_WED_RESET_IDX,
dev->hw->soc->regmap.reset_idx_rx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
......@@ -1114,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_RX_BM_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
if (dev->wlan.hw_rro) {
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
}
/* wo change to enable state */
val = MTK_WED_WO_STATE_ENABLE;
ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
......@@ -1131,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
false);
}
mtk_wed_free_rx_buffer(dev);
mtk_wed_hwrro_free_buffer(dev);
return 0;
}
......@@ -1157,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
wed_w32(dev, MTK_WED_RESET_IDX,
dev->hw->soc->regmap.reset_idx_tx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
/* 2. reset WDMA rx DMA */
busy = !!mtk_wdma_rx_reset(dev);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
if (mtk_wed_is_v3_or_greater(dev->hw)) {
val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
} else {
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}
if (!busy)
busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
} else {
if (mtk_wed_is_v3_or_greater(dev->hw)) {
/* 1.a. disable prefetch HW */
wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_EN);
mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_BUSY);
wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_DDONE2_EN);
/* 2. Reset dma index */
wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
MTK_WED_WDMA_RESET_IDX_RX_ALL);
}
wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
......@@ -1187,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
if (mtk_wed_is_v1(dev->hw))
val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
wed_r32(dev, MTK_WED_TX_BM_INTF));
else
val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
wed_r32(dev, MTK_WED_TX_TKID_INTF));
if (val == 0x40)
break;
}
......@@ -1210,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
} else {
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
MTK_WED_WPDMA_RESET_IDX_TX |
......@@ -1218,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
}
dev->init_done = false;
if (dev->hw->version == 1)
if (mtk_wed_is_v1(dev->hw))
return;
if (!busy) {
......@@ -1226,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
mtk_wed_rx_reset(dev);
if (mtk_wed_is_v3_or_greater(dev->hw)) {
/* reset amsdu engine */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
}
if (mtk_wed_get_rx_capa(dev))
mtk_wed_rx_reset(dev);
}
static int
......@@ -1249,7 +1875,6 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->rx_wdma))
......@@ -1257,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->rx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
desc_size, true))
dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
......@@ -1278,7 +1903,6 @@ static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->tx_wdma))
......@@ -1286,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->tx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
desc_size, true))
dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
if (mtk_wed_is_v3_or_greater(dev->hw)) {
struct mtk_wdma_desc *desc = wdma->desc;
int i;
for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
desc->buf0 = 0;
desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
desc->buf1 = 0;
desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
desc++;
desc->buf0 = 0;
desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
desc->buf1 = 0;
desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
desc++;
}
}
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
......@@ -1344,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
if (dev->hw->version == 1) {
if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
MTK_WED_PCIE_INT_TRIGGER_STATUS);
......@@ -1354,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
GENMASK(1, 0));
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
......@@ -1374,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
MTK_WED_WPDMA_INT_CTRL_RX0_EN |
MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
MTK_WED_WPDMA_INT_CTRL_RX1_EN |
MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
dev->wlan.rx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
dev->wlan.rx_tbit[1]));
if (mtk_wed_get_rx_capa(dev)) {
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
MTK_WED_WPDMA_INT_CTRL_RX0_EN |
MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
MTK_WED_WPDMA_INT_CTRL_RX1_EN |
MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
dev->wlan.rx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
dev->wlan.rx_tbit[1]));
wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
GENMASK(1, 0));
}
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
......@@ -1398,57 +2046,282 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}
#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
int i;
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
} else {
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
}
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
if (mtk_wed_is_v1(dev->hw)) {
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
int i;
return;
}
wed_set(dev, MTK_WED_WPDMA_CTRL,
MTK_WED_WPDMA_CTRL_SDL1_FIXED);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
if (mtk_wed_is_v3_or_greater(dev->hw)) {
wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_DDONE2_EN);
wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
}
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
0x2));
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
if (!mtk_wed_get_rx_capa(dev))
return;
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
if (mtk_wed_is_v3_or_greater(dev->hw)) {
wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
MTK_WED_WPDMA_RX_D_PREF_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
}
for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
struct mtk_wed_ring *ring = &dev->rx_ring[i];
u32 val;
if (!(ring->flags & MTK_WED_RING_CONFIGURED))
continue; /* queue is not configured by mt76 */
if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
dev_err(dev->hw->dev,
"rx_ring(%d) dma enable failed\n", i);
continue;
}
val = wifi_r32(dev,
dev->wlan.wpdma_rx_glo -
dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
wifi_w32(dev,
dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
val);
}
}
static void
mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
{
int i;
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
return;
if (reset) {
wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_EN);
return;
}
wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_CLR);
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
dev->wlan.rro_rx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
dev->wlan.rro_rx_tbit[1]));
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
dev->wlan.rx_pg_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
dev->wlan.rx_pg_tbit[1]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
dev->wlan.rx_pg_tbit[2]));
/* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
* WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
*/
wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
MTK_WED_RRO_MSDU_PG_DRV_EN);
for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
if (!(ring->flags & MTK_WED_RING_CONFIGURED))
continue;
if (mtk_wed_check_wfdma_rx_fill(dev, ring))
dev_err(dev->hw->dev,
"rx_rro_ring(%d) initialization failed\n", i);
}
for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
if (!(ring->flags & MTK_WED_RING_CONFIGURED))
continue;
for (i = 0; i < MTK_WED_RX_QUEUES; i++)
mtk_wed_check_wfdma_rx_fill(dev, i);
if (mtk_wed_check_wfdma_rx_fill(dev, ring))
dev_err(dev->hw->dev,
"rx_page_ring(%d) initialization failed\n", i);
}
}
static void
mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
ring->wpdma = regs;
wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
readl(regs));
wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
readl(regs + MTK_WED_RING_OFS_COUNT));
ring->flags |= MTK_WED_RING_CONFIGURED;
}
static void
mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
ring->wpdma = regs;
wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
readl(regs));
wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
readl(regs + MTK_WED_RING_OFS_COUNT));
ring->flags |= MTK_WED_RING_CONFIGURED;
}
static int
mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
int i, count = 0;
ring->wpdma = regs;
wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
readl(regs) & 0xfffffff0);
wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
readl(regs + MTK_WED_RING_OFS_COUNT));
/* ack sn cr */
wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
dev->wlan.ind_cmd.ack_sn_addr);
wed_w32(dev, MTK_WED_RRO_CFG1,
FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
dev->wlan.ind_cmd.win_size) |
FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
dev->wlan.ind_cmd.particular_sid));
/* particular session addr element */
wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
dev->wlan.ind_cmd.particular_se_phys);
for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
if (count >= 100)
dev_err(dev->hw->dev,
"write ba session base failed\n");
}
/* pn check init */
for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
MTK_WED_PN_CHECK_IS_FIRST);
wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
count = 0;
val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
if (count >= 100)
dev_err(dev->hw->dev,
"session(%d) initialization failed\n", i);
}
wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
return 0;
}
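/* Both table loads above use the same handshake: issue the write through the
 * CFG register, then poll the corresponding *_WR_RDY bit (up to 100 reads)
 * before loading the next entry.
 */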
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
......@@ -1466,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
mtk_wed_set_ext_int(dev, true);
if (dev->hw->version == 1) {
if (mtk_wed_is_v1(dev->hw)) {
u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
dev->hw->index);
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
} else if (mtk_wed_get_rx_capa(dev)) {
/* the driver sets MID ready only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
......@@ -1483,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
if (mtk_wed_is_v3_or_greater(dev->hw)) {
wed_w32(dev, MTK_WED_EXT_INT_MASK3,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_r32(dev, MTK_WED_EXT_INT_MASK3);
}
if (mtk_wed_rro_cfg(dev))
return;
}
mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
mtk_wed_amsdu_init(dev);
mtk_wed_dma_enable(dev);
dev->running = true;
......@@ -1535,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
dev->version = hw->version;
dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
......@@ -1544,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
if (ret)
goto out;
ret = mtk_wed_amsdu_buffer_alloc(dev);
if (ret)
goto out;
if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rro_alloc(dev);
if (ret)
......@@ -1551,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
}
mtk_wed_hw_init_early(dev);
if (hw->version == 1) {
if (mtk_wed_is_v1(hw))
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
} else {
else
dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
if (mtk_wed_get_rx_capa(dev))
ret = mtk_wed_wo_init(hw);
}
out:
if (ret) {
dev_err(dev->hw->dev, "failed to attach wed device\n");
......@@ -1601,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
ring->reg_base = MTK_WED_RING_TX(idx);
ring->wpdma = regs;
if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
/* reset prefetch index */
wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
/* reset prefetch FIFO */
wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
}
/* WED -> WPDMA */
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
......@@ -1619,7 +2521,7 @@ static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
int i, index = dev->hw->version == 1;
int i, index = mtk_wed_is_v1(dev->hw);
/*
* For txfree event handling, the same DMA ring is shared between WED
......@@ -1675,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
u32 val, ext_mask;
if (dev->hw->version == 1)
ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
if (mtk_wed_is_v3_or_greater(dev->hw))
ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
else
ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
......@@ -1844,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
{
struct mtk_wed_hw *hw = wed->hw;
if (hw->version < 2)
if (mtk_wed_is_v1(hw))
return -EOPNOTSUPP;
switch (type) {
......@@ -1876,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
.setup_tc = mtk_wed_setup_tc,
.start_hw_rro = mtk_wed_start_hw_rro,
.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
......@@ -1918,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
hw->version = eth->soc->version;
if (hw->version == 1) {
switch (hw->version) {
case 2:
hw->soc = &mt7986_data;
break;
case 3:
hw->soc = &mt7988_data;
break;
default:
case 1:
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,pcie-mirror");
hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
......@@ -1934,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
regmap_write(hw->mirror, 0, 0);
regmap_write(hw->mirror, 4, 0);
}
hw->soc = &mt7622_data;
break;
}
mtk_wed_hw_add_debugfs(hw);
......
......@@ -9,10 +9,29 @@
#include <linux/regmap.h>
#include <linux/netdevice.h>
#include "mtk_wed_regs.h"
struct mtk_eth;
struct mtk_wed_wo;
struct mtk_wed_soc_data {
struct {
u32 tx_bm_tkid;
u32 wpdma_rx_ring0;
u32 reset_idx_tx_mask;
u32 reset_idx_rx_mask;
} regmap;
u32 tx_ring_desc_size;
u32 wdma_desc_size;
};
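For a concrete picture of what lands in these fields, a MT7622-style instance consistent with the register defines later in this series would look roughly like the sketch below. Treat the values as illustrative: only tx_bm_tkid at 0x088 (MTK_WED_TX_BM_TKID) and the WPDMA RX ring at 0x770 are directly visible in this patch.

static const struct mtk_wed_soc_data example_mt7622_data = {
	.regmap = {
		.tx_bm_tkid = 0x088,		/* MTK_WED_TX_BM_TKID */
		.wpdma_rx_ring0 = 0x770,	/* MTK_WED_WPDMA_RX_RING */
		.reset_idx_tx_mask = GENMASK(3, 0),
		.reset_idx_rx_mask = GENMASK(17, 16),
	},
	.tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
	.wdma_desc_size = sizeof(struct mtk_wdma_desc),
};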
struct mtk_wed_amsdu {
void *txd;
dma_addr_t txd_phy;
};
struct mtk_wed_hw {
const struct mtk_wed_soc_data *soc;
struct device_node *node;
struct mtk_eth *eth;
struct regmap *regs;
......@@ -24,6 +43,8 @@ struct mtk_wed_hw {
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
struct mtk_wed_wo *wed_wo;
struct mtk_wed_amsdu *wed_amsdu;
u32 pcie_base;
u32 debugfs_reg;
u32 num_flows;
u8 version;
......@@ -37,9 +58,30 @@ struct mtk_wdma_info {
u8 queue;
u16 wcid;
u8 bss;
u8 amsdu;
};
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
{
return hw->version == 1;
}
static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
{
return hw->version == 2;
}
static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
{
return hw->version == 3;
}
static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
{
return hw->version > 2;
}
static inline void
wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
......@@ -122,6 +164,21 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
writel(val, dev->txfree_ring.wpdma + reg);
}
static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
{
if (!mtk_wed_is_v3_or_greater(dev->hw))
return MTK_WED_PCIE_BASE;
switch (dev->hw->index) {
case 1:
return MTK_WED_PCIE_BASE1;
case 2:
return MTK_WED_PCIE_BASE2;
default:
return MTK_WED_PCIE_BASE0;
}
}
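The selected window is a physical PCIe aperture (0x11280000 and friends in the register header below), cached once at attach time in hw->pcie_base. A hedged illustration of consuming the cached value; the offset used here is the only one visible in this patch, anything else would be an assumption:

static u32 example_pcie_int_mask_addr(struct mtk_wed_device *dev)
{
	/* absolute address of the PCIe INT mask inside this WED's window */
	return dev->hw->pcie_base + MTK_WED_PCIE_INT_MASK;
}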
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index);
......
......@@ -11,6 +11,7 @@ struct reg_dump {
u16 offset;
u8 type;
u8 base;
u32 mask;
};
enum {
......@@ -25,6 +26,8 @@ enum {
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
#define DUMP_REG_MASK(_reg, _mask) \
{ #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
#define DUMP_RING(_prefix, _base, ...) \
{ _prefix " BASE", _base, __VA_ARGS__ }, \
{ _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
......@@ -32,6 +35,7 @@ enum {
{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
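Since DUMP_WED_MASK() stacks three macros, one expansion is worth spelling out: the row is named after the mask rather than the register, so several masked fields can share one register. For illustration only (this struct is equivalent to the macro output, not new code, and assumes the struct's first member is the label string, as DUMP_STR implies):

/* DUMP_WED_MASK(WED_RX_PN_CHK_CNT, WED_PN_CHK_FAIL_CNT) expands to: */
static const struct reg_dump example_entry = {
	"WED_PN_CHK_FAIL_CNT",		/* #_mask: row label */
	MTK_WED_RX_PN_CHK_CNT,		/* .offset, 0xf70 */
	DUMP_TYPE_WED,			/* .type */
	0,				/* .base */
	MTK_WED_PN_CHK_FAIL_CNT,	/* .mask, GENMASK(15, 0) */
};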
......@@ -151,7 +155,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
static const struct reg_dump regs_common[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
......@@ -169,7 +173,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
DUMP_STR("WED RRO"),
DUMP_STR("WED WO RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
......@@ -180,17 +184,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
DUMP_STR("WED Route QM"),
DUMP_WED(WED_RTQM_R2H_MIB(0)),
DUMP_WED(WED_RTQM_R2Q_MIB(0)),
DUMP_WED(WED_RTQM_Q2H_MIB(0)),
DUMP_WED(WED_RTQM_R2H_MIB(1)),
DUMP_WED(WED_RTQM_R2Q_MIB(1)),
DUMP_WED(WED_RTQM_Q2H_MIB(1)),
DUMP_WED(WED_RTQM_Q2N_MIB),
DUMP_WED(WED_RTQM_Q2B_MIB),
DUMP_WED(WED_RTQM_PFDBK_MIB),
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
DUMP_WED_RING(WED_WDMA_RING_TX),
......@@ -211,6 +204,287 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
};
static const struct reg_dump regs_wed_v2[] = {
DUMP_STR("WED Route QM"),
DUMP_WED(WED_RTQM_R2H_MIB(0)),
DUMP_WED(WED_RTQM_R2Q_MIB(0)),
DUMP_WED(WED_RTQM_Q2H_MIB(0)),
DUMP_WED(WED_RTQM_R2H_MIB(1)),
DUMP_WED(WED_RTQM_R2Q_MIB(1)),
DUMP_WED(WED_RTQM_Q2H_MIB(1)),
DUMP_WED(WED_RTQM_Q2N_MIB),
DUMP_WED(WED_RTQM_Q2B_MIB),
DUMP_WED(WED_RTQM_PFDBK_MIB),
};
static const struct reg_dump regs_wed_v3[] = {
DUMP_STR("WED RX RRO DATA"),
DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
DUMP_STR("WED RX MSDU PAGE"),
DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
DUMP_STR("WED RX IND CMD"),
DUMP_WED(WED_IND_CMD_RX_CTRL1),
DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
WED_IND_CMD_PREFETCH_FREE_CNT),
DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
DUMP_STR("WED ADDR ELEM"),
DUMP_WED(WED_ADDR_ELEM_CFG0),
DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
WED_ADDR_ELEM_PREFETCH_FREE_CNT),
DUMP_STR("WED Route QM"),
DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (dev) {
dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
if (mtk_wed_is_v2(hw))
dump_wed_regs(s, dev,
regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
else
dump_wed_regs(s, dev,
regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
static int
wed_amsdu_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("WED AMDSU INFO"),
DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
DUMP_STR("WED AMDSU ENG0 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG1 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG2 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG3 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG4 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG5 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG6 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG7 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED AMDSU ENG8 INFO"),
DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
WED_AMSDU_ENG_MAX_PL_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
WED_AMSDU_ENG_MAX_QGPP_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
WED_AMSDU_ENG_CUR_ENTRY),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
WED_AMSDU_ENG_MAX_BUF_MERGED),
DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
WED_AMSDU_ENG_MAX_MSDU_MERGED),
DUMP_STR("WED QMEM INFO"),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
DUMP_STR("WED QMEM HEAD INFO"),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
DUMP_STR("WED QMEM TAIL INFO"),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
DUMP_STR("WED HIFTXD MSDU INFO"),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
......@@ -219,7 +493,94 @@ wed_rxinfo_show(struct seq_file *s, void *data)
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
static int
wed_rtqm_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
DUMP_STR("WED Route QM IGRS1(Legacy)"),
DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
DUMP_STR("WED Route QM IGRS3(DEBUG)"),
DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (dev)
dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
static int
wed_rro_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("RRO/IND CMD CNT"),
DUMP_WED(WED_RX_IND_CMD_CNT(1)),
DUMP_WED(WED_RX_IND_CMD_CNT(2)),
DUMP_WED(WED_RX_IND_CMD_CNT(3)),
DUMP_WED(WED_RX_IND_CMD_CNT(4)),
DUMP_WED(WED_RX_IND_CMD_CNT(5)),
DUMP_WED(WED_RX_IND_CMD_CNT(6)),
DUMP_WED(WED_RX_IND_CMD_CNT(7)),
DUMP_WED(WED_RX_IND_CMD_CNT(8)),
DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
WED_ADDR_ELEM_SIG_FAIL_CNT),
DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
WED_PN_CHK_FAIL_CNT),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (dev)
dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rro);
static int
mtk_wed_reg_set(void *data, u64 val)
......@@ -261,7 +622,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
if (hw->version != 1)
if (!mtk_wed_is_v1(hw)) {
debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
&wed_rxinfo_fops);
if (mtk_wed_is_v3_or_greater(hw)) {
debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
&wed_amsdu_fops);
debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
&wed_rtqm_fops);
debugfs_create_file_unsafe("rro", 0400, dir, hw,
&wed_rro_fops);
}
}
}
......@@ -16,14 +16,30 @@
#include "mtk_wed_wo.h"
#include "mtk_wed.h"
static struct mtk_wed_wo_memory_region mem_region[] = {
[MTK_WED_WO_REGION_EMI] = {
.name = "wo-emi",
},
[MTK_WED_WO_REGION_ILM] = {
.name = "wo-ilm",
},
[MTK_WED_WO_REGION_DATA] = {
.name = "wo-data",
.shared = true,
},
[MTK_WED_WO_REGION_BOOT] = {
.name = "wo-boot",
},
};
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
return readl(wo->boot.addr + reg);
return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
writel(val, wo->boot.addr + reg);
writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
......@@ -207,7 +223,7 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
if (dev->hw->version == 1)
if (!mtk_wed_get_rx_capa(dev))
return 0;
if (WARN_ON(!wo))
......@@ -218,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
int index;
index = of_property_match_string(wo->hw->node, "memory-region-names",
region->name);
if (index < 0)
return index;
np = of_parse_phandle(wo->hw->node, "memory-region", index);
np = of_parse_phandle(hw->node, "memory-region", index);
if (!np)
return -ENODEV;
......@@ -242,7 +252,7 @@ mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
region->phy_addr = rmem->base;
region->size = rmem->size;
region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
return !region->addr ? -EINVAL : 0;
}
......@@ -255,6 +265,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
const struct mtk_wed_fw_trailer *trailer;
const struct mtk_wed_fw_region *fw_region;
if (!region->phy_addr || !region->size)
return 0;
trailer_ptr = fw->data + fw->size - sizeof(*trailer);
trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
......@@ -294,18 +307,6 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
static struct mtk_wed_wo_memory_region mem_region[] = {
[MTK_WED_WO_REGION_EMI] = {
.name = "wo-emi",
},
[MTK_WED_WO_REGION_ILM] = {
.name = "wo-ilm",
},
[MTK_WED_WO_REGION_DATA] = {
.name = "wo-data",
.shared = true,
},
};
const struct mtk_wed_fw_trailer *trailer;
const struct firmware *fw;
const char *fw_name;
......@@ -314,25 +315,38 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
int index = of_property_match_string(wo->hw->node,
"memory-region-names",
mem_region[i].name);
if (index < 0)
continue;
ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
wo->boot.name = "wo-boot";
ret = mtk_wed_get_memory_region(wo, &wo->boot);
if (ret)
return ret;
/* set dummy cr */
wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
wo->hw->index + 1);
/* load firmware */
if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
fw_name = MT7981_FIRMWARE_WO;
else
fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
switch (wo->hw->version) {
case 2:
if (of_device_is_compatible(wo->hw->node,
"mediatek,mt7981-wed"))
fw_name = MT7981_FIRMWARE_WO;
else
fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
: MT7986_FIRMWARE_WO0;
break;
case 3:
fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
: MT7988_FIRMWARE_WO0;
break;
default:
return -EINVAL;
}
ret = request_firmware(&fw, fw_name, wo->hw->dev);
if (ret)
......@@ -353,15 +367,16 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
}
/* set the start address */
boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
: MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
else
boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
: MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
......@@ -396,3 +411,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
......@@ -13,6 +13,9 @@
#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
struct mtk_wdma_desc {
__le32 buf0;
__le32 ctrl;
......@@ -25,6 +28,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET 0x008
#define MTK_WED_RESET_TX_BM BIT(0)
#define MTK_WED_RESET_RX_BM BIT(1)
#define MTK_WED_RESET_RX_PG_BM BIT(2)
#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
......@@ -37,6 +42,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
#define MTK_WED_RESET_RX_RRO_QM BIT(20)
#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
#define MTK_WED_RESET_TX_AMSDU BIT(22)
#define MTK_WED_RESET_WED BIT(31)
#define MTK_WED_CTRL 0x00c
......@@ -44,6 +50,9 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
......@@ -54,9 +63,14 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
#define MTK_WED_EXT_INT_STATUS 0x020
#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
......@@ -64,8 +78,8 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
......@@ -89,19 +103,26 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_MASK 0x028
#define MTK_WED_EXT_INT_MASK1 0x02c
#define MTK_WED_EXT_INT_MASK2 0x030
#define MTK_WED_EXT_INT_MASK3 0x034
#define MTK_WED_STATUS 0x060
#define MTK_WED_STATUS_TX GENMASK(15, 8)
#define MTK_WED_WPDMA_STATUS 0x068
#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
#define MTK_WED_TX_BM_CTRL 0x080
#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
#define MTK_WED_TX_BM_BASE 0x084
#define MTK_WED_TX_BM_INIT_PTR 0x088
#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
#define MTK_WED_TX_BM_TKID 0x088
#define MTK_WED_TX_BM_TKID_V2 0x0c8
#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
......@@ -124,6 +145,12 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
#define MTK_WED_TX_TKID_INTF 0x0dc
#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
#define MTK_WED_TX_TKID_DYN_THR 0x0e0
#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
......@@ -160,9 +187,6 @@ struct mtk_wdma_desc {
#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
#define MTK_WED_RESET_IDX 0x20c
#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
......@@ -174,6 +198,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
#define MTK_WED_SCR0 0x3c0
#define MTK_WED_RX1_CTRL2 0x418
#define MTK_WED_WPDMA_INT_TRIGGER 0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
......@@ -204,12 +229,15 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
#define MTK_WED_WPDMA_RESET_IDX 0x50c
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
......@@ -255,9 +283,10 @@ struct mtk_wdma_desc {
#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
#define MTK_WED_PCIE_INT_CTRL 0x57c
#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
#define MTK_WED_WPDMA_CFG_BASE 0x580
#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
......@@ -283,15 +312,30 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
#define MTK_WED_WPDMA_RX_RING 0x770
#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
#define MTK_WED_WDMA_RING_TX 0x800
#define MTK_WED_WDMA_TX_MIB 0x810
......@@ -299,6 +343,20 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
#define MTK_WED_WDMA_RX_PREF_CFG 0x950
#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
#define MTK_WED_WDMA_GLO_CFG 0xa04
#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
......@@ -322,6 +380,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RESET_IDX 0xa08
#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
#define MTK_WED_WDMA_INT_CLR 0xa24
......@@ -331,6 +390,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
#define MTK_WED_WDMA_INT_CTRL 0xa2c
#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_WDMA_CFG_BASE 0xaa0
......@@ -391,9 +451,62 @@ struct mtk_wdma_desc {
#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
#define MTK_WDMA_INT_GRP1 0x250
#define MTK_WDMA_INT_GRP2 0x254
#define MTK_WDMA_PREF_TX_CFG 0x2d0
#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
#define MTK_WDMA_PREF_RX_CFG 0x2dc
#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
#define MTK_WDMA_WRBK_TX_CFG 0x300
#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
#define MTK_WDMA_WRBK_RX_CFG 0x344
#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
#define MTK_WDMA_WRBK_SIDX_CFG 0x388
#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
......@@ -407,6 +520,32 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
#define MTK_WED_RTQM_RST 0xb04
#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2N_MIB 0xb80
......@@ -415,6 +554,24 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q2B_MIB 0xb8c
#define MTK_WED_RTQM_PFDBK_MIB 0xb90
#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
#define MTK_WED_RTQM_FDROP_MIB 0xb84
#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
#define MTK_WED_RROQM_GLO_CFG 0xc04
#define MTK_WED_RROQM_RST_IDX 0xc08
#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
......@@ -464,7 +621,195 @@ struct mtk_wdma_desc {
#define MTK_WED_RX_BM_INTF 0xd9c
#define MTK_WED_RX_BM_ERR_STS 0xda8
#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
#define MTK_WED_RRO_CFG0 0xe10
#define MTK_WED_RRO_CFG1 0xe14
#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
#define MTK_WED_ADDR_ELEM_CFG0 0xe18
#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
#define MTK_WED_PN_CHECK_CFG 0xe30
#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
#define MTK_WED_PN_CHECK_RD BIT(30)
#define MTK_WED_PN_CHECK_WR BIT(31)
#define MTK_WED_PN_CHECK_WDATA_M 0xe38
#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
#define MTK_WED_RRO_PG_BM_BASE 0xeb4
#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
#define MTK_WED_RRO_RX_HW_STS 0xf00
#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
#define MTK_WED_RX_IND_CMD_CNT0 0xf20
#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
#define MTK_WED_RX_PN_CHK_CNT 0xf70
#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
#define MTK_WED_PCIE_INT_MASK 0x0
#define MTK_WED_AMSDU_FIFO 0x1800
#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
#define MTK_WED_AMSDU_STA_INFO 0x01810
#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814
#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
#define MTK_WED_AMSDU_STA_RMVL BIT(1)
#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
#define MTK_WED_AMSDU_PSE 0x1910
#define MTK_WED_AMSDU_PSE_RESET BIT(16)
#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
#define MTK_WED_PCIE_BASE 0x11280000
#define MTK_WED_PCIE_BASE0 0x11300000
#define MTK_WED_PCIE_BASE1 0x11310000
#define MTK_WED_PCIE_BASE2 0x11290000
#endif
......@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
......@@ -228,7 +230,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
struct mtk_wed_wo_memory_region boot;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
......
......@@ -591,7 +591,7 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
struct mt76_txwi_cache *t = NULL;
struct mt7915_dev *dev;
struct mt76_queue *q;
......
......@@ -919,6 +919,7 @@ struct net_device_path {
u8 queue;
u16 wcid;
u8 bss;
u8 amsdu;
} mtk_wdma;
};
};
......
......@@ -10,6 +10,7 @@
#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
#define MTK_WED_RX_PAGE_QUEUES 3
#define WED_WO_STA_REC 0x6
......@@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd {
MTK_WED_WO_CMD_WED_END
};
struct mtk_rxbm_desc {
struct mtk_wed_bm_desc {
__le32 buf0;
__le32 token;
} __packed __aligned(4);
......@@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats {
__le32 rx_drop_cnt;
};
struct mtk_wed_buf {
void *p;
dma_addr_t phy_addr;
};
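struct mtk_wed_buf pairs each buffer page's kernel address with its DMA address, so the free path can unmap without re-deriving the mapping (the old void **pages array could not). A minimal fill-loop sketch under that assumption; the helper name and GFP details are illustrative:

static int example_alloc_wed_pages(struct device *dev,
				   struct mtk_wed_buf *pages, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct page *page = __dev_alloc_page(GFP_KERNEL);
		dma_addr_t addr;

		if (!page)
			return -ENOMEM;

		addr = dma_map_page(dev, page, 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addr)) {
			__free_page(page);
			return -ENOMEM;
		}

		/* keep both views: CPU pointer for the driver, DMA
		 * address for teardown's dma_unmap_page()
		 */
		pages[i].p = page_address(page);
		pages[i].phy_addr = addr;
	}

	return 0;
}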
struct mtk_wed_device {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
const struct mtk_wed_ops *ops;
......@@ -94,17 +100,20 @@ struct mtk_wed_device {
struct mtk_wed_ring txfree_ring;
struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
struct mtk_wed_ring ind_cmd_ring;
struct {
int size;
void **pages;
struct mtk_wed_buf *pages;
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
} tx_buf_ring;
struct {
int size;
struct mtk_rxbm_desc *desc;
struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
......@@ -114,6 +123,13 @@ struct mtk_wed_device {
dma_addr_t fdbk_phys;
} rro;
struct {
int size;
struct mtk_wed_buf *pages;
struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
} hw_rro;
/* filled by driver: */
struct {
union {
......@@ -123,6 +139,7 @@ struct mtk_wed_device {
enum mtk_wed_bus_tye bus_type;
void __iomem *base;
u32 phy_base;
u32 id;
u32 wpdma_phys;
u32 wpdma_int;
......@@ -131,18 +148,35 @@ struct mtk_wed_device {
u32 wpdma_txfree;
u32 wpdma_rx_glo;
u32 wpdma_rx;
u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
u32 wpdma_rx_pg;
bool wcid_512;
bool hw_rro;
bool msi;
u16 token_start;
unsigned int nbuf;
unsigned int rx_nbuf;
unsigned int rx_npkt;
unsigned int rx_size;
unsigned int amsdu_max_len;
u8 tx_tbit[MTK_WED_TX_QUEUES];
u8 rx_tbit[MTK_WED_RX_QUEUES];
u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
u8 txfree_tbit;
u8 amsdu_max_subframes;
struct {
u8 se_group_nums;
u16 win_size;
u16 particular_sid;
u32 ack_sn_addr;
dma_addr_t particular_se_phys;
dma_addr_t addr_elem_phys[1024];
} ind_cmd;
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
......@@ -182,6 +216,14 @@ struct mtk_wed_ops {
void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
enum tc_setup_type type, void *type_data);
void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
bool reset);
void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs);
void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs);
int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
void __iomem *regs);
};
extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
......@@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
return ret;
}
static inline bool
mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
if (dev->version == 3)
return dev->wlan.hw_rro;
return dev->version != 1;
#else
return false;
#endif
}
static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
return dev->version == 3;
#else
return false;
#endif
}
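A hedged driver-side illustration of how these capability helpers gate feature setup before attach; the 1536/8 values are placeholders, not taken from this patch:

static void example_configure_wed(struct mtk_wed_device *wed)
{
	/* only v3 WED advertises A-MSDU offload */
	if (mtk_wed_is_amsdu_supported(wed)) {
		wed->wlan.amsdu_max_len = 1536;
		wed->wlan.amsdu_max_subframes = 8;
	}

	/* on v3, RX offload additionally depends on wlan.hw_rro */
	if (!mtk_wed_get_rx_capa(wed))
		return;	/* skip RX buffer/ring parameters */

	/* ... RX-specific wlan fields would be filled here ... */
}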
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
......@@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
(_dev)->ops->start_hw_rro(_dev, _mask, _reset)
#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
(_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
(_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
(_dev)->ops->ind_rx_ring_setup(_dev, _regs)
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
......@@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_stop(_dev) do {} while (0)
#define mtk_wed_device_dma_reset(_dev) do {} while (0)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
#endif
#endif