Commit 265f58f6 authored by David S. Miller

Merge branch 'mt7986-support'

Lorenzo Bianconi says:

====================
introduce mt7986 ethernet support

Add support for the mt7986-eth driver available on the MT7986 SoC.

Changes since v2:
- rely on GFP_KERNEL whenever possible
- define mtk_reg_map struct to introduce soc register map and avoid macros
- improve comments

Changes since v1:
- drop SRAM option
- convert ring->dma to void
- convert scratch_ring to void
- enable port4
- fix irq dts bindings
- drop gmac1 support from mt7986a-rfb dts for the moment
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cf0005d2 197c9e9b
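
The register-map rework called out in the v2 changelog replaces per-SoC register #defines with a table dereferenced at runtime. A condensed sketch of the idea follows; the `_sketch` names are illustrative only, while the real structs appear in the mtk_eth_soc.c hunks further down:

/* Offsets become per-SoC data instead of compile-time macros. */
struct mtk_reg_map_sketch {
        u32 tx_irq_mask;
        u32 tx_irq_status;
};

static const struct mtk_reg_map_sketch mt7986_sketch = {
        .tx_irq_mask    = 0x461c,
        .tx_irq_status  = 0x4618,
};

/* before: mtk_w32(eth, val, MTK_TX_IRQ_MASK);                - fixed macro
 * after:  mtk_w32(eth, val, eth->soc->reg_map->tx_irq_mask); - per-SoC data
 */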
@@ -21,6 +21,7 @@ properties:
- mediatek,mt7623-eth
- mediatek,mt7622-eth
- mediatek,mt7629-eth
- mediatek,mt7986-eth
- ralink,rt5350-eth
reg:
@@ -28,7 +29,7 @@ properties:
interrupts:
minItems: 3
maxItems: 4
power-domains:
maxItems: 1
@@ -88,6 +89,9 @@ allOf:
- mediatek,mt7623-eth
then:
properties:
interrupts:
maxItems: 3
clocks:
minItems: 4
maxItems: 4
@@ -112,6 +116,9 @@ allOf:
const: mediatek,mt7622-eth
then:
properties:
interrupts:
maxItems: 3
clocks:
minItems: 11
maxItems: 11
@@ -155,6 +162,9 @@ allOf:
const: mediatek,mt7629-eth
then:
properties:
interrupts:
maxItems: 3
clocks:
minItems: 17
maxItems: 17
@@ -189,6 +199,42 @@ allOf:
minItems: 2
maxItems: 2
- if:
properties:
compatible:
contains:
const: mediatek,mt7986-eth
then:
properties:
interrupts:
minItems: 4
clocks:
minItems: 15
maxItems: 15
clock-names:
items:
- const: fe
- const: gp2
- const: gp1
- const: wocpu1
- const: wocpu0
- const: sgmii_tx250m
- const: sgmii_rx250m
- const: sgmii_cdr_ref
- const: sgmii_cdr_fb
- const: sgmii2_tx250m
- const: sgmii2_rx250m
- const: sgmii2_cdr_ref
- const: sgmii2_cdr_fb
- const: netsys0
- const: netsys1
mediatek,sgmiisys:
minItems: 2
maxItems: 2
patternProperties:
"^mac@[0-1]$":
type: object
@@ -219,7 +265,6 @@ required:
- interrupts
- clocks
- clock-names
- power-domains
- mediatek,ethsys
unevaluatedProperties: false
@@ -295,3 +340,95 @@ examples:
};
};
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/mt7622-clk.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
eth: ethernet@15100000 {
#define CLK_ETH_FE_EN 0
#define CLK_ETH_WOCPU1_EN 3
#define CLK_ETH_WOCPU0_EN 4
#define CLK_TOP_NETSYS_SEL 43
#define CLK_TOP_NETSYS_500M_SEL 44
#define CLK_TOP_NETSYS_2X_SEL 46
#define CLK_TOP_SGM_325M_SEL 47
#define CLK_APMIXED_NET2PLL 1
#define CLK_APMIXED_SGMPLL 3
compatible = "mediatek,mt7986-eth";
reg = <0 0x15100000 0 0x80000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ethsys CLK_ETH_FE_EN>,
<&ethsys CLK_ETH_GP2_EN>,
<&ethsys CLK_ETH_GP1_EN>,
<&ethsys CLK_ETH_WOCPU1_EN>,
<&ethsys CLK_ETH_WOCPU0_EN>,
<&sgmiisys0 CLK_SGMII_TX250M_EN>,
<&sgmiisys0 CLK_SGMII_RX250M_EN>,
<&sgmiisys0 CLK_SGMII_CDR_REF>,
<&sgmiisys0 CLK_SGMII_CDR_FB>,
<&sgmiisys1 CLK_SGMII_TX250M_EN>,
<&sgmiisys1 CLK_SGMII_RX250M_EN>,
<&sgmiisys1 CLK_SGMII_CDR_REF>,
<&sgmiisys1 CLK_SGMII_CDR_FB>,
<&topckgen CLK_TOP_NETSYS_SEL>,
<&topckgen CLK_TOP_NETSYS_500M_SEL>;
clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
"sgmii_tx250m", "sgmii_rx250m",
"sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m",
"sgmii2_cdr_ref", "sgmii2_cdr_fb",
"netsys0", "netsys1";
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
assigned-clocks = <&topckgen CLK_TOP_NETSYS_2X_SEL>,
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
#address-cells = <1>;
#size-cells = <0>;
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
phy5: ethernet-phy@0 {
compatible = "ethernet-phy-id67c9.de0a";
phy-mode = "2500base-x";
reset-gpios = <&pio 6 1>;
reset-deassert-us = <20000>;
reg = <5>;
};
phy6: ethernet-phy@1 {
compatible = "ethernet-phy-id67c9.de0a";
phy-mode = "2500base-x";
reg = <6>;
};
};
mac0: mac@0 {
compatible = "mediatek,eth-mac";
phy-mode = "2500base-x";
phy-handle = <&phy5>;
reg = <0>;
};
mac1: mac@1 {
compatible = "mediatek,eth-mac";
phy-mode = "2500base-x";
phy-handle = <&phy6>;
reg = <1>;
};
};
};
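
The binding above requires two mediatek,sgmiisys phandles on mt7986, one per SGMII block. A minimal sketch of how a driver can resolve them; the helper name is hypothetical, but of_parse_phandle() and syscon_node_to_regmap() are the standard kernel APIs:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Hypothetical helper: resolve the i-th "mediatek,sgmiisys" phandle
 * into a regmap, roughly as a driver for this binding might do.
 */
static struct regmap *sketch_get_sgmiisys(struct device_node *np, int i)
{
        struct device_node *syscon_np;
        struct regmap *map;

        syscon_np = of_parse_phandle(np, "mediatek,sgmiisys", i);
        if (!syscon_np)
                return ERR_PTR(-ENODEV);

        map = syscon_node_to_regmap(syscon_np);
        of_node_put(syscon_np);
        return map;
}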
@@ -25,6 +25,80 @@ memory@40000000 {
};
};
&eth {
status = "okay";
gmac0: mac@0 {
compatible = "mediatek,eth-mac";
reg = <0>;
phy-mode = "2500base-x";
fixed-link {
speed = <2500>;
full-duplex;
pause;
};
};
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
};
};
&mdio {
switch: switch@0 {
compatible = "mediatek,mt7531";
reg = <31>;
reset-gpios = <&pio 5 0>;
};
};
&switch {
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
label = "lan0";
};
port@1 {
reg = <1>;
label = "lan1";
};
port@2 {
reg = <2>;
label = "lan2";
};
port@3 {
reg = <3>;
label = "lan3";
};
port@4 {
reg = <4>;
label = "lan4";
};
port@6 {
reg = <6>;
label = "cpu";
ethernet = <&gmac0>;
phy-mode = "2500base-x";
fixed-link {
speed = <2500>;
full-duplex;
pause;
};
};
};
};
&uart0 {
status = "okay";
};
@@ -222,6 +222,45 @@ ethsys: syscon@15000000 {
#reset-cells = <1>;
};
eth: ethernet@15100000 {
compatible = "mediatek,mt7986-eth";
reg = <0 0x15100000 0 0x80000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ethsys CLK_ETH_FE_EN>,
<&ethsys CLK_ETH_GP2_EN>,
<&ethsys CLK_ETH_GP1_EN>,
<&ethsys CLK_ETH_WOCPU1_EN>,
<&ethsys CLK_ETH_WOCPU0_EN>,
<&sgmiisys0 CLK_SGMII0_TX250M_EN>,
<&sgmiisys0 CLK_SGMII0_RX250M_EN>,
<&sgmiisys0 CLK_SGMII0_CDR_REF>,
<&sgmiisys0 CLK_SGMII0_CDR_FB>,
<&sgmiisys1 CLK_SGMII1_TX250M_EN>,
<&sgmiisys1 CLK_SGMII1_RX250M_EN>,
<&sgmiisys1 CLK_SGMII1_CDR_REF>,
<&sgmiisys1 CLK_SGMII1_CDR_FB>,
<&topckgen CLK_TOP_NETSYS_SEL>,
<&topckgen CLK_TOP_NETSYS_500M_SEL>;
clock-names = "fe", "gp2", "gp1", "wocpu1", "wocpu0",
"sgmii_tx250m", "sgmii_rx250m",
"sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m",
"sgmii2_cdr_ref", "sgmii2_cdr_fb",
"netsys0", "netsys1";
assigned-clocks = <&topckgen CLK_TOP_NETSYS_2X_SEL>,
<&topckgen CLK_TOP_SGM_325M_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>,
<&apmixedsys CLK_APMIXED_SGMPLL>;
mediatek,ethsys = <&ethsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
#reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
};
};
@@ -28,3 +28,73 @@ memory@40000000 {
&uart0 {
status = "okay";
};
&eth {
status = "okay";
gmac0: mac@0 {
compatible = "mediatek,eth-mac";
reg = <0>;
phy-mode = "2500base-x";
fixed-link {
speed = <2500>;
full-duplex;
pause;
};
};
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
switch@0 {
compatible = "mediatek,mt7531";
reg = <31>;
reset-gpios = <&pio 5 0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
label = "lan0";
};
port@1 {
reg = <1>;
label = "lan1";
};
port@2 {
reg = <2>;
label = "lan2";
};
port@3 {
reg = <3>;
label = "lan3";
};
port@4 {
reg = <4>;
label = "lan4";
};
port@6 {
reg = <6>;
label = "cpu";
ethernet = <&gmac0>;
phy-mode = "2500base-x";
fixed-link {
speed = <2500>;
full-duplex;
pause;
};
};
};
};
};
};
@@ -34,6 +34,96 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
.pdma = {
.rx_ptr = 0x0900,
.rx_cnt_cfg = 0x0904,
.pcrx_ptr = 0x0908,
.glo_cfg = 0x0a04,
.rst_idx = 0x0a08,
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
.int_grp = 0x0a50,
},
.qdma = {
.qtx_cfg = 0x1800,
.rx_ptr = 0x1900,
.rx_cnt_cfg = 0x1904,
.qcrx_ptr = 0x1908,
.glo_cfg = 0x1a04,
.rst_idx = 0x1a08,
.delay_irq = 0x1a0c,
.fc_th = 0x1a10,
.int_grp = 0x1a20,
.hred = 0x1a44,
.ctx_ptr = 0x1b00,
.dtx_ptr = 0x1b04,
.crx_ptr = 0x1b10,
.drx_ptr = 0x1b14,
.fq_head = 0x1b20,
.fq_tail = 0x1b24,
.fq_count = 0x1b28,
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
};
static const struct mtk_reg_map mt7628_reg_map = {
.tx_irq_mask = 0x0a28,
.tx_irq_status = 0x0a20,
.pdma = {
.rx_ptr = 0x0900,
.rx_cnt_cfg = 0x0904,
.pcrx_ptr = 0x0908,
.glo_cfg = 0x0a04,
.rst_idx = 0x0a08,
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
.int_grp = 0x0a50,
},
};
static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
.rx_ptr = 0x6100,
.rx_cnt_cfg = 0x6104,
.pcrx_ptr = 0x6108,
.glo_cfg = 0x6204,
.rst_idx = 0x6208,
.delay_irq = 0x620c,
.irq_status = 0x6220,
.irq_mask = 0x6228,
.int_grp = 0x6250,
},
.qdma = {
.qtx_cfg = 0x4400,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
.fc_th = 0x4610,
.int_grp = 0x4620,
.hred = 0x4644,
.ctx_ptr = 0x4700,
.dtx_ptr = 0x4704,
.crx_ptr = 0x4710,
.drx_ptr = 0x4714,
.fq_head = 0x4720,
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
},
.gdm1_cnt = 0x1c00,
};
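
With the three maps above, each SoC's mtk_soc_data is expected to carry a reg_map pointer so shared code stays offset-agnostic. A condensed sketch, with the struct shape assumed from the accesses in the hunks below (the real definition lives in mtk_eth_soc.h):

/* Assumed shape of the per-SoC descriptor, reduced to the field
 * this series adds.
 */
struct mtk_soc_data_sketch {
        const struct mtk_reg_map *reg_map;
};

static const struct mtk_soc_data_sketch mt7986_data_sketch = {
        .reg_map = &mt7986_reg_map,
};

/* Shared code then reads e.g. the QDMA global config without macros:
 *      val = mtk_r32(eth, eth->soc->reg_map->qdma.glo_cfg);
 * On mt7628 the qdma sub-map is simply left zero-initialized, since
 * that SoC is PDMA-only and never takes the QDMA paths.
 */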
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -57,7 +147,7 @@ static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
"sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -600,8 +690,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -611,8 +701,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -622,8 +712,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -633,8 +723,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -685,39 +775,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
u64_stats_update_end(&hw_stats->syncp);
@@ -791,8 +881,8 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -801,6 +891,10 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
return true;
}
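
The v2 parts carry four extra descriptor words, which is why mtk_rx_get_desc() grows an eth argument and snapshots rxd5/rxd6 only when MTK_NETSYS_V2 is set. The assumed word-for-word shapes of the two layouts are below; the authoritative definitions live in mtk_eth_soc.h:

/* Assumed descriptor shapes: v1 uses four 32-bit words, the v2
 * (mt7986/NETSYS_V2) variant extends the same layout to eight.
 */
struct mtk_rx_dma_sketch {
        u32 rxd1, rxd2, rxd3, rxd4;
};

struct mtk_rx_dma_v2_sketch {
        u32 rxd1, rxd2, rxd3, rxd4;
        u32 rxd5, rxd6, rxd7, rxd8;     /* only touched on MTK_NETSYS_V2 */
};

This size difference is also why the ring code below switches from array indexing to explicit "base + idx * soc->txrx.rxd_size" arithmetic.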
@@ -808,20 +902,20 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
@@ -831,37 +925,44 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + i * soc->txrx.txd_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
(i + 1) * soc->txrx.txd_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
txd->txd8 = 0;
}
}
mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
return ring->dma + (desc - ring->phys);
}
static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
void *txd, u32 txd_size)
{
int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
@@ -869,12 +970,12 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -942,18 +1043,108 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
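
With ring->dma now a void pointer, the descriptor/index conversions above are explicit byte arithmetic sized by soc->txrx.txd_size. A worked example, assuming the 16-byte v1 and 32-byte v2 descriptor sizes (4 and 8 u32 words respectively):

/* With txd_size = 16 (v1) and ring->dma the ring base:
 *      txd = ring->dma + 3 * txd_size;         - third descriptor
 *      idx = (txd - ring->dma) / txd_size;     - yields 3
 * With txd_size = 32 (v2) the same code walks 32-byte strides,
 * which is the point of dropping struct-typed array indexing.
 */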
static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *desc = txd;
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
if (info->first) {
if (info->gso)
data |= TX_DMA_TSO;
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM;
/* vlan header offload */
if (info->vlan)
data |= TX_DMA_INS_VLAN | info->vlan_tci;
}
WRITE_ONCE(desc->txd4, data);
}
static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma_v2 *desc = txd;
struct mtk_eth *eth = mac->hw;
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
data = TX_DMA_PLEN0(info->size);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
if (!info->qid && mac->id)
info->qid = MTK_QDMA_GMAC2_QID;
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
data = 0;
if (info->first) {
if (info->gso)
data |= TX_DMA_TSO_V2;
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM_V2;
}
WRITE_ONCE(desc->txd5, data);
data = 0;
if (info->first && info->vlan)
data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
WRITE_ONCE(desc->txd6, data);
WRITE_ONCE(desc->txd7, 0);
WRITE_ONCE(desc->txd8, 0);
}
static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
}
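
The two descriptor writers are driven by a plain parameter block; a condensed caller sketch, assuming dma_addr, len and txd are already in scope (mtk_tx_map below does exactly this with real data):

struct mtk_tx_dma_desc_info info = {
        .addr   = dma_addr,             /* already DMA-mapped buffer */
        .size   = len,
        .first  = true,
        .last   = true,                 /* single-fragment packet */
        .csum   = skb->ip_summed == CHECKSUM_PARTIAL,
};

mtk_tx_set_dma_desc(dev, txd, &info);   /* picks v1 or v2 by SoC caps */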
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
struct mtk_tx_dma_desc_info txd_info = {
.size = skb_headlen(skb),
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
.qid = skb->mark & MTK_QDMA_TX_MASK,
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
};
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
int i, n_desc = 1;
int k = 0;
itxd = ring->next_free;
@@ -961,52 +1152,35 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
mtk_tx_set_dma_desc(dev, itxd, &txd_info);
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
bool new_desc = true;
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
@@ -1018,25 +1192,22 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
new_desc = false;
}
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
soc->txrx.dma_max_len);
txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
offset, txd_info.size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1044,21 +1215,18 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
txd_info.size, k++);
frag_size -= txd_info.size;
offset += txd_info.size;
}
}
/* store skb to cleanup */
itx_buf->skb = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1076,12 +1244,14 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int next_idx;
next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1090,13 +1260,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1106,17 +1276,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
return -ENOMEM;
}
static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
int i, nfrags = 1;
skb_frag_t *frag;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
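
mtk_cal_txd_req() now divides GSO fragments by the per-SoC dma_max_len instead of the fixed MTK_TX_DMA_BUF_LEN. A worked example, assuming dma_max_len = 0x3fff (16383 bytes, the v1 limit):

/* GSO skb: linear head plus two frags of 32000 and 9000 bytes.
 *      nfrags  = 1                            - linear part
 *      nfrags += DIV_ROUND_UP(32000, 16383)   - 2 descriptors
 *      nfrags += DIV_ROUND_UP(9000, 16383)    - 1 descriptor
 * -> 4 descriptors reserved before mapping starts.
 */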
@@ -1168,7 +1337,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
@@ -1219,9 +1388,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
struct mtk_rx_dma *rxd;
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
@@ -1257,7 +1429,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
@@ -1265,26 +1437,25 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
!(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1327,7 +1498,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1345,10 +1516,25 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
mtk_ppe_check_skb(eth->ppe, skb,
trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (trxd.rxd3 & RX_DMA_VTAG_V2)
__vlan_hwaccel_put_tag(skb,
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
} else if (trxd.rxd2 & RX_DMA_VTAG) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
}
}
/* If the device is attached to a dsa switch, the special
* tag inserted in VLAN field by hw switch can be offloaded
* by RX HW VLAN offload. Clear vlan info.
*/
if (netdev_uses_dsa(netdev))
__vlan_hwaccel_clear_tag(skb);
}
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -1360,7 +1546,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1388,6 +1574,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
@@ -1395,7 +1582,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
u32 cpu, dma;
cpu = ring->last_free_ptr;
dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
@@ -1407,7 +1594,8 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
@@ -1429,7 +1617,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
}
ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
@@ -1460,7 +1648,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
mtk_tx_unmap(eth, tx_buf, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -1522,24 +1710,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
mtk_r32(eth, reg_map->tx_irq_status),
mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
@@ -1551,6 +1740,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
@@ -1558,32 +1748,36 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
mtk_r32(eth, reg_map->pdma.irq_status),
mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
@@ -1591,7 +1785,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
@@ -1599,18 +1793,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
txd = ring->dma + i * sz;
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
txd->txd8 = 0;
}
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
@@ -1622,8 +1823,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = ring->dma;
ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
@@ -1632,20 +1833,20 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
soc->reg_map->qdma.qtx_cfg);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
@@ -1656,6 +1857,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
static void mtk_tx_clean(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
@@ -1668,33 +1870,30 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1721,38 +1920,68 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
}
ring->dma = dma_alloc_coherent(eth->dma_dev,
rx_dma_size * eth->soc->txrx.rxd_size,
&ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
rxd->rxd1 = (unsigned int)dma_addr;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
rxd->rxd3 = 0;
rxd->rxd4 = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
rxd->rxd8 = 0;
}
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
if (rx_flag == MTK_RX_FLAGS_QDMA)
ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
ring_no * MTK_QRX_OFFSET;
else
ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
if (rx_flag == MTK_RX_FLAGS_QDMA) {
mtk_w32(eth, ring->phys,
reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, rx_dma_size,
reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
reg_map->qdma.rst_idx);
} else {
mtk_w32(eth, ring->phys,
reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, rx_dma_size,
reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
reg_map->pdma.rst_idx);
}
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
@@ -1763,14 +1992,17 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
struct mtk_rx_dma *rxd;
if (!ring->data[i])
continue;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
if (!rxd->rxd1)
continue;
dma_unmap_single(eth->dma_dev, rxd->rxd1,
ring->buf_size, DMA_FROM_DEVICE);
skb_free_frag(ring->data[i]);
}
kfree(ring->data);
@@ -1779,9 +2011,8 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * eth->soc->txrx.rxd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
}
@@ -2056,9 +2287,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
reg = eth->soc->reg_map->qdma.glo_cfg;
else
reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2116,8 +2347,8 @@ static int mtk_dma_init(struct mtk_eth *eth)
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
@@ -2125,6 +2356,7 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
@@ -2132,9 +2364,8 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
@@ -2169,7 +2400,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2191,13 +2422,16 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
eth->soc->txrx.rx_irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -2211,16 +2445,17 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw; struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev); mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
} }
#endif #endif
static int mtk_start_dma(struct mtk_eth *eth) static int mtk_start_dma(struct mtk_eth *eth)
{ {
u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err; int err;
err = mtk_dma_init(eth); err = mtk_dma_init(eth);
...@@ -2230,21 +2465,27 @@ static int mtk_start_dma(struct mtk_eth *eth) ...@@ -2230,21 +2465,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
} }
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
mtk_w32(eth, val = mtk_r32(eth, reg_map->qdma.glo_cfg);
MTK_TX_WB_DDONE | MTK_TX_DMA_EN | val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
MTK_RX_BT_32DWORDS,
MTK_QDMA_GLO_CFG); if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth, mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN, MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
MTK_PDMA_GLO_CFG); reg_map->pdma.glo_cfg);
} else { } else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
MTK_PDMA_GLO_CFG); reg_map->pdma.glo_cfg);
} }
return 0; return 0;
@@ -2307,7 +2548,7 @@ static int mtk_open(struct net_device *dev)
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+		mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
 		refcount_set(&eth->dma_refcnt, 1);
 	}
 	else
@@ -2359,7 +2600,7 @@ static int mtk_stop(struct net_device *dev)
 	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+	mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
@@ -2367,8 +2608,8 @@ static int mtk_stop(struct net_device *dev)
 	cancel_work_sync(&eth->tx_dim.work);
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+		mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+	mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
 	mtk_dma_free(eth);
@@ -2422,6 +2663,7 @@ static void mtk_dim_rx(struct work_struct *work)
 {
 	struct dim *dim = container_of(work, struct dim, work);
 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct dim_cq_moder cur_profile;
 	u32 val, cur;
@@ -2429,7 +2671,7 @@ static void mtk_dim_rx(struct work_struct *work)
 					  dim->profile_ix);
 	spin_lock_bh(&eth->dim_lock);
-	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+	val = mtk_r32(eth, reg_map->pdma.delay_irq);
 	val &= MTK_PDMA_DELAY_TX_MASK;
 	val |= MTK_PDMA_DELAY_RX_EN;
@@ -2439,9 +2681,9 @@ static void mtk_dim_rx(struct work_struct *work)
 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
 	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
-	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+	mtk_w32(eth, val, reg_map->pdma.delay_irq);
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+		mtk_w32(eth, val, reg_map->qdma.delay_irq);
 	spin_unlock_bh(&eth->dim_lock);
@@ -2452,6 +2694,7 @@ static void mtk_dim_tx(struct work_struct *work)
 {
 	struct dim *dim = container_of(work, struct dim, work);
 	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct dim_cq_moder cur_profile;
 	u32 val, cur;
@@ -2459,7 +2702,7 @@ static void mtk_dim_tx(struct work_struct *work)
 					  dim->profile_ix);
 	spin_lock_bh(&eth->dim_lock);
-	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+	val = mtk_r32(eth, reg_map->pdma.delay_irq);
 	val &= MTK_PDMA_DELAY_RX_MASK;
 	val |= MTK_PDMA_DELAY_TX_EN;
@@ -2469,9 +2712,9 @@ static void mtk_dim_tx(struct work_struct *work)
 	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
 	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
-	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+	mtk_w32(eth, val, reg_map->pdma.delay_irq);
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
-		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+		mtk_w32(eth, val, reg_map->qdma.delay_irq);
 	spin_unlock_bh(&eth->dim_lock);
@@ -2482,6 +2725,7 @@ static int mtk_hw_init(struct mtk_eth *eth)
 {
 	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
 		       ETHSYS_DMA_AG_MAP_PPE;
+	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	int i, val, ret;
 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2516,9 +2760,25 @@ static int mtk_hw_init(struct mtk_eth *eth)
 		return 0;
 	}
-	/* Non-MT7628 handling... */
-	ethsys_reset(eth, RSTCTRL_FE);
-	ethsys_reset(eth, RSTCTRL_PPE);
+	val = RSTCTRL_FE | RSTCTRL_PPE;
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+		val |= RSTCTRL_ETH;
+		if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+			val |= RSTCTRL_PPE1;
+	}
+
+	ethsys_reset(eth, val);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+			     0x3ffffff);
+
+		/* Set FE to PDMAv2 if necessary */
+		val = mtk_r32(eth, MTK_FE_GLO_MISC);
+		mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+	}
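For reference, ethsys_reset() pulses the requested reset lines through the ethsys syscon; roughly this shape in the pre-existing driver (a simplified sketch, not part of this diff):

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	/* assert the selected reset bits, wait, then de-assert them */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, reset_bits);
	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits, ~reset_bits);
	mdelay(10);
}

Collapsing the two sequential resets into a single ethsys_reset(eth, val) call lets FE, PPE, PPE1 and the ETH domain come out of reset together on NETSYS v2 parts.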
 	if (eth->pctl) {
 		/* Set GE2 driving and slew rate */
@@ -2556,12 +2816,48 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	mtk_rx_irq_disable(eth, ~0);
 	/* FE int grouping */
-	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
-	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
-	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
-	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+	mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+	mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
 	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+		/* PSE should not drop port8 and port9 packets */
+		mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+		/* PSE Free Queue Flow Control */
+		mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+		/* PSE config input queue threshold */
+		mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+		mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+		mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+		mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+		/* PSE config output queue threshold */
+		mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+		mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+		mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+		mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+		mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+		mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+		mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+		/* GDM and CDM Threshold */
+		mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+		mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+		mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+		mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+		mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+		mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+	}
 	return 0;
 err_disable_pm:
@@ -3122,20 +3418,8 @@ static int mtk_probe(struct platform_device *pdev)
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
-		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
-	} else {
-		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
-		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
-	}
-
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
-		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 		eth->ip_align = NET_IP_ALIGN;
-	} else {
-		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
-	}
 	spin_lock_init(&eth->page_lock);
 	spin_lock_init(&eth->tx_irq_lock);
@@ -3363,50 +3647,119 @@ static int mtk_remove(struct platform_device *pdev)
 }
 static const struct mtk_soc_data mt2701_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7623_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7623_CLKS_BITMAP,
 	.required_pctl = true,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
 };
 static const struct mtk_soc_data mt7621_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7621_CAPS,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7621_CLKS_BITMAP,
 	.required_pctl = false,
 	.offload_version = 2,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
 };
 static const struct mtk_soc_data mt7622_data = {
+	.reg_map = &mtk_reg_map,
 	.ana_rgc3 = 0x2028,
 	.caps = MT7622_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7622_CLKS_BITMAP,
 	.required_pctl = false,
 	.offload_version = 2,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
 };
 static const struct mtk_soc_data mt7623_data = {
+	.reg_map = &mtk_reg_map,
 	.caps = MT7623_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7623_CLKS_BITMAP,
 	.required_pctl = true,
 	.offload_version = 2,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
 };
 static const struct mtk_soc_data mt7629_data = {
+	.reg_map = &mtk_reg_map,
 	.ana_rgc3 = 0x128,
 	.caps = MT7629_CAPS | MTK_HWLRO,
 	.hw_features = MTK_HW_FEATURES,
 	.required_clks = MT7629_CLKS_BITMAP,
 	.required_pctl = false,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+};
+
+static const struct mtk_soc_data mt7986_data = {
+	.reg_map = &mt7986_reg_map,
+	.ana_rgc3 = 0x128,
+	.caps = MT7986_CAPS,
+	.required_clks = MT7986_CLKS_BITMAP,
+	.required_pctl = false,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma_v2),
+		.rxd_size = sizeof(struct mtk_rx_dma_v2),
+		.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+		.dma_len_offset = 8,
+	},
 };
 static const struct mtk_soc_data rt5350_data = {
+	.reg_map = &mt7628_reg_map,
 	.caps = MT7628_CAPS,
 	.hw_features = MTK_HW_FEATURES_MT7628,
 	.required_clks = MT7628_CLKS_BITMAP,
 	.required_pctl = false,
+	.txrx = {
+		.txd_size = sizeof(struct mtk_tx_dma),
+		.rxd_size = sizeof(struct mtk_rx_dma),
+		.rx_irq_done_mask = MTK_RX_DONE_INT,
+		.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
 };
 const struct of_device_id of_mtk_match[] = {
@@ -3415,6 +3768,7 @@ const struct of_device_id of_mtk_match[] = {
 	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
 	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
 	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+	{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
 	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
 	{},
 };
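At probe time the matched entry's .data pointer becomes eth->soc, which is how every hunk above selects per-SoC register offsets and descriptor sizes at runtime. A condensed sketch of the lookup (simplified from the driver's probe path):

static int mtk_probe(struct platform_device *pdev)
{
	struct mtk_eth *eth;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	/* returns the mtk_soc_data bound to the matched compatible,
	 * e.g. &mt7986_data for "mediatek,mt7986-eth"
	 */
	eth->soc = of_device_get_match_data(&pdev->dev);
	/* ... */
}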
...
@@ -24,6 +24,7 @@
 #define MTK_MAX_RX_LENGTH 1536
 #define MTK_MAX_RX_LENGTH_2K 2048
 #define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
 #define MTK_DMA_SIZE 512
 #define MTK_MAC_COUNT 2
 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
@@ -48,6 +49,8 @@
 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_QRX_OFFSET 0x10
+
 #define MTK_MAX_RX_RING_NUM 4
 #define MTK_HW_LRO_DMA_SIZE 8
@@ -81,6 +84,10 @@
 #define MTK_CDMQ_IG_CTRL 0x1400
 #define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
 /* CDMP Exgress Control Register */
 #define MTK_CDMP_EG_CTRL 0x404
@@ -100,25 +107,38 @@
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+/* FE global misc reg*/
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+/* PSE Input Queue Reservation Register*/
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+/* PSE Output Queue Threshold Register*/
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
+
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
 /* PDMA HW LRO Control Registers */
 #define MTK_PDMA_LRO_CTRL_DW0 0x980
 #define MTK_LRO_EN BIT(0)
 #define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
 #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
 #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
 #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
 #define MTK_PDMA_LRO_CTRL_DW1 0x984
 #define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -126,18 +146,19 @@
 #define MTK_ADMA_MODE BIT(15)
 #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
 #define MTK_MULTI_EN BIT(10)
 #define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
 /* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
 #define MTK_PST_DRX_IDX0 BIT(16)
 #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
 /* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
 #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
 #define MTK_PDMA_DELAY_RX_EN BIT(15)
 #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -151,19 +172,9 @@
 #define MTK_PDMA_DELAY_PINT_MASK 0x7f
 #define MTK_PDMA_DELAY_PTIME_MASK 0xff
-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
-
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
 /* PDMA HW LRO Alter Flow Delta Register */
 #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
 /* PDMA HW LRO IP Setting Registers */
 #define MTK_LRO_RX_RING0_DIP_DW0 0xb04
 #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -185,26 +196,9 @@
 #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
 /* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
 #define QDMA_RES_THRES 4
-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
-
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
-
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
 /* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
 #define MTK_RX_2B_OFFSET BIT(31)
 #define MTK_RX_BT_32DWORDS (3 << 11)
 #define MTK_NDP_CO_PRO BIT(10)
@@ -216,20 +210,19 @@
 #define MTK_TX_DMA_EN BIT(0)
 #define MTK_DMA_BUSY_TIMEOUT_US 1000000
-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
-
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
 /* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
 #define FC_THRES_DROP_MODE BIT(20)
 #define FC_THRES_DROP_EN (7 << 16)
 #define FC_THRES_MIN 0x4444
 /* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
 #define MTK_RX_DONE_DLY BIT(30)
 #define MTK_TX_DONE_DLY BIT(28)
 #define MTK_RX_DONE_INT3 BIT(19)
@@ -243,58 +236,32 @@
 #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
 #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+#define MTK_RX_DONE_INT_V2 BIT(14)
 /* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
 #define MTK_RLS_DONE_INT BIT(0)
-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
-
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
-
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
-
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
-
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
-
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-/* GMA1 counter / statics register */
-#define MTK_GDM1_RX_GBCNT_L 0x2400
-#define MTK_GDM1_RX_GBCNT_H 0x2404
-#define MTK_GDM1_RX_GPCNT 0x2408
-#define MTK_GDM1_RX_OERCNT 0x2410
-#define MTK_GDM1_RX_FERCNT 0x2414
-#define MTK_GDM1_RX_SERCNT 0x2418
-#define MTK_GDM1_RX_LENCNT 0x241c
-#define MTK_GDM1_RX_CERCNT 0x2420
-#define MTK_GDM1_RX_FCCNT 0x2424
-#define MTK_GDM1_TX_SKIPCNT 0x2428
-#define MTK_GDM1_TX_COLCNT 0x242c
-#define MTK_GDM1_TX_GBCNT_L 0x2430
-#define MTK_GDM1_TX_GBCNT_H 0x2434
-#define MTK_GDM1_TX_GPCNT 0x2438
 #define MTK_STAT_OFFSET 0x40
-/* QDMA TX NUM */
-#define MTK_QDMA_TX_NUM 16
-#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
 #define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
 #define MTK_WDMA0_BASE 0x2800
 #define MTK_WDMA1_BASE 0x2c00
@@ -308,10 +275,9 @@
 /* QDMA descriptor txd3 */
 #define TX_DMA_OWNER_CPU BIT(31)
 #define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
 #define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
 /* PDMA on MT7628 */
 #define TX_DMA_DONE BIT(31)
@@ -321,12 +287,14 @@
 /* QDMA descriptor rxd2 */
 #define RX_DMA_DONE BIT(31)
 #define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
 #define RX_DMA_VTAG BIT(15)
 /* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
 /* QDMA descriptor rxd4 */
 #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
@@ -337,10 +305,15 @@
 /* QDMA descriptor rxd4 */
 #define RX_DMA_L4_VALID BIT(24)
 #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
 #define RX_DMA_SPECIAL_TAG BIT(22)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
 /* PHY Indirect Access Control registers */
 #define MTK_PHY_IAC 0x10004
 #define PHY_IAC_ACCESS BIT(31)
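Because the L4-checksum-valid flag moves from rxd4 bit 24 (v1) to rxd3 bit 2 (v2), the poll loop can no longer hard-code the test. One plausible shape for the receive-side check, assuming a local descriptor copy `trxd` (a sketch, not the exact hunk from this series):

/* sketch: v1 keeps the flag in rxd4, v2 keeps it in rxd3 */
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
	l4_valid = trxd.rxd3 & RX_DMA_L4_VALID_V2;
else
	l4_valid = trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid;
if (l4_valid)
	skb->ip_summed = CHECKSUM_UNNECESSARY;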
@@ -463,6 +436,16 @@
 #define ETHSYS_TRGMII_MT7621_APLL BIT(6)
 #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+#define RSTCTRL_PPE1 BIT(30)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
 /* ethernet reset control register */
 #define ETHSYS_RSTCTRL 0x34
 #define RSTCTRL_FE BIT(6)
@@ -548,6 +531,17 @@ struct mtk_rx_dma {
 	unsigned int rxd4;
 } __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+	unsigned int rxd1;
+	unsigned int rxd2;
+	unsigned int rxd3;
+	unsigned int rxd4;
+	unsigned int rxd5;
+	unsigned int rxd6;
+	unsigned int rxd7;
+	unsigned int rxd8;
+} __packed __aligned(4);
+
 struct mtk_tx_dma {
 	unsigned int txd1;
 	unsigned int txd2;
@@ -555,6 +549,17 @@ struct mtk_tx_dma {
 	unsigned int txd4;
 } __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+	unsigned int txd1;
+	unsigned int txd2;
+	unsigned int txd3;
+	unsigned int txd4;
+	unsigned int txd5;
+	unsigned int txd6;
+	unsigned int txd7;
+	unsigned int txd8;
+} __packed __aligned(4);
+
 struct mtk_eth;
 struct mtk_mac;
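The v2 descriptors double the ring entry to eight 32-bit words; only the first four overlap the v1 layout, which is why rings become void * sized by txd_size/rxd_size rather than arrays of a fixed struct. For example, the allocation side reduces to:

/* sketch: one allocation path covers both descriptor generations */
ring->dma = dma_alloc_coherent(eth->dma_dev,
			       MTK_DMA_SIZE * eth->soc->txrx.txd_size,
			       &ring->phys, GFP_KERNEL);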
@@ -622,6 +627,10 @@ enum mtk_clks_map {
 	MTK_CLK_SGMII2_CDR_FB,
 	MTK_CLK_SGMII_CK,
 	MTK_CLK_ETH2PLL,
+	MTK_CLK_WOCPU0,
+	MTK_CLK_WOCPU1,
+	MTK_CLK_NETSYS0,
+	MTK_CLK_NETSYS1,
 	MTK_CLK_MAX
 };
@@ -652,6 +661,16 @@ enum mtk_clks_map {
 				 BIT(MTK_CLK_SGMII2_CDR_FB) | \
 				 BIT(MTK_CLK_SGMII_CK) | \
 				 BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7986_CLKS_BITMAP	(BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+				 BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+				 BIT(MTK_CLK_SGMII_TX_250M) | \
+				 BIT(MTK_CLK_SGMII_RX_250M) | \
+				 BIT(MTK_CLK_SGMII_CDR_REF) | \
+				 BIT(MTK_CLK_SGMII_CDR_FB) | \
+				 BIT(MTK_CLK_SGMII2_TX_250M) | \
+				 BIT(MTK_CLK_SGMII2_RX_250M) | \
+				 BIT(MTK_CLK_SGMII2_CDR_REF) | \
+				 BIT(MTK_CLK_SGMII2_CDR_FB))
 enum mtk_dev_state {
 	MTK_HW_INIT,
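Bitmaps like MT7986_CLKS_BITMAP gate which entries of clks[] must resolve at probe. A sketch of the gating loop, simplified from the driver (mtk_clks_source_name is the driver's clock-name table):

/* sketch: only clocks selected in the per-SoC bitmap are mandatory */
for (i = 0; i < MTK_CLK_MAX; i++) {
	eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]);
	if (IS_ERR(eth->clks[i])) {
		if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (eth->soc->required_clks & BIT(i))
			return -EINVAL;	/* required for this SoC */
		eth->clks[i] = NULL;	/* optional elsewhere */
	}
}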
@@ -687,7 +706,7 @@ struct mtk_tx_buf {
  * are present
  */
 struct mtk_tx_ring {
-	struct mtk_tx_dma *dma;
+	void *dma;
 	struct mtk_tx_buf *buf;
 	dma_addr_t phys;
 	struct mtk_tx_dma *next_free;
@@ -717,7 +736,7 @@ enum mtk_rx_flags {
  * @calc_idx: The current head of ring
  */
 struct mtk_rx_ring {
-	struct mtk_rx_dma *dma;
+	void *dma;
 	u8 **data;
 	dma_addr_t phys;
 	u16 frag_size;
@@ -741,7 +760,9 @@ enum mkt_eth_capabilities {
 	MTK_SHARED_INT_BIT,
 	MTK_TRGMII_MT7621_CLK_BIT,
 	MTK_QDMA_BIT,
+	MTK_NETSYS_V2_BIT,
 	MTK_SOC_MT7628_BIT,
+	MTK_RSTCTRL_PPE1_BIT,
 	/* MUX BITS*/
 	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -773,7 +794,9 @@ enum mkt_eth_capabilities {
 #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
 #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
 #define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
 #define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
 #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
 	BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -846,8 +869,62 @@ enum mkt_eth_capabilities {
 		      MTK_MUX_U3_GMAC2_TO_QPHY | \
 		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7986_CAPS   (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+		       MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+		       MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
+struct mtk_tx_dma_desc_info {
+	dma_addr_t	addr;
+	u32		size;
+	u16		vlan_tci;
+	u16		qid;
+	u8		gso:1;
+	u8		csum:1;
+	u8		vlan:1;
+	u8		first:1;
+	u8		last:1;
+};
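mtk_tx_dma_desc_info decouples what the tx-map path computes from how each descriptor generation encodes it, so the fill side stays generation-agnostic. A sketch of how a caller might populate it from an skb (illustrative field choices, not the exact hunk from this series):

/* sketch: per-segment info handed to the v1/v2 descriptor writers */
struct mtk_tx_dma_desc_info txd_info = {
	.size = skb_headlen(skb),
	.gso = skb_is_gso(skb),
	.csum = skb->ip_summed == CHECKSUM_PARTIAL,
	.vlan = skb_vlan_tag_present(skb),
	.vlan_tci = skb_vlan_tag_get(skb),
	.first = true,
	.last = !skb_is_nonlinear(skb),
};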
+struct mtk_reg_map {
+	u32	tx_irq_mask;
+	u32	tx_irq_status;
+	struct {
+		u32	rx_ptr;		/* rx base pointer */
+		u32	rx_cnt_cfg;	/* rx max count configuration */
+		u32	pcrx_ptr;	/* rx cpu pointer */
+		u32	glo_cfg;	/* global configuration */
+		u32	rst_idx;	/* reset index */
+		u32	delay_irq;	/* delay interrupt */
+		u32	irq_status;	/* interrupt status */
+		u32	irq_mask;	/* interrupt mask */
+		u32	int_grp;
+	} pdma;
+	struct {
+		u32	qtx_cfg;	/* tx queue configuration */
+		u32	rx_ptr;		/* rx base pointer */
+		u32	rx_cnt_cfg;	/* rx max count configuration */
+		u32	qcrx_ptr;	/* rx cpu pointer */
+		u32	glo_cfg;	/* global configuration */
+		u32	rst_idx;	/* reset index */
+		u32	delay_irq;	/* delay interrupt */
+		u32	fc_th;		/* flow control */
+		u32	int_grp;
+		u32	hred;		/* interrupt mask */
+		u32	ctx_ptr;	/* tx acquire cpu pointer */
+		u32	dtx_ptr;	/* tx acquire dma pointer */
+		u32	crx_ptr;	/* tx release cpu pointer */
+		u32	drx_ptr;	/* tx release dma pointer */
+		u32	fq_head;	/* fq head pointer */
+		u32	fq_tail;	/* fq tail pointer */
+		u32	fq_count;	/* fq free page count */
+		u32	fq_blen;	/* fq free page buffer length */
+	} qdma;
+	u32	gdm1_cnt;
+};
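Every hard-coded MTK_PDMA_*/MTK_QDMA_* offset the C file used to reference now routes through one of these fields, so supporting a new SoC means supplying a table rather than touching fast-path code. An illustrative partial instance, using the legacy offsets that were removed from the header above (the real mtk_reg_map, mt7628_reg_map and mt7986_reg_map tables live in mtk_eth_soc.c and are not shown in this diff):

/* sketch: field meanings as above; offsets taken from the removed
 * v1 defines (0xa04, 0x1a04, ...) purely for illustration
 */
static const struct mtk_reg_map example_reg_map = {
	.tx_irq_mask	= 0x1a1c,
	.tx_irq_status	= 0x1a18,
	.pdma = {
		.glo_cfg	= 0xa04,
		.delay_irq	= 0xa0c,
		.irq_status	= 0xa20,
		.irq_mask	= 0xa28,
		.int_grp	= 0xa50,
	},
	.qdma = {
		.glo_cfg	= 0x1a04,
		.delay_irq	= 0x1a0c,
		.fc_th		= 0x1a10,
		.int_grp	= 0x1a20,
		.hred		= 0x1a44,
	},
};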
 /* struct mtk_eth_data - This is the structure holding all differences
  * among various plaforms
+ * @reg_map			Soc register map.
  * @ana_rgc3:			The offset for register ANA_RGC3 related to
  *				sgmiisys syscon
  * @caps			Flags shown the extra capability for the SoC
@@ -856,14 +933,29 @@ enum mkt_eth_capabilities {
  *				the target SoC
  * @required_pctl		A bool value to show whether the SoC requires
  *				the extra setup for those pins used by GMAC.
+ * @txd_size			Tx DMA descriptor size.
+ * @rxd_size			Rx DMA descriptor size.
+ * @rx_irq_done_mask		Rx irq done register mask.
+ * @rx_dma_l4_valid		Rx DMA valid register mask.
+ * @dma_max_len			Max DMA tx/rx buffer length.
+ * @dma_len_offset		Tx/Rx DMA length field offset.
  */
 struct mtk_soc_data {
+	const struct mtk_reg_map *reg_map;
 	u32	ana_rgc3;
 	u32	caps;
 	u32	required_clks;
 	bool	required_pctl;
 	u8	offload_version;
 	netdev_features_t hw_features;
+	struct {
+		u32	txd_size;
+		u32	rxd_size;
+		u32	rx_irq_done_mask;
+		u32	rx_dma_l4_valid;
+		u32	dma_max_len;
+		u32	dma_len_offset;
+	} txrx;
 };
 /* currently no SoC has more than 2 macs */
@@ -959,7 +1051,7 @@ struct mtk_eth {
 	struct mtk_rx_ring rx_ring_qdma;
 	struct napi_struct tx_napi;
 	struct napi_struct rx_napi;
-	struct mtk_tx_dma *scratch_ring;
+	void *scratch_ring;
 	dma_addr_t phy_scratch_ring;
 	void *scratch_head;
 	struct clk *clks[MTK_CLK_MAX];
@@ -982,9 +1074,6 @@ struct mtk_eth {
 	u32 tx_bytes;
 	struct dim tx_dim;
-	u32 tx_int_mask_reg;
-	u32 tx_int_status_reg;
-	u32 rx_dma_l4_valid;
 	int ip_align;
 	struct mtk_ppe *ppe;
...