Commit 4646651e authored by David S. Miller

Merge branch 'mediatek-hw-lro'

Nelson Chang says:

====================
net: ethernet: mediatek: add HW LRO functions

The series adds hardware large receive offload (HW LRO) support and the ethtool functions for configuring its RX flows.

changes since v3:
- Respin the patches against the newer driver
- Move the hwlro dts description to the optional properties

changes since v2:
- Add ndo_fix_features to prevent NETIF_F_LRO from being turned off while RX flows are programmed
- Rephrase the dts property as a capability indicating that the hardware supports LRO

changes since v1:
- Add HW LRO support
- Add ethtool hooks to set LRO RX flows
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0fbc81b3 004e6cc6
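The new .set_rxnfc hook is reached through the standard ethtool rxnfc ioctl. As a rough illustration, here is a minimal userspace sketch (the interface name and destination address are placeholders, and NETIF_F_LRO must already be enabled, e.g. via ethtool -K eth0 lro on) that inserts a TCP/IPv4 destination-IP rule into LRO slot 0, equivalent to ethtool -N eth0 flow-type tcp4 dst-ip 192.0.2.1 loc 0:

/* sketch only: program LRO RX flow slot 0 via ETHTOOL_SRXCLSRLINS */
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.location = 0;	/* the driver accepts slots 0 and 1 per MAC */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");	/* placeholder */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder device */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}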
@@ -24,7 +24,7 @@ Required properties:
Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- mediatek,hwlro: indicates that the hardware supports LRO functions
* Ethernet MAC node
@@ -51,6 +51,7 @@ eth: ethernet@1b100000 {
reset-names = "eth";
mediatek,ethsys = <&ethsys>;
mediatek,pctl = <&syscfg_pctl_a>;
mediatek,hwlro;
#address-cells = <1>;
#size-cells = <0>;
@@ -820,11 +820,51 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
int i;
struct mtk_rx_ring *ring;
int idx;
if (!eth->hwlro)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring = &eth->rx_ring[i];
idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
}
return NULL;
}
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
struct mtk_rx_ring *ring;
int i;
if (!eth->hwlro) {
ring = &eth->rx_ring[0];
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
} else {
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring = &eth->rx_ring[i];
if (ring->calc_idx_update) {
ring->calc_idx_update = false;
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
}
}
}
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
- int idx = ring->calc_idx;
+ struct mtk_rx_ring *ring;
+ int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
@@ -836,7 +876,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_addr_t dma_addr;
int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx);
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -907,12 +951,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
done++;
}
rx_done:
if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
- mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0);
+ mtk_update_rx_cpu_idx(eth);
}
return done;
@@ -1135,32 +1180,41 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
}
- static int mtk_rx_alloc(struct mtk_eth *eth)
+ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+ int rx_data_len, rx_dma_size;
int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
- ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (!ring->data[i])
return -ENOMEM;
}
ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ rx_dma_size * sizeof(*ring->dma),
&ring->phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->dma)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
ring->data[i] + NET_SKB_PAD,
ring->buf_size,
@@ -1171,27 +1225,30 @@ static int mtk_rx_alloc(struct mtk_eth *eth)
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
- ring->calc_idx = MTK_DMA_SIZE - 1;
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+ mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0;
}
- static void mtk_rx_clean(struct mtk_eth *eth)
+ static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
int i;
if (ring->data && ring->dma) {
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring->dma_size; i++) {
if (!ring->data[i])
continue;
if (!ring->dma[i].rxd1)
@@ -1208,13 +1265,274 @@ static void mtk_rx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
ring->dma = NULL;
}
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
int i;
u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
/* set LRO rings to auto-learn modes */
ring_ctrl_dw2 |= MTK_RING_AUTO_LEARN_MODE;
/* validate LRO ring */
ring_ctrl_dw2 |= MTK_RING_VLD;
/* set AGE timer (unit: 20us) */
ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
/* set max AGG timer (unit: 20us) */
ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
/* set max LRO AGG count */
ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
}
/* IPv4 checksum update enable */
lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
/* switch priority comparison to packet count mode */
lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
/* bandwidth threshold setting */
mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
/* auto-learn score delta setting */
mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
MTK_PDMA_LRO_ALT_REFRESH_TIMER);
/* set HW LRO mode & the max aggregation count for rx packets */
lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
/* the minimal remaining room of SDL0 in RXD for lro aggregation */
lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
/* enable HW LRO */
lro_ctrl_dw0 |= MTK_LRO_EN;
mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
return 0;
}
static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
int i;
u32 val;
/* relinquish lro rings, flush aggregated packets */
mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
/* wait for relinquishments done */
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
if (val & MTK_LRO_RING_RELINQUISH_DONE) {
msleep(20);
continue;
}
break;
}
/* invalidate lro rings */
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
/* disable HW LRO */
mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}
static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
u32 reg_val;
reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
/* invalidate the IP setting */
mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
/* validate the IP setting */
mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}
static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
u32 reg_val;
reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
/* invalidate the IP setting */
mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}
static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
int cnt = 0;
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (mac->hwlro_ip[i])
cnt++;
}
return cnt;
}
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int hwlro_idx;
if ((fsp->flow_type != TCP_V4_FLOW) ||
(!fsp->h_u.tcp_ip4_spec.ip4dst) ||
(fsp->location > 1))
return -EINVAL;
mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
return 0;
}
static int mtk_hwlro_del_ipaddr(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int hwlro_idx;
if (fsp->location > 1)
return -EINVAL;
mac->hwlro_ip[fsp->location] = 0;
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
return 0;
}
static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int i, hwlro_idx;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
mac->hwlro_ip[i] = 0;
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
}
mac->hwlro_ip_cnt = 0;
}
static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
/* only the tcp dst ipv4 address is meaningful; other fields are ignored */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
fsp->m_u.tcp_ip4_spec.ip4dst = 0;
fsp->h_u.tcp_ip4_spec.ip4src = 0;
fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
fsp->h_u.tcp_ip4_spec.psrc = 0;
fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
fsp->h_u.tcp_ip4_spec.pdst = 0;
fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
fsp->h_u.tcp_ip4_spec.tos = 0;
fsp->m_u.tcp_ip4_spec.tos = 0xff;
return 0;
}
static int mtk_hwlro_get_fdir_all(struct net_device *dev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mtk_mac *mac = netdev_priv(dev);
int cnt = 0;
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
}
}
cmd->rule_cnt = cnt;
return 0;
}
static netdev_features_t mtk_fix_features(struct net_device *dev,
netdev_features_t features)
{
if (!(features & NETIF_F_LRO)) {
struct mtk_mac *mac = netdev_priv(dev);
int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
if (ip_cnt) {
netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
features |= NETIF_F_LRO;
}
}
return features;
}
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
int err = 0;
if (!((dev->features ^ features) & NETIF_F_LRO))
return 0;
if (!(features & NETIF_F_LRO))
mtk_hwlro_netdev_disable(dev);
return err;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
@@ -1235,6 +1553,7 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
@@ -1250,9 +1569,20 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (err)
return err;
- err = mtk_rx_alloc(eth);
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
if (eth->hwlro) {
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
if (err)
return err;
}
err = mtk_hwlro_rx_init(eth);
if (err)
return err;
}
/* Enable random early drop and set drop threshold automatically */
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
@@ -1278,7 +1608,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth);
+ mtk_rx_clean(eth, 0);
if (eth->hwlro) {
mtk_hwlro_rx_uninit(eth);
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
mtk_rx_clean(eth, i);
}
kfree(eth->scratch_head);
}
@@ -1810,6 +2147,62 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
if (dev->features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
if (dev->features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRULE:
if (dev->features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
if (dev->features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
default:
break;
}
return ret;
}
static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if (dev->features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
if (dev->features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:
break;
}
return ret;
}
static const struct ethtool_ops mtk_ethtool_ops = {
.get_settings = mtk_get_settings,
.set_settings = mtk_set_settings,
@@ -1821,6 +2214,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -1835,6 +2230,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
@@ -1873,6 +2270,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw = eth;
mac->of_node = np;
memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
mac->hwlro_ip_cnt = 0;
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
@@ -1889,6 +2289,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->hw_features = MTK_HW_FEATURES;
if (eth->hwlro)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
eth->netdev[id]->features |= MTK_HW_FEATURES;
@@ -1941,6 +2346,8 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
@@ -39,7 +39,21 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
- #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+ #define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
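Note that the reworked macro masks with (Y - 1) rather than taking a modulus, so it assumes every ring size is a power of two; both MTK_DMA_SIZE and the new 8-entry MTK_HW_LRO_DMA_SIZE qualify. A stand-alone sketch of the wraparound behavior:

/* illustration only: mirrors NEXT_RX_DESP_IDX with a power-of-two ring size */
#include <assert.h>

#define NEXT_RX_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))

int main(void)
{
	assert(NEXT_RX_DESP_IDX(5, 8) == 6);	/* normal advance */
	assert(NEXT_RX_DESP_IDX(7, 8) == 0);	/* wraps at the last slot */
	return 0;
}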
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
#define MTK_MAX_LRO_IP_CNT 2
#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
#define MTK_HW_LRO_MAX_AGG_CNT 64
#define MTK_HW_LRO_BW_THRE 3000
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
@@ -50,6 +64,9 @@
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
/* PDMA HW LRO Alter Flow Timer Register */
#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
@@ -70,12 +87,29 @@
/* PDMA RX Base Pointer Register */
#define MTK_PRX_BASE_PTR0 0x900
#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
/* PDMA RX Maximum Count Register */
#define MTK_PRX_MAX_CNT0 0x904
#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
/* PDMA RX CPU Pointer Register */
#define MTK_PRX_CRX_IDX0 0x908
#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x3 << 26)
#define MTK_LRO_RING_RELINQUISH_DONE (0x3 << 29)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
#define MTK_PDMA_LRO_CTRL_DW3 0x98c
#define MTK_ADMA_MODE BIT(15)
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
@@ -84,6 +118,7 @@
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT 0xa0c
@@ -94,10 +129,33 @@
/* PDMA Interrupt Mask Register */
#define MTK_PDMA_INT_MASK 0xa28
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 0xa50
#define MTK_PDMA_INT_GRP2 0xa54
/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
#define MTK_RING_MYIP_VLD BIT(9)
/* PDMA HW LRO Ring Control Registers */
#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
#define MTK_RING_AUTO_LEARN_MODE (3 << 6)
#define MTK_RING_VLD BIT(8)
#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
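The aggregation count is an 8-bit value that straddles two registers: the low 6 bits of MTK_HW_LRO_MAX_AGG_CNT land in ring CTRL_DW2 and the high 2 bits in CTRL_DW3, which is why mtk_hwlro_rx_init() programs both the _L and _H halves. A stand-alone sketch checking the split for the value 64:

/* illustration only: 64 = 0b01000000, so the low 6 bits are 0 and the high 2 bits are 1 */
#include <assert.h>

#define MTK_HW_LRO_MAX_AGG_CNT	64

int main(void)
{
	assert((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) == 0);		/* _L half */
	assert(((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) == 1);	/* _H half */
	return 0;
}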
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -132,7 +190,6 @@
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
- #define MTK_PST_DRX_IDX0 BIT(16)
/* QDMA Delay Interrupt Register */
#define MTK_QDMA_DELAY_INT 0x1A0C
@@ -377,6 +434,12 @@ struct mtk_tx_ring {
atomic_t free_count;
};
/* PDMA rx ring mode */
enum mtk_rx_flags {
MTK_RX_FLAGS_NORMAL = 0,
MTK_RX_FLAGS_HWLRO,
};
/* struct mtk_rx_ring - This struct holds info describing a RX ring
* @dma: The descriptor ring
* @data: The memory pointed at by the ring
@@ -391,7 +454,10 @@ struct mtk_rx_ring {
dma_addr_t phys;
u16 frag_size;
u16 buf_size;
u16 dma_size;
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
};
/* currently no SoC has more than 2 macs */
@@ -439,9 +505,10 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
@@ -470,6 +537,8 @@ struct mtk_mac {
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
struct phy_device *phy_dev;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */