Commit 9ed4050c authored by Ivan Khoronzhuk, committed by David S. Miller

net: ethernet: ti: cpsw: add XDP support

Add XDP support based on the rx page_pool allocator, one frame per page.
The page pool allocator is used with the assumption that only one
rx_handler is running at a time. DMA map/unmap is reused from the page
pool even though there is no need to map the whole page.

Due to the specifics of cpsw, the same TX/RX handler can be used by two
network devices, so special fields are added to the buffer to identify
the interface a frame is destined for. Thus XDP works for both
interfaces, which allows xdp redirect between the two interfaces to be
tested easily. Also, each rx queue has its own page pool, which is
common to both netdevs.

XDP prog is common for all channels till appropriate changes are added
in XDP infrastructure. Also, once page_pool recycling becomes part of
skb netstack some simplifications can be added, like removing
page_pool_release_page() before skb receive.

In order to keep rx_dev unchanged during a redirect — which may be of
use in the future — the flush is done in the rx_handler. This conforms
with the rx_dev tracing requirement pointed out by Jesper.

Also, there is probability, that XDP generic code can be extended to
support multi ndev drivers like this one, using same rx queue for
several ndevs, based on switchdev for instance or else. In this case,
driver can be modified like exposed here:
https://lkml.org/lkml/2019/7/3/243

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 608ef620
...@@ -50,6 +50,7 @@ config TI_CPSW ...@@ -50,6 +50,7 @@ config TI_CPSW
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_MDIO select TI_DAVINCI_MDIO
select MFD_SYSCON select MFD_SYSCON
select PAGE_POOL
select REGMAP select REGMAP
---help--- ---help---
This driver supports TI's CPSW Ethernet Switch. This driver supports TI's CPSW Ethernet Switch.
......
This diff is collapsed.
...@@ -578,6 +578,18 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx, ...@@ -578,6 +578,18 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
return 0; return 0;
} }
/* Close every slave net device registered on this cpsw instance.
 * Used as the common bail-out path when a channel/ring
 * reconfiguration fails and the interfaces must be brought down.
 */
static void cpsw_fail(struct cpsw_common *cpsw)
{
	struct net_device *slave_ndev;
	int slave;

	for (slave = 0; slave < cpsw->data.slaves; slave++) {
		slave_ndev = cpsw->slaves[slave].ndev;
		if (slave_ndev)
			dev_close(slave_ndev);
	}
}
int cpsw_set_channels_common(struct net_device *ndev, int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs, struct ethtool_channels *chs,
cpdma_handler_fn rx_handler) cpdma_handler_fn rx_handler)
...@@ -585,7 +597,7 @@ int cpsw_set_channels_common(struct net_device *ndev, ...@@ -585,7 +597,7 @@ int cpsw_set_channels_common(struct net_device *ndev,
struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw; struct cpsw_common *cpsw = priv->cpsw;
struct net_device *sl_ndev; struct net_device *sl_ndev;
int i, ret; int i, new_pools, ret;
ret = cpsw_check_ch_settings(cpsw, chs); ret = cpsw_check_ch_settings(cpsw, chs);
if (ret < 0) if (ret < 0)
...@@ -593,6 +605,8 @@ int cpsw_set_channels_common(struct net_device *ndev, ...@@ -593,6 +605,8 @@ int cpsw_set_channels_common(struct net_device *ndev,
cpsw_suspend_data_pass(ndev); cpsw_suspend_data_pass(ndev);
new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;
ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler); ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
if (ret) if (ret)
goto err; goto err;
...@@ -622,12 +636,19 @@ int cpsw_set_channels_common(struct net_device *ndev, ...@@ -622,12 +636,19 @@ int cpsw_set_channels_common(struct net_device *ndev,
cpsw_split_res(cpsw); cpsw_split_res(cpsw);
if (new_pools) {
cpsw_destroy_xdp_rxqs(cpsw);
ret = cpsw_create_xdp_rxqs(cpsw);
if (ret)
goto err;
}
ret = cpsw_resume_data_pass(ndev); ret = cpsw_resume_data_pass(ndev);
if (!ret) if (!ret)
return 0; return 0;
err: err:
dev_err(priv->dev, "cannot update channels number, closing device\n"); dev_err(priv->dev, "cannot update channels number, closing device\n");
dev_close(ndev); cpsw_fail(cpsw);
return ret; return ret;
} }
...@@ -647,8 +668,7 @@ void cpsw_get_ringparam(struct net_device *ndev, ...@@ -647,8 +668,7 @@ void cpsw_get_ringparam(struct net_device *ndev,
int cpsw_set_ringparam(struct net_device *ndev, int cpsw_set_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering) struct ethtool_ringparam *ering)
{ {
struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct cpsw_common *cpsw = priv->cpsw;
int descs_num, ret; int descs_num, ret;
/* ignore ering->tx_pending - only rx_pending adjustment is supported */ /* ignore ering->tx_pending - only rx_pending adjustment is supported */
...@@ -672,13 +692,20 @@ int cpsw_set_ringparam(struct net_device *ndev, ...@@ -672,13 +692,20 @@ int cpsw_set_ringparam(struct net_device *ndev,
return ret; return ret;
} }
if (cpsw->usage_count) {
cpsw_destroy_xdp_rxqs(cpsw);
ret = cpsw_create_xdp_rxqs(cpsw);
if (ret)
goto err;
}
ret = cpsw_resume_data_pass(ndev); ret = cpsw_resume_data_pass(ndev);
if (!ret) if (!ret)
return 0; return 0;
err: err:
cpdma_set_num_rx_descs(cpsw->dma, descs_num); cpdma_set_num_rx_descs(cpsw->dma, descs_num);
dev_err(cpsw->dev, "cannot set ring params, closing device\n"); dev_err(cpsw->dev, "cannot set ring params, closing device\n");
dev_close(ndev); cpsw_fail(cpsw);
return ret; return ret;
} }
......
...@@ -346,6 +346,7 @@ struct cpsw_common { ...@@ -346,6 +346,7 @@ struct cpsw_common {
int rx_ch_num, tx_ch_num; int rx_ch_num, tx_ch_num;
int speed; int speed;
int usage_count; int usage_count;
struct page_pool *page_pool[CPSW_MAX_QUEUES];
}; };
struct cpsw_priv { struct cpsw_priv {
...@@ -360,6 +361,10 @@ struct cpsw_priv { ...@@ -360,6 +361,10 @@ struct cpsw_priv {
int shp_cfg_speed; int shp_cfg_speed;
int tx_ts_enabled; int tx_ts_enabled;
int rx_ts_enabled; int rx_ts_enabled;
struct bpf_prog *xdp_prog;
struct xdp_rxq_info xdp_rxq[CPSW_MAX_QUEUES];
struct xdp_attachment_info xdpi;
u32 emac_port; u32 emac_port;
struct cpsw_common *cpsw; struct cpsw_common *cpsw;
}; };
...@@ -391,6 +396,8 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv); ...@@ -391,6 +396,8 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw); void cpsw_intr_enable(struct cpsw_common *cpsw);
void cpsw_intr_disable(struct cpsw_common *cpsw); void cpsw_intr_disable(struct cpsw_common *cpsw);
void cpsw_tx_handler(void *token, int len, int status); void cpsw_tx_handler(void *token, int len, int status);
int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
/* ethtool */ /* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev); u32 cpsw_get_msglevel(struct net_device *ndev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment