Commit 23ba5117 authored by David S. Miller

Merge branch 'stmmac-xdp-zc'

Ong Boon Leong says:

====================
stmmac: add XDP ZC support

This is the v2 patch series that adds XDP zero-copy (ZC) support to the stmmac driver.

Summary of v2 patch changes:

6/7: fix: synchronize_rcu() is now called in stmmac_disable_all_queues(), which
     is used by ndo_setup_tc().
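
For reference, the fix wraps the existing queue-disable logic in a new
__stmmac_disable_all_queues() helper and calls synchronize_rcu() first
whenever an XSK pool is attached to any RX queue. Condensed from the diff
below:

 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        struct stmmac_rx_queue *rx_q;
        u32 queue;

        /* synchronize_rcu() needed for pending XDP buffers to drain */
        for (queue = 0; queue < rx_queues_cnt; queue++) {
                rx_q = &priv->rx_queue[queue];
                if (rx_q->xsk_pool) {
                        synchronize_rcu();
                        break;
                }
        }

        __stmmac_disable_all_queues(priv);
 }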

 ########################################################################

Continuous burst traffic is generated by a pktgen script and, while xdpsock
is processing packets, the following tc-loop.sh script is run in a
continuous loop:

 #!/bin/bash
 tc qdisc del dev eth0 parent root
 tc qdisc add dev eth0 ingress
 tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 queues 1@0 1@1 1@2 1@3 hw 0
 tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 0 hw_tc 0
 tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc 1
 tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 2 hw_tc 2
 tc filter add dev eth0 parent ffff: protocol 802.1Q flower vlan_prio 3 hw_tc 3
 tc qdisc list dev eth0
 tc filter show dev eth0 ingress
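
The script tears down and re-creates the qdisc setup: it adds an ingress
qdisc, an mqprio root qdisc that maps priorities 0-3 to four traffic classes
(one queue each), and flower filters that steer VLAN priorities 0-3 to the
matching traffic classes. Looping it exercises ndo_setup_tc() repeatedly
while zero-copy traffic is flowing.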

 On a different SSH terminal:
 $ while true; do ./tc-loop.sh; sleep 1; done

The v2 patch series has been tested using the xdpsock app:
 $ ./xdpsock -i eth0 -l -z
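
(In the xdpsock sample application, -i selects the interface, -l selects the
l2fwd mode that receives each frame and transmits it back out, and -z forces
zero-copy mode.)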

From the xdpsock pps report and dmesg, we do not see any RCU-related
warnings; the only observable difference while the script runs is a
momentary drop in the pps rate.

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 436347         191361334
tx                 436411         191361334

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 254117         191615476
tx                 254053         191615412

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 466395         192081924
tx                 466395         192081860

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 287410         192369365
tx                 287474         192369365

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 395853         192765329
tx                 395789         192765265

 sock0@eth0:0 l2fwd xdp-drv
                   pps            pkts           1.00
rx                 466132         193231514
tx                 466132         193231450

 ########################################################################

Based on the above results, the fix looks promising. I would appreciate it if
the community could review the patch series and provide feedback for
improvement.
====================
parents ee684c32 132c32ee
@@ -40,6 +40,7 @@ enum stmmac_txbuf_type {
 	STMMAC_TXBUF_T_SKB,
 	STMMAC_TXBUF_T_XDP_TX,
 	STMMAC_TXBUF_T_XDP_NDO,
+	STMMAC_TXBUF_T_XSK_TX,
 };
 
 struct stmmac_tx_info {
@@ -69,6 +70,8 @@ struct stmmac_tx_queue {
 		struct xdp_frame **xdpf;
 	};
 	struct stmmac_tx_info *tx_skbuff_dma;
+	struct xsk_buff_pool *xsk_pool;
+	u32 xsk_frames_done;
 	unsigned int cur_tx;
 	unsigned int dirty_tx;
 	dma_addr_t dma_tx_phy;
@@ -77,9 +80,14 @@ struct stmmac_tx_queue {
 };
 
 struct stmmac_rx_buffer {
-	struct page *page;
-	dma_addr_t addr;
-	__u32 page_offset;
+	union {
+		struct {
+			struct page *page;
+			dma_addr_t addr;
+			__u32 page_offset;
+		};
+		struct xdp_buff *xdp;
+	};
 	struct page *sec_page;
 	dma_addr_t sec_addr;
 };
@@ -88,6 +96,7 @@ struct stmmac_rx_queue {
 	u32 rx_count_frames;
 	u32 queue_index;
 	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 	struct page_pool *page_pool;
 	struct stmmac_rx_buffer *buf_pool;
 	struct stmmac_priv *priv_data;
@@ -95,6 +104,7 @@ struct stmmac_rx_queue {
 	struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
 	unsigned int cur_rx;
 	unsigned int dirty_rx;
+	unsigned int buf_alloc_num;
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
@@ -109,6 +119,7 @@ struct stmmac_rx_queue {
 struct stmmac_channel {
 	struct napi_struct rx_napi ____cacheline_aligned_in_smp;
 	struct napi_struct tx_napi ____cacheline_aligned_in_smp;
+	struct napi_struct rxtx_napi ____cacheline_aligned_in_smp;
 	struct stmmac_priv *priv_data;
 	spinlock_t lock;
 	u32 index;
@@ -283,6 +294,7 @@ struct stmmac_priv {
 	struct stmmac_rss rss;
 
 	/* XDP BPF Program */
+	unsigned long *af_xdp_zc_qps;
 	struct bpf_prog *xdp_prog;
 };
 
@@ -328,6 +340,12 @@ static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 	return 0;
 }
 
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+
 #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
 void stmmac_selftest_run(struct net_device *dev,
			 struct ethtool_test *etest, u64 *buf);
...
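
Taken together, the driver header changes above add the state needed for
AF_XDP zero-copy: a per-queue XSK buffer pool pointer and XSK completion
counter on the TX side, an xdp_buff pointer overlaid on the RX buffer
bookkeeping, a count of RX buffers actually allocated (buf_alloc_num), a
combined rxtx_napi context per channel, a bitmap of queues running in
zero-copy mode (af_xdp_zc_qps), and declarations for the per-queue
enable/disable helpers and the XSK wakeup handler implemented in the main
driver file below.
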
@@ -40,6 +40,7 @@
 #include <linux/udp.h>
 #include <linux/bpf_trace.h>
 #include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 #include "stmmac_xdp.h"
@@ -69,6 +70,11 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define	STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
 #define	STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
 
+/* Limit to make sure XDP TX and slow path can coexist */
+#define STMMAC_XSK_TX_BUDGET_MAX	256
+#define STMMAC_TX_XSK_AVAIL		16
+#define STMMAC_RX_FILL_BATCH		16
+
 #define STMMAC_XDP_PASS		0
 #define STMMAC_XDP_CONSUMED	BIT(0)
 #define STMMAC_XDP_TX		BIT(1)
@@ -117,6 +123,8 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
 
 #ifdef CONFIG_DEBUG_FS
 static const struct net_device_ops stmmac_netdev_ops;
@@ -179,11 +187,7 @@ static void stmmac_verify_args(void)
 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 }
 
-/**
- * stmmac_disable_all_queues - Disable all queues
- * @priv: driver private structure
- */
-static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
@@ -193,6 +197,12 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
+		if (stmmac_xdp_is_enabled(priv) &&
+		    test_bit(queue, priv->af_xdp_zc_qps)) {
+			napi_disable(&ch->rxtx_napi);
+			continue;
+		}
+
 		if (queue < rx_queues_cnt)
 			napi_disable(&ch->rx_napi);
 		if (queue < tx_queues_cnt)
@@ -200,6 +210,28 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 	}
 }
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	struct stmmac_rx_queue *rx_q;
+	u32 queue;
+
+	/* synchronize_rcu() needed for pending XDP buffers to drain */
+	for (queue = 0; queue < rx_queues_cnt; queue++) {
+		rx_q = &priv->rx_queue[queue];
+		if (rx_q->xsk_pool) {
+			synchronize_rcu();
+			break;
+		}
+	}
+
+	__stmmac_disable_all_queues(priv);
+}
+
 /**
  * stmmac_enable_all_queues - Enable all queues
  * @priv: driver private structure
@@ -214,6 +246,12 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 	for (queue = 0; queue < maxq; queue++) {
 		struct stmmac_channel *ch = &priv->channel[queue];
 
+		if (stmmac_xdp_is_enabled(priv) &&
+		    test_bit(queue, priv->af_xdp_zc_qps)) {
+			napi_enable(&ch->rxtx_napi);
+			continue;
+		}
+
 		if (queue < rx_queues_cnt)
 			napi_enable(&ch->rx_napi);
 		if (queue < tx_queues_cnt)
@@ -1388,12 +1426,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
-	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-	if (!buf->page)
-		return -ENOMEM;
-	buf->page_offset = stmmac_rx_offset(priv);
+	if (!buf->page) {
+		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+		if (!buf->page)
+			return -ENOMEM;
+		buf->page_offset = stmmac_rx_offset(priv);
+	}
 
-	if (priv->sph) {
+	if (priv->sph && !buf->sec_page) {
 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
 		if (!buf->sec_page)
 			return -ENOMEM;
@@ -1465,6 +1505,9 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 		tx_q->xdpf[i] = NULL;
 	}
 
+	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
+		tx_q->xsk_frames_done++;
+
 	if (tx_q->tx_skbuff[i] &&
 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
@@ -1475,6 +1518,120 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 	tx_q->tx_skbuff_dma[i].map_as_page = false;
 }
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
* @queue: RX queue index
*/
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
for (i = 0; i < priv->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
gfp_t flags)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
for (i = 0; i < priv->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
if (priv->extend_desc)
p = &((rx_q->dma_erx + i)->basic);
else
p = rx_q->dma_rx + i;
ret = stmmac_init_rx_buffers(priv, p, i, flags,
queue);
if (ret)
return ret;
rx_q->buf_alloc_num++;
}
return 0;
}
/**
* dma_recycle_rx_skbufs - recycle RX dma buffers
* @priv: private structure
* @queue: RX queue index
*/
static void dma_recycle_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
for (i = 0; i < priv->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
}
if (priv->sph && buf->sec_page) {
page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
}
}
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
* @queue: RX queue index
*/
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
for (i = 0; i < priv->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
continue;
xsk_buff_free(buf->xdp);
buf->xdp = NULL;
}
}
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int i;
for (i = 0; i < priv->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + i);
else
p = rx_q->dma_rx + i;
buf = &rx_q->buf_pool[i];
buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
if (!buf->xdp)
return -ENOMEM;
dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
stmmac_set_desc_addr(priv, p, dma_addr);
rx_q->buf_alloc_num++;
}
return 0;
}
/** /**
* stmmac_reinit_rx_buffers - reinit the RX descriptor buffer. * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
* @priv: driver private structure * @priv: driver private structure
...@@ -1485,158 +1642,159 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) ...@@ -1485,158 +1642,159 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
{ {
u32 rx_count = priv->plat->rx_queues_to_use; u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue; u32 queue;
int i;
for (queue = 0; queue < rx_count; queue++) { for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
for (i = 0; i < priv->dma_rx_size; i++) { if (rx_q->xsk_pool)
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; dma_free_rx_xskbufs(priv, queue);
else
if (buf->page) { dma_recycle_rx_skbufs(priv, queue);
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
}
if (priv->sph && buf->sec_page) { rx_q->buf_alloc_num = 0;
page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
}
} }
for (queue = 0; queue < rx_count; queue++) { for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int ret;
for (i = 0; i < priv->dma_rx_size; i++) { if (rx_q->xsk_pool) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; /* RX XDP ZC buffer pool may not be populated, e.g.
struct dma_desc *p; * xdpsock TX-only.
*/
if (priv->extend_desc) stmmac_alloc_rx_buffers_zc(priv, queue);
p = &((rx_q->dma_erx + i)->basic); } else {
else ret = stmmac_alloc_rx_buffers(priv, queue, GFP_KERNEL);
p = rx_q->dma_rx + i; if (ret < 0)
goto err_reinit_rx_buffers;
if (!buf->page) {
buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
if (!buf->page)
goto err_reinit_rx_buffers;
buf->addr = page_pool_get_dma_addr(buf->page) +
buf->page_offset;
}
if (priv->sph && !buf->sec_page) {
buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
if (!buf->sec_page)
goto err_reinit_rx_buffers;
buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
}
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->sph)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
} }
} }
return; return;
err_reinit_rx_buffers: err_reinit_rx_buffers:
do { while (queue >= 0) {
while (--i >= 0) dma_free_rx_skbufs(priv, queue);
stmmac_free_rx_buffer(priv, queue, i);
if (queue == 0) if (queue == 0)
break; break;
i = priv->dma_rx_size; queue--;
} while (queue-- > 0); }
}
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
return NULL;
return xsk_get_pool_from_qid(priv->dev, queue);
} }
/** /**
* init_dma_rx_desc_rings - init the RX descriptor rings * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @dev: net device structure * @priv: driver private structure
* @queue: RX queue index
* @flags: gfp flag. * @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors * Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring * and allocates the socket buffers. It supports the chained and ring
* modes. * modes.
*/ */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
u32 rx_count = priv->plat->rx_queues_to_use; int ret;
int ret = -ENOMEM;
int queue;
int i;
/* RX INITIALIZATION */
netif_dbg(priv, probe, priv->dev, netif_dbg(priv, probe, priv->dev,
"SKB addresses:\nskb\t\tskb data\tdma data\n"); "(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
for (queue = 0; queue < rx_count; queue++) { stmmac_clear_rx_descriptors(priv, queue);
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev, xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
stmmac_clear_rx_descriptors(priv, queue); rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
if (rx_q->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
netdev_info(priv->dev,
"Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
rx_q->queue_index);
xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
MEM_TYPE_PAGE_POOL, MEM_TYPE_PAGE_POOL,
rx_q->page_pool)); rx_q->page_pool));
netdev_info(priv->dev, netdev_info(priv->dev,
"Register MEM_TYPE_PAGE_POOL RxQ-%d\n", "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
rx_q->queue_index); rx_q->queue_index);
}
for (i = 0; i < priv->dma_rx_size; i++) { if (rx_q->xsk_pool) {
struct dma_desc *p; /* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
stmmac_alloc_rx_buffers_zc(priv, queue);
} else {
ret = stmmac_alloc_rx_buffers(priv, queue, flags);
if (ret < 0)
return -ENOMEM;
}
if (priv->extend_desc) rx_q->cur_rx = 0;
p = &((rx_q->dma_erx + i)->basic); rx_q->dirty_rx = 0;
else
p = rx_q->dma_rx + i;
ret = stmmac_init_rx_buffers(priv, p, i, flags, /* Setup the chained descriptor addresses */
queue); if (priv->mode == STMMAC_CHAIN_MODE) {
if (ret) if (priv->extend_desc)
goto err_init_rx_buffers; stmmac_mode_init(priv, rx_q->dma_erx,
} rx_q->dma_rx_phy,
priv->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
priv->dma_rx_size, 0);
}
rx_q->cur_rx = 0; return 0;
rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); }
/* Setup the chained descriptor addresses */ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
if (priv->mode == STMMAC_CHAIN_MODE) { {
if (priv->extend_desc) struct stmmac_priv *priv = netdev_priv(dev);
stmmac_mode_init(priv, rx_q->dma_erx, u32 rx_count = priv->plat->rx_queues_to_use;
rx_q->dma_rx_phy, u32 queue;
priv->dma_rx_size, 1); int ret;
else
stmmac_mode_init(priv, rx_q->dma_rx, /* RX INITIALIZATION */
rx_q->dma_rx_phy, netif_dbg(priv, probe, priv->dev,
priv->dma_rx_size, 0); "SKB addresses:\nskb\t\tskb data\tdma data\n");
}
for (queue = 0; queue < rx_count; queue++) {
ret = __init_dma_rx_desc_rings(priv, queue, flags);
if (ret)
goto err_init_rx_buffers;
} }
return 0; return 0;
err_init_rx_buffers: err_init_rx_buffers:
while (queue >= 0) { while (queue >= 0) {
while (--i >= 0) struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
stmmac_free_rx_buffer(priv, queue, i);
if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue);
else
dma_free_rx_skbufs(priv, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
if (queue == 0) if (queue == 0)
break; break;
i = priv->dma_rx_size;
queue--; queue--;
} }
...@@ -1644,63 +1802,75 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1644,63 +1802,75 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
} }
/** /**
* init_dma_tx_desc_rings - init the TX descriptor rings * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @dev: net device structure. * @priv: driver private structure
* @queue : TX queue index
* Description: this function initializes the DMA TX descriptors * Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring * and allocates the socket buffers. It supports the chained and ring
* modes. * modes.
*/ */
static int init_dma_tx_desc_rings(struct net_device *dev) static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
u32 queue;
int i; int i;
for (queue = 0; queue < tx_queue_cnt; queue++) { netif_dbg(priv, probe, priv->dev,
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; "(%s) dma_tx_phy=0x%08x\n", __func__,
(u32)tx_q->dma_tx_phy);
netif_dbg(priv, probe, priv->dev, /* Setup the chained descriptor addresses */
"(%s) dma_tx_phy=0x%08x\n", __func__, if (priv->mode == STMMAC_CHAIN_MODE) {
(u32)tx_q->dma_tx_phy); if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
/* Setup the chained descriptor addresses */ tx_q->dma_tx_phy,
if (priv->mode == STMMAC_CHAIN_MODE) { priv->dma_tx_size, 1);
if (priv->extend_desc) else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_etx, stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy, tx_q->dma_tx_phy,
priv->dma_tx_size, 1); priv->dma_tx_size, 0);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) }
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
priv->dma_tx_size, 0);
}
for (i = 0; i < priv->dma_tx_size; i++) { tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
p = &((tx_q->dma_entx + i)->basic);
else
p = tx_q->dma_tx + i;
stmmac_clear_desc(priv, p); for (i = 0; i < priv->dma_tx_size; i++) {
struct dma_desc *p;
tx_q->tx_skbuff_dma[i].buf = 0; if (priv->extend_desc)
tx_q->tx_skbuff_dma[i].map_as_page = false; p = &((tx_q->dma_etx + i)->basic);
tx_q->tx_skbuff_dma[i].len = 0; else if (tx_q->tbs & STMMAC_TBS_AVAIL)
tx_q->tx_skbuff_dma[i].last_segment = false; p = &((tx_q->dma_entx + i)->basic);
tx_q->tx_skbuff[i] = NULL; else
} p = tx_q->dma_tx + i;
tx_q->dirty_tx = 0; stmmac_clear_desc(priv, p);
tx_q->cur_tx = 0;
tx_q->mss = 0;
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); tx_q->tx_skbuff_dma[i].buf = 0;
tx_q->tx_skbuff_dma[i].map_as_page = false;
tx_q->tx_skbuff_dma[i].len = 0;
tx_q->tx_skbuff_dma[i].last_segment = false;
tx_q->tx_skbuff[i] = NULL;
} }
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
tx_q->mss = 0;
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
return 0;
}
static int init_dma_tx_desc_rings(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
u32 queue;
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
__init_dma_tx_desc_rings(priv, queue);
return 0; return 0;
} }
...@@ -1731,19 +1901,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) ...@@ -1731,19 +1901,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
return ret; return ret;
} }
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
* @queue: RX queue index
*/
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
for (i = 0; i < priv->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
/** /**
* dma_free_tx_skbufs - free TX dma buffers * dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure * @priv: private structure
...@@ -1751,10 +1908,19 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) ...@@ -1751,10 +1908,19 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
*/ */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
int i; int i;
tx_q->xsk_frames_done = 0;
for (i = 0; i < priv->dma_tx_size; i++) for (i = 0; i < priv->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i); stmmac_free_tx_buffer(priv, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
tx_q->xsk_frames_done = 0;
tx_q->xsk_pool = NULL;
}
} }
/** /**
...@@ -1771,153 +1937,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) ...@@ -1771,153 +1937,186 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
} }
/** /**
* free_dma_rx_desc_resources - free RX dma desc resources * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure * @priv: private structure
* @queue: RX queue index
*/ */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv) static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{ {
u32 rx_count = priv->plat->rx_queues_to_use; struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
/* Release the DMA RX socket buffers */ /* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue);
else
dma_free_rx_skbufs(priv, queue); dma_free_rx_skbufs(priv, queue);
/* Free DMA regions of consistent memory previously allocated */ rx_q->buf_alloc_num = 0;
if (!priv->extend_desc) rx_q->xsk_pool = NULL;
dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) /* Free DMA regions of consistent memory previously allocated */
xdp_rxq_info_unreg(&rx_q->xdp_rxq); if (!priv->extend_desc)
dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
kfree(rx_q->buf_pool); if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
if (rx_q->page_pool) xdp_rxq_info_unreg(&rx_q->xdp_rxq);
page_pool_destroy(rx_q->page_pool);
} kfree(rx_q->buf_pool);
if (rx_q->page_pool)
page_pool_destroy(rx_q->page_pool);
}
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
__free_dma_rx_desc_resources(priv, queue);
} }
/** /**
* free_dma_tx_desc_resources - free TX dma desc resources * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure * @priv: private structure
* @queue: TX queue index
*/ */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv) static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{ {
u32 tx_count = priv->plat->tx_queues_to_use; struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
u32 queue; size_t size;
void *addr;
/* Free TX queue resources */ /* Release the DMA TX socket buffers */
for (queue = 0; queue < tx_count; queue++) { dma_free_tx_skbufs(priv, queue);
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size; if (priv->extend_desc) {
void *addr; size = sizeof(struct dma_extended_desc);
addr = tx_q->dma_etx;
} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
size = sizeof(struct dma_edesc);
addr = tx_q->dma_entx;
} else {
size = sizeof(struct dma_desc);
addr = tx_q->dma_tx;
}
/* Release the DMA TX socket buffers */ size *= priv->dma_tx_size;
dma_free_tx_skbufs(priv, queue);
if (priv->extend_desc) { dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
size = sizeof(struct dma_extended_desc);
addr = tx_q->dma_etx;
} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
size = sizeof(struct dma_edesc);
addr = tx_q->dma_entx;
} else {
size = sizeof(struct dma_desc);
addr = tx_q->dma_tx;
}
size *= priv->dma_tx_size; kfree(tx_q->tx_skbuff_dma);
kfree(tx_q->tx_skbuff);
}
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
kfree(tx_q->tx_skbuff_dma); /* Free TX queue resources */
kfree(tx_q->tx_skbuff); for (queue = 0; queue < tx_count; queue++)
} __free_dma_tx_desc_resources(priv, queue);
} }
/** /**
* alloc_dma_rx_desc_resources - alloc RX resources. * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure * @priv: private structure
* @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic) * Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of * this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to * reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism. * allow zero-copy mechanism.
*/ */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{ {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv); bool xdp_prog = stmmac_xdp_is_enabled(priv);
u32 rx_count = priv->plat->rx_queues_to_use; struct page_pool_params pp_params = { 0 };
int ret = -ENOMEM; unsigned int num_pages;
u32 queue; unsigned int napi_id;
int ret;
/* RX queues buffers and DMA */ rx_q->queue_index = queue;
for (queue = 0; queue < rx_count; queue++) { rx_q->priv_data = priv;
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue]; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
struct page_pool_params pp_params = { 0 }; pp_params.pool_size = priv->dma_rx_size;
unsigned int num_pages; num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
int ret; pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
ret = PTR_ERR(rx_q->page_pool);
rx_q->page_pool = NULL;
return ret;
}
rx_q->queue_index = queue; rx_q->buf_pool = kcalloc(priv->dma_rx_size,
rx_q->priv_data = priv; sizeof(*rx_q->buf_pool),
GFP_KERNEL);
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; if (!rx_q->buf_pool)
pp_params.pool_size = priv->dma_rx_size; return -ENOMEM;
num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
ret = PTR_ERR(rx_q->page_pool);
rx_q->page_pool = NULL;
goto err_dma;
}
rx_q->buf_pool = kcalloc(priv->dma_rx_size, if (priv->extend_desc) {
sizeof(*rx_q->buf_pool), rx_q->dma_erx = dma_alloc_coherent(priv->device,
GFP_KERNEL); priv->dma_rx_size *
if (!rx_q->buf_pool) sizeof(struct dma_extended_desc),
goto err_dma; &rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_erx)
return -ENOMEM;
if (priv->extend_desc) { } else {
rx_q->dma_erx = dma_alloc_coherent(priv->device, rx_q->dma_rx = dma_alloc_coherent(priv->device,
priv->dma_rx_size * priv->dma_rx_size *
sizeof(struct dma_extended_desc), sizeof(struct dma_desc),
&rx_q->dma_rx_phy, &rx_q->dma_rx_phy,
GFP_KERNEL); GFP_KERNEL);
if (!rx_q->dma_erx) if (!rx_q->dma_rx)
goto err_dma; return -ENOMEM;
}
} else { if (stmmac_xdp_is_enabled(priv) &&
rx_q->dma_rx = dma_alloc_coherent(priv->device, test_bit(queue, priv->af_xdp_zc_qps))
priv->dma_rx_size * napi_id = ch->rxtx_napi.napi_id;
sizeof(struct dma_desc), else
&rx_q->dma_rx_phy, napi_id = ch->rx_napi.napi_id;
GFP_KERNEL);
if (!rx_q->dma_rx)
goto err_dma;
}
ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
rx_q->queue_index, rx_q->queue_index,
ch->rx_napi.napi_id); napi_id);
if (ret) { if (ret) {
netdev_err(priv->dev, "Failed to register xdp rxq info\n"); netdev_err(priv->dev, "Failed to register xdp rxq info\n");
return -EINVAL;
}
return 0;
}
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
int ret;
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
ret = __alloc_dma_rx_desc_resources(priv, queue);
if (ret)
goto err_dma; goto err_dma;
}
} }
return 0; return 0;
...@@ -1929,60 +2128,70 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -1929,60 +2128,70 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
} }
/** /**
* alloc_dma_tx_desc_resources - alloc TX resources. * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure * @priv: private structure
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic) * Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of * this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to * reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism. * allow zero-copy mechanism.
*/ */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{ {
u32 tx_count = priv->plat->tx_queues_to_use; struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
int ret = -ENOMEM; size_t size;
u32 queue; void *addr;
/* TX queues buffers and DMA */ tx_q->queue_index = queue;
for (queue = 0; queue < tx_count; queue++) { tx_q->priv_data = priv;
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue; tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
tx_q->priv_data = priv; sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma), sizeof(struct sk_buff *),
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->tx_skbuff_dma) if (!tx_q->tx_skbuff)
goto err_dma; return -ENOMEM;
tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, if (priv->extend_desc)
sizeof(struct sk_buff *), size = sizeof(struct dma_extended_desc);
GFP_KERNEL); else if (tx_q->tbs & STMMAC_TBS_AVAIL)
if (!tx_q->tx_skbuff) size = sizeof(struct dma_edesc);
goto err_dma; else
size = sizeof(struct dma_desc);
if (priv->extend_desc) size *= priv->dma_tx_size;
size = sizeof(struct dma_extended_desc);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
size = sizeof(struct dma_edesc);
else
size = sizeof(struct dma_desc);
size *= priv->dma_tx_size; addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
if (!addr)
return -ENOMEM;
addr = dma_alloc_coherent(priv->device, size, if (priv->extend_desc)
&tx_q->dma_tx_phy, GFP_KERNEL); tx_q->dma_etx = addr;
if (!addr) else if (tx_q->tbs & STMMAC_TBS_AVAIL)
goto err_dma; tx_q->dma_entx = addr;
else
tx_q->dma_tx = addr;
if (priv->extend_desc) return 0;
tx_q->dma_etx = addr; }
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
tx_q->dma_entx = addr; static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
else {
tx_q->dma_tx = addr; u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
int ret;
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
ret = __alloc_dma_tx_desc_resources(priv, queue);
if (ret)
goto err_dma;
} }
return 0; return 0;
...@@ -2172,30 +2381,137 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) ...@@ -2172,30 +2381,137 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* 2) There is no bugged Jumbo frame support * 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES. * that needs to not insert csum in the TDES.
*/ */
txmode = SF_DMA_MODE; txmode = SF_DMA_MODE;
rxmode = SF_DMA_MODE; rxmode = SF_DMA_MODE;
priv->xstats.threshold = SF_DMA_MODE; priv->xstats.threshold = SF_DMA_MODE;
} else { } else {
txmode = tc; txmode = tc;
rxmode = SF_DMA_MODE; rxmode = SF_DMA_MODE;
} }
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
rxfifosz, qmode);
if (rx_q->xsk_pool) {
buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
stmmac_set_dma_bfsize(priv, priv->ioaddr,
buf_size,
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
priv->dma_buf_sz,
chan);
}
}
for (chan = 0; chan < tx_channels_count; chan++) {
qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
txfifosz, qmode);
}
}
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
struct xdp_desc xdp_desc;
bool work_done = true;
/* Avoids TX time-out as we are sharing with slow path */
nq->trans_start = jiffies;
budget = min(budget, stmmac_tx_avail(priv, queue));
while (budget-- > 0) {
dma_addr_t dma_addr;
bool set_ic;
/* We are sharing with slow path and stop XSK TX desc submission when
* available TX ring is less than threshold.
*/
if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
!netif_carrier_ok(priv->dev)) {
work_done = false;
break;
}
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
tx_desc = &tx_q->dma_entx[entry].basic;
else
tx_desc = tx_q->dma_tx + entry;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
/* To return XDP buffer to XSK pool, we simple call
* xsk_tx_completed(), so we don't need to fill up
* 'buf' and 'xdpf'.
*/
tx_q->tx_skbuff_dma[entry].buf = 0;
tx_q->xdpf[entry] = NULL;
/* configure all channels */ tx_q->tx_skbuff_dma[entry].map_as_page = false;
for (chan = 0; chan < rx_channels_count; chan++) { tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; tx_q->tx_skbuff_dma[entry].last_segment = true;
tx_q->tx_skbuff_dma[entry].is_jumbo = false;
stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, stmmac_set_desc_addr(priv, tx_desc, dma_addr);
rxfifosz, qmode);
stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
chan);
}
for (chan = 0; chan < tx_channels_count; chan++) { tx_q->tx_count_frames++;
qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, if (!priv->tx_coal_frames[queue])
txfifosz, qmode); set_ic = false;
else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
set_ic = true;
else
set_ic = false;
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
priv->xstats.tx_set_ic_bit++;
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
true, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
entry = tx_q->cur_tx;
} }
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
xsk_tx_release(pool);
}
/* Return true if all of the 3 conditions are met
* a) TX Budget is still available
* b) work_done = true when XSK TX desc peek is empty (no more
* pending XSK TX for transmission)
*/
return !!budget && work_done;
} }
/** /**
@@ -2209,14 +2525,18 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
-	unsigned int entry, count = 0;
+	unsigned int entry, xmits = 0, count = 0;
 
 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
 	priv->xstats.tx_clean++;
 
+	tx_q->xsk_frames_done = 0;
+
 	entry = tx_q->dirty_tx;
-	while ((entry != tx_q->cur_tx) && (count < budget)) {
+
+	/* Try to clean all TX complete frame in 1 shot */
+	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
 		struct xdp_frame *xdpf;
 		struct sk_buff *skb;
 		struct dma_desc *p;
@@ -2301,6 +2621,9 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 			tx_q->xdpf[entry] = NULL;
 		}
 
+		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
+			tx_q->xsk_frames_done++;
+
 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
 			if (likely(skb)) {
 				pkts_compl++;
@@ -2328,6 +2651,28 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
 
+	if (tx_q->xsk_pool) {
+		bool work_done;
+
+		if (tx_q->xsk_frames_done)
+			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+
+		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
+			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
+
+		/* For XSK TX, we try to send as many as possible.
+		 * If XSK work done (XSK TX desc empty and budget still
+		 * available), return "budget - 1" to reenable TX IRQ.
+		 * Else, return "budget" to make NAPI continue polling.
+		 */
+		work_done = stmmac_xdp_xmit_zc(priv, queue,
+					       STMMAC_XSK_TX_BUDGET_MAX);
+		if (work_done)
+			xmits = budget - 1;
+		else
+			xmits = budget;
+	}
+
 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
 	    priv->eee_sw_timer_en) {
 		stmmac_enable_eee_mode(priv);
@@ -2342,7 +2687,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
 
-	return count;
+	/* Combine decisions from TX clean and XSK TX */
+	return max(count, xmits);
 }
/** /**
...@@ -2424,24 +2770,31 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) ...@@ -2424,24 +2770,31 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{ {
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir); &priv->xstats, chan, dir);
struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan]; struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
unsigned long flags; unsigned long flags;
rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
if (napi_schedule_prep(&ch->rx_napi)) { if (napi_schedule_prep(rx_napi)) {
spin_lock_irqsave(&ch->lock, flags); spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->rx_napi); __napi_schedule(rx_napi);
} }
} }
if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
if (napi_schedule_prep(&ch->tx_napi)) { if (napi_schedule_prep(tx_napi)) {
spin_lock_irqsave(&ch->lock, flags); spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->tx_napi); __napi_schedule(tx_napi);
} }
} }
@@ -2598,7 +2951,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 				    rx_q->dma_rx_phy, chan);
 
 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-				     (priv->dma_rx_size *
+				     (rx_q->buf_alloc_num *
 				      sizeof(struct dma_desc));
 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
 				       rx_q->rx_tail_addr, chan);
@@ -2639,16 +2992,18 @@ static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
 	struct stmmac_priv *priv = tx_q->priv_data;
 	struct stmmac_channel *ch;
+	struct napi_struct *napi;
 
 	ch = &priv->channel[tx_q->queue_index];
+	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
 
-	if (likely(napi_schedule_prep(&ch->tx_napi))) {
+	if (likely(napi_schedule_prep(napi))) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&ch->lock, flags);
 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
 		spin_unlock_irqrestore(&ch->lock, flags);
-		__napi_schedule(&ch->tx_napi);
+		__napi_schedule(napi);
 	}
 
 	return HRTIMER_NORESTART;
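
Note that whenever an XSK pool is attached to a queue, the combined
rxtx_napi context is scheduled in place of the separate rx_napi/tx_napi
instances (in stmmac_napi_check() above and in the TX coalescing timer
here), so zero-copy RX and TX for that queue are serviced by a single poll
loop.
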
...@@ -4318,114 +4673,421 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, ...@@ -4318,114 +4673,421 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
priv->xstats.tx_set_ic_bit++; priv->xstats.tx_set_ic_bit++;
} }
stmmac_enable_dma_transmission(priv, priv->ioaddr); stmmac_enable_dma_transmission(priv, priv->ioaddr);
entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
}
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
int cpu)
{
int index = cpu;
if (unlikely(index < 0))
index = 0;
while (index >= priv->plat->tx_queues_to_use)
index -= priv->plat->tx_queues_to_use;
return index;
}
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
struct netdev_queue *nq;
int queue;
int res;
if (unlikely(!xdpf))
return STMMAC_XDP_CONSUMED;
queue = stmmac_xdp_get_tx_queue(priv, cpu);
nq = netdev_get_tx_queue(priv->dev, queue);
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
nq->trans_start = jiffies;
res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
if (res == STMMAC_XDP_TX)
stmmac_flush_tx_descriptors(priv, queue);
__netif_tx_unlock(nq);
return res;
}
/* This function assumes rcu_read_lock() is held by the caller. */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
struct bpf_prog *prog,
struct xdp_buff *xdp)
{
u32 act;
int res;
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
res = STMMAC_XDP_PASS;
break;
case XDP_TX:
res = stmmac_xdp_xmit_back(priv, xdp);
break;
case XDP_REDIRECT:
if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
res = STMMAC_XDP_CONSUMED;
else
res = STMMAC_XDP_REDIRECT;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->dev, prog, act);
fallthrough;
case XDP_DROP:
res = STMMAC_XDP_CONSUMED;
break;
}
return res;
}
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
rcu_read_lock();
prog = READ_ONCE(priv->xdp_prog);
if (!prog) {
res = STMMAC_XDP_PASS;
goto unlock;
}
res = __stmmac_xdp_run_prog(priv, prog, xdp);
unlock:
rcu_read_unlock();
return ERR_PTR(-res);
}
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
int xdp_status)
{
int cpu = smp_processor_id();
int queue;
queue = stmmac_xdp_get_tx_queue(priv, cpu);
if (xdp_status & STMMAC_XDP_TX)
stmmac_tx_timer_arm(priv, queue);
if (xdp_status & STMMAC_XDP_REDIRECT)
xdp_do_flush();
}
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
skb = __napi_alloc_skb(&ch->rxtx_napi,
xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
return skb;
}
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
struct sk_buff *skb;
u32 hash;
skb = stmmac_construct_skb_zc(ch, xdp);
if (!skb) {
priv->dev->stats.rx_dropped++;
return;
}
stmmac_get_rx_hwtstamp(priv, p, np, skb);
stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
skb_set_hash(skb, hash, hash_type);
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
budget = min(budget, stmmac_rx_dirty(priv, queue));
while (budget-- > 0 && entry != rx_q->cur_rx) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
dma_addr_t dma_addr;
bool use_rx_wd;
if (!buf->xdp) {
buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
if (!buf->xdp) {
ret = false;
break;
}
}
if (priv->extend_desc)
rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
else
rx_desc = rx_q->dma_rx + entry;
dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
stmmac_set_desc_addr(priv, rx_desc, dma_addr);
stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
stmmac_refill_desc3(priv, rx_q, rx_desc);
rx_q->rx_count_frames++;
rx_q->rx_count_frames += priv->rx_coal_frames[queue];
if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
rx_q->rx_count_frames = 0;
use_rx_wd = !priv->rx_coal_frames[queue];
use_rx_wd |= rx_q->rx_count_frames > 0;
if (!priv->use_riwt)
use_rx_wd = false;
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
}
if (rx_desc) {
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
return ret;
}
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
int xdp_status = 0;
int status = 0;
if (netif_msg_rx_status(priv)) {
void *rx_head;
netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
if (priv->extend_desc) {
rx_head = (void *)rx_q->dma_erx;
desc_size = sizeof(struct dma_extended_desc);
} else {
rx_head = (void *)rx_q->dma_rx;
desc_size = sizeof(struct dma_desc);
}
stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
struct stmmac_rx_buffer *buf;
unsigned int buf1_len = 0;
struct dma_desc *np, *p;
int entry;
int res;
if (!count && rx_q->state_saved) {
error = rx_q->state.error;
len = rx_q->state.len;
} else {
rx_q->state_saved = false;
error = 0;
len = 0;
}
if (count >= limit)
break;
read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
if (dirty >= STMMAC_RX_FILL_BATCH) {
failure = failure ||
!stmmac_rx_refill_zc(priv, queue, dirty);
dirty = 0;
}
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
status = stmmac_rx_status(priv, &priv->dev->stats,
&priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
priv->dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
else
np = rx_q->dma_rx + next_entry;
prefetch(np);
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
&priv->xstats,
rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
xsk_buff_free(buf->xdp);
buf->xdp = NULL;
dirty++;
error = 1;
if (!priv->hwts_rx_en)
priv->dev->stats.rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
goto read_again;
if (unlikely(error)) {
count++;
continue;
}
/* Ensure a valid XSK buffer before proceed */
if (!buf->xdp)
break;
/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
if (likely(status & rx_not_ls)) {
xsk_buff_free(buf->xdp);
buf->xdp = NULL;
dirty++;
count++;
goto read_again;
}
/* XDP ZC Frame only support primary buffers for now */
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 * Type frames (LLC/LLC-SNAP)
 *
 * llc_snap is never checked in GMAC >= 4, so this ACS
 * feature is always disabled and packets need to be
 * stripped manually.
 */
if (likely(!(status & rx_not_ls)) &&
(likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))) {
buf1_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
}
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
rcu_read_lock();
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
rcu_read_unlock();
switch (res) {
case STMMAC_XDP_PASS:
stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
xsk_buff_free(buf->xdp);
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
priv->dev->stats.rx_dropped++;
break;
case STMMAC_XDP_TX:
case STMMAC_XDP_REDIRECT:
xdp_status |= res;
break;
}
buf->xdp = NULL;
dirty++;
count++;
}
if (status & rx_not_ls) {
rx_q->state_saved = true;
rx_q->state.error = error;
rx_q->state.len = len;
}
stmmac_finalize_xdp_rx(priv, xdp_status);
if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
if (failure || stmmac_rx_dirty(priv, queue) > 0)
xsk_set_rx_need_wakeup(rx_q->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
return (int)count;
}
return failure ? limit : (int)count;
}
/**
...@@ -4742,7 +5404,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
...@@ -4756,6 +5418,42 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
return work_done;
}
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done;
u32 chan = ch->index;
priv->xstats.napi_poll++;
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
rx_done = stmmac_rx_zc(priv, budget, chan);
/* If either TX or RX work is not complete, return budget
* and keep polling
*/
if (tx_done >= budget || rx_done >= budget)
return budget;
/* all work done, exit the polling mode */
if (napi_complete_done(napi, rx_done)) {
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
/* Both RX and TX work are complete,
* so enable both RX & TX IRQs.
*/
stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
spin_unlock_irqrestore(&ch->lock, flags);
}
return min(rx_done, budget - 1);
}
/**
* stmmac_tx_timeout
* @dev : Pointer to net device structure
...@@ -5203,7 +5901,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return ret;
__stmmac_disable_all_queues(priv);
switch (type) {
case TC_SETUP_CLSU32:
...@@ -5624,6 +6322,9 @@ static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
case XDP_SETUP_XSK_POOL:
return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
...@@ -5671,6 +6372,156 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
__free_dma_rx_desc_resources(priv, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
int ret;
ret = __alloc_dma_rx_desc_resources(priv, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
if (ret) {
__free_dma_rx_desc_resources(priv, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
stmmac_clear_rx_descriptors(priv, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr, rx_q->queue_index);
if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
stmmac_set_dma_bfsize(priv, priv->ioaddr,
buf_size,
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
priv->dma_buf_sz,
rx_q->queue_index);
}
stmmac_start_rx_dma(priv, queue);
spin_lock_irqsave(&ch->lock, flags);
stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
spin_unlock_irqrestore(&ch->lock, flags);
}
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
__free_dma_tx_desc_resources(priv, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
ret = __alloc_dma_tx_desc_resources(priv, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
ret = __init_dma_tx_desc_rings(priv, queue);
if (ret) {
__free_dma_tx_desc_resources(priv, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
stmmac_clear_tx_descriptors(priv, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
tx_q->tx_tail_addr = tx_q->dma_tx_phy;
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, tx_q->queue_index);
stmmac_start_tx_dma(priv, queue);
spin_lock_irqsave(&ch->lock, flags);
stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
spin_unlock_irqrestore(&ch->lock, flags);
}
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
struct stmmac_channel *ch;
if (test_bit(STMMAC_DOWN, &priv->state) ||
!netif_carrier_ok(priv->dev))
return -ENETDOWN;
if (!stmmac_xdp_is_enabled(priv))
return -ENXIO;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
rx_q = &priv->rx_queue[queue];
tx_q = &priv->tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
* so we schedule the RX NAPI straight away.
*/
if (likely(napi_schedule_prep(&ch->rxtx_napi)))
__napi_schedule(&ch->rxtx_napi);
}
return 0;
}
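The .ndo_xsk_wakeup callback above is reached from user space when the AF_XDP socket is bound with the need_wakeup flag and the application kicks the kernel (sendto() for TX, poll() for RX). A minimal, illustrative user-space sketch, not part of this patch (UMEM registration and ring setup are omitted and must happen before bind()):

 #include <errno.h>
 #include <net/if.h>
 #include <sys/socket.h>
 #include <linux/if_xdp.h>
 #include <unistd.h>

 static int xsk_bind_zc(const char *ifname, unsigned int queue_id)
 {
	struct sockaddr_xdp sxdp = { 0 };
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -errno;

	/* XDP_UMEM_REG and ring setup via setsockopt() must be done
	 * before bind(); omitted here for brevity.
	 */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP;

	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
		close(fd);
		return -errno;
	}

	/* With need_wakeup set by the driver, a dummy sendto() kicks
	 * TX processing and ends up in the driver's ndo_xsk_wakeup.
	 */
	sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	return fd;
 }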
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
...@@ -5691,6 +6542,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
...@@ -5849,6 +6701,12 @@ static void stmmac_napi_add(struct net_device *dev)
stmmac_napi_poll_tx,
NAPI_POLL_WEIGHT);
}
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
stmmac_napi_poll_rxtx,
NAPI_POLL_WEIGHT);
}
}
}
...@@ -5866,6 +6724,10 @@ static void stmmac_napi_del(struct net_device *dev)
netif_napi_del(&ch->rx_napi);
if (queue < priv->plat->tx_queues_to_use)
netif_napi_del(&ch->tx_napi);
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_del(&ch->rxtx_napi);
}
}
}
...@@ -6024,6 +6886,10 @@ int stmmac_dvr_probe(struct device *device,
/* Verify driver arguments */
stmmac_verify_args();
priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
if (!priv->af_xdp_zc_qps)
return -ENOMEM;
/* Allocate workqueue */
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Intel Corporation. */
#include <net/xdp_sock_drv.h>
#include "stmmac.h" #include "stmmac.h"
#include "stmmac_xdp.h" #include "stmmac_xdp.h"
static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
struct xsk_buff_pool *pool, u16 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
bool need_update;
u32 frame_size;
int err;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
frame_size = xsk_pool_get_rx_frame_size(pool);
/* XDP ZC does not span multiple frames; make sure the XSK pool buffer
* size can store at least a Q-in-Q frame.
*/
if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
return -EOPNOTSUPP;
err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
if (err) {
netdev_err(priv->dev, "Failed to map xsk pool\n");
return err;
}
need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
if (need_update) {
stmmac_disable_rx_queue(priv, queue);
stmmac_disable_tx_queue(priv, queue);
napi_disable(&ch->rx_napi);
napi_disable(&ch->tx_napi);
}
set_bit(queue, priv->af_xdp_zc_qps);
if (need_update) {
napi_enable(&ch->rxtx_napi);
stmmac_enable_rx_queue(priv, queue);
stmmac_enable_tx_queue(priv, queue);
err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
if (err)
return err;
}
return 0;
}
static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_channel *ch = &priv->channel[queue];
struct xsk_buff_pool *pool;
bool need_update;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
pool = xsk_get_pool_from_qid(priv->dev, queue);
if (!pool)
return -EINVAL;
need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
if (need_update) {
stmmac_disable_rx_queue(priv, queue);
stmmac_disable_tx_queue(priv, queue);
synchronize_rcu();
napi_disable(&ch->rxtx_napi);
}
xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
clear_bit(queue, priv->af_xdp_zc_qps);
if (need_update) {
napi_enable(&ch->rx_napi);
napi_enable(&ch->tx_napi);
stmmac_enable_rx_queue(priv, queue);
stmmac_enable_tx_queue(priv, queue);
}
return 0;
}
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
u16 queue)
{
return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
stmmac_xdp_disable_pool(priv, queue);
}
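stmmac_xdp_enable_pool() above rejects pools whose frame size cannot hold a Q-in-Q frame (ETH_FRAME_LEN + 2 * VLAN_HLEN = 1514 + 8 = 1522 bytes), so the usual 2048- or 4096-byte UMEM chunk sizes pass the check. An illustrative user-space UMEM configuration that satisfies it (libbpf xsk helpers; the wrapper name and the NUM_FRAMES/FRAME_SIZE constants are assumptions, not part of this patch):

 #include <stdlib.h>
 #include <unistd.h>
 #include <bpf/xsk.h>

 #define NUM_FRAMES 4096
 #define FRAME_SIZE 2048	/* >= 1522 bytes, so the driver accepts it */

 static struct xsk_umem *create_umem(struct xsk_ring_prod *fq,
				     struct xsk_ring_cons *cq)
 {
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = FRAME_SIZE,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	};
	struct xsk_umem *umem = NULL;
	void *buf = NULL;

	/* Page-aligned UMEM area large enough for NUM_FRAMES chunks. */
	if (posix_memalign(&buf, getpagesize(), NUM_FRAMES * FRAME_SIZE))
		return NULL;

	if (xsk_umem__create(&umem, buf, NUM_FRAMES * FRAME_SIZE,
			     fq, cq, &cfg)) {
		free(buf);
		return NULL;
	}

	return umem;
 }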
int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
...
...@@ -5,7 +5,10 @@
#define _STMMAC_XDP_H_
#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
u16 queue);
int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack);
...