Commit bba2556e authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: Enable RX via AF_XDP zero-copy

This patch adds support for receiving packets via the AF_XDP zero-copy
mechanism.

XDP ZC uses a 1:1 mapping between XDP buffers and received packets, so
split header support is currently not used. The 'xdp_buff' is declared
in a union together with the struct that contains the 'page', 'addr' and
'page_offset' fields associated with the primary buffer.

RX buffers are now allocated from either the page_pool or the XSK pool.
RX buffers from the XSK pool are allocated and deallocated using the
functions below:

 * stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
 * dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
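
These two helpers are not shown in the hunks below, so the following is a
minimal, hypothetical sketch of what they do, reusing names visible in the
patch (rx_q->buf_pool, buf->xdp, buf_alloc_num, stmmac_set_desc_addr())
plus the standard XSK pool API; extended-descriptor handling is omitted:

/* Sketch only: fill every ring slot with a frame from the XSK pool */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        for (i = 0; i < priv->dma_rx_size; i++) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
                struct dma_desc *p = rx_q->dma_rx + i;

                /* Returns NULL when the FILL queue is empty,
                 * e.g. a TX-only xdpsock application.
                 */
                buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
                if (!buf->xdp)
                        return -ENOMEM;

                /* Program the descriptor with the UMEM DMA address */
                stmmac_set_desc_addr(priv, p, xsk_buff_xdp_get_dma(buf->xdp));
                rx_q->buf_alloc_num++;
        }

        return 0;
}

/* Sketch only: return all outstanding frames to the XSK pool */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        for (i = 0; i < priv->dma_rx_size; i++) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

                if (!buf->xdp)
                        continue;

                xsk_buff_free(buf->xdp);
                buf->xdp = NULL;
        }
}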

With the above functions available, the following driver functions are
extended to support XDP ZC:
 * stmmac_reinit_rx_buffers()
 * __init_dma_rx_desc_rings()
 * init_dma_rx_desc_rings()
 * __free_dma_rx_desc_resources()

Note: stmmac_alloc_rx_buffers_zc() may return -ENOMEM because the RX XDP
buffer pool has not been populated (e.g. samples/bpf/xdpsock in TX-only
mode). It is still fine to let TX XDP ZC continue, so the -ENOMEM is
silently ignored and the driver successfully transitions to XDP ZC mode
for the affected RX and TX queue, as sketched below.
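
As a hypothetical sketch of how this plays out inside
__init_dma_rx_desc_rings() ('flags' and the error path as in the existing
non-ZC allocation; the exact flow in the patch may differ):

        if (rx_q->xsk_pool) {
                /* RX XDP ZC buffer pool may not be populated, e.g.
                 * xdpsock in TX-only mode: ignore -ENOMEM here.
                 */
                stmmac_alloc_rx_buffers_zc(priv, queue);
        } else {
                ret = stmmac_alloc_rx_buffers(priv, queue, flags);
                if (ret < 0)
                        return -ENOMEM;
        }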

As the XDP ZC buffer size differs, the DMA buffer size must be
reprogrammed accordingly for any RX DMA channel/queue that is populated
with XDP buffers from the XSK pool.
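
A sketch of that reprogramming, assuming the driver's existing
stmmac_set_dma_bfsize() helper is fed the pool's advertised frame size
(the exact call site in the patch may differ):

        if (rx_q->xsk_pool) {
                /* XSK pool frame size replaces the default DMA buffer size */
                buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
                stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size,
                                      rx_q->queue_index);
        } else {
                stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
                                      rx_q->queue_index);
        }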

Next, to add or remove a per-queue XSK pool, stmmac_xdp_setup_pool()
calls stmmac_xdp_enable_pool() or stmmac_xdp_disable_pool(), which in
turn coordinate tearing down and setting up the RX ring, removing and
reallocating RX buffers and descriptors through
stmmac_disable_rx_queue() and stmmac_enable_rx_queue(). In addition,
stmmac_xsk_wakeup() is added to initiate XDP RX buffer replenishing
by signalling the user application to add available XDP frames back to
the FILL queue.
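
The body of stmmac_xsk_wakeup() is not shown below; a plausible sketch,
assuming it follows the common driver pattern of kicking RX NAPI so that
stmmac_rx_refill_zc() runs (exact sanity checks may differ):

int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct stmmac_rx_queue *rx_q;
        struct stmmac_channel *ch;

        if (!stmmac_xdp_is_enabled(priv))
                return -ENXIO;

        if (queue >= priv->plat->rx_queues_to_use)
                return -EINVAL;

        rx_q = &priv->rx_queue[queue];
        ch = &priv->channel[queue];

        if (!rx_q->xsk_pool)
                return -ENXIO;

        /* Schedule RX NAPI unless it is already running; the poll
         * routine will replenish descriptors from the FILL queue.
         */
        if ((flags & XDP_WAKEUP_RX) &&
            !napi_if_scheduled_mark_missed(&ch->rx_napi))
                napi_schedule(&ch->rx_napi);

        return 0;
}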

For RX processing using XDP zero-copy buffers, stmmac_rx_zc() is
introduced; it is implemented under the assumption that RX split
header is disabled. When the XDP verdict is XDP_PASS, the XDP buffer is
copied into an sk_buff allocated through stmmac_construct_skb_zc()
and handed to Linux network GRO inside stmmac_dispatch_skb_zc(). Free RX
buffers are then replenished using stmmac_rx_refill_zc().
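
A sketch of the XDP_PASS copy path, assuming the usual zero-copy pattern
of allocating from the NAPI skb cache and copying payload plus XDP
metadata out of the UMEM frame before it is recycled:

static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
                                               struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;

        /* The UMEM frame goes straight back to the pool, so the
         * payload must be copied into a freshly allocated skb.
         */
        skb = __napi_alloc_skb(&ch->rx_napi,
                               xdp->data_end - xdp->data_hard_start,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;
}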

v2: introduce __stmmac_disable_all_queues() to contain the original code
    that does napi_disable(), and make stmmac_setup_tc_block_cb()
    use it. Move synchronize_rcu() into stmmac_disable_all_queues(),
    which eventually calls __stmmac_disable_all_queues(). Then,
    make both stmmac_release() and stmmac_suspend() use
    stmmac_disable_all_queues(). Thanks to David Miller for spotting the
    synchronize_rcu() issue in the v1 patch.
Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bba71cac
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -77,9 +77,14 @@ struct stmmac_tx_queue {
 };
 
 struct stmmac_rx_buffer {
-	struct page *page;
-	dma_addr_t addr;
-	__u32 page_offset;
+	union {
+		struct {
+			struct page *page;
+			dma_addr_t addr;
+			__u32 page_offset;
+		};
+		struct xdp_buff *xdp;
+	};
 	struct page *sec_page;
 	dma_addr_t sec_addr;
 };
@@ -88,6 +93,7 @@ struct stmmac_rx_queue {
 	u32 rx_count_frames;
 	u32 queue_index;
 	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 	struct page_pool *page_pool;
 	struct stmmac_rx_buffer *buf_pool;
 	struct stmmac_priv *priv_data;
@@ -95,6 +101,7 @@ struct stmmac_rx_queue {
 	struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
 	unsigned int cur_rx;
 	unsigned int dirty_rx;
+	unsigned int buf_alloc_num;
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
@@ -283,6 +290,7 @@ struct stmmac_priv {
 	struct stmmac_rss rss;
 
 	/* XDP BPF Program */
+	unsigned long *af_xdp_zc_qps;
 	struct bpf_prog *xdp_prog;
 };
@@ -328,6 +336,10 @@ static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 	return 0;
 }
 
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+
 #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
 void stmmac_selftest_run(struct net_device *dev,
 			 struct ethtool_test *etest, u64 *buf);
...
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2021, Intel Corporation. */
 
+#include <net/xdp_sock_drv.h>
+
 #include "stmmac.h"
 #include "stmmac_xdp.h"
 
+static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
+				  struct xsk_buff_pool *pool, u16 queue)
+{
+	struct stmmac_channel *ch = &priv->channel[queue];
+	bool need_update;
+	u32 frame_size;
+	int err;
+
+	if (queue >= priv->plat->rx_queues_to_use)
+		return -EINVAL;
+
+	frame_size = xsk_pool_get_rx_frame_size(pool);
+	/* XDP ZC does not span multiple frames, so make sure the XSK
+	 * pool buffer size can at least store a Q-in-Q frame.
+	 */
+	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
+		return -EOPNOTSUPP;
+
+	err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
+	if (err) {
+		netdev_err(priv->dev, "Failed to map xsk pool\n");
+		return err;
+	}
+
+	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+	if (need_update) {
+		stmmac_disable_rx_queue(priv, queue);
+		napi_disable(&ch->rx_napi);
+	}
+
+	set_bit(queue, priv->af_xdp_zc_qps);
+
+	if (need_update) {
+		napi_enable(&ch->rx_napi);
+		stmmac_enable_rx_queue(priv, queue);
+
+		err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
+{
+	struct stmmac_channel *ch = &priv->channel[queue];
+	struct xsk_buff_pool *pool;
+	bool need_update;
+
+	if (queue >= priv->plat->rx_queues_to_use)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(priv->dev, queue);
+	if (!pool)
+		return -EINVAL;
+
+	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+	if (need_update) {
+		stmmac_disable_rx_queue(priv, queue);
+		synchronize_rcu();
+		napi_disable(&ch->rx_napi);
+	}
+
+	xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
+
+	clear_bit(queue, priv->af_xdp_zc_qps);
+
+	if (need_update) {
+		napi_enable(&ch->rx_napi);
+		stmmac_enable_rx_queue(priv, queue);
+	}
+
+	return 0;
+}
+
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+			  u16 queue)
+{
+	return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
+		      stmmac_xdp_disable_pool(priv, queue);
+}
+
 int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
 			struct netlink_ext_ack *extack)
 {
...
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -5,7 +5,10 @@
 #define _STMMAC_XDP_H_
 
 #define STMMAC_MAX_RX_BUF_SIZE(num)	(((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
+#define STMMAC_RX_DMA_ATTR	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+			  u16 queue);
 int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
 			struct netlink_ext_ack *extack);
...