Commit 092d992b authored by David S. Miller

Merge tag 'mlx5-updates-2022-03-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-03-18

1) XDP multi buffer support

This series enables XDP on the non-linear legacy RQ in multi-buffer mode.

When XDP is enabled, the fragmentation scheme of the non-linear legacy RQ
is adjusted to comply with the limitations of XDP multi buffer (all
fragments must have the same size). DMA addresses of fragments are stored
in struct page so that the completion handler can unmap them (a condensed
sketch of this pattern follows below the commit header). XDP_TX is
supported.

XDP_REDIRECT is not yet supported; the XDP core currently blocks it for
multi-buffer packets.

2) Trivial cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 62f65554 5dc2b581
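
The fragment-unmapping scheme mentioned above boils down to reusing the
page_pool DMA-address slot in struct page. The following condensed sketch
is not the driver code itself; the example_* helpers and the DMA direction
are illustrative placeholders. It shows the pattern the series applies in
mlx5e_page_alloc_pool() and mlx5e_page_dma_unmap():

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* After mapping an RX page, stash the DMA address in the page itself so no
 * separate mlx5e_dma_info has to travel to the completion handler.
 */
static void example_stash_dma_addr(struct page *page, dma_addr_t addr)
{
	page_pool_set_dma_addr(page, addr);
}

/* On XDP_TX completion, recover the address from the page and unmap it. */
static void example_unmap_on_completion(struct device *dev, struct page *page)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_unmap_page_attrs(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
}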
......@@ -1720,7 +1720,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
unsigned long bitmask;
......
......@@ -405,6 +405,7 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
MLX5E_SQ_STATE_XDP_MULTIBUF,
};
struct mlx5e_tx_mpwqe {
......@@ -515,7 +516,7 @@ struct mlx5e_xdp_info {
} frame;
struct {
struct mlx5e_rq *rq;
struct mlx5e_dma_info di;
struct page *page;
} page;
};
};
......@@ -537,7 +538,7 @@ struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
struct mlx5e_xdp_info *,
struct skb_shared_info *,
int);
struct mlx5e_xdpsq {
......
......@@ -398,8 +398,12 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
};
}
static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size)
static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
{
if (xdp)
/* XDP requires all fragments to be of the same size. */
return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
/* Optimization for small packets: the last fragment is bigger than the others. */
return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}
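
A quick worked comparison of the two formulas above, as a standalone
userspace sketch. The EX_* constants assume PAGE_SIZE = 4096 and
MLX5E_MAX_RX_FRAGS = 4 (the common values, but assumptions here), and
ex_max_nonlinear_mtu() is just a local copy of the logic:

#include <stdio.h>

#define EX_PAGE_SIZE	4096
#define EX_MAX_RX_FRAGS	4

static int ex_max_nonlinear_mtu(int first_frag_size, int frag_size, int xdp)
{
	if (xdp)
		/* XDP multi buffer: every fragment uses the same stride. */
		return first_frag_size + (EX_MAX_RX_FRAGS - 1) * frag_size;
	/* Regular SKB path: the last fragment may span a whole page. */
	return first_frag_size + (EX_MAX_RX_FRAGS - 2) * frag_size + EX_PAGE_SIZE;
}

int main(void)
{
	/* e.g. first_frag_size = 1536, frag_size = 2048 */
	printf("xdp mb: %d\n", ex_max_nonlinear_mtu(1536, 2048, 1)); /* 7680 */
	printf("skb:    %d\n", ex_max_nonlinear_mtu(1536, 2048, 0)); /* 9728 */
	return 0;
}

With equal-sized fragments the XDP case loses the oversized tail fragment,
which is why mlx5e_build_rq_frags_info() below retries with
frag_size_max = PAGE_SIZE whenever an XDP program is attached.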
......@@ -438,12 +442,14 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
headroom = mlx5e_get_linear_rq_headroom(params, xsk);
first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
if (byte_count > max_mtu) {
max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
params->xdp_prog);
if (byte_count > max_mtu || params->xdp_prog) {
frag_size_max = PAGE_SIZE;
first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
params->xdp_prog);
if (byte_count > max_mtu) {
mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
params->sw_mtu, max_mtu);
......@@ -463,14 +469,18 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
info->arr[i].frag_size = frag_size;
buf_size += frag_size;
if (i == 0) {
/* Ensure that headroom and tailroom are included. */
frag_size += headroom;
frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (params->xdp_prog) {
/* XDP multi buffer expects fragments of the same size. */
info->arr[i].frag_stride = frag_size_max;
} else {
if (i == 0) {
/* Ensure that headroom and tailroom are included. */
frag_size += headroom;
frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
}
info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
i++;
}
info->num_frags = i;
......@@ -833,6 +843,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
......@@ -841,6 +852,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
......@@ -860,7 +872,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
......
......@@ -31,6 +31,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
bool is_xdp_mb;
u16 stop_room;
};
......@@ -155,6 +156,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
......
......@@ -18,7 +18,6 @@ int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
/* TX datapath API */
int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
/* SQ lifecycle */
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
......
......@@ -44,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
......
......@@ -38,7 +38,6 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
......@@ -47,7 +46,7 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
struct bpf_prog *prog, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
......@@ -59,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
......
......@@ -43,7 +43,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
......
......@@ -103,12 +103,15 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xsk_tx_post_err(sq, &xdpi);
} else {
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
}
flush = true;
......
......@@ -780,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
mlx5e_page_release_dynamic(rq, dma_info, false);
mlx5e_page_release_dynamic(rq, dma_info->page, false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
......@@ -1666,14 +1666,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
/* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
* supported by upstream, and there is no defined trigger to allow
* transmitting redirected multi-buffer frames.
*/
if (param->is_xdp_mb && !is_redirect)
set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
if (!param->is_mpw) {
unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
unsigned int inline_hdr_sz = 0;
int i;
......@@ -3945,6 +3953,31 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
{
bool is_linear;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
is_linear = mlx5e_rx_is_linear_skb(params, NULL);
if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
params->sw_mtu,
mlx5e_xdp_max_mtu(params, NULL));
return false;
}
if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
params->sw_mtu,
mlx5e_xdp_max_mtu(params, NULL));
return false;
}
return true;
}
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
......@@ -3964,10 +3997,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
if (params->xdp_prog &&
!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
new_mtu, mlx5e_xdp_max_mtu(params, NULL));
if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
err = -EINVAL;
goto out;
}
......@@ -4450,15 +4480,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
new_params.sw_mtu,
mlx5e_xdp_max_mtu(&new_params, NULL));
if (!mlx5e_params_validate_xdp(netdev, &new_params))
return -EINVAL;
}
return 0;
}
......
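
For the xdp_has_frags check in mlx5e_params_validate_xdp() above, the
program side has to opt in to multi-buffer frames when it is loaded. A
minimal sketch, assuming a libbpf recent enough to know the "xdp.frags"
section name (it translates to the BPF_F_XDP_HAS_FRAGS load flag, which is
what sets prog->aux->xdp_has_frags); the program name is arbitrary:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_mb_pass(struct xdp_md *ctx)
{
	/* A program that only touches the linear part needs no frag-aware
	 * helpers; declaring frags support is enough to pass the driver
	 * check at large MTUs.
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";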
......@@ -222,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
......@@ -234,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
if (!dev_page_is_reusable(dma_info->page)) {
if (!dev_page_is_reusable(page)) {
stats->cache_waive++;
return false;
}
cache->page_cache[cache->tail] = *dma_info;
cache->page_cache[cache->tail].page = page;
cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
cache->tail = tail_next;
return true;
}
......@@ -287,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
dma_info->page = NULL;
return -ENOMEM;
}
page_pool_set_dma_addr(dma_info->page, dma_info->addr);
return 0;
}
......@@ -300,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
return mlx5e_page_alloc_pool(rq, dma_info);
}
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
dma_addr_t dma_addr = page_pool_get_dma_addr(page);
dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
DMA_ATTR_SKIP_CPU_SYNC);
page_pool_set_dma_addr(page, 0);
}
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle)
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
if (likely(recycle)) {
if (mlx5e_rx_cache_put(rq, dma_info))
if (mlx5e_rx_cache_put(rq, page))
return;
mlx5e_page_dma_unmap(rq, dma_info);
page_pool_recycle_direct(rq->page_pool, dma_info->page);
mlx5e_page_dma_unmap(rq, page);
page_pool_recycle_direct(rq->page_pool, page);
} else {
mlx5e_page_dma_unmap(rq, dma_info);
page_pool_release_page(rq->page_pool, dma_info->page);
put_page(dma_info->page);
mlx5e_page_dma_unmap(rq, page);
page_pool_release_page(rq->page_pool, page);
put_page(page);
}
}
......@@ -334,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
*/
xsk_buff_free(dma_info->xsk);
else
mlx5e_page_release_dynamic(rq, dma_info, recycle);
mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
......@@ -1544,7 +1546,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di, prog, &xdp))
if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
......@@ -1567,45 +1569,105 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_dma_info *di = wi->di;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
u32 first_frag_size;
struct bpf_prog *prog;
struct xdp_buff xdp;
struct sk_buff *skb;
u32 truesize;
void *va;
va = page_address(di->page) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
first_frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + frag_consumed_bytes);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
first_frag_size, DMA_FROM_DEVICE);
rq->buff.frame0_sz, DMA_FROM_DEVICE);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
/* XDP is not supported in this configuration, as incoming packets
* might spread among multiple pages.
*/
skb = mlx5e_build_linear_skb(rq, va, first_frag_size, rx_headroom,
frag_consumed_bytes, 0);
if (unlikely(!skb))
return NULL;
page_ref_inc(di->page);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
sinfo = xdp_get_shared_info_from_buff(&xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
frag_info++;
wi++;
while (cqe_bcnt) {
skb_frag_t *frag;
di = wi->di;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset,
frag_consumed_bytes, frag_info->frag_stride);
dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
frag_consumed_bytes, DMA_FROM_DEVICE);
if (!xdp_buff_has_frags(&xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
xdp_buff_set_frags_flag(&xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
__skb_frag_set_page(frag, di->page);
skb_frag_off_set(frag, wi->offset);
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(di->page))
xdp_buff_set_frag_pfmemalloc(&xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
frag_info++;
wi++;
}
di = head_wi->di;
prog = rcu_dereference(rq->xdp_prog);
if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
mlx5e_put_rx_frag(rq, &head_wi[i], true);
}
return NULL; /* page/packet was consumed by XDP */
}
skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
xdp.data - xdp.data_hard_start,
xdp.data_end - xdp.data,
xdp.data - xdp.data_meta);
if (unlikely(!skb))
return NULL;
page_ref_inc(di->page);
if (unlikely(xdp_buff_has_frags(&xdp))) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
xdp_buff_is_frag_pfmemalloc(&xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
page_ref_inc(skb_frag_page(frag));
}
}
return skb;
}
......@@ -1874,7 +1936,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
......
......@@ -176,7 +176,6 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
......