Commit ecbcd689 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-07-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-07-26 (XDP redirect)

This series from Tariq adds support for device-out XDP redirect.

It starts with simple RX and XDP cleanups:
- Replace call to MPWQE free with dealloc in interface down flow
- Do not recycle RX pages in interface down flow
- Gather all XDP pre-requisite checks in a single function
- Restrict the combination of large MTU and XDP

Since the XDP logic is now called from the TX side as well, the generic
XDP TX logic is no longer RX-only. Tariq therefore creates a new xdp.c
file, moves the XDP-related code into it, and generalizes it to support
XDP TX for XDP redirect, including the XDP TX SQ structures and XDP
counters.

XDP redirect support:
Add an implementation of the ndo_xdp_xmit callback.

Dedicate a new set of XDP-SQ instances to serve XDP_REDIRECT requests.
These instances are completely separate from the existing XDP-SQ
objects that serve local XDP_TX actions.

Performance tests:

xdp_redirect_map from ConnectX-5 to ConnectX-5.
CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
Packet-rate of 64B packets.

Single queue: 7 Mpps.
Multi queue: 55 Mpps.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f61b6db3 8ee48233
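For context, the performance test above uses the kernel's xdp_redirect_map
sample, which exercises exactly the path this series adds on the egress side:
an XDP program attached to the ingress ConnectX-5 returns XDP_REDIRECT through
a DEVMAP, and the core then hands the resulting xdp_frame to the egress
device's ndo_xdp_xmit (mlx5e_xdp_xmit in the diff below). A minimal sketch of
such a program follows; it is illustrative only and not part of this series,
and the map name, section names, and samples/bpf-era "bpf_helpers.h" include
are assumptions.

/* Illustrative DEVMAP redirect program; userspace is expected to store the
 * egress ifindex in tx_port[0] before attaching this to the ingress device.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") tx_port = {
	.type        = BPF_MAP_TYPE_DEVMAP,
	.key_size    = sizeof(int),
	.value_size  = sizeof(int),
	.max_entries = 1,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* Returns XDP_REDIRECT on a successful map lookup; the core queues
	 * the frame and later flushes it to the egress netdev through that
	 * device's ndo_xdp_xmit callback.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";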
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
 		fpga/ipsec.o fpga/tls.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
-		en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
+		en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o vxlan.o \
 		en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -147,10 +147,6 @@ struct page_pool;
 	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
 #define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS

-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
-	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-
 #define MLX5E_NUM_MAIN_GROUPS 9

 #define MLX5E_MSG_LEVEL		NETIF_MSG_LINK
@@ -348,6 +344,7 @@ enum {
 	MLX5E_SQ_STATE_IPSEC,
 	MLX5E_SQ_STATE_AM,
 	MLX5E_SQ_STATE_TLS,
+	MLX5E_SQ_STATE_REDIRECT,
 };

 struct mlx5e_sq_wqe_info {
@@ -368,16 +365,14 @@ struct mlx5e_txqsq {
 	struct mlx5e_cq            cq;

-	/* write@xmit, read@completion */
-	struct {
-		struct mlx5e_sq_dma       *dma_fifo;
-		struct mlx5e_tx_wqe_info  *wqe_info;
-	} db;
-
 	/* read only */
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	struct mlx5e_sq_stats     *stats;
+	struct {
+		struct mlx5e_sq_dma       *dma_fifo;
+		struct mlx5e_tx_wqe_info  *wqe_info;
+	} db;
 	void __iomem              *uar_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
@@ -399,30 +394,43 @@ struct mlx5e_txqsq {
 	} recover;
 } ____cacheline_aligned_in_smp;

+struct mlx5e_dma_info {
+	struct page     *page;
+	dma_addr_t       addr;
+};
+
+struct mlx5e_xdp_info {
+	struct xdp_frame      *xdpf;
+	dma_addr_t             dma_addr;
+	struct mlx5e_dma_info  di;
+};
+
 struct mlx5e_xdpsq {
 	/* data path */

-	/* dirtied @rx completion */
+	/* dirtied @completion */
 	u16                        cc;
-	u16                        pc;
+	bool                       redirect_flush;

-	struct mlx5e_cq            cq;
+	/* dirtied @xmit */
+	u16                        pc ____cacheline_aligned_in_smp;
+	bool                       doorbell;

-	/* write@xmit, read@completion */
-	struct {
-		struct mlx5e_dma_info     *di;
-		bool                       doorbell;
-		bool                       redirect_flush;
-	} db;
+	struct mlx5e_cq            cq;

 	/* read only */
 	struct mlx5_wq_cyc         wq;
+	struct mlx5e_xdpsq_stats  *stats;
+	struct {
+		struct mlx5e_xdp_info     *xdpi;
+	} db;
 	void __iomem              *uar_map;
 	u32                        sqn;
 	struct device             *pdev;
 	__be32                     mkey_be;
 	u8                         min_inline_mode;
 	unsigned long              state;
+	unsigned int               hw_mtu;

 	/* control path */
 	struct mlx5_wq_ctrl        wq_ctrl;
@@ -459,11 +467,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
 }

-struct mlx5e_dma_info {
-	struct page     *page;
-	dma_addr_t      addr;
-};
-
 struct mlx5e_wqe_frag_info {
 	struct mlx5e_dma_info *di;
 	u32 offset;
@@ -566,7 +569,6 @@ struct mlx5e_rq {
 	/* XDP */
 	struct bpf_prog       *xdp_prog;
-	unsigned int           hw_mtu;
 	struct mlx5e_xdpsq     xdpsq;
 	DECLARE_BITMAP(flags, 8);
 	struct page_pool      *page_pool;
@@ -595,6 +597,9 @@ struct mlx5e_channel {
 	__be32                     mkey_be;
 	u8                         num_tc;

+	/* XDP_REDIRECT */
+	struct mlx5e_xdpsq         xdpsq;
+
 	/* data path - accessed per napi poll */
 	struct irq_desc           *irq_desc;
 	struct mlx5e_ch_stats     *stats;
@@ -617,6 +622,8 @@ struct mlx5e_channel_stats {
 	struct mlx5e_ch_stats ch;
 	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
 	struct mlx5e_rq_stats rq;
+	struct mlx5e_xdpsq_stats rq_xdpsq;
+	struct mlx5e_xdpsq_stats xdpsq;
 } ____cacheline_aligned_in_smp;

 enum mlx5e_traffic_types {
@@ -876,14 +883,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);

 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
 				struct mlx5e_params *params);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 			bool recycle);
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -892,7 +898,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 struct sk_buff *
 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 				u16 cqe_bcnt, u32 head_offset, u32 page_idx);

--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bpf_trace.h>
#include "en/xdp.h"
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
struct xdp_buff *xdp)
{
struct mlx5e_xdp_info xdpi;
xdpi.xdpf = convert_to_xdp_frame(xdp);
if (unlikely(!xdpi.xdpf))
return false;
xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, PCI_DMA_TODEVICE);
xdpi.di = *di;
return mlx5e_xmit_xdp_frame(sq, &xdpi);
}
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
u32 act;
int err;
if (!prog)
return false;
xdp.data = va + *rx_headroom;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
case XDP_PASS:
*rx_headroom = xdp.data - xdp.data_hard_start;
*len = xdp.data_end - xdp.data;
return false;
case XDP_TX:
if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
case XDP_REDIRECT:
/* When XDP enabled then page-refcnt==1 here */
err = xdp_do_redirect(rq->netdev, &xdp, prog);
if (unlikely(err))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
rq->xdpsq.redirect_flush = true;
mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
xdp_abort:
trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
rq->stats->xdp_drop++;
return true;
}
}
bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg = wqe->data;
struct xdp_frame *xdpf = xdpi->xdpf;
dma_addr_t dma_addr = xdpi->dma_addr;
unsigned int dma_len = xdpf->len;
struct mlx5e_xdpsq_stats *stats = sq->stats;
prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
return false;
}
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
if (sq->doorbell) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->doorbell = false;
}
stats->full++;
return false;
}
cseg->fm_ce_se = 0;
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
dseg++;
}
/* write the dma part */
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
/* move page to reference to sq responsibility,
* and mark so it's not put back in page-cache.
*/
sq->db.xdpi[pi] = *xdpi;
sq->pc++;
sq->doorbell = true;
stats->xmit++;
return true;
}
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
struct mlx5e_rq *rq;
bool is_redirect;
u16 sqcc;
int i;
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
return false;
is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
rq = container_of(sq, struct mlx5e_rq, xdpsq);
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
sqcc = sq->cc;
i = 0;
do {
u16 wqe_counter;
bool last_wqe;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
last_wqe = (sqcc == wqe_counter);
sqcc++;
if (is_redirect) {
xdp_return_frame(xdpi->xdpf);
dma_unmap_single(sq->pdev, xdpi->dma_addr,
xdpi->xdpf->len, DMA_TO_DEVICE);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi->di, true);
}
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
sq->stats->cqes += i;
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
wmb();
sq->cc = sqcc;
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
struct mlx5e_rq *rq;
bool is_redirect;
is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
while (sq->cc != sq->pc) {
u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
sq->cc++;
if (is_redirect) {
xdp_return_frame(xdpi->xdpf);
dma_unmap_single(sq->pdev, xdpi->dma_addr,
xdpi->xdpf->len, DMA_TO_DEVICE);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi->di, false);
}
}
}
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_xdpsq *sq;
int drops = 0;
int sq_num;
int i;
if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
sq_num = smp_processor_id();
if (unlikely(sq_num >= priv->channels.num))
return -ENXIO;
sq = &priv->channels.c[sq_num]->xdpsq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return -ENETDOWN;
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
struct mlx5e_xdp_info xdpi;
xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
drops++;
continue;
}
xdpi.xdpf = xdpf;
if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
}
if (flags & XDP_XMIT_FLUSH)
mlx5e_xmit_xdp_doorbell(sq);
return n - drops;
}

--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h

/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__
#include "en.h"
#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
#endif
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -47,6 +47,7 @@
 #include "accel/tls.h"
 #include "vxlan.h"
 #include "en/port.h"
+#include "en/xdp.h"

 struct mlx5e_rq_param {
 	u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -96,14 +97,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)

 static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
 {
-	if (!params->xdp_prog) {
-		u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-		u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	u16 linear_rq_headroom = params->xdp_prog ?
+		XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+	u32 frag_sz;

-		return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
-	}
+	linear_rq_headroom += NET_IP_ALIGN;

-	return PAGE_SIZE;
+	frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+	if (params->xdp_prog && frag_sz < PAGE_SIZE)
+		frag_sz = PAGE_SIZE;
+
+	return frag_sz;
 }

 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
@@ -485,7 +491,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix      = c->ix;
 	rq->mdev    = mdev;
-	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats   = &c->priv->channel_stats[c->ix].rq;

 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -877,7 +882,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)

 		/* UMR WQE (if in progress) is always at wq->head */
 		if (rq->mpwqe.umr_in_progress)
-			mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+			rq->dealloc_wqe(rq, wq->head);

 		while (!mlx5_wq_ll_is_empty(wq)) {
 			struct mlx5e_rx_wqe_ll *wqe;
@@ -963,16 +968,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)

 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-	kvfree(sq->db.di);
+	kvfree(sq->db.xdpi);
 }

 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

-	sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)),
-				  GFP_KERNEL, numa);
-	if (!sq->db.di) {
+	sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+				    GFP_KERNEL, numa);
+	if (!sq->db.xdpi) {
 		mlx5e_free_xdpsq_db(sq);
 		return -ENOMEM;
 	}
@@ -983,7 +988,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_xdpsq *sq)
+			     struct mlx5e_xdpsq *sq,
+			     bool is_redirect)
 {
 	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ -995,6 +1001,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->channel   = c;
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	sq->stats     = is_redirect ?
+		&c->priv->channel_stats[c->ix].xdpsq :
+		&c->priv->channel_stats[c->ix].rq_xdpsq;

 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1524,7 +1534,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
 static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 			    struct mlx5e_params *params,
 			    struct mlx5e_sq_param *param,
-			    struct mlx5e_xdpsq *sq)
+			    struct mlx5e_xdpsq *sq,
+			    bool is_redirect)
 {
 	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
 	struct mlx5e_create_sq_param csp = {};
@@ -1532,7 +1543,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 	int err;
 	int i;

-	err = mlx5e_alloc_xdpsq(c, params, param, sq);
+	err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
 	if (err)
 		return err;

@@ -1541,6 +1552,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 	csp.cqn             = sq->cq.mcq.cqn;
 	csp.wq_ctrl         = &sq->wq_ctrl;
 	csp.min_inline_mode = sq->min_inline_mode;
+	if (is_redirect)
+		set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
 	if (err)
@@ -1923,10 +1936,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		goto err_close_icosq_cq;

-	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
 	if (err)
 		goto err_close_tx_cqs;

+	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+	if (err)
+		goto err_close_xdp_tx_cqs;
+
 	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
 	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
 				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
@@ -1943,7 +1960,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		goto err_close_icosq;

-	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
 	if (err)
 		goto err_close_sqs;

@@ -1951,9 +1968,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	if (err)
 		goto err_close_xdp_sq;

+	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
+	if (err)
+		goto err_close_rq;
+
 	*cp = c;

 	return 0;

+err_close_rq:
+	mlx5e_close_rq(&c->rq);
+
 err_close_xdp_sq:
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -1972,6 +1997,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 err_close_rx_cq:
 	mlx5e_close_cq(&c->rq.cq);

+err_close_xdp_tx_cqs:
+	mlx5e_close_cq(&c->xdpsq.cq);
+
 err_close_tx_cqs:
 	mlx5e_close_tx_cqs(c);

@@ -2006,6 +2034,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
 static void mlx5e_close_channel(struct mlx5e_channel *c)
 {
+	mlx5e_close_xdpsq(&c->xdpsq);
 	mlx5e_close_rq(&c->rq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq.xdpsq);
@@ -2015,6 +2044,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq.xdpsq.cq);
 	mlx5e_close_cq(&c->rq.cq);
+	mlx5e_close_cq(&c->xdpsq.cq);
 	mlx5e_close_tx_cqs(c);
 	mlx5e_close_cq(&c->icosq.cq);
 	netif_napi_del(&c->napi);
@@ -3707,6 +3737,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	new_channels.params = *params;
 	new_channels.params.sw_mtu = new_mtu;

+	if (params->xdp_prog &&
+	    !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
+			   new_mtu, MLX5E_XDP_MAX_MTU);
+		err = -EINVAL;
+		goto out;
+	}
+
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
@@ -4094,26 +4132,47 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 	queue_work(priv->wq, &priv->tx_timeout_work);
 }

+static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+{
+	struct net_device *netdev = priv->netdev;
+	struct mlx5e_channels new_channels = {};
+
+	if (priv->channels.params.lro_en) {
+		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+		return -EINVAL;
+	}
+
+	if (MLX5_IPSEC_DEV(priv->mdev)) {
+		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+		return -EINVAL;
+	}
+
+	new_channels.params = priv->channels.params;
+	new_channels.params.xdp_prog = prog;
+
+	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
+			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct bpf_prog *old_prog;
-	int err = 0;
 	bool reset, was_opened;
+	int err;
 	int i;

 	mutex_lock(&priv->state_lock);

-	if ((netdev->features & NETIF_F_LRO) && prog) {
-		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
-		err = -EINVAL;
-		goto unlock;
-	}
-
-	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
-		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
-		err = -EINVAL;
-		goto unlock;
+	if (prog) {
+		err = mlx5e_xdp_allowed(priv, prog);
+		if (err)
+			goto unlock;
 	}

 	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -4242,6 +4301,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
 #endif
 	.ndo_tx_timeout          = mlx5e_tx_timeout,
 	.ndo_bpf                 = mlx5e_xdp,
+	.ndo_xdp_xmit            = mlx5e_xdp_xmit,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller     = mlx5e_netpoll,
 #endif
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,9 +59,11 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
@@ -73,6 +75,10 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
@@ -128,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
 		struct mlx5e_channel_stats *channel_stats =
 			&priv->channel_stats[i];
+		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
+		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
 		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
 		int j;
@@ -141,10 +149,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_csum_complete += rq_stats->csum_complete;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop     += rq_stats->xdp_drop;
-		s->rx_xdp_tx       += rq_stats->xdp_tx;
-		s->rx_xdp_tx_cqe   += rq_stats->xdp_tx_cqe;
-		s->rx_xdp_tx_full  += rq_stats->xdp_tx_full;
+		s->rx_xdp_redirect += rq_stats->xdp_redirect;
+		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
+		s->rx_xdp_tx_full  += xdpsq_stats->full;
+		s->rx_xdp_tx_err   += xdpsq_stats->err;
+		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
 		s->rx_wqe_err   += rq_stats->wqe_err;
 		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
 		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
@@ -162,7 +172,12 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->ch_poll        += ch_stats->poll;
 		s->ch_arm         += ch_stats->arm;
 		s->ch_aff_change  += ch_stats->aff_change;
 		s->ch_eq_rearm    += ch_stats->eq_rearm;
+		/* xdp redirect */
+		s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
+		s->tx_xdp_full    += xdpsq_red_stats->full;
+		s->tx_xdp_err     += xdpsq_red_stats->err;
+		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;

 		for (j = 0; j < priv->max_opened_tc; j++) {
 			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -1126,9 +1141,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
@@ -1168,6 +1181,20 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
 };

+static const struct counter_desc rq_xdpsq_stats_desc[] = {
+	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
+static const struct counter_desc xdpsq_stats_desc[] = {
+	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
+	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
+	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
+};
+
 static const struct counter_desc ch_stats_desc[] = {
 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
@@ -1178,6 +1205,8 @@ static const struct counter_desc ch_stats_desc[] = {

 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
+#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
+#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)

 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
@@ -1186,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)

 	return (NUM_RQ_STATS * max_nch) +
 	       (NUM_CH_STATS * max_nch) +
-	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
+	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
+	       (NUM_RQ_XDPSQ_STATS * max_nch) +
+	       (NUM_XDPSQ_STATS * max_nch);
 }

 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -1200,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
 				ch_stats_desc[j].format, i);

-	for (i = 0; i < max_nch; i++)
+	for (i = 0; i < max_nch; i++) {
 		for (j = 0; j < NUM_RQ_STATS; j++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				rq_stats_desc[j].format, i);
+		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				rq_xdpsq_stats_desc[j].format, i);
+	}

 	for (tc = 0; tc < priv->max_opened_tc; tc++)
 		for (i = 0; i < max_nch; i++)
@@ -1211,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 					sq_stats_desc[j].format,
 					priv->channel_tc2txq[i][tc]);

+	for (i = 0; i < max_nch; i++)
+		for (j = 0; j < NUM_XDPSQ_STATS; j++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				xdpsq_stats_desc[j].format, i);
+
 	return idx;
 }

@@ -1226,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 			MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
 					     ch_stats_desc, j);

-	for (i = 0; i < max_nch; i++)
+	for (i = 0; i < max_nch; i++) {
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			data[idx++] =
 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
 						     rq_stats_desc, j);
+		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
+			data[idx++] =
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+						     rq_xdpsq_stats_desc, j);
+	}

 	for (tc = 0; tc < priv->max_opened_tc; tc++)
 		for (i = 0; i < max_nch; i++)
@@ -1239,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
 						     sq_stats_desc, j);

+	for (i = 0; i < max_nch; i++)
+		for (j = 0; j < NUM_XDPSQ_STATS; j++)
+			data[idx++] =
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+						     xdpsq_stats_desc, j);
+
 	return idx;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -44,6 +44,8 @@
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

 struct counter_desc {
@@ -70,9 +72,11 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_complete;
 	u64 rx_csum_unnecessary_inner;
 	u64 rx_xdp_drop;
-	u64 rx_xdp_tx;
-	u64 rx_xdp_tx_cqe;
+	u64 rx_xdp_redirect;
+	u64 rx_xdp_tx_xmit;
 	u64 rx_xdp_tx_full;
+	u64 rx_xdp_tx_err;
+	u64 rx_xdp_tx_cqe;
 	u64 tx_csum_none;
 	u64 tx_csum_partial;
 	u64 tx_csum_partial_inner;
@@ -84,6 +88,10 @@ struct mlx5e_sw_stats {
 	u64 tx_queue_wake;
 	u64 tx_udp_seg_rem;
 	u64 tx_cqe_err;
+	u64 tx_xdp_xmit;
+	u64 tx_xdp_full;
+	u64 tx_xdp_err;
+	u64 tx_xdp_cqes;
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler_cqes;
 	u64 rx_mpwqe_filler_strides;
@@ -178,9 +186,7 @@ struct mlx5e_rq_stats {
 	u64 lro_bytes;
 	u64 removed_vlan_packets;
 	u64 xdp_drop;
-	u64 xdp_tx;
-	u64 xdp_tx_cqe;
-	u64 xdp_tx_full;
+	u64 xdp_redirect;
 	u64 wqe_err;
 	u64 mpwqe_filler_cqes;
 	u64 mpwqe_filler_strides;
@@ -225,6 +231,14 @@ struct mlx5e_sq_stats {
 	u64 cqe_err;
 };

+struct mlx5e_xdpsq_stats {
+	u64 xmit;
+	u64 full;
+	u64 err;
+	/* dirtied @completion */
+	u64 cqes ____cacheline_aligned_in_smp;
+};
+
 struct mlx5e_ch_stats {
 	u64 events;
 	u64 poll;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev,
 	}
 }

+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
+{
+	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+}
+
 static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
 				  dma_addr_t addr,
 				  u32 size,
 				  enum mlx5e_dma_map_type map_type)
 {
-	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
-
-	sq->db.dma_fifo[i].addr = addr;
-	sq->db.dma_fifo[i].size = size;
-	sq->db.dma_fifo[i].type = map_type;
-	sq->dma_fifo_pc++;
-}
-
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
-{
-	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
+	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
+
+	dma->addr = addr;
+	dma->size = size;
+	dma->type = map_type;
 }

 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -32,6 +32,7 @@
 #include <linux/irq.h>
 #include "en.h"
+#include "en/xdp.h"

 static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
 {
@@ -84,6 +85,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	for (i = 0; i < c->num_tc; i++)
 		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

+	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+
 	if (c->xdp)
 		busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
@@ -116,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	mlx5e_cq_arm(&c->rq.cq);
 	mlx5e_cq_arm(&c->icosq.cq);
+	mlx5e_cq_arm(&c->xdpsq.cq);

 	return work_done;
 }