Commit 6690c2c4 authored by Jakub Kicinski

Merge branch 'mlx5-xsk-updates-part2-2022-09-28'

Saeed Mahameed says:

====================
mlx5 xsk updates part2 2022-09-28

XSK buffer improvements. This is part #2 of a 4-part series.

 1) Expose xsk min chunk size to drivers, to allow the driver to adjust to a
    better buffer stride size

 2) Adjust MTT page size to the XSK frame size, to avoid umem overrun in
    certain situations

 3) Use xsk frame size as the striding RQ page size for XSK RQs

 4) KSM for unaligned XSK: KSM allows registration of arbitrary buffer chunk
    lengths in HW, which makes more sense for unaligned XSK

 5) More cleanups and optimizations in preparation for the next improvements
    in part 3

part 1: https://lore.kernel.org/netdev/20220927203611.244301-1-saeed@kernel.org/
====================

Link: https://lore.kernel.org/r/20220929072156.93299-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0f5ef005 8f5ed1c1
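
For orientation before the diff, here is a minimal userspace sketch of the page-shift arithmetic the series moves to. It only loosely mirrors the mlx5e_mpwrq_page_shift()/mlx5e_mpwrq_log_wqe_sz() helpers added below; EX_MAX_PAGES_PER_WQE and the 4K fallback are illustrative assumptions, while the real driver derives both from device capabilities.

#include <stdio.h>

/* Illustrative caps; the real driver derives both from device capabilities
 * (see mlx5e_mpwrq_log_wqe_sz() in the diff below).
 */
#define EX_MAX_LOG_WQE_SZ    18   /* matches MLX5_MPWRQ_MAX_LOG_WQE_SZ */
#define EX_MAX_PAGES_PER_WQE 64   /* assumed UMR translation-entry budget */

/* log2 rounded up, like the kernel's order_base_2(). */
static unsigned int ex_order_base_2(unsigned int x)
{
	unsigned int order = 0;

	while ((1u << order) < x)
		order++;
	return order;
}

int main(void)
{
	/* 0 stands for a regular (non-XSK) RQ, which keeps using 4K pages. */
	unsigned int chunk_sizes[] = { 0, 2048, 3072, 4096 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int chunk = chunk_sizes[i];
		/* page_shift now follows the XSK frame size (items 2-3 above). */
		unsigned int page_shift = chunk ? ex_order_base_2(chunk) : 12;
		unsigned int log_wqe_sz = page_shift +
					  ex_order_base_2(EX_MAX_PAGES_PER_WQE);

		if (log_wqe_sz > EX_MAX_LOG_WQE_SZ)
			log_wqe_sz = EX_MAX_LOG_WQE_SZ;

		printf("chunk=%-4u -> page_shift=%u pages_per_wqe=%-3u wqe=%u KB\n",
		       chunk, page_shift, 1u << (log_wqe_sz - page_shift),
		       (1u << log_wqe_sz) / 1024);
	}
	return 0;
}

With a 2048-byte frame the striding math runs on 2K "pages"; with unaligned pools the same counts apply, but the UMR entries become KSMs (item 4), so the frames don't have to be naturally aligned.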
......@@ -93,28 +93,30 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
* sanity checks.
* MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
* size actually used at runtime, but it's not a problem when calculating static
* array sizes.
*/
#define MLX5_UMR_MAX_MTT_SPACE \
(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
MLX5_UMR_MTT_ALIGNMENT))
#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
* WQEs. This page will absorb write overflow by the hardware, when
* receiving packets larger than MTU. These oversize packets are
* dropped by the driver at a later stage.
*/
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5_KSM_OCTW(ksms) (ksms)
#define MLX5E_MAX_RQ_NUM_MTTS \
(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
(MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
......@@ -126,8 +128,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
......@@ -266,6 +267,7 @@ struct mlx5e_umr_wqe {
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
......@@ -472,12 +474,9 @@ struct mlx5e_txqsq {
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
dma_addr_t addr;
union {
struct page *page;
struct xdp_buff *xsk;
};
union mlx5e_alloc_unit {
struct page *page;
struct xdp_buff *xsk;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
......@@ -606,15 +605,15 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
struct mlx5e_dma_info *di;
union mlx5e_alloc_unit *au;
u32 offset;
bool last_in_page;
};
struct mlx5e_mpw_info {
u16 consumed_strides;
DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
struct mlx5e_dma_info dma_info[];
DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
union mlx5e_alloc_unit alloc_units[];
};
#define MLX5E_MAX_RX_FRAGS 4
......@@ -622,13 +621,13 @@ struct mlx5e_mpw_info {
/* a single cache unit is capable of serving one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
u32 head;
u32 tail;
struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
struct page *page_cache[MLX5E_CACHE_SIZE];
};
struct mlx5e_rq;
......@@ -663,6 +662,11 @@ struct mlx5e_rq_frags_info {
u8 wqe_bulk;
};
struct mlx5e_dma_info {
dma_addr_t addr;
struct page *page;
};
struct mlx5e_shampo_hd {
u32 mkey;
struct mlx5e_dma_info *info;
......@@ -688,7 +692,7 @@ struct mlx5e_rq {
struct {
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
struct mlx5e_dma_info *di;
union mlx5e_alloc_unit *alloc_units;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
......@@ -697,6 +701,7 @@ struct mlx5e_rq {
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
u16 num_strides;
u16 actual_wq_head;
u8 log_stride_sz;
......@@ -708,6 +713,7 @@ struct mlx5e_rq {
u8 pages_per_wqe;
u8 umr_wqebbs;
u8 mtts_per_wqe;
u8 unaligned;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
......@@ -758,7 +764,6 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5e_channel *channel;
u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
......@@ -1008,7 +1013,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
bool unaligned);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
......@@ -1138,8 +1144,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
......
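
As a quick check of the new limits above (MLX5E_MAX_RQ_NUM_MTTS and MLX5E_MAX_RQ_NUM_KSMS), a tiny self-contained snippet that reproduces the octword accounting the comments refer to; the macros are re-derived from the definitions above rather than included from kernel headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EX_ALIGN(x, a)      (((x) + (a) - 1) / (a) * (a))
#define EX_ALIGN_DOWN(x, a) ((x) / (a) * (a))

/* Re-derived from the macros above: two 8-byte MTTs per octword,
 * one KSM per octword.
 */
#define EX_MTT_OCTW(mtts) (EX_ALIGN((mtts), 8) / 2)
#define EX_KSM_OCTW(ksms) (ksms)

int main(void)
{
	unsigned int max_mtts = EX_ALIGN_DOWN(UINT16_MAX, 4) * 2; /* MLX5E_MAX_RQ_NUM_MTTS */
	unsigned int max_ksms = UINT16_MAX - 1;                   /* MLX5E_MAX_RQ_NUM_KSMS */

	/* The octword counts programmed into the UMR WQE must fit into u16. */
	assert(EX_MTT_OCTW(max_mtts) <= UINT16_MAX);
	assert(EX_KSM_OCTW(max_ksms) <= UINT16_MAX);

	printf("max MTTs %u -> %u octwords, max KSMs %u -> %u octwords\n",
	       max_mtts, EX_MTT_OCTW(max_mtts), max_ksms, EX_KSM_OCTW(max_ksms));
	return 0;
}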
......@@ -6,16 +6,112 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <net/xdp_sock_drv.h>
u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe)
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
return sizeof(struct mlx5e_umr_wqe) +
ALIGN(pages_per_wqe * sizeof(struct mlx5_mtt), MLX5_UMR_MTT_ALIGNMENT);
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
return min_page_shift ? : 12;
}
u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
{
u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
/* Regular RQ uses order-0 pages, the NIC must be able to map them. */
if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
min_page_shift = req_page_shift;
return max(req_page_shift, min_page_shift);
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
u8 max_pages_per_wqe, max_log_mpwqe_size;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned);
u8 pages_per_wqe;
pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
/* Sanity check for further calculations to succeed. */
BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
return pages_per_wqe;
}
u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned);
u16 umr_wqe_sz;
umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
return umr_wqe_sz;
}
u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, unaligned),
MLX5_SEND_WQE_BB);
}
u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
/* Add another page as a buffer between WQEs. This page will absorb
* write overflow by the hardware, when receiving packets larger than
* MTU. These oversize packets are dropped by the driver at a later
* stage.
*/
return MLX5_ALIGN_MTTS(mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned) + 1);
}
u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned)
{
if (unaligned)
return min(MLX5E_MAX_RQ_NUM_KSMS,
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
return MLX5E_MAX_RQ_NUM_MTTS;
}
static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
bool unaligned)
{
u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, unaligned);
u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, unaligned);
return ilog2(max_entries / mtts_per_wqe);
}
u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe)
u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
{
return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(pages_per_wqe), MLX5_SEND_WQE_BB);
return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned) +
mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
MLX5E_ORDER2_MAX_PACKET_MTU;
}
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
......@@ -52,14 +148,16 @@ static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
/* XSK frames are mapped as individual pages, because frames may come in
* an arbitrary order from random locations in the UMEM.
*/
if (xsk)
return PAGE_SIZE;
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
/* XDP in mlx5e doesn't support multiple packets per page. */
if (params->xdp_prog)
......@@ -68,15 +166,20 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5e_params *params,
return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(params, xsk);
u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_stride_sz);
return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
order_base_2(linear_stride_sz);
}
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
......@@ -96,9 +199,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
}
static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
u8 log_stride_sz, u8 log_num_strides)
u8 log_stride_sz, u8 log_num_strides,
u8 page_shift, bool unaligned)
{
if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
if (log_stride_sz + log_num_strides !=
mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned))
return false;
if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
......@@ -118,28 +223,53 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
s8 log_num_strides;
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
u8 log_num_strides;
u8 log_stride_sz;
u8 log_wqe_sz;
if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
return false;
log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned);
if (!mlx5e_rx_is_linear_skb(params, xsk))
if (log_wqe_sz < log_stride_sz)
return false;
log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));
log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
log_num_strides = log_wqe_sz - log_stride_sz;
return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
log_num_strides, page_shift,
unaligned);
}
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
bool unaligned = xsk ? xsk->unaligned : false;
log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned);
/* Numbers are unsigned, don't subtract to avoid underflow. */
if (params->log_rq_mtu_frames <
log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
/* Ethtool's rx_max_pending is calculated for regular RQ, that uses
* pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
* frame size not equal to PAGE_SIZE.
* A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
* unexpected failure.
*/
if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
return max_log_rq_size;
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
......@@ -169,7 +299,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk)
{
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
return order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));
return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
......@@ -178,7 +308,10 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
return MLX5_MPWRQ_LOG_WQE_SZ -
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
......@@ -327,7 +460,9 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, false))
return -EOPNOTSUPP;
if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
......@@ -339,12 +474,27 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
u16 max_mtu_pkts;
if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, xsk->unaligned))
return -EOPNOTSUPP;
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
return -EINVAL;
/* Current RQ length is too big for the given frame size: the
* needed number of WQEs exceeds the maximum.
*/
max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
if (params->log_rq_mtu_frames > max_mtu_pkts) {
mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
1 << params->log_rq_mtu_frames, xsk->chunk_size);
return -EINVAL;
}
return 0;
}
......@@ -358,7 +508,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
BIT(params->log_rq_mtu_frames),
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
......@@ -385,7 +535,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
!mlx5e_mpwrq_validate_regular(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
!mlx5e_rx_is_linear_skb(params, NULL)))
!mlx5e_rx_is_linear_skb(mdev, params, NULL)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
......@@ -428,10 +578,10 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
if (mlx5e_rx_is_linear_skb(params, xsk)) {
if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
int frag_stride;
frag_stride = mlx5e_rx_get_linear_stride_sz(params, xsk);
frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
......@@ -528,7 +678,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
/* +1 is for the case that the pkt_per_rsrv doesn't consume the reservation
......@@ -552,7 +702,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
else
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
......@@ -595,12 +745,16 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
log_wqe_num_of_strides)) {
log_wqe_num_of_strides,
page_shift, unaligned)) {
mlx5_core_err(mdev,
"Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
log_wqe_stride_size, log_wqe_num_of_strides);
"Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, unaligned %d\n",
log_wqe_stride_size, log_wqe_num_of_strides,
unaligned);
return -EINVAL;
}
......@@ -608,7 +762,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
MLX5_SET(wq, wq, shampo_enable, true);
MLX5_SET(wq, wq, log_reservation_size,
......@@ -720,13 +874,6 @@ static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
return MLX5_GET(wq, wq, log_wq_sz);
}
/* This function calculates the maximum number of headers entries that are needed
* per WQE, the formula is based on the size of the reservations and the
* restriction we have about max packets for reservation that is equal to max
......@@ -787,32 +934,82 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
bool unaligned = xsk ? xsk->unaligned : false;
u8 umr_wqebbs;
umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);
return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
}
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
u32 wqebbs;
u32 wqebbs, total_pages, useful_space;
/* MLX5_WQ_TYPE_CYCLIC */
if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE) *
(1 << mlx5e_get_rq_log_wq_sz(rqp->rqc));
/* UMR WQEs for the regular RQ. */
wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
* both regular RQ and XSK RQ.
* Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
* doesn't affect its return value, as long as params->xdp_prog != NULL,
* so we can just multiply by 2.
*
* XSK uses different values of page_shift, and the total number of UMR
* WQEBBs depends on it. This dependency is complex and not monotonic,
* especially taking into consideration that some of the parameters come
* from capabilities. Hence, we have to try all valid values of XSK
* frame size (and page_shift) to find the maximum.
*/
if (params->xdp_prog)
wqebbs *= 2;
if (params->xdp_prog) {
u32 max_xsk_wqebbs = 0;
u8 frame_shift;
for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
frame_shift <= PAGE_SHIFT; frame_shift++) {
/* The headroom doesn't affect the calculation. */
struct mlx5e_xsk_param xsk = {
.chunk_size = 1 << frame_shift,
.unaligned = false,
};
/* XSK aligned mode. */
max_xsk_wqebbs = max(max_xsk_wqebbs,
mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
/* XSK unaligned mode, frame size is a power of two. */
xsk.unaligned = true;
max_xsk_wqebbs = max(max_xsk_wqebbs,
mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
}
wqebbs += max_xsk_wqebbs;
}
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
/* UMR WQEs don't cross the page boundary, they are padded with NOPs.
* This padding is always smaller than the max WQE size. That gives us
* at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
* per page. The number of pages is estimated as the total size of WQEs
* divided by the useful space in page, rounding up. If some WQEs don't
* fully fit into the useful space, they can occupy part of the padding,
* which proves this estimation to be correct (reserve enough space).
*/
useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
......@@ -866,7 +1063,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
......
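
To make the ICOSQ page-padding estimate in mlx5e_build_icosq_log_wq_sz() above concrete, a standalone example with made-up inputs: the 256-byte max WQE size and the 520-WQEBB demand are assumptions, while the driver takes the real values from mlx5e_get_max_sq_wqebbs() and mlx5e_mpwrq_total_umr_wqebbs().

#include <stdio.h>

#define EX_PAGE_SIZE   4096
#define EX_WQE_BB      64    /* one WQE basic block (MLX5_SEND_WQE_BB) */
#define EX_MAX_WQE_SZ  256   /* assumed max SQ WQE size, i.e. 4 WQEBBs */

#define EX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int wqebbs = 520; /* assumed total UMR WQEBBs to fit */

	/* UMR WQEs never cross a page boundary; the worst-case padding per
	 * page is (max WQE size - one WQEBB), so every page offers at least
	 * this much useful space.
	 */
	unsigned int useful_space = EX_PAGE_SIZE - EX_MAX_WQE_SZ + EX_WQE_BB;
	unsigned int total_pages  = EX_DIV_ROUND_UP(wqebbs * EX_WQE_BB, useful_space);

	/* Round the ICOSQ up to whole pages' worth of WQEBBs. */
	unsigned int icosq_wqebbs = total_pages * (EX_PAGE_SIZE / EX_WQE_BB);

	printf("need %u WQEBBs -> %u useful B/page -> %u pages -> %u WQEBBs\n",
	       wqebbs, useful_space, total_pages, icosq_wqebbs);
	return 0;
}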
......@@ -9,6 +9,7 @@
struct mlx5e_xsk_param {
u16 headroom;
u16 chunk_size;
bool unaligned;
};
struct mlx5e_cq_param {
......@@ -86,8 +87,14 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
/* Striding RQ dynamic parameters */
u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe);
u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe);
u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned);
u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
/* Parameter calculations */
......@@ -106,12 +113,14 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
......
......@@ -162,10 +162,10 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
mlx5e_free_rx_descs(rq);
err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
if (err)
goto out;
return err;
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
mlx5e_activate_rq(rq);
rq->stats->recover++;
if (rq->channel)
......@@ -173,9 +173,6 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
else
mlx5e_trigger_napi_sched(rq->cq.napi);
return 0;
out:
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
return err;
}
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
......
......@@ -523,6 +523,53 @@ static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int i
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
struct mlx5e_channels *chs,
unsigned int ix)
{
u32 rqn;
int err;
mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
rqn, ix, err);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
return;
if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
rqn, ix, err);
}
static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
unsigned int ix)
{
int err;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
res->drop_rqn, ix, err);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
return;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
res->drop_rqn, ix, err);
}
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
unsigned int nch, ix;
......@@ -536,43 +583,10 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
mlx5e_rx_res_rss_enable(res);
for (ix = 0; ix < nch; ix++) {
u32 rqn;
mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
rqn, ix, err);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
continue;
if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
rqn, ix, err);
}
for (ix = nch; ix < res->max_nch; ix++) {
err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
res->drop_rqn, ix, err);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
continue;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
res->drop_rqn, ix, err);
}
for (ix = 0; ix < nch; ix++)
mlx5e_rx_res_channel_activate_direct(res, chs, ix);
for (ix = nch; ix < res->max_nch; ix++)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
......@@ -595,22 +609,8 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
res->drop_rqn, ix, err);
if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
continue;
err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
res->drop_rqn, ix, err);
}
for (ix = 0; ix < res->max_nch; ix++)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
......
......@@ -72,6 +72,7 @@ void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *x
{
xsk->headroom = xsk_pool_get_headroom(pool);
xsk->chunk_size = xsk_pool_get_chunk_size(pool);
xsk->unaligned = pool->unaligned;
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
......
......@@ -30,7 +30,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
struct xdp_buff *xdp = wi->dma_info[page_idx].xsk;
struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
......@@ -83,7 +83,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
struct xdp_buff *xdp = wi->di->xsk;
struct xdp_buff *xdp = wi->au->xsk;
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
......
......@@ -7,8 +7,6 @@
#include "en.h"
#include <net/xdp_sock_drv.h>
#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
/* RX data path */
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
......@@ -20,35 +18,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
retry:
dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
/* Store the DMA address without headroom. In striding RQ case, we just
* provide pages for UMR, and headroom is counted at the setup stage
* when creating a WQE. In non-striding RQ case, headroom is accounted
* in mlx5e_alloc_rx_wqe.
*/
dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
/* MTT page mapping has alignment requirements. If they are not
* satisfied, leak the descriptor so that it won't come again, and try
* to allocate a new one.
*/
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
xsk_buff_discard(dma_info->xsk);
goto retry;
}
}
return 0;
}
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
if (!xsk_uses_need_wakeup(rq->xsk_pool))
......
......@@ -5,20 +5,19 @@
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
#include <net/xdp_sock_drv.h>
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding
* RQ, keep this check in the driver.
/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
* stride size of striding RQ.
*/
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
if (xsk->chunk_size > PAGE_SIZE ||
xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
return false;
/* frag_sz is different for regular and XSK RQs, so ensure that linear
......@@ -28,7 +27,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
return mlx5e_rx_is_linear_skb(params, xsk);
return mlx5e_rx_is_linear_skb(mdev, params, xsk);
}
}
......
......@@ -311,7 +311,13 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param)
{
param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
/* Limitation for regular RQ. XSK RQ may clamp the queue length in
* mlx5e_mpwqe_get_log_rq_size.
*/
u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, PAGE_SHIFT, false);
param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
max_log_mpwrq_pkts);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
......
......@@ -68,25 +68,25 @@
#include "qos.h"
#include "en/trap.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
bool unaligned)
{
bool striding_rq_umr, inline_umr;
u16 max_wqebbs;
u16 umr_wqebbs;
u16 umr_wqebbs, max_wqebbs;
bool striding_rq_umr;
striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE);
inline_umr = umr_wqebbs <= max_wqebbs;
if (!striding_rq_umr)
return false;
if (!inline_umr) {
mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%u) exceeds maximum supported (%u).\n",
umr_wqebbs * MLX5_SEND_WQE_BB,
max_wqebbs * MLX5_SEND_WQE_BB);
umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);
max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
* calculated from mlx5e_get_max_sq_aligned_wqebbs.
*/
if (WARN_ON(umr_wqebbs > max_wqebbs))
return false;
}
return true;
}
......@@ -209,18 +209,21 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
u16 octowords;
u8 ds_cnt;
ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mpwqe.pages_per_wqe),
ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
rq->mpwqe.unaligned),
MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->umr_mkey = rq->mkey_be;
cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
cpu_to_be16(MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe));
octowords = rq->mpwqe.unaligned ? MLX5_KSM_OCTW(rq->mpwqe.pages_per_wqe) :
MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe);
ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
......@@ -268,7 +271,7 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
size_t alloc_size;
alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, dma_info,
alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
rq->mpwqe.pages_per_wqe));
rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
......@@ -280,39 +283,51 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
u64 npages, u8 page_shift, u32 *umr_mkey,
dma_addr_t filler_addr)
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u32 npages, u8 page_shift, u32 *umr_mkey,
dma_addr_t filler_addr, bool unaligned)
{
struct mlx5_mtt *mtt;
struct mlx5_ksm *ksm;
u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
if (unaligned && !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
return -EINVAL;
}
inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
unaligned ? sizeof(*ksm) : sizeof(*mtt),
npages);
if (inlen < 0)
return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
octwords = unaligned ? MLX5_KSM_OCTW(npages) : MLX5_MTT_OCTW(npages);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, access_mode_1_0,
unaligned ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
MLX5_SET(mkc, mkc, translations_octword_size,
MLX5_MTT_OCTW(npages));
MLX5_SET(mkc, mkc, translations_octword_size, octwords);
MLX5_SET(mkc, mkc, log_page_size, page_shift);
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
MLX5_MTT_OCTW(npages));
MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
......@@ -320,9 +335,20 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0 ; i < npages ; i++)
mtt[i].ptag = cpu_to_be64(filler_addr);
if (unaligned) {
ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0; i < npages; i++)
ksm[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
.va = cpu_to_be64(filler_addr),
};
} else {
mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0; i < npages; i++)
mtt[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(filler_addr),
};
}
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
......@@ -365,10 +391,26 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u64 num_mtts = mlx5_wq_ll_get_size(&rq->mpwqe.wq) * rq->mpwqe.mtts_per_wqe;
u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
u32 num_entries, max_num_entries;
u32 umr_mkey;
int err;
max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.unaligned);
return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, rq->mpwqe.page_shift,
&rq->umr_mkey, rq->wqe_overflow.addr);
/* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
&num_entries) ||
num_entries > max_num_entries))
mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
__func__, wq_size, rq->mpwqe.mtts_per_wqe,
max_num_entries);
err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
&umr_mkey, rq->wqe_overflow.addr,
rq->mpwqe.unaligned);
rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
return err;
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
......@@ -391,7 +433,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
struct mlx5e_wqe_frag_info *prev = NULL;
int i;
next_frag.di = &rq->wqe.di[0];
next_frag.au = &rq->wqe.alloc_units[0];
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
......@@ -401,7 +443,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
next_frag.di++;
next_frag.au++;
next_frag.offset = 0;
if (prev)
prev->last_in_page = true;
......@@ -418,12 +460,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
if (!rq->wqe.di)
rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
GFP_KERNEL, node);
if (!rq->wqe.alloc_units)
return -ENOMEM;
mlx5e_init_frags_partition(rq);
......@@ -431,9 +474,9 @@ int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
return 0;
}
void mlx5e_free_di_list(struct mlx5e_rq *rq)
static void mlx5e_free_au_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.di);
kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
......@@ -576,6 +619,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
......@@ -591,13 +636,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.page_shift = PAGE_SHIFT;
rq->mpwqe.pages_per_wqe = MLX5_MPWRQ_PAGES_PER_WQE;
rq->mpwqe.umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(rq->mpwqe.pages_per_wqe);
rq->mpwqe.mtts_per_wqe = MLX5E_REQUIRED_WQE_MTTS;
rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
rq->mpwqe.unaligned = xsk ? xsk->unaligned : false;
rq->mpwqe.pages_per_wqe =
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.unaligned);
rq->mpwqe.umr_wqebbs =
mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
rq->mpwqe.unaligned);
rq->mpwqe.mtts_per_wqe =
mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.unaligned);
pool_size = rq->mpwqe.pages_per_wqe <<
mlx5e_mpwqe_get_log_rq_size(params, xsk);
mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
......@@ -609,7 +661,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
......@@ -617,7 +668,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
if (err)
goto err_free_by_rq_type;
goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
......@@ -642,11 +693,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
goto err_rq_wq_destroy;
}
err = mlx5e_init_di_list(rq, wq_sz, node);
err = mlx5e_init_au_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
......@@ -671,14 +720,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
goto err_free_shampo;
goto err_free_by_rq_type;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
goto err_free_shampo;
goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
......@@ -693,7 +742,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
wqe->data[0].lkey = rq->mkey_be;
wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
} else {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
......@@ -731,19 +780,21 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
err_free_shampo:
mlx5e_rq_free_shampo(rq);
err_destroy_page_pool:
page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_di_list(rq);
mlx5e_free_au_list(rq);
err_rq_frags:
kvfree(rq->wqe.frags);
}
......@@ -771,24 +822,22 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
mlx5e_free_di_list(rq);
mlx5e_free_au_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
/* With AF_XDP, page_cache is not used, so this loop is not
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
mlx5e_page_release_dynamic(rq, dma_info->page, false);
mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
......@@ -4030,14 +4079,16 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
static bool mlx5e_params_validate_xdp(struct net_device *netdev,
struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
bool is_linear;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
is_linear = mlx5e_rx_is_linear_skb(params, NULL);
is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
......@@ -4074,7 +4125,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
&new_params)) {
err = -EINVAL;
goto out;
}
......@@ -4094,8 +4146,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
&new_params, NULL);
u8 sz_old = mlx5e_mpwqe_get_log_rq_size(params, NULL);
u8 sz_new = mlx5e_mpwqe_get_log_rq_size(&new_params, NULL);
u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
/* Always reset in linear mode - hw_mtu is used in data path.
* Check that the mode was non-linear and didn't change.
......@@ -4553,7 +4605,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
if (!mlx5e_params_validate_xdp(netdev, &new_params))
if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
return -EINVAL;
return 0;
......@@ -4924,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
mlx5e_check_fragmented_striding_rq_cap(mdev))
mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, false))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
......
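
One way to read the KSM path added to mlx5e_create_umr_mkey() and mlx5e_mpwrq_log_wqe_sz() above: a KSM entry carries an mkey plus an arbitrary virtual address, but is twice the size of an MTT, so a UMR WQE can describe only half as many buffers. The structs below are simplified stand-ins for illustration, not the mlx5 ABI definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the 8-byte MTT and 16-byte KSM translation
 * entries carried by the UMR WQE; field names are illustrative, not the
 * mlx5 ABI definitions.
 */
struct ex_mtt { uint64_t ptag; };                    /* page-aligned address only */
struct ex_ksm { uint32_t rsvd, key; uint64_t va; };  /* mkey + arbitrary address  */

int main(void)
{
	/* Assumed translation-entry budget of one UMR WQE, e.g. 512 bytes. */
	unsigned int umr_entry_space = 512;

	printf("aligned   (MTT): %2zu B/entry -> %u buffers per UMR WQE\n",
	       sizeof(struct ex_mtt),
	       umr_entry_space / (unsigned int)sizeof(struct ex_mtt));
	printf("unaligned (KSM): %2zu B/entry -> %u buffers per UMR WQE\n",
	       sizeof(struct ex_ksm),
	       umr_entry_space / (unsigned int)sizeof(struct ex_ksm));
	return 0;
}

That trade-off is what umr_entry_size expresses in the new helpers, and why KSM is reserved for unaligned XSK, where MTT's natural-alignment requirement can't be met.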
......@@ -77,7 +77,7 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
static struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
{
size_t isz = struct_size(rq->mpwqe.info, dma_info, rq->mpwqe.pages_per_wqe);
size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);
return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
......@@ -245,69 +245,69 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
return false;
}
cache->page_cache[cache->tail].page = page;
cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
cache->page_cache[cache->tail] = page;
cache->tail = tail_next;
return true;
}
static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
struct mlx5e_rq_stats *stats = rq->stats;
dma_addr_t addr;
if (unlikely(cache->head == cache->tail)) {
stats->cache_empty++;
return false;
}
if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
if (page_ref_count(cache->page_cache[cache->head]) != 1) {
stats->cache_busy++;
return false;
}
*dma_info = cache->page_cache[cache->head];
au->page = cache->page_cache[cache->head];
cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
stats->cache_reuse++;
dma_sync_single_for_device(rq->pdev, dma_info->addr,
/* Non-XSK always uses PAGE_SIZE. */
PAGE_SIZE,
DMA_FROM_DEVICE);
addr = page_pool_get_dma_addr(au->page);
/* Non-XSK always uses PAGE_SIZE. */
dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
return true;
}
static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
if (mlx5e_rx_cache_get(rq, dma_info))
dma_addr_t addr;
if (mlx5e_rx_cache_get(rq, au))
return 0;
dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
if (unlikely(!dma_info->page))
au->page = page_pool_dev_alloc_pages(rq->page_pool);
if (unlikely(!au->page))
return -ENOMEM;
/* Non-XSK always uses PAGE_SIZE. */
dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
page_pool_recycle_direct(rq->page_pool, dma_info->page);
dma_info->page = NULL;
addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(rq->pdev, addr))) {
page_pool_recycle_direct(rq->page_pool, au->page);
au->page = NULL;
return -ENOMEM;
}
page_pool_set_dma_addr(dma_info->page, dma_info->addr);
page_pool_set_dma_addr(au->page, addr);
return 0;
}
static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
if (rq->xsk_pool)
return mlx5e_xsk_page_alloc_pool(rq, dma_info);
else
return mlx5e_page_alloc_pool(rq, dma_info);
if (rq->xsk_pool) {
au->xsk = xsk_buff_alloc(rq->xsk_pool);
return likely(au->xsk) ? 0 : -ENOMEM;
} else {
return mlx5e_page_alloc_pool(rq, au);
}
}
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
......@@ -335,7 +335,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec
}
static inline void mlx5e_page_release(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
union mlx5e_alloc_unit *au,
bool recycle)
{
if (rq->xsk_pool)
......@@ -343,9 +343,9 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
*/
xsk_buff_free(dma_info->xsk);
xsk_buff_free(au->xsk);
else
mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
mlx5e_page_release_dynamic(rq, au->page, recycle);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
......@@ -354,12 +354,12 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
int err = 0;
if (!frag->offset)
/* On first frag (offset == 0), replenish page (dma_info actually).
* Other frags that point to the same dma_info (with a different
/* On first frag (offset == 0), replenish page (alloc_unit actually).
* Other frags that point to the same alloc_unit (with a different
* offset) should just use the new one without replenishing again
* by themselves.
*/
err = mlx5e_page_alloc(rq, frag->di);
err = mlx5e_page_alloc(rq, frag->au);
return err;
}
......@@ -369,7 +369,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
bool recycle)
{
if (frag->last_in_page)
mlx5e_page_release(rq, frag->di, recycle);
mlx5e_page_release(rq, frag->au, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
......@@ -385,6 +385,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
dma_addr_t addr;
u16 headroom;
err = mlx5e_get_rx_frag(rq, frag);
......@@ -392,8 +393,9 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
goto free_frags;
headroom = i == 0 ? rq->buff.headroom : 0;
wqe->data[i].addr = cpu_to_be64(frag->di->addr +
frag->offset + headroom);
addr = rq->xsk_pool ? xsk_buff_xdp_get_frame_dma(frag->au->xsk) :
page_pool_get_dma_addr(frag->au->page);
wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
return 0;
......@@ -458,36 +460,35 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
unsigned int truesize)
{
dma_sync_single_for_cpu(rq->pdev,
di->addr + frag_offset,
len, DMA_FROM_DEVICE);
page_ref_inc(di->page);
dma_addr_t addr = page_pool_get_dma_addr(au->page);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
di->page, frag_offset, len, truesize);
au->page, frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
const void *from = page_address(page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
DMA_FROM_DEVICE);
dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
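The sizeof(long) rounding in mlx5e_copy_skb_header() trades copying a few extra bytes for a word-aligned memcpy length. A trivial standalone illustration of that rounding (ALIGN() here is assumed to match the kernel macro for power-of-two alignments):

```c
#include <stddef.h>
#include <stdio.h>

/* Round-up-to-multiple macro for power-of-two alignments. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int headlen = 42;	/* bytes actually needed */
	size_t copy_len = ALIGN((size_t)headlen, sizeof(long));

	printf("headlen=%u copy_len=%zu\n", headlen, copy_len);	/* 48 on LP64 */
	return 0;
}
```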
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
struct mlx5e_dma_info *dma_info = wi->dma_info;
int i;
/* A common case for AF_XDP. */
......@@ -498,7 +499,7 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
mlx5e_page_release(rq, &dma_info[i], recycle);
mlx5e_page_release(rq, &alloc_units[i], recycle);
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
......@@ -583,11 +584,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
err = mlx5e_page_alloc(rq, dma_info);
union mlx5e_alloc_unit au;
err = mlx5e_page_alloc(rq, &au);
if (unlikely(err))
goto err_unmap;
addr = dma_info->addr;
page = dma_info->page;
page = dma_info->page = au.page;
addr = dma_info->addr = page_pool_get_dma_addr(au.page);
} else {
dma_info->addr = addr + header_offset;
dma_info->page = page;
......@@ -619,8 +622,12 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
while (--i >= 0) {
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
union mlx5e_alloc_unit au = {
.page = dma_info->page,
};
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
mlx5e_page_release(rq, dma_info, true);
mlx5e_page_release(rq, &au, true);
}
}
rq->stats->buff_alloc_err++;
......@@ -669,10 +676,11 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
struct mlx5e_dma_info *dma_info = &wi->dma_info[0];
union mlx5e_alloc_unit *au = &wi->alloc_units[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u32 offset; /* 17-bit value with MTT. */
u16 pi;
int err;
int i;
......@@ -694,13 +702,35 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, dma_info++) {
err = mlx5e_page_alloc(rq, dma_info);
if (unlikely(err))
goto err_unmap;
umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
if (unlikely(rq->mpwqe.unaligned)) {
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
dma_addr_t addr;
err = mlx5e_page_alloc(rq, au);
if (unlikely(err))
goto err_unmap;
/* Unaligned means XSK. */
addr = xsk_buff_xdp_get_frame_dma(au->xsk);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = rq->mkey_be,
.va = cpu_to_be64(addr),
};
}
} else {
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
dma_addr_t addr;
err = mlx5e_page_alloc(rq, au);
if (unlikely(err))
goto err_unmap;
addr = rq->xsk_pool ? xsk_buff_xdp_get_frame_dma(au->xsk) :
page_pool_get_dma_addr(au->page);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
}
}
bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
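In the unaligned (XSK) branch above, each UMR entry is a 16-byte KSM carrying the mkey plus an arbitrary 64-bit VA, while the aligned branch keeps 8-byte MTTs whose ptag packs a page address and a permission bit; the next hunk then expresses xlt_offset in octwords, where one octword holds two MTTs but a single KSM. A standalone model of those entry sizes and of the offset conversion (host-endian integers and illustrative names, mirroring the MLX5_ALIGNED_MTTS_OCTW()/MLX5_KSM_OCTW() macros):

```c
#include <stdint.h>
#include <stdio.h>

/* Big-endian fields are modeled as plain host integers for brevity. */
struct model_mtt { uint64_t ptag; };                       /*  8 bytes: addr | perm bit */
struct model_ksm { uint32_t reserved, key; uint64_t va; }; /* 16 bytes: lkey + VA */

/* An octword is 16 bytes: it holds two MTTs but exactly one KSM, which is
 * why the MTT offset is aligned and halved while the KSM offset is used
 * as-is. */
static uint32_t mtt_octw(uint32_t mtts) { return ((mtts + 7) & ~7u) / 2; }
static uint32_t ksm_octw(uint32_t ksms) { return ksms; }

int main(void)
{
	uint32_t entries_per_wqe = 64, ix = 3;

	printf("sizeof(mtt)=%zu sizeof(ksm)=%zu\n",
	       sizeof(struct model_mtt), sizeof(struct model_ksm));
	printf("MTT xlt_offset: %u octwords\n", mtt_octw(ix * entries_per_wqe));
	printf("KSM xlt_offset: %u octwords\n", ksm_octw(ix * entries_per_wqe));
	return 0;
}
```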
......@@ -709,8 +739,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
umr_wqe->uctrl.xlt_offset =
cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(ix * rq->mpwqe.mtts_per_wqe));
offset = ix * rq->mpwqe.mtts_per_wqe;
if (!rq->mpwqe.unaligned)
offset = MLX5_ALIGNED_MTTS_OCTW(offset);
umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
......@@ -726,8 +759,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
dma_info--;
mlx5e_page_release(rq, dma_info, true);
au--;
mlx5e_page_release(rq, au, true);
}
err:
......@@ -760,8 +793,12 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
hd_info = &shampo->info[index];
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->page != deleted_page) {
union mlx5e_alloc_unit au = {
.page = hd_info->page,
};
deleted_page = hd_info->page;
mlx5e_page_release(rq, hd_info, false);
mlx5e_page_release(rq, &au, false);
}
}
......@@ -1536,19 +1573,21 @@ static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = wi->di;
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
dma_addr_t addr;
u32 frag_size;
va = page_address(di->page) + wi->offset;
va = page_address(au->page) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
......@@ -1558,7 +1597,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
......@@ -1571,7 +1610,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
return NULL;
/* queue up for recycling/reuse */
page_ref_inc(di->page);
page_ref_inc(au->page);
return skb;
}
......@@ -1582,20 +1621,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_dma_info *di = wi->di;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
void *va;
va = page_address(di->page) + wi->offset;
va = page_address(au->page) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, DMA_FROM_DEVICE);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
......@@ -1611,11 +1652,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
while (cqe_bcnt) {
skb_frag_t *frag;
di = wi->di;
au = wi->au;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, DMA_FROM_DEVICE);
if (!xdp_buff_has_frags(&xdp)) {
......@@ -1628,11 +1670,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
frag = &sinfo->frags[sinfo->nr_frags++];
__skb_frag_set_page(frag, di->page);
__skb_frag_set_page(frag, au->page);
skb_frag_off_set(frag, wi->offset);
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(di->page))
if (page_is_pfmemalloc(au->page))
xdp_buff_set_frag_pfmemalloc(&xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
......@@ -1643,10 +1685,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
}
di = head_wi->di;
au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
......@@ -1663,7 +1705,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
if (unlikely(!skb))
return NULL;
page_ref_inc(di->page);
page_ref_inc(au->page);
if (unlikely(xdp_buff_has_frags(&xdp))) {
int i;
......@@ -1858,8 +1900,8 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
u32 data_bcnt, u32 data_offset)
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
......@@ -1873,12 +1915,12 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_i
else
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
mlx5e_add_skb_frag(rq, skb, di, data_offset,
mlx5e_add_skb_frag(rq, skb, au, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
di++;
au++;
}
}
......@@ -1886,12 +1928,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_dma_info *di = &wi->dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
struct mlx5e_dma_info *head_di = di;
union mlx5e_alloc_unit *head_au = au;
struct sk_buff *skb;
dma_addr_t addr;
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
......@@ -1904,13 +1947,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
if (unlikely(frag_offset >= PAGE_SIZE)) {
di++;
au++;
frag_offset -= PAGE_SIZE;
}
mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
addr = page_pool_get_dma_addr(head_au->page);
mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
......@@ -1922,12 +1967,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
struct mlx5e_dma_info *di = &wi->dma_info[page_idx];
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
dma_addr_t addr;
u32 frag_size;
/* Check packet size. Note LRO doesn't use linear SKB */
......@@ -1936,11 +1982,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
va = page_address(di->page) + head_offset;
va = page_address(au->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
......@@ -1950,7 +1997,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
......@@ -1966,7 +2013,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
/* queue up for recycling/reuse */
page_ref_inc(di->page);
page_ref_inc(au->page);
return skb;
}
......@@ -2011,7 +2058,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
prefetchw(skb->data);
mlx5e_copy_skb_header(rq->pdev, skb, head,
mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
......@@ -2062,8 +2109,12 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
u64 addr = shampo->info[header_index].addr;
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
union mlx5e_alloc_unit au = {
.page = shampo->info[header_index].page,
};
shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
mlx5e_page_release(rq, &shampo->info[header_index], true);
mlx5e_page_release(rq, &au, true);
}
bitmap_clear(shampo->bitmap, header_index, 1);
}
......@@ -2084,7 +2135,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5e_dma_info *di;
union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
......@@ -2134,8 +2185,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
di = &wi->dma_info[page_idx];
mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
au = &wi->alloc_units[page_idx];
mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
......@@ -2431,7 +2482,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
mlx5e_rx_is_linear_skb(params, NULL) ?
mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
......@@ -2485,7 +2536,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
......
......@@ -143,6 +143,36 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
size_t item_size, size_t num_items,
const char *func, int line)
{
int inlen;
if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
__func__, func, line, fixed, item_size, num_items);
return -ENOMEM;
}
if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
__func__, func, line, fixed, item_size, num_items);
return -ENOMEM;
}
if (check_add_overflow((int)fixed, inlen, &inlen)) {
mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
__func__, func, line, fixed, item_size, num_items);
return -ENOMEM;
}
return inlen;
}
#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
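MLX5_FLEXIBLE_INLEN() guards the common "fixed command header plus N variable entries" inbox-size computation against integer overflow before the buffer is allocated. A compilable userspace model of the same checks, returning -1 instead of -ENOMEM and dropping the mlx5_core_err() logging, built on the compiler builtins that check_{mul,add}_overflow() wrap:

```c
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Standalone sketch of mlx5_flexible_inlen(); not the kernel implementation. */
static int flexible_inlen(size_t fixed, size_t item_size, size_t num_items)
{
	int inlen;

	if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX)
		return -1;
	if (__builtin_mul_overflow((int)item_size, (int)num_items, &inlen))
		return -1;
	if (__builtin_add_overflow((int)fixed, inlen, &inlen))
		return -1;
	return inlen;
}

int main(void)
{
	/* Typical use: fixed header plus a variable number of entries. */
	printf("%d\n", flexible_inlen(64, 16, 128));            /* 64 + 16 * 128 = 2112 */
	printf("%d\n", flexible_inlen(64, 1u << 20, 1u << 20)); /* overflow -> -1 */
	return 0;
}
```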
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
......
......@@ -162,6 +162,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
......@@ -476,6 +478,12 @@ struct mlx5_klm {
__be64 va;
};
struct mlx5_ksm {
__be32 reserved;
__be32 key;
__be64 va;
};
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
......
......@@ -9,6 +9,9 @@
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
#ifdef CONFIG_XDP_SOCKETS
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
......@@ -104,13 +107,6 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
static inline void xsk_buff_discard(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
xp_release(xskb);
}
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
......
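Moving XDP_UMEM_MIN_CHUNK_SHIFT/SIZE into xdp_sock_drv.h (above) lets a driver reason about the smallest possible XSK frame when sizing its RX buffer strides. A standalone sketch of that kind of driver-side check, under the assumption that aligned-mode chunk sizes are powers of two between the minimum chunk size and PAGE_SIZE (the helper name is hypothetical):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

/* Aligned-mode XSK frame sizes are powers of two no smaller than the
 * minimum chunk size, so a driver may adopt the frame size itself as its
 * buffer stride instead of a full PAGE_SIZE. Illustrative helper only. */
static bool pick_log_stride(uint32_t frame_size, uint8_t *log_stride)
{
	if (frame_size < XDP_UMEM_MIN_CHUNK_SIZE ||
	    (frame_size & (frame_size - 1)))	/* not a power of two */
		return false;
	*log_stride = (uint8_t)__builtin_ctz(frame_size);
	return true;
}

int main(void)
{
	uint8_t log_stride = 0;

	if (pick_log_stride(2048, &log_stride))
		printf("frame 2048 -> log_stride %u\n", log_stride);	/* 11 */
	if (!pick_log_stride(3072, &log_stride))
		printf("frame 3072 rejected (needs unaligned mode)\n");
	return 0;
}
```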
......@@ -19,8 +19,6 @@
#include "xdp_umem.h"
#include "xsk_queue.h"
#define XDP_UMEM_MIN_CHUNK_SIZE 2048
static DEFINE_IDA(umem_ida);
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
......