Commit 168723c1 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: xsk: Use umr_mode to calculate striding RQ parameters

Instead of passing the unaligned flag, pass an enum that indicates the
UMR mode. The next commit will add the third mode (KLM for certain
configurations of XSK), which will be added to this enum instead of
adding another bool flag everywhere.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent cfb4d09c
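
To illustrate the pattern (a minimal, self-contained C sketch, not the driver code; the names and entry sizes below are simplified stand-ins for enum mlx5e_mpwrq_umr_mode, sizeof(struct mlx5_mtt) and sizeof(struct mlx5_ksm)), an enum parameter lets each helper switch on the mode, so the upcoming KLM mode becomes one more enumerator and one more case instead of another bool threaded through every signature:

#include <stdio.h>

/* Simplified stand-in for the driver's enum mlx5e_mpwrq_umr_mode. */
enum umr_mode {
	UMR_MODE_ALIGNED,	/* MTT-based mapping */
	UMR_MODE_UNALIGNED,	/* KSM-based mapping */
	/* a future KLM mode would be appended here */
};

/* Returns the per-entry size for the given mode; the values are example
 * placeholders for sizeof(struct mlx5_mtt) and sizeof(struct mlx5_ksm). */
static unsigned int umr_entry_size(enum umr_mode mode)
{
	switch (mode) {
	case UMR_MODE_ALIGNED:
		return 8;
	case UMR_MODE_UNALIGNED:
		return 16;
	}
	return 0;	/* unknown mode: mirrors the driver's WARN-and-return-0 fallback */
}

int main(void)
{
	printf("aligned entry size:   %u bytes\n", umr_entry_size(UMR_MODE_ALIGNED));
	printf("unaligned entry size: %u bytes\n", umr_entry_size(UMR_MODE_UNALIGNED));
	return 0;
}

The same enum value also selects the mkey access mode and octoword layout in the hunks below, which is why a single parameter replaces the old bool in every helper.
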
@@ -681,6 +681,11 @@ struct mlx5e_hw_gro_data {
 	int second_ip_id;
 };
 
+enum mlx5e_mpwrq_umr_mode {
+	MLX5E_MPWRQ_UMR_MODE_ALIGNED,
+	MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
+};
+
 struct mlx5e_rq {
 	/* data path */
 	union {
@@ -708,7 +713,7 @@ struct mlx5e_rq {
 			u8 pages_per_wqe;
 			u8 umr_wqebbs;
 			u8 mtts_per_wqe;
-			u8 unaligned;
+			u8 umr_mode;
 			struct mlx5e_shampo_hd *shampo;
 		} mpwqe;
 	};
@@ -1008,7 +1013,7 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
-					    bool unaligned);
+					    enum mlx5e_mpwrq_umr_mode umr_mode);
 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
 void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
...
@@ -27,9 +27,48 @@ u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xs
 	return max(req_page_shift, min_page_shift);
 }
 
-u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+	/* Different memory management schemes use different mechanisms to map
+	 * user-mode memory. The stricter guarantees we have, the faster
+	 * mechanisms we use:
+	 * 1. MTT - direct mapping in page granularity.
+	 * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
+	 *    all mappings have the same size.
+	 */
+	bool unaligned = xsk ? xsk->unaligned : false;
+
+	/* XSK frames can start at arbitrary unaligned locations, but they all
+	 * have the same size which is a power of two. It allows to optimize to
+	 * one KSM per frame.
+	 */
+	if (unaligned)
+		return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
+
+	/* XSK: frames are naturally aligned, MTT can be used.
+	 * Non-XSK: Allocations happen in units of CPU pages, therefore, the
+	 * mappings are naturally aligned.
+	 */
+	return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+}
+
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
 {
-	u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
+	switch (mode) {
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+		return sizeof(struct mlx5_mtt);
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+		return sizeof(struct mlx5_ksm);
+	}
+	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
+	return 0;
+}
+
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+			  enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
 	u8 max_pages_per_wqe, max_log_mpwqe_size;
 	u16 max_wqe_size;
 
@@ -44,9 +83,10 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unalig
 	return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
 }
 
-u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+			     enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned);
+	u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
 	u8 pages_per_wqe;
 
 	pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
@@ -59,10 +99,11 @@ u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool una
 	return pages_per_wqe;
 }
 
-u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+			   enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	u8 umr_entry_size = unaligned ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
-	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned);
+	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
 	u16 umr_wqe_sz;
 
 	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
@@ -73,25 +114,30 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unali
 	return umr_wqe_sz;
 }
 
-u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+			  enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, unaligned),
+	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
 			    MLX5_SEND_WQE_BB);
 }
 
-u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+			    enum mlx5e_mpwrq_umr_mode umr_mode)
 {
+	u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+
 	/* Add another page as a buffer between WQEs. This page will absorb
 	 * write overflow by the hardware, when receiving packets larger than
 	 * MTU. These oversize packets are dropped by the driver at a later
 	 * stage.
 	 */
-	return MLX5_ALIGN_MTTS(mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, unaligned) + 1);
+	return MLX5_ALIGN_MTTS(pages_per_wqe + 1);
 }
 
-u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned)
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+				enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	if (unaligned)
+	if (umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)
 		return min(MLX5E_MAX_RQ_NUM_KSMS,
 			   1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
 
@@ -99,18 +145,19 @@ u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned)
 }
 
 static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
-				      bool unaligned)
+				      enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, unaligned);
-	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, unaligned);
+	u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
+	u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
 
 	return ilog2(max_entries / mtts_per_wqe);
 }
 
-u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned)
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+			       enum mlx5e_mpwrq_umr_mode umr_mode)
 {
-	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned) +
-		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
+	return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
+		mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
 		MLX5E_ORDER2_MAX_PACKET_MTU;
 }
 
@@ -171,10 +218,10 @@ static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
 				       struct mlx5e_xsk_param *xsk)
 {
 	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 
-	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
+	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
 		order_base_2(linear_stride_sz);
 }
 
@@ -200,10 +247,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
 
 static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
 					  u8 log_stride_sz, u8 log_num_strides,
-					  u8 page_shift, bool unaligned)
+					  u8 page_shift,
+					  enum mlx5e_mpwrq_umr_mode umr_mode)
 {
 	if (log_stride_sz + log_num_strides !=
-	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned))
+	    mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
 		return false;
 
 	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
@@ -223,8 +271,8 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 				  struct mlx5e_params *params,
 				  struct mlx5e_xsk_param *xsk)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 	u8 log_num_strides;
 	u8 log_stride_sz;
 	u8 log_wqe_sz;
@@ -233,7 +281,7 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 		return false;
 
 	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
-	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned);
+	log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
 
 	if (log_wqe_sz < log_stride_sz)
 		return false;
@@ -242,19 +290,19 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 
 	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
 					     log_num_strides, page_shift,
-					     unaligned);
+					     umr_mode);
 }
 
 u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
 			       struct mlx5e_params *params,
 			       struct mlx5e_xsk_param *xsk)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
-	bool unaligned = xsk ? xsk->unaligned : false;
 
 	log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
 	page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, unaligned);
+	max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
 
 	/* Numbers are unsigned, don't subtract to avoid underflow. */
 	if (params->log_rq_mtu_frames <
@@ -308,10 +356,10 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
 				   struct mlx5e_params *params,
 				   struct mlx5e_xsk_param *xsk)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 
-	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, unaligned) -
+	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
 		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 }
 
@@ -460,9 +508,10 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
 
 int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
 
-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, false))
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
 		return -EOPNOTSUPP;
 
 	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
@@ -474,11 +523,12 @@ int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params
 int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
 			     struct mlx5e_xsk_param *xsk)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
 	bool unaligned = xsk ? xsk->unaligned : false;
 	u16 max_mtu_pkts;
 
-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, xsk->unaligned))
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
 		return -EOPNOTSUPP;
 
 	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
@@ -781,16 +831,16 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
 		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
 		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+		enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 		u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-		bool unaligned = xsk ? xsk->unaligned : false;
 
 		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
 						   log_wqe_num_of_strides,
-						   page_shift, unaligned)) {
+						   page_shift, umr_mode)) {
 			mlx5_core_err(mdev,
-				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, unaligned %d\n",
+				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
 				      log_wqe_stride_size, log_wqe_num_of_strides,
-				      unaligned);
+				      umr_mode);
 			return -EINVAL;
 		}
@@ -974,11 +1024,11 @@ static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
 					struct mlx5e_params *params,
 					struct mlx5e_xsk_param *xsk)
 {
+	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-	bool unaligned = xsk ? xsk->unaligned : false;
 	u8 umr_wqebbs;
 
-	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);
+	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
 
 	return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
 }
...
@@ -56,13 +56,23 @@ struct mlx5e_create_sq_param {
 /* Striding RQ dynamic parameters */
 
 u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
-u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
-u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
-u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
-u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
-u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev, bool unaligned);
-u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, bool unaligned);
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+			  enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+			     enum mlx5e_mpwrq_umr_mode umr_mode);
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+			   enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+			  enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+			    enum mlx5e_mpwrq_umr_mode umr_mode);
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+				enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+			       enum mlx5e_mpwrq_umr_mode umr_mode);
 
 /* Parameter calculations */
 
...
@@ -41,7 +41,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
 
-	if (unlikely(rq->mpwqe.unaligned)) {
+	if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
 		for (i = 0; i < batch; i++) {
 			dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
 
@@ -67,7 +67,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
 
 	offset = ix * rq->mpwqe.mtts_per_wqe;
-	if (likely(!rq->mpwqe.unaligned))
+	if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
 		offset = MLX5_ALIGNED_MTTS_OCTW(offset);
 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
 
...
@@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
 	/* Limitation for regular RQ. XSK RQ may clamp the queue length in
 	 * mlx5e_mpwqe_get_log_rq_size.
 	 */
-	u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev, PAGE_SHIFT, false);
+	u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev,
+							    PAGE_SHIFT,
+							    MLX5E_MPWRQ_UMR_MODE_ALIGNED);
 
 	param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
 					   max_log_mpwrq_pkts);
...
@@ -69,7 +69,7 @@
 #include "en/trap.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
-					    bool unaligned)
+					    enum mlx5e_mpwrq_umr_mode umr_mode)
 {
 	u16 umr_wqebbs, max_wqebbs;
 	bool striding_rq_umr;
@@ -79,7 +79,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_
 	if (!striding_rq_umr)
 		return false;
 
-	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, unaligned);
+	umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
 	max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
 	/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
 	 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
@@ -203,6 +203,18 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
 	mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
 }
 
+static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+	switch (umr_mode) {
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+		return MLX5_MTT_OCTW(entries);
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+		return MLX5_KSM_OCTW(entries);
+	}
+	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+	return 0;
+}
+
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 				       struct mlx5e_icosq *sq,
 				       struct mlx5e_umr_wqe *wqe)
@@ -213,7 +225,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	u8 ds_cnt;
 
 	ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
-						     rq->mpwqe.unaligned),
+						     rq->mpwqe.umr_mode),
 			      MLX5_SEND_WQE_DS);
 
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
@@ -221,8 +233,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 	cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
 
 	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
-	octowords = rq->mpwqe.unaligned ? MLX5_KSM_OCTW(rq->mpwqe.pages_per_wqe) :
-		MLX5_MTT_OCTW(rq->mpwqe.pages_per_wqe);
+	octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
 	ucseg->xlt_octowords = cpu_to_be16(octowords);
 	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
 }
@@ -283,9 +294,23 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 	return 0;
 }
 
+static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+	switch (umr_mode) {
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+		return MLX5_MKC_ACCESS_MODE_MTT;
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+		return MLX5_MKC_ACCESS_MODE_KSM;
+	}
+	WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+	return 0;
+}
+
 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 				 u32 npages, u8 page_shift, u32 *umr_mkey,
-				 dma_addr_t filler_addr, bool unaligned)
+				 dma_addr_t filler_addr,
+				 enum mlx5e_mpwrq_umr_mode umr_mode)
 {
 	struct mlx5_mtt *mtt;
 	struct mlx5_ksm *ksm;
@@ -296,14 +321,16 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	int err;
 	int i;
 
-	if (unaligned && !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+	if (umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED &&
+	    !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
 		mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
 		return -EINVAL;
 	}
 
+	octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
+
 	inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
-				    unaligned ? sizeof(*ksm) : sizeof(*mtt),
-				    npages);
+				    MLX5_OCTWORD, octwords);
 	if (inlen < 0)
 		return inlen;
@@ -311,16 +338,13 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	if (!in)
 		return -ENOMEM;
 
-	octwords = unaligned ? MLX5_KSM_OCTW(npages) : MLX5_MTT_OCTW(npages);
-
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, lw, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
-	MLX5_SET(mkc, mkc, access_mode_1_0,
-		 unaligned ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
 	mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
@@ -335,19 +359,22 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
 	 * to the default page.
 	 */
-	if (unaligned) {
+	switch (umr_mode) {
+	case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
 		ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
 		for (i = 0; i < npages; i++)
 			ksm[i] = (struct mlx5_ksm) {
 				.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
 				.va = cpu_to_be64(filler_addr),
 			};
-	} else {
+		break;
+	case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
 		mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
 		for (i = 0; i < npages; i++)
 			mtt[i] = (struct mlx5_mtt) {
 				.ptag = cpu_to_be64(filler_addr),
 			};
+		break;
 	}
 
 	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
@@ -396,7 +423,7 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 	u32 umr_mkey;
 	int err;
 
-	max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.unaligned);
+	max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
 
 	/* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
 	if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
@@ -408,7 +435,7 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 
 	err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
 				    &umr_mkey, rq->wqe_overflow.addr,
-				    rq->mpwqe.unaligned);
+				    rq->mpwqe.umr_mode);
 	rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
 	return err;
 }
@@ -644,16 +671,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
 		rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-		rq->mpwqe.unaligned = xsk ? xsk->unaligned : false;
+		rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 		rq->mpwqe.pages_per_wqe =
 			mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
-						  rq->mpwqe.unaligned);
+						  rq->mpwqe.umr_mode);
 		rq->mpwqe.umr_wqebbs =
 			mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
-					       rq->mpwqe.unaligned);
+					       rq->mpwqe.umr_mode);
 		rq->mpwqe.mtts_per_wqe =
 			mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
-						 rq->mpwqe.unaligned);
+						 rq->mpwqe.umr_mode);
 
 		pool_size = rq->mpwqe.pages_per_wqe <<
 			mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
@@ -5012,7 +5039,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
 	    !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
 	    !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, false))
+	    mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+						   MLX5E_MPWRQ_UMR_MODE_ALIGNED))
 		netdev->vlan_features |= NETIF_F_LRO;
 
 	netdev->hw_features = netdev->vlan_features;
...
@@ -1288,4 +1288,8 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
 	return mlx5_is_roce_on(dev);
 }
 
+enum {
+	MLX5_OCTWORD = 16,
+};
+
 #endif /* MLX5_DRIVER_H */