Commit f32f5bd2 authored by Daniel Jurgens, committed by Saeed Mahameed

net/mlx5: Configure cache line size for start and end padding

There is a hardware feature that will pad the start or end of a DMA to
be cache line aligned to avoid RMWs on the last cache line. The default
cache line size setting for this feature is 64B. This change configures
the hardware to use 128B alignment on systems with 128B cache lines.

In addition, lower bound the MPWRQ stride in mlx5e by the HCA cache line:
the MPWRQ stride should be at least the HCA cache line size. The current
default is 64B, and when the HCA_CAP.cache_line_128byte capability is set,
the MPWRQ RX stride is automatically aligned to 128B.
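
As an illustration of the lower bound (a minimal standalone sketch; the
helper name mpwrq_log_stride is hypothetical, the driver expresses the
same rule through the MLX5_MPWRQ_* macros in the diff below):

	/* Sketch: log2 of the MPWRQ stride, lower-bounded by the HCA
	 * cache line: 2^6 = 64B by default, 2^7 = 128B when the device
	 * reports 128-byte cache lines.
	 */
	static inline u32 mpwrq_log_stride(bool cache_line_128byte, u32 req)
	{
		u32 min_log_stride = cache_line_128byte ? 7 : 6;

		return max_t(u32, min_log_stride, req);
	}

A requested 64B (log 6) stride is thus bumped to 128B (log 7) on a 128B
cache line device, while the 256B (log 8) stride used with CQE compression
already satisfies the bound and is left alone.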
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent a61d5ce9
@@ -70,8 +70,13 @@
 #define MLX5_RX_HEADROOM                NET_SKB_PAD
 
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE              6 /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
+	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
+	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
+#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
 #define MLX5_MPWRQ_LOG_WQE_SZ           18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER       (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                          MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
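For example, with MLX5_CAP_GEN(mdev, cache_line_128byte) == 1 the minimum
becomes 6 + 1 = 7 (128B strides), so MLX5_MPWRQ_DEF_LOG_STRIDE_SZ evaluates
to max_t(u32, 7, 6) = 7 while MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ stays at
max_t(u32, 7, 8) = 8; on a 64B cache line device both resolve to the old
constants 6 and 8.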
@@ -89,8 +89,8 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
 		priv->params.mpwqe_log_stride_sz =
 			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
-			MLX5_MPWRQ_LOG_STRIDE_SIZE;
+			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
+			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
 		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
 			priv->params.mpwqe_log_stride_sz;
 		break;
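Since MLX5_MPWRQ_LOG_WQE_SZ is fixed at 18, a larger stride simply means
fewer strides per WQE: log stride 7 (128B) gives 2^(18 - 7) = 2048 strides,
versus 2^(18 - 6) = 4096 strides of 64B with the old default.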
@@ -543,6 +543,12 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
 
+	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
+		MLX5_SET(cmd_hca_cap,
+			 set_hca_cap,
+			 cache_line_128byte,
+			 cache_line_size() == 128 ? 1 : 0);
+
 	err = set_caps(dev, set_ctx, set_sz,
 		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
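Note the MAX variant: MLX5_CAP_GEN_MAX() reads the maximum value the device
can support for a capability, whereas MLX5_CAP_GEN() reads the currently
enabled value. The driver therefore requests 128B padding only when the
device offers it and the host really has 128B cache lines
(cache_line_size() == 128, as on many ppc64 systems); on 64B hosts the
field is explicitly set to 0 and the default 64B padding is kept.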
@@ -804,10 +804,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_150[0xa];
 	u8         log_max_ra_res_qp[0x6];
 
-	u8         pad_cap[0x1];
+	u8         end_pad[0x1];
 	u8         cc_query_allowed[0x1];
 	u8         cc_modify_allowed[0x1];
-	u8         reserved_at_163[0xd];
+	u8         start_pad[0x1];
+	u8         cache_line_128byte[0x1];
+	u8         reserved_at_165[0xb];
 	u8         gid_table_size[0x10];
 
 	u8         out_of_seq_cnt[0x1];
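The bit widths still add up: the removed reserved_at_163[0xd] (13 bits) is
split into start_pad (bit 0x163), cache_line_128byte (bit 0x164), and
reserved_at_165[0xb] (11 bits), and 1 + 1 + 0xb = 0xd, so the offset of
gid_table_size (0x170) and everything after it is unchanged.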