Commit f3ca0ab1 authored by Jason Gunthorpe

Merge branch 'mini_cqe' into git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma for-next

Leon Romanovsky says:

====================
Introduce a new mlx5-internal CQE format - the mini-CQE. It is a CQE in
compressed form that holds the data needed to extract a single full CQE:
a stride index, byte count and packet checksum.
====================

* mini_cqe:
  IB/mlx5: Introduce a new mini-CQE format
  IB/mlx5: Refactor CQE compression response
  net/mlx5: Exposing a new mini-CQE format
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parents 5ef8c0c1 6f1006a4
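For orientation before the diff, the sketch below shows roughly what a compressed entry of the new checksum + stride-index format could carry. The struct name and exact 8-byte layout are illustrative assumptions for this note, not the hardware definition; they only mirror the three values named in the cover letter.

/*
 * Illustrative only: an assumed 8-byte mini-CQE for the new
 * checksum + stride-index format. The real layout is owned by the
 * device; a consumer expands each such entry back into a full CQE.
 * Fields are big-endian on the wire.
 */
#include <stdint.h>

struct mini_cqe_csum_stridx_example {
        uint16_t checksum;      /* packet checksum */
        uint16_t stride_index;  /* stride consumed in the receive queue */
        uint32_t byte_cnt;      /* byte count of the received packet */
};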
@@ -751,6 +751,28 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
         return 0;
 }
 
+enum {
+        MLX5_CQE_RES_FORMAT_HASH = 0,
+        MLX5_CQE_RES_FORMAT_CSUM = 1,
+        MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
+};
+
+static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
+{
+        switch (format) {
+        case MLX5_IB_CQE_RES_FORMAT_HASH:
+                return MLX5_CQE_RES_FORMAT_HASH;
+        case MLX5_IB_CQE_RES_FORMAT_CSUM:
+                return MLX5_CQE_RES_FORMAT_CSUM;
+        case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
+                if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+                        return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
+                return -EOPNOTSUPP;
+        default:
+                return -EINVAL;
+        }
+}
+
 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                           struct ib_ucontext *context, struct mlx5_ib_cq *cq,
                           int entries, u32 **cqb,
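A side note on why an explicit mapping is needed at all: the uverbs flags are powers of two, while the hardware encoding of the new format is 3, so the old log2-of-the-flag trick (removed in a later hunk) would pick the wrong value. A minimal user-space style sketch of the mismatch, using local stand-ins for the ABI constants:

/* Stand-ins mirroring the uverbs flags from mlx5-abi.h (illustrative). */
#include <stdio.h>

enum {
        FMT_HASH        = 1 << 0,       /* HW encoding 0 */
        FMT_CSUM        = 1 << 1,       /* HW encoding 1 */
        FMT_CSUM_STRIDX = 1 << 2,       /* HW encoding 3, not 2 */
};

int main(void)
{
        /* log2 of the flag gives 2, but the CQ context needs 3 here,
         * hence the explicit switch in mini_cqe_res_format_to_hw().
         * (__builtin_ctz is a GCC/Clang builtin.) */
        printf("log2(flag) = %d, required HW value = 3\n",
               __builtin_ctz(FMT_CSUM_STRIDX));
        return 0;
}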
@@ -816,6 +838,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
         *index = to_mucontext(context)->bfregi.sys_pages[0];
 
         if (ucmd.cqe_comp_en == 1) {
+                int mini_cqe_format;
+
                 if (!((*cqe_size == 128 &&
                        MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
                       (*cqe_size == 64 &&
@@ -826,20 +850,18 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                         goto err_cqb;
                 }
 
-                if (unlikely(!ucmd.cqe_comp_res_format ||
-                             !(ucmd.cqe_comp_res_format <
-                               MLX5_IB_CQE_RES_RESERVED) ||
-                             (ucmd.cqe_comp_res_format &
-                              (ucmd.cqe_comp_res_format - 1)))) {
-                        err = -EOPNOTSUPP;
-                        mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
-                                     ucmd.cqe_comp_res_format);
+                mini_cqe_format =
+                        mini_cqe_res_format_to_hw(dev,
+                                                  ucmd.cqe_comp_res_format);
+                if (mini_cqe_format < 0) {
+                        err = mini_cqe_format;
+                        mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
+                                    ucmd.cqe_comp_res_format, err);
                         goto err_cqb;
                 }
 
                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
-                MLX5_SET(cqc, cqc, mini_cqe_res_format,
-                         ilog2(ucmd.cqe_comp_res_format));
+                MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
         }
 
         if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
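For context on the ucmd fields used above, a hedged sketch of what a provider library might put in the create-CQ command. The two-field stand-in only mirrors the cqe_comp_en / cqe_comp_res_format usage visible in create_cq_user(); the real struct mlx5_ib_create_cq and its transport to the kernel are omitted.

/* Illustrative stand-in: only the two fields used by create_cq_user(). */
#include <stdint.h>

struct create_cq_cmd_example {
        uint8_t cqe_comp_en;            /* 1 = enable CQE compression */
        uint8_t cqe_comp_res_format;    /* exactly one format flag */
};

static const struct create_cq_cmd_example cmd = {
        .cqe_comp_en            = 1,
        .cqe_comp_res_format    = 1 << 2, /* MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX */
};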
@@ -982,13 +982,21 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
         }
 
         if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
-                resp.cqe_comp_caps.max_num =
-                        MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
-                        MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
-                resp.cqe_comp_caps.supported_format =
-                        MLX5_IB_CQE_RES_FORMAT_HASH |
-                        MLX5_IB_CQE_RES_FORMAT_CSUM;
                 resp.response_length += sizeof(resp.cqe_comp_caps);
+
+                if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+                        resp.cqe_comp_caps.max_num =
+                                MLX5_CAP_GEN(dev->mdev,
+                                             cqe_compression_max_num);
+                        resp.cqe_comp_caps.supported_format =
+                                MLX5_IB_CQE_RES_FORMAT_HASH |
+                                MLX5_IB_CQE_RES_FORMAT_CSUM;
+                        if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+                                resp.cqe_comp_caps.supported_format |=
+                                        MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
+                }
         }
 
         if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
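On the user side, a consumer that receives the extended query-device response could check the new bit before asking for the stride-index format. A minimal sketch, assuming 'caps' has been filled from resp.cqe_comp_caps (supported_format and max_num as in the hunk above); the struct and macro names here are local stand-ins:

/* Sketch: deciding whether CSUM_STRIDX compression can be requested.
 * Flag value mirrors mlx5-abi.h; max_num == 0 means compression is
 * not supported at all. */
#include <stdbool.h>
#include <stdint.h>

struct cqe_comp_caps_example {
        uint32_t max_num;
        uint32_t supported_format;
};

#define EX_CQE_RES_FORMAT_CSUM_STRIDX (1u << 2)

static bool can_use_csum_stridx(const struct cqe_comp_caps_example *caps)
{
        return caps->max_num > 0 &&
               (caps->supported_format & EX_CQE_RES_FORMAT_CSUM_STRIDX);
}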
@@ -1143,7 +1143,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
         u8         flex_parser_protocols[0x20];
         u8         reserved_at_560[0x20];
 
-        u8         reserved_at_580[0x3d];
+        u8         reserved_at_580[0x3c];
+        u8         mini_cqe_resp_stride_index[0x1];
         u8         cqe_128_always[0x1];
         u8         cqe_compression_128[0x1];
         u8         cqe_compression[0x1];
@@ -163,7 +163,7 @@ struct mlx5_ib_rss_caps {
 enum mlx5_ib_cqe_comp_res_format {
         MLX5_IB_CQE_RES_FORMAT_HASH        = 1 << 0,
         MLX5_IB_CQE_RES_FORMAT_CSUM        = 1 << 1,
-        MLX5_IB_CQE_RES_RESERVED           = 1 << 2,
+        MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
 };
 
 struct mlx5_ib_cqe_comp_caps {