Commit f72300c5 authored by Haggai Abramovsky, committed by Doug Ledford

IB/mlx5: Expose CQE version to user-space

Per user context, work with CQE version that both the user-space
and the kernel support. Report this CQE version via the response of
the alloc_ucontext command.
Signed-off-by: Haggai Abramovsky <hagaya@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent cfb5e088
...@@ -869,7 +869,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -869,7 +869,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (req.total_num_uuars == 0) if (req.total_num_uuars == 0)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (req.comp_mask) if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
if (reqlen > sizeof(req) && if (reqlen > sizeof(req) &&
...@@ -892,6 +892,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -892,6 +892,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
resp.cqe_version = min_t(__u8,
(__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
req.max_cqe_version);
resp.response_length = min(offsetof(typeof(resp), response_length) + resp.response_length = min(offsetof(typeof(resp), response_length) +
sizeof(resp.response_length), udata->outlen); sizeof(resp.response_length), udata->outlen);
...@@ -945,8 +948,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -945,8 +948,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.tot_uuars = req.total_num_uuars; resp.tot_uuars = req.total_num_uuars;
resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
if (field_avail(typeof(resp), reserved2, udata->outlen)) if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.reserved2); resp.response_length += sizeof(resp.cqe_version);
if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |= resp.comp_mask |=
...@@ -954,7 +957,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -954,7 +957,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.hca_core_clock_offset = resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % offsetof(struct mlx5_init_seg, internal_timer_h) %
PAGE_SIZE; PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset); resp.response_length += sizeof(resp.hca_core_clock_offset) +
sizeof(resp.reserved2) +
sizeof(resp.reserved3);
} }
err = ib_copy_to_udata(udata, &resp, resp.response_length); err = ib_copy_to_udata(udata, &resp, resp.response_length);
...@@ -965,6 +970,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -965,6 +970,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
uuari->num_low_latency_uuars = req.num_low_latency_uuars; uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars; uuari->uars = uars;
uuari->num_uars = num_uars; uuari->num_uars = num_uars;
context->cqe_version = resp.cqe_version;
return &context->ibucontext; return &context->ibucontext;
out_uars: out_uars:
......
...@@ -69,6 +69,10 @@ struct mlx5_ib_alloc_ucontext_req_v2 { ...@@ -69,6 +69,10 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u32 num_low_latency_uuars; __u32 num_low_latency_uuars;
__u32 flags; __u32 flags;
__u32 comp_mask; __u32 comp_mask;
__u8 max_cqe_version;
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
}; };
enum mlx5_ib_alloc_ucontext_resp_mask { enum mlx5_ib_alloc_ucontext_resp_mask {
...@@ -89,7 +93,9 @@ struct mlx5_ib_alloc_ucontext_resp { ...@@ -89,7 +93,9 @@ struct mlx5_ib_alloc_ucontext_resp {
__u16 reserved1; __u16 reserved1;
__u32 comp_mask; __u32 comp_mask;
__u32 response_length; __u32 response_length;
__u32 reserved2; __u8 cqe_version;
__u8 reserved2;
__u16 reserved3;
__u64 hca_core_clock_offset; __u64 hca_core_clock_offset;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment