Commit 5d6ff1ba authored by Yonatan Cohen, committed by Doug Ledford

IB/mlx5: Support scatter to CQE for DC transport type

Scatter to CQE is a HW offload that saves PCIe writes by scattering the
payload into the CQE itself.
This patch extends the existing scatter-to-CQE support to the DC transport
type (DCI/DCT). See the usage sketch below the sign-off block.
Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5a8336d9
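
Usage sketch (illustrative, not part of the commit): the kernel only enables
scatter to CQE when userspace passes MLX5_QP_FLAG_SCATTER_CQE in the create-QP
command, and with this patch that now covers DCI/DCT QPs as well. The minimal
rdma-core snippet below creates a 128-byte-CQE CQ and a fully signaled DCI QP,
which is the configuration under which the new requester path can scatter
64 bytes of payload into the CQE. The flag itself is requested by the mlx5
provider library rather than by application code (traditionally gated by the
MLX5_SCATTER_TO_CQE environment variable), and the device index, queue sizes,
and omitted cleanup are assumptions made for brevity.

/* Illustrative only: a DCI QP whose send CQ uses 128-byte CQEs and whose
 * work requests are all signaled, matching the conditions checked by the
 * new configure_requester_scat_cqe() helper. */
#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

int main(void)
{
        struct ibv_device **devs = ibv_get_device_list(NULL);
        struct ibv_context *ctx = devs ? ibv_open_device(devs[0]) : NULL;
        if (!ctx)
                return 1;

        struct ibv_pd *pd = ibv_alloc_pd(ctx);

        /* Ask the mlx5 provider for 128B CQEs so 64B of payload fits in the CQE. */
        struct ibv_cq_init_attr_ex cq_attr = { .cqe = 256 };
        struct mlx5dv_cq_init_attr dv_cq_attr = {
                .comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE,
                .cqe_size = 128,
        };
        struct ibv_cq_ex *cq = mlx5dv_create_cq(ctx, &cq_attr, &dv_cq_attr);

        /* DCI is a driver-specific QP type; sq_sig_all = 1 corresponds to the
         * IB_SIGNAL_ALL_WR requirement on the requester side. */
        struct ibv_qp_init_attr_ex qp_attr = {
                .qp_type = IBV_QPT_DRIVER,
                .send_cq = ibv_cq_ex_to_cq(cq),
                .recv_cq = ibv_cq_ex_to_cq(cq),
                .cap = { .max_send_wr = 64, .max_send_sge = 1 },
                .comp_mask = IBV_QP_INIT_ATTR_PD,
                .pd = pd,
                .sq_sig_all = 1,
        };
        struct mlx5dv_qp_init_attr dv_qp_attr = {
                .comp_mask = MLX5DV_QP_INIT_ATTR_MASK_DC,
                .dc_init_attr = { .dc_type = MLX5DV_DCTYPE_DCI },
        };
        struct ibv_qp *qp = mlx5dv_create_qp(ctx, &qp_attr, &dv_qp_attr);

        printf("DCI QP: %p\n", (void *)qp);
        /* Teardown omitted for brevity. */
        return qp ? 0 : 1;
}

Compile with -libverbs -lmlx5. Whether scatter to CQE actually engages still
depends on the sctr_data_cqe and dc_req_scat_data_cqe device capabilities
checked in the diff below.
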
drivers/infiniband/hw/mlx5/cq.c
@@ -1460,7 +1460,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	return err;
 }
 
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
 {
 	struct mlx5_ib_cq *cq;
...
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1127,7 +1127,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			  int page_shift, __be64 *pas, int access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
...
drivers/infiniband/hw/mlx5/qp.c
@@ -1053,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
 static int is_connected(enum ib_qp_type qp_type)
 {
-	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+	    qp_type == MLX5_IB_QPT_DCI)
 		return 1;
 
 	return 0;
@@ -1684,6 +1685,49 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	return err;
 }
 
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+					 void *qpc)
+{
+	int rcqe_sz;
+
+	if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+		return;
+
+	rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+	if (rcqe_sz == 128) {
+		MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+		return;
+	}
+
+	if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+		MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+					 struct ib_qp_init_attr *init_attr,
+					 void *qpc)
+{
+	enum ib_qp_type qpt = init_attr->qp_type;
+	int scqe_sz;
+
+	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+		return;
+
+	if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+		return;
+
+	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+
+	if (scqe_sz == 128) {
+		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+		return;
+	}
+
+	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1787,7 +1831,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			return err;
 
 		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
-		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+			qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
 		if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
 			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
 			    !tunnel_offload_supported(mdev)) {
@@ -1911,23 +1956,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
-		int rcqe_sz;
-		int scqe_sz;
-
-		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
-		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
-		if (rcqe_sz == 128)
-			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-		else
-			MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
-		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
-			if (scqe_sz == 128)
-				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
-			else
-				MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
-		}
+		configure_responder_scat_cqe(init_attr, qpc);
+		configure_requester_scat_cqe(dev, init_attr, qpc);
 	}
 
 	if (qp->rq.wqe_cnt) {
@@ -2302,6 +2332,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
 	MLX5_SET(dctc, dctc, user_index, uidx);
 
+	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+		configure_responder_scat_cqe(attr, dctc);
+
 	qp->state = IB_QPS_RESET;
 
 	return &qp->ibqp;
...
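
For reference, the selection rules spread across configure_responder_scat_cqe(),
configure_requester_scat_cqe() and their call sites can be condensed into the
standalone sketch below. The enum and function names are invented for
illustration; only the decision logic mirrors the patch, and it applies only
after userspace has requested MLX5_QP_FLAG_SCATTER_CQE and the sctr_data_cqe
capability check has passed.

/* Illustrative restatement of the scatter-to-CQE rules above; these types and
 * names are not the kernel's, only the decision logic mirrors the patch. */
#include <stdio.h>

enum scat_mode { SCAT_NONE, SCAT_32B, SCAT_64B };
enum qp_kind { KIND_RC, KIND_UC, KIND_UD, KIND_DCI, KIND_DCT };

/* Responder side (cs_res): DCI has no responder; DCT only scatters with
 * 128-byte CQEs; everything else falls back to 32-byte scatter. */
enum scat_mode responder_scat(enum qp_kind kind, int rcqe_sz)
{
        if (kind == KIND_DCI)
                return SCAT_NONE;
        if (rcqe_sz == 128)
                return SCAT_64B;
        return (kind != KIND_DCT) ? SCAT_32B : SCAT_NONE;
}

/* Requester side (cs_req): only for connected types with all WRs signaled;
 * a DCI requester needs the dc_req_scat_data_cqe capability for 32B scatter. */
enum scat_mode requester_scat(enum qp_kind kind, int scqe_sz,
                              int all_signaled, int dc_req_scat_cap)
{
        if (kind == KIND_UC || kind == KIND_UD || !all_signaled)
                return SCAT_NONE;
        if (scqe_sz == 128)
                return SCAT_64B;
        return (kind != KIND_DCI || dc_req_scat_cap) ? SCAT_32B : SCAT_NONE;
}

int main(void)
{
        /* RC responder with a 64B receive CQE scatters 32 bytes. */
        printf("RC responder: %d\n", responder_scat(KIND_RC, 64));
        /* DCT responder only scatters when the CQE is 128 bytes. */
        printf("DCT responder (64B CQE): %d\n", responder_scat(KIND_DCT, 64));
        /* DCI requester with 64B CQE needs dc_req_scat_data_cqe for 32B scatter. */
        printf("DCI requester (no cap): %d\n", requester_scat(KIND_DCI, 64, 1, 0));
        return 0;
}
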